From 05b76b597aedbc78e8260afc6b06e08bc5f3d56a Mon Sep 17 00:00:00 2001
From: Viktor Mitin
Date: Tue, 23 Jul 2019 13:23:12 +0300
Subject: [PATCH] clang-format checkpatch output example

clang-format checkpatch output example

Signed-off-by: Viktor Mitin
---
 xen/arch/arm/acpi/boot.c | 50 +-
 xen/arch/arm/acpi/domain_build.c | 93 +-
 xen/arch/arm/acpi/lib.c | 4 +-
 xen/arch/arm/alternative.c | 21 +-
 xen/arch/arm/arm32/asm-offsets.c | 90 +-
 xen/arch/arm/arm32/domain.c | 58 +-
 xen/arch/arm/arm32/domctl.c | 4 +-
 xen/arch/arm/arm32/insn.c | 37 +-
 xen/arch/arm/arm32/livepatch.c | 44 +-
 xen/arch/arm/arm32/traps.c | 3 +-
 xen/arch/arm/arm32/vfp.c | 17 +-
 xen/arch/arm/arm64/asm-offsets.c | 63 +-
 xen/arch/arm/arm64/domain.c | 61 +-
 xen/arch/arm/arm64/domctl.c | 6 +-
 xen/arch/arm/arm64/insn.c | 381 +-
 xen/arch/arm/arm64/lib/find_next_bit.c | 344 +-
 xen/arch/arm/arm64/livepatch.c | 59 +-
 xen/arch/arm/arm64/smpboot.c | 9 +-
 xen/arch/arm/arm64/traps.c | 12 +-
 xen/arch/arm/arm64/vfp.c | 6 +-
 xen/arch/arm/arm64/vsysreg.c | 118 +-
 xen/arch/arm/bootfdt.c | 120 +-
 xen/arch/arm/cpuerrata.c | 76 +-
 xen/arch/arm/cpufeature.c | 52 +-
 xen/arch/arm/decode.c | 13 +-
 xen/arch/arm/device.c | 8 +-
 xen/arch/arm/domain.c | 147 +-
 xen/arch/arm/domain_build.c | 306 +-
 xen/arch/arm/domctl.c | 8 +-
 xen/arch/arm/early_printk.c | 5 +-
 xen/arch/arm/efi/efi-dom0.c | 46 +-
 xen/arch/arm/gic-v2.c | 416 +-
 xen/arch/arm/gic-v3-its.c | 107 +-
 xen/arch/arm/gic-v3-lpi.c | 58 +-
 xen/arch/arm/gic-v3.c | 448 +-
 xen/arch/arm/gic-vgic.c | 61 +-
 xen/arch/arm/gic.c | 46 +-
 xen/arch/arm/guest_walk.c | 121 +-
 xen/arch/arm/guestcopy.c | 30 +-
 xen/arch/arm/hvm.c | 2 +-
 xen/arch/arm/io.c | 20 +-
 xen/arch/arm/irq.c | 52 +-
 xen/arch/arm/kernel.c | 108 +-
 xen/arch/arm/livepatch.c | 15 +-
 xen/arch/arm/mem_access.c | 75 +-
 xen/arch/arm/mm.c | 322 +-
 xen/arch/arm/monitor.c | 6 +-
 xen/arch/arm/p2m.c | 268 +-
 xen/arch/arm/percpu.c | 15 +-
 xen/arch/arm/physdev.c | 1 -
 xen/arch/arm/platform.c | 1 -
 xen/arch/arm/platform_hypercall.c | 12 +-
 xen/arch/arm/platforms/brcm.c | 63 +-
 xen/arch/arm/platforms/exynos5.c | 83 +-
 xen/arch/arm/platforms/midway.c | 34 +-
 xen/arch/arm/platforms/omap5.c | 85 +-
 xen/arch/arm/platforms/rcar2.c | 45 +-
 xen/arch/arm/platforms/seattle.c | 32 +-
 xen/arch/arm/platforms/sunxi.c | 81 +-
 xen/arch/arm/platforms/thunderx.c | 18 +-
 xen/arch/arm/platforms/vexpress.c | 73 +-
 xen/arch/arm/platforms/xgene-storm.c | 54 +-
 xen/arch/arm/platforms/xilinx-zynqmp-eemi.c | 27 +-
 xen/arch/arm/platforms/xilinx-zynqmp.c | 31 +-
 xen/arch/arm/psci.c | 19 +-
 xen/arch/arm/setup.c | 208 +-
 xen/arch/arm/smp.c | 3 +-
 xen/arch/arm/smpboot.c | 78 +-
 xen/arch/arm/sysctl.c | 4 +-
 xen/arch/arm/tee/optee.c | 181 +-
 xen/arch/arm/time.c | 51 +-
 xen/arch/arm/traps.c | 635 +-
 xen/arch/arm/vcpreg.c | 232 +-
 xen/arch/arm/vgic-v2.c | 199 +-
 xen/arch/arm/vgic-v3-its.c | 261 +-
 xen/arch/arm/vgic-v3.c | 414 +-
 xen/arch/arm/vgic.c | 62 +-
 xen/arch/arm/vgic/vgic-init.c | 10 +-
 xen/arch/arm/vgic/vgic-mmio-v2.c | 143 +-
 xen/arch/arm/vgic/vgic-mmio.c | 77 +-
 xen/arch/arm/vgic/vgic-v2.c | 12 +-
 xen/arch/arm/vgic/vgic.c | 43 +-
 xen/arch/arm/vpl011.c | 93 +-
 xen/arch/arm/vpsci.c | 29 +-
 xen/arch/arm/vsmc.c | 27 +-
 xen/arch/arm/vtimer.c | 51 +-
 xen/arch/arm/vuart.c | 29 +-
 xen/arch/x86/acpi/boot.c | 1061 +--
 xen/arch/x86/acpi/cpu_idle.c | 261 +-
 xen/arch/x86/acpi/cpufreq/cpufreq.c | 244 +-
 xen/arch/x86/acpi/cpufreq/powernow.c | 184 +-
 xen/arch/x86/acpi/cpuidle_menu.c | 118 +-
 xen/arch/x86/acpi/lib.c | 140 +-
 xen/arch/x86/acpi/power.c | 59 +-
 xen/arch/x86/acpi/suspend.c | 1 -
xen/arch/x86/alternative.c | 89 +- xen/arch/x86/apic.c | 357 +- xen/arch/x86/bitops.c | 72 +- xen/arch/x86/boot/cmdline.c | 24 +- xen/arch/x86/boot/mkelf32.c | 337 +- xen/arch/x86/boot/reloc.c | 27 +- xen/arch/x86/bzimage.c | 89 +- xen/arch/x86/compat.c | 10 +- xen/arch/x86/cpu/amd.c | 1258 ++-- xen/arch/x86/cpu/centaur.c | 80 +- xen/arch/x86/cpu/common.c | 1221 ++-- xen/arch/x86/cpu/intel.c | 497 +- xen/arch/x86/cpu/intel_cacheinfo.c | 492 +- xen/arch/x86/cpu/mcheck/amd_nonfatal.c | 323 +- xen/arch/x86/cpu/mcheck/barrier.c | 11 +- xen/arch/x86/cpu/mcheck/mcaction.c | 47 +- xen/arch/x86/cpu/mcheck/mce-apei.c | 130 +- xen/arch/x86/cpu/mcheck/mce.c | 259 +- xen/arch/x86/cpu/mcheck/mce_amd.c | 46 +- xen/arch/x86/cpu/mcheck/mce_intel.c | 199 +- xen/arch/x86/cpu/mcheck/mctelem.c | 708 +- xen/arch/x86/cpu/mcheck/non-fatal.c | 159 +- xen/arch/x86/cpu/mcheck/util.c | 2 +- xen/arch/x86/cpu/mcheck/vmce.c | 103 +- xen/arch/x86/cpu/mtrr/generic.c | 796 +-- xen/arch/x86/cpu/mtrr/main.c | 747 ++- xen/arch/x86/cpu/mwait-idle.c | 1917 +++--- xen/arch/x86/cpu/shanghai.c | 6 +- xen/arch/x86/cpu/vpmu.c | 83 +- xen/arch/x86/cpu/vpmu_amd.c | 132 +- xen/arch/x86/cpu/vpmu_intel.c | 144 +- xen/arch/x86/cpuid.c | 141 +- xen/arch/x86/crash.c | 16 +- xen/arch/x86/debug.c | 86 +- xen/arch/x86/delay.c | 5 +- xen/arch/x86/dmi_scan.c | 1230 ++-- xen/arch/x86/dom0_build.c | 84 +- xen/arch/x86/domain.c | 376 +- xen/arch/x86/domain_page.c | 34 +- xen/arch/x86/domctl.c | 230 +- xen/arch/x86/e820.c | 296 +- xen/arch/x86/efi/mkreloc.c | 85 +- xen/arch/x86/efi/stub.c | 28 +- xen/arch/x86/emul-i8254.c | 91 +- xen/arch/x86/extable.c | 128 +- xen/arch/x86/flushtlb.c | 48 +- xen/arch/x86/gdbstub.c | 32 +- xen/arch/x86/genapic/bigsmp.c | 45 +- xen/arch/x86/genapic/default.c | 12 +- xen/arch/x86/genapic/delivery.c | 36 +- xen/arch/x86/genapic/probe.c | 160 +- xen/arch/x86/genapic/x2apic.c | 42 +- xen/arch/x86/guest/pvh-boot.c | 23 +- xen/arch/x86/guest/xen.c | 11 +- xen/arch/x86/hpet.c | 92 +- xen/arch/x86/hvm/asid.c | 17 +- xen/arch/x86/hvm/dm.c | 173 +- xen/arch/x86/hvm/dom0_build.c | 123 +- xen/arch/x86/hvm/domain.c | 80 +- xen/arch/x86/hvm/emulate.c | 947 ++- xen/arch/x86/hvm/grant_table.c | 13 +- xen/arch/x86/hvm/hpet.c | 173 +- xen/arch/x86/hvm/hvm.c | 1042 ++- xen/arch/x86/hvm/hypercall.c | 128 +- xen/arch/x86/hvm/intercept.c | 63 +- xen/arch/x86/hvm/io.c | 54 +- xen/arch/x86/hvm/ioreq.c | 177 +- xen/arch/x86/hvm/irq.c | 171 +- xen/arch/x86/hvm/monitor.c | 38 +- xen/arch/x86/hvm/mtrr.c | 424 +- xen/arch/x86/hvm/nestedhvm.c | 48 +- xen/arch/x86/hvm/pmtimer.c | 80 +- xen/arch/x86/hvm/quirks.c | 68 +- xen/arch/x86/hvm/rtc.c | 171 +- xen/arch/x86/hvm/save.c | 82 +- xen/arch/x86/hvm/stdvga.c | 126 +- xen/arch/x86/hvm/svm/asid.c | 8 +- xen/arch/x86/hvm/svm/emulate.c | 16 +- xen/arch/x86/hvm/svm/intr.c | 38 +- xen/arch/x86/hvm/svm/nestedsvm.c | 468 +- xen/arch/x86/hvm/svm/svm.c | 536 +- xen/arch/x86/hvm/svm/svmdebug.c | 88 +- xen/arch/x86/hvm/svm/vmcb.c | 73 +- xen/arch/x86/hvm/vioapic.c | 138 +- xen/arch/x86/hvm/viridian/synic.c | 20 +- xen/arch/x86/hvm/viridian/time.c | 19 +- xen/arch/x86/hvm/viridian/viridian.c | 217 +- xen/arch/x86/hvm/vlapic.c | 368 +- xen/arch/x86/hvm/vm_event.c | 3 +- xen/arch/x86/hvm/vmsi.c | 150 +- xen/arch/x86/hvm/vmx/intr.c | 53 +- xen/arch/x86/hvm/vmx/realmode.c | 46 +- xen/arch/x86/hvm/vmx/vmcs.c | 458 +- xen/arch/x86/hvm/vmx/vmx.c | 811 +-- xen/arch/x86/hvm/vmx/vvmx.c | 360 +- xen/arch/x86/hvm/vpic.c | 45 +- xen/arch/x86/hvm/vpt.c | 43 +- xen/arch/x86/hypercall.c | 128 +- xen/arch/x86/i387.c | 93 +- 
xen/arch/x86/i8259.c | 118 +- xen/arch/x86/io_apic.c | 1145 ++-- xen/arch/x86/ioport_emulate.c | 79 +- xen/arch/x86/irq.c | 540 +- xen/arch/x86/livepatch.c | 30 +- xen/arch/x86/machine_kexec.c | 18 +- xen/arch/x86/microcode.c | 65 +- xen/arch/x86/microcode_amd.c | 122 +- xen/arch/x86/microcode_intel.c | 94 +- xen/arch/x86/mm.c | 1210 ++-- xen/arch/x86/mm/altp2m.c | 8 +- xen/arch/x86/mm/guest_walk.c | 69 +- xen/arch/x86/mm/hap/guest_walk.c | 17 +- xen/arch/x86/mm/hap/hap.c | 172 +- xen/arch/x86/mm/hap/nested_ept.c | 62 +- xen/arch/x86/mm/hap/nested_hap.c | 86 +- xen/arch/x86/mm/mem_access.c | 73 +- xen/arch/x86/mm/mem_paging.c | 4 +- xen/arch/x86/mm/mem_sharing.c | 766 ++- xen/arch/x86/mm/p2m-ept.c | 347 +- xen/arch/x86/mm/p2m-pod.c | 196 +- xen/arch/x86/mm/p2m-pt.c | 305 +- xen/arch/x86/mm/p2m.c | 446 +- xen/arch/x86/mm/paging.c | 85 +- xen/arch/x86/mm/shadow/common.c | 1021 ++- xen/arch/x86/mm/shadow/hvm.c | 232 +- xen/arch/x86/mm/shadow/multi.c | 1741 +++-- xen/arch/x86/mm/shadow/none.c | 16 +- xen/arch/x86/monitor.c | 24 +- xen/arch/x86/mpparse.c | 1766 ++--- xen/arch/x86/msi.c | 338 +- xen/arch/x86/msr.c | 56 +- xen/arch/x86/nmi.c | 171 +- xen/arch/x86/numa.c | 129 +- xen/arch/x86/oprofile/backtrace.c | 64 +- xen/arch/x86/oprofile/nmi_int.c | 650 +- xen/arch/x86/oprofile/op_model_athlon.c | 775 +-- xen/arch/x86/oprofile/op_model_p4.c | 1229 ++-- xen/arch/x86/oprofile/op_model_ppro.c | 484 +- xen/arch/x86/oprofile/xenoprof.c | 22 +- xen/arch/x86/pci.c | 10 +- xen/arch/x86/percpu.c | 12 +- xen/arch/x86/physdev.c | 156 +- xen/arch/x86/platform_hypercall.c | 175 +- xen/arch/x86/psr.c | 177 +- xen/arch/x86/pv/callback.c | 51 +- xen/arch/x86/pv/descriptor-tables.c | 12 +- xen/arch/x86/pv/dom0_build.c | 243 +- xen/arch/x86/pv/domain.c | 44 +- xen/arch/x86/pv/emul-gate-op.c | 103 +- xen/arch/x86/pv/emul-inv-op.c | 2 +- xen/arch/x86/pv/emul-priv-op.c | 307 +- xen/arch/x86/pv/emulate.c | 18 +- xen/arch/x86/pv/grant_table.c | 51 +- xen/arch/x86/pv/hypercall.c | 160 +- xen/arch/x86/pv/iret.c | 22 +- xen/arch/x86/pv/misc-hypercalls.c | 24 +- xen/arch/x86/pv/mm.c | 3 +- xen/arch/x86/pv/ro-page-fault.c | 77 +- xen/arch/x86/pv/shim.c | 161 +- xen/arch/x86/pv/traps.c | 28 +- xen/arch/x86/setup.c | 341 +- xen/arch/x86/shutdown.c | 546 +- xen/arch/x86/smp.c | 25 +- xen/arch/x86/smpboot.c | 89 +- xen/arch/x86/spec_ctrl.c | 117 +- xen/arch/x86/srat.c | 812 +-- xen/arch/x86/string.c | 37 +- xen/arch/x86/sysctl.c | 153 +- xen/arch/x86/tboot.c | 143 +- xen/arch/x86/time.c | 319 +- xen/arch/x86/trace.c | 60 +- xen/arch/x86/traps.c | 395 +- xen/arch/x86/usercopy.c | 207 +- xen/arch/x86/vm_event.c | 18 +- xen/arch/x86/x86_64/acpi_mmcfg.c | 37 +- xen/arch/x86/x86_64/asm-offsets.c | 32 +- xen/arch/x86/x86_64/compat.c | 8 +- xen/arch/x86/x86_64/compat/mm.c | 28 +- xen/arch/x86/x86_64/cpu_idle.c | 87 +- xen/arch/x86/x86_64/cpufreq.c | 28 +- xen/arch/x86/x86_64/domain.c | 9 +- xen/arch/x86/x86_64/gdbstub.c | 229 +- xen/arch/x86/x86_64/machine_kexec.c | 6 +- xen/arch/x86/x86_64/mm.c | 442 +- xen/arch/x86/x86_64/mmconf-fam10h.c | 329 +- xen/arch/x86/x86_64/mmconfig-shared.c | 199 +- xen/arch/x86/x86_64/mmconfig_64.c | 95 +- xen/arch/x86/x86_64/pci.c | 32 +- xen/arch/x86/x86_64/physdev.c | 20 +- xen/arch/x86/x86_64/platform_hypercall.c | 12 +- xen/arch/x86/x86_64/traps.c | 134 +- xen/arch/x86/x86_emulate.c | 51 +- xen/arch/x86/x86_emulate/x86_emulate.c | 5833 +++++++++-------- xen/arch/x86/xstate.c | 178 +- xen/common/argo.c | 476 +- xen/common/bitmap.c | 439 +- xen/common/bsearch.c | 29 +- xen/common/bunzip2.c | 1348 
++-- xen/common/compat/domain.c | 12 +- xen/common/compat/grant_table.c | 133 +- xen/common/compat/memory.c | 204 +- xen/common/compat/multicall.c | 12 +- xen/common/core_parking.c | 23 +- xen/common/coverage/coverage.c | 2 +- xen/common/coverage/gcc_3_4.c | 68 +- xen/common/coverage/gcc_4_7.c | 23 +- xen/common/coverage/gcov.c | 7 +- xen/common/coverage/llvm.c | 48 +- xen/common/cpu.c | 34 +- xen/common/cpupool.c | 77 +- xen/common/decompress.c | 2 +- xen/common/device_tree.c | 270 +- xen/common/domain.c | 163 +- xen/common/domctl.c | 147 +- xen/common/earlycpio.c | 198 +- xen/common/efi/boot.c | 383 +- xen/common/efi/runtime.c | 129 +- xen/common/event_2l.c | 24 +- xen/common/event_channel.c | 293 +- xen/common/event_fifo.c | 71 +- xen/common/gdbstub.c | 177 +- xen/common/grant_table.c | 842 ++- xen/common/guestcopy.c | 4 +- xen/common/gunzip.c | 72 +- xen/common/inflate.c | 1001 +-- xen/common/irq.c | 2 +- xen/common/kernel.c | 76 +- xen/common/kexec.c | 153 +- xen/common/keyhandler.c | 131 +- xen/common/kimage.c | 110 +- xen/common/lib.c | 200 +- xen/common/libelf/libelf-dominfo.c | 121 +- xen/common/libelf/libelf-loader.c | 147 +- xen/common/libelf/libelf-tools.c | 122 +- xen/common/libfdt/fdt.c | 300 +- xen/common/libfdt/fdt_empty_tree.c | 38 +- xen/common/libfdt/fdt_ro.c | 744 +-- xen/common/libfdt/fdt_rw.c | 634 +- xen/common/libfdt/fdt_strerror.c | 53 +- xen/common/libfdt/fdt_sw.c | 274 +- xen/common/libfdt/fdt_wip.c | 65 +- xen/common/list_sort.c | 205 +- xen/common/livepatch.c | 294 +- xen/common/livepatch_elf.c | 104 +- xen/common/lz4/decompress.c | 498 +- xen/common/lzo.c | 485 +- xen/common/mem_access.c | 9 +- xen/common/memory.c | 283 +- xen/common/monitor.c | 9 +- xen/common/multicall.c | 19 +- xen/common/notifier.c | 13 +- xen/common/page_alloc.c | 353 +- xen/common/pdx.c | 27 +- xen/common/perfc.c | 80 +- xen/common/preempt.c | 10 +- xen/common/radix-tree.c | 985 +-- xen/common/rangeset.c | 110 +- xen/common/rbtree.c | 949 +-- xen/common/rcupdate.c | 156 +- xen/common/rwlock.c | 20 +- xen/common/sched_arinc653.c | 170 +- xen/common/sched_credit.c | 730 +-- xen/common/sched_credit2.c | 988 ++- xen/common/sched_null.c | 149 +- xen/common/sched_rt.c | 440 +- xen/common/schedule.c | 210 +- xen/common/shutdown.c | 5 +- xen/common/smp.c | 17 +- xen/common/softirq.c | 28 +- xen/common/sort.c | 6 +- xen/common/spinlock.c | 95 +- xen/common/stop_machine.c | 24 +- xen/common/string.c | 358 +- xen/common/symbols.c | 61 +- xen/common/sysctl.c | 106 +- xen/common/tasklet.c | 30 +- xen/common/time.c | 32 +- xen/common/timer.c | 119 +- xen/common/tmem.c | 502 +- xen/common/tmem_control.c | 240 +- xen/common/tmem_xen.c | 34 +- xen/common/trace.c | 163 +- xen/common/ubsan/ubsan.c | 541 +- xen/common/unlz4.c | 284 +- xen/common/unlzma.c | 951 +-- xen/common/unlzo.c | 417 +- xen/common/unxz.c | 281 +- xen/common/version.c | 6 +- xen/common/virtual_region.c | 5 +- xen/common/vm_event.c | 81 +- xen/common/vmap.c | 22 +- xen/common/vsprintf.c | 464 +- xen/common/wait.c | 74 +- xen/common/xenoprof.c | 115 +- xen/common/xmalloc_tlsf.c | 128 +- xen/common/xz/crc32.c | 36 +- xen/common/xz/dec_bcj.c | 899 +-- xen/common/xz/dec_lzma2.c | 1802 ++--- xen/common/xz/dec_stream.c | 1204 ++-- xen/crypto/rijndael.c | 2131 +++--- xen/crypto/vmac.c | 1284 ++-- xen/drivers/acpi/apei/apei-base.c | 306 +- xen/drivers/acpi/apei/apei-io.c | 432 +- xen/drivers/acpi/apei/erst.c | 1149 ++-- xen/drivers/acpi/apei/hest.c | 255 +- xen/drivers/acpi/hwregs.c | 823 ++- xen/drivers/acpi/numa.c | 259 +- 
xen/drivers/acpi/osl.c | 326 +- xen/drivers/acpi/pmstat.c | 107 +- xen/drivers/acpi/reboot.c | 52 +- xen/drivers/acpi/tables.c | 716 +- xen/drivers/acpi/tables/tbfadt.c | 566 +- xen/drivers/acpi/tables/tbinstal.c | 111 +- xen/drivers/acpi/tables/tbutils.c | 660 +- xen/drivers/acpi/tables/tbxface.c | 247 +- xen/drivers/acpi/tables/tbxfroot.c | 269 +- xen/drivers/acpi/utilities/utglobal.c | 196 +- xen/drivers/acpi/utilities/utmisc.c | 151 +- xen/drivers/char/arm-uart.c | 14 +- xen/drivers/char/cadence-uart.c | 71 +- xen/drivers/char/console.c | 184 +- xen/drivers/char/consoled.c | 4 +- xen/drivers/char/ehci-dbgp.c | 439 +- xen/drivers/char/exynos4210-uart.c | 75 +- xen/drivers/char/mvebu-uart.c | 130 +- xen/drivers/char/ns16550.c | 487 +- xen/drivers/char/omap-uart.c | 98 +- xen/drivers/char/pl011.c | 131 +- xen/drivers/char/scif-uart.c | 99 +- xen/drivers/char/serial.c | 66 +- xen/drivers/char/xen_pv_console.c | 4 +- xen/drivers/cpufreq/cpufreq.c | 286 +- xen/drivers/cpufreq/cpufreq_misc_governors.c | 65 +- xen/drivers/cpufreq/cpufreq_ondemand.c | 178 +- xen/drivers/cpufreq/utility.c | 193 +- xen/drivers/passthrough/amd/iommu_acpi.c | 348 +- xen/drivers/passthrough/amd/iommu_cmd.c | 131 +- xen/drivers/passthrough/amd/iommu_detect.c | 56 +- xen/drivers/passthrough/amd/iommu_guest.c | 207 +- xen/drivers/passthrough/amd/iommu_init.c | 280 +- xen/drivers/passthrough/amd/iommu_intr.c | 169 +- xen/drivers/passthrough/amd/iommu_map.c | 179 +- xen/drivers/passthrough/amd/pci_amd_iommu.c | 80 +- xen/drivers/passthrough/arm/io-pgtable-arm.c | 1689 ++--- xen/drivers/passthrough/arm/io-pgtable.c | 56 +- xen/drivers/passthrough/arm/iommu.c | 7 +- xen/drivers/passthrough/arm/ipmmu-vmsa-plat.c | 440 +- xen/drivers/passthrough/arm/ipmmu-vmsa.c | 3396 +++++----- xen/drivers/passthrough/arm/smmu.c | 3391 +++++----- xen/drivers/passthrough/device_tree.c | 15 +- xen/drivers/passthrough/io.c | 125 +- xen/drivers/passthrough/iommu.c | 77 +- xen/drivers/passthrough/pci.c | 456 +- xen/drivers/passthrough/vtd/dmar.c | 205 +- xen/drivers/passthrough/vtd/intremap.c | 248 +- xen/drivers/passthrough/vtd/iommu.c | 494 +- xen/drivers/passthrough/vtd/qinval.c | 49 +- xen/drivers/passthrough/vtd/quirks.c | 170 +- xen/drivers/passthrough/vtd/utils.c | 104 +- xen/drivers/passthrough/vtd/x86/ats.c | 18 +- xen/drivers/passthrough/vtd/x86/hvm.c | 2 +- xen/drivers/passthrough/vtd/x86/vtd.c | 2 +- xen/drivers/passthrough/x86/ats.c | 27 +- xen/drivers/passthrough/x86/iommu.c | 25 +- xen/drivers/pci/pci.c | 12 +- xen/drivers/video/font_8x14.c | 9 +- xen/drivers/video/font_8x16.c | 9 +- xen/drivers/video/font_8x8.c | 11 +- xen/drivers/video/lfb.c | 49 +- xen/drivers/video/vesa.c | 48 +- xen/drivers/video/vga.c | 47 +- xen/drivers/vpci/header.c | 57 +- xen/drivers/vpci/msi.c | 36 +- xen/drivers/vpci/msix.c | 57 +- xen/drivers/vpci/vpci.c | 58 +- xen/lib/x86/cpuid.c | 68 +- xen/lib/x86/msr.c | 24 +- xen/test/livepatch/xen_hello_world.c | 17 +- xen/test/livepatch/xen_hello_world_func.c | 3 +- xen/test/livepatch/xen_nop.c | 4 +- xen/test/livepatch/xen_replace_world.c | 15 +- xen/tools/kconfig/conf.c | 1356 ++-- xen/tools/kconfig/confdata.c | 2236 ++++--- xen/tools/kconfig/expr.c | 2199 ++++--- xen/tools/kconfig/gconf.c | 2372 ++++--- xen/tools/kconfig/images.c | 491 +- xen/tools/kconfig/kxgettext.c | 317 +- xen/tools/kconfig/lxdialog/checklist.c | 574 +- xen/tools/kconfig/lxdialog/inputbox.c | 518 +- xen/tools/kconfig/lxdialog/menubox.c | 708 +- xen/tools/kconfig/lxdialog/textbox.c | 647 +- xen/tools/kconfig/lxdialog/util.c | 943 +-- 
xen/tools/kconfig/lxdialog/yesno.c | 153 +- xen/tools/kconfig/mconf.c | 1933 +++--- xen/tools/kconfig/menu.c | 1128 ++-- xen/tools/kconfig/nconf.c | 2632 ++++---- xen/tools/kconfig/nconf.gui.c | 1127 ++-- xen/tools/kconfig/symbol.c | 2339 +++---- xen/tools/kconfig/util.c | 193 +- xen/tools/symbols.c | 994 +-- xen/xsm/dummy.c | 12 +- xen/xsm/flask/avc.c | 150 +- xen/xsm/flask/flask_op.c | 97 +- xen/xsm/flask/hooks.c | 249 +- xen/xsm/flask/ss/avtab.c | 152 +- xen/xsm/flask/ss/conditional.c | 124 +- xen/xsm/flask/ss/ebitmap.c | 15 +- xen/xsm/flask/ss/hashtab.c | 14 +- xen/xsm/flask/ss/mls.c | 117 +- xen/xsm/flask/ss/policydb.c | 441 +- xen/xsm/flask/ss/services.c | 628 +- xen/xsm/flask/ss/sidtab.c | 25 +- xen/xsm/flask/ss/symtab.c | 4 +- xen/xsm/xsm_core.c | 19 +- xen/xsm/xsm_policy.c | 22 +- 510 files changed, 69215 insertions(+), 67290 deletions(-) diff --git a/xen/arch/arm/acpi/boot.c b/xen/arch/arm/acpi/boot.c index 9b29769a10..474cf8450f 100644 --- a/xen/arch/arm/acpi/boot.c +++ b/xen/arch/arm/acpi/boot.c @@ -64,13 +64,14 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) total_cpus++; if ( !enabled ) { - printk("Skipping disabled CPU entry with 0x%"PRIx64" MPIDR\n", mpidr); + printk("Skipping disabled CPU entry with 0x%" PRIx64 " MPIDR\n", mpidr); return; } - if ( enabled_cpus >= NR_CPUS ) + if ( enabled_cpus >= NR_CPUS ) { - printk("NR_CPUS limit of %d reached, Processor %d/0x%"PRIx64" ignored.\n", + printk("NR_CPUS limit of %d reached, Processor %d/0x%" PRIx64 + " ignored.\n", NR_CPUS, total_cpus, mpidr); return; } @@ -80,7 +81,8 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) { if ( bootcpu_valid ) { - printk("Firmware bug, duplicate boot CPU MPIDR: 0x%"PRIx64" in MADT\n", + printk("Firmware bug, duplicate boot CPU MPIDR: 0x%" PRIx64 + " in MADT\n", mpidr); return; } @@ -97,7 +99,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) { if ( cpu_logical_map(i) == mpidr ) { - printk("Firmware bug, duplicate CPU MPIDR: 0x%"PRIx64" in MADT\n", + printk("Firmware bug, duplicate CPU MPIDR: 0x%" PRIx64 " in MADT\n", mpidr); return; } @@ -105,15 +107,14 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) if ( !acpi_psci_present() ) { - printk("PSCI not present, skipping CPU MPIDR 0x%"PRIx64"\n", - mpidr); + printk("PSCI not present, skipping CPU MPIDR 0x%" PRIx64 "\n", mpidr); return; } if ( (rc = arch_cpu_init(enabled_cpus, NULL)) < 0 ) { - printk("cpu%d: init failed (0x%"PRIx64" MPIDR): %d\n", - enabled_cpus, mpidr, rc); + printk("cpu%d: init failed (0x%" PRIx64 " MPIDR): %d\n", enabled_cpus, + mpidr, rc); return; } @@ -123,12 +124,11 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) enabled_cpus++; } -static int __init -acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header, - const unsigned long end) +static int __init acpi_parse_gic_cpu_interface( + struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_generic_interrupt *processor = - container_of(header, struct acpi_madt_generic_interrupt, header); + container_of(header, struct acpi_madt_generic_interrupt, header); if ( BAD_MADT_ENTRY(processor, end) ) return -EINVAL; @@ -149,7 +149,7 @@ void __init acpi_smp_init_cpus(void) * we need for SMP init */ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, - acpi_parse_gic_cpu_interface, 0); + acpi_parse_gic_cpu_interface, 0); if ( count <= 0 ) { @@ -180,12 +180,13 @@ static int __init acpi_parse_fadt(struct 
acpi_table_header *table) * we only deal with ACPI 6.0 or newer revision to get GIC and SMP * boot protocol configuration data, or we will disable ACPI. */ - if ( table->revision > 6 - || (table->revision == 6 && fadt->minor_revision >= 0) ) + if ( table->revision > 6 || + (table->revision == 6 && fadt->minor_revision >= 0) ) return 0; - printk("Unsupported FADT revision %d.%d, should be 6.0+, will disable ACPI\n", - table->revision, fadt->minor_revision); + printk( + "Unsupported FADT revision %d.%d, should be 6.0+, will disable ACPI\n", + table->revision, fadt->minor_revision); return -EINVAL; } @@ -219,7 +220,7 @@ static int __init dt_scan_depth1_nodes(const void *fdt, int node, * Return 1 as soon as we encounter a node at depth 1 that is * not the /chosen node. */ - if (depth == 1 && (strcmp(uname, "chosen") != 0)) + if ( depth == 1 && (strcmp(uname, "chosen") != 0) ) return 1; return 0; } @@ -246,9 +247,10 @@ int __init acpi_boot_table_init(void) * - the device tree is not empty (it has more than just a /chosen node) * and ACPI has not been force enabled (acpi=force) */ - if ( param_acpi_off || ( !param_acpi_force - && device_tree_for_each_node(device_tree_flattened, - dt_scan_depth1_nodes, NULL))) + if ( param_acpi_off || + (!param_acpi_force && + device_tree_for_each_node(device_tree_flattened, dt_scan_depth1_nodes, + NULL)) ) goto disable; /* @@ -261,8 +263,8 @@ int __init acpi_boot_table_init(void) error = acpi_table_init(); if ( error ) { - printk("%s: Unable to initialize table parser (%d)\n", - __FUNCTION__, error); + printk("%s: Unable to initialize table parser (%d)\n", __FUNCTION__, + error); goto disable; } diff --git a/xen/arch/arm/acpi/domain_build.c b/xen/arch/arm/acpi/domain_build.c index 5aae32ac20..2b9e0c0560 100644 --- a/xen/arch/arm/acpi/domain_build.c +++ b/xen/arch/arm/acpi/domain_build.c @@ -39,8 +39,8 @@ static int __init acpi_iomem_deny_access(struct domain *d) return rc; /* TODO: Deny MMIO access for SMMU, GIC ITS */ - status = acpi_get_table(ACPI_SIG_SPCR, 0, - (struct acpi_table_header **)&spcr); + status = + acpi_get_table(ACPI_SIG_SPCR, 0, (struct acpi_table_header **)&spcr); if ( ACPI_FAILURE(status) ) { @@ -67,14 +67,14 @@ static int __init acpi_route_spis(struct domain *d) * Route the IRQ to hardware domain and permit the access. * The interrupt type will be set by set by the hardware domain. */ - for( i = NR_LOCAL_IRQS; i < vgic_num_irqs(d); i++ ) + for ( i = NR_LOCAL_IRQS; i < vgic_num_irqs(d); i++ ) { /* * TODO: Exclude the SPIs SMMU uses which should not be routed to * the hardware domain. */ desc = irq_to_desc(i); - if ( desc->action != NULL) + if ( desc->action != NULL ) continue; /* XXX: Shall we use a proper devname? */ @@ -89,9 +89,9 @@ static int __init acpi_route_spis(struct domain *d) static int __init acpi_make_hypervisor_node(const struct kernel_info *kinfo, struct membank tbl_add[]) { - const char compat[] = - "xen,xen-"__stringify(XEN_VERSION)"."__stringify(XEN_SUBVERSION)"\0" - "xen,xen"; + const char compat[] = "xen,xen-" __stringify(XEN_VERSION) "." __stringify( + XEN_SUBVERSION) "\0" + "xen,xen"; int res; /* Convenience alias */ void *fdt = kinfo->fdt; @@ -176,7 +176,7 @@ static int __init create_acpi_dtb(struct kernel_info *kinfo, return 0; - err: +err: printk("Device tree generation failed (%d).\n", ret); xfree(kinfo->fdt); return -EINVAL; @@ -189,27 +189,23 @@ static void __init acpi_map_other_tables(struct domain *d) u64 addr, size; /* Map all ACPI tables to Dom0 using 1:1 mappings. 
*/ - for( i = 0; i < acpi_gbl_root_table_list.count; i++ ) + for ( i = 0; i < acpi_gbl_root_table_list.count; i++ ) { addr = acpi_gbl_root_table_list.tables[i].address; size = acpi_gbl_root_table_list.tables[i].length; - res = map_regions_p2mt(d, - gaddr_to_gfn(addr), - PFN_UP(size), - maddr_to_mfn(addr), - p2m_mmio_direct_c); + res = map_regions_p2mt(d, gaddr_to_gfn(addr), PFN_UP(size), + maddr_to_mfn(addr), p2m_mmio_direct_c); if ( res ) { - panic(XENLOG_ERR "Unable to map ACPI region 0x%"PRIx64 - " - 0x%"PRIx64" in domain\n", - addr & PAGE_MASK, PAGE_ALIGN(addr + size) - 1); + panic(XENLOG_ERR "Unable to map ACPI region 0x%" PRIx64 + " - 0x%" PRIx64 " in domain\n", + addr & PAGE_MASK, PAGE_ALIGN(addr + size) - 1); } } } static int __init acpi_create_rsdp(struct domain *d, struct membank tbl_add[]) { - struct acpi_table_rsdp *rsdp = NULL; u64 addr; u64 table_size = sizeof(struct acpi_table_rsdp); @@ -217,14 +213,14 @@ static int __init acpi_create_rsdp(struct domain *d, struct membank tbl_add[]) u8 checksum; addr = acpi_os_get_root_pointer(); - if ( !addr ) + if ( !addr ) { printk("Unable to get acpi root pointer\n"); return -EINVAL; } rsdp = acpi_os_map_memory(addr, table_size); - base_ptr = d->arch.efi_acpi_table - + acpi_get_table_offset(tbl_add, TBL_RSDP); + base_ptr = + d->arch.efi_acpi_table + acpi_get_table_offset(tbl_add, TBL_RSDP); memcpy(base_ptr, rsdp, table_size); acpi_os_unmap_memory(rsdp, table_size); @@ -234,8 +230,8 @@ static int __init acpi_create_rsdp(struct domain *d, struct membank tbl_add[]) checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, rsdp), table_size); rsdp->checksum = rsdp->checksum - checksum; - tbl_add[TBL_RSDP].start = d->arch.efi_acpi_gpa - + acpi_get_table_offset(tbl_add, TBL_RSDP); + tbl_add[TBL_RSDP].start = + d->arch.efi_acpi_gpa + acpi_get_table_offset(tbl_add, TBL_RSDP); tbl_add[TBL_RSDP].size = table_size; return 0; @@ -249,7 +245,7 @@ static void __init acpi_xsdt_modify_entry(u64 entry[], struct acpi_table_header *table; u64 size = sizeof(struct acpi_table_header); - for( i = 0; i < entry_count; i++ ) + for ( i = 0; i < entry_count; i++ ) { table = acpi_os_map_memory(entry[i], size); if ( ACPI_COMPARE_NAME(table->signature, signature) ) @@ -284,27 +280,27 @@ static int __init acpi_create_xsdt(struct domain *d, struct membank tbl_add[]) /* Add place for STAO table in XSDT table */ table_size = table->length + sizeof(u64); - entry_count = (table->length - sizeof(struct acpi_table_header)) - / sizeof(u64); - base_ptr = d->arch.efi_acpi_table - + acpi_get_table_offset(tbl_add, TBL_XSDT); + entry_count = + (table->length - sizeof(struct acpi_table_header)) / sizeof(u64); + base_ptr = + d->arch.efi_acpi_table + acpi_get_table_offset(tbl_add, TBL_XSDT); memcpy(base_ptr, table, table->length); acpi_os_unmap_memory(table, sizeof(struct acpi_table_header)); acpi_os_unmap_memory(rsdp_tbl, sizeof(struct acpi_table_rsdp)); xsdt = (struct acpi_table_xsdt *)base_ptr; - acpi_xsdt_modify_entry(xsdt->table_offset_entry, entry_count, - ACPI_SIG_FADT, tbl_add[TBL_FADT].start); - acpi_xsdt_modify_entry(xsdt->table_offset_entry, entry_count, - ACPI_SIG_MADT, tbl_add[TBL_MADT].start); + acpi_xsdt_modify_entry(xsdt->table_offset_entry, entry_count, ACPI_SIG_FADT, + tbl_add[TBL_FADT].start); + acpi_xsdt_modify_entry(xsdt->table_offset_entry, entry_count, ACPI_SIG_MADT, + tbl_add[TBL_MADT].start); xsdt->table_offset_entry[entry_count] = tbl_add[TBL_STAO].start; xsdt->header.length = table_size; checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, xsdt), table_size); 
xsdt->header.checksum -= checksum; - tbl_add[TBL_XSDT].start = d->arch.efi_acpi_gpa - + acpi_get_table_offset(tbl_add, TBL_XSDT); + tbl_add[TBL_XSDT].start = + d->arch.efi_acpi_gpa + acpi_get_table_offset(tbl_add, TBL_XSDT); tbl_add[TBL_XSDT].size = table_size; return 0; @@ -381,7 +377,7 @@ static int __init acpi_create_madt(struct domain *d, struct membank tbl_add[]) } gicd = container_of(header, struct acpi_madt_generic_distributor, header); memcpy(base_ptr + table_size, gicd, - sizeof(struct acpi_madt_generic_distributor)); + sizeof(struct acpi_madt_generic_distributor)); table_size += sizeof(struct acpi_madt_generic_distributor); /* Add other subtables. */ @@ -424,8 +420,8 @@ static int __init acpi_create_fadt(struct domain *d, struct membank tbl_add[]) } table_size = table->length; - base_ptr = d->arch.efi_acpi_table - + acpi_get_table_offset(tbl_add, TBL_FADT); + base_ptr = + d->arch.efi_acpi_table + acpi_get_table_offset(tbl_add, TBL_FADT); memcpy(base_ptr, table, table_size); fadt = (struct acpi_table_fadt *)base_ptr; @@ -434,8 +430,8 @@ static int __init acpi_create_fadt(struct domain *d, struct membank tbl_add[]) checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, fadt), table_size); fadt->header.checksum -= checksum; - tbl_add[TBL_FADT].start = d->arch.efi_acpi_gpa - + acpi_get_table_offset(tbl_add, TBL_FADT); + tbl_add[TBL_FADT].start = + d->arch.efi_acpi_gpa + acpi_get_table_offset(tbl_add, TBL_FADT); tbl_add[TBL_FADT].size = table_size; return 0; @@ -485,8 +481,8 @@ static int __init estimate_acpi_efi_size(struct domain *d, acpi_os_unmap_memory(table, sizeof(struct acpi_table_header)); acpi_size += ROUNDUP(sizeof(struct acpi_table_rsdp), 8); - d->arch.efi_acpi_len = PAGE_ALIGN(ROUNDUP(efi_size, 8) - + ROUNDUP(acpi_size, 8)); + d->arch.efi_acpi_len = + PAGE_ALIGN(ROUNDUP(efi_size, 8) + ROUNDUP(acpi_size, 8)); return 0; } @@ -518,7 +514,8 @@ int __init prepare_acpi(struct domain *d, struct kernel_info *kinfo) d->arch.efi_acpi_gpa = kinfo->gnttab_start; if ( kinfo->gnttab_size < d->arch.efi_acpi_len ) { - printk("The grant table region is not enough to fit the ACPI tables!\n"); + printk( + "The grant table region is not enough to fit the ACPI tables!\n"); return -EINVAL; } @@ -547,15 +544,13 @@ int __init prepare_acpi(struct domain *d, struct kernel_info *kinfo) acpi_create_efi_mmap_table(d, &kinfo->mem, tbl_add); /* Map the EFI and ACPI tables to Dom0 */ - rc = map_regions_p2mt(d, - gaddr_to_gfn(d->arch.efi_acpi_gpa), - PFN_UP(d->arch.efi_acpi_len), - virt_to_mfn(d->arch.efi_acpi_table), - p2m_mmio_direct_c); + rc = map_regions_p2mt( + d, gaddr_to_gfn(d->arch.efi_acpi_gpa), PFN_UP(d->arch.efi_acpi_len), + virt_to_mfn(d->arch.efi_acpi_table), p2m_mmio_direct_c); if ( rc != 0 ) { - printk(XENLOG_ERR "Unable to map EFI/ACPI table 0x%"PRIx64 - " - 0x%"PRIx64" in domain %d\n", + printk(XENLOG_ERR "Unable to map EFI/ACPI table 0x%" PRIx64 + " - 0x%" PRIx64 " in domain %d\n", d->arch.efi_acpi_gpa & PAGE_MASK, PAGE_ALIGN(d->arch.efi_acpi_gpa + d->arch.efi_acpi_len) - 1, d->domain_id); diff --git a/xen/arch/arm/acpi/lib.c b/xen/arch/arm/acpi/lib.c index 4fc6e17322..a5f04fe02e 100644 --- a/xen/arch/arm/acpi/lib.c +++ b/xen/arch/arm/acpi/lib.c @@ -40,13 +40,13 @@ char *__acpi_map_table(paddr_t phys, unsigned long size) while ( mapped_size < size ) { if ( ++idx > FIXMAP_ACPI_END ) - return NULL; /* cannot handle this */ + return NULL; /* cannot handle this */ phys += PAGE_SIZE; set_fixmap(idx, maddr_to_mfn(phys), PAGE_HYPERVISOR); mapped_size += PAGE_SIZE; } - return ((char *) base + offset); + 
return ((char *)base + offset); } /* True to indicate PSCI 0.2+ is implemented */ diff --git a/xen/arch/arm/alternative.c b/xen/arch/arm/alternative.c index 52ed7edf69..a9c7d07dce 100644 --- a/xen/arch/arm/alternative.c +++ b/xen/arch/arm/alternative.c @@ -40,7 +40,8 @@ extern const struct alt_instr __alt_instructions[], __alt_instructions_end[]; -struct alt_region { +struct alt_region +{ const struct alt_instr *begin; const struct alt_instr *end; }; @@ -67,8 +68,8 @@ static bool branch_insn_requires_update(const struct alt_instr *alt, BUG(); } -static u32 get_alt_insn(const struct alt_instr *alt, - const u32 *insnptr, const u32 *altinsnptr) +static u32 get_alt_insn(const struct alt_instr *alt, const u32 *insnptr, + const u32 *altinsnptr) { u32 insn; @@ -97,8 +98,8 @@ static u32 get_alt_insn(const struct alt_instr *alt, } static void patch_alternative(const struct alt_instr *alt, - const uint32_t *origptr, - uint32_t *updptr, int nr_inst) + const uint32_t *origptr, uint32_t *updptr, + int nr_inst) { const uint32_t *replptr; unsigned int i; @@ -136,8 +137,7 @@ static int __apply_alternatives(const struct alt_region *region, int nr_inst; /* Use ARM_CB_PATCH as an unconditional patch */ - if ( alt->cpufeature < ARM_CB_PATCH && - !cpus_have_cap(alt->cpufeature) ) + if ( alt->cpufeature < ARM_CB_PATCH && !cpus_have_cap(alt->cpufeature) ) continue; if ( alt->cpufeature == ARM_CB_PATCH ) @@ -159,7 +159,7 @@ static int __apply_alternatives(const struct alt_region *region, /* Ensure the new instructions reached the memory and nuke */ clean_and_invalidate_dcache_va_range(origptr, - (sizeof (*origptr) * nr_inst)); + (sizeof(*origptr) * nr_inst)); } /* Nuke the instruction cache */ @@ -229,14 +229,15 @@ void __init apply_alternatives_all(void) ASSERT(system_state != SYS_STATE_active); - /* better not try code patching on a live SMP system */ + /* better not try code patching on a live SMP system */ ret = stop_machine_run(__apply_alternatives_multi_stop, NULL, NR_CPUS); /* stop_machine_run should never fail at this stage of the boot */ BUG_ON(ret); } -int apply_alternatives(const struct alt_instr *start, const struct alt_instr *end) +int apply_alternatives(const struct alt_instr *start, + const struct alt_instr *end) { const struct alt_region region = { .begin = start, diff --git a/xen/arch/arm/arm32/asm-offsets.c b/xen/arch/arm/arm32/asm-offsets.c index 2116ba5b95..3319bddac2 100644 --- a/xen/arch/arm/arm32/asm-offsets.c +++ b/xen/arch/arm/arm32/asm-offsets.c @@ -12,67 +12,65 @@ #include #include -#define DEFINE(_sym, _val) \ - asm volatile ("\n.ascii\"==>#define " #_sym " %0 /* " #_val " */<==\"" \ - : : "i" (_val) ) -#define BLANK() \ - asm volatile ( "\n.ascii\"==><==\"" : : ) -#define OFFSET(_sym, _str, _mem) \ - DEFINE(_sym, offsetof(_str, _mem)); +#define DEFINE(_sym, _val) \ + asm volatile("\n.ascii\"==>#define " #_sym " %0 /* " #_val " */<==\"" \ + : \ + : "i"(_val)) +#define BLANK() asm volatile("\n.ascii\"==><==\"" : :) +#define OFFSET(_sym, _str, _mem) DEFINE(_sym, offsetof(_str, _mem)); void __dummy__(void) { - OFFSET(UREGS_sp, struct cpu_user_regs, sp); - OFFSET(UREGS_lr, struct cpu_user_regs, lr); - OFFSET(UREGS_pc, struct cpu_user_regs, pc); - OFFSET(UREGS_cpsr, struct cpu_user_regs, cpsr); - OFFSET(UREGS_hsr, struct cpu_user_regs, hsr); + OFFSET(UREGS_sp, struct cpu_user_regs, sp); + OFFSET(UREGS_lr, struct cpu_user_regs, lr); + OFFSET(UREGS_pc, struct cpu_user_regs, pc); + OFFSET(UREGS_cpsr, struct cpu_user_regs, cpsr); + OFFSET(UREGS_hsr, struct cpu_user_regs, hsr); - 
OFFSET(UREGS_LR_usr, struct cpu_user_regs, lr_usr); - OFFSET(UREGS_SP_usr, struct cpu_user_regs, sp_usr); + OFFSET(UREGS_LR_usr, struct cpu_user_regs, lr_usr); + OFFSET(UREGS_SP_usr, struct cpu_user_regs, sp_usr); - OFFSET(UREGS_SP_svc, struct cpu_user_regs, sp_svc); - OFFSET(UREGS_LR_svc, struct cpu_user_regs, lr_svc); - OFFSET(UREGS_SPSR_svc, struct cpu_user_regs, spsr_svc); + OFFSET(UREGS_SP_svc, struct cpu_user_regs, sp_svc); + OFFSET(UREGS_LR_svc, struct cpu_user_regs, lr_svc); + OFFSET(UREGS_SPSR_svc, struct cpu_user_regs, spsr_svc); - OFFSET(UREGS_SP_abt, struct cpu_user_regs, sp_abt); - OFFSET(UREGS_LR_abt, struct cpu_user_regs, lr_abt); - OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt); + OFFSET(UREGS_SP_abt, struct cpu_user_regs, sp_abt); + OFFSET(UREGS_LR_abt, struct cpu_user_regs, lr_abt); + OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt); - OFFSET(UREGS_SP_und, struct cpu_user_regs, sp_und); - OFFSET(UREGS_LR_und, struct cpu_user_regs, lr_und); - OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und); + OFFSET(UREGS_SP_und, struct cpu_user_regs, sp_und); + OFFSET(UREGS_LR_und, struct cpu_user_regs, lr_und); + OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und); - OFFSET(UREGS_SP_irq, struct cpu_user_regs, sp_irq); - OFFSET(UREGS_LR_irq, struct cpu_user_regs, lr_irq); - OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq); + OFFSET(UREGS_SP_irq, struct cpu_user_regs, sp_irq); + OFFSET(UREGS_LR_irq, struct cpu_user_regs, lr_irq); + OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq); - OFFSET(UREGS_SP_fiq, struct cpu_user_regs, sp_fiq); - OFFSET(UREGS_LR_fiq, struct cpu_user_regs, lr_fiq); - OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq); + OFFSET(UREGS_SP_fiq, struct cpu_user_regs, sp_fiq); + OFFSET(UREGS_LR_fiq, struct cpu_user_regs, lr_fiq); + OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq); - OFFSET(UREGS_R8_fiq, struct cpu_user_regs, r8_fiq); - OFFSET(UREGS_R9_fiq, struct cpu_user_regs, r9_fiq); - OFFSET(UREGS_R10_fiq, struct cpu_user_regs, r10_fiq); - OFFSET(UREGS_R11_fiq, struct cpu_user_regs, r11_fiq); - OFFSET(UREGS_R12_fiq, struct cpu_user_regs, r12_fiq); + OFFSET(UREGS_R8_fiq, struct cpu_user_regs, r8_fiq); + OFFSET(UREGS_R9_fiq, struct cpu_user_regs, r9_fiq); + OFFSET(UREGS_R10_fiq, struct cpu_user_regs, r10_fiq); + OFFSET(UREGS_R11_fiq, struct cpu_user_regs, r11_fiq); + OFFSET(UREGS_R12_fiq, struct cpu_user_regs, r12_fiq); - OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, cpsr); - BLANK(); + OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, cpsr); + BLANK(); - DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info)); + DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info)); - OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context); + OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context); - BLANK(); - DEFINE(PROCINFO_sizeof, sizeof(struct proc_info_list)); - OFFSET(PROCINFO_cpu_val, struct proc_info_list, cpu_val); - OFFSET(PROCINFO_cpu_mask, struct proc_info_list, cpu_mask); - OFFSET(PROCINFO_cpu_init, struct proc_info_list, cpu_init); - - BLANK(); - OFFSET(INITINFO_stack, struct init_info, stack); + BLANK(); + DEFINE(PROCINFO_sizeof, sizeof(struct proc_info_list)); + OFFSET(PROCINFO_cpu_val, struct proc_info_list, cpu_val); + OFFSET(PROCINFO_cpu_mask, struct proc_info_list, cpu_mask); + OFFSET(PROCINFO_cpu_init, struct proc_info_list, cpu_init); + BLANK(); + OFFSET(INITINFO_stack, struct init_info, stack); } /* diff --git a/xen/arch/arm/arm32/domain.c b/xen/arch/arm/arm32/domain.c index 2ca1bf03c8..9e47df273d 100644 --- 
a/xen/arch/arm/arm32/domain.c +++ b/xen/arch/arm/arm32/domain.c @@ -7,27 +7,49 @@ /* C(hyp,user), hyp is Xen internal name, user is user API name. */ -#define ALLREGS \ - C(r0,r0_usr); C(r1,r1_usr); C(r2,r2_usr); C(r3,r3_usr); \ - C(r4,r4_usr); C(r5,r5_usr); C(r6,r6_usr); C(r7,r7_usr); \ - C(r8,r8_usr); C(r9,r9_usr); C(r10,r10_usr); C(r11,r11_usr); \ - C(r12,r12_usr); \ - C(sp_usr,sp_usr); \ - C(lr,lr_usr); \ - C(spsr_irq,spsr_irq); C(lr_irq,lr_irq); C(sp_irq,sp_irq); \ - C(spsr_svc,spsr_svc); C(lr_svc,lr_svc); C(sp_svc,sp_svc); \ - C(spsr_abt,spsr_abt); C(lr_abt,lr_abt); C(sp_abt,sp_abt); \ - C(spsr_und,spsr_und); C(lr_und,lr_und); C(sp_und,sp_und); \ - C(spsr_fiq,spsr_fiq); C(sp_fiq,sp_fiq); C(sp_fiq,sp_fiq); \ - C(r8_fiq,r8_fiq); C(r9_fiq,r9_fiq); \ - C(r10_fiq,r10_fiq); C(r11_fiq,r11_fiq); C(r12_fiq,r12_fiq); \ - C(pc,pc32); \ - C(cpsr,cpsr) +#define ALLREGS \ + C(r0, r0_usr); \ + C(r1, r1_usr); \ + C(r2, r2_usr); \ + C(r3, r3_usr); \ + C(r4, r4_usr); \ + C(r5, r5_usr); \ + C(r6, r6_usr); \ + C(r7, r7_usr); \ + C(r8, r8_usr); \ + C(r9, r9_usr); \ + C(r10, r10_usr); \ + C(r11, r11_usr); \ + C(r12, r12_usr); \ + C(sp_usr, sp_usr); \ + C(lr, lr_usr); \ + C(spsr_irq, spsr_irq); \ + C(lr_irq, lr_irq); \ + C(sp_irq, sp_irq); \ + C(spsr_svc, spsr_svc); \ + C(lr_svc, lr_svc); \ + C(sp_svc, sp_svc); \ + C(spsr_abt, spsr_abt); \ + C(lr_abt, lr_abt); \ + C(sp_abt, sp_abt); \ + C(spsr_und, spsr_und); \ + C(lr_und, lr_und); \ + C(sp_und, sp_und); \ + C(spsr_fiq, spsr_fiq); \ + C(sp_fiq, sp_fiq); \ + C(sp_fiq, sp_fiq); \ + C(r8_fiq, r8_fiq); \ + C(r9_fiq, r9_fiq); \ + C(r10_fiq, r10_fiq); \ + C(r11_fiq, r11_fiq); \ + C(r12_fiq, r12_fiq); \ + C(pc, pc32); \ + C(cpsr, cpsr) void vcpu_regs_hyp_to_user(const struct vcpu *vcpu, struct vcpu_guest_core_regs *regs) { -#define C(hyp,user) regs->user = vcpu->arch.cpu_info->guest_cpu_user_regs.hyp +#define C(hyp, user) regs->user = vcpu->arch.cpu_info->guest_cpu_user_regs.hyp ALLREGS; #undef C } @@ -35,7 +57,7 @@ void vcpu_regs_hyp_to_user(const struct vcpu *vcpu, void vcpu_regs_user_to_hyp(struct vcpu *vcpu, const struct vcpu_guest_core_regs *regs) { -#define C(hyp,user) vcpu->arch.cpu_info->guest_cpu_user_regs.hyp = regs->user +#define C(hyp, user) vcpu->arch.cpu_info->guest_cpu_user_regs.hyp = regs->user ALLREGS; #undef C } diff --git a/xen/arch/arm/arm32/domctl.c b/xen/arch/arm/arm32/domctl.c index fbf9d3bddc..b150fdb0e7 100644 --- a/xen/arch/arm/arm32/domctl.c +++ b/xen/arch/arm/arm32/domctl.c @@ -12,9 +12,9 @@ #include long subarch_do_domctl(struct xen_domctl *domctl, struct domain *d, - XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) { - switch ( domctl->cmd ) + switch (domctl->cmd) { case XEN_DOMCTL_set_address_size: return domctl->u.address_size.size == 32 ? 0 : -EINVAL; diff --git a/xen/arch/arm/arm32/insn.c b/xen/arch/arm/arm32/insn.c index 7a5dbc53ec..f6dbddd02f 100644 --- a/xen/arch/arm/arm32/insn.c +++ b/xen/arch/arm/arm32/insn.c @@ -1,27 +1,27 @@ /* - * Copyright (C) 2017 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ + * Copyright (C) 2017 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ #include #include #include #include /* Mask of branch instructions' immediate. */ -#define BRANCH_INSN_IMM_MASK GENMASK(23, 0) +#define BRANCH_INSN_IMM_MASK GENMASK(23, 0) /* Shift of branch instructions' immediate. */ -#define BRANCH_INSN_IMM_SHIFT 0 +#define BRANCH_INSN_IMM_SHIFT 0 static uint32_t branch_insn_encode_immediate(uint32_t insn, int32_t offset) { @@ -52,7 +52,7 @@ int32_t aarch32_get_branch_offset(uint32_t insn) uint32_t imm; /* Retrieve imm from branch instruction. */ - imm = ( insn >> BRANCH_INSN_IMM_SHIFT ) & BRANCH_INSN_IMM_MASK; + imm = (insn >> BRANCH_INSN_IMM_SHIFT) & BRANCH_INSN_IMM_MASK; /* * Check the imm signed bit. If the imm is a negative value, we @@ -73,8 +73,7 @@ uint32_t aarch32_set_branch_offset(uint32_t insn, int32_t offset) /* B/BL support [-32M, 32M) offset (see ARM DDI 0406C.c A4.3). */ if ( offset < -SZ_32M || offset >= SZ_32M ) { - printk(XENLOG_ERR - "%s: new branch offset out of range.\n", __func__); + printk(XENLOG_ERR "%s: new branch offset out of range.\n", __func__); return BUG_OPCODE; } diff --git a/xen/arch/arm/arm32/livepatch.c b/xen/arch/arm/arm32/livepatch.c index 41378a54ae..ab1eb09076 100644 --- a/xen/arch/arm/arm32/livepatch.c +++ b/xen/arch/arm/arm32/livepatch.c @@ -64,15 +64,15 @@ void arch_livepatch_apply(struct livepatch_func *func) *(new_ptr + i) = insn; /* - * When we upload the payload, it will go through the data cache - * (the region is cacheable). Until the data cache is cleaned, the data - * may not reach the memory. And in the case the data and instruction cache - * are separated, we may read invalid instruction from the memory because - * the data cache have not yet synced with the memory. Hence sync it. - */ + * When we upload the payload, it will go through the data cache + * (the region is cacheable). Until the data cache is cleaned, the data + * may not reach the memory. And in the case the data and instruction cache + * are separated, we may read invalid instruction from the memory because + * the data cache have not yet synced with the memory. Hence sync it. + */ if ( func->new_addr ) clean_and_invalidate_dcache_va_range(func->new_addr, func->new_size); - clean_and_invalidate_dcache_va_range(new_ptr, sizeof (*new_ptr) * len); + clean_and_invalidate_dcache_va_range(new_ptr, sizeof(*new_ptr) * len); } /* arch_livepatch_revert shared with ARM 32/ARM 64. */ @@ -81,8 +81,7 @@ int arch_livepatch_verify_elf(const struct livepatch_elf *elf) { const Elf_Ehdr *hdr = elf->hdr; - if ( hdr->e_machine != EM_ARM || - hdr->e_ident[EI_CLASS] != ELFCLASS32 ) + if ( hdr->e_machine != EM_ARM || hdr->e_ident[EI_CLASS] != ELFCLASS32 ) { dprintk(XENLOG_ERR, LIVEPATCH "%s: Unsupported ELF Machine type!\n", elf->name); @@ -107,7 +106,7 @@ bool arch_livepatch_symbol_deny(const struct livepatch_elf *elf, * them. If we do, abort. 
*/ if ( sym->name && sym->name[0] == '$' && sym->name[1] == 't' ) - return ( !sym->name[2] || sym->name[2] == '.' ); + return (!sym->name[2] || sym->name[2] == '.'); return false; } @@ -116,7 +115,8 @@ static s32 get_addend(unsigned char type, void *dest) { s32 addend = 0; - switch ( type ) { + switch (type) + { case R_ARM_NONE: /* ignore */ break; @@ -131,7 +131,7 @@ static s32 get_addend(unsigned char type, void *dest) case R_ARM_MOVW_ABS_NC: case R_ARM_MOVT_ABS: - addend = (*(u32 *)dest & 0x00000FFF); + addend = (*(u32 *)dest & 0x00000FFF); addend |= (*(u32 *)dest & 0x000F0000) >> 4; /* Addend is to sign-extend ([19:16],[11:0]). */ addend = (s16)addend; @@ -150,8 +150,8 @@ static s32 get_addend(unsigned char type, void *dest) static int perform_rel(unsigned char type, void *dest, uint32_t val, s32 addend) { - - switch ( type ) { + switch (type) + { case R_ARM_NONE: /* ignore */ break; @@ -165,7 +165,7 @@ static int perform_rel(unsigned char type, void *dest, uint32_t val, s32 addend) break; case R_ARM_MOVW_ABS_NC: /* S + A */ - case R_ARM_MOVT_ABS: /* S + A */ + case R_ARM_MOVT_ABS: /* S + A */ /* Clear addend if needed . */ if ( addend ) *(u32 *)dest &= 0xFFF0F000; @@ -213,7 +213,7 @@ static int perform_rel(unsigned char type, void *dest, uint32_t val, s32 addend) break; default: - return -EOPNOTSUPP; + return -EOPNOTSUPP; } return 0; @@ -221,8 +221,7 @@ static int perform_rel(unsigned char type, void *dest, uint32_t val, s32 addend) int arch_livepatch_perform(struct livepatch_elf *elf, const struct livepatch_elf_sec *base, - const struct livepatch_elf_sec *rela, - bool use_rela) + const struct livepatch_elf_sec *rela, bool use_rela) { unsigned int i; int rc = 0; @@ -262,7 +261,9 @@ int arch_livepatch_perform(struct livepatch_elf *elf, } else if ( symndx >= elf->nsym ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Relative symbol wants symbol@%u which is past end!\n", + dprintk(XENLOG_ERR, + LIVEPATCH + "%s: Relative symbol wants symbol@%u which is past end!\n", elf->name, symndx); return -EINVAL; } @@ -276,10 +277,11 @@ int arch_livepatch_perform(struct livepatch_elf *elf, val = elf->sym[symndx].sym->st_value; /* S */ rc = perform_rel(type, dest, val, addend); - switch ( rc ) + switch (rc) { case -EOVERFLOW: - dprintk(XENLOG_ERR, LIVEPATCH "%s: Overflow in relocation %u in %s for %s!\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Overflow in relocation %u in %s for %s!\n", elf->name, i, rela->name, base->name); break; diff --git a/xen/arch/arm/arm32/traps.c b/xen/arch/arm/arm32/traps.c index 76f714a168..3067f58f1d 100644 --- a/xen/arch/arm/arm32/traps.c +++ b/xen/arch/arm/arm32/traps.c @@ -38,7 +38,8 @@ void do_trap_undefined_instruction(struct cpu_user_regs *regs) (system_state >= SYS_STATE_active || !is_kernel_inittext(pc)) ) goto die; - /* PC should be always a multiple of 4, as Xen is using ARM instruction set */ + /* PC should be always a multiple of 4, as Xen is using ARM instruction set + */ if ( regs->pc & 0x3 ) goto die; diff --git a/xen/arch/arm/arm32/vfp.c b/xen/arch/arm/arm32/vfp.c index 0069acd297..9636f0362e 100644 --- a/xen/arch/arm/arm32/vfp.c +++ b/xen/arch/arm/arm32/vfp.c @@ -23,14 +23,16 @@ void vfp_save_state(struct vcpu *v) /* Save {d0-d15} */ asm volatile("stc p11, cr0, [%1], #32*4" - : "=Q" (*v->arch.vfp.fpregs1) : "r" (v->arch.vfp.fpregs1)); + : "=Q"(*v->arch.vfp.fpregs1) + : "r"(v->arch.vfp.fpregs1)); /* 32 x 64 bits registers? 
*/ if ( (READ_CP32(MVFR0) & MVFR0_A_SIMD_MASK) == 2 ) { /* Save {d16-d31} */ asm volatile("stcl p11, cr0, [%1], #32*4" - : "=Q" (*v->arch.vfp.fpregs2) : "r" (v->arch.vfp.fpregs2)); + : "=Q"(*v->arch.vfp.fpregs2) + : "r"(v->arch.vfp.fpregs2)); } WRITE_CP32(v->arch.vfp.fpexc & ~(FPEXC_EN), FPEXC); @@ -38,18 +40,21 @@ void vfp_save_state(struct vcpu *v) void vfp_restore_state(struct vcpu *v) { - //uint64_t test[16]; + // uint64_t test[16]; WRITE_CP32(READ_CP32(FPEXC) | FPEXC_EN, FPEXC); /* Restore {d0-d15} */ asm volatile("ldc p11, cr0, [%1], #32*4" - : : "Q" (*v->arch.vfp.fpregs1), "r" (v->arch.vfp.fpregs1)); + : + : "Q"(*v->arch.vfp.fpregs1), "r"(v->arch.vfp.fpregs1)); /* 32 x 64 bits registers? */ - if ( (READ_CP32(MVFR0) & MVFR0_A_SIMD_MASK) == 2 ) /* 32 x 64 bits registers */ + if ( (READ_CP32(MVFR0) & MVFR0_A_SIMD_MASK) == + 2 ) /* 32 x 64 bits registers */ /* Restore {d16-d31} */ asm volatile("ldcl p11, cr0, [%1], #32*4" - : : "Q" (*v->arch.vfp.fpregs2), "r" (v->arch.vfp.fpregs2)); + : + : "Q"(*v->arch.vfp.fpregs2), "r"(v->arch.vfp.fpregs2)); if ( v->arch.vfp.fpexc & FPEXC_EX ) { diff --git a/xen/arch/arm/arm64/asm-offsets.c b/xen/arch/arm/arm64/asm-offsets.c index 280ddb55bf..878f87f744 100644 --- a/xen/arch/arm/arm64/asm-offsets.c +++ b/xen/arch/arm/arm64/asm-offsets.c @@ -12,50 +12,49 @@ #include #include -#define DEFINE(_sym, _val) \ - asm volatile ("\n.ascii\"==>#define " #_sym " %0 /* " #_val " */<==\"" \ - : : "i" (_val) ) -#define BLANK() \ - asm volatile ( "\n.ascii\"==><==\"" : : ) -#define OFFSET(_sym, _str, _mem) \ - DEFINE(_sym, offsetof(_str, _mem)); +#define DEFINE(_sym, _val) \ + asm volatile("\n.ascii\"==>#define " #_sym " %0 /* " #_val " */<==\"" \ + : \ + : "i"(_val)) +#define BLANK() asm volatile("\n.ascii\"==><==\"" : :) +#define OFFSET(_sym, _str, _mem) DEFINE(_sym, offsetof(_str, _mem)); void __dummy__(void) { - OFFSET(UREGS_X0, struct cpu_user_regs, x0); - OFFSET(UREGS_X1, struct cpu_user_regs, x1); - OFFSET(UREGS_LR, struct cpu_user_regs, lr); + OFFSET(UREGS_X0, struct cpu_user_regs, x0); + OFFSET(UREGS_X1, struct cpu_user_regs, x1); + OFFSET(UREGS_LR, struct cpu_user_regs, lr); - OFFSET(UREGS_SP, struct cpu_user_regs, sp); - OFFSET(UREGS_PC, struct cpu_user_regs, pc); - OFFSET(UREGS_CPSR, struct cpu_user_regs, cpsr); - OFFSET(UREGS_ESR_el2, struct cpu_user_regs, hsr); + OFFSET(UREGS_SP, struct cpu_user_regs, sp); + OFFSET(UREGS_PC, struct cpu_user_regs, pc); + OFFSET(UREGS_CPSR, struct cpu_user_regs, cpsr); + OFFSET(UREGS_ESR_el2, struct cpu_user_regs, hsr); - OFFSET(UREGS_SPSR_el1, struct cpu_user_regs, spsr_el1); + OFFSET(UREGS_SPSR_el1, struct cpu_user_regs, spsr_el1); - OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq); - OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq); - OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und); - OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt); + OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq); + OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq); + OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und); + OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt); - OFFSET(UREGS_SP_el0, struct cpu_user_regs, sp_el0); - OFFSET(UREGS_SP_el1, struct cpu_user_regs, sp_el1); - OFFSET(UREGS_ELR_el1, struct cpu_user_regs, elr_el1); + OFFSET(UREGS_SP_el0, struct cpu_user_regs, sp_el0); + OFFSET(UREGS_SP_el1, struct cpu_user_regs, sp_el1); + OFFSET(UREGS_ELR_el1, struct cpu_user_regs, elr_el1); - OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, spsr_el1); - BLANK(); + OFFSET(UREGS_kernel_sizeof, struct 
cpu_user_regs, spsr_el1); + BLANK(); - DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info)); - OFFSET(CPUINFO_flags, struct cpu_info, flags); + DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info)); + OFFSET(CPUINFO_flags, struct cpu_info, flags); - OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context); + OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context); - BLANK(); - OFFSET(INITINFO_stack, struct init_info, stack); + BLANK(); + OFFSET(INITINFO_stack, struct init_info, stack); - BLANK(); - OFFSET(SMCCC_RES_a0, struct arm_smccc_res, a0); - OFFSET(SMCCC_RES_a2, struct arm_smccc_res, a2); + BLANK(); + OFFSET(SMCCC_RES_a0, struct arm_smccc_res, a0); + OFFSET(SMCCC_RES_a2, struct arm_smccc_res, a2); } /* diff --git a/xen/arch/arm/arm64/domain.c b/xen/arch/arm/arm64/domain.c index dd19098929..a10d0a3604 100644 --- a/xen/arch/arm/arm64/domain.c +++ b/xen/arch/arm/arm64/domain.c @@ -7,26 +7,57 @@ /* C(hyp,user), hyp is Xen internal name, user is user API name. */ -#define ALLREGS \ - C(x0,x0); C(x1,x1); C(x2,x2); C(x3,x3); \ - C(x4,x4); C(x5,x5); C(x6,x6); C(x7,x7); \ - C(x8,x8); C(x9,x9); C(x10,x10); C(x11,x11); \ - C(x12,x12); C(x13,x13); C(x14,x14); C(x15,x15); \ - C(x16,x16); C(x17,x17); C(x18,x18); C(x19,x19); \ - C(x20,x20); C(x21,x21); C(x22,x22); C(x23,x23); \ - C(x24,x24); C(x25,x25); C(x26,x26); C(x27,x27); \ - C(x28,x28); C(fp,x29); C(lr,x30); C(pc,pc64); \ - C(cpsr, cpsr); C(spsr_el1, spsr_el1) +#define ALLREGS \ + C(x0, x0); \ + C(x1, x1); \ + C(x2, x2); \ + C(x3, x3); \ + C(x4, x4); \ + C(x5, x5); \ + C(x6, x6); \ + C(x7, x7); \ + C(x8, x8); \ + C(x9, x9); \ + C(x10, x10); \ + C(x11, x11); \ + C(x12, x12); \ + C(x13, x13); \ + C(x14, x14); \ + C(x15, x15); \ + C(x16, x16); \ + C(x17, x17); \ + C(x18, x18); \ + C(x19, x19); \ + C(x20, x20); \ + C(x21, x21); \ + C(x22, x22); \ + C(x23, x23); \ + C(x24, x24); \ + C(x25, x25); \ + C(x26, x26); \ + C(x27, x27); \ + C(x28, x28); \ + C(fp, x29); \ + C(lr, x30); \ + C(pc, pc64); \ + C(cpsr, cpsr); \ + C(spsr_el1, spsr_el1) -#define ALLREGS32 C(spsr_fiq, spsr_fiq); C(spsr_irq,spsr_irq); \ - C(spsr_und,spsr_und); C(spsr_abt,spsr_abt) +#define ALLREGS32 \ + C(spsr_fiq, spsr_fiq); \ + C(spsr_irq, spsr_irq); \ + C(spsr_und, spsr_und); \ + C(spsr_abt, spsr_abt) -#define ALLREGS64 C(sp_el0,sp_el0); C(sp_el1,sp_el1); C(elr_el1,elr_el1) +#define ALLREGS64 \ + C(sp_el0, sp_el0); \ + C(sp_el1, sp_el1); \ + C(elr_el1, elr_el1) void vcpu_regs_hyp_to_user(const struct vcpu *vcpu, struct vcpu_guest_core_regs *regs) { -#define C(hyp,user) regs->user = vcpu->arch.cpu_info->guest_cpu_user_regs.hyp +#define C(hyp, user) regs->user = vcpu->arch.cpu_info->guest_cpu_user_regs.hyp ALLREGS; if ( is_32bit_domain(vcpu->domain) ) { @@ -42,7 +73,7 @@ void vcpu_regs_hyp_to_user(const struct vcpu *vcpu, void vcpu_regs_user_to_hyp(struct vcpu *vcpu, const struct vcpu_guest_core_regs *regs) { -#define C(hyp,user) vcpu->arch.cpu_info->guest_cpu_user_regs.hyp = regs->user +#define C(hyp, user) vcpu->arch.cpu_info->guest_cpu_user_regs.hyp = regs->user ALLREGS; if ( is_32bit_domain(vcpu->domain) ) { diff --git a/xen/arch/arm/arm64/domctl.c b/xen/arch/arm/arm64/domctl.c index ab8781fb91..2257213594 100644 --- a/xen/arch/arm/arm64/domctl.c +++ b/xen/arch/arm/arm64/domctl.c @@ -26,7 +26,7 @@ static long switch_mode(struct domain *d, enum domain_type type) d->arch.type = type; if ( is_64bit_domain(d) ) - for_each_vcpu(d, v) + for_each_vcpu (d, v) vcpu_switch_to_aarch64_mode(v); return 0; @@ -35,10 +35,10 @@ static long switch_mode(struct domain *d, enum domain_type 
type) long subarch_do_domctl(struct xen_domctl *domctl, struct domain *d, XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) { - switch ( domctl->cmd ) + switch (domctl->cmd) { case XEN_DOMCTL_set_address_size: - switch ( domctl->u.address_size.size ) + switch (domctl->u.address_size.size) { case 32: if ( !cpu_has_el1_32 ) diff --git a/xen/arch/arm/arm64/insn.c b/xen/arch/arm/arm64/insn.c index 73c18215a5..b6393167c4 100644 --- a/xen/arch/arm/arm64/insn.c +++ b/xen/arch/arm/arm64/insn.c @@ -27,194 +27,204 @@ #include #define __kprobes -#define pr_err(fmt, ...) printk(XENLOG_ERR fmt, ## __VA_ARGS__) +#define pr_err(fmt, ...) printk(XENLOG_ERR fmt, ##__VA_ARGS__) bool aarch64_insn_is_branch_imm(u32 insn) { - return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) || - aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) || - aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) || - aarch64_insn_is_bcond(insn)); + return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) || + aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) || + aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) || + aarch64_insn_is_bcond(insn)); } static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type, - u32 *maskp, int *shiftp) + u32 *maskp, int *shiftp) { - u32 mask; - int shift; - - switch (type) { - case AARCH64_INSN_IMM_26: - mask = BIT(26) - 1; - shift = 0; - break; - case AARCH64_INSN_IMM_19: - mask = BIT(19) - 1; - shift = 5; - break; - case AARCH64_INSN_IMM_16: - mask = BIT(16) - 1; - shift = 5; - break; - case AARCH64_INSN_IMM_14: - mask = BIT(14) - 1; - shift = 5; - break; - case AARCH64_INSN_IMM_12: - mask = BIT(12) - 1; - shift = 10; - break; - case AARCH64_INSN_IMM_9: - mask = BIT(9) - 1; - shift = 12; - break; - case AARCH64_INSN_IMM_7: - mask = BIT(7) - 1; - shift = 15; - break; - case AARCH64_INSN_IMM_6: - case AARCH64_INSN_IMM_S: - mask = BIT(6) - 1; - shift = 10; - break; - case AARCH64_INSN_IMM_R: - mask = BIT(6) - 1; - shift = 16; - break; - default: - return -EINVAL; - } - - *maskp = mask; - *shiftp = shift; - - return 0; + u32 mask; + int shift; + + switch (type) + { + case AARCH64_INSN_IMM_26: + mask = BIT(26) - 1; + shift = 0; + break; + case AARCH64_INSN_IMM_19: + mask = BIT(19) - 1; + shift = 5; + break; + case AARCH64_INSN_IMM_16: + mask = BIT(16) - 1; + shift = 5; + break; + case AARCH64_INSN_IMM_14: + mask = BIT(14) - 1; + shift = 5; + break; + case AARCH64_INSN_IMM_12: + mask = BIT(12) - 1; + shift = 10; + break; + case AARCH64_INSN_IMM_9: + mask = BIT(9) - 1; + shift = 12; + break; + case AARCH64_INSN_IMM_7: + mask = BIT(7) - 1; + shift = 15; + break; + case AARCH64_INSN_IMM_6: + case AARCH64_INSN_IMM_S: + mask = BIT(6) - 1; + shift = 10; + break; + case AARCH64_INSN_IMM_R: + mask = BIT(6) - 1; + shift = 16; + break; + default: + return -EINVAL; + } + + *maskp = mask; + *shiftp = shift; + + return 0; } -#define ADR_IMM_HILOSPLIT 2 -#define ADR_IMM_SIZE SZ_2M -#define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1) -#define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1) -#define ADR_IMM_LOSHIFT 29 -#define ADR_IMM_HISHIFT 5 +#define ADR_IMM_HILOSPLIT 2 +#define ADR_IMM_SIZE SZ_2M +#define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1) +#define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1) +#define ADR_IMM_LOSHIFT 29 +#define ADR_IMM_HISHIFT 5 u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn) { - u32 immlo, immhi, mask; - int shift; - - switch (type) { - case AARCH64_INSN_IMM_ADR: - shift = 0; - immlo = 
(insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK; - immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK; - insn = (immhi << ADR_IMM_HILOSPLIT) | immlo; - mask = ADR_IMM_SIZE - 1; - break; - default: - if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { - pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n", - type); - return 0; - } - } - - return (insn >> shift) & mask; + u32 immlo, immhi, mask; + int shift; + + switch (type) + { + case AARCH64_INSN_IMM_ADR: + shift = 0; + immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK; + immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK; + insn = (immhi << ADR_IMM_HILOSPLIT) | immlo; + mask = ADR_IMM_SIZE - 1; + break; + default: + if ( aarch64_get_imm_shift_mask(type, &mask, &shift) < 0 ) + { + pr_err("aarch64_insn_decode_immediate: unknown immediate encoding " + "%d\n", + type); + return 0; + } + } + + return (insn >> shift) & mask; } u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, - u32 insn, u64 imm) + u32 insn, u64 imm) { - u32 immlo, immhi, mask; - int shift; - - if (insn == AARCH64_BREAK_FAULT) - return AARCH64_BREAK_FAULT; - - switch (type) { - case AARCH64_INSN_IMM_ADR: - shift = 0; - immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT; - imm >>= ADR_IMM_HILOSPLIT; - immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT; - imm = immlo | immhi; - mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) | - (ADR_IMM_HIMASK << ADR_IMM_HISHIFT)); - break; - default: - if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { - pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n", - type); - return AARCH64_BREAK_FAULT; - } - } - - /* Update the immediate field. */ - insn &= ~(mask << shift); - insn |= (imm & mask) << shift; - - return insn; + u32 immlo, immhi, mask; + int shift; + + if ( insn == AARCH64_BREAK_FAULT ) + return AARCH64_BREAK_FAULT; + + switch (type) + { + case AARCH64_INSN_IMM_ADR: + shift = 0; + immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT; + imm >>= ADR_IMM_HILOSPLIT; + immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT; + imm = immlo | immhi; + mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) | + (ADR_IMM_HIMASK << ADR_IMM_HISHIFT)); + break; + default: + if ( aarch64_get_imm_shift_mask(type, &mask, &shift) < 0 ) + { + pr_err("aarch64_insn_encode_immediate: unknown immediate encoding " + "%d\n", + type); + return AARCH64_BREAK_FAULT; + } + } + + /* Update the immediate field. 
*/ + insn &= ~(mask << shift); + insn |= (imm & mask) << shift; + + return insn; } static inline long branch_imm_common(unsigned long pc, unsigned long addr, - long range) + long range) { - long offset; + long offset; - if ((pc & 0x3) || (addr & 0x3)) { - pr_err("%s: A64 instructions must be word aligned\n", __func__); - return range; - } + if ( (pc & 0x3) || (addr & 0x3) ) + { + pr_err("%s: A64 instructions must be word aligned\n", __func__); + return range; + } - offset = ((long)addr - (long)pc); + offset = ((long)addr - (long)pc); - if (offset < -range || offset >= range) { - pr_err("%s: offset out of range\n", __func__); - return range; - } + if ( offset < -range || offset >= range ) + { + pr_err("%s: offset out of range\n", __func__); + return range; + } - return offset; + return offset; } u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, - enum aarch64_insn_branch_type type) + enum aarch64_insn_branch_type type) { - u32 insn; - long offset; - - /* - * B/BL support [-128M, 128M) offset - * ARM64 virtual address arrangement guarantees all kernel and module - * texts are within +/-128M. - */ - offset = branch_imm_common(pc, addr, SZ_128M); - if (offset >= SZ_128M) - return AARCH64_BREAK_FAULT; - - switch (type) { - case AARCH64_INSN_BRANCH_LINK: - insn = aarch64_insn_get_bl_value(); - break; - case AARCH64_INSN_BRANCH_NOLINK: - insn = aarch64_insn_get_b_value(); - break; - default: - pr_err("%s: unknown branch encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, - offset >> 2); + u32 insn; + long offset; + + /* + * B/BL support [-128M, 128M) offset + * ARM64 virtual address arrangement guarantees all kernel and module + * texts are within +/-128M. 
+ */ + offset = branch_imm_common(pc, addr, SZ_128M); + if ( offset >= SZ_128M ) + return AARCH64_BREAK_FAULT; + + switch (type) + { + case AARCH64_INSN_BRANCH_LINK: + insn = aarch64_insn_get_bl_value(); + break; + case AARCH64_INSN_BRANCH_NOLINK: + insn = aarch64_insn_get_b_value(); + break; + default: + pr_err("%s: unknown branch encoding %d\n", __func__, type); + return AARCH64_BREAK_FAULT; + } + + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, + offset >> 2); } u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op) { - return aarch64_insn_get_hint_value() | op; + return aarch64_insn_get_hint_value() | op; } u32 __kprobes aarch64_insn_gen_nop(void) { - return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP); + return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP); } /* @@ -224,26 +234,29 @@ u32 __kprobes aarch64_insn_gen_nop(void) */ s32 aarch64_get_branch_offset(u32 insn) { - s32 imm; - - if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) { - imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn); - return (imm << 6) >> 4; - } - - if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) || - aarch64_insn_is_bcond(insn)) { - imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn); - return (imm << 13) >> 11; - } - - if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) { - imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn); - return (imm << 18) >> 16; - } - - /* Unhandled instruction */ - BUG(); + s32 imm; + + if ( aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ) + { + imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn); + return (imm << 6) >> 4; + } + + if ( aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) || + aarch64_insn_is_bcond(insn) ) + { + imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn); + return (imm << 13) >> 11; + } + + if ( aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ) + { + imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn); + return (imm << 18) >> 16; + } + + /* Unhandled instruction */ + BUG(); } /* @@ -252,21 +265,21 @@ s32 aarch64_get_branch_offset(u32 insn) */ u32 aarch64_set_branch_offset(u32 insn, s32 offset) { - if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, - offset >> 2); + if ( aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ) + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, + offset >> 2); - if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) || - aarch64_insn_is_bcond(insn)) - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn, - offset >> 2); + if ( aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) || + aarch64_insn_is_bcond(insn) ) + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn, + offset >> 2); - if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn, - offset >> 2); + if ( aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ) + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn, + offset >> 2); - /* Unhandled instruction */ - BUG(); + /* Unhandled instruction */ + BUG(); } /* diff --git a/xen/arch/arm/arm64/lib/find_next_bit.c b/xen/arch/arm/arm64/lib/find_next_bit.c index 17cb176266..c14673b8f5 100644 --- a/xen/arch/arm/arm64/lib/find_next_bit.c +++ b/xen/arch/arm/arm64/lib/find_next_bit.c @@ -12,49 +12,51 @@ #include #include -#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#define BITOP_WORD(nr) ((nr) / 
BITS_PER_LONG) #ifndef find_next_bit /* * Find the next set bit in a memory region. */ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, - unsigned long offset) + unsigned long offset) { - const unsigned long *p = addr + BITOP_WORD(offset); - unsigned long result = offset & ~(BITS_PER_LONG-1); - unsigned long tmp; + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG - 1); + unsigned long tmp; - if (offset >= size) - return size; - size -= result; - offset %= BITS_PER_LONG; - if (offset) { - tmp = *(p++); - tmp &= (~0UL << offset); - if (size < BITS_PER_LONG) - goto found_first; - if (tmp) - goto found_middle; - size -= BITS_PER_LONG; - result += BITS_PER_LONG; - } - while (size & ~(BITS_PER_LONG-1)) { - if ((tmp = *(p++))) - goto found_middle; - result += BITS_PER_LONG; - size -= BITS_PER_LONG; - } - if (!size) - return result; - tmp = *p; + if ( offset >= size ) + return size; + size -= result; + offset %= BITS_PER_LONG; + if ( offset ) + { + tmp = *(p++); + tmp &= (~0UL << offset); + if ( size < BITS_PER_LONG ) + goto found_first; + if ( tmp ) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while ( size & ~(BITS_PER_LONG - 1) ) + { + if ( (tmp = *(p++)) ) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if ( !size ) + return result; + tmp = *p; found_first: - tmp &= (~0UL >> (BITS_PER_LONG - size)); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. */ + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if ( tmp == 0UL ) /* Are any bits set? */ + return result + size; /* Nope. */ found_middle: - return result + __ffs(tmp); + return result + __ffs(tmp); } EXPORT_SYMBOL(find_next_bit); #endif @@ -65,42 +67,44 @@ EXPORT_SYMBOL(find_next_bit); * Linus' asm-alpha/bitops.h. */ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, - unsigned long offset) + unsigned long offset) { - const unsigned long *p = addr + BITOP_WORD(offset); - unsigned long result = offset & ~(BITS_PER_LONG-1); - unsigned long tmp; + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG - 1); + unsigned long tmp; - if (offset >= size) - return size; - size -= result; - offset %= BITS_PER_LONG; - if (offset) { - tmp = *(p++); - tmp |= ~0UL >> (BITS_PER_LONG - offset); - if (size < BITS_PER_LONG) - goto found_first; - if (~tmp) - goto found_middle; - size -= BITS_PER_LONG; - result += BITS_PER_LONG; - } - while (size & ~(BITS_PER_LONG-1)) { - if (~(tmp = *(p++))) - goto found_middle; - result += BITS_PER_LONG; - size -= BITS_PER_LONG; - } - if (!size) - return result; - tmp = *p; + if ( offset >= size ) + return size; + size -= result; + offset %= BITS_PER_LONG; + if ( offset ) + { + tmp = *(p++); + tmp |= ~0UL >> (BITS_PER_LONG - offset); + if ( size < BITS_PER_LONG ) + goto found_first; + if ( ~tmp ) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while ( size & ~(BITS_PER_LONG - 1) ) + { + if ( ~(tmp = *(p++)) ) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if ( !size ) + return result; + tmp = *p; found_first: - tmp |= ~0UL << size; - if (tmp == ~0UL) /* Are any bits zero? */ - return result + size; /* Nope. */ + tmp |= ~0UL << size; + if ( tmp == ~0UL ) /* Are any bits zero? */ + return result + size; /* Nope. 
*/ found_middle: - return result + ffz(tmp); + return result + ffz(tmp); } EXPORT_SYMBOL(find_next_zero_bit); #endif @@ -111,24 +115,25 @@ EXPORT_SYMBOL(find_next_zero_bit); */ unsigned long find_first_bit(const unsigned long *addr, unsigned long size) { - const unsigned long *p = addr; - unsigned long result = 0; - unsigned long tmp; + const unsigned long *p = addr; + unsigned long result = 0; + unsigned long tmp; - while (size & ~(BITS_PER_LONG-1)) { - if ((tmp = *(p++))) - goto found; - result += BITS_PER_LONG; - size -= BITS_PER_LONG; - } - if (!size) - return result; + while ( size & ~(BITS_PER_LONG - 1) ) + { + if ( (tmp = *(p++)) ) + goto found; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if ( !size ) + return result; - tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. */ + tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); + if ( tmp == 0UL ) /* Are any bits set? */ + return result + size; /* Nope. */ found: - return result + __ffs(tmp); + return result + __ffs(tmp); } EXPORT_SYMBOL(find_first_bit); #endif @@ -139,24 +144,25 @@ EXPORT_SYMBOL(find_first_bit); */ unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) { - const unsigned long *p = addr; - unsigned long result = 0; - unsigned long tmp; + const unsigned long *p = addr; + unsigned long result = 0; + unsigned long tmp; - while (size & ~(BITS_PER_LONG-1)) { - if (~(tmp = *(p++))) - goto found; - result += BITS_PER_LONG; - size -= BITS_PER_LONG; - } - if (!size) - return result; + while ( size & ~(BITS_PER_LONG - 1) ) + { + if ( ~(tmp = *(p++)) ) + goto found; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if ( !size ) + return result; - tmp = (*p) | (~0UL << size); - if (tmp == ~0UL) /* Are any bits zero? */ - return result + size; /* Nope. */ + tmp = (*p) | (~0UL << size); + if ( tmp == ~0UL ) /* Are any bits zero? */ + return result + size; /* Nope. 
*/ found: - return result + ffz(tmp); + return result + ffz(tmp); } EXPORT_SYMBOL(find_first_zero_bit); #endif @@ -164,12 +170,12 @@ EXPORT_SYMBOL(find_first_zero_bit); #ifdef __BIG_ENDIAN /* include/linux/byteorder does not support "unsigned long" type */ -static inline unsigned long ext2_swabp(const unsigned long * x) +static inline unsigned long ext2_swabp(const unsigned long *x) { #if BITS_PER_LONG == 64 - return (unsigned long) __swab64p((u64 *) x); + return (unsigned long)__swab64p((u64 *)x); #elif BITS_PER_LONG == 32 - return (unsigned long) __swab32p((u32 *) x); + return (unsigned long)__swab32p((u32 *)x); #else #error BITS_PER_LONG not defined #endif @@ -179,103 +185,107 @@ static inline unsigned long ext2_swabp(const unsigned long * x) static inline unsigned long ext2_swab(const unsigned long y) { #if BITS_PER_LONG == 64 - return (unsigned long) __swab64((u64) y); + return (unsigned long)__swab64((u64)y); #elif BITS_PER_LONG == 32 - return (unsigned long) __swab32((u32) y); + return (unsigned long)__swab32((u32)y); #else #error BITS_PER_LONG not defined #endif } #ifndef find_next_zero_bit_le -unsigned long find_next_zero_bit_le(const void *addr, unsigned - long size, unsigned long offset) +unsigned long find_next_zero_bit_le(const void *addr, unsigned long size, + unsigned long offset) { - const unsigned long *p = addr; - unsigned long result = offset & ~(BITS_PER_LONG - 1); - unsigned long tmp; + const unsigned long *p = addr; + unsigned long result = offset & ~(BITS_PER_LONG - 1); + unsigned long tmp; - if (offset >= size) - return size; - p += BITOP_WORD(offset); - size -= result; - offset &= (BITS_PER_LONG - 1UL); - if (offset) { - tmp = ext2_swabp(p++); - tmp |= (~0UL >> (BITS_PER_LONG - offset)); - if (size < BITS_PER_LONG) - goto found_first; - if (~tmp) - goto found_middle; - size -= BITS_PER_LONG; - result += BITS_PER_LONG; - } + if ( offset >= size ) + return size; + p += BITOP_WORD(offset); + size -= result; + offset &= (BITS_PER_LONG - 1UL); + if ( offset ) + { + tmp = ext2_swabp(p++); + tmp |= (~0UL >> (BITS_PER_LONG - offset)); + if ( size < BITS_PER_LONG ) + goto found_first; + if ( ~tmp ) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } - while (size & ~(BITS_PER_LONG - 1)) { - if (~(tmp = *(p++))) - goto found_middle_swap; - result += BITS_PER_LONG; - size -= BITS_PER_LONG; - } - if (!size) - return result; - tmp = ext2_swabp(p); + while ( size & ~(BITS_PER_LONG - 1) ) + { + if ( ~(tmp = *(p++)) ) + goto found_middle_swap; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if ( !size ) + return result; + tmp = ext2_swabp(p); found_first: - tmp |= ~0UL << size; - if (tmp == ~0UL) /* Are any bits zero? */ - return result + size; /* Nope. Skip ffz */ + tmp |= ~0UL << size; + if ( tmp == ~0UL ) /* Are any bits zero? */ + return result + size; /* Nope. 
Skip ffz */ found_middle: - return result + ffz(tmp); + return result + ffz(tmp); found_middle_swap: - return result + ffz(ext2_swab(tmp)); + return result + ffz(ext2_swab(tmp)); } EXPORT_SYMBOL(find_next_zero_bit_le); #endif #ifndef find_next_bit_le -unsigned long find_next_bit_le(const void *addr, unsigned - long size, unsigned long offset) +unsigned long find_next_bit_le(const void *addr, unsigned long size, + unsigned long offset) { - const unsigned long *p = addr; - unsigned long result = offset & ~(BITS_PER_LONG - 1); - unsigned long tmp; + const unsigned long *p = addr; + unsigned long result = offset & ~(BITS_PER_LONG - 1); + unsigned long tmp; - if (offset >= size) - return size; - p += BITOP_WORD(offset); - size -= result; - offset &= (BITS_PER_LONG - 1UL); - if (offset) { - tmp = ext2_swabp(p++); - tmp &= (~0UL << offset); - if (size < BITS_PER_LONG) - goto found_first; - if (tmp) - goto found_middle; - size -= BITS_PER_LONG; - result += BITS_PER_LONG; - } + if ( offset >= size ) + return size; + p += BITOP_WORD(offset); + size -= result; + offset &= (BITS_PER_LONG - 1UL); + if ( offset ) + { + tmp = ext2_swabp(p++); + tmp &= (~0UL << offset); + if ( size < BITS_PER_LONG ) + goto found_first; + if ( tmp ) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } - while (size & ~(BITS_PER_LONG - 1)) { - tmp = *(p++); - if (tmp) - goto found_middle_swap; - result += BITS_PER_LONG; - size -= BITS_PER_LONG; - } - if (!size) - return result; - tmp = ext2_swabp(p); + while ( size & ~(BITS_PER_LONG - 1) ) + { + tmp = *(p++); + if ( tmp ) + goto found_middle_swap; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if ( !size ) + return result; + tmp = ext2_swabp(p); found_first: - tmp &= (~0UL >> (BITS_PER_LONG - size)); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. */ + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if ( tmp == 0UL ) /* Are any bits set? */ + return result + size; /* Nope. */ found_middle: - return result + __ffs(tmp); + return result + __ffs(tmp); found_middle_swap: - return result + __ffs(ext2_swab(tmp)); + return result + __ffs(ext2_swab(tmp)); } EXPORT_SYMBOL(find_next_bit_le); #endif diff --git a/xen/arch/arm/arm64/livepatch.c b/xen/arch/arm/arm64/livepatch.c index 2247b925a0..3918dcb3fc 100644 --- a/xen/arch/arm/arm64/livepatch.c +++ b/xen/arch/arm/arm64/livepatch.c @@ -51,15 +51,15 @@ void arch_livepatch_apply(struct livepatch_func *func) *(new_ptr + i) = insn; /* - * When we upload the payload, it will go through the data cache - * (the region is cacheable). Until the data cache is cleaned, the data - * may not reach the memory. And in the case the data and instruction cache - * are separated, we may read invalid instruction from the memory because - * the data cache have not yet synced with the memory. Hence sync it. - */ + * When we upload the payload, it will go through the data cache + * (the region is cacheable). Until the data cache is cleaned, the data + * may not reach the memory. And in the case the data and instruction cache + * are separated, we may read invalid instruction from the memory because + * the data cache have not yet synced with the memory. Hence sync it. + */ if ( func->new_addr ) clean_and_invalidate_dcache_va_range(func->new_addr, func->new_size); - clean_and_invalidate_dcache_va_range(new_ptr, sizeof (*new_ptr) * len); + clean_and_invalidate_dcache_va_range(new_ptr, sizeof(*new_ptr) * len); } /* arch_livepatch_revert shared with ARM 32/ARM 64. 
*/ @@ -68,8 +68,7 @@ int arch_livepatch_verify_elf(const struct livepatch_elf *elf) { const Elf_Ehdr *hdr = elf->hdr; - if ( hdr->e_machine != EM_AARCH64 || - hdr->e_ident[EI_CLASS] != ELFCLASS64 ) + if ( hdr->e_machine != EM_AARCH64 || hdr->e_ident[EI_CLASS] != ELFCLASS64 ) { dprintk(XENLOG_ERR, LIVEPATCH "%s: Unsupported ELF Machine type!\n", elf->name); @@ -86,7 +85,8 @@ bool arch_livepatch_symbol_deny(const struct livepatch_elf *elf, return false; } -enum aarch64_reloc_op { +enum aarch64_reloc_op +{ RELOC_OP_NONE, RELOC_OP_ABS, RELOC_OP_PREL, @@ -95,7 +95,7 @@ enum aarch64_reloc_op { static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val) { - switch ( reloc_op ) + switch (reloc_op) { case RELOC_OP_ABS: return val; @@ -108,10 +108,10 @@ static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val) case RELOC_OP_NONE: return 0; - } - dprintk(XENLOG_DEBUG, LIVEPATCH "do_reloc: unknown relocation operation %d\n", reloc_op); + dprintk(XENLOG_DEBUG, + LIVEPATCH "do_reloc: unknown relocation operation %d\n", reloc_op); return 0; } @@ -120,18 +120,18 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len) { s64 sval = do_reloc(op, place, val); - switch ( len ) + switch (len) { case 16: *(s16 *)place = sval; if ( sval < INT16_MIN || sval > UINT16_MAX ) - return -EOVERFLOW; + return -EOVERFLOW; break; case 32: *(s32 *)place = sval; if ( sval < INT32_MIN || sval > UINT32_MAX ) - return -EOVERFLOW; + return -EOVERFLOW; break; case 64: @@ -139,14 +139,16 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len) break; default: - dprintk(XENLOG_DEBUG, LIVEPATCH "Invalid length (%d) for data relocation\n", len); + dprintk(XENLOG_DEBUG, + LIVEPATCH "Invalid length (%d) for data relocation\n", len); return 0; } return 0; } -enum aarch64_insn_movw_imm_type { +enum aarch64_insn_movw_imm_type +{ AARCH64_INSN_IMM_MOVNZ, AARCH64_INSN_IMM_MOVKZ, }; @@ -260,8 +262,11 @@ int arch_livepatch_perform_rela(struct livepatch_elf *elf, } else if ( symndx >= elf->nsym ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Relative relocation wants symbol@%u which is past end!\n", - elf->name, symndx); + dprintk( + XENLOG_ERR, + LIVEPATCH + "%s: Relative relocation wants symbol@%u which is past end!\n", + elf->name, symndx); return -EINVAL; } else if ( !elf->sym[symndx].sym ) @@ -271,14 +276,14 @@ int arch_livepatch_perform_rela(struct livepatch_elf *elf, return -EINVAL; } - val = elf->sym[symndx].sym->st_value + r->r_addend; /* S+A */ + val = elf->sym[symndx].sym->st_value + r->r_addend; /* S+A */ /* ARM64 operations at minimum are always 32-bit. 
*/ if ( r->r_offset >= base->sec->sh_size || - (r->r_offset + sizeof(uint32_t)) > base->sec->sh_size ) + (r->r_offset + sizeof(uint32_t)) > base->sec->sh_size ) goto bad_offset; - switch ( ELF64_R_TYPE(r->r_info) ) + switch (ELF64_R_TYPE(r->r_info)) { /* Data */ case R_AARCH64_ABS64: @@ -472,15 +477,17 @@ int arch_livepatch_perform_rela(struct livepatch_elf *elf, if ( overflow_check && ovf == -EOVERFLOW ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Overflow in relocation %u in %s for %s!\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Overflow in relocation %u in %s for %s!\n", elf->name, i, rela->name, base->name); return ovf; } } return 0; - bad_offset: - dprintk(XENLOG_ERR, LIVEPATCH "%s: Relative relocation offset is past %s section!\n", +bad_offset: + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Relative relocation offset is past %s section!\n", elf->name, base->name); return -EINVAL; } diff --git a/xen/arch/arm/arm64/smpboot.c b/xen/arch/arm/arm64/smpboot.c index 694fbf67e6..e419c34566 100644 --- a/xen/arch/arm/arm64/smpboot.c +++ b/xen/arch/arm/arm64/smpboot.c @@ -9,8 +9,9 @@ #include #include -struct smp_enable_ops { - int (*prepare_cpu)(int); +struct smp_enable_ops +{ + int (*prepare_cpu)(int); }; static paddr_t cpu_release_addr[NR_CPUS]; @@ -20,7 +21,7 @@ static int __init smp_spin_table_cpu_up(int cpu) { paddr_t __iomem *release; - if (!cpu_release_addr[cpu]) + if ( !cpu_release_addr[cpu] ) { printk("CPU%d: No release addr\n", cpu); return -ENODEV; @@ -76,7 +77,7 @@ static int __init dt_arch_cpu_init(int cpu, struct dt_device_node *dn) const char *enable_method; enable_method = dt_get_property(dn, "enable-method", NULL); - if (!enable_method) + if ( !enable_method ) { printk("CPU%d has no enable method\n", cpu); return -EINVAL; diff --git a/xen/arch/arm/arm64/traps.c b/xen/arch/arm/arm64/traps.c index babfc1d884..76d31b244c 100644 --- a/xen/arch/arm/arm64/traps.c +++ b/xen/arch/arm/arm64/traps.c @@ -24,19 +24,15 @@ #include -static const char *handler[]= { - "Synchronous Abort", - "IRQ", - "FIQ", - "Error" -}; +static const char *handler[] = {"Synchronous Abort", "IRQ", "FIQ", "Error"}; void do_bad_mode(struct cpu_user_regs *regs, int reason) { - union hsr hsr = { .bits = regs->hsr }; + union hsr hsr = {.bits = regs->hsr}; printk("Bad mode in %s handler detected\n", handler[reason]); - printk("ESR=0x%08"PRIx32": EC=%"PRIx32", IL=%"PRIx32", ISS=%"PRIx32"\n", + printk("ESR=0x%08" PRIx32 ": EC=%" PRIx32 ", IL=%" PRIx32 ", ISS=%" PRIx32 + "\n", hsr.bits, hsr.ec, hsr.len, hsr.iss); local_irq_disable(); diff --git a/xen/arch/arm/arm64/vfp.c b/xen/arch/arm/arm64/vfp.c index 999a0d58a5..d748dd8a55 100644 --- a/xen/arch/arm/arm64/vfp.c +++ b/xen/arch/arm/arm64/vfp.c @@ -24,7 +24,8 @@ void vfp_save_state(struct vcpu *v) "stp q26, q27, [%1, #16 * 26]\n\t" "stp q28, q29, [%1, #16 * 28]\n\t" "stp q30, q31, [%1, #16 * 30]\n\t" - : "=Q" (*v->arch.vfp.fpregs) : "r" (v->arch.vfp.fpregs)); + : "=Q"(*v->arch.vfp.fpregs) + : "r"(v->arch.vfp.fpregs)); v->arch.vfp.fpsr = READ_SYSREG32(FPSR); v->arch.vfp.fpcr = READ_SYSREG32(FPCR); @@ -53,7 +54,8 @@ void vfp_restore_state(struct vcpu *v) "ldp q26, q27, [%1, #16 * 26]\n\t" "ldp q28, q29, [%1, #16 * 28]\n\t" "ldp q30, q31, [%1, #16 * 30]\n\t" - : : "Q" (*v->arch.vfp.fpregs), "r" (v->arch.vfp.fpregs)); + : + : "Q"(*v->arch.vfp.fpregs), "r"(v->arch.vfp.fpregs)); WRITE_SYSREG32(v->arch.vfp.fpsr, FPSR); WRITE_SYSREG32(v->arch.vfp.fpcr, FPCR); diff --git a/xen/arch/arm/arm64/vsysreg.c b/xen/arch/arm/arm64/vsysreg.c index 8a85507d9d..da652df188 100644 --- 
a/xen/arch/arm/arm64/vsysreg.c +++ b/xen/arch/arm/arm64/vsysreg.c @@ -30,20 +30,20 @@ * * Note that it only traps NS write access from EL1. */ -#define TVM_REG(reg) \ -static bool vreg_emulate_##reg(struct cpu_user_regs *regs, \ - uint64_t *r, bool read) \ -{ \ - struct vcpu *v = current; \ - bool cache_enabled = vcpu_has_cache_enabled(v); \ - \ - GUEST_BUG_ON(read); \ - WRITE_SYSREG64(*r, reg); \ - \ - p2m_toggle_cache(v, cache_enabled); \ - \ - return true; \ -} +#define TVM_REG(reg) \ + static bool vreg_emulate_##reg(struct cpu_user_regs *regs, uint64_t *r, \ + bool read) \ + { \ + struct vcpu *v = current; \ + bool cache_enabled = vcpu_has_cache_enabled(v); \ + \ + GUEST_BUG_ON(read); \ + WRITE_SYSREG64(*r, reg); \ + \ + p2m_toggle_cache(v, cache_enabled); \ + \ + return true; \ + } /* Defining helpers for emulating sysreg registers. */ TVM_REG(SCTLR_EL1) @@ -59,23 +59,22 @@ TVM_REG(AMAIR_EL1) TVM_REG(CONTEXTIDR_EL1) /* Macro to generate easily case for co-processor emulation */ -#define GENERATE_CASE(reg) \ - case HSR_SYSREG_##reg: \ - { \ - bool res; \ - \ - res = vreg_emulate_sysreg64(regs, hsr, vreg_emulate_##reg); \ - ASSERT(res); \ - break; \ +#define GENERATE_CASE(reg) \ + case HSR_SYSREG_##reg: \ + { \ + bool res; \ + \ + res = vreg_emulate_sysreg64(regs, hsr, vreg_emulate_##reg); \ + ASSERT(res); \ + break; \ } -void do_sysreg(struct cpu_user_regs *regs, - const union hsr hsr) +void do_sysreg(struct cpu_user_regs *regs, const union hsr hsr) { int regidx = hsr.sysreg.reg; struct vcpu *v = current; - switch ( hsr.bits & HSR_SYSREG_REGS_MASK ) + switch (hsr.bits & HSR_SYSREG_REGS_MASK) { /* * HCR_EL2.TACR @@ -101,22 +100,22 @@ void do_sysreg(struct cpu_user_regs *regs, p2m_set_way_flush(current); break; - /* - * HCR_EL2.TVM - * - * ARMv8 (DDI 0487D.a): Table D1-38 - */ - GENERATE_CASE(SCTLR_EL1) - GENERATE_CASE(TTBR0_EL1) - GENERATE_CASE(TTBR1_EL1) - GENERATE_CASE(TCR_EL1) - GENERATE_CASE(ESR_EL1) - GENERATE_CASE(FAR_EL1) - GENERATE_CASE(AFSR0_EL1) - GENERATE_CASE(AFSR1_EL1) - GENERATE_CASE(MAIR_EL1) - GENERATE_CASE(AMAIR_EL1) - GENERATE_CASE(CONTEXTIDR_EL1) + /* + * HCR_EL2.TVM + * + * ARMv8 (DDI 0487D.a): Table D1-38 + */ + GENERATE_CASE(SCTLR_EL1) + GENERATE_CASE(TTBR0_EL1) + GENERATE_CASE(TTBR1_EL1) + GENERATE_CASE(TCR_EL1) + GENERATE_CASE(ESR_EL1) + GENERATE_CASE(FAR_EL1) + GENERATE_CASE(AFSR0_EL1) + GENERATE_CASE(AFSR1_EL1) + GENERATE_CASE(MAIR_EL1) + GENERATE_CASE(AMAIR_EL1) + GENERATE_CASE(CONTEXTIDR_EL1) /* * MDCR_EL2.TDRA @@ -167,11 +166,11 @@ void do_sysreg(struct cpu_user_regs *regs, * register as RAZ/WI above. So RO at both EL0 and EL1. */ return handle_ro_raz(regs, regidx, hsr.sysreg.read, hsr, 0); - HSR_SYSREG_DBG_CASES(DBGBVR): - HSR_SYSREG_DBG_CASES(DBGBCR): - HSR_SYSREG_DBG_CASES(DBGWVR): - HSR_SYSREG_DBG_CASES(DBGWCR): - return handle_raz_wi(regs, regidx, hsr.sysreg.read, hsr, 1); + HSR_SYSREG_DBG_CASES(DBGBVR) + : HSR_SYSREG_DBG_CASES(DBGBCR) + : HSR_SYSREG_DBG_CASES(DBGWVR) + : HSR_SYSREG_DBG_CASES(DBGWCR) + : return handle_raz_wi(regs, regidx, hsr.sysreg.read, hsr, 1); /* * MDCR_EL2.TPM @@ -275,22 +274,19 @@ void do_sysreg(struct cpu_user_regs *regs, * And all other unknown registers. */ default: - { - const struct hsr_sysreg sysreg = hsr.sysreg; + { + const struct hsr_sysreg sysreg = hsr.sysreg; - gdprintk(XENLOG_ERR, - "%s %d, %d, c%d, c%d, %d %s x%d @ 0x%"PRIregister"\n", - sysreg.read ? "mrs" : "msr", - sysreg.op0, sysreg.op1, - sysreg.crn, sysreg.crm, - sysreg.op2, - sysreg.read ? 
"=>" : "<=", - sysreg.reg, regs->pc); - gdprintk(XENLOG_ERR, "unhandled 64-bit sysreg access %#x\n", - hsr.bits & HSR_SYSREG_REGS_MASK); - inject_undef_exception(regs, hsr); - return; - } + gdprintk(XENLOG_ERR, + "%s %d, %d, c%d, c%d, %d %s x%d @ 0x%" PRIregister "\n", + sysreg.read ? "mrs" : "msr", sysreg.op0, sysreg.op1, + sysreg.crn, sysreg.crm, sysreg.op2, + sysreg.read ? "=>" : "<=", sysreg.reg, regs->pc); + gdprintk(XENLOG_ERR, "unhandled 64-bit sysreg access %#x\n", + hsr.bits & HSR_SYSREG_REGS_MASK); + inject_undef_exception(regs, hsr); + return; + } } regs->pc += 4; diff --git a/xen/arch/arm/bootfdt.c b/xen/arch/arm/bootfdt.c index 891b4b66ff..bf299cd434 100644 --- a/xen/arch/arm/bootfdt.c +++ b/xen/arch/arm/bootfdt.c @@ -27,8 +27,8 @@ static bool __init device_tree_node_matches(const void *fdt, int node, /* Match both "match" and "match@..." patterns but not "match-foo". */ - return strncmp(name, match, match_len) == 0 - && (name[match_len] == '@' || name[match_len] == '\0'); + return strncmp(name, match, match_len) == 0 && + (name[match_len] == '@' || name[match_len] == '\0'); } static bool __init device_tree_node_compatible(const void *fdt, int node, @@ -44,7 +44,8 @@ static bool __init device_tree_node_compatible(const void *fdt, int node, if ( prop == NULL ) return false; - while ( len > 0 ) { + while ( len > 0 ) + { if ( !dt_compat_cmp(prop, match) ) return true; l = strlen(prop) + 1; @@ -71,7 +72,7 @@ static u32 __init device_tree_get_u32(const void *fdt, int node, if ( !prop || prop->len < sizeof(u32) ) return dflt; - return fdt32_to_cpu(*(uint32_t*)prop->data); + return fdt32_to_cpu(*(uint32_t *)prop->data); } /** @@ -86,8 +87,7 @@ static u32 __init device_tree_get_u32(const void *fdt, int node, * returns a value different from 0, that value is returned immediately. */ int __init device_tree_for_each_node(const void *fdt, - device_tree_node_func func, - void *data) + device_tree_node_func func, void *data) { int node; int depth; @@ -95,8 +95,7 @@ int __init device_tree_for_each_node(const void *fdt, u32 size_cells[DEVICE_TREE_MAX_DEPTH]; int ret; - for ( node = 0, depth = 0; - node >=0 && depth >= 0; + for ( node = 0, depth = 0; node >= 0 && depth >= 0; node = fdt_next_node(fdt, node, &depth) ) { const char *name = fdt_get_name(fdt, node, NULL); @@ -104,18 +103,18 @@ int __init device_tree_for_each_node(const void *fdt, if ( depth >= DEVICE_TREE_MAX_DEPTH ) { - printk("Warning: device tree node `%s' is nested too deep\n", - name); + printk("Warning: device tree node `%s' is nested too deep\n", name); continue; } - as = depth > 0 ? address_cells[depth-1] : DT_ROOT_NODE_ADDR_CELLS_DEFAULT; - ss = depth > 0 ? size_cells[depth-1] : DT_ROOT_NODE_SIZE_CELLS_DEFAULT; + as = depth > 0 ? address_cells[depth - 1] + : DT_ROOT_NODE_ADDR_CELLS_DEFAULT; + ss = + depth > 0 ? 
size_cells[depth - 1] : DT_ROOT_NODE_SIZE_CELLS_DEFAULT; - address_cells[depth] = device_tree_get_u32(fdt, node, - "#address-cells", as); - size_cells[depth] = device_tree_get_u32(fdt, node, - "#size-cells", ss); + address_cells[depth] = + device_tree_get_u32(fdt, node, "#address-cells", as); + size_cells[depth] = device_tree_get_u32(fdt, node, "#size-cells", ss); ret = func(fdt, node, name, depth, as, ss, data); if ( ret != 0 ) @@ -125,8 +124,8 @@ int __init device_tree_for_each_node(const void *fdt, } static void __init process_memory_node(const void *fdt, int node, - const char *name, - u32 address_cells, u32 size_cells) + const char *name, u32 address_cells, + u32 size_cells) { const struct fdt_property *prop; int i; @@ -137,8 +136,7 @@ static void __init process_memory_node(const void *fdt, int node, if ( address_cells < 1 || size_cells < 1 ) { - printk("fdt: node `%s': invalid #address-cells or #size-cells", - name); + printk("fdt: node `%s': invalid #address-cells or #size-cells", name); return; } @@ -150,7 +148,7 @@ static void __init process_memory_node(const void *fdt, int node, } cell = (const __be32 *)prop->data; - banks = fdt32_to_cpu(prop->len) / (reg_cells * sizeof (u32)); + banks = fdt32_to_cpu(prop->len) / (reg_cells * sizeof(u32)); for ( i = 0; i < banks && bootinfo.mem.nr_banks < NR_MEM_BANKS; i++ ) { @@ -164,8 +162,8 @@ static void __init process_memory_node(const void *fdt, int node, } static void __init process_multiboot_node(const void *fdt, int node, - const char *name, - u32 address_cells, u32 size_cells) + const char *name, u32 address_cells, + u32 size_cells) { static int __initdata kind_guess = 0; const struct fdt_property *prop; @@ -182,7 +180,7 @@ static void __init process_multiboot_node(const void *fdt, int node, ASSERT(parent_node >= 0); /* Check that the node is under "/chosen" (first 7 chars of path) */ - ret = fdt_get_path(fdt, node, path, sizeof (path)); + ret = fdt_get_path(fdt, node, path, sizeof(path)); if ( ret != 0 || strncmp(path, "/chosen", 7) ) return; @@ -191,8 +189,7 @@ static void __init process_multiboot_node(const void *fdt, int node, panic("node %s missing `reg' property\n", name); if ( len < dt_cells_to_size(address_cells + size_cells) ) - panic("fdt: node `%s': `reg` property length is too short\n", - name); + panic("fdt: node `%s': `reg` property length is too short\n", name); cell = (const __be32 *)prop->data; device_tree_get_reg(&cell, address_cells, size_cells, &start, &size); @@ -221,13 +218,18 @@ static void __init process_multiboot_node(const void *fdt, int node, */ if ( kind == BOOTMOD_UNKNOWN ) { - switch ( kind_guess++ ) + switch (kind_guess++) { - case 0: kind = BOOTMOD_KERNEL; break; - case 1: kind = BOOTMOD_RAMDISK; break; - default: break; + case 0: + kind = BOOTMOD_KERNEL; + break; + case 1: + kind = BOOTMOD_RAMDISK; + break; + default: + break; } - if ( kind_guess > 1 && has_xsm_magic(start) ) + if ( kind_guess > 1 && has_xsm_magic(start) ) kind = BOOTMOD_XSM; } @@ -237,13 +239,13 @@ static void __init process_multiboot_node(const void *fdt, int node, prop = fdt_get_property(fdt, node, "bootargs", &len); if ( !prop ) return; - add_boot_cmdline(fdt_get_name(fdt, parent_node, &len), prop->data, - kind, start, domU); + add_boot_cmdline(fdt_get_name(fdt, parent_node, &len), prop->data, kind, + start, domU); } static void __init process_chosen_node(const void *fdt, int node, - const char *name, - u32 address_cells, u32 size_cells) + const char *name, u32 address_cells, + u32 size_cells) { const struct fdt_property *prop; 
paddr_t start, end; @@ -277,25 +279,25 @@ static void __init process_chosen_node(const void *fdt, int node, if ( start >= end ) { - printk("linux,initrd limits invalid: %"PRIpaddr" >= %"PRIpaddr"\n", - start, end); + printk("linux,initrd limits invalid: %" PRIpaddr " >= %" PRIpaddr "\n", + start, end); return; } - printk("Initrd %"PRIpaddr"-%"PRIpaddr"\n", start, end); + printk("Initrd %" PRIpaddr "-%" PRIpaddr "\n", start, end); - add_boot_module(BOOTMOD_RAMDISK, start, end-start, false); + add_boot_module(BOOTMOD_RAMDISK, start, end - start, false); } -static int __init early_scan_node(const void *fdt, - int node, const char *name, int depth, - u32 address_cells, u32 size_cells, +static int __init early_scan_node(const void *fdt, int node, const char *name, + int depth, u32 address_cells, u32 size_cells, void *data) { if ( device_tree_node_matches(fdt, node, "memory") ) process_memory_node(fdt, node, name, address_cells, size_cells); - else if ( depth <= 3 && (device_tree_node_compatible(fdt, node, "xen,multiboot-module" ) || - device_tree_node_compatible(fdt, node, "multiboot,module" ))) + else if ( depth <= 3 && + (device_tree_node_compatible(fdt, node, "xen,multiboot-module") || + device_tree_node_compatible(fdt, node, "multiboot,module")) ) process_multiboot_node(fdt, node, name, address_cells, size_cells); else if ( depth == 1 && device_tree_node_matches(fdt, node, "chosen") ) process_chosen_node(fdt, node, name, address_cells, size_cells); @@ -311,16 +313,14 @@ static void __init early_print_info(void) int i, nr_rsvd; for ( i = 0; i < mi->nr_banks; i++ ) - printk("RAM: %"PRIpaddr" - %"PRIpaddr"\n", - mi->bank[i].start, - mi->bank[i].start + mi->bank[i].size - 1); + printk("RAM: %" PRIpaddr " - %" PRIpaddr "\n", mi->bank[i].start, + mi->bank[i].start + mi->bank[i].size - 1); printk("\n"); - for ( i = 0 ; i < mods->nr_mods; i++ ) - printk("MODULE[%d]: %"PRIpaddr" - %"PRIpaddr" %-12s\n", - i, - mods->module[i].start, - mods->module[i].start + mods->module[i].size, - boot_module_kind_as_string(mods->module[i].kind)); + for ( i = 0; i < mods->nr_mods; i++ ) + printk("MODULE[%d]: %" PRIpaddr " - %" PRIpaddr " %-12s\n", i, + mods->module[i].start, + mods->module[i].start + mods->module[i].size, + boot_module_kind_as_string(mods->module[i].kind)); nr_rsvd = fdt_num_mem_rsv(device_tree_flattened); for ( i = 0; i < nr_rsvd; i++ ) @@ -330,14 +330,12 @@ static void __init early_print_info(void) continue; /* fdt_get_mem_rsv returns length */ e += s; - printk(" RESVD[%d]: %"PRIpaddr" - %"PRIpaddr"\n", - i, s, e); + printk(" RESVD[%d]: %" PRIpaddr " - %" PRIpaddr "\n", i, s, e); } printk("\n"); - for ( i = 0 ; i < cmds->nr_mods; i++ ) - printk("CMDLINE[%"PRIpaddr"]:%s %s\n", cmds->cmdline[i].start, - cmds->cmdline[i].dt_name, - &cmds->cmdline[i].cmdline[0]); + for ( i = 0; i < cmds->nr_mods; i++ ) + printk("CMDLINE[%" PRIpaddr "]:%s %s\n", cmds->cmdline[i].start, + cmds->cmdline[i].dt_name, &cmds->cmdline[i].cmdline[0]); printk("\n"); } @@ -378,8 +376,8 @@ const __init char *boot_fdt_cmdline(const void *fdt) struct bootcmdline *dom0_cmdline = boot_cmdline_find_by_kind(BOOTMOD_KERNEL); - if (fdt_get_property(fdt, node, "xen,dom0-bootargs", NULL) || - ( dom0_cmdline && dom0_cmdline->cmdline[0] ) ) + if ( fdt_get_property(fdt, node, "xen,dom0-bootargs", NULL) || + (dom0_cmdline && dom0_cmdline->cmdline[0]) ) prop = fdt_get_property(fdt, node, "bootargs", NULL); } if ( prop == NULL ) diff --git a/xen/arch/arm/cpuerrata.c b/xen/arch/arm/cpuerrata.c index 4431b244fd..718fef0b96 100644 --- 
a/xen/arch/arm/cpuerrata.c +++ b/xen/arch/arm/cpuerrata.c @@ -59,9 +59,9 @@ static bool copy_hyp_vect_bpi(unsigned int slot, const char *hyp_vec_start, * Vectors are part of the text that are mapped read-only. So re-map * the vector table to be able to update vectors. */ - dst_remapped = __vmap(&dst_mfn, - 1UL << get_order_from_bytes(VECTOR_TABLE_SIZE), - 1, 1, PAGE_HYPERVISOR, VMAP_DEFAULT); + dst_remapped = + __vmap(&dst_mfn, 1UL << get_order_from_bytes(VECTOR_TABLE_SIZE), 1, 1, + PAGE_HYPERVISOR, VMAP_DEFAULT); if ( !dst_remapped ) return false; @@ -80,11 +80,9 @@ static bool copy_hyp_vect_bpi(unsigned int slot, const char *hyp_vec_start, return true; } -static bool __maybe_unused -install_bp_hardening_vec(const struct arm_cpu_capabilities *entry, - const char *hyp_vec_start, - const char *hyp_vec_end, - const char *desc) +static bool __maybe_unused install_bp_hardening_vec( + const struct arm_cpu_capabilities *entry, const char *hyp_vec_start, + const char *hyp_vec_end, const char *desc) { static int last_slot = -1; static DEFINE_SPINLOCK(bp_lock); @@ -99,8 +97,8 @@ install_bp_hardening_vec(const struct arm_cpu_capabilities *entry, if ( !entry->matches(entry) ) return true; - printk(XENLOG_INFO "CPU%u will %s on exception entry\n", - smp_processor_id(), desc); + printk(XENLOG_INFO "CPU%u will %s on exception entry\n", smp_processor_id(), + desc); /* * No need to install hardened vector when the processor has @@ -141,8 +139,9 @@ install_bp_hardening_vec(const struct arm_cpu_capabilities *entry, if ( ret ) { /* Install the new vector table. */ - WRITE_SYSREG((vaddr_t)(__bp_harden_hyp_vecs_start + slot * VECTOR_TABLE_SIZE), - VBAR_EL2); + WRITE_SYSREG( + (vaddr_t)(__bp_harden_hyp_vecs_start + slot * VECTOR_TABLE_SIZE), + VBAR_EL2); isb(); } @@ -176,7 +175,7 @@ static int enable_smccc_arch_workaround_1(void *data) if ( (int)res.a0 < 0 ) goto warn; - return !install_bp_hardening_vec(entry,__smccc_workaround_1_smc_start, + return !install_bp_hardening_vec(entry, __smccc_workaround_1_smc_start, __smccc_workaround_1_smc_end, "call ARM_SMCCC_ARCH_WORKAROUND_1"); @@ -218,22 +217,20 @@ install_bp_hardening_vecs(const struct arm_cpu_capabilities *entry, if ( !entry->matches(entry) ) return; - printk(XENLOG_INFO "CPU%u will %s on guest exit\n", - smp_processor_id(), desc); + printk(XENLOG_INFO "CPU%u will %s on guest exit\n", smp_processor_id(), + desc); this_cpu(bp_harden_vecs) = hyp_vecs; } static int enable_bp_inv_hardening(void *data) { - install_bp_hardening_vecs(data, hyp_traps_vector_bp_inv, - "execute BPIALL"); + install_bp_hardening_vecs(data, hyp_traps_vector_bp_inv, "execute BPIALL"); return 0; } static int enable_ic_inv_hardening(void *data) { - install_bp_hardening_vecs(data, hyp_traps_vector_ic_inv, - "execute ICIALLU"); + install_bp_hardening_vecs(data, hyp_traps_vector_ic_inv, "execute ICIALLU"); return 0; } @@ -279,8 +276,8 @@ custom_param("spec-ctrl", parse_spec_ctrl); /* Arm64 only for now as for Arm32 the workaround is currently handled in C. 
*/ #ifdef CONFIG_ARM_64 void __init arm_enable_wa2_handling(const struct alt_instr *alt, - const uint32_t *origptr, - uint32_t *updptr, int nr_inst) + const uint32_t *origptr, uint32_t *updptr, + int nr_inst) { BUG_ON(nr_inst != 1); @@ -310,7 +307,7 @@ static bool has_ssbd_mitigation(const struct arm_cpu_capabilities *entry) arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FID, ARM_SMCCC_ARCH_WORKAROUND_2_FID, &res); - switch ( (int)res.a0 ) + switch ((int)res.a0) { case ARM_SMCCC_NOT_SUPPORTED: ssbd_state = ARM_SSBD_UNKNOWN; @@ -333,7 +330,7 @@ static bool has_ssbd_mitigation(const struct arm_cpu_capabilities *entry) return false; } - switch ( ssbd_state ) + switch (ssbd_state) { case ARM_SSBD_FORCE_DISABLE: { @@ -381,23 +378,20 @@ static bool has_ssbd_mitigation(const struct arm_cpu_capabilities *entry) } #endif -#define MIDR_RANGE(model, min, max) \ - .matches = is_affected_midr_range, \ - .midr_model = model, \ - .midr_range_min = min, \ - .midr_range_max = max +#define MIDR_RANGE(model, min, max) \ + .matches = is_affected_midr_range, .midr_model = model, \ + .midr_range_min = min, .midr_range_max = max -#define MIDR_ALL_VERSIONS(model) \ - .matches = is_affected_midr_range, \ - .midr_model = model, \ - .midr_range_min = 0, \ +#define MIDR_ALL_VERSIONS(model) \ + .matches = is_affected_midr_range, .midr_model = model, \ + .midr_range_min = 0, \ .midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK) static bool __maybe_unused is_affected_midr_range(const struct arm_cpu_capabilities *entry) { - return MIDR_IS_CPU_MODEL_RANGE(current_cpu_data.midr.bits, entry->midr_model, - entry->midr_range_min, + return MIDR_IS_CPU_MODEL_RANGE(current_cpu_data.midr.bits, + entry->midr_model, entry->midr_range_min, entry->midr_range_max); } @@ -408,8 +402,7 @@ static const struct arm_cpu_capabilities arm_errata[] = { .capability = ARM32_WORKAROUND_766422, MIDR_RANGE(MIDR_CORTEX_A15, 0x04, 0x04), }, -#if defined(CONFIG_ARM64_ERRATUM_827319) || \ - defined(CONFIG_ARM64_ERRATUM_824069) +#if defined(CONFIG_ARM64_ERRATUM_827319) || defined(CONFIG_ARM64_ERRATUM_824069) { /* Cortex-A53 r0p[012] */ .desc = "ARM errata 827319, 824069", @@ -430,8 +423,7 @@ static const struct arm_cpu_capabilities arm_errata[] = { /* Cortex-A57 r0p0 - r1p2 */ .desc = "ARM erratum 832075", .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, - MIDR_RANGE(MIDR_CORTEX_A57, 0x00, - (1 << MIDR_VARIANT_SHIFT) | 2), + MIDR_RANGE(MIDR_CORTEX_A57, 0x00, (1 << MIDR_VARIANT_SHIFT) | 2), }, #endif #ifdef CONFIG_ARM64_ERRATUM_834220 @@ -439,8 +431,7 @@ static const struct arm_cpu_capabilities arm_errata[] = { /* Cortex-A57 r0p0 - r1p2 */ .desc = "ARM erratum 834220", .capability = ARM64_WORKAROUND_834220, - MIDR_RANGE(MIDR_CORTEX_A57, 0x00, - (1 << MIDR_VARIANT_SHIFT) | 2), + MIDR_RANGE(MIDR_CORTEX_A57, 0x00, (1 << MIDR_VARIANT_SHIFT) | 2), }, #endif #ifdef CONFIG_ARM64_HARDEN_BRANCH_PREDICTOR @@ -508,13 +499,12 @@ void __init enable_errata_workarounds(void) enable_cpu_capabilities(arm_errata); } -static int cpu_errata_callback(struct notifier_block *nfb, - unsigned long action, +static int cpu_errata_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int rc = 0; - switch ( action ) + switch (action) { case CPU_STARTING: /* diff --git a/xen/arch/arm/cpufeature.c b/xen/arch/arm/cpufeature.c index 44126dbf07..ca297a4b21 100644 --- a/xen/arch/arm/cpufeature.c +++ b/xen/arch/arm/cpufeature.c @@ -99,44 +99,44 @@ int enable_nonboot_cpu_caps(const struct arm_cpu_capabilities *caps) void identify_cpu(struct cpuinfo_arm *c) { - 
c->midr.bits = READ_SYSREG32(MIDR_EL1); - c->mpidr.bits = READ_SYSREG(MPIDR_EL1); + c->midr.bits = READ_SYSREG32(MIDR_EL1); + c->mpidr.bits = READ_SYSREG(MPIDR_EL1); #ifdef CONFIG_ARM_64 - c->pfr64.bits[0] = READ_SYSREG64(ID_AA64PFR0_EL1); - c->pfr64.bits[1] = READ_SYSREG64(ID_AA64PFR1_EL1); + c->pfr64.bits[0] = READ_SYSREG64(ID_AA64PFR0_EL1); + c->pfr64.bits[1] = READ_SYSREG64(ID_AA64PFR1_EL1); - c->dbg64.bits[0] = READ_SYSREG64(ID_AA64DFR0_EL1); - c->dbg64.bits[1] = READ_SYSREG64(ID_AA64DFR1_EL1); + c->dbg64.bits[0] = READ_SYSREG64(ID_AA64DFR0_EL1); + c->dbg64.bits[1] = READ_SYSREG64(ID_AA64DFR1_EL1); - c->aux64.bits[0] = READ_SYSREG64(ID_AA64AFR0_EL1); - c->aux64.bits[1] = READ_SYSREG64(ID_AA64AFR1_EL1); + c->aux64.bits[0] = READ_SYSREG64(ID_AA64AFR0_EL1); + c->aux64.bits[1] = READ_SYSREG64(ID_AA64AFR1_EL1); - c->mm64.bits[0] = READ_SYSREG64(ID_AA64MMFR0_EL1); - c->mm64.bits[1] = READ_SYSREG64(ID_AA64MMFR1_EL1); + c->mm64.bits[0] = READ_SYSREG64(ID_AA64MMFR0_EL1); + c->mm64.bits[1] = READ_SYSREG64(ID_AA64MMFR1_EL1); - c->isa64.bits[0] = READ_SYSREG64(ID_AA64ISAR0_EL1); - c->isa64.bits[1] = READ_SYSREG64(ID_AA64ISAR1_EL1); + c->isa64.bits[0] = READ_SYSREG64(ID_AA64ISAR0_EL1); + c->isa64.bits[1] = READ_SYSREG64(ID_AA64ISAR1_EL1); #endif - c->pfr32.bits[0] = READ_SYSREG32(ID_PFR0_EL1); - c->pfr32.bits[1] = READ_SYSREG32(ID_PFR1_EL1); + c->pfr32.bits[0] = READ_SYSREG32(ID_PFR0_EL1); + c->pfr32.bits[1] = READ_SYSREG32(ID_PFR1_EL1); - c->dbg32.bits[0] = READ_SYSREG32(ID_DFR0_EL1); + c->dbg32.bits[0] = READ_SYSREG32(ID_DFR0_EL1); - c->aux32.bits[0] = READ_SYSREG32(ID_AFR0_EL1); + c->aux32.bits[0] = READ_SYSREG32(ID_AFR0_EL1); - c->mm32.bits[0] = READ_SYSREG32(ID_MMFR0_EL1); - c->mm32.bits[1] = READ_SYSREG32(ID_MMFR1_EL1); - c->mm32.bits[2] = READ_SYSREG32(ID_MMFR2_EL1); - c->mm32.bits[3] = READ_SYSREG32(ID_MMFR3_EL1); + c->mm32.bits[0] = READ_SYSREG32(ID_MMFR0_EL1); + c->mm32.bits[1] = READ_SYSREG32(ID_MMFR1_EL1); + c->mm32.bits[2] = READ_SYSREG32(ID_MMFR2_EL1); + c->mm32.bits[3] = READ_SYSREG32(ID_MMFR3_EL1); - c->isa32.bits[0] = READ_SYSREG32(ID_ISAR0_EL1); - c->isa32.bits[1] = READ_SYSREG32(ID_ISAR1_EL1); - c->isa32.bits[2] = READ_SYSREG32(ID_ISAR2_EL1); - c->isa32.bits[3] = READ_SYSREG32(ID_ISAR3_EL1); - c->isa32.bits[4] = READ_SYSREG32(ID_ISAR4_EL1); - c->isa32.bits[5] = READ_SYSREG32(ID_ISAR5_EL1); + c->isa32.bits[0] = READ_SYSREG32(ID_ISAR0_EL1); + c->isa32.bits[1] = READ_SYSREG32(ID_ISAR1_EL1); + c->isa32.bits[2] = READ_SYSREG32(ID_ISAR2_EL1); + c->isa32.bits[3] = READ_SYSREG32(ID_ISAR3_EL1); + c->isa32.bits[4] = READ_SYSREG32(ID_ISAR4_EL1); + c->isa32.bits[5] = READ_SYSREG32(ID_ISAR5_EL1); } /* diff --git a/xen/arch/arm/decode.c b/xen/arch/arm/decode.c index 8b1e15d118..5dfc93555c 100644 --- a/xen/arch/arm/decode.c +++ b/xen/arch/arm/decode.c @@ -25,8 +25,7 @@ #include "decode.h" -static void update_dabt(struct hsr_dabt *dabt, int reg, - uint8_t size, bool sign) +static void update_dabt(struct hsr_dabt *dabt, int reg, uint8_t size, bool sign) { dabt->reg = reg; dabt->size = size; @@ -38,12 +37,12 @@ static int decode_thumb2(register_t pc, struct hsr_dabt *dabt, uint16_t hw1) uint16_t hw2; uint16_t rt; - if ( raw_copy_from_guest(&hw2, (void *__user)(pc + 2), sizeof (hw2)) ) + if ( raw_copy_from_guest(&hw2, (void *__user)(pc + 2), sizeof(hw2)) ) return -EFAULT; rt = (hw2 >> 12) & 0xf; - switch ( (hw1 >> 9) & 0xf ) + switch ((hw1 >> 9) & 0xf) { case 12: { @@ -87,10 +86,10 @@ static int decode_thumb(register_t pc, struct hsr_dabt *dabt) { uint16_t instr; - if ( raw_copy_from_guest(&instr, 
(void * __user)pc, sizeof (instr)) ) + if ( raw_copy_from_guest(&instr, (void *__user)pc, sizeof(instr)) ) return -EFAULT; - switch ( instr >> 12 ) + switch (instr >> 12) { case 5: { @@ -98,7 +97,7 @@ static int decode_thumb(register_t pc, struct hsr_dabt *dabt) uint16_t opB = (instr >> 9) & 0x7; int reg = instr & 7; - switch ( opB & 0x3 ) + switch (opB & 0x3) { case 0: /* Non-signed word */ update_dabt(dabt, reg, 2, false); diff --git a/xen/arch/arm/device.c b/xen/arch/arm/device.c index 70cd6c1a19..d04e73b3f8 100644 --- a/xen/arch/arm/device.c +++ b/xen/arch/arm/device.c @@ -33,7 +33,7 @@ int __init device_init(struct dt_device_node *dev, enum device_class class, ASSERT(dev != NULL); if ( !dt_device_is_available(dev) || dt_device_for_passthrough(dev) ) - return -ENODEV; + return -ENODEV; for ( desc = _sdevice; desc != _edevice; desc++ ) { @@ -46,19 +46,19 @@ int __init device_init(struct dt_device_node *dev, enum device_class class, return desc->init(dev, data); } - } return -EBADF; } -int __init acpi_device_init(enum device_class class, const void *data, int class_type) +int __init acpi_device_init(enum device_class class, const void *data, + int class_type) { const struct acpi_device_desc *desc; for ( desc = _asdevice; desc != _aedevice; desc++ ) { - if ( ( desc->class != class ) || ( desc->class_type != class_type ) ) + if ( (desc->class != class) || (desc->class_type != class_type) ) continue; ASSERT(desc->init != NULL); diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c index 356d894f64..3809e3dec1 100644 --- a/xen/arch/arm/domain.c +++ b/xen/arch/arm/domain.c @@ -64,7 +64,7 @@ void idle_loop(void) { unsigned int cpu = smp_processor_id(); - for ( ; ; ) + for ( ;; ) { if ( cpu_is_offline(cpu) ) stop_cpu(); @@ -157,7 +157,7 @@ static void ctxt_switch_from(struct vcpu *p) #endif if ( is_32bit_domain(p->domain) ) - p->arch.ifsr = READ_SYSREG(IFSR32_EL2); + p->arch.ifsr = READ_SYSREG(IFSR32_EL2); p->arch.afsr0 = READ_SYSREG(AFSR0_EL1); p->arch.afsr1 = READ_SYSREG(AFSR1_EL1); @@ -376,20 +376,29 @@ void sync_vcpu_execstate(struct vcpu *v) /* Nothing to do -- no lazy switching */ } -#define next_arg(fmt, args) ({ \ - unsigned long __arg; \ - switch ( *(fmt)++ ) \ - { \ - case 'i': __arg = (unsigned long)va_arg(args, unsigned int); break; \ - case 'l': __arg = (unsigned long)va_arg(args, unsigned long); break; \ - case 'h': __arg = (unsigned long)va_arg(args, void *); break; \ - default: __arg = 0; BUG(); \ - } \ - __arg; \ -}) - -unsigned long hypercall_create_continuation( - unsigned int op, const char *format, ...) +#define next_arg(fmt, args) \ + ({ \ + unsigned long __arg; \ + switch (*(fmt)++) \ + { \ + case 'i': \ + __arg = (unsigned long)va_arg(args, unsigned int); \ + break; \ + case 'l': \ + __arg = (unsigned long)va_arg(args, unsigned long); \ + break; \ + case 'h': \ + __arg = (unsigned long)va_arg(args, void *); \ + break; \ + default: \ + __arg = 0; \ + BUG(); \ + } \ + __arg; \ + }) + +unsigned long hypercall_create_continuation(unsigned int op, const char *format, + ...) 
{ struct mc_state *mcs = &current->mc_state; struct cpu_user_regs *regs; @@ -399,7 +408,7 @@ unsigned long hypercall_create_continuation( va_list args; /* All hypercalls take at least one argument */ - BUG_ON( !p || *p == '\0' ); + BUG_ON(!p || *p == '\0'); current->hcall_preempted = true; @@ -426,14 +435,26 @@ unsigned long hypercall_create_continuation( { arg = next_arg(p, args); - switch ( i ) + switch (i) { - case 0: regs->x0 = arg; break; - case 1: regs->x1 = arg; break; - case 2: regs->x2 = arg; break; - case 3: regs->x3 = arg; break; - case 4: regs->x4 = arg; break; - case 5: regs->x5 = arg; break; + case 0: + regs->x0 = arg; + break; + case 1: + regs->x1 = arg; + break; + case 2: + regs->x2 = arg; + break; + case 3: + regs->x3 = arg; + break; + case 4: + regs->x4 = arg; + break; + case 5: + regs->x5 = arg; + break; } } @@ -449,14 +470,26 @@ unsigned long hypercall_create_continuation( { arg = next_arg(p, args); - switch ( i ) + switch (i) { - case 0: regs->r0 = arg; break; - case 1: regs->r1 = arg; break; - case 2: regs->r2 = arg; break; - case 3: regs->r3 = arg; break; - case 4: regs->r4 = arg; break; - case 5: regs->r5 = arg; break; + case 0: + regs->r0 = arg; + break; + case 1: + regs->r1 = arg; + break; + case 2: + regs->r2 = arg; + break; + case 3: + regs->r3 = arg; + break; + case 4: + regs->r4 = arg; + break; + case 5: + regs->r5 = arg; + break; } } @@ -502,7 +535,6 @@ void free_domain_struct(struct domain *d) void dump_pageframe_info(struct domain *d) { - } /* @@ -510,9 +542,9 @@ void dump_pageframe_info(struct domain *d) * page on ARM64. Cowardly increase the limit in this case. */ #if defined(CONFIG_NEW_VGIC) && defined(CONFIG_ARM_64) -#define MAX_PAGES_PER_VCPU 2 +#define MAX_PAGES_PER_VCPU 2 #else -#define MAX_PAGES_PER_VCPU 1 +#define MAX_PAGES_PER_VCPU 1 #endif struct vcpu *alloc_vcpu_struct(const struct domain *d) @@ -541,15 +573,15 @@ int arch_vcpu_create(struct vcpu *v) { int rc = 0; - BUILD_BUG_ON( sizeof(struct cpu_info) > STACK_SIZE ); + BUILD_BUG_ON(sizeof(struct cpu_info) > STACK_SIZE); - v->arch.stack = alloc_xenheap_pages(STACK_ORDER, MEMF_node(vcpu_to_node(v))); + v->arch.stack = + alloc_xenheap_pages(STACK_ORDER, MEMF_node(vcpu_to_node(v))); if ( v->arch.stack == NULL ) return -ENOMEM; - v->arch.cpu_info = (struct cpu_info *)(v->arch.stack - + STACK_SIZE - - sizeof(struct cpu_info)); + v->arch.cpu_info = (struct cpu_info *)(v->arch.stack + STACK_SIZE - + sizeof(struct cpu_info)); memset(v->arch.cpu_info, 0, sizeof(*v->arch.cpu_info)); memset(&v->arch.saved_context, 0, sizeof(v->arch.saved_context)); @@ -611,7 +643,7 @@ int arch_sanitise_domain_config(struct xen_domctl_createdomain *config) /* Fill in the native GIC version, passed back to the toolstack.
*/ if ( config->arch.gic_version == XEN_DOMCTL_CONFIG_GIC_NATIVE ) { - switch ( gic_hw_version() ) + switch (gic_hw_version()) { case GIC_V2: config->arch.gic_version = XEN_DOMCTL_CONFIG_GIC_V2; @@ -646,8 +678,7 @@ int arch_sanitise_domain_config(struct xen_domctl_createdomain *config) return 0; } -int arch_domain_create(struct domain *d, - struct xen_domctl_createdomain *config) +int arch_domain_create(struct domain *d, struct xen_domctl_createdomain *config) { int rc, count = 0; @@ -674,7 +705,7 @@ int arch_domain_create(struct domain *d, clear_page(d->shared_info); share_xen_page_with_guest(virt_to_page(d->shared_info), d, SHARE_rw); - switch ( config->arch.gic_version ) + switch (config->arch.gic_version) { case XEN_DOMCTL_CONFIG_GIC_V2: d->arch.vgic.version = GIC_V2; @@ -804,7 +835,6 @@ static int is_guest_pv32_psr(uint32_t psr) } } - #ifdef CONFIG_ARM_64 static int is_guest_pv64_psr(uint32_t psr) { @@ -832,8 +862,7 @@ static int is_guest_pv64_psr(uint32_t psr) * toolstack (XEN_DOMCTL_setvcpucontext) or the guest * (VCPUOP_initialise) and therefore must be properly validated. */ -int arch_set_info_guest( - struct vcpu *v, vcpu_guest_context_u c) +int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_u c) { struct vcpu_guest_context *ctxt = c.nat; struct vcpu_guest_core_regs *regs = &c.nat->user_regs; @@ -896,12 +925,12 @@ int arch_vcpu_reset(struct vcpu *v) static int relinquish_memory(struct domain *d, struct page_list_head *list) { struct page_info *page, *tmp; - int ret = 0; + int ret = 0; /* Use a recursive lock, as we may enter 'free_domheap_page'. */ spin_lock_recursive(&d->page_alloc_lock); - page_list_for_each_safe( page, tmp, list ) + page_list_for_each_safe(page, tmp, list) { /* Grab a reference to the page so it won't disappear from under us. */ if ( unlikely(!get_page(page, d)) ) @@ -927,7 +956,7 @@ static int relinquish_memory(struct domain *d, struct page_list_head *list) } } - out: +out: spin_unlock_recursive(&d->page_alloc_lock); return ret; } @@ -936,7 +965,7 @@ int domain_relinquish_resources(struct domain *d) { int ret = 0; - switch ( d->arch.relmem ) + switch (d->arch.relmem) { case RELMEM_not_started: ret = iommu_release_dt_devices(d); @@ -954,7 +983,7 @@ int domain_relinquish_resources(struct domain *d) case RELMEM_tee: ret = tee_relinquish_resources(d); - if (ret ) + if ( ret ) return ret; d->arch.relmem = RELMEM_xen; @@ -999,16 +1028,16 @@ void arch_dump_domain_info(struct domain *d) p2m_dump_info(d); } - -long do_arm_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg) +long do_arm_vcpu_op(int cmd, unsigned int vcpuid, + XEN_GUEST_HANDLE_PARAM(void) arg) { - switch ( cmd ) + switch (cmd) { - case VCPUOP_register_vcpu_info: - case VCPUOP_register_runstate_memory_area: - return do_vcpu_op(cmd, vcpuid, arg); - default: - return -EINVAL; + case VCPUOP_register_vcpu_info: + case VCPUOP_register_runstate_memory_area: + return do_vcpu_op(cmd, vcpuid, arg); + default: + return -EINVAL; } } diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c index 3c00ee1e89..6dc3dc9ee4 100644 --- a/xen/arch/arm/domain_build.c +++ b/xen/arch/arm/domain_build.c @@ -56,9 +56,11 @@ struct map_range_data //#define DEBUG_11_ALLOCATION #ifdef DEBUG_11_ALLOCATION -# define D11PRINT(fmt, args...) printk(XENLOG_DEBUG fmt, ##args) +#define D11PRINT(fmt, args...) printk(XENLOG_DEBUG fmt, ##args) #else -# define D11PRINT(fmt, args...) do {} while ( 0 ) +#define D11PRINT(fmt, args...) 
\ + do { \ + } while ( 0 ) #endif /* @@ -101,10 +103,8 @@ static unsigned int __init get_allocation_size(paddr_t size) * Returns false if the memory would be below bank 0 or we have run * out of banks. In this case it will free the pages. */ -static bool __init insert_11_bank(struct domain *d, - struct kernel_info *kinfo, - struct page_info *pg, - unsigned int order) +static bool __init insert_11_bank(struct domain *d, struct kernel_info *kinfo, + struct page_info *pg, unsigned int order) { int res, i; mfn_t smfn; @@ -114,15 +114,13 @@ static bool __init insert_11_bank(struct domain *d, start = mfn_to_maddr(smfn); size = pfn_to_paddr(1UL << order); - D11PRINT("Allocated %#"PRIpaddr"-%#"PRIpaddr" (%ldMB/%ldMB, order %d)\n", - start, start + size, - 1UL << (order + PAGE_SHIFT - 20), + D11PRINT("Allocated %#" PRIpaddr "-%#" PRIpaddr + " (%ldMB/%ldMB, order %d)\n", + start, start + size, 1UL << (order + PAGE_SHIFT - 20), /* Don't want format this as PRIpaddr (16 digit hex) */ - (unsigned long)(kinfo->unassigned_mem >> 20), - order); + (unsigned long)(kinfo->unassigned_mem >> 20), order); - if ( kinfo->mem.nr_banks > 0 && - size < MB(128) && + if ( kinfo->mem.nr_banks > 0 && size < MB(128) && start + size < kinfo->mem.bank[0].start ) { D11PRINT("Allocation below bank 0 is too small, not using\n"); @@ -143,12 +141,12 @@ static bool __init insert_11_bank(struct domain *d, return true; } - for( i = 0; i < kinfo->mem.nr_banks; i++ ) + for ( i = 0; i < kinfo->mem.nr_banks; i++ ) { struct membank *bank = &kinfo->mem.bank[i]; /* If possible merge new memory into the start of the bank */ - if ( bank->start == start+size ) + if ( bank->start == start + size ) { bank->start = start; bank->size += size; @@ -170,8 +168,7 @@ static bool __init insert_11_bank(struct domain *d, */ if ( start + size < bank->start && kinfo->mem.nr_banks < NR_MEM_BANKS ) { - memmove(bank + 1, bank, - sizeof(*bank) * (kinfo->mem.nr_banks - i)); + memmove(bank + 1, bank, sizeof(*bank) * (kinfo->mem.nr_banks - i)); kinfo->mem.nr_banks++; bank->start = start; bank->size = size; @@ -280,7 +277,7 @@ static void __init allocate_memory_11(struct domain *d, */ while ( order >= min_low_order ) { - for ( bits = order ; bits <= (lowmem ? 32 : PADDR_BITS); bits++ ) + for ( bits = order; bits <= (lowmem ? 32 : PADDR_BITS); bits++ ) { pg = alloc_domheap_pages(d, order, MEMF_bits(bits)); if ( pg != NULL ) @@ -302,7 +299,7 @@ static void __init allocate_memory_11(struct domain *d, printk(XENLOG_INFO "No bank has been allocated below 4GB.\n"); lowmem = false; - got_bank0: +got_bank0: /* * If we failed to allocate bank0 under 4GB, continue allocating @@ -314,9 +311,9 @@ static void __init allocate_memory_11(struct domain *d, pg = alloc_domheap_pages(d, order, lowmem ? 
MEMF_bits(32) : 0); if ( !pg ) { - order --; + order--; - if ( lowmem && order < min_low_order) + if ( lowmem && order < min_low_order ) { D11PRINT("Failed at min_low_order, allow high allocations\n"); order = get_allocation_size(kinfo->unassigned_mem); @@ -363,10 +360,9 @@ static void __init allocate_memory_11(struct domain *d, " %ldMB unallocated\n", (unsigned long)kinfo->unassigned_mem >> 20); - for( i = 0; i < kinfo->mem.nr_banks; i++ ) + for ( i = 0; i < kinfo->mem.nr_banks; i++ ) { - printk("BANK[%d] %#"PRIpaddr"-%#"PRIpaddr" (%ldMB)\n", - i, + printk("BANK[%d] %#" PRIpaddr "-%#" PRIpaddr " (%ldMB)\n", i, kinfo->mem.bank[i].start, kinfo->mem.bank[i].start + kinfo->mem.bank[i].size, /* Don't want format this as PRIpaddr (16 digit hex) */ @@ -375,9 +371,8 @@ static void __init allocate_memory_11(struct domain *d, } static bool __init allocate_bank_memory(struct domain *d, - struct kernel_info *kinfo, - gfn_t sgfn, - unsigned long tot_size) + struct kernel_info *kinfo, gfn_t sgfn, + unsigned long tot_size) { int res; struct page_info *pg; @@ -453,12 +448,11 @@ static void __init allocate_memory(struct domain *d, struct kernel_info *kinfo) if ( kinfo->unassigned_mem ) goto fail; - for( i = 0; i < kinfo->mem.nr_banks; i++ ) + for ( i = 0; i < kinfo->mem.nr_banks; i++ ) { - printk(XENLOG_INFO "%pd BANK[%d] %#"PRIpaddr"-%#"PRIpaddr" (%ldMB)\n", - d, - i, - kinfo->mem.bank[i].start, + printk(XENLOG_INFO "%pd BANK[%d] %#" PRIpaddr "-%#" PRIpaddr + " (%ldMB)\n", + d, i, kinfo->mem.bank[i].start, kinfo->mem.bank[i].start + kinfo->mem.bank[i].size, /* Don't want format this as PRIpaddr (16 digit hex) */ (unsigned long)(kinfo->mem.bank[i].size >> 20)); @@ -484,7 +478,7 @@ static int __init write_properties(struct domain *d, struct kernel_info *kinfo, if ( kinfo->cmdline && kinfo->cmdline[0] ) bootargs = &kinfo->cmdline[0]; - dt_for_each_property_node (node, prop) + dt_for_each_property_node(node, prop) { const void *prop_data = prop->value; u32 prop_len = prop->length; @@ -512,7 +506,7 @@ static int __init write_properties(struct domain *d, struct kernel_info *kinfo, dt_property_name_is_equal(prop, "linux,uefi-mmap-start") || dt_property_name_is_equal(prop, "linux,uefi-mmap-size") || dt_property_name_is_equal(prop, "linux,uefi-mmap-desc-size") || - dt_property_name_is_equal(prop, "linux,uefi-mmap-desc-ver")) + dt_property_name_is_equal(prop, "linux,uefi-mmap-desc-ver") ) continue; if ( dt_property_name_is_equal(prop, "xen,dom0-bootargs") ) @@ -523,7 +517,7 @@ static int __init write_properties(struct domain *d, struct kernel_info *kinfo, } if ( dt_property_name_is_equal(prop, "bootargs") ) { - if ( !bootargs && !had_dom0_bootargs ) + if ( !bootargs && !had_dom0_bootargs ) bootargs = prop->value; continue; } @@ -563,8 +557,7 @@ static int __init write_properties(struct domain *d, struct kernel_info *kinfo, if ( dt_device_for_passthrough(node) ) res = fdt_property_string(kinfo->fdt, "status", "disabled"); else if ( status ) - res = fdt_property(kinfo->fdt, "status", status->value, - status->length); + res = fdt_property(kinfo->fdt, "status", status->value, status->length); if ( res ) return res; @@ -608,18 +601,17 @@ static int __init write_properties(struct domain *d, struct kernel_info *kinfo, typedef __be32 gic_interrupt_t[3]; -static void __init set_interrupt(gic_interrupt_t interrupt, - unsigned int irq, - unsigned int cpumask, - unsigned int level) +static void __init set_interrupt(gic_interrupt_t interrupt, unsigned int irq, + unsigned int cpumask, unsigned int level) { __be32 *cells = 
interrupt; bool is_ppi = !!(irq < 32); BUG_ON(irq < 16); - irq -= (is_ppi) ? 16: 32; /* PPIs start at 16, SPIs at 32 */ + irq -= (is_ppi) ? 16 : 32; /* PPIs start at 16, SPIs at 32 */ - /* See linux Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt */ + /* See linux + * Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt */ dt_set_cell(&cells, 1, is_ppi); /* is a PPI? */ dt_set_cell(&cells, 1, irq); dt_set_cell(&cells, 1, (cpumask << 8) | level); @@ -636,7 +628,7 @@ static int __init fdt_property_interrupts(void *fdt, gic_interrupt_t *intr, { int res; - res = fdt_property(fdt, "interrupts", intr, sizeof (intr[0]) * num_irq); + res = fdt_property(fdt, "interrupts", intr, sizeof(intr[0]) * num_irq); if ( res ) return res; @@ -646,21 +638,20 @@ static int __init fdt_property_interrupts(void *fdt, gic_interrupt_t *intr, return res; } -static int __init make_memory_node(const struct domain *d, - void *fdt, +static int __init make_memory_node(const struct domain *d, void *fdt, int addrcells, int sizecells, const struct kernel_info *kinfo) { int res, i; int reg_size = addrcells + sizecells; - int nr_cells = reg_size*kinfo->mem.nr_banks; + int nr_cells = reg_size * kinfo->mem.nr_banks; __be32 reg[NR_MEM_BANKS * 4 /* Worst case addrcells + sizecells */]; __be32 *cells; BUG_ON(nr_cells >= ARRAY_SIZE(reg)); - dt_dprintk("Create memory node (reg size %d, nr cells %d)\n", - reg_size, nr_cells); + dt_dprintk("Create memory node (reg size %d, nr cells %d)\n", reg_size, + nr_cells); /* ePAPR 3.4 */ res = fdt_begin_node(fdt, "memory"); @@ -672,13 +663,13 @@ static int __init make_memory_node(const struct domain *d, return res; cells = &reg[0]; - for ( i = 0 ; i < kinfo->mem.nr_banks; i++ ) + for ( i = 0; i < kinfo->mem.nr_banks; i++ ) { u64 start = kinfo->mem.bank[i].start; u64 size = kinfo->mem.bank[i].size; - dt_dprintk(" Bank %d: %#"PRIx64"->%#"PRIx64"\n", - i, start, start + size); + dt_dprintk(" Bank %d: %#" PRIx64 "->%#" PRIx64 "\n", i, start, + start + size); dt_child_set_range(&cells, addrcells, sizecells, start, size); } @@ -696,9 +687,9 @@ static int __init make_hypervisor_node(struct domain *d, const struct kernel_info *kinfo, int addrcells, int sizecells) { - const char compat[] = - "xen,xen-"__stringify(XEN_VERSION)"."__stringify(XEN_SUBVERSION)"\0" - "xen,xen"; + const char compat[] = "xen,xen-" __stringify(XEN_VERSION) "." __stringify( + XEN_SUBVERSION) "\0" + "xen,xen"; __be32 reg[4]; gic_interrupt_t intr; __be32 *cells; @@ -711,8 +702,8 @@ static int __init make_hypervisor_node(struct domain *d, * Sanity-check address sizes, since addresses and sizes which do * not take up exactly 4 or 8 bytes are not supported.
*/ - if ((addrcells != 1 && addrcells != 2) || - (sizecells != 1 && sizecells != 2)) + if ( (addrcells != 1 && addrcells != 2) || + (sizecells != 1 && sizecells != 2) ) panic("Cannot cope with this size\n"); /* See linux Documentation/devicetree/bindings/arm/xen.txt */ @@ -727,10 +718,10 @@ static int __init make_hypervisor_node(struct domain *d, /* reg 0 is grant table space */ cells = &reg[0]; - dt_child_set_range(&cells, addrcells, sizecells, - kinfo->gnttab_start, kinfo->gnttab_size); - res = fdt_property(fdt, "reg", reg, - dt_cells_to_size(addrcells + sizecells)); + dt_child_set_range(&cells, addrcells, sizecells, kinfo->gnttab_start, + kinfo->gnttab_size); + res = + fdt_property(fdt, "reg", reg, dt_cells_to_size(addrcells + sizecells)); if ( res ) return res; @@ -752,14 +743,14 @@ static int __init make_hypervisor_node(struct domain *d, return res; } -static int __init make_psci_node(void *fdt, - const struct dt_device_node *parent) +static int __init make_psci_node(void *fdt, const struct dt_device_node *parent) { int res; - const char compat[] = - "arm,psci-1.0""\0" - "arm,psci-0.2""\0" - "arm,psci"; + const char compat[] = "arm,psci-1.0" + "\0" + "arm,psci-0.2" + "\0" + "arm,psci"; dt_dprintk("Create PSCI node\n"); @@ -824,7 +815,7 @@ static int __init make_cpus_node(const struct domain *d, void *fdt, { compatible = dt_get_property(npcpu, "compatible", &len); clock_valid = dt_property_read_u32(npcpu, "clock-frequency", - &clock_frequency); + &clock_frequency); break; } } @@ -859,10 +850,10 @@ static int __init make_cpus_node(const struct domain *d, void *fdt, * is enough for the current max vcpu number. */ mpidr_aff = vcpuid_to_vaffinity(cpu); - dt_dprintk("Create cpu@%"PRIx64" (logical CPUID: %d) node\n", + dt_dprintk("Create cpu@%" PRIx64 " (logical CPUID: %d) node\n", mpidr_aff, cpu); - snprintf(buf, sizeof(buf), "cpu@%"PRIx64, mpidr_aff); + snprintf(buf, sizeof(buf), "cpu@%" PRIx64, mpidr_aff); res = fdt_begin_node(fdt, buf); if ( res ) return res; @@ -975,11 +966,10 @@ static int __init make_gic_node(const struct domain *d, void *fdt, static int __init make_timer_node(const struct domain *d, void *fdt, const struct dt_device_node *node) { - static const struct dt_device_match timer_ids[] __initconst = - { + static const struct dt_device_match timer_ids[] __initconst = { DT_MATCH_COMPATIBLE("arm,armv7-timer"), DT_MATCH_COMPATIBLE("arm,armv8-timer"), - { /* sentinel */ }, + {/* sentinel */}, }; struct dt_device_node *dev; u32 len; @@ -1033,8 +1023,8 @@ static int __init make_timer_node(const struct domain *d, void *fdt, if ( res ) return res; - clock_valid = dt_property_read_u32(dev, "clock-frequency", - &clock_frequency); + clock_valid = + dt_property_read_u32(dev, "clock-frequency", &clock_frequency); if ( clock_valid ) { res = fdt_property_cell(fdt, "clock-frequency", clock_frequency); @@ -1069,7 +1059,7 @@ int __init make_chosen_node(const struct kernel_info *kinfo) bootargs = &kinfo->cmdline[0]; res = fdt_property(fdt, "bootargs", bootargs, strlen(bootargs) + 1); if ( res ) - return res; + return res; } /* @@ -1118,8 +1108,8 @@ int __init map_irq_to_domain(struct domain *d, unsigned int irq, res = route_irq_to_guest(d, irq, irq, devname); if ( res < 0 ) { - printk(XENLOG_ERR "Unable to map IRQ%"PRId32" to dom%d\n", - irq, d->domain_id); + printk(XENLOG_ERR "Unable to map IRQ%" PRId32 " to dom%d\n", irq, + d->domain_id); return res; } } @@ -1129,8 +1119,7 @@ int __init map_irq_to_domain(struct domain *d, unsigned int irq, } static int __init map_dt_irq_to_domain(const struct
dt_device_node *dev, - const struct dt_irq *dt_irq, - void *data) + const struct dt_irq *dt_irq, void *data) { struct domain *d = data; unsigned int irq = dt_irq->irq; @@ -1139,7 +1128,7 @@ static int __init map_dt_irq_to_domain(const struct dt_device_node *dev, if ( irq < NR_LOCAL_IRQS ) { - printk(XENLOG_ERR "%s: IRQ%"PRId32" is not a SPI\n", + printk(XENLOG_ERR "%s: IRQ%" PRId32 " is not a SPI\n", dt_node_name(dev), irq); return -EINVAL; } @@ -1148,8 +1137,7 @@ static int __init map_dt_irq_to_domain(const struct dt_device_node *dev, res = irq_set_spi_type(irq, dt_irq->type); if ( res ) { - printk(XENLOG_ERR - "%s: Unable to setup IRQ%"PRId32" to dom%d\n", + printk(XENLOG_ERR "%s: Unable to setup IRQ%" PRId32 " to dom%d\n", dt_node_name(dev), irq, d->domain_id); return res; } @@ -1160,8 +1148,7 @@ static int __init map_dt_irq_to_domain(const struct dt_device_node *dev, } static int __init map_range_to_domain(const struct dt_device_node *dev, - u64 addr, u64 len, - void *data) + u64 addr, u64 len, void *data) { struct map_range_data *mr_data = data; struct domain *d = mr_data->d; @@ -1173,32 +1160,27 @@ static int __init map_range_to_domain(const struct dt_device_node *dev, if ( res ) { printk(XENLOG_ERR "Unable to permit to dom%d access to" - " 0x%"PRIx64" - 0x%"PRIx64"\n", - d->domain_id, - addr & PAGE_MASK, PAGE_ALIGN(addr + len) - 1); + " 0x%" PRIx64 " - 0x%" PRIx64 "\n", + d->domain_id, addr & PAGE_MASK, PAGE_ALIGN(addr + len) - 1); return res; } if ( need_mapping ) { - res = map_regions_p2mt(d, - gaddr_to_gfn(addr), - PFN_UP(len), - maddr_to_mfn(addr), - mr_data->p2mt); + res = map_regions_p2mt(d, gaddr_to_gfn(addr), PFN_UP(len), + maddr_to_mfn(addr), mr_data->p2mt); if ( res < 0 ) { - printk(XENLOG_ERR "Unable to map 0x%"PRIx64 - " - 0x%"PRIx64" in domain %d\n", - addr & PAGE_MASK, PAGE_ALIGN(addr + len) - 1, - d->domain_id); + printk(XENLOG_ERR "Unable to map 0x%" PRIx64 " - 0x%" PRIx64 + " in domain %d\n", + addr & PAGE_MASK, PAGE_ALIGN(addr + len) - 1, d->domain_id); return res; } } - dt_dprintk(" - MMIO: %010"PRIx64" - %010"PRIx64" P2MType=%x\n", - addr, addr + len, mr_data->p2mt); + dt_dprintk(" - MMIO: %010" PRIx64 " - %010" PRIx64 " P2MType=%x\n", addr, + addr + len, mr_data->p2mt); return 0; } @@ -1212,13 +1194,12 @@ static int __init map_device_children(struct domain *d, const struct dt_device_node *dev, p2m_type_t p2mt) { - struct map_range_data mr_data = { .d = d, .p2mt = p2mt }; + struct map_range_data mr_data = {.d = d, .p2mt = p2mt}; int ret; if ( dt_device_type_is_equal(dev, "pci") ) { - dt_dprintk("Mapping children of %s to guest\n", - dt_node_full_name(dev)); + dt_dprintk("Mapping children of %s to guest\n", dt_node_full_name(dev)); ret = dt_for_each_irq_map(dev, &map_dt_irq_to_domain, d); if ( ret < 0 ) @@ -1275,8 +1256,8 @@ static int __init handle_device(struct domain *d, struct dt_device_node *dev, res = dt_device_get_raw_irq(dev, i, &rirq); if ( res ) { - printk(XENLOG_ERR "Unable to retrieve irq %u for %s\n", - i, dt_node_full_name(dev)); + printk(XENLOG_ERR "Unable to retrieve irq %u for %s\n", i, + dt_node_full_name(dev)); return res; } @@ -1286,16 +1267,17 @@ static int __init handle_device(struct domain *d, struct dt_device_node *dev, */ if ( rirq.controller != dt_interrupt_controller ) { - dt_dprintk("irq %u not connected to primary controller. Connected to %s\n", - i, dt_node_full_name(rirq.controller)); + dt_dprintk( + "irq %u not connected to primary controller. 
Connected to %s\n", + i, dt_node_full_name(rirq.controller)); continue; } res = platform_get_irq(dev, i); if ( res < 0 ) { - printk(XENLOG_ERR "Unable to get irq %u for %s\n", - i, dt_node_full_name(dev)); + printk(XENLOG_ERR "Unable to get irq %u for %s\n", i, + dt_node_full_name(dev)); return res; } @@ -1307,12 +1289,12 @@ static int __init handle_device(struct domain *d, struct dt_device_node *dev, /* Give permission and map MMIOs */ for ( i = 0; i < naddr; i++ ) { - struct map_range_data mr_data = { .d = d, .p2mt = p2mt }; + struct map_range_data mr_data = {.d = d, .p2mt = p2mt}; res = dt_device_get_address(dev, i, &addr, &size); if ( res ) { - printk(XENLOG_ERR "Unable to retrieve address %u for %s\n", - i, dt_node_full_name(dev)); + printk(XENLOG_ERR "Unable to retrieve address %u for %s\n", i, + dt_node_full_name(dev)); return res; } @@ -1329,11 +1311,9 @@ static int __init handle_device(struct domain *d, struct dt_device_node *dev, } static int __init handle_node(struct domain *d, struct kernel_info *kinfo, - struct dt_device_node *node, - p2m_type_t p2mt) + struct dt_device_node *node, p2m_type_t p2mt) { - static const struct dt_device_match skip_matches[] __initconst = - { + static const struct dt_device_match skip_matches[] __initconst = { DT_MATCH_COMPATIBLE("xen,xen"), DT_MATCH_COMPATIBLE("xen,multiboot-module"), DT_MATCH_COMPATIBLE("multiboot,module"), @@ -1348,19 +1328,17 @@ static int __init handle_node(struct domain *d, struct kernel_info *kinfo, DT_MATCH_TYPE("memory"), /* The memory mapped timer is not supported by Xen. */ DT_MATCH_COMPATIBLE("arm,armv7-timer-mem"), - { /* sentinel */ }, + {/* sentinel */}, }; - static const struct dt_device_match timer_matches[] __initconst = - { + static const struct dt_device_match timer_matches[] __initconst = { DT_MATCH_TIMER, - { /* sentinel */ }, + {/* sentinel */}, }; - static const struct dt_device_match reserved_matches[] __initconst = - { + static const struct dt_device_match reserved_matches[] __initconst = { DT_MATCH_PATH("/psci"), DT_MATCH_PATH("/memory"), DT_MATCH_PATH("/hypervisor"), - { /* sentinel */ }, + {/* sentinel */}, }; struct dt_device_node *child; int res; @@ -1414,12 +1392,12 @@ static int __init handle_node(struct domain *d, struct kernel_info *kinfo, * already exists with the same path. 
*/ if ( dt_match_node(reserved_matches, node) ) - printk(XENLOG_WARNING - "WARNING: Path %s is reserved, skip the node as we may re-use the path.\n", + printk(XENLOG_WARNING "WARNING: Path %s is reserved, skip the node as " + "we may re-use the path.\n", path); res = handle_device(d, node, p2mt); - if ( res) + if ( res ) return res; /* @@ -1475,7 +1453,6 @@ static int __init handle_node(struct domain *d, struct kernel_info *kinfo, res = make_memory_node(d, kinfo->fdt, addrcells, sizecells, kinfo); if ( res ) return res; - } res = fdt_end_node(kinfo->fdt); @@ -1489,7 +1466,8 @@ static int __init make_gicv2_domU_node(const struct domain *d, void *fdt) __be32 reg[(GUEST_ROOT_ADDRESS_CELLS + GUEST_ROOT_SIZE_CELLS) * 2]; __be32 *cells; - res = fdt_begin_node(fdt, "interrupt-controller@"__stringify(GUEST_GICD_BASE)); + res = fdt_begin_node(fdt, + "interrupt-controller@" __stringify(GUEST_GICD_BASE)); if ( res ) return res; @@ -1516,15 +1494,15 @@ static int __init make_gicv2_domU_node(const struct domain *d, void *fdt) GUEST_GICC_BASE, GUEST_GICC_SIZE); res = fdt_property(fdt, "reg", reg, sizeof(reg)); - if (res) + if ( res ) return res; res = fdt_property_cell(fdt, "linux,phandle", GUEST_PHANDLE_GIC); - if (res) + if ( res ) return res; res = fdt_property_cell(fdt, "phandle", GUEST_PHANDLE_GIC); - if (res) + if ( res ) return res; res = fdt_end_node(fdt); @@ -1538,7 +1516,8 @@ static int __init make_gicv3_domU_node(const struct domain *d, void *fdt) __be32 reg[(GUEST_ROOT_ADDRESS_CELLS + GUEST_ROOT_SIZE_CELLS) * 2]; __be32 *cells; - res = fdt_begin_node(fdt, "interrupt-controller@"__stringify(GUEST_GICV3_GICD_BASE)); + res = fdt_begin_node( + fdt, "interrupt-controller@" __stringify(GUEST_GICV3_GICD_BASE)); if ( res ) return res; @@ -1565,15 +1544,15 @@ static int __init make_gicv3_domU_node(const struct domain *d, void *fdt) GUEST_GICV3_GICR0_BASE, GUEST_GICV3_GICR0_SIZE); res = fdt_property(fdt, "reg", reg, sizeof(reg)); - if (res) + if ( res ) return res; res = fdt_property_cell(fdt, "linux,phandle", GUEST_PHANDLE_GIC); - if (res) + if ( res ) return res; res = fdt_property_cell(fdt, "phandle", GUEST_PHANDLE_GIC); - if (res) + if ( res ) return res; res = fdt_end_node(fdt); @@ -1583,7 +1562,7 @@ static int __init make_gicv3_domU_node(const struct domain *d, void *fdt) static int __init make_gic_domU_node(const struct domain *d, void *fdt) { - switch ( d->arch.vgic.version ) + switch (d->arch.vgic.version) { case GIC_V3: return make_gicv3_domU_node(d, fdt); @@ -1617,16 +1596,16 @@ static int __init make_timer_domU_node(const struct domain *d, void *fdt) } set_interrupt(intrs[0], GUEST_TIMER_PHYS_S_PPI, 0xf, DT_IRQ_TYPE_LEVEL_LOW); - set_interrupt(intrs[1], GUEST_TIMER_PHYS_NS_PPI, 0xf, DT_IRQ_TYPE_LEVEL_LOW); + set_interrupt(intrs[1], GUEST_TIMER_PHYS_NS_PPI, 0xf, + DT_IRQ_TYPE_LEVEL_LOW); set_interrupt(intrs[2], GUEST_TIMER_VIRT_PPI, 0xf, DT_IRQ_TYPE_LEVEL_LOW); - res = fdt_property(fdt, "interrupts", intrs, sizeof (intrs[0]) * 3); + res = fdt_property(fdt, "interrupts", intrs, sizeof(intrs[0]) * 3); if ( res ) return res; - res = fdt_property_cell(fdt, "interrupt-parent", - GUEST_PHANDLE_GIC); - if (res) + res = fdt_property_cell(fdt, "interrupt-parent", GUEST_PHANDLE_GIC); + if ( res ) return res; res = fdt_end_node(fdt); @@ -1642,7 +1621,7 @@ static int __init make_vpl011_uart_node(const struct domain *d, void *fdt) __be32 reg[GUEST_ROOT_ADDRESS_CELLS + GUEST_ROOT_SIZE_CELLS]; __be32 *cells; - res = fdt_begin_node(fdt, "sbsa-uart@"__stringify(GUEST_PL011_BASE)); + res = 
fdt_begin_node(fdt, "sbsa-uart@" __stringify(GUEST_PL011_BASE)); if ( res ) return res; @@ -1651,9 +1630,8 @@ static int __init make_vpl011_uart_node(const struct domain *d, void *fdt) return res; cells = &reg[0]; - dt_child_set_range(&cells, GUEST_ROOT_ADDRESS_CELLS, - GUEST_ROOT_SIZE_CELLS, GUEST_PL011_BASE, - GUEST_PL011_SIZE); + dt_child_set_range(&cells, GUEST_ROOT_ADDRESS_CELLS, GUEST_ROOT_SIZE_CELLS, + GUEST_PL011_BASE, GUEST_PL011_SIZE); if ( res ) return res; res = fdt_property(fdt, "reg", reg, sizeof(reg)); @@ -1662,12 +1640,11 @@ static int __init make_vpl011_uart_node(const struct domain *d, void *fdt) set_interrupt(intr, GUEST_VPL011_SPI, 0xf, DT_IRQ_TYPE_LEVEL_HIGH); - res = fdt_property(fdt, "interrupts", intr, sizeof (intr)); + res = fdt_property(fdt, "interrupts", intr, sizeof(intr)); if ( res ) return res; - res = fdt_property_cell(fdt, "interrupt-parent", - GUEST_PHANDLE_GIC); + res = fdt_property_cell(fdt, "interrupt-parent", GUEST_PHANDLE_GIC); if ( res ) return res; @@ -1763,7 +1740,7 @@ static int __init prepare_dtb_domU(struct domain *d, struct kernel_info *kinfo) return 0; - err: +err: printk("Device tree generation failed (%d).\n", ret); xfree(kinfo->fdt); @@ -1802,7 +1779,7 @@ static int __init prepare_dtb_hwdom(struct domain *d, struct kernel_info *kinfo) return 0; - err: +err: printk("Device tree generation failed (%d).\n", ret); xfree(kinfo->fdt); return -EINVAL; @@ -1812,15 +1789,15 @@ static void __init dtb_load(struct kernel_info *kinfo) { unsigned long left; - printk("Loading dom0 DTB to 0x%"PRIpaddr"-0x%"PRIpaddr"\n", + printk("Loading dom0 DTB to 0x%" PRIpaddr "-0x%" PRIpaddr "\n", kinfo->dtb_paddr, kinfo->dtb_paddr + fdt_totalsize(kinfo->fdt)); - left = copy_to_guest_phys_flush_dcache(kinfo->d, kinfo->dtb_paddr, - kinfo->fdt, - fdt_totalsize(kinfo->fdt)); + left = copy_to_guest_phys_flush_dcache( + kinfo->d, kinfo->dtb_paddr, kinfo->fdt, fdt_totalsize(kinfo->fdt)); if ( left != 0 ) - panic("Unable to copy the DTB to dom0 memory (left = %lu bytes)\n", left); + panic("Unable to copy the DTB to dom0 memory (left = %lu bytes)\n", + left); xfree(kinfo->fdt); } @@ -1841,7 +1818,8 @@ static void __init initrd_load(struct kernel_info *kinfo) paddr = mod->start; len = mod->size; - printk("Loading dom0 initrd from %"PRIpaddr" to 0x%"PRIpaddr"-0x%"PRIpaddr"\n", + printk("Loading dom0 initrd from %" PRIpaddr " to 0x%" PRIpaddr + "-0x%" PRIpaddr "\n", paddr, load_addr, load_addr + len); /* Fix up linux,initrd-start and linux,initrd-end in /chosen */ @@ -1851,15 +1829,15 @@ static void __init initrd_load(struct kernel_info *kinfo) cellp = (__be32 *)val; dt_set_cell(&cellp, ARRAY_SIZE(val), load_addr); - res = fdt_setprop_inplace(kinfo->fdt, node, "linux,initrd-start", - val, sizeof(val)); + res = fdt_setprop_inplace(kinfo->fdt, node, "linux,initrd-start", val, + sizeof(val)); if ( res ) panic("Cannot fix up \"linux,initrd-start\" property\n"); cellp = (__be32 *)val; dt_set_cell(&cellp, ARRAY_SIZE(val), load_addr + len); - res = fdt_setprop_inplace(kinfo->fdt, node, "linux,initrd-end", - val, sizeof(val)); + res = fdt_setprop_inplace(kinfo->fdt, node, "linux,initrd-end", val, + sizeof(val)); if ( res ) panic("Cannot fix up \"linux,initrd-end\" property\n"); @@ -1867,8 +1845,7 @@ static void __init initrd_load(struct kernel_info *kinfo) if ( !initrd ) panic("Unable to map the hwdom initrd\n"); - res = copy_to_guest_phys_flush_dcache(kinfo->d, load_addr, - initrd, len); + res = copy_to_guest_phys_flush_dcache(kinfo->d, load_addr, initrd, len); if ( res != 0 ) panic("Unable
to copy the initrd in the hwdom memory\n"); } @@ -1927,7 +1904,7 @@ static void __init find_gnttab_region(struct domain *d, BUG_ON((kinfo->gnttab_start + kinfo->gnttab_size) > GB(4)); #endif - printk("Grant table range: %#"PRIpaddr"-%#"PRIpaddr"\n", + printk("Grant table range: %#" PRIpaddr "-%#" PRIpaddr "\n", kinfo->gnttab_start, kinfo->gnttab_start + kinfo->gnttab_size); } @@ -1980,7 +1957,7 @@ static int __init construct_domain(struct domain *d, struct kernel_info *kinfo) * r1 = machine nr, r2 = atags or dtb pointer. *... */ - regs->r0 = 0; /* SBZ */ + regs->r0 = 0; /* SBZ */ regs->r1 = 0xffffffff; /* We use DTB therefore no machine id */ regs->r2 = kinfo->dtb_paddr; } @@ -2032,7 +2009,8 @@ static int __init construct_domU(struct domain *d, } kinfo.unassigned_mem = (paddr_t)mem * SZ_1K; - printk("*** LOADING DOMU cpus=%u memory=%"PRIx64"KB ***\n", d->max_vcpus, mem); + printk("*** LOADING DOMU cpus=%u memory=%" PRIx64 "KB ***\n", d->max_vcpus, + mem); kinfo.vpl011 = dt_property_read_bool(node, "vpl011"); diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c index 9da88b8c64..aac06d937a 100644 --- a/xen/arch/arm/domctl.c +++ b/xen/arch/arm/domctl.c @@ -22,7 +22,7 @@ void arch_get_domain_info(const struct domain *d, info->flags |= XEN_DOMINF_hap; } -static int handle_vuart_init(struct domain *d, +static int handle_vuart_init(struct domain *d, struct xen_domctl_vuart_op *vuart_op) { int rc; @@ -48,7 +48,7 @@ static int handle_vuart_init(struct domain *d, long arch_do_domctl(struct xen_domctl *domctl, struct domain *d, XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) { - switch ( domctl->cmd ) + switch (domctl->cmd) { case XEN_DOMCTL_cacheflush: { @@ -56,7 +56,7 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d, gfn_t e = gfn_add(s, domctl->u.cacheflush.nr_pfns); int rc; - if ( domctl->u.cacheflush.nr_pfns > (1U<<MAX_ORDER) ) + if ( domctl->u.cacheflush.nr_pfns > (1U << MAX_ORDER) ) return -EINVAL; if ( gfn_x(e) < gfn_x(s) ) @@ -161,7 +161,7 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d, if ( vuart_op->pad[i] ) return -EINVAL; - switch( vuart_op->cmd ) + switch (vuart_op->cmd) { case XEN_DOMCTL_VUART_OP_INIT: rc = handle_vuart_init(d, vuart_op); diff --git a/xen/arch/arm/early_printk.c b/xen/arch/arm/early_printk.c index 97466a12b1..113b5b4298 100644 --- a/xen/arch/arm/early_printk.c +++ b/xen/arch/arm/early_printk.c @@ -19,8 +19,9 @@ void early_flush(void); void early_puts(const char *s) { - while (*s != '\0') { - if (*s == '\n') + while ( *s != '\0' ) + { + if ( *s == '\n' ) early_putch('\r'); early_putch(*s); s++; diff --git a/xen/arch/arm/efi/efi-dom0.c b/xen/arch/arm/efi/efi-dom0.c index 1c356540f7..ec4d7bc52a 100644 --- a/xen/arch/arm/efi/efi-dom0.c +++ b/xen/arch/arm/efi/efi-dom0.c @@ -61,12 +61,12 @@ void __init acpi_create_efi_system_table(struct domain *d, EFI_CONFIGURATION_TABLE *efi_conf_tbl; EFI_SYSTEM_TABLE *efi_sys_tbl; - table_addr = d->arch.efi_acpi_gpa - + acpi_get_table_offset(tbl_add, TBL_EFIT); - table_size = sizeof(EFI_SYSTEM_TABLE) + sizeof(EFI_CONFIGURATION_TABLE) - + sizeof(xen_efi_fw_vendor); - base_ptr = d->arch.efi_acpi_table - + acpi_get_table_offset(tbl_add, TBL_EFIT); + table_addr = + d->arch.efi_acpi_gpa + acpi_get_table_offset(tbl_add, TBL_EFIT); + table_size = sizeof(EFI_SYSTEM_TABLE) + sizeof(EFI_CONFIGURATION_TABLE) + + sizeof(xen_efi_fw_vendor); + base_ptr = + d->arch.efi_acpi_table + acpi_get_table_offset(tbl_add, TBL_EFIT); efi_sys_tbl = (EFI_SYSTEM_TABLE *)base_ptr; efi_sys_tbl->Hdr.Signature = EFI_SYSTEM_TABLE_SIGNATURE; @@ -84,11 +84,12
@@ void __init acpi_create_efi_system_table(struct domain *d, efi_conf_tbl = (EFI_CONFIGURATION_TABLE *)(base_ptr + offset); efi_conf_tbl->VendorGuid = (EFI_GUID)ACPI_20_TABLE_GUID; efi_conf_tbl->VendorTable = (VOID *)tbl_add[TBL_RSDP].start; - efi_sys_tbl->ConfigurationTable = (EFI_CONFIGURATION_TABLE *)(table_addr - + offset); + efi_sys_tbl->ConfigurationTable = + (EFI_CONFIGURATION_TABLE *)(table_addr + offset); xz_crc32_init(); - efi_sys_tbl->Hdr.CRC32 = xz_crc32((uint8_t *)efi_sys_tbl, - efi_sys_tbl->Hdr.HeaderSize, 0); + efi_sys_tbl->Hdr.CRC32 = + xz_crc32((uint8_t *)efi_sys_tbl, efi_sys_tbl->Hdr.HeaderSize, 0); tbl_add[TBL_EFIT].start = table_addr; tbl_add[TBL_EFIT].size = table_size; @@ -114,8 +114,8 @@ void __init acpi_create_efi_mmap_table(struct domain *d, unsigned int i; u8 *base_ptr; - base_ptr = d->arch.efi_acpi_table - + acpi_get_table_offset(tbl_add, TBL_MMAP); + base_ptr = + d->arch.efi_acpi_table + acpi_get_table_offset(tbl_add, TBL_MMAP); desc = (EFI_MEMORY_DESCRIPTOR *)base_ptr; for ( i = 0; i < mem->nr_banks; i++, desc++ ) @@ -127,13 +127,13 @@ void __init acpi_create_efi_mmap_table(struct domain *d, bootinfo.acpi.bank[i].start, bootinfo.acpi.bank[i].size); - fill_efi_memory_descriptor(desc, EfiACPIReclaimMemory, - d->arch.efi_acpi_gpa, d->arch.efi_acpi_len); + fill_efi_memory_descriptor(desc, EfiACPIReclaimMemory, d->arch.efi_acpi_gpa, + d->arch.efi_acpi_len); - tbl_add[TBL_MMAP].start = d->arch.efi_acpi_gpa - + acpi_get_table_offset(tbl_add, TBL_MMAP); - tbl_add[TBL_MMAP].size = sizeof(EFI_MEMORY_DESCRIPTOR) - * (mem->nr_banks + bootinfo.acpi.nr_banks + 1); + tbl_add[TBL_MMAP].start = + d->arch.efi_acpi_gpa + acpi_get_table_offset(tbl_add, TBL_MMAP); + tbl_add[TBL_MMAP].size = sizeof(EFI_MEMORY_DESCRIPTOR) * + (mem->nr_banks + bootinfo.acpi.nr_banks + 1); } /* Create /hypervisor/uefi node for efi properties. */ @@ -145,18 +145,16 @@ int __init acpi_make_efi_nodes(void *fdt, struct membank tbl_add[]) if ( res ) return res; - res = fdt_property_u64(fdt, "xen,uefi-system-table", - tbl_add[TBL_EFIT].start); + res = + fdt_property_u64(fdt, "xen,uefi-system-table", tbl_add[TBL_EFIT].start); if ( res ) return res; - res = fdt_property_u64(fdt, "xen,uefi-mmap-start", - tbl_add[TBL_MMAP].start); + res = fdt_property_u64(fdt, "xen,uefi-mmap-start", tbl_add[TBL_MMAP].start); if ( res ) return res; - res = fdt_property_u32(fdt, "xen,uefi-mmap-size", - tbl_add[TBL_MMAP].size); + res = fdt_property_u32(fdt, "xen,uefi-mmap-size", tbl_add[TBL_MMAP].size); if ( res ) return res; diff --git a/xen/arch/arm/gic-v2.c b/xen/arch/arm/gic-v2.c index 256988c665..1e038c3a1c 100644 --- a/xen/arch/arm/gic-v2.c +++ b/xen/arch/arm/gic-v2.c @@ -45,70 +45,72 @@ * LR register definitions are GIC v2 specific. 
* Moved these definitions from header file to here */ -#define GICH_V2_LR_VIRTUAL_MASK 0x3ff -#define GICH_V2_LR_VIRTUAL_SHIFT 0 -#define GICH_V2_LR_PHYSICAL_MASK 0x3ff -#define GICH_V2_LR_PHYSICAL_SHIFT 10 -#define GICH_V2_LR_STATE_MASK 0x3 -#define GICH_V2_LR_STATE_SHIFT 28 -#define GICH_V2_LR_PENDING (1U << 28) -#define GICH_V2_LR_ACTIVE (1U << 29) -#define GICH_V2_LR_PRIORITY_SHIFT 23 -#define GICH_V2_LR_PRIORITY_MASK 0x1f -#define GICH_V2_LR_HW_SHIFT 31 -#define GICH_V2_LR_HW_MASK 0x1 -#define GICH_V2_LR_GRP_SHIFT 30 -#define GICH_V2_LR_GRP_MASK 0x1 +#define GICH_V2_LR_VIRTUAL_MASK 0x3ff +#define GICH_V2_LR_VIRTUAL_SHIFT 0 +#define GICH_V2_LR_PHYSICAL_MASK 0x3ff +#define GICH_V2_LR_PHYSICAL_SHIFT 10 +#define GICH_V2_LR_STATE_MASK 0x3 +#define GICH_V2_LR_STATE_SHIFT 28 +#define GICH_V2_LR_PENDING (1U << 28) +#define GICH_V2_LR_ACTIVE (1U << 29) +#define GICH_V2_LR_PRIORITY_SHIFT 23 +#define GICH_V2_LR_PRIORITY_MASK 0x1f +#define GICH_V2_LR_HW_SHIFT 31 +#define GICH_V2_LR_HW_MASK 0x1 +#define GICH_V2_LR_GRP_SHIFT 30 +#define GICH_V2_LR_GRP_MASK 0x1 #define GICH_V2_LR_MAINTENANCE_IRQ (1U << 19) -#define GICH_V2_LR_GRP1 (1U << 30) -#define GICH_V2_LR_HW (1U << GICH_V2_LR_HW_SHIFT) -#define GICH_V2_LR_CPUID_SHIFT 10 -#define GICH_V2_LR_CPUID_MASK 0x7 -#define GICH_V2_VTR_NRLRGS 0x3f +#define GICH_V2_LR_GRP1 (1U << 30) +#define GICH_V2_LR_HW (1U << GICH_V2_LR_HW_SHIFT) +#define GICH_V2_LR_CPUID_SHIFT 10 +#define GICH_V2_LR_CPUID_MASK 0x7 +#define GICH_V2_VTR_NRLRGS 0x3f -#define GICH_V2_VMCR_PRIORITY_MASK 0x1f -#define GICH_V2_VMCR_PRIORITY_SHIFT 27 +#define GICH_V2_VMCR_PRIORITY_MASK 0x1f +#define GICH_V2_VMCR_PRIORITY_SHIFT 27 /* GICv2m extension register definitions. */ /* -* MSI_TYPER: -* [31:26] Reserved -* [25:16] lowest SPI assigned to MSI -* [15:10] Reserved -* [9:0] Number of SPIs assigned to MSI -*/ -#define V2M_MSI_TYPER 0x008 -#define V2M_MSI_TYPER_BASE_SHIFT 16 -#define V2M_MSI_TYPER_BASE_MASK 0x3FF -#define V2M_MSI_TYPER_NUM_MASK 0x3FF -#define V2M_MSI_SETSPI_NS 0x040 -#define V2M_MIN_SPI 32 -#define V2M_MAX_SPI 1019 -#define V2M_MSI_IIDR 0xFCC - -#define V2M_MSI_TYPER_BASE_SPI(x) \ - (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK) - -#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK) - -struct v2m_data { + * MSI_TYPER: + * [31:26] Reserved + * [25:16] lowest SPI assigned to MSI + * [15:10] Reserved + * [9:0] Number of SPIs assigned to MSI + */ +#define V2M_MSI_TYPER 0x008 +#define V2M_MSI_TYPER_BASE_SHIFT 16 +#define V2M_MSI_TYPER_BASE_MASK 0x3FF +#define V2M_MSI_TYPER_NUM_MASK 0x3FF +#define V2M_MSI_SETSPI_NS 0x040 +#define V2M_MIN_SPI 32 +#define V2M_MAX_SPI 1019 +#define V2M_MSI_IIDR 0xFCC + +#define V2M_MSI_TYPER_BASE_SPI(x) \ + (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK) + +#define V2M_MSI_TYPER_NUM_SPI(x) ((x)&V2M_MSI_TYPER_NUM_MASK) + +struct v2m_data +{ struct list_head entry; /* Pointer to the DT node representing the v2m frame */ const struct dt_device_node *dt_node; - paddr_t addr; /* Register frame base */ - paddr_t size; /* Register frame size */ + paddr_t addr; /* Register frame base */ + paddr_t size; /* Register frame size */ u32 spi_start; /* The SPI number that MSIs start */ - u32 nr_spis; /* The number of SPIs for MSIs */ + u32 nr_spis; /* The number of SPIs for MSIs */ }; /* v2m extension register frame information list */ static LIST_HEAD(gicv2m_info); /* Global state */ -static struct { - void __iomem * map_dbase; /* IO mapped Address of distributor registers */ - void __iomem * map_cbase; /* IO mapped Address 
of CPU interface registers */ - void __iomem * map_hbase; /* IO Address of virtual interface registers */ +static struct +{ + void __iomem *map_dbase; /* IO mapped Address of distributor registers */ + void __iomem *map_cbase; /* IO mapped Address of CPU interface registers */ + void __iomem *map_hbase; /* IO Address of virtual interface registers */ spinlock_t lock; } gicv2; @@ -165,7 +167,7 @@ static unsigned int gicv2_cpu_mask(const cpumask_t *cpumask) cpumask_t possible_mask; cpumask_and(&possible_mask, cpumask, &cpu_possible_map); - for_each_cpu( cpu, &possible_mask ) + for_each_cpu (cpu, &possible_mask) { ASSERT(cpu < NR_GIC_CPU_IF); mask |= per_cpu(gic_cpu_id, cpu); @@ -210,8 +212,7 @@ static void gicv2_dump_state(const struct vcpu *v) if ( v == current ) { for ( i = 0; i < gicv2_info.nr_lrs; i++ ) - printk(" HW_LR[%d]=%x\n", i, - readl_gich(GICH_LR + i * 4)); + printk(" HW_LR[%d]=%x\n", i, readl_gich(GICH_LR + i * 4)); } else { @@ -307,24 +308,22 @@ static void gicv2_set_irq_type(struct irq_desc *desc, unsigned int type) writel_gicd(cfg, GICD_ICFGR + (irq / 16) * 4); actual = readl_gicd(GICD_ICFGR + (irq / 16) * 4); - if ( ( cfg & edgebit ) ^ ( actual & edgebit ) ) + if ( (cfg & edgebit) ^ (actual & edgebit) ) { - printk(XENLOG_WARNING "GICv2: WARNING: " + printk(XENLOG_WARNING + "GICv2: WARNING: " "CPU%d: Failed to configure IRQ%u as %s-triggered. " "H/w forces to %s-triggered.\n", - smp_processor_id(), desc->irq, - cfg & edgebit ? "Edge" : "Level", + smp_processor_id(), desc->irq, cfg & edgebit ? "Edge" : "Level", actual & edgebit ? "Edge" : "Level"); - desc->arch.type = actual & edgebit ? - IRQ_TYPE_EDGE_RISING : - IRQ_TYPE_LEVEL_HIGH; + desc->arch.type = + actual & edgebit ? IRQ_TYPE_EDGE_RISING : IRQ_TYPE_LEVEL_HIGH; } spin_unlock(&gicv2.lock); } -static void gicv2_set_irq_priority(struct irq_desc *desc, - unsigned int priority) +static void gicv2_set_irq_priority(struct irq_desc *desc, unsigned int priority) { unsigned int irq = desc->irq; @@ -358,9 +357,8 @@ static void __init gicv2_dist_init(void) gicv2_info.nr_lines = nr_lines; gic_cpus = 1 + ((type & GICD_TYPE_CPUS) >> 5); - printk("GICv2: %d lines, %d cpu%s%s (IID %8.8x).\n", - nr_lines, gic_cpus, (gic_cpus == 1) ? "" : "s", - (type & GICD_TYPE_SEC) ? ", secure" : "", + printk("GICv2: %d lines, %d cpu%s%s (IID %8.8x).\n", nr_lines, gic_cpus, + (gic_cpus == 1) ? "" : "s", (type & GICD_TYPE_SEC) ? 
", secure" : "", readl_gicd(GICD_IIDR)); /* Default all global IRQs to level, active low */ @@ -373,8 +371,8 @@ static void __init gicv2_dist_init(void) /* Default priority for global interrupts */ for ( i = 32; i < nr_lines; i += 4 ) - writel_gicd(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | - GIC_PRI_IRQ << 8 | GIC_PRI_IRQ, + writel_gicd(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | GIC_PRI_IRQ << 8 | + GIC_PRI_IRQ, GICD_IPRIORITYR + (i / 4) * 4); /* Disable all global interrupts */ @@ -403,14 +401,14 @@ static void gicv2_cpu_init(void) /* Set SGI priorities */ for ( i = 0; i < 16; i += 4 ) - writel_gicd(GIC_PRI_IPI << 24 | GIC_PRI_IPI << 16 | - GIC_PRI_IPI << 8 | GIC_PRI_IPI, + writel_gicd(GIC_PRI_IPI << 24 | GIC_PRI_IPI << 16 | GIC_PRI_IPI << 8 | + GIC_PRI_IPI, GICD_IPRIORITYR + (i / 4) * 4); /* Set PPI priorities */ for ( i = 16; i < 32; i += 4 ) - writel_gicd(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | - GIC_PRI_IRQ << 8 | GIC_PRI_IRQ, + writel_gicd(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | GIC_PRI_IRQ << 8 | + GIC_PRI_IRQ, GICD_IPRIORITYR + (i / 4) * 4); /* Local settings: interface controller */ @@ -419,7 +417,7 @@ static void gicv2_cpu_init(void) /* Finest granularity of priority */ writel_gicc(0x0, GICC_BPR); /* Turn on delivery */ - writel_gicc(GICC_CTL_ENABLE|GICC_CTL_EOI, GICC_CTLR); + writel_gicc(GICC_CTL_ENABLE | GICC_CTL_EOI, GICC_CTLR); } static void gicv2_cpu_disable(void) @@ -433,7 +431,7 @@ static void gicv2_hyp_init(void) uint8_t nr_lrs; vtr = readl_gich(GICH_VTR); - nr_lrs = (vtr & GICH_V2_VTR_NRLRGS) + 1; + nr_lrs = (vtr & GICH_V2_VTR_NRLRGS) + 1; gicv2_info.nr_lrs = nr_lrs; } @@ -466,7 +464,7 @@ static void gicv2_send_SGI(enum gic_sgi sgi, enum gic_sgi_mode irqmode, */ dmb(ishst); - switch ( irqmode ) + switch (irqmode) { case SGI_TARGET_OTHERS: writel_gicd(GICD_SGI_TARGET_OTHERS | sgi, GICD_SGIR); @@ -477,8 +475,8 @@ static void gicv2_send_SGI(enum gic_sgi sgi, enum gic_sgi_mode irqmode, case SGI_TARGET_LIST: cpumask_and(&online_mask, cpu_mask, &cpu_online_map); mask = gicv2_cpu_mask(&online_mask); - writel_gicd(GICD_SGI_TARGET_LIST | - (mask << GICD_SGI_TARGET_SHIFT) | sgi, + writel_gicd(GICD_SGI_TARGET_LIST | (mask << GICD_SGI_TARGET_SHIFT) | + sgi, GICD_SGIR); break; default: @@ -503,13 +501,13 @@ static void gicv2_update_lr(int lr, unsigned int virq, uint8_t priority, BUG_ON(lr >= gicv2_info.nr_lrs); BUG_ON(lr < 0); - lr_reg = (((state & GICH_V2_LR_STATE_MASK) << GICH_V2_LR_STATE_SHIFT) | + lr_reg = (((state & GICH_V2_LR_STATE_MASK) << GICH_V2_LR_STATE_SHIFT) | ((GIC_PRI_TO_GUEST(priority) & GICH_V2_LR_PRIORITY_MASK) - << GICH_V2_LR_PRIORITY_SHIFT) | + << GICH_V2_LR_PRIORITY_SHIFT) | ((virq & GICH_V2_LR_VIRTUAL_MASK) << GICH_V2_LR_VIRTUAL_SHIFT)); if ( hw_irq != INVALID_IRQ ) - lr_reg |= GICH_V2_LR_HW | ((hw_irq & GICH_V2_LR_PHYSICAL_MASK ) + lr_reg |= GICH_V2_LR_HW | ((hw_irq & GICH_V2_LR_PHYSICAL_MASK) << GICH_V2_LR_PHYSICAL_SHIFT); writel_gich(lr_reg, GICH_LR + lr * 4); @@ -524,9 +522,10 @@ static void gicv2_read_lr(int lr, struct gic_lr *lr_reg) { uint32_t lrv; - lrv = readl_gich(GICH_LR + lr * 4); + lrv = readl_gich(GICH_LR + lr * 4); lr_reg->virq = (lrv >> GICH_V2_LR_VIRTUAL_SHIFT) & GICH_V2_LR_VIRTUAL_MASK; - lr_reg->priority = (lrv >> GICH_V2_LR_PRIORITY_SHIFT) & GICH_V2_LR_PRIORITY_MASK; + lr_reg->priority = + (lrv >> GICH_V2_LR_PRIORITY_SHIFT) & GICH_V2_LR_PRIORITY_MASK; lr_reg->pending = lrv & GICH_V2_LR_PENDING; lr_reg->active = lrv & GICH_V2_LR_ACTIVE; lr_reg->hw_status = lrv & GICH_V2_LR_HW; @@ -543,8 +542,8 @@ static void gicv2_read_lr(int lr, struct gic_lr 
*lr_reg) * This is only valid for SGI, but it does not matter to always * read it as it should be 0 by default. */ - lr_reg->virt.source = (lrv >> GICH_V2_LR_CPUID_SHIFT) - & GICH_V2_LR_CPUID_MASK; + lr_reg->virt.source = + (lrv >> GICH_V2_LR_CPUID_SHIFT) & GICH_V2_LR_CPUID_MASK; } } @@ -552,9 +551,10 @@ static void gicv2_write_lr(int lr, const struct gic_lr *lr_reg) { uint32_t lrv = 0; - lrv = (((lr_reg->virq & GICH_V2_LR_VIRTUAL_MASK) << GICH_V2_LR_VIRTUAL_SHIFT) | - ((uint32_t)(lr_reg->priority & GICH_V2_LR_PRIORITY_MASK) - << GICH_V2_LR_PRIORITY_SHIFT) ); + lrv = (((lr_reg->virq & GICH_V2_LR_VIRTUAL_MASK) + << GICH_V2_LR_VIRTUAL_SHIFT) | + ((uint32_t)(lr_reg->priority & GICH_V2_LR_PRIORITY_MASK) + << GICH_V2_LR_PRIORITY_SHIFT)); if ( lr_reg->active ) lrv |= GICH_V2_LR_ACTIVE; @@ -596,13 +596,13 @@ static void gicv2_hcr_status(uint32_t flag, bool status) static unsigned int gicv2_read_vmcr_priority(void) { - return ((readl_gich(GICH_VMCR) >> GICH_V2_VMCR_PRIORITY_SHIFT) - & GICH_V2_VMCR_PRIORITY_MASK); + return ((readl_gich(GICH_VMCR) >> GICH_V2_VMCR_PRIORITY_SHIFT) & + GICH_V2_VMCR_PRIORITY_MASK); } static unsigned int gicv2_read_apr(int apr_reg) { - return readl_gich(GICH_APR); + return readl_gich(GICH_APR); } static bool gicv2_read_pending_state(struct irq_desc *irqd) @@ -669,7 +669,8 @@ static void gicv2_guest_irq_end(struct irq_desc *desc) /* Deactivation happens in maintenance interrupt / via GICV */ } -static void gicv2_irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask) +static void gicv2_irq_set_affinity(struct irq_desc *desc, + const cpumask_t *cpu_mask) { unsigned int mask; @@ -690,12 +691,13 @@ static int gicv2_map_hwdown_extra_mappings(struct domain *d) const struct v2m_data *v2m_data; /* For the moment, we'll assign all v2m frames to the hardware domain. 
*/ - list_for_each_entry( v2m_data, &gicv2m_info, entry ) + list_for_each_entry (v2m_data, &gicv2m_info, entry) { int ret; u32 spi; - printk("GICv2: Mapping v2m frame to d%d: addr=0x%"PRIpaddr" size=0x%"PRIpaddr" spi_base=%u num_spis=%u\n", + printk("GICv2: Mapping v2m frame to d%d: addr=0x%" PRIpaddr + " size=0x%" PRIpaddr " spi_base=%u num_spis=%u\n", d->domain_id, v2m_data->addr, v2m_data->size, v2m_data->spi_start, v2m_data->nr_spis); @@ -724,7 +726,8 @@ static int gicv2_map_hwdown_extra_mappings(struct domain *d) if ( ret ) { printk(XENLOG_ERR - "GICv2: Failed to set v2m MSI SPI[%d] type.\n", spi); + "GICv2: Failed to set v2m MSI SPI[%d] type.\n", + spi); return ret; } @@ -758,8 +761,7 @@ static int gicv2_map_hwdown_extra_mappings(struct domain *d) * https://www.kernel.org/doc/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt */ static int gicv2m_make_dt_node(const struct domain *d, - const struct dt_device_node *gic, - void *fdt) + const struct dt_device_node *gic, void *fdt) { u32 len; int res; @@ -783,11 +785,12 @@ static int gicv2m_make_dt_node(const struct domain *d, if ( res ) return res; - list_for_each_entry( v2m_data, &gicv2m_info, entry ) + list_for_each_entry (v2m_data, &gicv2m_info, entry) { v2m = v2m_data->dt_node; - printk("GICv2: Creating v2m DT node for d%d: addr=0x%"PRIpaddr" size=0x%"PRIpaddr" spi_base=%u num_spis=%u\n", + printk("GICv2: Creating v2m DT node for d%d: addr=0x%" PRIpaddr + " size=0x%" PRIpaddr " spi_base=%u num_spis=%u\n", d->domain_id, v2m_data->addr, v2m_data->size, v2m_data->spi_start, v2m_data->nr_spis); @@ -851,8 +854,7 @@ static int gicv2m_make_dt_node(const struct domain *d, } static int gicv2_make_hwdom_dt_node(const struct domain *d, - const struct dt_device_node *gic, - void *fdt) + const struct dt_device_node *gic, void *fdt) { const void *compatible = NULL; u32 len; @@ -862,7 +864,8 @@ static int gicv2_make_hwdom_dt_node(const struct domain *d, compatible = dt_get_property(gic, "compatible", &len); if ( !compatible ) { - dprintk(XENLOG_ERR, "Can't find compatible property for the gic node\n"); + dprintk(XENLOG_ERR, + "Can't find compatible property for the gic node\n"); return -FDT_ERR_XEN(ENOENT); } @@ -896,24 +899,24 @@ static int gicv2_make_hwdom_dt_node(const struct domain *d, /* XXX different for level vs edge */ static hw_irq_controller gicv2_host_irq_type = { - .typename = "gic-v2", - .startup = gicv2_irq_startup, - .shutdown = gicv2_irq_shutdown, - .enable = gicv2_irq_enable, - .disable = gicv2_irq_disable, - .ack = gicv2_irq_ack, - .end = gicv2_host_irq_end, + .typename = "gic-v2", + .startup = gicv2_irq_startup, + .shutdown = gicv2_irq_shutdown, + .enable = gicv2_irq_enable, + .disable = gicv2_irq_disable, + .ack = gicv2_irq_ack, + .end = gicv2_host_irq_end, .set_affinity = gicv2_irq_set_affinity, }; static hw_irq_controller gicv2_guest_irq_type = { - .typename = "gic-v2", - .startup = gicv2_irq_startup, - .shutdown = gicv2_irq_shutdown, - .enable = gicv2_irq_enable, - .disable = gicv2_irq_disable, - .ack = gicv2_irq_ack, - .end = gicv2_guest_irq_end, + .typename = "gic-v2", + .startup = gicv2_irq_startup, + .shutdown = gicv2_irq_shutdown, + .enable = gicv2_irq_enable, + .disable = gicv2_irq_disable, + .ack = gicv2_irq_ack, + .end = gicv2_guest_irq_end, .set_affinity = gicv2_irq_set_affinity, }; @@ -964,9 +967,9 @@ static void gicv2_add_v2m_frame_to_list(paddr_t addr, paddr_t size, if ( spi_start < V2M_MIN_SPI ) panic("GICv2: Invalid v2m base SPI:%u\n", spi_start); - if ( ( nr_spis == 0 ) || ( spi_start + nr_spis 
> V2M_MAX_SPI ) ) - panic("GICv2: Number of v2m SPIs (%u) exceed maximum (%u)\n", - nr_spis, V2M_MAX_SPI - V2M_MIN_SPI + 1); + if ( (nr_spis == 0) || (spi_start + nr_spis > V2M_MAX_SPI) ) + panic("GICv2: Number of v2m SPIs (%u) exceed maximum (%u)\n", nr_spis, + V2M_MAX_SPI - V2M_MIN_SPI + 1); /* Allocate an entry to record new v2m frame information. */ v2m_data = xzalloc_bytes(sizeof(struct v2m_data)); @@ -981,12 +984,12 @@ static void gicv2_add_v2m_frame_to_list(paddr_t addr, paddr_t size, v2m_data->dt_node = v2m; printk("GICv2m extension register frame:\n" - " gic_v2m_addr=%"PRIpaddr"\n" - " gic_v2m_size=%"PRIpaddr"\n" + " gic_v2m_addr=%" PRIpaddr "\n" + " gic_v2m_size=%" PRIpaddr "\n" " gic_v2m_spi_base=%u\n" " gic_v2m_num_spis=%u\n", - v2m_data->addr, v2m_data->size, - v2m_data->spi_start, v2m_data->nr_spis); + v2m_data->addr, v2m_data->size, v2m_data->spi_start, + v2m_data->nr_spis); list_add_tail(&v2m_data->entry, &gicv2m_info); } @@ -1017,8 +1020,9 @@ static void gicv2_extension_dt_init(const struct dt_device_node *node) */ if ( dt_property_read_u32(v2m, "arm,msi-base-spi", &spi_start) && dt_property_read_u32(v2m, "arm,msi-num-spis", &nr_spis) ) - printk("GICv2: DT overriding v2m hardware setting (base:%u, num:%u)\n", - spi_start, nr_spis); + printk( + "GICv2: DT overriding v2m hardware setting (base:%u, num:%u)\n", + spi_start, nr_spis); /* Add this v2m frame information to list. */ gicv2_add_v2m_frame_to_list(addr, size, spi_start, nr_spis, v2m); @@ -1064,13 +1068,15 @@ static void __init gicv2_dt_init(void) if ( csize < SZ_8K ) { printk(XENLOG_WARNING "GICv2: WARNING: " - "The GICC size is too small: %#"PRIx64" expected %#x\n", + "The GICC size is too small: %#" PRIx64 + " expected %#x\n", csize, SZ_8K); if ( platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) ) { printk(XENLOG_WARNING "GICv2: enable platform quirk: 64K stride\n"); vsize = csize = SZ_128K; - } else + } + else csize = SZ_8K; } @@ -1079,8 +1085,9 @@ static void __init gicv2_dt_init(void) * same size. */ if ( csize != vsize ) - panic("GICv2: Sizes of GICC (%#"PRIpaddr") and GICV (%#"PRIpaddr") don't match\n", - csize, vsize); + panic("GICv2: Sizes of GICC (%#" PRIpaddr ") and GICV (%#" PRIpaddr + ") don't match\n", + csize, vsize); /* * Check whether this GIC implements the v2m extension. 
If so, @@ -1134,8 +1141,8 @@ static int gicv2_make_hwdom_madt(const struct domain *d, u32 offset) return -EINVAL; } - host_gicc = container_of(header, struct acpi_madt_generic_interrupt, - header); + host_gicc = + container_of(header, struct acpi_madt_generic_interrupt, header); size = sizeof(struct acpi_madt_generic_interrupt); /* Add Generic Interrupt */ for ( i = 0; i < d->max_vcpus; i++ ) @@ -1157,13 +1164,12 @@ static int gicv2_make_hwdom_madt(const struct domain *d, u32 offset) return table_len; } -static int __init -gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, - const unsigned long end) +static int __init gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, + const unsigned long end) { static int cpu_base_assigned = 0; struct acpi_madt_generic_interrupt *processor = - container_of(header, struct acpi_madt_generic_interrupt, header); + container_of(header, struct acpi_madt_generic_interrupt, header); if ( BAD_MADT_ENTRY(processor, end) ) return -EINVAL; @@ -1186,10 +1192,10 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, } else { - if ( cbase != processor->base_address - || hbase != processor->gich_base_address - || vbase != processor->gicv_base_address - || gicv2_info.maintenance_irq != processor->vgic_interrupt ) + if ( cbase != processor->base_address || + hbase != processor->gich_base_address || + vbase != processor->gicv_base_address || + gicv2_info.maintenance_irq != processor->vgic_interrupt ) { printk("GICv2: GICC entries are not same in MADT table\n"); return -EINVAL; @@ -1199,12 +1205,11 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, return 0; } -static int __init -gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header, - const unsigned long end) +static int __init gic_acpi_parse_madt_distributor( + struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_generic_distributor *dist = - container_of(header, struct acpi_madt_generic_distributor, header); + container_of(header, struct acpi_madt_generic_distributor, header); if ( BAD_MADT_ENTRY(dist, end) ) return -EINVAL; @@ -1247,7 +1252,9 @@ static void __init gicv2_acpi_init(void) panic("GICv2: No valid GICD entries exists\n"); } #else -static void __init gicv2_acpi_init(void) { } +static void __init gicv2_acpi_init(void) +{ +} static int gicv2_make_hwdom_madt(const struct domain *d, u32 offset) { return 0; @@ -1264,16 +1271,15 @@ static int __init gicv2_init(void) gicv2_acpi_init(); printk("GICv2 initialization:\n" - " gic_dist_addr=%"PRIpaddr"\n" - " gic_cpu_addr=%"PRIpaddr"\n" - " gic_hyp_addr=%"PRIpaddr"\n" - " gic_vcpu_addr=%"PRIpaddr"\n" - " gic_maintenance_irq=%u\n", - dbase, cbase, hbase, vbase, - gicv2_info.maintenance_irq); - - if ( (dbase & ~PAGE_MASK) || (cbase & ~PAGE_MASK) || - (hbase & ~PAGE_MASK) || (vbase & ~PAGE_MASK) ) + " gic_dist_addr=%" PRIpaddr "\n" + " gic_cpu_addr=%" PRIpaddr "\n" + " gic_hyp_addr=%" PRIpaddr "\n" + " gic_vcpu_addr=%" PRIpaddr "\n" + " gic_maintenance_irq=%u\n", + dbase, cbase, hbase, vbase, gicv2_info.maintenance_irq); + + if ( (dbase & ~PAGE_MASK) || (cbase & ~PAGE_MASK) || (hbase & ~PAGE_MASK) || + (vbase & ~PAGE_MASK) ) panic("GICv2 interfaces not page aligned\n"); gicv2.map_dbase = ioremap_nocache(dbase, PAGE_SIZE); @@ -1297,12 +1303,12 @@ static int __init gicv2_init(void) gicv2.map_cbase += aliased_offset; - printk(XENLOG_WARNING - "GICv2: Adjusting CPU interface base to %#"PRIx64"\n", + printk(XENLOG_WARNING "GICv2: Adjusting CPU interface base to %#" PRIx64 + "\n", cbase + 
aliased_offset); - } else if ( csize == SZ_128K ) - printk(XENLOG_WARNING - "GICv2: GICC size=%#"PRIx64" but not aliased\n", + } + else if ( csize == SZ_128K ) + printk(XENLOG_WARNING "GICv2: GICC size=%#" PRIx64 " but not aliased\n", csize); gicv2.map_hbase = ioremap_nocache(hbase, PAGE_SIZE); @@ -1331,37 +1337,37 @@ static void gicv2_do_LPI(unsigned int lpi) } const static struct gic_hw_operations gicv2_ops = { - .info = &gicv2_info, - .init = gicv2_init, - .secondary_init = gicv2_secondary_cpu_init, - .save_state = gicv2_save_state, - .restore_state = gicv2_restore_state, - .dump_state = gicv2_dump_state, - .gic_host_irq_type = &gicv2_host_irq_type, - .gic_guest_irq_type = &gicv2_guest_irq_type, - .eoi_irq = gicv2_eoi_irq, - .deactivate_irq = gicv2_dir_irq, - .read_irq = gicv2_read_irq, - .set_active_state = gicv2_set_active_state, - .set_pending_state = gicv2_set_pending_state, - .set_irq_type = gicv2_set_irq_type, - .set_irq_priority = gicv2_set_irq_priority, - .send_SGI = gicv2_send_SGI, - .disable_interface = gicv2_disable_interface, - .update_lr = gicv2_update_lr, - .update_hcr_status = gicv2_hcr_status, - .clear_lr = gicv2_clear_lr, - .read_lr = gicv2_read_lr, - .write_lr = gicv2_write_lr, - .read_vmcr_priority = gicv2_read_vmcr_priority, - .read_apr = gicv2_read_apr, - .read_pending_state = gicv2_read_pending_state, - .make_hwdom_dt_node = gicv2_make_hwdom_dt_node, - .make_hwdom_madt = gicv2_make_hwdom_madt, + .info = &gicv2_info, + .init = gicv2_init, + .secondary_init = gicv2_secondary_cpu_init, + .save_state = gicv2_save_state, + .restore_state = gicv2_restore_state, + .dump_state = gicv2_dump_state, + .gic_host_irq_type = &gicv2_host_irq_type, + .gic_guest_irq_type = &gicv2_guest_irq_type, + .eoi_irq = gicv2_eoi_irq, + .deactivate_irq = gicv2_dir_irq, + .read_irq = gicv2_read_irq, + .set_active_state = gicv2_set_active_state, + .set_pending_state = gicv2_set_pending_state, + .set_irq_type = gicv2_set_irq_type, + .set_irq_priority = gicv2_set_irq_priority, + .send_SGI = gicv2_send_SGI, + .disable_interface = gicv2_disable_interface, + .update_lr = gicv2_update_lr, + .update_hcr_status = gicv2_hcr_status, + .clear_lr = gicv2_clear_lr, + .read_lr = gicv2_read_lr, + .write_lr = gicv2_write_lr, + .read_vmcr_priority = gicv2_read_vmcr_priority, + .read_apr = gicv2_read_apr, + .read_pending_state = gicv2_read_pending_state, + .make_hwdom_dt_node = gicv2_make_hwdom_dt_node, + .make_hwdom_madt = gicv2_make_hwdom_madt, .get_hwdom_extra_madt_size = gicv2_get_hwdom_extra_madt_size, .map_hwdom_extra_mappings = gicv2_map_hwdown_extra_mappings, - .iomem_deny_access = gicv2_iomem_deny_access, - .do_LPI = gicv2_do_LPI, + .iomem_deny_access = gicv2_iomem_deny_access, + .do_LPI = gicv2_do_LPI, }; /* Set up the GIC */ @@ -1376,20 +1382,18 @@ static int __init gicv2_dt_preinit(struct dt_device_node *node, return 0; } -static const struct dt_device_match gicv2_dt_match[] __initconst = -{ +static const struct dt_device_match gicv2_dt_match[] __initconst = { DT_MATCH_GIC_V2, - { /* sentinel */ }, + {/* sentinel */}, }; -DT_DEVICE_START(gicv2, "GICv2", DEVICE_GIC) - .dt_match = gicv2_dt_match, - .init = gicv2_dt_preinit, -DT_DEVICE_END +DT_DEVICE_START(gicv2, "GICv2", DEVICE_GIC).dt_match = gicv2_dt_match, + .init = gicv2_dt_preinit, + DT_DEVICE_END #ifdef CONFIG_ACPI -/* Set up the GIC */ -static int __init gicv2_acpi_preinit(const void *data) + /* Set up the GIC */ + static int __init gicv2_acpi_preinit(const void *data) { gicv2_info.hw_version = GIC_V2; register_gic_ops(&gicv2_ops); @@ -1397,16 
+1401,16 @@ static int __init gicv2_acpi_preinit(const void *data) return 0; } -ACPI_DEVICE_START(agicv2, "GICv2", DEVICE_GIC) - .class_type = ACPI_MADT_GIC_VERSION_V2, - .init = gicv2_acpi_preinit, -ACPI_DEVICE_END +ACPI_DEVICE_START(agicv2, "GICv2", DEVICE_GIC).class_type = + ACPI_MADT_GIC_VERSION_V2, + .init = gicv2_acpi_preinit, + ACPI_DEVICE_END #endif -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/gic-v3-its.c b/xen/arch/arm/gic-v3-its.c index ba4bc00df5..4ba023741f 100644 --- a/xen/arch/arm/gic-v3-its.c +++ b/xen/arch/arm/gic-v3-its.c @@ -33,7 +33,7 @@ #include #include -#define ITS_CMD_QUEUE_SZ SZ_1M +#define ITS_CMD_QUEUE_SZ SZ_1M /* * No lock here, as this list gets only populated upon boot while scanning @@ -49,16 +49,17 @@ LIST_HEAD(host_its_list); * property of MSIs in general and we can easily get to the base address * of the ITS and look that up. */ -struct its_device { +struct its_device +{ struct rb_node rbnode; struct host_its *hw_its; void *itt_addr; - paddr_t guest_doorbell; /* Identifies the virtual ITS */ + paddr_t guest_doorbell; /* Identifies the virtual ITS */ uint32_t host_devid; uint32_t guest_devid; - uint32_t eventids; /* Number of event IDs (MSIs) */ - uint32_t *host_lpi_blocks; /* Which LPIs are used on the host */ - struct pending_irq *pend_irqs; /* One struct per event */ + uint32_t eventids; /* Number of event IDs (MSIs) */ + uint32_t *host_lpi_blocks; /* Which LPIs are used on the host */ + struct pending_irq *pend_irqs; /* One struct per event */ }; bool gicv3_its_host_has_its(void) @@ -66,7 +67,7 @@ bool gicv3_its_host_has_its(void) return !list_empty(&host_its_list); } -#define BUFPTR_MASK GENMASK(19, 5) +#define BUFPTR_MASK GENMASK(19, 5) static int its_send_command(struct host_its *hw_its, const void *its_cmd) { /* @@ -182,9 +183,8 @@ static int its_send_cmd_sync(struct host_its *its, unsigned int cpu) return its_send_command(its, cmd); } -static int its_send_cmd_mapti(struct host_its *its, - uint32_t deviceid, uint32_t eventid, - uint32_t pintid, uint16_t icid) +static int its_send_cmd_mapti(struct host_its *its, uint32_t deviceid, + uint32_t eventid, uint32_t pintid, uint16_t icid) { uint64_t cmd[4]; @@ -234,8 +234,8 @@ static int its_send_cmd_mapd(struct host_its *its, uint32_t deviceid, return its_send_command(its, cmd); } -static int its_send_cmd_inv(struct host_its *its, - uint32_t deviceid, uint32_t eventid) +static int its_send_cmd_inv(struct host_its *its, uint32_t deviceid, + uint32_t eventid) { uint64_t cmd[4]; @@ -253,7 +253,7 @@ int gicv3_its_setup_collection(unsigned int cpu) struct host_its *its; int ret; - list_for_each_entry(its, &host_its_list, entry) + list_for_each_entry (its, &host_its_list, entry) { ret = its_send_cmd_mapc(its, cpu, cpu); if ( ret ) @@ -271,11 +271,11 @@ int gicv3_its_setup_collection(unsigned int cpu) return 0; } -#define BASER_ATTR_MASK \ - ((0x3UL << GITS_BASER_SHAREABILITY_SHIFT) | \ - (0x7UL << GITS_BASER_OUTER_CACHEABILITY_SHIFT) | \ - (0x7UL << GITS_BASER_INNER_CACHEABILITY_SHIFT)) -#define BASER_RO_MASK (GENMASK(58, 56) | GENMASK(52, 48)) +#define BASER_ATTR_MASK \ + ((0x3UL << GITS_BASER_SHAREABILITY_SHIFT) | \ + (0x7UL << GITS_BASER_OUTER_CACHEABILITY_SHIFT) | \ + (0x7UL << GITS_BASER_INNER_CACHEABILITY_SHIFT)) +#define BASER_RO_MASK (GENMASK(58, 56) | GENMASK(52, 48)) /* Check 
that the physical address can be encoded in the PROPBASER register. */ static bool check_baser_phys_addr(void *vaddr, unsigned int page_bits) @@ -302,7 +302,7 @@ static void *its_map_cbaser(struct host_its *its) uint64_t reg; void *buffer; - reg = GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT; + reg = GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT; reg |= GIC_BASER_CACHE_SameAsInner << GITS_BASER_OUTER_CACHEABILITY_SHIFT; reg |= GIC_BASER_CACHE_RaWaWb << GITS_BASER_INNER_CACHEABILITY_SHIFT; @@ -342,18 +342,18 @@ static void *its_map_cbaser(struct host_its *its) } /* The ITS BASE registers work with page sizes of 4K, 16K or 64K. */ -#define BASER_PAGE_BITS(sz) ((sz) * 2 + 12) +#define BASER_PAGE_BITS(sz) ((sz)*2 + 12) static int its_map_baser(void __iomem *basereg, uint64_t regc, unsigned int nr_items) { uint64_t attr, reg; unsigned int entry_size = GITS_BASER_ENTRY_SIZE(regc); - unsigned int pagesz = 2; /* try 64K pages first, then go down. */ + unsigned int pagesz = 2; /* try 64K pages first, then go down. */ unsigned int table_size; void *buffer; - attr = GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT; + attr = GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT; attr |= GIC_BASER_CACHE_SameAsInner << GITS_BASER_OUTER_CACHEABILITY_SHIFT; attr |= GIC_BASER_CACHE_RaWaWb << GITS_BASER_INNER_CACHEABILITY_SHIFT; @@ -377,13 +377,13 @@ retry: return -ERANGE; } - reg = attr; + reg = attr; reg |= (pagesz << GITS_BASER_PAGE_SIZE_SHIFT); reg |= (table_size >> BASER_PAGE_BITS(pagesz)) - 1; reg |= regc & BASER_RO_MASK; reg |= GITS_VALID_BIT; - reg |= encode_baser_phys_addr(virt_to_maddr(buffer), - BASER_PAGE_BITS(pagesz)); + reg |= + encode_baser_phys_addr(virt_to_maddr(buffer), BASER_PAGE_BITS(pagesz)); writeq_relaxed(reg, basereg); regc = readq_relaxed(basereg); @@ -478,7 +478,7 @@ static int gicv3_its_init_single_its(struct host_its *hw_its) reg = readq_relaxed(basereg); type = (reg & GITS_BASER_TYPE_MASK) >> GITS_BASER_TYPE_SHIFT; - switch ( type ) + switch (type) { case GITS_BASER_TYPE_NONE: continue; @@ -552,7 +552,7 @@ static struct host_its *gicv3_its_find_by_doorbell(paddr_t doorbell_address) { struct host_its *hw_its; - list_for_each_entry(hw_its, &host_its_list, entry) + list_for_each_entry (hw_its, &host_its_list, entry) { if ( hw_its->addr + ITS_DOORBELL_OFFSET == doorbell_address ) return hw_its; @@ -561,8 +561,8 @@ static struct host_its *gicv3_its_find_by_doorbell(paddr_t doorbell_address) return NULL; } -static int compare_its_guest_devices(struct its_device *dev, - paddr_t vdoorbell, uint32_t vdevid) +static int compare_its_guest_devices(struct its_device *dev, paddr_t vdoorbell, + uint32_t vdevid) { if ( dev->guest_doorbell < vdoorbell ) return -1; @@ -584,9 +584,9 @@ static int compare_its_guest_devices(struct its_device *dev, * The mapping connects a device @devid and event @eventid pair to LPI @lpi, * increasing both @eventid and @lpi to cover the number of requested LPIs. */ -static int gicv3_its_map_host_events(struct host_its *its, - uint32_t devid, uint32_t eventid, - uint32_t lpi, uint32_t nr_events) +static int gicv3_its_map_host_events(struct host_its *its, uint32_t devid, + uint32_t eventid, uint32_t lpi, + uint32_t nr_events) { uint32_t i; int ret; @@ -619,16 +619,16 @@ static int gicv3_its_map_host_events(struct host_its *its, * This does not check if this particular hardware device is already mapped * at another domain, it is expected that this would be done by the caller. 
*/ -int gicv3_its_map_guest_device(struct domain *d, - paddr_t host_doorbell, uint32_t host_devid, - paddr_t guest_doorbell, uint32_t guest_devid, - uint64_t nr_events, bool valid) +int gicv3_its_map_guest_device(struct domain *d, paddr_t host_doorbell, + uint32_t host_devid, paddr_t guest_doorbell, + uint32_t guest_devid, uint64_t nr_events, + bool valid) { void *itt_addr = NULL; struct host_its *hw_its; struct its_device *dev = NULL; struct rb_node **new = &d->arch.vgic.its_devices.rb_node, *parent = NULL; - int i, ret = -ENOENT; /* "i" must be signed to check for >= 0 below. */ + int i, ret = -ENOENT; /* "i" must be signed to check for >= 0 below. */ hw_its = gicv3_its_find_by_doorbell(host_doorbell); if ( !hw_its ) @@ -671,8 +671,9 @@ int gicv3_its_map_guest_device(struct domain *d, if ( valid ) { - printk(XENLOG_G_WARNING "d%d tried to remap guest ITS device 0x%x to host device 0x%x\n", - d->domain_id, guest_devid, host_devid); + printk(XENLOG_G_WARNING "d%d tried to remap guest ITS device " + "0x%x to host device 0x%x\n", + d->domain_id, guest_devid, host_devid); return -EBUSY; } @@ -797,7 +798,7 @@ static struct its_device *get_its_device(struct domain *d, paddr_t vdoorbell, ASSERT(spin_is_locked(&d->arch.vgic.its_devices_lock)); - while (node) + while ( node ) { int cmp; @@ -816,11 +817,9 @@ static struct its_device *get_its_device(struct domain *d, paddr_t vdoorbell, return NULL; } -static struct pending_irq *get_event_pending_irq(struct domain *d, - paddr_t vdoorbell_address, - uint32_t vdevid, - uint32_t eventid, - uint32_t *host_lpi) +static struct pending_irq * +get_event_pending_irq(struct domain *d, paddr_t vdoorbell_address, + uint32_t vdevid, uint32_t eventid, uint32_t *host_lpi) { struct its_device *dev; struct pending_irq *pirq = NULL; @@ -880,8 +879,8 @@ struct pending_irq *gicv3_assign_guest_event(struct domain *d, struct pending_irq *pirq; uint32_t host_lpi = INVALID_LPI; - pirq = get_event_pending_irq(d, vdoorbell_address, vdevid, eventid, - &host_lpi); + pirq = + get_event_pending_irq(d, vdoorbell_address, vdevid, eventid, &host_lpi); if ( !pirq ) return NULL; @@ -897,7 +896,7 @@ int gicv3_its_deny_access(const struct domain *d) unsigned long mfn, nr; const struct host_its *its_data; - list_for_each_entry( its_data, &host_its_list, entry ) + list_for_each_entry (its_data, &host_its_list, entry) { mfn = paddr_to_pfn(its_data->addr); nr = PFN_UP(its_data->size); @@ -918,8 +917,7 @@ int gicv3_its_deny_access(const struct domain *d) * as the host. 
*/ int gicv3_its_make_hwdom_dt_nodes(const struct domain *d, - const struct dt_device_node *gic, - void *fdt) + const struct dt_device_node *gic, void *fdt) { uint32_t len; int res; @@ -942,7 +940,7 @@ int gicv3_its_make_hwdom_dt_nodes(const struct domain *d, if ( res ) return res; - list_for_each_entry(its_data, &host_its_list, entry) + list_for_each_entry (its_data, &host_its_list, entry) { its = its_data->dt_node; @@ -1058,8 +1056,8 @@ unsigned long gicv3_its_make_hwdom_madt(const struct domain *d, void *base_ptr) for ( i = 0; i < vgic_v3_its_count(d); i++ ) { - fw_its = acpi_table_get_entry_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, - i); + fw_its = + acpi_table_get_entry_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, i); memcpy(hwdom_its, fw_its, sizeof(struct acpi_madt_generic_translator)); hwdom_its++; } @@ -1085,7 +1083,7 @@ int gicv3_its_init(void) else gicv3_its_acpi_init(); - list_for_each_entry(hw_its, &host_its_list, entry) + list_for_each_entry (hw_its, &host_its_list, entry) { ret = gicv3_its_init_single_its(hw_its); if ( ret ) @@ -1095,7 +1093,6 @@ int gicv3_its_init(void) return 0; } - /* * Local variables: * mode: C diff --git a/xen/arch/arm/gic-v3-lpi.c b/xen/arch/arm/gic-v3-lpi.c index e8c6e159ca..e63836ce87 100644 --- a/xen/arch/arm/gic-v3-lpi.c +++ b/xen/arch/arm/gic-v3-lpi.c @@ -45,17 +45,19 @@ */ union host_lpi { uint64_t data; - struct { + struct + { uint32_t virt_lpi; uint16_t dom_id; uint16_t pad; }; }; -#define LPI_PROPTABLE_NEEDS_FLUSHING (1U << 0) +#define LPI_PROPTABLE_NEEDS_FLUSHING (1U << 0) /* Global state */ -static struct { +static struct +{ /* The global LPI property table, shared by all redistributors. */ uint8_t *lpi_property; /* @@ -79,16 +81,17 @@ static struct { unsigned int flags; } lpi_data; -struct lpi_redist_data { - paddr_t redist_addr; - unsigned int redist_id; - void *pending_table; +struct lpi_redist_data +{ + paddr_t redist_addr; + unsigned int redist_id; + void *pending_table; }; static DEFINE_PER_CPU(struct lpi_redist_data, lpi_redist); -#define MAX_NR_HOST_LPIS (lpi_data.max_host_lpi_ids - LPI_OFFSET) -#define HOST_LPIS_PER_PAGE (PAGE_SIZE / sizeof(union host_lpi)) +#define MAX_NR_HOST_LPIS (lpi_data.max_host_lpi_ids - LPI_OFFSET) +#define HOST_LPIS_PER_PAGE (PAGE_SIZE / sizeof(union host_lpi)) static union host_lpi *gic_get_host_lpi(uint32_t plpi) { @@ -152,7 +155,7 @@ void vgic_vcpu_inject_lpi(struct domain *d, unsigned int virq) vcpu_id = ACCESS_ONCE(p->lpi_vcpu_id); if ( vcpu_id >= d->max_vcpus ) - return; + return; vgic_inject_irq(d, d->vcpu[vcpu_id], virq, true); } @@ -225,7 +228,8 @@ void gicv3_lpi_update_host_entry(uint32_t host_lpi, int domain_id, host_lpi -= LPI_OFFSET; - hlpip = &lpi_data.host_lpis[host_lpi / HOST_LPIS_PER_PAGE][host_lpi % HOST_LPIS_PER_PAGE]; + hlpip = &lpi_data.host_lpis[host_lpi / HOST_LPIS_PER_PAGE] + [host_lpi % HOST_LPIS_PER_PAGE]; hlpi.virt_lpi = virt_lpi; hlpi.dom_id = domain_id; @@ -241,8 +245,9 @@ static int gicv3_lpi_allocate_pendtable(uint64_t *reg) if ( this_cpu(lpi_redist).pending_table ) return -EBUSY; - val = GIC_BASER_CACHE_RaWaWb << GICR_PENDBASER_INNER_CACHEABILITY_SHIFT; - val |= GIC_BASER_CACHE_SameAsInner << GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT; + val = GIC_BASER_CACHE_RaWaWb << GICR_PENDBASER_INNER_CACHEABILITY_SHIFT; + val |= GIC_BASER_CACHE_SameAsInner + << GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT; val |= GIC_BASER_InnerShareable << GICR_PENDBASER_SHAREABILITY_SHIFT; /* @@ -279,12 +284,13 @@ static int gicv3_lpi_allocate_pendtable(uint64_t *reg) * Tell a redistributor about the (shared) 
property table, allocating one * if not already done. */ -static int gicv3_lpi_set_proptable(void __iomem * rdist_base) +static int gicv3_lpi_set_proptable(void __iomem *rdist_base) { uint64_t reg; - reg = GIC_BASER_CACHE_RaWaWb << GICR_PROPBASER_INNER_CACHEABILITY_SHIFT; - reg |= GIC_BASER_CACHE_SameAsInner << GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT; + reg = GIC_BASER_CACHE_RaWaWb << GICR_PROPBASER_INNER_CACHEABILITY_SHIFT; + reg |= GIC_BASER_CACHE_SameAsInner + << GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT; reg |= GIC_BASER_InnerShareable << GICR_PROPBASER_SHAREABILITY_SHIFT; /* @@ -336,7 +342,7 @@ static int gicv3_lpi_set_proptable(void __iomem * rdist_base) return 0; } -int gicv3_lpi_init_rdist(void __iomem * rdist_base) +int gicv3_lpi_init_rdist(void __iomem *rdist_base) { uint32_t reg; uint64_t table_reg; @@ -361,7 +367,8 @@ int gicv3_lpi_init_rdist(void __iomem * rdist_base) if ( !(table_reg & GICR_PENDBASER_SHAREABILITY_MASK) ) { table_reg &= ~GICR_PENDBASER_INNER_CACHEABILITY_MASK; - table_reg |= GIC_BASER_CACHE_nC << GICR_PENDBASER_INNER_CACHEABILITY_SHIFT; + table_reg |= GIC_BASER_CACHE_nC + << GICR_PENDBASER_INNER_CACHEABILITY_SHIFT; writeq_relaxed(table_reg, rdist_base + GICR_PENDBASER); } @@ -389,7 +396,8 @@ int gicv3_lpi_init_host_lpis(unsigned int host_lpi_bits) * Tell the user about it, the actual number is reported below. */ if ( max_lpi_bits < 14 || max_lpi_bits > 32 ) - printk(XENLOG_WARNING "WARNING: max_lpi_bits must be between 14 and 32, adjusting.\n"); + printk(XENLOG_WARNING + "WARNING: max_lpi_bits must be between 14 and 32, adjusting.\n"); max_lpi_bits = max(max_lpi_bits, 14U); lpi_data.max_host_lpi_ids = BIT(min(host_lpi_bits, max_lpi_bits)); @@ -400,7 +408,8 @@ int gicv3_lpi_init_host_lpis(unsigned int host_lpi_bits) * It's very unlikely that we need more than 24 bits worth of LPIs. */ if ( lpi_data.max_host_lpi_ids > BIT(24) ) - warning_add("Using high number of LPIs, limit memory usage with max_lpi_bits\n"); + warning_add("Using high number of LPIs, limit memory usage with " + "max_lpi_bits\n"); spin_lock_init(&lpi_data.host_lpis_lock); lpi_data.next_free_lpi = 0; @@ -422,8 +431,7 @@ static int find_unused_host_lpi(uint32_t start, uint32_t *index) ASSERT(spin_is_locked(&lpi_data.host_lpis_lock)); - for ( chunk = start; - chunk < MAX_NR_HOST_LPIS / HOST_LPIS_PER_PAGE; + for ( chunk = start; chunk < MAX_NR_HOST_LPIS / HOST_LPIS_PER_PAGE; chunk++ ) { /* If we hit an unallocated chunk, use entry 0 in that one. */ @@ -464,7 +472,7 @@ int gicv3_allocate_host_lpi_block(struct domain *d, uint32_t *first_lpi) chunk = find_unused_host_lpi(lpi_data.next_free_lpi / HOST_LPIS_PER_PAGE, &lpi_idx); - if ( chunk == - 1 ) /* rescan for a hole from the beginning */ + if ( chunk == -1 ) /* rescan for a hole from the beginning */ { lpi_idx = 0; chunk = find_unused_host_lpi(0, &lpi_idx); @@ -543,7 +551,7 @@ int gicv3_allocate_host_lpi_block(struct domain *d, uint32_t *first_lpi) void gicv3_free_host_lpi_block(uint32_t first_lpi) { - union host_lpi *hlpi, empty_lpi = { .dom_id = DOMID_INVALID }; + union host_lpi *hlpi, empty_lpi = {.dom_id = DOMID_INVALID}; int i; /* This should only be called with the beginning of a block. */ @@ -551,7 +559,7 @@ void gicv3_free_host_lpi_block(uint32_t first_lpi) hlpi = gic_get_host_lpi(first_lpi); if ( !hlpi ) - return; /* Nothing to free here. */ + return; /* Nothing to free here. 
*/ spin_lock(&lpi_data.host_lpis_lock); diff --git a/xen/arch/arm/gic-v3.c b/xen/arch/arm/gic-v3.c index 0f6cbf6224..7b30d16b07 100644 --- a/xen/arch/arm/gic-v3.c +++ b/xen/arch/arm/gic-v3.c @@ -45,10 +45,11 @@ #include /* Global state */ -static struct { - void __iomem *map_dbase; /* Mapped address of distributor registers */ +static struct +{ + void __iomem *map_dbase; /* Mapped address of distributor registers */ struct rdist_region *rdist_regions; - uint32_t rdist_stride; + uint32_t rdist_stride; unsigned int rdist_count; /* Number of rdist regions count */ unsigned int nr_priorities; spinlock_t lock; @@ -57,11 +58,11 @@ static struct { static struct gic_info gicv3_info; /* per-cpu re-distributor base */ -static DEFINE_PER_CPU(void __iomem*, rbase); +static DEFINE_PER_CPU(void __iomem *, rbase); -#define GICD (gicv3.map_dbase) -#define GICD_RDIST_BASE (this_cpu(rbase)) -#define GICD_RDIST_SGI_BASE (GICD_RDIST_BASE + SZ_64K) +#define GICD (gicv3.map_dbase) +#define GICD_RDIST_BASE (this_cpu(rbase)) +#define GICD_RDIST_SGI_BASE (GICD_RDIST_BASE + SZ_64K) /* * Saves all 16(Max) LR registers. Though number of LRs implemented @@ -70,7 +71,7 @@ static DEFINE_PER_CPU(void __iomem*, rbase); static inline void gicv3_save_lrs(struct vcpu *v) { /* Fall through for all the cases */ - switch ( gicv3_info.nr_lrs ) + switch (gicv3_info.nr_lrs) { case 16: v->arch.gic.v3.lr[15] = READ_SYSREG(ICH_LR15_EL2); @@ -103,10 +104,10 @@ static inline void gicv3_save_lrs(struct vcpu *v) case 2: v->arch.gic.v3.lr[1] = READ_SYSREG(ICH_LR1_EL2); case 1: - v->arch.gic.v3.lr[0] = READ_SYSREG(ICH_LR0_EL2); - break; + v->arch.gic.v3.lr[0] = READ_SYSREG(ICH_LR0_EL2); + break; default: - BUG(); + BUG(); } } @@ -117,7 +118,7 @@ static inline void gicv3_save_lrs(struct vcpu *v) static inline void gicv3_restore_lrs(const struct vcpu *v) { /* Fall through for all the cases */ - switch ( gicv3_info.nr_lrs ) + switch (gicv3_info.nr_lrs) { case 16: WRITE_SYSREG(v->arch.gic.v3.lr[15], ICH_LR15_EL2); @@ -153,30 +154,46 @@ static inline void gicv3_restore_lrs(const struct vcpu *v) WRITE_SYSREG(v->arch.gic.v3.lr[0], ICH_LR0_EL2); break; default: - BUG(); + BUG(); } } static uint64_t gicv3_ich_read_lr(int lr) { - switch ( lr ) + switch (lr) { - case 0: return READ_SYSREG(ICH_LR0_EL2); - case 1: return READ_SYSREG(ICH_LR1_EL2); - case 2: return READ_SYSREG(ICH_LR2_EL2); - case 3: return READ_SYSREG(ICH_LR3_EL2); - case 4: return READ_SYSREG(ICH_LR4_EL2); - case 5: return READ_SYSREG(ICH_LR5_EL2); - case 6: return READ_SYSREG(ICH_LR6_EL2); - case 7: return READ_SYSREG(ICH_LR7_EL2); - case 8: return READ_SYSREG(ICH_LR8_EL2); - case 9: return READ_SYSREG(ICH_LR9_EL2); - case 10: return READ_SYSREG(ICH_LR10_EL2); - case 11: return READ_SYSREG(ICH_LR11_EL2); - case 12: return READ_SYSREG(ICH_LR12_EL2); - case 13: return READ_SYSREG(ICH_LR13_EL2); - case 14: return READ_SYSREG(ICH_LR14_EL2); - case 15: return READ_SYSREG(ICH_LR15_EL2); + case 0: + return READ_SYSREG(ICH_LR0_EL2); + case 1: + return READ_SYSREG(ICH_LR1_EL2); + case 2: + return READ_SYSREG(ICH_LR2_EL2); + case 3: + return READ_SYSREG(ICH_LR3_EL2); + case 4: + return READ_SYSREG(ICH_LR4_EL2); + case 5: + return READ_SYSREG(ICH_LR5_EL2); + case 6: + return READ_SYSREG(ICH_LR6_EL2); + case 7: + return READ_SYSREG(ICH_LR7_EL2); + case 8: + return READ_SYSREG(ICH_LR8_EL2); + case 9: + return READ_SYSREG(ICH_LR9_EL2); + case 10: + return READ_SYSREG(ICH_LR10_EL2); + case 11: + return READ_SYSREG(ICH_LR11_EL2); + case 12: + return READ_SYSREG(ICH_LR12_EL2); + case 13: + return 
READ_SYSREG(ICH_LR13_EL2); + case 14: + return READ_SYSREG(ICH_LR14_EL2); + case 15: + return READ_SYSREG(ICH_LR15_EL2); default: BUG(); } @@ -184,7 +201,7 @@ static uint64_t gicv3_ich_read_lr(int lr) static void gicv3_ich_write_lr(int lr, uint64_t val) { - switch ( lr ) + switch (lr) { case 0: WRITE_SYSREG(val, ICH_LR0_EL2); @@ -292,9 +309,9 @@ static void gicv3_redist_wait_for_rwp(void) static void gicv3_wait_for_rwp(int irq) { if ( irq < NR_LOCAL_IRQS ) - gicv3_redist_wait_for_rwp(); + gicv3_redist_wait_for_rwp(); else - gicv3_dist_wait_for_rwp(); + gicv3_dist_wait_for_rwp(); } static unsigned int gicv3_get_cpu_from_mask(const cpumask_t *cpumask) @@ -312,7 +329,7 @@ static void restore_aprn_regs(const union gic_state_data *d) { /* Write APRn register based on number of priorities platform has implemented */ - switch ( gicv3.nr_priorities ) + switch (gicv3.nr_priorities) { case 7: WRITE_SYSREG32(d->v3.apr0[2], ICH_AP0R2_EL2); @@ -335,7 +352,7 @@ static void save_aprn_regs(union gic_state_data *d) { /* Read APRn register based on number of priorities platform has implemented */ - switch ( gicv3.nr_priorities ) + switch (gicv3.nr_priorities) { case 7: d->v3.apr0[2] = READ_SYSREG32(ICH_AP0R2_EL2); @@ -360,7 +377,6 @@ static void save_aprn_regs(union gic_state_data *d) */ static void gicv3_save_state(struct vcpu *v) { - /* No need for spinlocks here because interrupts are disabled around * this call and it only accesses struct vcpu fields that cannot be * accessed simultaneously by another pCPU. @@ -447,7 +463,7 @@ static bool gicv3_peek_irq(struct irq_desc *irqd, u32 offset) void __iomem *base; unsigned int irq = irqd->irq; - if ( irq >= NR_GIC_LOCAL_IRQS) + if ( irq >= NR_GIC_LOCAL_IRQS ) base = GICD + (irq / 32) * 4; else base = GICD_RDIST_SGI_BASE; @@ -525,11 +541,11 @@ static void gicv3_set_pending_state(struct irq_desc *irqd, bool pending) static inline uint64_t gicv3_mpidr_to_affinity(int cpu) { - uint64_t mpidr = cpu_logical_map(cpu); - return (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | - MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | - MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | - MPIDR_AFFINITY_LEVEL(mpidr, 0)); + uint64_t mpidr = cpu_logical_map(cpu); + return (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | + MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | + MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 0)); } static void gicv3_set_irq_type(struct irq_desc *desc, unsigned int type) @@ -543,7 +559,7 @@ static void gicv3_set_irq_type(struct irq_desc *desc, unsigned int type) spin_lock(&gicv3.lock); - if ( irq >= NR_GIC_LOCAL_IRQS) + if ( irq >= NR_GIC_LOCAL_IRQS ) base = GICD + GICD_ICFGR + (irq / 16) * 4; else base = GICD_RDIST_SGI_BASE + GICR_ICFGR1; @@ -559,23 +575,21 @@ static void gicv3_set_irq_type(struct irq_desc *desc, unsigned int type) writel_relaxed(cfg, base); actual = readl_relaxed(base); - if ( ( cfg & edgebit ) ^ ( actual & edgebit ) ) + if ( (cfg & edgebit) ^ (actual & edgebit) ) { - printk(XENLOG_WARNING "GICv3: WARNING: " + printk(XENLOG_WARNING + "GICv3: WARNING: " "CPU%d: Failed to configure IRQ%u as %s-triggered. " "H/w forces to %s-triggered.\n", - smp_processor_id(), desc->irq, - cfg & edgebit ? "Edge" : "Level", + smp_processor_id(), desc->irq, cfg & edgebit ? "Edge" : "Level", actual & edgebit ? "Edge" : "Level"); - desc->arch.type = actual & edgebit ? - IRQ_TYPE_EDGE_RISING : - IRQ_TYPE_LEVEL_HIGH; + desc->arch.type = + actual & edgebit ? 
IRQ_TYPE_EDGE_RISING : IRQ_TYPE_LEVEL_HIGH; } spin_unlock(&gicv3.lock); } -static void gicv3_set_irq_priority(struct irq_desc *desc, - unsigned int priority) +static void gicv3_set_irq_priority(struct irq_desc *desc, unsigned int priority) { unsigned int irq = desc->irq; @@ -611,8 +625,8 @@ static void __init gicv3_dist_init(void) nr_lines = min(1020U, nr_lines); gicv3_info.nr_lines = nr_lines; - printk("GICv3: %d lines, (IID %8.8x).\n", - nr_lines, readl_relaxed(GICD + GICD_IIDR)); + printk("GICv3: %d lines, (IID %8.8x).\n", nr_lines, + readl_relaxed(GICD + GICD_IIDR)); /* Default all global IRQs to level, active low */ for ( i = NR_GIC_LOCAL_IRQS; i < nr_lines; i += 16 ) @@ -621,8 +635,8 @@ static void __init gicv3_dist_init(void) /* Default priority for global interrupts */ for ( i = NR_GIC_LOCAL_IRQS; i < nr_lines; i += 4 ) { - priority = (GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | - GIC_PRI_IRQ << 8 | GIC_PRI_IRQ); + priority = (GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | GIC_PRI_IRQ << 8 | + GIC_PRI_IRQ); writel_relaxed(priority, GICD + GICD_IPRIORITYR + (i / 4) * 4); } @@ -643,8 +657,9 @@ static void __init gicv3_dist_init(void) gicv3_dist_wait_for_rwp(); /* Turn on the distributor */ - writel_relaxed(GICD_CTL_ENABLE | GICD_CTLR_ARE_NS | - GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1, GICD + GICD_CTLR); + writel_relaxed(GICD_CTL_ENABLE | GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | + GICD_CTLR_ENABLE_G1, + GICD + GICD_CTLR); /* Route all global IRQs to this CPU */ affinity = gicv3_mpidr_to_affinity(smp_processor_id()); @@ -715,16 +730,16 @@ static int __init gicv3_populate_rdist(void) * If we ever get a cluster of more than 16 CPUs, just scream. */ if ( (mpidr & 0xff) >= 16 ) - dprintk(XENLOG_WARNING, "GICv3:Cluster with more than 16's cpus\n"); + dprintk(XENLOG_WARNING, "GICv3:Cluster with more than 16's cpus\n"); /* * Convert affinity to a 32bit value that can be matched to GICR_TYPER * bits [63:32] */ - aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | - MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | - MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | - MPIDR_AFFINITY_LEVEL(mpidr, 0)); + aff = + (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | + MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | + MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | MPIDR_AFFINITY_LEVEL(mpidr, 0)); for ( i = 0; i < gicv3.rdist_count; i++ ) { @@ -734,7 +749,7 @@ static int __init gicv3_populate_rdist(void) if ( reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4 ) { dprintk(XENLOG_ERR, - "GICv3: No redistributor present @%"PRIpaddr"\n", + "GICv3: No redistributor present @%" PRIpaddr "\n", gicv3.rdist_regions[i].base); break; } @@ -777,7 +792,7 @@ static int __init gicv3_populate_rdist(void) } printk("GICv3: CPU%d: Found redistributor in region %d @%p\n", - smp_processor_id(), i, ptr); + smp_processor_id(), i, ptr); return 0; } @@ -796,7 +811,8 @@ static int __init gicv3_populate_rdist(void) } while ( !(typer & GICR_TYPER_LAST) ); } - dprintk(XENLOG_ERR, "GICv3: CPU%d: mpidr 0x%"PRIregister" has no re-distributor!\n", + dprintk(XENLOG_ERR, + "GICv3: CPU%d: mpidr 0x%" PRIregister " has no re-distributor!\n", smp_processor_id(), cpu_logical_map(smp_processor_id())); return -ENODEV; @@ -827,15 +843,15 @@ static int gicv3_cpu_init(void) /* Set priority on PPI and SGI interrupts */ priority = (GIC_PRI_IPI << 24 | GIC_PRI_IPI << 16 | GIC_PRI_IPI << 8 | GIC_PRI_IPI); - for (i = 0; i < NR_GIC_SGI; i += 4) + for ( i = 0; i < NR_GIC_SGI; i += 4 ) writel_relaxed(priority, - GICD_RDIST_SGI_BASE + GICR_IPRIORITYR0 + (i / 4) * 4); + GICD_RDIST_SGI_BASE + GICR_IPRIORITYR0 + 
(i / 4) * 4); priority = (GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | GIC_PRI_IRQ << 8 | GIC_PRI_IRQ); - for (i = NR_GIC_SGI; i < NR_GIC_LOCAL_IRQS; i += 4) + for ( i = NR_GIC_SGI; i < NR_GIC_LOCAL_IRQS; i += 4 ) writel_relaxed(priority, - GICD_RDIST_SGI_BASE + GICR_IPRIORITYR0 + (i / 4) * 4); + GICD_RDIST_SGI_BASE + GICR_IPRIORITYR0 + (i / 4) * 4); /* * The activate state is unknown at boot, so make sure all @@ -885,9 +901,9 @@ static void gicv3_hyp_init(void) uint32_t vtr; vtr = READ_SYSREG32(ICH_VTR_EL2); - gicv3_info.nr_lrs = (vtr & ICH_VTR_NRLRGS) + 1; - gicv3.nr_priorities = ((vtr >> ICH_VTR_PRIBITS_SHIFT) & - ICH_VTR_PRIBITS_MASK) + 1; + gicv3_info.nr_lrs = (vtr & ICH_VTR_NRLRGS) + 1; + gicv3.nr_priorities = + ((vtr >> ICH_VTR_PRIBITS_SHIFT) & ICH_VTR_PRIBITS_MASK) + 1; if ( !((gicv3.nr_priorities > 4) && (gicv3.nr_priorities < 8)) ) panic("GICv3: Invalid number of priority bits\n"); @@ -949,7 +965,8 @@ static u16 gicv3_compute_target_list(int *base_cpu, const struct cpumask *mask, } mpidr = cpu_logical_map(cpu); - if ( cluster_id != (mpidr & ~MPIDR_AFF0_MASK) ) { + if ( cluster_id != (mpidr & ~MPIDR_AFF0_MASK) ) + { cpu--; goto out; } @@ -965,7 +982,7 @@ static void gicv3_send_sgi_list(enum gic_sgi sgi, const cpumask_t *cpumask) int cpu = 0; uint64_t val; - for_each_cpu(cpu, cpumask) + for_each_cpu (cpu, cpumask) { /* Mask lower 8 bits. It represent cpu in affinity level 0 */ uint64_t cluster_id = cpu_logical_map(cpu) & ~MPIDR_AFF0_MASK; @@ -978,11 +995,9 @@ static void gicv3_send_sgi_list(enum gic_sgi sgi, const cpumask_t *cpumask) * Prepare affinity path of the cluster for which SGI is generated * along with SGI number */ - val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 | - MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 | - sgi << 24 | - MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 | - tlist); + val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 | + MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 | sgi << 24 | + MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 | tlist); WRITE_SYSREG64(val, ICC_SGI1R_EL1); } @@ -999,11 +1014,11 @@ static void gicv3_send_sgi(enum gic_sgi sgi, enum gic_sgi_mode mode, */ dsb(st); - switch ( mode ) + switch (mode) { case SGI_TARGET_OTHERS: WRITE_SYSREG64(ICH_SGI_TARGET_OTHERS << ICH_SGI_IRQMODE_SHIFT | - (uint64_t)sgi << ICH_SGI_IRQ_SHIFT, + (uint64_t)sgi << ICH_SGI_IRQ_SHIFT, ICC_SGI1R_EL1); isb(); break; @@ -1037,7 +1052,7 @@ static void gicv3_update_lr(int lr, unsigned int virq, uint8_t priority, BUG_ON(lr >= gicv3_info.nr_lrs); BUG_ON(lr < 0); - val = (((uint64_t)state & 0x3) << ICH_LR_STATE_SHIFT); + val = (((uint64_t)state & 0x3) << ICH_LR_STATE_SHIFT); /* * When the guest is GICv3, all guest IRQs are Group 1, as Group0 @@ -1049,9 +1064,9 @@ static void gicv3_update_lr(int lr, unsigned int virq, uint8_t priority, val |= (uint64_t)priority << ICH_LR_PRIORITY_SHIFT; val |= ((uint64_t)virq & ICH_LR_VIRTUAL_MASK) << ICH_LR_VIRTUAL_SHIFT; - if ( hw_irq != INVALID_IRQ ) - val |= ICH_LR_HW | (((uint64_t)hw_irq & ICH_LR_PHYSICAL_MASK) - << ICH_LR_PHYSICAL_SHIFT); + if ( hw_irq != INVALID_IRQ ) + val |= ICH_LR_HW | (((uint64_t)hw_irq & ICH_LR_PHYSICAL_MASK) + << ICH_LR_PHYSICAL_SHIFT); gicv3_ich_write_lr(lr, val); } @@ -1069,9 +1084,9 @@ static void gicv3_read_lr(int lr, struct gic_lr *lr_reg) lr_reg->virq = (lrv >> ICH_LR_VIRTUAL_SHIFT) & ICH_LR_VIRTUAL_MASK; - lr_reg->priority = (lrv >> ICH_LR_PRIORITY_SHIFT) & ICH_LR_PRIORITY_MASK; - lr_reg->pending = lrv & ICH_LR_STATE_PENDING; - lr_reg->active = lrv & ICH_LR_STATE_ACTIVE; + lr_reg->priority = (lrv >> ICH_LR_PRIORITY_SHIFT) 
& ICH_LR_PRIORITY_MASK; + lr_reg->pending = lrv & ICH_LR_STATE_PENDING; + lr_reg->active = lrv & ICH_LR_STATE_ACTIVE; lr_reg->hw_status = lrv & ICH_LR_HW; if ( lr_reg->hw_status ) @@ -1086,8 +1101,8 @@ static void gicv3_read_lr(int lr, struct gic_lr *lr_reg) * This is only valid for SGI, but it does not matter to always * read it as it should be 0 by default. */ - lr_reg->virt.source = (lrv >> ICH_LR_CPUID_SHIFT) - & ICH_LR_CPUID_MASK; + lr_reg->virt.source = + (lrv >> ICH_LR_CPUID_SHIFT) & ICH_LR_CPUID_MASK; } } } @@ -1097,9 +1112,9 @@ static void gicv3_write_lr(int lr_reg, const struct gic_lr *lr) uint64_t lrv = 0; const enum gic_version vgic_version = current->domain->arch.vgic.version; - - lrv = ( ((u64)(lr->virq & ICH_LR_VIRTUAL_MASK) << ICH_LR_VIRTUAL_SHIFT) | - ((u64)(lr->priority & ICH_LR_PRIORITY_MASK) << ICH_LR_PRIORITY_SHIFT) ); + lrv = + (((u64)(lr->virq & ICH_LR_VIRTUAL_MASK) << ICH_LR_VIRTUAL_SHIFT) | + ((u64)(lr->priority & ICH_LR_PRIORITY_MASK) << ICH_LR_PRIORITY_SHIFT)); if ( lr->active ) lrv |= ICH_LR_STATE_ACTIVE; @@ -1152,14 +1167,14 @@ static void gicv3_hcr_status(uint32_t flag, bool status) static unsigned int gicv3_read_vmcr_priority(void) { - return ((READ_SYSREG32(ICH_VMCR_EL2) >> ICH_VMCR_PRIORITY_SHIFT) & + return ((READ_SYSREG32(ICH_VMCR_EL2) >> ICH_VMCR_PRIORITY_SHIFT) & ICH_VMCR_PRIORITY_MASK); } /* Only support reading GRP1 APRn registers */ static unsigned int gicv3_read_apr(int apr_reg) { - switch ( apr_reg ) + switch (apr_reg) { case 0: ASSERT(gicv3.nr_priorities > 4 && gicv3.nr_priorities < 8); @@ -1260,8 +1275,7 @@ static void gicv3_irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask) } static int gicv3_make_hwdom_dt_node(const struct domain *d, - const struct dt_device_node *gic, - void *fdt) + const struct dt_device_node *gic, void *fdt) { const void *compatible, *hw_reg; uint32_t len, new_len; @@ -1270,7 +1284,8 @@ static int gicv3_make_hwdom_dt_node(const struct domain *d, compatible = dt_get_property(gic, "compatible", &len); if ( !compatible ) { - dprintk(XENLOG_ERR, "Can't find compatible property for the gic node\n"); + dprintk(XENLOG_ERR, + "Can't find compatible property for the gic node\n"); return -FDT_ERR_XEN(ENOENT); } @@ -1307,24 +1322,24 @@ static int gicv3_make_hwdom_dt_node(const struct domain *d, } static const hw_irq_controller gicv3_host_irq_type = { - .typename = "gic-v3", - .startup = gicv3_irq_startup, - .shutdown = gicv3_irq_shutdown, - .enable = gicv3_irq_enable, - .disable = gicv3_irq_disable, - .ack = gicv3_irq_ack, - .end = gicv3_host_irq_end, + .typename = "gic-v3", + .startup = gicv3_irq_startup, + .shutdown = gicv3_irq_shutdown, + .enable = gicv3_irq_enable, + .disable = gicv3_irq_disable, + .ack = gicv3_irq_ack, + .end = gicv3_host_irq_end, .set_affinity = gicv3_irq_set_affinity, }; static const hw_irq_controller gicv3_guest_irq_type = { - .typename = "gic-v3", - .startup = gicv3_irq_startup, - .shutdown = gicv3_irq_shutdown, - .enable = gicv3_irq_enable, - .disable = gicv3_irq_disable, - .ack = gicv3_irq_ack, - .end = gicv3_guest_irq_end, + .typename = "gic-v3", + .startup = gicv3_irq_startup, + .shutdown = gicv3_irq_shutdown, + .enable = gicv3_irq_enable, + .disable = gicv3_irq_disable, + .ack = gicv3_irq_ack, + .end = gicv3_guest_irq_end, .set_affinity = gicv3_irq_set_affinity, }; @@ -1347,12 +1362,13 @@ static void __init gicv3_init_v2(void) { printk(XENLOG_WARNING "GICv3: WARNING: Not enabling support for GICv2 compat mode.\n" - "Size of GICV (%#"PRIpaddr") must at least be %#llx.\n", + "Size of GICV (%#" 
PRIpaddr ") must at least be %#llx.\n", vsize, GUEST_GICC_SIZE); return; } - printk("GICv3 compatible with GICv2 cbase %#"PRIpaddr" vbase %#"PRIpaddr"\n", + printk("GICv3 compatible with GICv2 cbase %#" PRIpaddr " vbase %#" PRIpaddr + "\n", cbase, vbase); vgic_v2_setup_hw(dbase, cbase, csize, vbase, 0); @@ -1361,7 +1377,7 @@ static void __init gicv3_init_v2(void) static void __init gicv3_ioremap_distributor(paddr_t dist_paddr) { if ( dist_paddr & ~PAGE_MASK ) - panic("GICv3: Found unaligned distributor address %"PRIpaddr"\n", + panic("GICv3: Found unaligned distributor address %" PRIpaddr "\n", dbase); gicv3.map_dbase = ioremap_nocache(dist_paddr, SZ_64K); @@ -1382,7 +1398,7 @@ static void __init gicv3_dt_init(void) gicv3_ioremap_distributor(dbase); if ( !dt_property_read_u32(node, "#redistributor-regions", - &gicv3.rdist_count) ) + &gicv3.rdist_count) ) gicv3.rdist_count = 1; rdist_regs = xzalloc_array(struct rdist_region, gicv3.rdist_count); @@ -1401,10 +1417,11 @@ static void __init gicv3_dt_init(void) rdist_regs[i].size = rdist_size; } - if ( !dt_property_read_u32(node, "redistributor-stride", &gicv3.rdist_stride) ) + if ( !dt_property_read_u32(node, "redistributor-stride", + &gicv3.rdist_stride) ) gicv3.rdist_stride = 0; - gicv3.rdist_regions= rdist_regs; + gicv3.rdist_regions = rdist_regs; res = platform_get_irq(node, 0); if ( res < 0 ) @@ -1415,11 +1432,9 @@ static void __init gicv3_dt_init(void) * For GICv3 supporting GICv2, GICC and GICV base address will be * provided. */ - res = dt_device_get_address(node, 1 + gicv3.rdist_count, - &cbase, &csize); + res = dt_device_get_address(node, 1 + gicv3.rdist_count, &cbase, &csize); if ( !res ) - dt_device_get_address(node, 1 + gicv3.rdist_count + 2, - &vbase, &vsize); + dt_device_get_address(node, 1 + gicv3.rdist_count + 2, &vbase, &vsize); } static int gicv3_iomem_deny_access(const struct domain *d) @@ -1466,8 +1481,8 @@ static int gicv3_iomem_deny_access(const struct domain *d) } #ifdef CONFIG_ACPI -static void __init -gic_acpi_add_rdist_region(paddr_t base, paddr_t size, bool single_rdist) +static void __init gic_acpi_add_rdist_region(paddr_t base, paddr_t size, + bool single_rdist) { unsigned int idx = gicv3.rdist_count++; @@ -1497,8 +1512,8 @@ static int gicv3_make_hwdom_madt(const struct domain *d, u32 offset) return -EINVAL; } - host_gicc = container_of(header, struct acpi_madt_generic_interrupt, - header); + host_gicc = + container_of(header, struct acpi_madt_generic_interrupt, header); size = sizeof(struct acpi_madt_generic_interrupt); for ( i = 0; i < d->max_vcpus; i++ ) { @@ -1544,19 +1559,17 @@ static unsigned long gicv3_get_hwdom_extra_madt_size(const struct domain *d) size = sizeof(struct acpi_madt_generic_redistributor) * gicv3.rdist_count; - size += sizeof(struct acpi_madt_generic_translator) - * vgic_v3_its_count(d); + size += sizeof(struct acpi_madt_generic_translator) * vgic_v3_its_count(d); return size; } -static int __init -gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, - const unsigned long end) +static int __init gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, + const unsigned long end) { static int cpu_base_assigned = 0; struct acpi_madt_generic_interrupt *processor = - container_of(header, struct acpi_madt_generic_interrupt, header); + container_of(header, struct acpi_madt_generic_interrupt, header); if ( BAD_MADT_ENTRY(processor, end) ) return -EINVAL; @@ -1577,9 +1590,9 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, } else { - if ( cbase != processor->base_address - || 
vbase != processor->gicv_base_address - || gicv3_info.maintenance_irq != processor->vgic_interrupt ) + if ( cbase != processor->base_address || + vbase != processor->gicv_base_address || + gicv3_info.maintenance_irq != processor->vgic_interrupt ) { printk("GICv3: GICC entries are not same in MADT table\n"); return -EINVAL; @@ -1589,12 +1602,11 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, return 0; } -static int __init -gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header, - const unsigned long end) +static int __init gic_acpi_parse_madt_distributor( + struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_generic_distributor *dist = - container_of(header, struct acpi_madt_generic_distributor, header); + container_of(header, struct acpi_madt_generic_distributor, header); if ( BAD_MADT_ENTRY(dist, end) ) return -EINVAL; @@ -1604,9 +1616,8 @@ gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header, return 0; } -static int __init -gic_acpi_parse_cpu_redistributor(struct acpi_subtable_header *header, - const unsigned long end) +static int __init gic_acpi_parse_cpu_redistributor( + struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_generic_interrupt *processor; u32 size; @@ -1621,9 +1632,8 @@ gic_acpi_parse_cpu_redistributor(struct acpi_subtable_header *header, return 0; } -static int __init -gic_acpi_get_madt_cpu_num(struct acpi_subtable_header *header, - const unsigned long end) +static int __init gic_acpi_get_madt_cpu_num(struct acpi_subtable_header *header, + const unsigned long end) { struct acpi_madt_generic_interrupt *cpuif; @@ -1634,9 +1644,8 @@ gic_acpi_get_madt_cpu_num(struct acpi_subtable_header *header, return 0; } -static int __init -gic_acpi_parse_madt_redistributor(struct acpi_subtable_header *header, - const unsigned long end) +static int __init gic_acpi_parse_madt_redistributor( + struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_generic_redistributor *rdist; @@ -1649,9 +1658,8 @@ gic_acpi_parse_madt_redistributor(struct acpi_subtable_header *header, return 0; } -static int __init -gic_acpi_get_madt_redistributor_num(struct acpi_subtable_header *header, - const unsigned long end) +static int __init gic_acpi_get_madt_redistributor_num( + struct acpi_subtable_header *header, const unsigned long end) { /* Nothing to do here since it only wants to get the number of GIC * redistributors. 
@@ -1680,10 +1688,11 @@ static void __init gicv3_acpi_init(void) count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, gic_acpi_get_madt_redistributor_num, 0); /* Count the total number of CPU interface entries */ - if ( count <= 0 ) { + if ( count <= 0 ) + { count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, gic_acpi_get_madt_cpu_num, 0); - if (count <= 0) + if ( count <= 0 ) panic("GICv3: No valid GICR entries exists\n"); gicr_table = false; @@ -1731,10 +1740,11 @@ static void __init gicv3_acpi_init(void) vbase = INVALID_PADDR; else vsize = GUEST_GICC_SIZE; - } #else -static void __init gicv3_acpi_init(void) { } +static void __init gicv3_acpi_init(void) +{ +} static int gicv3_make_hwdom_madt(const struct domain *d, u32 offset) { return 0; @@ -1771,33 +1781,32 @@ static int __init gicv3_init(void) reg = readl_relaxed(GICD + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; if ( reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4 ) - panic("GICv3: no distributor detected\n"); + panic("GICv3: no distributor detected\n"); for ( i = 0; i < gicv3.rdist_count; i++ ) { /* map dbase & rdist regions */ - gicv3.rdist_regions[i].map_base = - ioremap_nocache(gicv3.rdist_regions[i].base, - gicv3.rdist_regions[i].size); + gicv3.rdist_regions[i].map_base = ioremap_nocache( + gicv3.rdist_regions[i].base, gicv3.rdist_regions[i].size); if ( !gicv3.rdist_regions[i].map_base ) panic("GICv3: Failed to ioremap rdist region for region %d\n", i); } printk("GICv3 initialization:\n" - " gic_dist_addr=%#"PRIpaddr"\n" + " gic_dist_addr=%#" PRIpaddr "\n" " gic_maintenance_irq=%u\n" " gic_rdist_stride=%#x\n" " gic_rdist_regions=%d\n", - dbase, gicv3_info.maintenance_irq, - gicv3.rdist_stride, gicv3.rdist_count); + dbase, gicv3_info.maintenance_irq, gicv3.rdist_stride, + gicv3.rdist_count); printk(" redistributor regions:\n"); for ( i = 0; i < gicv3.rdist_count; i++ ) { const struct rdist_region *r = &gicv3.rdist_regions[i]; - printk(" - region %u: %#"PRIpaddr" - %#"PRIpaddr"\n", - i, r->base, r->base + r->size); + printk(" - region %u: %#" PRIpaddr " - %#" PRIpaddr "\n", i, + r->base, r->base + r->size); } reg = readl_relaxed(GICD + GICD_TYPER); @@ -1832,39 +1841,40 @@ out: } static const struct gic_hw_operations gicv3_ops = { - .info = &gicv3_info, - .init = gicv3_init, - .save_state = gicv3_save_state, - .restore_state = gicv3_restore_state, - .dump_state = gicv3_dump_state, - .gic_host_irq_type = &gicv3_host_irq_type, - .gic_guest_irq_type = &gicv3_guest_irq_type, - .eoi_irq = gicv3_eoi_irq, - .deactivate_irq = gicv3_dir_irq, - .read_irq = gicv3_read_irq, - .set_active_state = gicv3_set_active_state, - .set_pending_state = gicv3_set_pending_state, - .set_irq_type = gicv3_set_irq_type, - .set_irq_priority = gicv3_set_irq_priority, - .send_SGI = gicv3_send_sgi, - .disable_interface = gicv3_disable_interface, - .update_lr = gicv3_update_lr, - .update_hcr_status = gicv3_hcr_status, - .clear_lr = gicv3_clear_lr, - .read_lr = gicv3_read_lr, - .write_lr = gicv3_write_lr, - .read_vmcr_priority = gicv3_read_vmcr_priority, - .read_apr = gicv3_read_apr, - .read_pending_state = gicv3_read_pending_state, - .secondary_init = gicv3_secondary_cpu_init, - .make_hwdom_dt_node = gicv3_make_hwdom_dt_node, - .make_hwdom_madt = gicv3_make_hwdom_madt, + .info = &gicv3_info, + .init = gicv3_init, + .save_state = gicv3_save_state, + .restore_state = gicv3_restore_state, + .dump_state = gicv3_dump_state, + .gic_host_irq_type = &gicv3_host_irq_type, + .gic_guest_irq_type = &gicv3_guest_irq_type, + .eoi_irq = gicv3_eoi_irq, 
+ .deactivate_irq = gicv3_dir_irq, + .read_irq = gicv3_read_irq, + .set_active_state = gicv3_set_active_state, + .set_pending_state = gicv3_set_pending_state, + .set_irq_type = gicv3_set_irq_type, + .set_irq_priority = gicv3_set_irq_priority, + .send_SGI = gicv3_send_sgi, + .disable_interface = gicv3_disable_interface, + .update_lr = gicv3_update_lr, + .update_hcr_status = gicv3_hcr_status, + .clear_lr = gicv3_clear_lr, + .read_lr = gicv3_read_lr, + .write_lr = gicv3_write_lr, + .read_vmcr_priority = gicv3_read_vmcr_priority, + .read_apr = gicv3_read_apr, + .read_pending_state = gicv3_read_pending_state, + .secondary_init = gicv3_secondary_cpu_init, + .make_hwdom_dt_node = gicv3_make_hwdom_dt_node, + .make_hwdom_madt = gicv3_make_hwdom_madt, .get_hwdom_extra_madt_size = gicv3_get_hwdom_extra_madt_size, - .iomem_deny_access = gicv3_iomem_deny_access, - .do_LPI = gicv3_do_LPI, + .iomem_deny_access = gicv3_iomem_deny_access, + .do_LPI = gicv3_do_LPI, }; -static int __init gicv3_dt_preinit(struct dt_device_node *node, const void *data) +static int __init gicv3_dt_preinit(struct dt_device_node *node, + const void *data) { gicv3_info.hw_version = GIC_V3; gicv3_info.node = node; @@ -1874,20 +1884,18 @@ static int __init gicv3_dt_preinit(struct dt_device_node *node, const void *data return 0; } -static const struct dt_device_match gicv3_dt_match[] __initconst = -{ +static const struct dt_device_match gicv3_dt_match[] __initconst = { DT_MATCH_GIC_V3, - { /* sentinel */ }, + {/* sentinel */}, }; -DT_DEVICE_START(gicv3, "GICv3", DEVICE_GIC) - .dt_match = gicv3_dt_match, - .init = gicv3_dt_preinit, -DT_DEVICE_END +DT_DEVICE_START(gicv3, "GICv3", DEVICE_GIC).dt_match = gicv3_dt_match, + .init = gicv3_dt_preinit, + DT_DEVICE_END #ifdef CONFIG_ACPI -/* Set up the GIC */ -static int __init gicv3_acpi_preinit(const void *data) + /* Set up the GIC */ + static int __init gicv3_acpi_preinit(const void *data) { gicv3_info.hw_version = GIC_V3; register_gic_ops(&gicv3_ops); @@ -1896,21 +1904,21 @@ static int __init gicv3_acpi_preinit(const void *data) } ACPI_DEVICE_START(agicv3, "GICv3", DEVICE_GIC) - .class_type = ACPI_MADT_GIC_VERSION_V3, - .init = gicv3_acpi_preinit, -ACPI_DEVICE_END - -ACPI_DEVICE_START(agicv4, "GICv4", DEVICE_GIC) - .class_type = ACPI_MADT_GIC_VERSION_V4, - .init = gicv3_acpi_preinit, -ACPI_DEVICE_END + .class_type = ACPI_MADT_GIC_VERSION_V3, + .init = gicv3_acpi_preinit, + ACPI_DEVICE_END + + ACPI_DEVICE_START(agicv4, "GICv4", DEVICE_GIC) + .class_type = ACPI_MADT_GIC_VERSION_V4, + .init = gicv3_acpi_preinit, + ACPI_DEVICE_END #endif -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/gic-vgic.c b/xen/arch/arm/gic-vgic.c index a3bba854da..1934ef0f1b 100644 --- a/xen/arch/arm/gic-vgic.c +++ b/xen/arch/arm/gic-vgic.c @@ -31,8 +31,7 @@ static void gic_update_one_lr(struct vcpu *v, int i); -static inline void gic_set_lr(int lr, struct pending_irq *p, - unsigned int state) +static inline void gic_set_lr(int lr, struct pending_irq *p, unsigned int state) { ASSERT(!local_irq_is_enabled()); @@ -55,7 +54,7 @@ static inline void gic_add_to_lr_pending(struct vcpu *v, struct pending_irq *n) if ( !list_empty(&n->lr_queue) ) return; - list_for_each_entry ( iter, &v->arch.vgic.lr_pending, lr_queue ) + list_for_each_entry (iter, &v->arch.vgic.lr_pending, lr_queue) { if ( iter->priority > 
n->priority ) { @@ -94,8 +93,10 @@ void gic_raise_inflight_irq(struct vcpu *v, unsigned int virtual_irq) } #ifdef GIC_DEBUG else - gdprintk(XENLOG_DEBUG, "trying to inject irq=%u into %pv, when it is still lr_pending\n", - virtual_irq, v); + gdprintk( + XENLOG_DEBUG, + "trying to inject irq=%u into %pv, when it is still lr_pending\n", + virtual_irq, v); #endif } @@ -106,12 +107,11 @@ void gic_raise_inflight_irq(struct vcpu *v, unsigned int virtual_irq) * event gets discarded while the LPI is in an LR, and a new LPI with the * same number gets mapped quickly afterwards. */ -static unsigned int gic_find_unused_lr(struct vcpu *v, - struct pending_irq *p, +static unsigned int gic_find_unused_lr(struct vcpu *v, struct pending_irq *p, unsigned int lr) { unsigned int nr_lrs = gic_get_nr_lrs(); - unsigned long *lr_mask = (unsigned long *) &this_cpu(lr_mask); + unsigned long *lr_mask = (unsigned long *)&this_cpu(lr_mask); struct gic_lr lr_val; ASSERT(spin_is_locked(&v->arch.vgic.lock)); @@ -120,7 +120,7 @@ static unsigned int gic_find_unused_lr(struct vcpu *v, { unsigned int used_lr; - for_each_set_bit(used_lr, lr_mask, nr_lrs) + for_each_set_bit (used_lr, lr_mask, nr_lrs) { gic_hw_ops->read_lr(used_lr, &lr_val); if ( lr_val.virq == p->irq ) @@ -134,7 +134,7 @@ static unsigned int gic_find_unused_lr(struct vcpu *v, } void gic_raise_guest_irq(struct vcpu *v, unsigned int virtual_irq, - unsigned int priority) + unsigned int priority) { int i; unsigned int nr_lrs = gic_get_nr_lrs(); @@ -150,7 +150,8 @@ void gic_raise_guest_irq(struct vcpu *v, unsigned int virtual_irq, { i = gic_find_unused_lr(v, p, 0); - if (i < nr_lrs) { + if ( i < nr_lrs ) + { set_bit(i, &this_cpu(lr_mask)); gic_set_lr(i, p, GICH_LR_PENDING); return; @@ -201,17 +202,22 @@ static void gic_update_one_lr(struct vcpu *v, int i) gic_hw_ops->write_lr(i, &lr_val); } else - gdprintk(XENLOG_WARNING, "unable to inject hw irq=%d into %pv: already active in LR%d\n", + gdprintk(XENLOG_WARNING, + "unable to inject hw irq=%d into %pv: already active " + "in LR%d\n", irq, v, i); } } else if ( lr_val.pending ) { - int q __attribute__ ((unused)) = test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status); + int q __attribute__((unused)) = + test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status); #ifdef GIC_DEBUG if ( q ) - gdprintk(XENLOG_DEBUG, "trying to inject irq=%d into %pv, when it is already pending in LR%d\n", - irq, v, i); + gdprintk(XENLOG_DEBUG, + "trying to inject irq=%d into %pv, when it is already " + "pending in LR%d\n", + irq, v, i); #endif } else @@ -230,7 +236,8 @@ static void gic_update_one_lr(struct vcpu *v, int i) test_bit(GIC_IRQ_GUEST_QUEUED, &p->status) && !test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) ) gic_raise_guest_irq(v, irq, p->priority); - else { + else + { list_del_init(&p->inflight); /* * Remove from inflight, then change physical affinity. 
It @@ -265,8 +272,9 @@ void vgic_sync_from_lrs(struct vcpu *v) spin_lock_irqsave(&v->arch.vgic.lock, flags); - while ((i = find_next_bit((const unsigned long *) &this_cpu(lr_mask), - nr_lrs, i)) < nr_lrs ) { + while ( (i = find_next_bit((const unsigned long *)&this_cpu(lr_mask), + nr_lrs, i)) < nr_lrs ) + { gic_update_one_lr(v, i); i++; } @@ -289,13 +297,13 @@ static void gic_restore_pending_irqs(struct vcpu *v) goto out; inflight_r = &v->arch.vgic.inflight_irqs; - list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue ) + list_for_each_entry_safe(p, t, &v->arch.vgic.lr_pending, lr_queue) { lr = gic_find_unused_lr(v, p, lr); if ( lr >= nr_lrs ) { /* No more free LRs: find a lower priority irq to evict */ - list_for_each_entry_reverse( p_r, inflight_r, inflight ) + list_for_each_entry_reverse(p_r, inflight_r, inflight) { if ( p_r->priority == p->priority ) goto out; @@ -307,7 +315,7 @@ static void gic_restore_pending_irqs(struct vcpu *v) * time, so quit */ goto out; -found: + found: lr = p_r->lr; p_r->lr = GIC_INVALID_LR; set_bit(GIC_IRQ_GUEST_QUEUED, &p_r->status); @@ -337,7 +345,7 @@ void gic_clear_pending_irqs(struct vcpu *v) ASSERT(spin_is_locked(&v->arch.vgic.lock)); v->arch.lr_mask = 0; - list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue ) + list_for_each_entry_safe(p, t, &v->arch.vgic.lr_pending, lr_queue) gic_remove_from_lr_pending(v, p); } @@ -373,7 +381,7 @@ int vgic_vcpu_pending_irq(struct vcpu *v) /* find the first enabled non-active irq, the queue is already * ordered by priority */ - list_for_each_entry( p, &v->arch.vgic.inflight_irqs, inflight ) + list_for_each_entry (p, &v->arch.vgic.inflight_irqs, inflight) { if ( GIC_PRI_TO_GUEST(p->priority) >= mask_priority ) goto out; @@ -405,10 +413,10 @@ void gic_dump_vgic_info(struct vcpu *v) { struct pending_irq *p; - list_for_each_entry ( p, &v->arch.vgic.inflight_irqs, inflight ) + list_for_each_entry (p, &v->arch.vgic.inflight_irqs, inflight) printk("Inflight irq=%u lr=%u\n", p->irq, p->lr); - list_for_each_entry( p, &v->arch.vgic.lr_pending, lr_queue ) + list_for_each_entry (p, &v->arch.vgic.lr_pending, lr_queue) printk("Pending irq=%d\n", p->irq); } @@ -451,8 +459,7 @@ int vgic_connect_hw_irq(struct domain *d, struct vcpu *v, unsigned int virq, if ( connect ) { /* The VIRQ should not be already enabled by the guest */ - if ( !p->desc && - !test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) ) + if ( !p->desc && !test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) ) p->desc = desc; else ret = -EBUSY; diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c index 6cc7dec706..a01e1cec8c 100644 --- a/xen/arch/arm/gic.c +++ b/xen/arch/arm/gic.c @@ -56,7 +56,7 @@ static void clear_cpu_lr_mask(void) enum gic_version gic_hw_version(void) { - return gic_hw_ops->info->hw_version; + return gic_hw_ops->info->hw_version; } unsigned int gic_number_lines(void) @@ -113,8 +113,9 @@ static void gic_set_irq_priority(struct irq_desc *desc, unsigned int priority) */ void gic_route_irq_to_xen(struct irq_desc *desc, unsigned int priority) { - ASSERT(priority <= 0xff); /* Only 8 bits of priority */ - ASSERT(desc->irq < gic_number_lines());/* Can't route interrupts that don't exist */ + ASSERT(priority <= 0xff); /* Only 8 bits of priority */ + ASSERT(desc->irq < + gic_number_lines()); /* Can't route interrupts that don't exist */ ASSERT(test_bit(_IRQ_DISABLED, &desc->status)); ASSERT(spin_is_locked(&desc->lock)); @@ -195,8 +196,7 @@ int gic_remove_irq_from_guest(struct domain *d, unsigned int virq, } int gic_irq_xlate(const u32 *intspec, 
unsigned int intsize, - unsigned int *out_hwirq, - unsigned int *out_type) + unsigned int *out_hwirq, unsigned int *out_type) { if ( intsize < 3 ) return -EINVAL; @@ -229,7 +229,7 @@ static void __init gic_dt_preinit(void) struct dt_device_node *node; uint8_t num_gics = 0; - dt_for_each_device_node( dt_host, node ) + dt_for_each_device_node (dt_host, node) { if ( !dt_get_property(node, "interrupt-controller", NULL) ) continue; @@ -269,7 +269,9 @@ static void __init gic_acpi_preinit(void) panic("Unable to find compatible GIC in the ACPI table\n"); } #else -static void __init gic_acpi_preinit(void) { } +static void __init gic_acpi_preinit(void) +{ +} #endif /* Find the interrupt controller and set up the callback to translate @@ -313,9 +315,9 @@ void send_SGI_self(enum gic_sgi sgi) void send_SGI_allbutself(enum gic_sgi sgi) { - ASSERT(sgi < 16); /* There are only 16 SGIs */ + ASSERT(sgi < 16); /* There are only 16 SGIs */ - gic_hw_ops->send_SGI(sgi, SGI_TARGET_OTHERS, NULL); + gic_hw_ops->send_SGI(sgi, SGI_TARGET_OTHERS, NULL); } void smp_send_state_dump(unsigned int cpu) @@ -380,7 +382,7 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq) { unsigned int irq; - do { + do { /* Reading IRQ will ACK it */ irq = gic_hw_ops->read_irq(); @@ -407,10 +409,11 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq) local_irq_disable(); break; } - } while (1); + } while ( 1 ); } -static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs) +static void maintenance_interrupt(int irq, void *dev_id, + struct cpu_user_regs *regs) { /* * This is a dummy interrupt handler. @@ -428,7 +431,8 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r void gic_dump_info(struct vcpu *v) { - printk("GICH_LRs (vcpu %d) mask=%"PRIx64"\n", v->vcpu_id, v->arch.lr_mask); + printk("GICH_LRs (vcpu %d) mask=%" PRIx64 "\n", v->vcpu_id, + v->arch.lr_mask); gic_hw_ops->dump_state(v); } @@ -439,8 +443,7 @@ void init_maintenance_interrupt(void) } int gic_make_hwdom_dt_node(const struct domain *d, - const struct dt_device_node *gic, - void *fdt) + const struct dt_device_node *gic, void *fdt) { ASSERT(gic == dt_interrupt_controller); @@ -456,10 +459,10 @@ unsigned long gic_get_hwdom_madt_size(const struct domain *d) { unsigned long madt_size; - madt_size = sizeof(struct acpi_table_madt) - + sizeof(struct acpi_madt_generic_interrupt) * d->max_vcpus - + sizeof(struct acpi_madt_generic_distributor) - + gic_hw_ops->get_hwdom_extra_madt_size(d); + madt_size = sizeof(struct acpi_table_madt) + + sizeof(struct acpi_madt_generic_interrupt) * d->max_vcpus + + sizeof(struct acpi_madt_generic_distributor) + + gic_hw_ops->get_hwdom_extra_madt_size(d); return madt_size; } @@ -469,11 +472,10 @@ int gic_iomem_deny_access(const struct domain *d) return gic_hw_ops->iomem_deny_access(d); } -static int cpu_gic_callback(struct notifier_block *nfb, - unsigned long action, +static int cpu_gic_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { - switch ( action ) + switch (action) { case CPU_DYING: /* This is reverting the work done in init_maintenance_interrupt */ diff --git a/xen/arch/arm/guest_walk.c b/xen/arch/arm/guest_walk.c index 7db7a7321b..6f11d951e5 100644 --- a/xen/arch/arm/guest_walk.c +++ b/xen/arch/arm/guest_walk.c @@ -28,8 +28,7 @@ * page table on a different vCPU, the following registers would need to be * loaded: TCR_EL1, TTBR0_EL1, TTBR1_EL1, and SCTLR_EL1. 
*/ -static bool guest_walk_sd(const struct vcpu *v, - vaddr_t gva, paddr_t *ipa, +static bool guest_walk_sd(const struct vcpu *v, vaddr_t gva, paddr_t *ipa, unsigned int *perms) { int ret; @@ -95,11 +94,12 @@ static bool guest_walk_sd(const struct vcpu *v, paddr |= (gva & mask) >> 18; /* Access the guest's memory to read only one PTE. */ - ret = access_guest_memory_by_ipa(d, paddr, &pte, sizeof(short_desc_t), false); + ret = + access_guest_memory_by_ipa(d, paddr, &pte, sizeof(short_desc_t), false); if ( ret ) return false; - switch ( pte.walk.dt ) + switch (pte.walk.dt) { case L1DESC_INVALID: return false; @@ -120,7 +120,8 @@ static bool guest_walk_sd(const struct vcpu *v, paddr = ((paddr_t)pte.walk.base << 10) | ((gva & mask) >> 10); /* Access the guest's memory to read only one PTE. */ - ret = access_guest_memory_by_ipa(d, paddr, &pte, sizeof(short_desc_t), false); + ret = access_guest_memory_by_ipa(d, paddr, &pte, sizeof(short_desc_t), + false); if ( ret ) return false; @@ -130,7 +131,8 @@ static bool guest_walk_sd(const struct vcpu *v, if ( pte.pg.page ) /* Small page. */ { mask = (1ULL << L2DESC_SMALL_PAGE_SHIFT) - 1; - *ipa = ((paddr_t)pte.pg.base << L2DESC_SMALL_PAGE_SHIFT) | (gva & mask); + *ipa = ((paddr_t)pte.pg.base << L2DESC_SMALL_PAGE_SHIFT) | + (gva & mask); /* Set execute permissions associated with the small page. */ if ( !pte.pg.xn ) @@ -139,7 +141,8 @@ static bool guest_walk_sd(const struct vcpu *v, else /* Large page. */ { mask = (1ULL << L2DESC_LARGE_PAGE_SHIFT) - 1; - *ipa = ((paddr_t)pte.lpg.base << L2DESC_LARGE_PAGE_SHIFT) | (gva & mask); + *ipa = ((paddr_t)pte.lpg.base << L2DESC_LARGE_PAGE_SHIFT) | + (gva & mask); /* Set execute permissions associated with the large page. */ if ( !pte.lpg.xn ) @@ -157,15 +160,18 @@ static bool guest_walk_sd(const struct vcpu *v, if ( !pte.sec.supersec ) /* Section */ { mask = (1ULL << L1DESC_SECTION_SHIFT) - 1; - *ipa = ((paddr_t)pte.sec.base << L1DESC_SECTION_SHIFT) | (gva & mask); + *ipa = + ((paddr_t)pte.sec.base << L1DESC_SECTION_SHIFT) | (gva & mask); } else /* Supersection */ { mask = (1ULL << L1DESC_SUPERSECTION_SHIFT) - 1; *ipa = gva & mask; *ipa |= (paddr_t)(pte.supersec.base) << L1DESC_SUPERSECTION_SHIFT; - *ipa |= (paddr_t)(pte.supersec.extbase1) << L1DESC_SUPERSECTION_EXT_BASE1_SHIFT; - *ipa |= (paddr_t)(pte.supersec.extbase2) << L1DESC_SUPERSECTION_EXT_BASE2_SHIFT; + *ipa |= (paddr_t)(pte.supersec.extbase1) + << L1DESC_SUPERSECTION_EXT_BASE1_SHIFT; + *ipa |= (paddr_t)(pte.supersec.extbase2) + << L1DESC_SUPERSECTION_EXT_BASE2_SHIFT; } /* Set permissions so that the caller can check the flags by herself. */ @@ -189,14 +195,9 @@ static int get_ipa_output_size(struct domain *d, register_t tcr, register_t ips; static const unsigned int ipa_sizes[7] = { - TCR_EL1_IPS_32_BIT_VAL, - TCR_EL1_IPS_36_BIT_VAL, - TCR_EL1_IPS_40_BIT_VAL, - TCR_EL1_IPS_42_BIT_VAL, - TCR_EL1_IPS_44_BIT_VAL, - TCR_EL1_IPS_48_BIT_VAL, - TCR_EL1_IPS_52_BIT_VAL - }; + TCR_EL1_IPS_32_BIT_VAL, TCR_EL1_IPS_36_BIT_VAL, TCR_EL1_IPS_40_BIT_VAL, + TCR_EL1_IPS_42_BIT_VAL, TCR_EL1_IPS_44_BIT_VAL, TCR_EL1_IPS_48_BIT_VAL, + TCR_EL1_IPS_52_BIT_VAL}; if ( is_64bit_domain(d) ) { @@ -222,14 +223,16 @@ static int get_ipa_output_size(struct domain *d, register_t tcr, } /* Normalized page granule size indices. */ -enum granule_size_index { +enum granule_size_index +{ GRANULE_SIZE_INDEX_4K, GRANULE_SIZE_INDEX_16K, GRANULE_SIZE_INDEX_64K }; /* Represent whether TTBR0 or TTBR1 is active. 
*/ -enum active_ttbr { +enum active_ttbr +{ TTBR0_ACTIVE, TTBR1_ACTIVE }; @@ -248,7 +251,7 @@ static bool get_ttbr_and_gran_64bit(uint64_t *ttbr, unsigned int *gran, if ( ttbrx == TTBR0_ACTIVE ) { /* Normalize granule size. */ - switch ( tcr & TCR_TG0_MASK ) + switch (tcr & TCR_TG0_MASK) { case TCR_TG0_16K: *gran = GRANULE_SIZE_INDEX_16K; @@ -279,7 +282,7 @@ static bool get_ttbr_and_gran_64bit(uint64_t *ttbr, unsigned int *gran, else { /* Normalize granule size. */ - switch ( tcr & TCR_EL1_TG1_MASK ) + switch (tcr & TCR_EL1_TG1_MASK) { case TCR_EL1_TG1_16K: *gran = GRANULE_SIZE_INDEX_16K; @@ -355,8 +358,7 @@ static bool check_base_size(unsigned int output_size, uint64_t base) * page table on a different vCPU, the following registers would need to be * loaded: TCR_EL1, TTBR0_EL1, TTBR1_EL1, and SCTLR_EL1. */ -static bool guest_walk_ld(const struct vcpu *v, - vaddr_t gva, paddr_t *ipa, +static bool guest_walk_ld(const struct vcpu *v, vaddr_t gva, paddr_t *ipa, unsigned int *perms) { int ret; @@ -371,43 +373,29 @@ static bool guest_walk_ld(const struct vcpu *v, register_t tcr = READ_SYSREG(TCR_EL1); struct domain *d = v->domain; -#define OFFSETS(gva, gran) \ -{ \ - zeroeth_table_offset_##gran(gva), \ - first_table_offset_##gran(gva), \ - second_table_offset_##gran(gva), \ - third_table_offset_##gran(gva) \ -} +#define OFFSETS(gva, gran) \ + { \ + zeroeth_table_offset_##gran(gva), first_table_offset_##gran(gva), \ + second_table_offset_##gran(gva), third_table_offset_##gran(gva) \ + } - const paddr_t offsets[3][4] = { - OFFSETS(gva, 4K), - OFFSETS(gva, 16K), - OFFSETS(gva, 64K) - }; + const paddr_t offsets[3][4] = {OFFSETS(gva, 4K), OFFSETS(gva, 16K), + OFFSETS(gva, 64K)}; #undef OFFSETS -#define MASKS(gran) \ -{ \ - zeroeth_size(gran) - 1, \ - first_size(gran) - 1, \ - second_size(gran) - 1, \ - third_size(gran) - 1 \ -} +#define MASKS(gran) \ + { \ + zeroeth_size(gran) - 1, first_size(gran) - 1, second_size(gran) - 1, \ + third_size(gran) - 1 \ + } - static const paddr_t masks[3][4] = { - MASKS(4K), - MASKS(16K), - MASKS(64K) - }; + static const paddr_t masks[3][4] = {MASKS(4K), MASKS(16K), MASKS(64K)}; #undef MASKS - static const unsigned int grainsizes[3] = { - PAGE_SHIFT_4K, - PAGE_SHIFT_16K, - PAGE_SHIFT_64K - }; + static const unsigned int grainsizes[3] = {PAGE_SHIFT_4K, PAGE_SHIFT_16K, + PAGE_SHIFT_64K}; t0_sz = (tcr >> TCR_T0SZ_SHIFT) & TCR_SZ_MASK; t1_sz = (tcr >> TCR_T1SZ_SHIFT) & TCR_SZ_MASK; @@ -417,7 +405,8 @@ static bool guest_walk_ld(const struct vcpu *v, if ( is_64bit_domain(d) ) { - /* Select the TTBR(0|1)_EL1 that will be used for address translation. */ + /* Select the TTBR(0|1)_EL1 that will be used for address translation. + */ if ( (gva & BIT_ULL(topbit)) == 0 ) { @@ -449,7 +438,8 @@ static bool guest_walk_ld(const struct vcpu *v, /* Granule size of AArch32 architectures is always 4K. */ gran = GRANULE_SIZE_INDEX_4K; - /* Select the TTBR(0|1)_EL1 that will be used for address translation. */ + /* Select the TTBR(0|1)_EL1 that will be used for address translation. + */ /* * Check if the bits <31:32-t0_sz> of the GVA are set to 0 (DDI 0487B.a @@ -493,7 +483,8 @@ static bool guest_walk_ld(const struct vcpu *v, * The starting level is the number of strides (grainsizes[gran] - 3) * needed to consume the input address (ARM DDI 0487B.a J1-5924). */ - level = 4 - DIV_ROUND_UP((input_size - grainsizes[gran]), (grainsizes[gran] - 3)); + level = 4 - DIV_ROUND_UP((input_size - grainsizes[gran]), + (grainsizes[gran] - 3)); /* Get the IPA output_size. 
*/ ret = get_ipa_output_size(d, tcr, &output_size); @@ -512,7 +503,7 @@ static bool guest_walk_ld(const struct vcpu *v, mask = GENMASK_ULL(47, grainsizes[gran]); paddr = (ttbr & mask); - for ( ; ; level++ ) + for ( ;; level++ ) { /* * Add offset given by the GVA to the translation table base address. @@ -535,8 +526,7 @@ static bool guest_walk_ld(const struct vcpu *v, * appropriately. */ if ( (output_size < TCR_EL1_IPS_52_BIT_VAL) && - (gran == GRANULE_SIZE_INDEX_64K) && - (pte.walk.base & 0xf) ) + (gran == GRANULE_SIZE_INDEX_64K) && (pte.walk.base & 0xf) ) return false; /* @@ -546,15 +536,16 @@ static bool guest_walk_ld(const struct vcpu *v, * - The PTE is not valid. * - If (level < 3) and the PTE is valid, we found a block descriptor. */ - if ( level == 3 || !lpae_is_valid(pte) || lpae_is_superpage(pte, level) ) + if ( level == 3 || !lpae_is_valid(pte) || + lpae_is_superpage(pte, level) ) break; /* * Temporarily store permissions of the table descriptor as they are * inherited by page table attributes (ARM DDI 0487B.a J1-5928). */ - xn_table |= pte.pt.xnt; /* Execute-Never */ - ro_table |= pte.pt.apt & BIT(1); /* Read-Only */ + xn_table |= pte.pt.xnt; /* Execute-Never */ + ro_table |= pte.pt.apt & BIT(1); /* Read-Only */ /* Compute the base address of the next level translation table. */ mask = GENMASK_ULL(47, grainsizes[gran]); @@ -586,8 +577,8 @@ static bool guest_walk_ld(const struct vcpu *v, return true; } -bool guest_walk_tables(const struct vcpu *v, vaddr_t gva, - paddr_t *ipa, unsigned int *perms) +bool guest_walk_tables(const struct vcpu *v, vaddr_t gva, paddr_t *ipa, + unsigned int *perms) { uint32_t sctlr = READ_SYSREG(SCTLR_EL1); register_t tcr = READ_SYSREG(TCR_EL1); @@ -617,7 +608,7 @@ bool guest_walk_tables(const struct vcpu *v, vaddr_t gva, *ipa = gva; /* Memory can be accessed without any restrictions. 
*/ - *perms = GV2M_READ|GV2M_WRITE|GV2M_EXEC; + *perms = GV2M_READ | GV2M_WRITE | GV2M_EXEC; return true; } diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c index 7a0f3e9d5f..9f07efde7d 100644 --- a/xen/arch/arm/guestcopy.c +++ b/xen/arch/arm/guestcopy.c @@ -5,14 +5,13 @@ #include #include -#define COPY_flush_dcache (1U << 0) -#define COPY_from_guest (0U << 1) -#define COPY_to_guest (1U << 1) -#define COPY_ipa (0U << 2) -#define COPY_linear (1U << 2) +#define COPY_flush_dcache (1U << 0) +#define COPY_from_guest (0U << 1) +#define COPY_to_guest (1U << 1) +#define COPY_ipa (0U << 2) +#define COPY_linear (1U << 2) -typedef union -{ +typedef union { struct { struct vcpu *v; @@ -24,8 +23,8 @@ typedef union } gpa; } copy_info_t; -#define GVA_INFO(vcpu) ((copy_info_t) { .gva = { vcpu } }) -#define GPA_INFO(domain) ((copy_info_t) { .gpa = { domain } }) +#define GVA_INFO(vcpu) ((copy_info_t){.gva = {vcpu}}) +#define GPA_INFO(domain) ((copy_info_t){.gpa = {domain}}) static struct page_info *translate_get_page(copy_info_t info, uint64_t addr, bool linear, bool write) @@ -107,8 +106,8 @@ static unsigned long copy_guest(void *buf, uint64_t addr, unsigned int len, unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len) { - return copy_guest((void *)from, (vaddr_t)to, len, - GVA_INFO(current), COPY_to_guest | COPY_linear); + return copy_guest((void *)from, (vaddr_t)to, len, GVA_INFO(current), + COPY_to_guest | COPY_linear); } unsigned long raw_copy_to_guest_flush_dcache(void *to, const void *from, @@ -124,16 +123,15 @@ unsigned long raw_clear_guest(void *to, unsigned len) COPY_to_guest | COPY_linear); } -unsigned long raw_copy_from_guest(void *to, const void __user *from, unsigned len) +unsigned long raw_copy_from_guest(void *to, const void __user *from, + unsigned len) { return copy_guest(to, (vaddr_t)from, len, GVA_INFO(current), COPY_from_guest | COPY_linear); } -unsigned long copy_to_guest_phys_flush_dcache(struct domain *d, - paddr_t gpa, - void *buf, - unsigned int len) +unsigned long copy_to_guest_phys_flush_dcache(struct domain *d, paddr_t gpa, + void *buf, unsigned int len) { return copy_guest(buf, gpa, len, GPA_INFO(d), COPY_to_guest | COPY_ipa | COPY_flush_dcache); diff --git a/xen/arch/arm/hvm.c b/xen/arch/arm/hvm.c index 76b27c9168..af0a4317b6 100644 --- a/xen/arch/arm/hvm.c +++ b/xen/arch/arm/hvm.c @@ -35,7 +35,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) { long rc = 0; - switch ( op ) + switch (op) { case HVMOP_set_param: case HVMOP_get_param: diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c index ae7ef96981..babc3b2919 100644 --- a/xen/arch/arm/io.c +++ b/xen/arch/arm/io.c @@ -27,8 +27,7 @@ #include "decode.h" static enum io_state handle_read(const struct mmio_handler *handler, - struct vcpu *v, - mmio_info_t *info) + struct vcpu *v, mmio_info_t *info) { const struct hsr_dabt dabt = info->dabt; struct cpu_user_regs *regs = guest_cpu_user_regs(); @@ -65,8 +64,7 @@ static enum io_state handle_read(const struct mmio_handler *handler, } static enum io_state handle_write(const struct mmio_handler *handler, - struct vcpu *v, - mmio_info_t *info) + struct vcpu *v, mmio_info_t *info) { const struct hsr_dabt dabt = info->dabt; struct cpu_user_regs *regs = guest_cpu_user_regs(); @@ -107,17 +105,13 @@ static const struct mmio_handler *find_mmio_handler(struct domain *d, return handler; } -enum io_state try_handle_mmio(struct cpu_user_regs *regs, - const union hsr hsr, +enum io_state try_handle_mmio(struct cpu_user_regs *regs, const union 
hsr hsr, paddr_t gpa) { struct vcpu *v = current; const struct mmio_handler *handler = NULL; const struct hsr_dabt dabt = hsr.dabt; - mmio_info_t info = { - .gpa = gpa, - .dabt = dabt - }; + mmio_info_t info = {.gpa = gpa, .dabt = dabt}; ASSERT(hsr.ec == HSR_EC_DATA_ABORT_LOWER_EL); @@ -133,8 +127,7 @@ enum io_state try_handle_mmio(struct cpu_user_regs *regs, * Erratum 766422: Thumb store translation fault to Hypervisor may * not have correct HSR Rt value. */ - if ( check_workaround_766422() && (regs->cpsr & PSR_THUMB) && - dabt.write ) + if ( check_workaround_766422() && (regs->cpsr & PSR_THUMB) && dabt.write ) { int rc; @@ -152,8 +145,7 @@ enum io_state try_handle_mmio(struct cpu_user_regs *regs, return handle_read(handler, v, &info); } -void register_mmio_handler(struct domain *d, - const struct mmio_handler_ops *ops, +void register_mmio_handler(struct domain *d, const struct mmio_handler_ops *ops, paddr_t addr, paddr_t size, void *priv) { struct vmmio *vmmio = &d->arch.vmmio; diff --git a/xen/arch/arm/irq.c b/xen/arch/arm/irq.c index 7318e16831..b42757d586 100644 --- a/xen/arch/arm/irq.c +++ b/xen/arch/arm/irq.c @@ -35,7 +35,7 @@ static DEFINE_SPINLOCK(local_irqs_type_lock); const int gsx_irq_num = 151; /* Total context count is 8, but the 8th context is always used by host */ -#define GSX_GUESTS_CNT 7 +#define GSX_GUESTS_CNT 7 /* Describe an IRQ assigned to a guest */ struct irq_guest @@ -59,15 +59,13 @@ static void end_none(struct irq_desc *irq) gic_hw_ops->gic_host_irq_type->end(irq); } -hw_irq_controller no_irq_type = { - .typename = "none", - .startup = irq_startup_none, - .shutdown = irq_shutdown_none, - .enable = irq_enable_none, - .disable = irq_disable_none, - .ack = ack_none, - .end = end_none -}; +hw_irq_controller no_irq_type = {.typename = "none", + .startup = irq_startup_none, + .shutdown = irq_shutdown_none, + .enable = irq_enable_none, + .disable = irq_disable_none, + .ack = ack_none, + .end = end_none}; static irq_desc_t irq_desc[NR_IRQS]; static DEFINE_PER_CPU(irq_desc_t[NR_LOCAL_IRQS], local_irq_desc); @@ -77,7 +75,7 @@ irq_desc_t *__irq_to_desc(int irq) if ( irq < NR_LOCAL_IRQS ) return &this_cpu(local_irq_desc)[irq]; - return &irq_desc[irq-NR_LOCAL_IRQS]; + return &irq_desc[irq - NR_LOCAL_IRQS]; } int arch_init_one_irq_desc(struct irq_desc *desc) @@ -86,7 +84,6 @@ int arch_init_one_irq_desc(struct irq_desc *desc) return 0; } - static int __init init_irq_data(void) { int irq; @@ -96,7 +93,7 @@ static int __init init_irq_data(void) struct irq_desc *desc = irq_to_desc(irq); init_one_irq_desc(desc); desc->irq = irq; - desc->action = NULL; + desc->action = NULL; } return 0; @@ -113,7 +110,7 @@ static int init_local_irq_data(void) struct irq_desc *desc = irq_to_desc(irq); init_one_irq_desc(desc); desc->irq = irq; - desc->action = NULL; + desc->action = NULL; /* PPIs are included in local_irqs, we copy the IRQ type from * local_irqs_type when bringing up local IRQ for this CPU in @@ -241,7 +238,7 @@ static void add_gsx_guest(struct domain *d, struct irq_guest *info) for ( i = 0; i < ARRAY_SIZE(info->gsx_guests); i++ ) { if ( info->gsx_guests[i] ) - continue; + continue; info->gsx_guests[i] = d; printk("Added GSX guest domain %u\n", d->domain_id); @@ -295,8 +292,7 @@ void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq) #ifndef NDEBUG if ( !desc->action ) { - printk("Unknown %s %#3.3x\n", - is_fiq ? "FIQ" : "IRQ", irq); + printk("Unknown %s %#3.3x\n", is_fiq ? 
"FIQ" : "IRQ", irq); goto out; } #endif @@ -342,8 +338,7 @@ void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq) spin_unlock_irq(&desc->lock); - do - { + do { action->handler(irq, action->dev_id, regs); action = action->next; } while ( action ); @@ -368,7 +363,7 @@ void release_irq(unsigned int irq, const void *dev_id) desc = irq_to_desc(irq); - spin_lock_irqsave(&desc->lock,flags); + spin_lock_irqsave(&desc->lock, flags); action_ptr = &desc->action; for ( ;; ) @@ -397,10 +392,12 @@ void release_irq(unsigned int irq, const void *dev_id) clear_bit(_IRQ_GUEST, &desc->status); } - spin_unlock_irqrestore(&desc->lock,flags); + spin_unlock_irqrestore(&desc->lock, flags); /* Wait to make sure it's not being used on another CPU */ - do { smp_mb(); } while ( test_bit(_IRQ_INPROGRESS, &desc->status) ); + do { + smp_mb(); + } while ( test_bit(_IRQ_INPROGRESS, &desc->status) ); if ( action->free_on_release ) xfree(action); @@ -417,7 +414,8 @@ static int __setup_irq(struct irq_desc *desc, unsigned int irqflags, * - if the IRQ is marked as shared * - dev_id is not NULL when IRQF_SHARED is set */ - if ( desc->action != NULL && (!test_bit(_IRQF_SHARED, &desc->status) || !shared) ) + if ( desc->action != NULL && + (!test_bit(_IRQF_SHARED, &desc->status) || !shared) ) return -EINVAL; if ( shared && new->dev_id == NULL ) return -EINVAL; @@ -502,8 +500,8 @@ bool irq_type_set_by_domain(const struct domain *d) * Route an IRQ to a specific guest. * For now only SPIs are assignable to the guest. */ -int route_irq_to_guest(struct domain *d, unsigned int virq, - unsigned int irq, const char * devname) +int route_irq_to_guest(struct domain *d, unsigned int virq, unsigned int irq, + const char *devname) { struct irqaction *action; struct irq_guest *info; @@ -706,7 +704,7 @@ void pirq_set_affinity(struct domain *d, int pirq, const cpumask_t *mask) static bool irq_validate_new_type(unsigned int curr, unsigned new) { - return (curr == IRQ_TYPE_INVALID || curr == new ); + return (curr == IRQ_TYPE_INVALID || curr == new); } int irq_set_spi_type(unsigned int spi, unsigned int type) @@ -757,7 +755,7 @@ static int irq_local_set_type(unsigned int irq, unsigned int type) local_irqs_type[irq] = type; - for_each_cpu( cpu, &cpu_online_map ) + for_each_cpu (cpu, &cpu_online_map) { desc = &per_cpu(local_irq_desc, cpu)[irq]; spin_lock_irqsave(&desc->lock, flags); diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c index d04a862f99..90ffde0fd2 100644 --- a/xen/arch/arm/kernel.c +++ b/xen/arch/arm/kernel.c @@ -18,20 +18,21 @@ #include #include -#define UIMAGE_MAGIC 0x27051956 -#define UIMAGE_NMLEN 32 +#define UIMAGE_MAGIC 0x27051956 +#define UIMAGE_NMLEN 32 #define ZIMAGE32_MAGIC_OFFSET 0x24 #define ZIMAGE32_START_OFFSET 0x28 -#define ZIMAGE32_END_OFFSET 0x2c -#define ZIMAGE32_HEADER_LEN 0x30 +#define ZIMAGE32_END_OFFSET 0x2c +#define ZIMAGE32_HEADER_LEN 0x30 #define ZIMAGE32_MAGIC 0x016f2818 #define ZIMAGE64_MAGIC_V0 0x14000008 #define ZIMAGE64_MAGIC_V1 0x644d5241 /* "ARM\x64" */ -struct minimal_dtb_header { +struct minimal_dtb_header +{ uint32_t magic; uint32_t total_size; /* There are other fields but we don't use them yet. 
*/ @@ -49,10 +50,11 @@ void __init copy_from_paddr(void *dst, paddr_t paddr, unsigned long len) { void *src = (void *)FIXMAP_ADDR(FIXMAP_MISC); - while (len) { + while ( len ) + { unsigned long l, s; - s = paddr & (PAGE_SIZE-1); + s = paddr & (PAGE_SIZE - 1); l = min(PAGE_SIZE - s, len); set_fixmap(FIXMAP_MISC, maddr_to_mfn(paddr), PAGE_HYPERVISOR_WC); @@ -67,8 +69,8 @@ void __init copy_from_paddr(void *dst, paddr_t paddr, unsigned long len) clear_fixmap(FIXMAP_MISC); } -static void __init place_modules(struct kernel_info *info, - paddr_t kernbase, paddr_t kernend) +static void __init place_modules(struct kernel_info *info, paddr_t kernbase, + paddr_t kernend) { /* Align DTB and initrd size to 2Mb. Linux only requires 4 byte alignment */ const struct bootmodule *mod = info->initrd_bootmodule; @@ -86,7 +88,8 @@ static void __init place_modules(struct kernel_info *info, paddr_t modbase; if ( modsize + kernsize > ramsize ) - panic("Not enough memory in the first bank for the kernel+dtb+initrd\n"); + panic( + "Not enough memory in the first bank for the kernel+dtb+initrd\n"); /* * DTB must be loaded such that it does not conflict with the @@ -165,15 +168,14 @@ static void __init kernel_zimage_load(struct kernel_info *info) place_modules(info, load_addr, load_addr + len); - printk("Loading zImage from %"PRIpaddr" to %"PRIpaddr"-%"PRIpaddr"\n", + printk("Loading zImage from %" PRIpaddr " to %" PRIpaddr "-%" PRIpaddr "\n", paddr, load_addr, load_addr + len); kernel = ioremap_wc(paddr, len); if ( !kernel ) panic("Unable to map the hwdom kernel\n"); - rc = copy_to_guest_phys_flush_dcache(info->d, load_addr, - kernel, len); + rc = copy_to_guest_phys_flush_dcache(info->d, load_addr, kernel, len); if ( rc != 0 ) panic("Unable to copy the kernel in the hwdom memory\n"); @@ -183,27 +185,28 @@ static void __init kernel_zimage_load(struct kernel_info *info) /* * Uimage CPU Architecture Codes */ -#define IH_ARCH_ARM 2 /* ARM */ -#define IH_ARCH_ARM64 22 /* ARM64 */ +#define IH_ARCH_ARM 2 /* ARM */ +#define IH_ARCH_ARM64 22 /* ARM64 */ /* * Check if the image is a uImage and setup kernel_info */ -static int __init kernel_uimage_probe(struct kernel_info *info, - paddr_t addr, paddr_t size) +static int __init kernel_uimage_probe(struct kernel_info *info, paddr_t addr, + paddr_t size) { - struct { - __be32 magic; /* Image Header Magic Number */ - __be32 hcrc; /* Image Header CRC Checksum */ - __be32 time; /* Image Creation Timestamp */ - __be32 size; /* Image Data Size */ - __be32 load; /* Data Load Address */ - __be32 ep; /* Entry Point Address */ - __be32 dcrc; /* Image Data CRC Checksum */ - uint8_t os; /* Operating System */ - uint8_t arch; /* CPU architecture */ - uint8_t type; /* Image Type */ - uint8_t comp; /* Compression Type */ + struct + { + __be32 magic; /* Image Header Magic Number */ + __be32 hcrc; /* Image Header CRC Checksum */ + __be32 time; /* Image Creation Timestamp */ + __be32 size; /* Image Data Size */ + __be32 load; /* Data Load Address */ + __be32 ep; /* Entry Point Address */ + __be32 dcrc; /* Image Data CRC Checksum */ + uint8_t os; /* Operating System */ + uint8_t arch; /* CPU architecture */ + uint8_t type; /* Image Type */ + uint8_t comp; /* Compression Type */ uint8_t name[UIMAGE_NMLEN]; /* Image Name */ } uimage; @@ -229,7 +232,7 @@ static int __init kernel_uimage_probe(struct kernel_info *info, info->load = kernel_zimage_load; #ifdef CONFIG_ARM_64 - switch ( uimage.arch ) + switch (uimage.arch) { case IH_ARCH_ARM: info->type = DOMAIN_32BIT; @@ -286,7 +289,8 @@ static __init int 
kernel_decompress(struct bootmodule *mod) return -ENOMEM; } mfn = page_to_mfn(pages); - output = __vmap(&mfn, 1 << kernel_order_out, 1, 1, PAGE_HYPERVISOR, VMAP_DEFAULT); + output = __vmap(&mfn, 1 << kernel_order_out, 1, 1, PAGE_HYPERVISOR, + VMAP_DEFAULT); rc = perform_gunzip(output, input, size); clean_dcache_va_range(output, output_size); @@ -317,14 +321,15 @@ static __init int kernel_decompress(struct bootmodule *mod) /* * Check if the image is a 64-bit Image. */ -static int __init kernel_zimage64_probe(struct kernel_info *info, - paddr_t addr, paddr_t size) +static int __init kernel_zimage64_probe(struct kernel_info *info, paddr_t addr, + paddr_t size) { /* linux/Documentation/arm64/booting.txt */ - struct { + struct + { uint32_t magic0; uint32_t res0; - uint64_t text_offset; /* Image load offset */ + uint64_t text_offset; /* Image load offset */ uint64_t res1; uint64_t res2; /* zImage V1 only from here */ @@ -371,10 +376,10 @@ static int __init kernel_zimage64_probe(struct kernel_info *info, /* * Check if the image is a 32-bit zImage and setup kernel_info */ -static int __init kernel_zimage32_probe(struct kernel_info *info, - paddr_t addr, paddr_t size) +static int __init kernel_zimage32_probe(struct kernel_info *info, paddr_t addr, + paddr_t size) { - uint32_t zimage[ZIMAGE32_HEADER_LEN/4]; + uint32_t zimage[ZIMAGE32_HEADER_LEN / 4]; uint32_t start, end; struct minimal_dtb_header dtb_hdr; @@ -383,11 +388,11 @@ static int __init kernel_zimage32_probe(struct kernel_info *info, copy_from_paddr(zimage, addr, sizeof(zimage)); - if (zimage[ZIMAGE32_MAGIC_OFFSET/4] != ZIMAGE32_MAGIC) + if ( zimage[ZIMAGE32_MAGIC_OFFSET / 4] != ZIMAGE32_MAGIC ) return -EINVAL; - start = zimage[ZIMAGE32_START_OFFSET/4]; - end = zimage[ZIMAGE32_END_OFFSET/4]; + start = zimage[ZIMAGE32_START_OFFSET / 4]; + end = zimage[ZIMAGE32_END_OFFSET / 4]; if ( (end - start) > size ) return -EINVAL; @@ -398,7 +403,8 @@ static int __init kernel_zimage32_probe(struct kernel_info *info, if ( addr + end - start + sizeof(dtb_hdr) <= size ) { copy_from_paddr(&dtb_hdr, addr + end - start, sizeof(dtb_hdr)); - if (be32_to_cpu(dtb_hdr.magic) == DTB_MAGIC) { + if ( be32_to_cpu(dtb_hdr.magic) == DTB_MAGIC ) + { end += be32_to_cpu(dtb_hdr.total_size); if ( end > addr + size ) @@ -456,8 +462,8 @@ int __init kernel_probe(struct kernel_info *info, val = dt_get_property(node, "reg", &len); dt_get_range(&val, node, &kernel_addr, &size); - mod = boot_module_find_by_addr_and_kind( - BOOTMOD_KERNEL, kernel_addr); + mod = boot_module_find_by_addr_and_kind(BOOTMOD_KERNEL, + kernel_addr); info->kernel_bootmodule = mod; } else if ( dt_device_is_compatible(node, "multiboot,ramdisk") ) @@ -468,7 +474,7 @@ int __init kernel_probe(struct kernel_info *info, val = dt_get_property(node, "reg", &len); dt_get_range(&val, node, &initrd_addr, &size); info->initrd_bootmodule = boot_module_find_by_addr_and_kind( - BOOTMOD_RAMDISK, initrd_addr); + BOOTMOD_RAMDISK, initrd_addr); } else continue; @@ -484,23 +490,23 @@ int __init kernel_probe(struct kernel_info *info, return -ENOENT; } - printk("Loading Dom%pd kernel from boot module @ %"PRIpaddr"\n", - info->d, info->kernel_bootmodule->start); + printk("Loading Dom%pd kernel from boot module @ %" PRIpaddr "\n", info->d, + info->kernel_bootmodule->start); if ( info->initrd_bootmodule ) - printk("Loading ramdisk from boot module @ %"PRIpaddr"\n", + printk("Loading ramdisk from boot module @ %" PRIpaddr "\n", info->initrd_bootmodule->start); /* if it is a gzip'ed image, 32bit or 64bit, uncompress it */ rc = 
kernel_decompress(mod); - if (rc < 0 && rc != -EINVAL) + if ( rc < 0 && rc != -EINVAL ) return rc; #ifdef CONFIG_ARM_64 rc = kernel_zimage64_probe(info, mod->start, mod->size); - if (rc < 0) + if ( rc < 0 ) #endif rc = kernel_uimage_probe(info, mod->start, mod->size); - if (rc < 0) + if ( rc < 0 ) rc = kernel_zimage32_probe(info, mod->start, mod->size); return rc; diff --git a/xen/arch/arm/livepatch.c b/xen/arch/arm/livepatch.c index 279d52cc6c..ff0fec6fe4 100644 --- a/xen/arch/arm/livepatch.c +++ b/xen/arch/arm/livepatch.c @@ -33,12 +33,13 @@ int arch_livepatch_quiesce(void) * The text section is read-only. So re-map Xen to be able to patch * the code. */ - vmap_of_xen_text = __vmap(&text_mfn, 1U << text_order, 1, 1, PAGE_HYPERVISOR, - VMAP_DEFAULT); + vmap_of_xen_text = __vmap(&text_mfn, 1U << text_order, 1, 1, + PAGE_HYPERVISOR, VMAP_DEFAULT); if ( !vmap_of_xen_text ) { - printk(XENLOG_ERR LIVEPATCH "Failed to setup vmap of hypervisor! (order=%u)\n", + printk(XENLOG_ERR LIVEPATCH + "Failed to setup vmap of hypervisor! (order=%u)\n", text_order); return -ENOMEM; } @@ -64,7 +65,7 @@ int arch_livepatch_verify_func(const struct livepatch_func *func) { /* If NOPing only do up to maximum amount we can put in the ->opaque. */ if ( !func->new_addr && (func->new_size > sizeof(func->opaque) || - func->new_size % ARCH_PATCH_INSN_SIZE) ) + func->new_size % ARCH_PATCH_INSN_SIZE) ) return -EOPNOTSUPP; if ( func->old_size < ARCH_PATCH_INSN_SIZE ) @@ -120,7 +121,7 @@ bool arch_livepatch_symbol_ok(const struct livepatch_elf *elf, char p = sym->name[1]; size_t len = strlen(sym->name); - if ( (len >= 3 && (sym->name[2] == '.' )) || (len == 2) ) + if ( (len >= 3 && (sym->name[2] == '.')) || (len == 2) ) { if ( p == 'd' || #ifdef CONFIG_ARM_32 @@ -128,7 +129,7 @@ bool arch_livepatch_symbol_ok(const struct livepatch_elf *elf, #else p == 'x' #endif - ) + ) return false; } } @@ -143,7 +144,7 @@ int arch_livepatch_secure(const void *va, unsigned int pages, enum va_type type) ASSERT(va); ASSERT(pages); - switch ( type ) + switch (type) { case LIVEPATCH_VA_RX: flags = PAGE_HYPERVISOR_RX; diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c index db49372a2c..b49b09aab7 100644 --- a/xen/arch/arm/mem_access.c +++ b/xen/arch/arm/mem_access.c @@ -33,16 +33,8 @@ static int __p2m_get_mem_access(struct domain *d, gfn_t gfn, static const xenmem_access_t memaccess[] = { #define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac - ACCESS(n), - ACCESS(r), - ACCESS(w), - ACCESS(rw), - ACCESS(x), - ACCESS(rx), - ACCESS(wx), - ACCESS(rwx), - ACCESS(rx2rw), - ACCESS(n2rwx), + ACCESS(n), ACCESS(r), ACCESS(w), ACCESS(rw), ACCESS(x), + ACCESS(rx), ACCESS(wx), ACCESS(rwx), ACCESS(rx2rw), ACCESS(n2rwx), #undef ACCESS }; @@ -98,9 +90,9 @@ static int __p2m_get_mem_access(struct domain *d, gfn_t gfn, * Only in these cases we do a software-based type check and fetch the page if * we indeed found a conflicting mem_access setting. */ -struct page_info* -p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag, - const struct vcpu *v) +struct page_info *p2m_mem_access_check_and_get_page(vaddr_t gva, + unsigned long flag, + const struct vcpu *v) { long rc; unsigned int perms; @@ -156,21 +148,22 @@ p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag, goto err; /* Let's check if mem_access limited the access. */ - switch ( xma ) + switch (xma) { default: case XENMEM_access_rwx: case XENMEM_access_rw: /* - * If mem_access contains no rw perm restrictions at all then the original - * fault was correct. 
+ * If mem_access contains no rw perm restrictions at all then the + * original fault was correct. */ goto err; case XENMEM_access_n2rwx: case XENMEM_access_n: case XENMEM_access_x: /* - * If no r/w is permitted by mem_access, this was a fault caused by mem_access. + * If no r/w is permitted by mem_access, this was a fault caused by + * mem_access. */ break; case XENMEM_access_wx: @@ -242,7 +235,7 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec) return true; /* Now check for mem_access violation. */ - switch ( xma ) + switch (xma) { case XENMEM_access_rwx: violation = false; @@ -279,14 +272,14 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec) /* First, handle rx2rw and n2rwx conversion automatically. */ if ( npfec.write_access && xma == XENMEM_access_rx2rw ) { - rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1, - 0, ~0, XENMEM_access_rw, 0); + rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1, 0, ~0, + XENMEM_access_rw, 0); return false; } else if ( xma == XENMEM_access_n2rwx ) { - rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1, - 0, ~0, XENMEM_access_rwx, 0); + rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1, 0, ~0, + XENMEM_access_rwx, 0); } /* Otherwise, check if there is a vm_event monitor subscriber */ @@ -295,9 +288,10 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec) /* No listener */ if ( p2m->access_required ) { - gdprintk(XENLOG_INFO, "Memory access permissions failure, " - "no vm_event listener VCPU %d, dom %d\n", - v->vcpu_id, v->domain->domain_id); + gdprintk(XENLOG_INFO, + "Memory access permissions failure, " + "no vm_event listener VCPU %d, dom %d\n", + v->vcpu_id, v->domain->domain_id); domain_crash(v->domain); } else @@ -307,8 +301,8 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec) { /* A listener is not required, so clear the access * restrictions. */ - rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1, - 0, ~0, XENMEM_access_rwx, 0); + rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1, 0, ~0, + XENMEM_access_rwx, 0); } } @@ -323,7 +317,7 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec) /* Send request to mem access subscriber */ req->u.mem_access.gfn = gpa >> PAGE_SHIFT; - req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1); + req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1); if ( npfec.gla_valid ) { req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID; @@ -334,9 +328,9 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec) else if ( npfec.kind == npfec_kind_in_gpt ) req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT; } - req->u.mem_access.flags |= npfec.read_access ? MEM_ACCESS_R : 0; - req->u.mem_access.flags |= npfec.write_access ? MEM_ACCESS_W : 0; - req->u.mem_access.flags |= npfec.insn_fetch ? MEM_ACCESS_X : 0; + req->u.mem_access.flags |= npfec.read_access ? MEM_ACCESS_R : 0; + req->u.mem_access.flags |= npfec.write_access ? MEM_ACCESS_W : 0; + req->u.mem_access.flags |= npfec.insn_fetch ? 
MEM_ACCESS_X : 0; if ( monitor_traps(v, (xma != XENMEM_access_n2rwx), req) < 0 ) domain_crash(v->domain); @@ -362,20 +356,12 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr, static const p2m_access_t memaccess[] = { #define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac - ACCESS(n), - ACCESS(r), - ACCESS(w), - ACCESS(rw), - ACCESS(x), - ACCESS(rx), - ACCESS(wx), - ACCESS(rwx), - ACCESS(rx2rw), - ACCESS(n2rwx), + ACCESS(n), ACCESS(r), ACCESS(w), ACCESS(rw), ACCESS(x), + ACCESS(rx), ACCESS(wx), ACCESS(rwx), ACCESS(rx2rw), ACCESS(n2rwx), #undef ACCESS }; - switch ( access ) + switch (access) { case 0 ... ARRAY_SIZE(memaccess) - 1: a = memaccess[access]; @@ -408,7 +394,6 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr, p2m_type_t t; mfn_t mfn = p2m_get_entry(p2m, gfn, &t, NULL, &order, NULL); - if ( !mfn_eq(mfn, INVALID_MFN) ) { order = 0; @@ -441,8 +426,8 @@ long p2m_set_mem_access_multi(struct domain *d, return -EOPNOTSUPP; } -int p2m_get_mem_access(struct domain *d, gfn_t gfn, - xenmem_access_t *access, unsigned int altp2m_idx) +int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access, + unsigned int altp2m_idx) { int ret; struct p2m_domain *p2m = p2m_get_hostp2m(d); diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c index e6c2fee923..fae5b6b454 100644 --- a/xen/arch/arm/mm.c +++ b/xen/arch/arm/mm.c @@ -78,8 +78,8 @@ lpae_t boot_pgtable[LPAE_ENTRIES] __attribute__((__aligned__(4096))); lpae_t boot_first[LPAE_ENTRIES] __attribute__((__aligned__(4096))); lpae_t boot_first_id[LPAE_ENTRIES] __attribute__((__aligned__(4096))); #endif -lpae_t boot_second[LPAE_ENTRIES] __attribute__((__aligned__(4096))); -lpae_t boot_third[LPAE_ENTRIES] __attribute__((__aligned__(4096))); +lpae_t boot_second[LPAE_ENTRIES] __attribute__((__aligned__(4096))); +lpae_t boot_third[LPAE_ENTRIES] __attribute__((__aligned__(4096))); /* Main runtime page tables */ @@ -109,8 +109,8 @@ static DEFINE_PER_CPU(lpae_t *, xen_dommap); /* Root of the trie for cpu0, other CPU's PTs are dynamically allocated */ lpae_t cpu0_pgtable[LPAE_ENTRIES] __attribute__((__aligned__(4096))); /* cpu0's domheap page tables */ -lpae_t cpu0_dommap[LPAE_ENTRIES*DOMHEAP_SECOND_PAGES] - __attribute__((__aligned__(4096*DOMHEAP_SECOND_PAGES))); +lpae_t cpu0_dommap[LPAE_ENTRIES * DOMHEAP_SECOND_PAGES] + __attribute__((__aligned__(4096 * DOMHEAP_SECOND_PAGES))); #endif #ifdef CONFIG_ARM_64 @@ -131,7 +131,7 @@ static __initdata int xenheap_first_first_slot = -1; * addresses from 0 to 0x7fffffff. Offsets into it are calculated * with second_linear_offset(), not second_table_offset(). */ -lpae_t xen_second[LPAE_ENTRIES*2] __attribute__((__aligned__(4096*2))); +lpae_t xen_second[LPAE_ENTRIES * 2] __attribute__((__aligned__(4096 * 2))); /* First level page table used for fixmap */ lpae_t xen_fixmap[LPAE_ENTRIES] __attribute__((__aligned__(4096))); /* First level page table used to map Xen itself with the XN bit set @@ -160,7 +160,8 @@ unsigned long total_pages; extern char __init_begin[], __init_end[]; /* Checking VA memory layout alignment. 
*/ -static inline void check_memory_layout_alignment_constraints(void) { +static inline void check_memory_layout_alignment_constraints(void) +{ /* 2MB aligned regions */ BUILD_BUG_ON(XEN_VIRT_START & ~SECOND_MASK); BUILD_BUG_ON(FIXMAP_ADDR(0) & ~SECOND_MASK); @@ -182,18 +183,14 @@ static inline void check_memory_layout_alignment_constraints(void) { #endif } -void dump_pt_walk(paddr_t ttbr, paddr_t addr, - unsigned int root_level, +void dump_pt_walk(paddr_t ttbr, paddr_t addr, unsigned int root_level, unsigned int nr_root_tables) { - static const char *level_strs[4] = { "0TH", "1ST", "2ND", "3RD" }; + static const char *level_strs[4] = {"0TH", "1ST", "2ND", "3RD"}; const mfn_t root_mfn = maddr_to_mfn(ttbr); const unsigned int offsets[4] = { - zeroeth_table_offset(addr), - first_table_offset(addr), - second_table_offset(addr), - third_table_offset(addr) - }; + zeroeth_table_offset(addr), first_table_offset(addr), + second_table_offset(addr), third_table_offset(addr)}; lpae_t pte, *mapping; unsigned int level, root_table; @@ -223,15 +220,15 @@ void dump_pt_walk(paddr_t ttbr, paddr_t addr, mapping = map_domain_page(mfn_add(root_mfn, root_table)); - for ( level = root_level; ; level++ ) + for ( level = root_level;; level++ ) { if ( offsets[level] > LPAE_ENTRIES ) break; pte = mapping[offsets[level]]; - printk("%s[0x%x] = 0x%"PRIpaddr"\n", - level_strs[level], offsets[level], pte.bits); + printk("%s[0x%x] = 0x%" PRIpaddr "\n", level_strs[level], + offsets[level], pte.bits); if ( level == 3 || !pte.walk.valid || !pte.walk.table ) break; @@ -249,14 +246,14 @@ void dump_hyp_walk(vaddr_t addr) uint64_t ttbr = READ_SYSREG64(TTBR0_EL2); lpae_t *pgtable = THIS_CPU_PGTABLE; - printk("Walking Hypervisor VA 0x%"PRIvaddr" " - "on CPU%d via TTBR 0x%016"PRIx64"\n", + printk("Walking Hypervisor VA 0x%" PRIvaddr " " + "on CPU%d via TTBR 0x%016" PRIx64 "\n", addr, smp_processor_id(), ttbr); if ( smp_processor_id() == 0 ) - BUG_ON( (lpae_t *)(unsigned long)(ttbr - phys_offset) != pgtable ); + BUG_ON((lpae_t *)(unsigned long)(ttbr - phys_offset) != pgtable); else - BUG_ON( virt_to_maddr(pgtable) != ttbr ); + BUG_ON(virt_to_maddr(pgtable) != ttbr); dump_pt_walk(ttbr, addr, HYP_PT_ROOT_LEVEL, 1); } @@ -267,20 +264,20 @@ void dump_hyp_walk(vaddr_t addr) */ static inline lpae_t mfn_to_xen_entry(mfn_t mfn, unsigned attr) { - lpae_t e = (lpae_t) { - .pt = { - .valid = 1, /* Mappings are present */ - .table = 0, /* Set to 1 for links and 4k maps */ - .ai = attr, - .ns = 1, /* Hyp mode is in the non-secure world */ - .up = 1, /* See below */ - .ro = 0, /* Assume read-write */ - .af = 1, /* No need for access tracking */ - .ng = 1, /* Makes TLB flushes easier */ - .contig = 0, /* Assume non-contiguous */ - .xn = 1, /* No need to execute outside .text */ - .avail = 0, /* Reference count for domheap mapping */ - }}; + lpae_t e = + (lpae_t){.pt = { + .valid = 1, /* Mappings are present */ + .table = 0, /* Set to 1 for links and 4k maps */ + .ai = attr, + .ns = 1, /* Hyp mode is in the non-secure world */ + .up = 1, /* See below */ + .ro = 0, /* Assume read-write */ + .af = 1, /* No need for access tracking */ + .ng = 1, /* Makes TLB flushes easier */ + .contig = 0, /* Assume non-contiguous */ + .xn = 1, /* No need to execute outside .text */ + .avail = 0, /* Reference count for domheap mapping */ + }}; /* * For EL2 stage-1 page table, up (aka AP[1]) is RES1 as the translation * regime applies to only one exception level (see D4.4.4 and G4.6.1 @@ -288,7 +285,7 @@ static inline lpae_t mfn_to_xen_entry(mfn_t mfn, unsigned attr) 
* hard-coded values in head.S too. */ - switch ( attr ) + switch (attr) { case MT_NORMAL_NC: /* @@ -317,7 +314,7 @@ static inline lpae_t mfn_to_xen_entry(mfn_t mfn, unsigned attr) e.pt.sh = LPAE_SH_OUTER; break; default: - e.pt.sh = LPAE_SH_INNER; /* Xen mappings are SMP coherent */ + e.pt.sh = LPAE_SH_INNER; /* Xen mappings are SMP coherent */ break; } @@ -352,8 +349,7 @@ void clear_fixmap(unsigned map) * Size must be a multiple of mapping_size. * second must be a contiguous set of second level page tables * covering the region starting at virt_offset. */ -static void __init create_mappings(lpae_t *second, - unsigned long virt_offset, +static void __init create_mappings(lpae_t *second, unsigned long virt_offset, unsigned long base_mfn, unsigned long nr_mfns, unsigned int mapping_size) @@ -371,7 +367,7 @@ static void __init create_mappings(lpae_t *second, p = second + second_linear_offset(virt_offset); pte = mfn_to_xen_entry(_mfn(base_mfn), MT_NORMAL); if ( granularity == 16 * LPAE_ENTRIES ) - pte.pt.contig = 1; /* These maps are in 16-entry contiguous chunks. */ + pte.pt.contig = 1; /* These maps are in 16-entry contiguous chunks. */ for ( i = 0; i < count; i++ ) { write_pte(p + i, pte); @@ -408,11 +404,9 @@ void *map_domain_page(mfn_t mfn) * PTE as a reference count; when the refcount is zero the slot can * be reused. */ for ( slot = (slot_mfn >> LPAE_SHIFT) % DOMHEAP_ENTRIES, i = 0; - i < DOMHEAP_ENTRIES; - slot = (slot + 1) % DOMHEAP_ENTRIES, i++ ) + i < DOMHEAP_ENTRIES; slot = (slot + 1) % DOMHEAP_ENTRIES, i++ ) { - if ( map[slot].pt.avail < 0xf && - map[slot].pt.base == slot_mfn && + if ( map[slot].pt.avail < 0xf && map[slot].pt.base == slot_mfn && map[slot].pt.valid ) { /* This slot already points to the right place; reuse it */ @@ -427,7 +421,6 @@ void *map_domain_page(mfn_t mfn) write_pte(map + slot, pte); break; } - } /* If the map fills up, the callers have misbehaved. 
*/ BUG_ON(i == DOMHEAP_ENTRIES); @@ -447,9 +440,8 @@ void *map_domain_page(mfn_t mfn) local_irq_restore(flags); - va = (DOMHEAP_VIRT_START - + (slot << SECOND_SHIFT) - + ((mfn_x(mfn) & LPAE_ENTRY_MASK) << THIRD_SHIFT)); + va = (DOMHEAP_VIRT_START + (slot << SECOND_SHIFT) + + ((mfn_x(mfn) & LPAE_ENTRY_MASK) << THIRD_SHIFT)); /* * We may not have flushed this specific subpage at map time, @@ -465,7 +457,7 @@ void unmap_domain_page(const void *va) { unsigned long flags; lpae_t *map = this_cpu(xen_dommap); - int slot = ((unsigned long) va - DOMHEAP_VIRT_START) >> SECOND_SHIFT; + int slot = ((unsigned long)va - DOMHEAP_VIRT_START) >> SECOND_SHIFT; local_irq_save(flags); @@ -482,7 +474,7 @@ mfn_t domain_page_map_to_mfn(const void *ptr) unsigned long va = (unsigned long)ptr; lpae_t *map = this_cpu(xen_dommap); int slot = (va - DOMHEAP_VIRT_START) >> SECOND_SHIFT; - unsigned long offset = (va>>THIRD_SHIFT) & LPAE_ENTRY_MASK; + unsigned long offset = (va >> THIRD_SHIFT) & LPAE_ENTRY_MASK; if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END ) return virt_to_mfn(va); @@ -547,7 +539,7 @@ static inline lpae_t pte_of_xenaddr(vaddr_t va) } /* Map the FDT in the early boot page table */ -void * __init early_fdt_map(paddr_t fdt_paddr) +void *__init early_fdt_map(paddr_t fdt_paddr) { /* We are using 2MB superpage for mapping the FDT */ paddr_t base_paddr = fdt_paddr & SECOND_MASK; @@ -585,8 +577,8 @@ void * __init early_fdt_map(paddr_t fdt_paddr) if ( (offset + size) > SZ_2M ) { create_mappings(boot_second, BOOT_FDT_VIRT_START + SZ_2M, - paddr_to_pfn(base_paddr + SZ_2M), - SZ_2M >> PAGE_SHIFT, SZ_2M); + paddr_to_pfn(base_paddr + SZ_2M), SZ_2M >> PAGE_SHIFT, + SZ_2M); } return fdt_virt; @@ -621,19 +613,19 @@ void __init setup_pagetables(unsigned long boot_phys_offset) phys_offset = boot_phys_offset; #ifdef CONFIG_ARM_64 - p = (void *) xen_pgtable; + p = (void *)xen_pgtable; p[0] = pte_of_xenaddr((uintptr_t)xen_first); p[0].pt.table = 1; p[0].pt.xn = 0; - p = (void *) xen_first; + p = (void *)xen_first; #else - p = (void *) cpu0_pgtable; + p = (void *)cpu0_pgtable; #endif /* Initialise first level entries, to point to second level entries */ - for ( i = 0; i < 2; i++) + for ( i = 0; i < 2; i++ ) { - p[i] = pte_of_xenaddr((uintptr_t)(xen_second+i*LPAE_ENTRIES)); + p[i] = pte_of_xenaddr((uintptr_t)(xen_second + i * LPAE_ENTRIES)); p[i].pt.table = 1; p[i].pt.xn = 0; } @@ -641,9 +633,9 @@ void __init setup_pagetables(unsigned long boot_phys_offset) #ifdef CONFIG_ARM_32 for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ ) { - p[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)] - = pte_of_xenaddr((uintptr_t)(cpu0_dommap+i*LPAE_ENTRIES)); - p[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)].pt.table = 1; + p[first_table_offset(DOMHEAP_VIRT_START + i * FIRST_SIZE)] = + pte_of_xenaddr((uintptr_t)(cpu0_dommap + i * LPAE_ENTRIES)); + p[first_table_offset(DOMHEAP_VIRT_START + i * FIRST_SIZE)].pt.table = 1; } #endif @@ -685,9 +677,9 @@ void __init setup_pagetables(unsigned long boot_phys_offset) xen_second[second_table_offset(BOOT_FDT_VIRT_START + SZ_2M)] = pte; #ifdef CONFIG_ARM_64 - ttbr = (uintptr_t) xen_pgtable + phys_offset; + ttbr = (uintptr_t)xen_pgtable + phys_offset; #else - ttbr = (uintptr_t) cpu0_pgtable + phys_offset; + ttbr = (uintptr_t)cpu0_pgtable + phys_offset; #endif switch_ttbr(ttbr); @@ -712,9 +704,9 @@ void __init setup_pagetables(unsigned long boot_phys_offset) per_cpu(xen_dommap, 0) = cpu0_dommap; /* Make sure it is clear */ - memset(this_cpu(xen_dommap), 0, DOMHEAP_SECOND_PAGES*PAGE_SIZE); + 
memset(this_cpu(xen_dommap), 0, DOMHEAP_SECOND_PAGES * PAGE_SIZE); clean_dcache_va_range(this_cpu(xen_dommap), - DOMHEAP_SECOND_PAGES*PAGE_SIZE); + DOMHEAP_SECOND_PAGES * PAGE_SIZE); #endif } @@ -723,7 +715,7 @@ int init_secondary_pagetables(int cpu) { /* Set init_ttbr for this CPU coming up. All CPus share a single setof * pagetables, but rewrite it each time for consistency with 32 bit. */ - init_ttbr = (uintptr_t) xen_pgtable + phys_offset; + init_ttbr = (uintptr_t)xen_pgtable + phys_offset; clean_dcache(init_ttbr); return 0; } @@ -733,8 +725,10 @@ int init_secondary_pagetables(int cpu) lpae_t *first, *domheap, pte; int i; - first = alloc_xenheap_page(); /* root == first level on 32-bit 3-level trie */ - domheap = alloc_xenheap_pages(get_order_from_pages(DOMHEAP_SECOND_PAGES), 0); + first = + alloc_xenheap_page(); /* root == first level on 32-bit 3-level trie */ + domheap = + alloc_xenheap_pages(get_order_from_pages(DOMHEAP_SECOND_PAGES), 0); if ( domheap == NULL || first == NULL ) { @@ -748,20 +742,22 @@ int init_secondary_pagetables(int cpu) memcpy(first, cpu0_pgtable, PAGE_SIZE); /* Ensure the domheap has no stray mappings */ - memset(domheap, 0, DOMHEAP_SECOND_PAGES*PAGE_SIZE); + memset(domheap, 0, DOMHEAP_SECOND_PAGES * PAGE_SIZE); /* Update the first level mapping to reference the local CPUs * domheap mapping pages. */ for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ ) { - pte = mfn_to_xen_entry(virt_to_mfn(domheap+i*LPAE_ENTRIES), + pte = mfn_to_xen_entry(virt_to_mfn(domheap + i * LPAE_ENTRIES), MT_NORMAL); pte.pt.table = 1; - write_pte(&first[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)], pte); + write_pte( + &first[first_table_offset(DOMHEAP_VIRT_START + i * FIRST_SIZE)], + pte); } clean_dcache_va_range(first, PAGE_SIZE); - clean_dcache_va_range(domheap, DOMHEAP_SECOND_PAGES*PAGE_SIZE); + clean_dcache_va_range(domheap, DOMHEAP_SECOND_PAGES * PAGE_SIZE); per_cpu(xen_pgtable, cpu) = first; per_cpu(xen_dommap, cpu) = domheap; @@ -803,14 +799,14 @@ void __init setup_xenheap_mappings(unsigned long base_mfn, vaddr_t vaddr; /* Align to previous 1GB boundary */ - mfn = base_mfn & ~((FIRST_SIZE>>PAGE_SHIFT)-1); + mfn = base_mfn & ~((FIRST_SIZE >> PAGE_SHIFT) - 1); /* First call sets the xenheap physical and virtual offset. */ if ( mfn_eq(xenheap_mfn_start, INVALID_MFN) ) { xenheap_mfn_start = _mfn(base_mfn); - xenheap_virt_start = DIRECTMAP_VIRT_START + - (base_mfn - mfn) * PAGE_SIZE; + xenheap_virt_start = + DIRECTMAP_VIRT_START + (base_mfn - mfn) * PAGE_SIZE; } if ( base_mfn < mfn_x(xenheap_mfn_start) ) @@ -834,10 +830,11 @@ void __init setup_xenheap_mappings(unsigned long base_mfn, { /* mfn_to_virt is not valid on the 1st 1st mfn, since it * is not within the xenheap. */ - first = slot == xenheap_first_first_slot ? - xenheap_first_first : mfn_to_virt(lpae_get_mfn(*p)); + first = slot == xenheap_first_first_slot + ? xenheap_first_first + : mfn_to_virt(lpae_get_mfn(*p)); } - else if ( xenheap_first_first_slot == -1) + else if ( xenheap_first_first_slot == -1 ) { /* Use xenheap_first_first to bootstrap the mappings */ first = xenheap_first_first; @@ -863,7 +860,7 @@ void __init setup_xenheap_mappings(unsigned long base_mfn, /* TODO: Set pte.pt.contig when appropriate. 
*/ write_pte(&first[first_table_offset(vaddr)], pte); - mfn += FIRST_SIZE>>PAGE_SHIFT; + mfn += FIRST_SIZE >> PAGE_SHIFT; vaddr += FIRST_SIZE; } @@ -878,7 +875,8 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe) unsigned long nr_pdxs = pfn_to_pdx(nr_pages); unsigned long frametable_size = nr_pdxs * sizeof(struct page_info); mfn_t base_mfn; - const unsigned long mapping_size = frametable_size < MB(32) ? MB(2) : MB(32); + const unsigned long mapping_size = + frametable_size < MB(32) ? MB(2) : MB(32); #ifdef CONFIG_ARM_64 lpae_t *second, pte; unsigned long nr_second; @@ -889,7 +887,7 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe) frametable_base_pdx = pfn_to_pdx(ps >> PAGE_SHIFT); /* Round up to 2M or 32M boundary, as appropriate. */ frametable_size = ROUNDUP(frametable_size, mapping_size); - base_mfn = alloc_boot_pages(frametable_size >> PAGE_SHIFT, 32<<(20-12)); + base_mfn = alloc_boot_pages(frametable_size >> PAGE_SHIFT, 32 << (20 - 12)); #ifdef CONFIG_ARM_64 /* Compute the number of second level pages. */ @@ -901,7 +899,8 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe) clear_page(mfn_to_virt(mfn_add(second_base, i))); pte = mfn_to_xen_entry(mfn_add(second_base, i), MT_NORMAL); pte.pt.table = 1; - write_pte(&xen_first[first_table_offset(FRAMETABLE_VIRT_START)+i], pte); + write_pte(&xen_first[first_table_offset(FRAMETABLE_VIRT_START) + i], + pte); } create_mappings(second, 0, mfn_x(base_mfn), frametable_size >> PAGE_SHIFT, mapping_size); @@ -914,7 +913,8 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe) memset(&frame_table[nr_pdxs], -1, frametable_size - (nr_pdxs * sizeof(struct page_info))); - frametable_virt_end = FRAMETABLE_VIRT_START + (nr_pdxs * sizeof(struct page_info)); + frametable_virt_end = + FRAMETABLE_VIRT_START + (nr_pdxs * sizeof(struct page_info)); } void *__init arch_vmap_virt_end(void) @@ -959,17 +959,16 @@ static int create_xen_table(lpae_t *entry) return 0; } -enum xenmap_operation { +enum xenmap_operation +{ INSERT, REMOVE, MODIFY, RESERVE }; -static int create_xen_entries(enum xenmap_operation op, - unsigned long virt, - mfn_t mfn, - unsigned long nr_mfns, +static int create_xen_entries(enum xenmap_operation op, unsigned long virt, + mfn_t mfn, unsigned long nr_mfns, unsigned int flags) { int rc; @@ -977,13 +976,14 @@ static int create_xen_entries(enum xenmap_operation op, lpae_t pte, *entry; lpae_t *third = NULL; - for(; addr < addr_end; addr += PAGE_SIZE, mfn = mfn_add(mfn, 1)) + for ( ; addr < addr_end; addr += PAGE_SIZE, mfn = mfn_add(mfn, 1) ) { entry = &xen_second[second_linear_offset(addr)]; if ( !lpae_is_valid(*entry) || !lpae_is_table(*entry, 2) ) { rc = create_xen_table(entry); - if ( rc < 0 ) { + if ( rc < 0 ) + { printk("%s: L2 failed\n", __func__); goto out; } @@ -994,50 +994,52 @@ static int create_xen_entries(enum xenmap_operation op, third = mfn_to_virt(lpae_get_mfn(*entry)); entry = &third[third_table_offset(addr)]; - switch ( op ) { - case INSERT: - case RESERVE: - if ( lpae_is_valid(*entry) ) - { - printk("%s: trying to replace an existing mapping addr=%lx mfn=%"PRI_mfn"\n", - __func__, addr, mfn_x(mfn)); - return -EINVAL; - } - if ( op == RESERVE ) - break; - pte = mfn_to_xen_entry(mfn, PAGE_AI_MASK(flags)); + switch (op) + { + case INSERT: + case RESERVE: + if ( lpae_is_valid(*entry) ) + { + printk("%s: trying to replace an existing mapping addr=%lx " + "mfn=%" PRI_mfn "\n", + __func__, addr, mfn_x(mfn)); + return -EINVAL; + } + if ( op == RESERVE ) + break; + pte = 
mfn_to_xen_entry(mfn, PAGE_AI_MASK(flags)); + pte.pt.ro = PAGE_RO_MASK(flags); + pte.pt.xn = PAGE_XN_MASK(flags); + BUG_ON(!pte.pt.ro && !pte.pt.xn); + pte.pt.table = 1; + write_pte(entry, pte); + break; + case MODIFY: + case REMOVE: + if ( !lpae_is_valid(*entry) ) + { + printk("%s: trying to %s a non-existing mapping addr=%lx\n", + __func__, op == REMOVE ? "remove" : "modify", addr); + return -EINVAL; + } + if ( op == REMOVE ) + pte.bits = 0; + else + { + pte = *entry; pte.pt.ro = PAGE_RO_MASK(flags); pte.pt.xn = PAGE_XN_MASK(flags); - BUG_ON(!pte.pt.ro && !pte.pt.xn); - pte.pt.table = 1; - write_pte(entry, pte); - break; - case MODIFY: - case REMOVE: - if ( !lpae_is_valid(*entry) ) + if ( !pte.pt.ro && !pte.pt.xn ) { - printk("%s: trying to %s a non-existing mapping addr=%lx\n", - __func__, op == REMOVE ? "remove" : "modify", addr); + printk("%s: Incorrect combination for addr=%lx\n", __func__, + addr); return -EINVAL; } - if ( op == REMOVE ) - pte.bits = 0; - else - { - pte = *entry; - pte.pt.ro = PAGE_RO_MASK(flags); - pte.pt.xn = PAGE_XN_MASK(flags); - if ( !pte.pt.ro && !pte.pt.xn ) - { - printk("%s: Incorrect combination for addr=%lx\n", - __func__, addr); - return -EINVAL; - } - } - write_pte(entry, pte); - break; - default: - BUG(); + } + write_pte(entry, pte); + break; + default: + BUG(); } } flush_xen_data_tlb_range_va(virt, PAGE_SIZE * nr_mfns); @@ -1048,9 +1050,7 @@ out: return rc; } -int map_pages_to_xen(unsigned long virt, - mfn_t mfn, - unsigned long nr_mfns, +int map_pages_to_xen(unsigned long virt, mfn_t mfn, unsigned long nr_mfns, unsigned int flags) { return create_xen_entries(INSERT, virt, mfn, nr_mfns, flags); @@ -1072,7 +1072,13 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int flags) flags); } -enum mg { mg_clear, mg_ro, mg_rw, mg_rx }; +enum mg +{ + mg_clear, + mg_ro, + mg_rw, + mg_rx +}; static void set_pte_flags_on_range(const char *p, unsigned long l, enum mg mg) { lpae_t pte; @@ -1081,15 +1087,13 @@ static void set_pte_flags_on_range(const char *p, unsigned long l, enum mg mg) ASSERT(is_kernel(p) && is_kernel(p + l)); /* Can only guard in page granularity */ - ASSERT(!((unsigned long) p & ~PAGE_MASK)); + ASSERT(!((unsigned long)p & ~PAGE_MASK)); ASSERT(!(l & ~PAGE_MASK)); - for ( i = (p - _start) / PAGE_SIZE; - i < (p + l - _start) / PAGE_SIZE; - i++ ) + for ( i = (p - _start) / PAGE_SIZE; i < (p + l - _start) / PAGE_SIZE; i++ ) { pte = xen_xenmap[i]; - switch ( mg ) + switch (mg) { case mg_clear: pte.pt.valid = 0; @@ -1140,7 +1144,8 @@ void free_init_memory(void) set_pte_flags_on_range(__init_begin, len, mg_clear); init_domheap_pages(pa, pa + len); - printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10); + printk("Freed %ldkB init memory.\n", + (long)(__init_end - __init_begin) >> 10); } void arch_dump_shared_mem_info(void) @@ -1153,8 +1158,7 @@ int donate_page(struct domain *d, struct page_info *page, unsigned int memflags) return -ENOSYS; } -int steal_page( - struct domain *d, struct page_info *page, unsigned int memflags) +int steal_page(struct domain *d, struct page_info *page, unsigned int memflags) { return -EOPNOTSUPP; } @@ -1198,19 +1202,16 @@ void share_xen_page_with_guest(struct page_info *page, struct domain *d, spin_unlock(&d->page_alloc_lock); } -int xenmem_add_to_physmap_one( - struct domain *d, - unsigned int space, - union xen_add_to_physmap_batch_extra extra, - unsigned long idx, - gfn_t gfn) +int xenmem_add_to_physmap_one(struct domain *d, unsigned int space, + union xen_add_to_physmap_batch_extra 
extra, + unsigned long idx, gfn_t gfn) { mfn_t mfn = INVALID_MFN; int rc; p2m_type_t t; struct page_info *page = NULL; - switch ( space ) + switch (space) { case XENMAPSPACE_grant_table: rc = gnttab_map_frame(d, idx, gfn, &mfn); @@ -1322,7 +1323,7 @@ static int process_p2m_lookup(struct domain *d, xen_pfn_t *pma, long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg) { - switch ( op ) + switch (op) { /* XXX: memsharing not working yet */ case XENMEM_get_sharing_shared_pages: @@ -1338,14 +1339,14 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg) if ( copy_from_guest(&req, arg, 1) ) return -EFAULT; - if ( guest_handle_is_null(req.pa) || guest_handle_is_null(req.ma)) + if ( guest_handle_is_null(req.pa) || guest_handle_is_null(req.ma) ) return -EINVAL; d = rcu_lock_domain_by_any_id(req.domid); if ( d == NULL ) return -ESRCH; - if ( likely (req.num_frames <= FRAMES_ON_STACK) ) + if ( likely(req.num_frames <= FRAMES_ON_STACK) ) { /* more than 95% cases for GSX operation */ xen_pfn_t pma[FRAMES_ON_STACK]; @@ -1391,8 +1392,7 @@ struct domain *page_get_owner_and_reference(struct page_info *page) */ if ( unlikely(((x + 1) & PGC_count_mask) <= 1) ) return NULL; - } - while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x ); + } while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x ); owner = page_get_owner(page); ASSERT(owner); @@ -1406,10 +1406,9 @@ void put_page(struct page_info *page) do { ASSERT((y & PGC_count_mask) != 0); - x = y; + x = y; nx = x - 1; - } - while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) ); + } while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) ); if ( unlikely((nx & PGC_count_mask) == 0) ) { @@ -1453,14 +1452,15 @@ void gnttab_clear_flag(unsigned long nr, uint16_t *addr) do { old = *addr; - } while (cmpxchg(addr, old, old & mask) != old); + } while ( cmpxchg(addr, old, old & mask) != old ); } void gnttab_mark_dirty(struct domain *d, mfn_t mfn) { /* XXX: mark dirty */ static int warning; - if (!warning) { + if ( !warning ) + { gdprintk(XENLOG_WARNING, "gnttab_mark_dirty not implemented yet\n"); warning = 1; } @@ -1472,14 +1472,14 @@ int create_grant_host_mapping(unsigned long addr, mfn_t frame, int rc; p2m_type_t t = p2m_grant_map_rw; - if ( cache_flags || (flags & ~GNTMAP_readonly) != GNTMAP_host_map ) + if ( cache_flags || (flags & ~GNTMAP_readonly) != GNTMAP_host_map ) return GNTST_general_error; if ( flags & GNTMAP_readonly ) t = p2m_grant_map_ro; - rc = guest_physmap_add_entry(current->domain, gaddr_to_gfn(addr), - frame, 0, t); + rc = guest_physmap_add_entry(current->domain, gaddr_to_gfn(addr), frame, 0, + t); if ( rc ) return GNTST_general_error; diff --git a/xen/arch/arm/monitor.c b/xen/arch/arm/monitor.c index 8c4a396e3c..67c0bd66a9 100644 --- a/xen/arch/arm/monitor.c +++ b/xen/arch/arm/monitor.c @@ -30,7 +30,7 @@ int arch_monitor_domctl_event(struct domain *d, struct arch_domain *ad = &d->arch; bool requested_status = (XEN_DOMCTL_MONITOR_OP_ENABLE == mop->op); - switch ( mop->event ) + switch (mop->event) { case XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL: { @@ -59,9 +59,7 @@ int arch_monitor_domctl_event(struct domain *d, int monitor_smc(void) { - vm_event_request_t req = { - .reason = VM_EVENT_REASON_PRIVILEGED_CALL - }; + vm_event_request_t req = {.reason = VM_EVENT_REASON_PRIVILEGED_CALL}; return monitor_traps(current, 1, &req); } diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c index e1fc374a2e..89d4c2fa5c 100644 --- a/xen/arch/arm/p2m.c +++ b/xen/arch/arm/p2m.c @@ -11,7 +11,7 @@ #include #include -#define 
MAX_VMID_8_BIT (1UL << 8) +#define MAX_VMID_8_BIT (1UL << 8) #define MAX_VMID_16_BIT (1UL << 16) #define INVALID_VMID 0 /* VMID 0 is reserved */ @@ -19,28 +19,28 @@ #ifdef CONFIG_ARM_64 static unsigned int __read_mostly p2m_root_order; static unsigned int __read_mostly p2m_root_level; -#define P2M_ROOT_ORDER p2m_root_order +#define P2M_ROOT_ORDER p2m_root_order #define P2M_ROOT_LEVEL p2m_root_level static unsigned int __read_mostly max_vmid = MAX_VMID_8_BIT; /* VMID is by default 8 bit width on AArch64 */ -#define MAX_VMID max_vmid +#define MAX_VMID max_vmid #else /* First level P2M is alway 2 consecutive pages */ #define P2M_ROOT_LEVEL 1 -#define P2M_ROOT_ORDER 1 +#define P2M_ROOT_ORDER 1 /* VMID is always 8 bit width on AArch32 */ -#define MAX_VMID MAX_VMID_8_BIT +#define MAX_VMID MAX_VMID_8_BIT #endif -#define P2M_ROOT_PAGES (1<domain_id, p2m->vmid); + printk("p2m mappings for domain %d (vmid %d):\n", d->domain_id, p2m->vmid); BUG_ON(p2m->stats.mappings[0] || p2m->stats.shattered[0]); - printk(" 1G mappings: %ld (shattered %ld)\n", - p2m->stats.mappings[1], p2m->stats.shattered[1]); - printk(" 2M mappings: %ld (shattered %ld)\n", - p2m->stats.mappings[2], p2m->stats.shattered[2]); + printk(" 1G mappings: %ld (shattered %ld)\n", p2m->stats.mappings[1], + p2m->stats.shattered[1]); + printk(" 2M mappings: %ld (shattered %ld)\n", p2m->stats.mappings[2], + p2m->stats.shattered[2]); printk(" 4K mappings: %ld\n", p2m->stats.mappings[3]); p2m_read_unlock(p2m); } @@ -86,13 +85,13 @@ void dump_p2m_lookup(struct domain *d, paddr_t addr) { struct p2m_domain *p2m = p2m_get_hostp2m(d); - printk("dom%d IPA 0x%"PRIpaddr"\n", d->domain_id, addr); + printk("dom%d IPA 0x%" PRIpaddr "\n", d->domain_id, addr); - printk("P2M @ %p mfn:%#"PRI_mfn"\n", - p2m->root, mfn_x(page_to_mfn(p2m->root))); + printk("P2M @ %p mfn:%#" PRI_mfn "\n", p2m->root, + mfn_x(page_to_mfn(p2m->root))); - dump_pt_walk(page_to_maddr(p2m->root), addr, - P2M_ROOT_LEVEL, P2M_ROOT_PAGES); + dump_pt_walk(page_to_maddr(p2m->root), addr, P2M_ROOT_LEVEL, + P2M_ROOT_PAGES); } /* @@ -222,8 +221,7 @@ void p2m_tlb_flush_sync(struct p2m_domain *p2m) * The function will return NULL if the offset of the root table is * invalid. */ -static lpae_t *p2m_get_root_pointer(struct p2m_domain *p2m, - gfn_t gfn) +static lpae_t *p2m_get_root_pointer(struct p2m_domain *p2m, gfn_t gfn) { unsigned int root_table; @@ -355,10 +353,8 @@ static int p2m_next_level(struct p2m_domain *p2m, bool read_only, * valid will contain the value of bit[0] (e.g valid bit) of the * entry. */ -mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn, - p2m_type_t *t, p2m_access_t *a, - unsigned int *page_order, - bool *valid) +mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn, p2m_type_t *t, + p2m_access_t *a, unsigned int *page_order, bool *valid) { paddr_t addr = gfn_to_gaddr(gfn); unsigned int level = 0; @@ -369,11 +365,8 @@ mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn, /* Convenience aliases */ const unsigned int offsets[4] = { - zeroeth_table_offset(addr), - first_table_offset(addr), - second_table_offset(addr), - third_table_offset(addr) - }; + zeroeth_table_offset(addr), first_table_offset(addr), + second_table_offset(addr), third_table_offset(addr)}; ASSERT(p2m_is_locked(p2m)); BUILD_BUG_ON(THIRD_MASK != PAGE_MASK); @@ -491,8 +484,7 @@ struct page_info *p2m_get_page_from_gfn(struct domain *d, gfn_t gfn, return get_page(page, d) ? 
page : NULL; } -int guest_physmap_mark_populate_on_demand(struct domain *d, - unsigned long gfn, +int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn, unsigned int order) { return -ENOSYS; @@ -507,7 +499,7 @@ unsigned long p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a) { /* First apply type permissions */ - switch ( t ) + switch (t) { case p2m_ram_rw: e->p2m.xn = 0; @@ -543,7 +535,7 @@ static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a) } /* Then restrict with access permissions */ - switch ( a ) + switch (a) { case p2m_access_rwx: break; @@ -583,7 +575,7 @@ static lpae_t mfn_to_p2m_entry(mfn_t mfn, p2m_type_t t, p2m_access_t a) * sh, xn and write bit will be defined in the following switches * based on mattr and t. */ - lpae_t e = (lpae_t) { + lpae_t e = (lpae_t){ .p2m.af = 1, .p2m.read = 1, .p2m.table = 1, @@ -593,7 +585,7 @@ static lpae_t mfn_to_p2m_entry(mfn_t mfn, p2m_type_t t, p2m_access_t a) BUILD_BUG_ON(p2m_max_real_type > (1 << 4)); - switch ( t ) + switch (t) { case p2m_mmio_direct_dev: e.p2m.mattr = MATTR_DEV; @@ -712,8 +704,7 @@ static int p2m_mem_access_radix_set(struct p2m_domain *p2m, gfn_t gfn, { /* If a setting already exists, change it to the new one */ radix_tree_replace_slot( - radix_tree_lookup_slot( - &p2m->mem_access_settings, gfn_x(gfn)), + radix_tree_lookup_slot(&p2m->mem_access_settings, gfn_x(gfn)), radix_tree_int_to_ptr(a)); rc = 0; } @@ -747,8 +738,8 @@ static void p2m_put_l3_page(const lpae_t pte) } /* Free lpae sub-tree behind an entry */ -static void p2m_free_entry(struct p2m_domain *p2m, - lpae_t entry, unsigned int level) +static void p2m_free_entry(struct p2m_domain *p2m, lpae_t entry, + unsigned int level) { unsigned int i; lpae_t *table; @@ -860,8 +851,8 @@ static bool p2m_split_superpage(struct p2m_domain *p2m, lpae_t *entry, * know whether the entry should be shattered for every entry. */ if ( next_level != target ) - rv = p2m_split_superpage(p2m, table + offsets[next_level], - level + 1, target, offsets); + rv = p2m_split_superpage(p2m, table + offsets[next_level], level + 1, + target, offsets); if ( p2m->clean_pte ) clean_dcache_va_range(table, PAGE_SIZE); @@ -881,11 +872,8 @@ static bool p2m_split_superpage(struct p2m_domain *p2m, lpae_t *entry, * Insert an entry in the p2m. This should be called with a mapping * equal to a page/superpage (4K, 2M, 1G). */ -static int __p2m_set_entry(struct p2m_domain *p2m, - gfn_t sgfn, - unsigned int page_order, - mfn_t smfn, - p2m_type_t t, +static int __p2m_set_entry(struct p2m_domain *p2m, gfn_t sgfn, + unsigned int page_order, mfn_t smfn, p2m_type_t t, p2m_access_t a) { paddr_t addr = gfn_to_gaddr(sgfn); @@ -898,11 +886,8 @@ static int __p2m_set_entry(struct p2m_domain *p2m, /* Convenience aliases */ const unsigned int offsets[4] = { - zeroeth_table_offset(addr), - first_table_offset(addr), - second_table_offset(addr), - third_table_offset(addr) - }; + zeroeth_table_offset(addr), first_table_offset(addr), + second_table_offset(addr), third_table_offset(addr)}; ASSERT(p2m_is_write_locked(p2m)); @@ -922,8 +907,8 @@ static int __p2m_set_entry(struct p2m_domain *p2m, * Don't try to allocate intermediate page table if the mapping * is about to be removed. 
*/ - rc = p2m_next_level(p2m, removing_mapping, - level, &table, offsets[level]); + rc = p2m_next_level(p2m, removing_mapping, level, &table, + offsets[level]); if ( rc == GUEST_TABLE_MAP_FAILED ) { /* @@ -933,7 +918,7 @@ static int __p2m_set_entry(struct p2m_domain *p2m, * when removing a mapping as it may not exist in the * page table. In this case, just ignore it. */ - rc = removing_mapping ? 0 : -ENOENT; + rc = removing_mapping ? 0 : -ENOENT; goto out; } else if ( rc != GUEST_TABLE_NORMAL_PAGE ) @@ -1058,8 +1043,8 @@ static int __p2m_set_entry(struct p2m_domain *p2m, p2m_write_pte(entry, pte, p2m->clean_pte); - p2m->max_mapped_gfn = gfn_max(p2m->max_mapped_gfn, - gfn_add(sgfn, 1 << page_order)); + p2m->max_mapped_gfn = + gfn_max(p2m->max_mapped_gfn, gfn_add(sgfn, 1 << page_order)); p2m->lowest_mapped_gfn = gfn_min(p2m->lowest_mapped_gfn, sgfn); } @@ -1079,9 +1064,8 @@ static int __p2m_set_entry(struct p2m_domain *p2m, if ( need_iommu_pt_sync(p2m->domain) ) { if ( !mfn_eq(smfn, INVALID_MFN) ) - rc = iommu_map(p2m->domain, _dfn(gfn_x(sgfn)), smfn, - page_order, p2m_get_iommu_flags(t), - &flush_flags); + rc = iommu_map(p2m->domain, _dfn(gfn_x(sgfn)), smfn, page_order, + p2m_get_iommu_flags(t), &flush_flags); else rc = iommu_unmap(p2m->domain, _dfn(gfn_x(sgfn)), page_order, &flush_flags); @@ -1106,12 +1090,8 @@ out: return rc; } -int p2m_set_entry(struct p2m_domain *p2m, - gfn_t sgfn, - unsigned long nr, - mfn_t smfn, - p2m_type_t t, - p2m_access_t a) +int p2m_set_entry(struct p2m_domain *p2m, gfn_t sgfn, unsigned long nr, + mfn_t smfn, p2m_type_t t, p2m_access_t a) { int rc = 0; @@ -1146,7 +1126,7 @@ int p2m_set_entry(struct p2m_domain *p2m, sgfn = gfn_add(sgfn, (1 << order)); if ( !mfn_eq(smfn, INVALID_MFN) ) - smfn = mfn_add(smfn, (1 << order)); + smfn = mfn_add(smfn, (1 << order)); nr -= (1 << order); } @@ -1216,11 +1196,8 @@ bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn) /* Convenience aliases */ const unsigned int offsets[4] = { - zeroeth_table_offset(addr), - first_table_offset(addr), - second_table_offset(addr), - third_table_offset(addr) - }; + zeroeth_table_offset(addr), first_table_offset(addr), + second_table_offset(addr), third_table_offset(addr)}; p2m_write_lock(p2m); @@ -1312,11 +1289,8 @@ out: return resolved; } -static inline int p2m_insert_mapping(struct domain *d, - gfn_t start_gfn, - unsigned long nr, - mfn_t mfn, - p2m_type_t t) +static inline int p2m_insert_mapping(struct domain *d, gfn_t start_gfn, + unsigned long nr, mfn_t mfn, p2m_type_t t) { struct p2m_domain *p2m = p2m_get_hostp2m(d); int rc; @@ -1328,58 +1302,44 @@ static inline int p2m_insert_mapping(struct domain *d, return rc; } -static inline int p2m_remove_mapping(struct domain *d, - gfn_t start_gfn, - unsigned long nr, - mfn_t mfn) +static inline int p2m_remove_mapping(struct domain *d, gfn_t start_gfn, + unsigned long nr, mfn_t mfn) { struct p2m_domain *p2m = p2m_get_hostp2m(d); int rc; p2m_write_lock(p2m); - rc = p2m_set_entry(p2m, start_gfn, nr, INVALID_MFN, - p2m_invalid, p2m_access_rwx); + rc = p2m_set_entry(p2m, start_gfn, nr, INVALID_MFN, p2m_invalid, + p2m_access_rwx); p2m_write_unlock(p2m); return rc; } -int map_regions_p2mt(struct domain *d, - gfn_t gfn, - unsigned long nr, - mfn_t mfn, +int map_regions_p2mt(struct domain *d, gfn_t gfn, unsigned long nr, mfn_t mfn, p2m_type_t p2mt) { return p2m_insert_mapping(d, gfn, nr, mfn, p2mt); } -int unmap_regions_p2mt(struct domain *d, - gfn_t gfn, - unsigned long nr, - mfn_t mfn) +int unmap_regions_p2mt(struct domain *d, gfn_t gfn, 
unsigned long nr, mfn_t mfn) { return p2m_remove_mapping(d, gfn, nr, mfn); } -int map_mmio_regions(struct domain *d, - gfn_t start_gfn, - unsigned long nr, +int map_mmio_regions(struct domain *d, gfn_t start_gfn, unsigned long nr, mfn_t mfn) { return p2m_insert_mapping(d, start_gfn, nr, mfn, p2m_mmio_direct_dev); } -int unmap_mmio_regions(struct domain *d, - gfn_t start_gfn, - unsigned long nr, +int unmap_mmio_regions(struct domain *d, gfn_t start_gfn, unsigned long nr, mfn_t mfn) { return p2m_remove_mapping(d, start_gfn, nr, mfn); } -int map_dev_mmio_region(struct domain *d, - gfn_t gfn, - unsigned long nr, +int map_dev_mmio_region(struct domain *d, gfn_t gfn, unsigned long nr, mfn_t mfn) { int res; @@ -1390,7 +1350,8 @@ int map_dev_mmio_region(struct domain *d, res = p2m_insert_mapping(d, gfn, nr, mfn, p2m_mmio_direct_c); if ( res < 0 ) { - printk(XENLOG_G_ERR "Unable to map MFNs [%#"PRI_mfn" - %#"PRI_mfn" in Dom%d\n", + printk(XENLOG_G_ERR "Unable to map MFNs [%#" PRI_mfn " - %#" PRI_mfn + " in Dom%d\n", mfn_x(mfn), mfn_x(mfn) + nr - 1, d->domain_id); return res; } @@ -1398,11 +1359,8 @@ int map_dev_mmio_region(struct domain *d, return 0; } -int guest_physmap_add_entry(struct domain *d, - gfn_t gfn, - mfn_t mfn, - unsigned long page_order, - p2m_type_t t) +int guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn, + unsigned long page_order, p2m_type_t t) { return p2m_insert_mapping(d, gfn, (1 << page_order), mfn, t); } @@ -1450,7 +1408,6 @@ static int p2m_alloc_table(struct domain *d) return 0; } - static spinlock_t vmid_alloc_lock = SPIN_LOCK_UNLOCKED; /* @@ -1565,8 +1522,8 @@ int p2m_init(struct domain *d) * shared with the CPU, Xen has to make sure that the PT changes have * reached the memory */ - p2m->clean_pte = iommu_enabled && - !iommu_has_feature(d, IOMMU_FEAT_COHERENT_WALK); + p2m->clean_pte = + iommu_enabled && !iommu_has_feature(d, IOMMU_FEAT_COHERENT_WALK); rc = p2m_alloc_table(d); @@ -1576,10 +1533,10 @@ int p2m_init(struct domain *d) * the INVALID_VCPU_ID. */ BUILD_BUG_ON((1 << (sizeof(p2m->last_vcpu_ran[0]) * 8)) < MAX_VIRT_CPUS); - BUILD_BUG_ON((1 << (sizeof(p2m->last_vcpu_ran[0])* 8)) < INVALID_VCPU_ID); + BUILD_BUG_ON((1 << (sizeof(p2m->last_vcpu_ran[0]) * 8)) < INVALID_VCPU_ID); - for_each_possible_cpu(cpu) - p2m->last_vcpu_ran[cpu] = INVALID_VCPU_ID; + for_each_possible_cpu (cpu) + p2m->last_vcpu_ran[cpu] = INVALID_VCPU_ID; /* * Besides getting a domain when we only have the p2m in hand, @@ -1611,8 +1568,7 @@ int relinquish_p2m_mapping(struct domain *d) start = p2m->lowest_mapped_gfn; end = p2m->max_mapped_gfn; - for ( ; gfn_x(start) < gfn_x(end); - start = gfn_next_boundary(start, order) ) + for ( ; gfn_x(start) < gfn_x(end); start = gfn_next_boundary(start, order) ) { mfn_t mfn = p2m_get_entry(p2m, start, &t, NULL, &order, NULL); @@ -1636,11 +1592,13 @@ int relinquish_p2m_mapping(struct domain *d) * For valid mapping, the start will always be aligned as * entry will be removed whilst relinquishing. 
*/ - rc = __p2m_set_entry(p2m, start, order, INVALID_MFN, - p2m_invalid, p2m_access_rwx); + rc = __p2m_set_entry(p2m, start, order, INVALID_MFN, p2m_invalid, + p2m_access_rwx); if ( unlikely(rc) ) { - printk(XENLOG_G_ERR "Unable to remove mapping gfn=%#"PRI_gfn" order=%u from the p2m of domain %d\n", gfn_x(start), order, d->domain_id); + printk(XENLOG_G_ERR "Unable to remove mapping gfn=%#" PRI_gfn + " order=%u from the p2m of domain %d\n", + gfn_x(start), order, d->domain_id); break; } } @@ -1684,7 +1642,7 @@ int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end) while ( gfn_x(start) < gfn_x(end) ) { - /* + /* * Cleaning the cache for the P2M may take a long time. So we * need to be able to preempt. We will arbitrarily preempt every * time count reach 512 or above. @@ -1766,8 +1724,7 @@ void p2m_flush_vm(struct vcpu *v) ASSERT(local_irq_is_enabled()); ASSERT(v->arch.need_flush_to_ram); - do - { + do { rc = p2m_cache_flush_range(v->domain, &start, _gfn(ULONG_MAX)); if ( rc == -ERESTART ) do_softirq(); @@ -1775,8 +1732,7 @@ void p2m_flush_vm(struct vcpu *v) if ( rc != 0 ) gprintk(XENLOG_WARNING, - "P2M has not been correctly cleaned (rc = %d)\n", - rc); + "P2M has not been correctly cleaned (rc = %d)\n", rc); /* * Invalidate the p2m to track which page was modified by the guest @@ -1914,7 +1870,8 @@ struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va, if ( !guest_walk_tables(v, va, &ipa, &s1_perms) ) { dprintk(XENLOG_G_DEBUG, - "%pv: Failed to walk page-table va %#"PRIvaddr"\n", v, va); + "%pv: Failed to walk page-table va %#" PRIvaddr "\n", v, + va); return NULL; } @@ -1943,8 +1900,8 @@ struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va, if ( !mfn_valid(mfn) ) { - dprintk(XENLOG_G_DEBUG, "%pv: Invalid MFN %#"PRI_mfn"\n", - v, mfn_x(mfn)); + dprintk(XENLOG_G_DEBUG, "%pv: Invalid MFN %#" PRI_mfn "\n", v, + mfn_x(mfn)); return NULL; } @@ -1953,8 +1910,9 @@ struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va, if ( unlikely(!get_page(page, d)) ) { - dprintk(XENLOG_G_DEBUG, "%pv: Failing to acquire the MFN %#"PRI_mfn"\n", - v, mfn_x(maddr_to_mfn(maddr))); + dprintk(XENLOG_G_DEBUG, + "%pv: Failing to acquire the MFN %#" PRI_mfn "\n", v, + mfn_x(maddr_to_mfn(maddr))); return NULL; } @@ -1989,37 +1947,39 @@ static void setup_virt_paging_one(void *data) void __init setup_virt_paging(void) { /* Setup Stage 2 address translation */ - unsigned long val = VTCR_RES1|VTCR_SH0_IS|VTCR_ORGN0_WBWA|VTCR_IRGN0_WBWA; + unsigned long val = + VTCR_RES1 | VTCR_SH0_IS | VTCR_ORGN0_WBWA | VTCR_IRGN0_WBWA; #ifdef CONFIG_ARM_32 printk("P2M: 40-bit IPA\n"); p2m_ipa_bits = 40; val |= VTCR_T0SZ(0x18); /* 40 bit IPA */ - val |= VTCR_SL0(0x1); /* P2M starts at first level */ -#else /* CONFIG_ARM_64 */ - const struct { - unsigned int pabits; /* Physical Address Size */ - unsigned int t0sz; /* Desired T0SZ, minimum in comment */ + val |= VTCR_SL0(0x1); /* P2M starts at first level */ +#else /* CONFIG_ARM_64 */ + const struct + { + unsigned int pabits; /* Physical Address Size */ + unsigned int t0sz; /* Desired T0SZ, minimum in comment */ unsigned int root_order; /* Page order of the root of the p2m */ - unsigned int sl0; /* Desired SL0, maximum in comment */ + unsigned int sl0; /* Desired SL0, maximum in comment */ } pa_range_info[] = { /* T0SZ minimum and SL0 maximum from ARM DDI 0487A.b Table D4-5 */ /* PA size, t0sz(min), root-order, sl0(max) */ - [0] = { 32, 32/*32*/, 0, 1 }, - [1] = { 36, 28/*28*/, 0, 1 }, - [2] = { 40, 24/*24*/, 1, 1 }, - [3] = { 42, 24/*22*/, 1, 1 }, - 
[4] = { 44, 20/*20*/, 0, 2 }, - [5] = { 48, 16/*16*/, 0, 2 }, - [6] = { 0 }, /* Invalid */ - [7] = { 0 } /* Invalid */ + [0] = {32, 32 /*32*/, 0, 1}, + [1] = {36, 28 /*28*/, 0, 1}, + [2] = {40, 24 /*24*/, 1, 1}, + [3] = {42, 24 /*22*/, 1, 1}, + [4] = {44, 20 /*20*/, 0, 2}, + [5] = {48, 16 /*16*/, 0, 2}, + [6] = {0}, /* Invalid */ + [7] = {0} /* Invalid */ }; unsigned int cpu; unsigned int pa_range = 0x10; /* Larger than any possible value */ bool vmid_8_bit = false; - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) { const struct cpuinfo_arm *info = &cpu_data[cpu]; if ( info->mm64.pa_range < pa_range ) @@ -2046,7 +2006,8 @@ void __init setup_virt_paging(void) max_vmid = MAX_VMID_16_BIT; /* pa_range is 4 bits, but the defined encodings are only 3 bits */ - if ( pa_range >= ARRAY_SIZE(pa_range_info) || !pa_range_info[pa_range].pabits ) + if ( pa_range >= ARRAY_SIZE(pa_range_info) || + !pa_range_info[pa_range].pabits ) panic("Unknown encoding of ID_AA64MMFR0_EL1.PARange %x\n", pa_range); val |= VTCR_PS(pa_range); @@ -2062,10 +2023,9 @@ void __init setup_virt_paging(void) p2m_root_level = 2 - pa_range_info[pa_range].sl0; p2m_ipa_bits = 64 - pa_range_info[pa_range].t0sz; - printk("P2M: %d-bit IPA with %d-bit PA and %d-bit VMID\n", - p2m_ipa_bits, + printk("P2M: %d-bit IPA with %d-bit PA and %d-bit VMID\n", p2m_ipa_bits, pa_range_info[pa_range].pabits, - ( MAX_VMID == MAX_VMID_16_BIT ) ? 16 : 8); + (MAX_VMID == MAX_VMID_16_BIT) ? 16 : 8); #endif printk("P2M: %d levels with order-%d root, VTCR 0x%lx\n", 4 - P2M_ROOT_LEVEL, P2M_ROOT_ORDER, val); @@ -2073,7 +2033,7 @@ void __init setup_virt_paging(void) p2m_vmid_allocator_init(); /* It is not allowed to concatenate a level zero root */ - BUG_ON( P2M_ROOT_LEVEL == 0 && P2M_ROOT_ORDER > 0 ); + BUG_ON(P2M_ROOT_LEVEL == 0 && P2M_ROOT_ORDER > 0); vtcr = val; /* @@ -2086,7 +2046,8 @@ void __init setup_virt_paging(void) root = p2m_allocate_root(); if ( !root ) - panic("Unable to allocate root table for ARM64_WORKAROUND_AT_SPECULATE\n"); + panic("Unable to allocate root table for " + "ARM64_WORKAROUND_AT_SPECULATE\n"); empty_root_mfn = page_to_mfn(root); } @@ -2096,10 +2057,9 @@ void __init setup_virt_paging(void) } static int cpu_virt_paging_callback(struct notifier_block *nfb, - unsigned long action, - void *hcpu) + unsigned long action, void *hcpu) { - switch ( action ) + switch (action) { case CPU_STARTING: ASSERT(system_state != SYS_STATE_boot); diff --git a/xen/arch/arm/percpu.c b/xen/arch/arm/percpu.c index 25442c48fe..a71637b92f 100644 --- a/xen/arch/arm/percpu.c +++ b/xen/arch/arm/percpu.c @@ -6,7 +6,8 @@ unsigned long __per_cpu_offset[NR_CPUS]; #define INVALID_PERCPU_AREA (-(long)__per_cpu_start) -#define PERCPU_ORDER (get_order_from_bytes(__per_cpu_data_end-__per_cpu_start)) +#define PERCPU_ORDER \ + (get_order_from_bytes(__per_cpu_data_end - __per_cpu_start)) void __init percpu_init_areas(void) { @@ -27,7 +28,8 @@ static int init_percpu_area(unsigned int cpu) return 0; } -struct free_info { +struct free_info +{ unsigned int cpu; struct rcu_head rcu; }; @@ -49,13 +51,13 @@ static void free_percpu_area(unsigned int cpu) call_rcu(&info->rcu, _free_percpu_area); } -static int cpu_percpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_percpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; int rc = 0; - switch ( action ) + switch (action) { case CPU_UP_PREPARE: rc = init_percpu_area(cpu); @@ -72,8 +74,7 @@ static int 
cpu_percpu_callback( } static struct notifier_block cpu_percpu_nfb = { - .notifier_call = cpu_percpu_callback, - .priority = 100 /* highest priority */ + .notifier_call = cpu_percpu_callback, .priority = 100 /* highest priority */ }; static int __init percpu_presmp_init(void) diff --git a/xen/arch/arm/physdev.c b/xen/arch/arm/physdev.c index e91355fe22..6b2f20ccbe 100644 --- a/xen/arch/arm/physdev.c +++ b/xen/arch/arm/physdev.c @@ -10,7 +10,6 @@ #include #include - int do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) { gdprintk(XENLOG_DEBUG, "PHYSDEVOP cmd=%d: not implemented\n", cmd); diff --git a/xen/arch/arm/platform.c b/xen/arch/arm/platform.c index 8eb0b6e57a..738b6d4ce4 100644 --- a/xen/arch/arm/platform.c +++ b/xen/arch/arm/platform.c @@ -27,7 +27,6 @@ extern const struct platform_desc _splatform[], _eplatform[]; /* Pointer to the current platform description */ static const struct platform_desc *platform; - static bool __init platform_is_compatible(const struct platform_desc *plat) { const char *const *compat; diff --git a/xen/arch/arm/platform_hypercall.c b/xen/arch/arm/platform_hypercall.c index 5aab856ce7..b40d0680dc 100644 --- a/xen/arch/arm/platform_hypercall.c +++ b/xen/arch/arm/platform_hypercall.c @@ -44,16 +44,16 @@ long do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) */ while ( !spin_trylock(&xenpf_lock) ) if ( hypercall_preempt_check() ) - return hypercall_create_continuation( - __HYPERVISOR_platform_op, "h", u_xenpf_op); + return hypercall_create_continuation(__HYPERVISOR_platform_op, "h", + u_xenpf_op); - switch ( op->cmd ) + switch (op->cmd) { case XENPF_settime64: if ( likely(!op->u.settime64.mbz) ) - do_settime(op->u.settime64.secs, - op->u.settime64.nsecs, - op->u.settime64.system_time + SECONDS(d->time_offset_seconds)); + do_settime(op->u.settime64.secs, op->u.settime64.nsecs, + op->u.settime64.system_time + + SECONDS(d->time_offset_seconds)); else ret = -EINVAL; break; diff --git a/xen/arch/arm/platforms/brcm.c b/xen/arch/arm/platforms/brcm.c index d481b2c60f..57375c118e 100644 --- a/xen/arch/arm/platforms/brcm.c +++ b/xen/arch/arm/platforms/brcm.c @@ -23,12 +23,13 @@ #include #include -struct brcm_plat_regs { - uint32_t hif_mask; - uint32_t hif_cpu_reset_config; - uint32_t hif_boot_continuation; - uint32_t cpu0_pwr_zone_ctrl; - uint32_t scratch_reg; +struct brcm_plat_regs +{ + uint32_t hif_mask; + uint32_t hif_cpu_reset_config; + uint32_t hif_boot_continuation; + uint32_t cpu0_pwr_zone_ctrl; + uint32_t scratch_reg; }; static u32 brcm_boot_continuation_pc; @@ -105,18 +106,17 @@ static __init int brcm_populate_plat_regs(void) regs.hif_boot_continuation = reg_base; dprintk(XENLOG_INFO, "hif_cpu_reset_config : %08xh\n", - regs.hif_cpu_reset_config); + regs.hif_cpu_reset_config); dprintk(XENLOG_INFO, "cpu0_pwr_zone_ctrl : %08xh\n", - regs.cpu0_pwr_zone_ctrl); + regs.cpu0_pwr_zone_ctrl); dprintk(XENLOG_INFO, "hif_boot_continuation : %08xh\n", - regs.hif_boot_continuation); - dprintk(XENLOG_INFO, "scratch_reg : %08xh\n", - regs.scratch_reg); + regs.hif_boot_continuation); + dprintk(XENLOG_INFO, "scratch_reg : %08xh\n", regs.scratch_reg); return 0; } -#define ZONE_PWR_UP_REQ (1 << 10) +#define ZONE_PWR_UP_REQ (1 << 10) #define ZONE_PWR_ON_STATE (1 << 26) static int brcm_cpu_power_on(int cpu) @@ -133,7 +133,7 @@ static int brcm_cpu_power_on(int cpu) if ( !pwr_ctl ) { dprintk(XENLOG_ERR, "%s: Unable to map \"cpu0_pwr_zone_ctrl\"\n", - __func__); + __func__); return -EFAULT; } @@ -218,7 +218,7 @@ static int brcm_set_boot_continuation(u32 cpu, 
u32 pc) static int brcm_cpu_up(int cpu) { - int rc; + int rc; rc = brcm_cpu_power_on(cpu); if ( rc ) @@ -228,7 +228,7 @@ static int brcm_cpu_up(int cpu) if ( rc ) return rc; - return brcm_cpu_release(cpu); + return brcm_cpu_release(cpu); } static int __init brcm_smp_init(void) @@ -271,24 +271,19 @@ static __init int brcm_init(void) return brcm_populate_plat_regs(); } -static const char *const brcm_dt_compat[] __initconst = -{ - "brcm,bcm7445d0", - NULL -}; +static const char *const brcm_dt_compat[] __initconst = {"brcm,bcm7445d0", + NULL}; -PLATFORM_START(brcm, "Broadcom B15") - .compatible = brcm_dt_compat, - .init = brcm_init, - .smp_init = brcm_smp_init, - .cpu_up = brcm_cpu_up, -PLATFORM_END +PLATFORM_START(brcm, "Broadcom B15").compatible = brcm_dt_compat, + .init = brcm_init, .smp_init = brcm_smp_init, + .cpu_up = brcm_cpu_up, + PLATFORM_END -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/platforms/exynos5.c b/xen/arch/arm/platforms/exynos5.c index 6560507092..4c2268064b 100644 --- a/xen/arch/arm/platforms/exynos5.c +++ b/xen/arch/arm/platforms/exynos5.c @@ -29,12 +29,12 @@ static bool secure_firmware; -#define EXYNOS_ARM_CORE0_CONFIG 0x2000 +#define EXYNOS_ARM_CORE0_CONFIG 0x2000 #define EXYNOS_ARM_CORE_CONFIG(_nr) (EXYNOS_ARM_CORE0_CONFIG + (0x80 * (_nr))) #define EXYNOS_ARM_CORE_STATUS(_nr) (EXYNOS_ARM_CORE_CONFIG(_nr) + 0x4) -#define S5P_CORE_LOCAL_PWR_EN 0x3 +#define S5P_CORE_LOCAL_PWR_EN 0x3 -#define SMC_CMD_CPU1BOOT (-4) +#define SMC_CMD_CPU1BOOT (-4) static int exynos5_init_time(void) { @@ -141,8 +141,8 @@ static int __init exynos5_smp_init(void) return -EFAULT; } - printk("Set SYSRAM to %"PRIpaddr" (%p)\n", - __pa(init_secondary), init_secondary); + printk("Set SYSRAM to %" PRIpaddr " (%p)\n", __pa(init_secondary), + init_secondary); writel(__pa(init_secondary), sysram + sysram_offset); iounmap(sysram); @@ -158,8 +158,7 @@ static int exynos_cpu_power_state(void __iomem *power, int cpu) static void exynos_cpu_power_up(void __iomem *power, int cpu) { - __raw_writel(S5P_CORE_LOCAL_PWR_EN, - power + EXYNOS_ARM_CORE_CONFIG(cpu)); + __raw_writel(S5P_CORE_LOCAL_PWR_EN, power + EXYNOS_ARM_CORE_CONFIG(cpu)); } static int exynos5_cpu_power_up(void __iomem *power, int cpu) @@ -193,12 +192,11 @@ static int exynos5_get_pmu_baseandsize(u64 *power_base_addr, u64 *size) { struct dt_device_node *node; int rc; - static const struct dt_device_match exynos_dt_pmu_matches[] = - { + static const struct dt_device_match exynos_dt_pmu_matches[] = { DT_MATCH_COMPATIBLE("samsung,exynos5250-pmu"), DT_MATCH_COMPATIBLE("samsung,exynos5410-pmu"), DT_MATCH_COMPATIBLE("samsung,exynos5420-pmu"), - { /*sentinel*/ }, + {/*sentinel*/}, }; node = dt_find_matching_node(NULL, exynos_dt_pmu_matches); @@ -277,53 +275,42 @@ static void exynos5_reset(void) iounmap(pmu); } -static const struct dt_device_match exynos5_blacklist_dev[] __initconst = -{ +static const struct dt_device_match exynos5_blacklist_dev[] __initconst = { /* Multi core Timer * TODO: this device set up IRQ to CPU 1 which is not yet handled by Xen. * This is result to random freeze. 
*/ DT_MATCH_COMPATIBLE("samsung,exynos4210-mct"), DT_MATCH_COMPATIBLE("samsung,secure-firmware"), - { /* sentinel */ }, + {/* sentinel */}, }; -static const char * const exynos5250_dt_compat[] __initconst = -{ - "samsung,exynos5250", - NULL -}; +static const char *const exynos5250_dt_compat[] __initconst = { + "samsung,exynos5250", NULL}; -static const char * const exynos5_dt_compat[] __initconst = -{ - "samsung,exynos5410", - NULL -}; +static const char *const exynos5_dt_compat[] __initconst = { + "samsung,exynos5410", NULL}; PLATFORM_START(exynos5250, "SAMSUNG EXYNOS5250") .compatible = exynos5250_dt_compat, - .init_time = exynos5_init_time, - .specific_mapping = exynos5250_specific_mapping, - .smp_init = exynos5_smp_init, - .cpu_up = cpu_up_send_sgi, - .reset = exynos5_reset, - .blacklist_dev = exynos5_blacklist_dev, -PLATFORM_END - -PLATFORM_START(exynos5, "SAMSUNG EXYNOS5") - .compatible = exynos5_dt_compat, - .init_time = exynos5_init_time, - .smp_init = exynos5_smp_init, - .cpu_up = exynos5_cpu_up, - .reset = exynos5_reset, - .blacklist_dev = exynos5_blacklist_dev, -PLATFORM_END - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ + .init_time = exynos5_init_time, + .specific_mapping = exynos5250_specific_mapping, + .smp_init = exynos5_smp_init, .cpu_up = cpu_up_send_sgi, + .reset = exynos5_reset, .blacklist_dev = exynos5_blacklist_dev, + PLATFORM_END + + PLATFORM_START(exynos5, "SAMSUNG EXYNOS5") + .compatible = exynos5_dt_compat, + .init_time = exynos5_init_time, .smp_init = exynos5_smp_init, + .cpu_up = exynos5_cpu_up, .reset = exynos5_reset, + .blacklist_dev = exynos5_blacklist_dev, + PLATFORM_END + + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/platforms/midway.c b/xen/arch/arm/platforms/midway.c index b221279ec7..d76afc3b1c 100644 --- a/xen/arch/arm/platforms/midway.c +++ b/xen/arch/arm/platforms/midway.c @@ -42,22 +42,18 @@ static void midway_reset(void) iounmap(pmu); } -static const char * const midway_dt_compat[] __initconst = -{ - "calxeda,ecx-2000", - NULL -}; - -PLATFORM_START(midway, "CALXEDA MIDWAY") - .compatible = midway_dt_compat, - .reset = midway_reset, -PLATFORM_END - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ +static const char *const midway_dt_compat[] __initconst = {"calxeda,ecx-2000", + NULL}; + +PLATFORM_START(midway, "CALXEDA MIDWAY").compatible = midway_dt_compat, + .reset = midway_reset, + PLATFORM_END + + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/platforms/omap5.c b/xen/arch/arm/platforms/omap5.c index aee24e4d28..b7845af0f2 100644 --- a/xen/arch/arm/platforms/omap5.c +++ b/xen/arch/arm/platforms/omap5.c @@ -24,14 +24,14 @@ #include static uint16_t num_den[8][2] = { - { 0, 0 }, /* not used */ - { 26 * 64, 26 * 125 }, /* 12.0 Mhz */ - { 2 * 768, 2 * 1625 }, /* 13.0 Mhz */ - { 0, 0 }, /* not used */ - { 130 * 8, 130 * 25 }, /* 19.2 Mhz */ - { 2 * 384, 2 * 1625 }, /* 26.0 Mhz */ - { 3 * 256, 3 * 1125 }, /* 27.0 Mhz */ - { 130 * 4, 130 * 25 }, /* 38.4 Mhz */ + {0, 0}, /* not used */ + {26 * 64, 26 * 125}, /* 12.0 Mhz */ + {2 * 768, 2 * 1625}, /* 13.0 Mhz */ + {0, 0}, /* not used */ + {130 * 8, 130 * 25}, /* 19.2 Mhz */ + {2 * 384, 2 * 1625}, /* 26.0 Mhz */ + {3 * 256, 3 * 1125}, /* 27.0 Mhz */ + {130 * 
4, 130 * 25}, /* 38.4 Mhz */ }; /* @@ -57,15 +57,15 @@ static int omap5_init_time(void) return -ENOMEM; } - sys_clksel = readl(ckgen_prm_base + OMAP5_CM_CLKSEL_SYS) & - ~SYS_CLKSEL_MASK; + sys_clksel = readl(ckgen_prm_base + OMAP5_CM_CLKSEL_SYS) & ~SYS_CLKSEL_MASK; iounmap(ckgen_prm_base); rt_ct_base = ioremap_nocache(REALTIME_COUNTER_BASE, 0x20); if ( !rt_ct_base ) { - dprintk(XENLOG_ERR, "%s: REALTIME_COUNTER_BASE ioremap failed\n", __func__); + dprintk(XENLOG_ERR, "%s: REALTIME_COUNTER_BASE ioremap failed\n", + __func__); return -ENOMEM; } @@ -127,8 +127,8 @@ static int __init omap5_smp_init(void) return -EFAULT; } - printk("Set AuxCoreBoot1 to %"PRIpaddr" (%p)\n", - __pa(init_secondary), init_secondary); + printk("Set AuxCoreBoot1 to %" PRIpaddr " (%p)\n", __pa(init_secondary), + init_secondary); writel(__pa(init_secondary), wugen_base + OMAP_AUX_CORE_BOOT_1_OFFSET); printk("Set AuxCoreBoot0 to 0x20\n"); @@ -139,38 +139,27 @@ static int __init omap5_smp_init(void) return 0; } -static const char * const omap5_dt_compat[] __initconst = -{ - "ti,omap5", - NULL -}; - -static const char * const dra7_dt_compat[] __initconst = -{ - "ti,dra7", - NULL -}; - -PLATFORM_START(omap5, "TI OMAP5") - .compatible = omap5_dt_compat, - .init_time = omap5_init_time, - .specific_mapping = omap5_specific_mapping, - .smp_init = omap5_smp_init, - .cpu_up = cpu_up_send_sgi, -PLATFORM_END - -PLATFORM_START(dra7, "TI DRA7") - .compatible = dra7_dt_compat, - .init_time = omap5_init_time, - .cpu_up = cpu_up_send_sgi, - .smp_init = omap5_smp_init, -PLATFORM_END - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ +static const char *const omap5_dt_compat[] __initconst = {"ti,omap5", NULL}; + +static const char *const dra7_dt_compat[] __initconst = {"ti,dra7", NULL}; + +PLATFORM_START(omap5, "TI OMAP5").compatible = omap5_dt_compat, + .init_time = omap5_init_time, + .specific_mapping = omap5_specific_mapping, + .smp_init = omap5_smp_init, .cpu_up = cpu_up_send_sgi, + PLATFORM_END + + PLATFORM_START(dra7, "TI DRA7") + .compatible = dra7_dt_compat, + .init_time = omap5_init_time, .cpu_up = cpu_up_send_sgi, + .smp_init = omap5_smp_init, + PLATFORM_END + + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/platforms/rcar2.c b/xen/arch/arm/platforms/rcar2.c index df0ac84709..0eb6a6486b 100644 --- a/xen/arch/arm/platforms/rcar2.c +++ b/xen/arch/arm/platforms/rcar2.c @@ -21,9 +21,9 @@ #include #include -#define RCAR2_RAM_ADDR 0xE63C0000 -#define RCAR2_RAM_SIZE 0x1000 -#define RCAR2_SMP_START_OFFSET 0xFFC +#define RCAR2_RAM_ADDR 0xE63C0000 +#define RCAR2_RAM_SIZE 0x1000 +#define RCAR2_SMP_START_OFFSET 0xFFC static int __init rcar2_smp_init(void) { @@ -31,9 +31,9 @@ static int __init rcar2_smp_init(void) /* map ICRAM */ pram = ioremap_nocache(RCAR2_RAM_ADDR, RCAR2_RAM_SIZE); - if( !pram ) + if ( !pram ) { - dprintk( XENLOG_ERR, "Unable to map RCAR2 ICRAM\n"); + dprintk(XENLOG_ERR, "Unable to map RCAR2 ICRAM\n"); return -ENOMEM; } @@ -46,23 +46,18 @@ static int __init rcar2_smp_init(void) return 0; } -static const char *const rcar2_dt_compat[] __initconst = -{ - "renesas,lager", - NULL -}; - -PLATFORM_START(rcar2, "Renesas R-Car Gen2") - .compatible = rcar2_dt_compat, - .cpu_up = cpu_up_send_sgi, - .smp_init = rcar2_smp_init, -PLATFORM_END - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - 
*/ +static const char *const rcar2_dt_compat[] __initconst = {"renesas,lager", + NULL}; + +PLATFORM_START(rcar2, "Renesas R-Car Gen2").compatible = rcar2_dt_compat, + .cpu_up = cpu_up_send_sgi, .smp_init = rcar2_smp_init, + PLATFORM_END + + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/platforms/seattle.c b/xen/arch/arm/platforms/seattle.c index 64cc1868c2..7cbb4c8a1c 100644 --- a/xen/arch/arm/platforms/seattle.c +++ b/xen/arch/arm/platforms/seattle.c @@ -20,11 +20,8 @@ #include #include -static const char * const seattle_dt_compat[] __initconst = -{ - "amd,seattle", - NULL -}; +static const char *const seattle_dt_compat[] __initconst = {"amd,seattle", + NULL}; /* Seattle firmware only implements PSCI handler for * system off and system reset at this point. @@ -41,17 +38,16 @@ static void seattle_system_off(void) arm_smccc_smc(PSCI_0_2_FN32_SYSTEM_OFF, NULL); } -PLATFORM_START(seattle, "SEATTLE") - .compatible = seattle_dt_compat, - .reset = seattle_system_reset, - .poweroff = seattle_system_off, -PLATFORM_END +PLATFORM_START(seattle, "SEATTLE").compatible = seattle_dt_compat, + .reset = seattle_system_reset, + .poweroff = seattle_system_off, + PLATFORM_END -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/platforms/sunxi.c b/xen/arch/arm/platforms/sunxi.c index 55705b15b2..670448f09b 100644 --- a/xen/arch/arm/platforms/sunxi.c +++ b/xen/arch/arm/platforms/sunxi.c @@ -22,13 +22,13 @@ #include /* Watchdog constants: */ -#define SUNXI_WDT_MODE_REG 0x04 -#define SUNXI_WDT_MODE_EN (1 << 0) -#define SUNXI_WDT_MODE_RST_EN (1 << 1) +#define SUNXI_WDT_MODE_REG 0x04 +#define SUNXI_WDT_MODE_EN (1 << 0) +#define SUNXI_WDT_MODE_RST_EN (1 << 1) -#define SUNXI_WDT_CONFIG_SYSTEM_RESET (1 << 0) -#define SUNXI_WDOG0_CFG_REG 0x14 -#define SUNXI_WDOG0_MODE_REG 0x18 +#define SUNXI_WDT_CONFIG_SYSTEM_RESET (1 << 0) +#define SUNXI_WDOG0_CFG_REG 0x14 +#define SUNXI_WDOG0_MODE_REG 0x18 static void __iomem *sunxi_map_watchdog(bool *new_wdt) { @@ -40,7 +40,7 @@ static void __iomem *sunxi_map_watchdog(bool *new_wdt) node = dt_find_compatible_node(NULL, NULL, "allwinner,sun6i-a31-wdt"); if ( node ) - _new_wdt = true; + _new_wdt = true; else node = dt_find_compatible_node(NULL, NULL, "allwinner,sun4i-a10-wdt"); @@ -73,8 +73,7 @@ static void __iomem *sunxi_map_watchdog(bool *new_wdt) /* Enable watchdog to trigger a reset after 500 ms */ static void sunxi_old_wdt_reset(void __iomem *wdt) { - writel(SUNXI_WDT_MODE_EN | SUNXI_WDT_MODE_RST_EN, - wdt + SUNXI_WDT_MODE_REG); + writel(SUNXI_WDT_MODE_EN | SUNXI_WDT_MODE_RST_EN, wdt + SUNXI_WDT_MODE_REG); } static void sunxi_new_wdt_reset(void __iomem *wdt) @@ -99,55 +98,43 @@ static void sunxi_reset(void) iounmap(wdt); - for (;;) + for ( ;; ) wfi(); } -static const char * const sunxi_v7_dt_compat[] __initconst = -{ - "allwinner,sun6i-a31", - "allwinner,sun6i-a31s", - "allwinner,sun7i-a20", - "allwinner,sun8i-a23", - "allwinner,sun8i-a33", - "allwinner,sun8i-h2-plus", - "allwinner,sun8i-h3", - NULL -}; +static const char *const sunxi_v7_dt_compat[] __initconst = { + "allwinner,sun6i-a31", "allwinner,sun6i-a31s", + "allwinner,sun7i-a20", "allwinner,sun8i-a23", + "allwinner,sun8i-a33", "allwinner,sun8i-h2-plus", + "allwinner,sun8i-h3", NULL}; -static const char * 
const sunxi_v8_dt_compat[] __initconst = -{ - "allwinner,sun50i-a64", - "allwinner,sun50i-h5", - NULL -}; +static const char *const sunxi_v8_dt_compat[] __initconst = { + "allwinner,sun50i-a64", "allwinner,sun50i-h5", NULL}; -static const struct dt_device_match sunxi_blacklist_dev[] __initconst = -{ +static const struct dt_device_match sunxi_blacklist_dev[] __initconst = { /* * The UARTs share a page which runs the risk of mapping the Xen console * UART to dom0, so don't map any of them. */ DT_MATCH_COMPATIBLE("snps,dw-apb-uart"), - { /* sentinel */ }, + {/* sentinel */}, }; -PLATFORM_START(sunxi_v7, "Allwinner ARMv7") - .compatible = sunxi_v7_dt_compat, - .blacklist_dev = sunxi_blacklist_dev, - .reset = sunxi_reset, -PLATFORM_END +PLATFORM_START(sunxi_v7, "Allwinner ARMv7").compatible = sunxi_v7_dt_compat, + .blacklist_dev = sunxi_blacklist_dev, + .reset = sunxi_reset, + PLATFORM_END -PLATFORM_START(sunxi_v8, "Allwinner ARMv8") - .compatible = sunxi_v8_dt_compat, - .blacklist_dev = sunxi_blacklist_dev, -PLATFORM_END + PLATFORM_START(sunxi_v8, "Allwinner ARMv8") + .compatible = sunxi_v8_dt_compat, + .blacklist_dev = sunxi_blacklist_dev, + PLATFORM_END -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/platforms/thunderx.c b/xen/arch/arm/platforms/thunderx.c index 9b32a29c6b..21242dad70 100644 --- a/xen/arch/arm/platforms/thunderx.c +++ b/xen/arch/arm/platforms/thunderx.c @@ -20,20 +20,14 @@ #include -static const char * const thunderx_dt_compat[] __initconst = -{ - "cavium,thunder-88xx", - NULL -}; +static const char *const thunderx_dt_compat[] __initconst = { + "cavium,thunder-88xx", NULL}; -static const struct dt_device_match thunderx_blacklist_dev[] __initconst = -{ +static const struct dt_device_match thunderx_blacklist_dev[] __initconst = { /* Cavium has its own SMMU which is not yet supported. */ DT_MATCH_COMPATIBLE("cavium,smmu-v2"), - { /* sentinel */ }, + {/* sentinel */}, }; -PLATFORM_START(thunderx, "THUNDERX") - .compatible = thunderx_dt_compat, - .blacklist_dev = thunderx_blacklist_dev, -PLATFORM_END +PLATFORM_START(thunderx, "THUNDERX").compatible = thunderx_dt_compat, + .blacklist_dev = thunderx_blacklist_dev, PLATFORM_END diff --git a/xen/arch/arm/platforms/vexpress.c b/xen/arch/arm/platforms/vexpress.c index b6193f75b5..e743caa2c4 100644 --- a/xen/arch/arm/platforms/vexpress.c +++ b/xen/arch/arm/platforms/vexpress.c @@ -23,30 +23,30 @@ #include #include -#define DCC_SHIFT 26 +#define DCC_SHIFT 26 #define FUNCTION_SHIFT 20 -#define SITE_SHIFT 16 +#define SITE_SHIFT 16 #define POSITION_SHIFT 12 -#define DEVICE_SHIFT 0 +#define DEVICE_SHIFT 0 -static inline int vexpress_ctrl_start(uint32_t *syscfg, int write, - int function, int device) +static inline int vexpress_ctrl_start(uint32_t *syscfg, int write, int function, + int device) { - int dcc = 0; /* DCC to access */ - int site = 0; /* motherboard */ + int dcc = 0; /* DCC to access */ + int site = 0; /* motherboard */ int position = 0; /* daughterboard */ uint32_t stat; /* set control register */ - syscfg[V2M_SYS_CFGCTRL/4] = V2M_SYS_CFG_START | - (write ? V2M_SYS_CFG_WRITE : 0) | + syscfg[V2M_SYS_CFGCTRL / 4] = + V2M_SYS_CFG_START | (write ? 
V2M_SYS_CFG_WRITE : 0) | (dcc << DCC_SHIFT) | (function << FUNCTION_SHIFT) | (site << SITE_SHIFT) | (position << POSITION_SHIFT) | (device << DEVICE_SHIFT); /* wait for complete flag to be set */ do { - stat = syscfg[V2M_SYS_CFGSTAT/4]; + stat = syscfg[V2M_SYS_CFGSTAT / 4]; dsb(sy); } while ( !(stat & V2M_SYS_CFG_COMPLETE) ); @@ -78,10 +78,12 @@ static void vexpress_reset(void) /* switch to slow mode */ writel(0x3, sp810); - dsb(sy); isb(); + dsb(sy); + isb(); /* writing any value to SCSYSSTAT reg will reset the system */ writel(0x1, sp810 + 4); - dsb(sy); isb(); + dsb(sy); + isb(); iounmap(sp810); } @@ -99,8 +101,8 @@ static int __init vexpress_smp_init(void) return -EFAULT; } - printk("Set SYS_FLAGS to %"PRIpaddr" (%p)\n", - __pa(init_secondary), init_secondary); + printk("Set SYS_FLAGS to %" PRIpaddr " (%p)\n", __pa(init_secondary), + init_secondary); writel(~0, sysflags + V2M_SYS_FLAGSCLR); writel(__pa(init_secondary), sysflags + V2M_SYS_FLAGSSET); @@ -111,14 +113,10 @@ static int __init vexpress_smp_init(void) #endif -static const char * const vexpress_dt_compat[] __initconst = -{ - "arm,vexpress", - NULL -}; +static const char *const vexpress_dt_compat[] __initconst = {"arm,vexpress", + NULL}; -static const struct dt_device_match vexpress_blacklist_dev[] __initconst = -{ +static const struct dt_device_match vexpress_blacklist_dev[] __initconst = { /* Cache Coherent Interconnect */ DT_MATCH_COMPATIBLE("arm,cci-400"), DT_MATCH_COMPATIBLE("arm,cci-400-pmu"), @@ -130,24 +128,23 @@ static const struct dt_device_match vexpress_blacklist_dev[] __initconst = DT_MATCH_COMPATIBLE("arm,vexpress-reset"), DT_MATCH_COMPATIBLE("arm,vexpress-reboot"), DT_MATCH_COMPATIBLE("arm,vexpress-shutdown"), - { /* sentinel */ }, + {/* sentinel */}, }; -PLATFORM_START(vexpress, "VERSATILE EXPRESS") - .compatible = vexpress_dt_compat, +PLATFORM_START(vexpress, "VERSATILE EXPRESS").compatible = vexpress_dt_compat, #ifdef CONFIG_ARM_32 - .smp_init = vexpress_smp_init, - .cpu_up = cpu_up_send_sgi, + .smp_init = vexpress_smp_init, + .cpu_up = cpu_up_send_sgi, #endif - .reset = vexpress_reset, - .blacklist_dev = vexpress_blacklist_dev, -PLATFORM_END - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ + .reset = vexpress_reset, + .blacklist_dev = vexpress_blacklist_dev, + PLATFORM_END + + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/platforms/xgene-storm.c b/xen/arch/arm/platforms/xgene-storm.c index fced4d7c2c..7bea19c12d 100644 --- a/xen/arch/arm/platforms/xgene-storm.c +++ b/xen/arch/arm/platforms/xgene-storm.c @@ -24,26 +24,25 @@ #include /* XGENE RESET Specific defines */ -#define XGENE_RESET_ADDR 0x17000014UL -#define XGENE_RESET_SIZE 0x100 -#define XGENE_RESET_MASK 0x1 +#define XGENE_RESET_ADDR 0x17000014UL +#define XGENE_RESET_SIZE 0x100 +#define XGENE_RESET_MASK 0x1 /* Variables to save reset address of soc during platform initialization. 
*/ static u64 reset_addr, reset_size; static u32 reset_mask; static bool reset_vals_valid = false; -#define XGENE_SEC_GICV2_DIST_ADDR 0x78010000 +#define XGENE_SEC_GICV2_DIST_ADDR 0x78010000 static void __init xgene_check_pirq_eoi(void) { const struct dt_device_node *node; int res; paddr_t dbase; - const struct dt_device_match xgene_dt_int_ctrl_match[] = - { + const struct dt_device_match xgene_dt_int_ctrl_match[] = { DT_MATCH_COMPATIBLE("arm,cortex-a15-gic"), - { /*sentinel*/ }, + {/*sentinel*/}, }; node = dt_find_interrupt_controller(xgene_dt_int_ctrl_match); @@ -52,7 +51,8 @@ static void __init xgene_check_pirq_eoi(void) res = dt_device_get_address(node, 0, &dbase, NULL); if ( !dbase ) - panic("%s: Cannot find a valid address for the distributor\n", __func__); + panic("%s: Cannot find a valid address for the distributor\n", + __func__); /* * In old X-Gene Storm firmware and DT, secure mode addresses have @@ -84,7 +84,8 @@ static void xgene_storm_reset(void) if ( !addr ) { - printk("XGENE: Unable to map xgene reset address, can not reset XGENE...\n"); + printk("XGENE: Unable to map xgene reset address, can not reset " + "XGENE...\n"); return; } @@ -109,24 +110,21 @@ static int xgene_storm_init(void) return 0; } -static const char * const xgene_storm_dt_compat[] __initconst = -{ - "apm,xgene-storm", - NULL -}; +static const char *const xgene_storm_dt_compat[] __initconst = { + "apm,xgene-storm", NULL}; -PLATFORM_START(xgene_storm, "APM X-GENE STORM") - .compatible = xgene_storm_dt_compat, - .init = xgene_storm_init, - .reset = xgene_storm_reset, - .quirks = xgene_storm_quirks, -PLATFORM_END +PLATFORM_START(xgene_storm, "APM X-GENE STORM").compatible = + xgene_storm_dt_compat, + .init = xgene_storm_init, + .reset = xgene_storm_reset, + .quirks = xgene_storm_quirks, + PLATFORM_END -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/platforms/xilinx-zynqmp-eemi.c b/xen/arch/arm/platforms/xilinx-zynqmp-eemi.c index 2053ed7ac5..f69f02831d 100644 --- a/xen/arch/arm/platforms/xilinx-zynqmp-eemi.c +++ b/xen/arch/arm/platforms/xilinx-zynqmp-eemi.c @@ -57,7 +57,7 @@ bool zynqmp_eemi(struct cpu_user_regs *regs) unsigned int pm_fn = fid & 0xFFFF; enum pm_ret_status ret; - switch ( fid ) + switch (fid) { /* Mandatory SMC32 functions. 
*/ case ARM_SMCCC_CALL_COUNT_FID(SIP): @@ -87,8 +87,8 @@ bool zynqmp_eemi(struct cpu_user_regs *regs) case EEMI_FID(PM_SET_MAX_LATENCY): if ( !domain_has_node_access(current->domain, nodeid) ) { - gprintk(XENLOG_WARNING, - "zynqmp-pm: fn=%u No access to node %u\n", pm_fn, nodeid); + gprintk(XENLOG_WARNING, "zynqmp-pm: fn=%u No access to node %u\n", + pm_fn, nodeid); ret = XST_PM_NO_ACCESS; goto done; } @@ -98,8 +98,8 @@ bool zynqmp_eemi(struct cpu_user_regs *regs) case EEMI_FID(PM_RESET_GET_STATUS): if ( !domain_has_reset_access(current->domain, nodeid) ) { - gprintk(XENLOG_WARNING, - "zynqmp-pm: fn=%u No access to reset %u\n", pm_fn, nodeid); + gprintk(XENLOG_WARNING, "zynqmp-pm: fn=%u No access to reset %u\n", + pm_fn, nodeid); ret = XST_PM_NO_ACCESS; goto done; } @@ -114,8 +114,8 @@ bool zynqmp_eemi(struct cpu_user_regs *regs) /* No MMIO access is allowed from non-secure domains */ case EEMI_FID(PM_MMIO_WRITE): case EEMI_FID(PM_MMIO_READ): - gprintk(XENLOG_WARNING, - "zynqmp-pm: fn=%u No MMIO access to %u\n", pm_fn, nodeid); + gprintk(XENLOG_WARNING, "zynqmp-pm: fn=%u No MMIO access to %u\n", + pm_fn, nodeid); ret = XST_PM_NO_ACCESS; goto done; @@ -187,15 +187,10 @@ forward_to_fw: * can forward the whole command to firmware without additional * parameters checks. */ - arm_smccc_1_1_smc(get_user_reg(regs, 0), - get_user_reg(regs, 1), - get_user_reg(regs, 2), - get_user_reg(regs, 3), - get_user_reg(regs, 4), - get_user_reg(regs, 5), - get_user_reg(regs, 6), - get_user_reg(regs, 7), - &res); + arm_smccc_1_1_smc(get_user_reg(regs, 0), get_user_reg(regs, 1), + get_user_reg(regs, 2), get_user_reg(regs, 3), + get_user_reg(regs, 4), get_user_reg(regs, 5), + get_user_reg(regs, 6), get_user_reg(regs, 7), &res); set_user_reg(regs, 0, res.a0); set_user_reg(regs, 1, res.a1); diff --git a/xen/arch/arm/platforms/xilinx-zynqmp.c b/xen/arch/arm/platforms/xilinx-zynqmp.c index 08e3e11e1b..e18abee556 100644 --- a/xen/arch/arm/platforms/xilinx-zynqmp.c +++ b/xen/arch/arm/platforms/xilinx-zynqmp.c @@ -21,11 +21,7 @@ #include #include -static const char * const zynqmp_dt_compat[] __initconst = -{ - "xlnx,zynqmp", - NULL -}; +static const char *const zynqmp_dt_compat[] __initconst = {"xlnx,zynqmp", NULL}; static bool zynqmp_smc(struct cpu_user_regs *regs) { @@ -40,7 +36,7 @@ static bool zynqmp_smc(struct cpu_user_regs *regs) if ( once ) { printk(XENLOG_WARNING "ZynqMP firmware Error: no SMCCC 1.1 " - "support. Disabling firmware calls."); + "support. Disabling firmware calls."); once = false; } return false; @@ -48,16 +44,15 @@ static bool zynqmp_smc(struct cpu_user_regs *regs) return zynqmp_eemi(regs); } -PLATFORM_START(xilinx_zynqmp, "Xilinx ZynqMP") - .compatible = zynqmp_dt_compat, - .smc = zynqmp_smc, -PLATFORM_END +PLATFORM_START(xilinx_zynqmp, "Xilinx ZynqMP").compatible = zynqmp_dt_compat, + .smc = zynqmp_smc, + PLATFORM_END -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/psci.c b/xen/arch/arm/psci.c index 0c90c2305c..6fd410a58e 100644 --- a/xen/arch/arm/psci.c +++ b/xen/arch/arm/psci.c @@ -17,7 +17,6 @@ * GNU General Public License for more details. */ - #include #include #include @@ -33,9 +32,9 @@ * (native-width) function ID. 
*/ #ifdef CONFIG_ARM_64 -#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN64_##name +#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN64_##name #else -#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN32_##name +#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN32_##name #endif uint32_t psci_ver; @@ -43,7 +42,7 @@ uint32_t smccc_ver; static uint32_t psci_cpu_on_nr; -#define PSCI_RET(res) ((int32_t)(res).a0) +#define PSCI_RET(res) ((int32_t)(res).a0) int call_psci_cpu_on(int cpu) { @@ -170,11 +169,10 @@ static int __init psci_init_0_1(void) static int __init psci_init_0_2(void) { - static const struct dt_device_match psci_ids[] __initconst = - { + static const struct dt_device_match psci_ids[] __initconst = { DT_MATCH_COMPATIBLE("arm,psci-0.2"), DT_MATCH_COMPATIBLE("arm,psci-1.0"), - { /* sentinel */ }, + {/* sentinel */}, }; int ret; struct arm_smccc_res res; @@ -193,7 +191,8 @@ static int __init psci_init_0_2(void) } else { - if ( acpi_psci_hvc_present() ) { + if ( acpi_psci_hvc_present() ) + { printk("PSCI conduit must be SMC, but is HVC\n"); return -EINVAL; } @@ -231,8 +230,8 @@ int __init psci_init(void) psci_init_smccc(); - printk(XENLOG_INFO "Using PSCI v%u.%u\n", - PSCI_VERSION_MAJOR(psci_ver), PSCI_VERSION_MINOR(psci_ver)); + printk(XENLOG_INFO "Using PSCI v%u.%u\n", PSCI_VERSION_MAJOR(psci_ver), + PSCI_VERSION_MINOR(psci_ver)); return 0; } diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c index 7602dd990c..4da770b036 100644 --- a/xen/arch/arm/setup.c +++ b/xen/arch/arm/setup.c @@ -87,7 +87,7 @@ static void __init init_idle_domain(void) /* TODO: setup_idle_pagetable(); */ } -static const char * __initdata processor_implementers[] = { +static const char *__initdata processor_implementers[] = { ['A'] = "ARM Limited", ['B'] = "Broadcom Corporation", ['C'] = "Cavium Inc.", @@ -115,31 +115,31 @@ static void __init processor_id(void) printk("Huh, cpu architecture %x, expected 0xf (defined by cpuid)\n", c->midr.architecture); - printk("Processor: %08"PRIx32": \"%s\", variant: 0x%x, part 0x%03x, rev 0x%x\n", - c->midr.bits, implementer, - c->midr.variant, c->midr.part_number, c->midr.revision); + printk("Processor: %08" PRIx32 + ": \"%s\", variant: 0x%x, part 0x%03x, rev 0x%x\n", + c->midr.bits, implementer, c->midr.variant, c->midr.part_number, + c->midr.revision); #if defined(CONFIG_ARM_64) printk("64-bit Execution:\n"); - printk(" Processor Features: %016"PRIx64" %016"PRIx64"\n", + printk(" Processor Features: %016" PRIx64 " %016" PRIx64 "\n", boot_cpu_data.pfr64.bits[0], boot_cpu_data.pfr64.bits[1]); printk(" Exception Levels: EL3:%s EL2:%s EL1:%s EL0:%s\n", cpu_has_el3_32 ? "64+32" : cpu_has_el3_64 ? "64" : "No", cpu_has_el2_32 ? "64+32" : cpu_has_el2_64 ? "64" : "No", cpu_has_el1_32 ? "64+32" : cpu_has_el1_64 ? "64" : "No", cpu_has_el0_32 ? "64+32" : cpu_has_el0_64 ? "64" : "No"); - printk(" Extensions:%s%s%s\n", - cpu_has_fp ? " FloatingPoint" : "", + printk(" Extensions:%s%s%s\n", cpu_has_fp ? " FloatingPoint" : "", cpu_has_simd ? " AdvancedSIMD" : "", cpu_has_gicv3 ? 
" GICv3-SysReg" : ""); - printk(" Debug Features: %016"PRIx64" %016"PRIx64"\n", + printk(" Debug Features: %016" PRIx64 " %016" PRIx64 "\n", boot_cpu_data.dbg64.bits[0], boot_cpu_data.dbg64.bits[1]); - printk(" Auxiliary Features: %016"PRIx64" %016"PRIx64"\n", + printk(" Auxiliary Features: %016" PRIx64 " %016" PRIx64 "\n", boot_cpu_data.aux64.bits[0], boot_cpu_data.aux64.bits[1]); - printk(" Memory Model Features: %016"PRIx64" %016"PRIx64"\n", + printk(" Memory Model Features: %016" PRIx64 " %016" PRIx64 "\n", boot_cpu_data.mm64.bits[0], boot_cpu_data.mm64.bits[1]); - printk(" ISA Features: %016"PRIx64" %016"PRIx64"\n", + printk(" ISA Features: %016" PRIx64 " %016" PRIx64 "\n", boot_cpu_data.isa64.bits[0], boot_cpu_data.isa64.bits[1]); #endif @@ -150,25 +150,22 @@ static void __init processor_id(void) if ( cpu_has_aarch32 ) { printk("32-bit Execution:\n"); - printk(" Processor Features: %08"PRIx32":%08"PRIx32"\n", + printk(" Processor Features: %08" PRIx32 ":%08" PRIx32 "\n", boot_cpu_data.pfr32.bits[0], boot_cpu_data.pfr32.bits[1]); printk(" Instruction Sets:%s%s%s%s%s%s\n", - cpu_has_aarch32 ? " AArch32" : "", - cpu_has_arm ? " A32" : "", - cpu_has_thumb ? " Thumb" : "", - cpu_has_thumb2 ? " Thumb-2" : "", + cpu_has_aarch32 ? " AArch32" : "", cpu_has_arm ? " A32" : "", + cpu_has_thumb ? " Thumb" : "", cpu_has_thumb2 ? " Thumb-2" : "", cpu_has_thumbee ? " ThumbEE" : "", cpu_has_jazelle ? " Jazelle" : ""); - printk(" Extensions:%s%s\n", - cpu_has_gentimer ? " GenericTimer" : "", + printk(" Extensions:%s%s\n", cpu_has_gentimer ? " GenericTimer" : "", cpu_has_security ? " Security" : ""); - printk(" Debug Features: %08"PRIx32"\n", + printk(" Debug Features: %08" PRIx32 "\n", boot_cpu_data.dbg32.bits[0]); - printk(" Auxiliary Features: %08"PRIx32"\n", + printk(" Auxiliary Features: %08" PRIx32 "\n", boot_cpu_data.aux32.bits[0]); printk(" Memory Model Features: " - "%08"PRIx32" %08"PRIx32" %08"PRIx32" %08"PRIx32"\n", + "%08" PRIx32 " %08" PRIx32 " %08" PRIx32 " %08" PRIx32 "\n", boot_cpu_data.mm32.bits[0], boot_cpu_data.mm32.bits[1], boot_cpu_data.mm32.bits[2], boot_cpu_data.mm32.bits[3]); printk(" ISA Features: %08x %08x %08x %08x %08x %08x\n", @@ -189,11 +186,11 @@ void __init dt_unreserved_regions(paddr_t s, paddr_t e, { int i, nr = fdt_num_mem_rsv(device_tree_flattened); - for ( i = first; i < nr ; i++ ) + for ( i = first; i < nr; i++ ) { paddr_t r_s, r_e; - if ( fdt_get_mem_rsv(device_tree_flattened, i, &r_s, &r_e ) < 0 ) + if ( fdt_get_mem_rsv(device_tree_flattened, i, &r_s, &r_e) < 0 ) /* If we can't read it, pretend it doesn't exist... 
*/ continue; @@ -201,8 +198,8 @@ void __init dt_unreserved_regions(paddr_t s, paddr_t e, if ( s < r_e && r_s < e ) { - dt_unreserved_regions(r_e, e, cb, i+1); - dt_unreserved_regions(s, r_s, cb, i+1); + dt_unreserved_regions(r_e, e, cb, i + 1); + dt_unreserved_regions(s, r_s, cb, i + 1); return; } } @@ -210,9 +207,8 @@ void __init dt_unreserved_regions(paddr_t s, paddr_t e, cb(s, e); } -struct bootmodule __init *add_boot_module(bootmodule_kind kind, - paddr_t start, paddr_t size, - bool domU) +struct bootmodule __init *add_boot_module(bootmodule_kind kind, paddr_t start, + paddr_t size, bool domU) { struct bootmodules *mods = &bootinfo.modules; struct bootmodule *mod; @@ -220,11 +216,12 @@ struct bootmodule __init *add_boot_module(bootmodule_kind kind, if ( mods->nr_mods == MAX_MODULES ) { - printk("Ignoring %s boot module at %"PRIpaddr"-%"PRIpaddr" (too many)\n", + printk("Ignoring %s boot module at %" PRIpaddr "-%" PRIpaddr + " (too many)\n", boot_module_kind_as_string(kind), start, start + size); return NULL; } - for ( i = 0 ; i < mods->nr_mods ; i++ ) + for ( i = 0; i < mods->nr_mods; i++ ) { mod = &mods->module[i]; if ( mod->kind == kind && mod->start == start ) @@ -249,12 +246,12 @@ struct bootmodule __init *add_boot_module(bootmodule_kind kind, * XSM, DTB) or Dom0 modules. This is not suitable for looking up guest * modules. */ -struct bootmodule * __init boot_module_find_by_kind(bootmodule_kind kind) +struct bootmodule *__init boot_module_find_by_kind(bootmodule_kind kind) { struct bootmodules *mods = &bootinfo.modules; struct bootmodule *mod; int i; - for (i = 0 ; i < mods->nr_mods ; i++ ) + for ( i = 0; i < mods->nr_mods; i++ ) { mod = &mods->module[i]; if ( mod->kind == kind && !mod->domU ) @@ -293,13 +290,13 @@ void __init add_boot_cmdline(const char *name, const char *cmdline, * XSM, DTB) or Dom0 modules. This is not suitable for looking up guest * modules. 
*/ -struct bootcmdline * __init boot_cmdline_find_by_kind(bootmodule_kind kind) +struct bootcmdline *__init boot_cmdline_find_by_kind(bootmodule_kind kind) { struct bootcmdlines *cmds = &bootinfo.cmdlines; struct bootcmdline *cmd; int i; - for ( i = 0 ; i < cmds->nr_mods ; i++ ) + for ( i = 0; i < cmds->nr_mods; i++ ) { cmd = &cmds->cmdline[i]; if ( cmd->kind == kind && !cmd->domU ) @@ -308,13 +305,13 @@ struct bootcmdline * __init boot_cmdline_find_by_kind(bootmodule_kind kind) return NULL; } -struct bootcmdline * __init boot_cmdline_find_by_name(const char *name) +struct bootcmdline *__init boot_cmdline_find_by_name(const char *name) { struct bootcmdlines *mods = &bootinfo.cmdlines; struct bootcmdline *mod; unsigned int i; - for (i = 0 ; i < mods->nr_mods ; i++ ) + for ( i = 0; i < mods->nr_mods; i++ ) { mod = &mods->cmdline[i]; if ( strcmp(mod->dt_name, name) == 0 ) @@ -323,14 +320,14 @@ struct bootcmdline * __init boot_cmdline_find_by_name(const char *name) return NULL; } -struct bootmodule * __init boot_module_find_by_addr_and_kind(bootmodule_kind kind, - paddr_t start) +struct bootmodule *__init +boot_module_find_by_addr_and_kind(bootmodule_kind kind, paddr_t start) { struct bootmodules *mods = &bootinfo.modules; struct bootmodule *mod; unsigned int i; - for (i = 0 ; i < mods->nr_mods ; i++ ) + for ( i = 0; i < mods->nr_mods; i++ ) { mod = &mods->module[i]; if ( mod->kind == kind && mod->start == start ) @@ -339,17 +336,24 @@ struct bootmodule * __init boot_module_find_by_addr_and_kind(bootmodule_kind kin return NULL; } -const char * __init boot_module_kind_as_string(bootmodule_kind kind) +const char *__init boot_module_kind_as_string(bootmodule_kind kind) { - switch ( kind ) + switch (kind) { - case BOOTMOD_XEN: return "Xen"; - case BOOTMOD_FDT: return "Device Tree"; - case BOOTMOD_KERNEL: return "Kernel"; - case BOOTMOD_RAMDISK: return "Ramdisk"; - case BOOTMOD_XSM: return "XSM"; - case BOOTMOD_UNKNOWN: return "Unknown"; - default: BUG(); + case BOOTMOD_XEN: + return "Xen"; + case BOOTMOD_FDT: + return "Device Tree"; + case BOOTMOD_KERNEL: + return "Kernel"; + case BOOTMOD_RAMDISK: + return "Ramdisk"; + case BOOTMOD_XSM: + return "XSM"; + case BOOTMOD_UNKNOWN: + return "Unknown"; + default: + BUG(); } } @@ -366,8 +370,7 @@ void __init discard_initial_modules(void) if ( mi->module[i].kind == BOOTMOD_XEN ) continue; - if ( !mfn_valid(maddr_to_mfn(s)) || - !mfn_valid(maddr_to_mfn(e)) ) + if ( !mfn_valid(maddr_to_mfn(s)) || !mfn_valid(maddr_to_mfn(e)) ) continue; dt_unreserved_regions(s, e, init_domheap_pages, 0); @@ -387,18 +390,17 @@ void __init discard_initial_modules(void) * For non-recursive callers first_mod should normally be 0 (all * modules and Xen itself) or 1 (all modules but not Xen). 
*/ -static paddr_t __init consider_modules(paddr_t s, paddr_t e, - uint32_t size, paddr_t align, - int first_mod) +static paddr_t __init consider_modules(paddr_t s, paddr_t e, uint32_t size, + paddr_t align, int first_mod) { const struct bootmodules *mi = &bootinfo.modules; int i; int nr_rsvd; - s = (s+align-1) & ~(align-1); - e = e & ~(align-1); + s = (s + align - 1) & ~(align - 1); + e = e & ~(align - 1); - if ( s > e || e - s < size ) + if ( s > e || e - s < size ) return 0; /* First check the boot modules */ @@ -409,11 +411,11 @@ static paddr_t __init consider_modules(paddr_t s, paddr_t e, if ( s < mod_e && mod_s < e ) { - mod_e = consider_modules(mod_e, e, size, align, i+1); + mod_e = consider_modules(mod_e, e, size, align, i + 1); if ( mod_e ) return mod_e; - return consider_modules(s, mod_s, size, align, i+1); + return consider_modules(s, mod_s, size, align, i + 1); } } @@ -425,9 +427,8 @@ static paddr_t __init consider_modules(paddr_t s, paddr_t e, { paddr_t mod_s, mod_e; - if ( fdt_get_mem_rsv(device_tree_flattened, - i - mi->nr_mods, - &mod_s, &mod_e ) < 0 ) + if ( fdt_get_mem_rsv(device_tree_flattened, i - mi->nr_mods, &mod_s, + &mod_e) < 0 ) /* If we can't read it, pretend it doesn't exist... */ continue; @@ -436,11 +437,11 @@ static paddr_t __init consider_modules(paddr_t s, paddr_t e, if ( s < mod_e && mod_s < e ) { - mod_e = consider_modules(mod_e, e, size, align, i+1); + mod_e = consider_modules(mod_e, e, size, align, i + 1); if ( mod_e ) return mod_e; - return consider_modules(s, mod_s, size, align, i+1); + return consider_modules(s, mod_s, size, align, i + 1); } } return e; @@ -488,7 +489,7 @@ static void __init init_pdx(void) u64 mask = pdx_init_mask(bootinfo.mem.bank[0].start); int bank; - for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ ) + for ( bank = 0; bank < bootinfo.mem.nr_banks; bank++ ) { bank_start = bootinfo.mem.bank[bank].start; bank_size = bootinfo.mem.bank[bank].size; @@ -496,25 +497,24 @@ static void __init init_pdx(void) mask |= bank_start | pdx_region_mask(bank_start, bank_size); } - for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ ) + for ( bank = 0; bank < bootinfo.mem.nr_banks; bank++ ) { bank_start = bootinfo.mem.bank[bank].start; bank_size = bootinfo.mem.bank[bank].size; - if (~mask & pdx_region_mask(bank_start, bank_size)) + if ( ~mask & pdx_region_mask(bank_start, bank_size) ) mask = 0; } pfn_pdx_hole_setup(mask >> PAGE_SHIFT); - for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ ) + for ( bank = 0; bank < bootinfo.mem.nr_banks; bank++ ) { bank_start = bootinfo.mem.bank[bank].start; bank_size = bootinfo.mem.bank[bank].size; bank_end = bank_start + bank_size; - set_pdx_range(paddr_to_pfn(bank_start), - paddr_to_pfn(bank_end)); + set_pdx_range(paddr_to_pfn(bank_start), paddr_to_pfn(bank_end)); } } @@ -536,8 +536,8 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) init_pdx(); ram_start = bootinfo.mem.bank[0].start; - ram_size = bootinfo.mem.bank[0].size; - ram_end = ram_start + ram_size; + ram_size = bootinfo.mem.bank[0].size; + ram_end = ram_start + ram_size; for ( i = 1; i < bootinfo.mem.nr_banks; i++ ) { @@ -545,9 +545,9 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) paddr_t bank_size = bootinfo.mem.bank[i].size; paddr_t bank_end = bank_start + bank_size; - ram_size = ram_size + bank_size; - ram_start = min(ram_start,bank_start); - ram_end = max(ram_end,bank_end); + ram_size = ram_size + bank_size; + ram_start = min(ram_start, bank_start); + ram_end = max(ram_end, bank_end); } 
total_pages = ram_pages = ram_size >> PAGE_SHIFT; @@ -566,31 +566,30 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) */ heap_pages = ram_pages; if ( opt_xenheap_megabytes ) - xenheap_pages = opt_xenheap_megabytes << (20-PAGE_SHIFT); + xenheap_pages = opt_xenheap_megabytes << (20 - PAGE_SHIFT); else { - xenheap_pages = (heap_pages/32 + 0x1fffUL) & ~0x1fffUL; - xenheap_pages = max(xenheap_pages, 32UL<<(20-PAGE_SHIFT)); - xenheap_pages = min(xenheap_pages, 1UL<<(30-PAGE_SHIFT)); + xenheap_pages = (heap_pages / 32 + 0x1fffUL) & ~0x1fffUL; + xenheap_pages = max(xenheap_pages, 32UL << (20 - PAGE_SHIFT)); + xenheap_pages = min(xenheap_pages, 1UL << (30 - PAGE_SHIFT)); } - do - { - e = consider_modules(ram_start, ram_end, - pfn_to_paddr(xenheap_pages), - 32<<20, 0); + do { + e = consider_modules(ram_start, ram_end, pfn_to_paddr(xenheap_pages), + 32 << 20, 0); if ( e ) break; xenheap_pages >>= 1; - } while ( !opt_xenheap_megabytes && xenheap_pages > 32<<(20-PAGE_SHIFT) ); + } while ( !opt_xenheap_megabytes && + xenheap_pages > 32 << (20 - PAGE_SHIFT) ); - if ( ! e ) + if ( !e ) panic("Not not enough space for xenheap\n"); domheap_pages = heap_pages - xenheap_pages; - printk("Xen heap: %"PRIpaddr"-%"PRIpaddr" (%lu pages%s)\n", + printk("Xen heap: %" PRIpaddr "-%" PRIpaddr " (%lu pages%s)\n", e - (pfn_to_paddr(xenheap_pages)), e, xenheap_pages, opt_xenheap_megabytes ? ", from command-line" : ""); printk("Dom heap: %lu pages\n", domheap_pages); @@ -601,7 +600,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) * Need a single mapped page for populating bootmem_region_list * and enough mapped pages for copying the DTB. */ - dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT; + dtb_pages = (dtb_size + PAGE_SIZE - 1) >> PAGE_SHIFT; boot_mfn_start = mfn_x(xenheap_mfn_end) - dtb_pages - 1; boot_mfn_end = mfn_x(xenheap_mfn_end); @@ -638,8 +637,8 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) e = bank_end; /* Avoid the xenheap */ - if ( s < mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages)) - && mfn_to_maddr(xenheap_mfn_start) < e ) + if ( s < mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages)) && + mfn_to_maddr(xenheap_mfn_start) < e ) { e = mfn_to_maddr(xenheap_mfn_start); n = mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages)); @@ -673,7 +672,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) init_pdx(); total_pages = 0; - for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ ) + for ( bank = 0; bank < bootinfo.mem.nr_banks; bank++ ) { paddr_t bank_start = bootinfo.mem.bank[bank].start; paddr_t bank_size = bootinfo.mem.bank[bank].size; @@ -681,10 +680,11 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) paddr_t s, e; ram_size = ram_size + bank_size; - ram_start = min(ram_start,bank_start); - ram_end = max(ram_end,bank_end); + ram_start = min(ram_start, bank_start); + ram_end = max(ram_end, bank_end); - setup_xenheap_mappings(bank_start>>PAGE_SHIFT, bank_size>>PAGE_SHIFT); + setup_xenheap_mappings(bank_start >> PAGE_SHIFT, + bank_size >> PAGE_SHIFT); s = bank_start; while ( s < bank_end ) @@ -715,7 +715,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) /* * Need enough mapped pages for copying the DTB. */ - dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT; + dtb_pages = (dtb_size + PAGE_SIZE - 1) >> PAGE_SHIFT; /* Copy the DTB. 
*/ fdt = mfn_to_virt(mfn_x(alloc_boot_pages(dtb_pages, 1))); @@ -730,8 +730,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) size_t __read_mostly dcache_line_bytes; /* C entry point for boot CPU */ -void __init start_xen(unsigned long boot_phys_offset, - unsigned long fdt_paddr, +void __init start_xen(unsigned long boot_phys_offset, unsigned long fdt_paddr, unsigned long cpuid) { size_t fdt_size; @@ -755,7 +754,8 @@ void __init start_xen(unsigned long boot_phys_offset, idle_vcpu[0] = current; setup_virtual_regions(NULL, NULL); - /* Initialize traps early allow us to get backtrace when an error occurred */ + /* Initialize traps early allow us to get backtrace when an error occurred + */ init_traps(); smp_clear_cpu_maps(); @@ -763,7 +763,8 @@ void __init start_xen(unsigned long boot_phys_offset, device_tree_flattened = early_fdt_map(fdt_paddr); if ( !device_tree_flattened ) panic("Invalid device tree blob at physical address %#lx.\n" - "The DTB must be 8-byte aligned and must not exceed 2 MB in size.\n\n" + "The DTB must be 8-byte aligned and must not exceed 2 MB in " + "size.\n\n" "Please check your bootloader.\n", fdt_paddr); @@ -774,9 +775,9 @@ void __init start_xen(unsigned long boot_phys_offset, cmdline_parse(cmdline); /* Register Xen's load address as a boot module. */ - xen_bootmodule = add_boot_module(BOOTMOD_XEN, - (paddr_t)(uintptr_t)(_start + boot_phys_offset), - (paddr_t)(uintptr_t)(_end - _start + 1), false); + xen_bootmodule = add_boot_module( + BOOTMOD_XEN, (paddr_t)(uintptr_t)(_start + boot_phys_offset), + (paddr_t)(uintptr_t)(_end - _start + 1), false); BUG_ON(!xen_bootmodule); setup_pagetables(boot_phys_offset); @@ -837,7 +838,6 @@ void __init start_xen(unsigned long boot_phys_offset, tasklet_subsys_init(); - xsm_dt_init(); init_maintenance_interrupt(); @@ -862,7 +862,7 @@ void __init start_xen(unsigned long boot_phys_offset, do_presmp_initcalls(); - for_each_present_cpu ( i ) + for_each_present_cpu (i) { if ( (num_online_cpus() < cpus) && !cpu_online(i) ) { @@ -903,7 +903,7 @@ void __init start_xen(unsigned long boot_phys_offset, if ( IS_ERR(dom0) || (alloc_dom0_vcpu0(dom0) == NULL) ) panic("Error creating domain 0\n"); - if ( construct_dom0(dom0) != 0) + if ( construct_dom0(dom0) != 0 ) panic("Could not set up DOM0 guest OS\n"); heap_init_late(); diff --git a/xen/arch/arm/smp.c b/xen/arch/arm/smp.c index 62f57f0ba2..9b1d1be686 100644 --- a/xen/arch/arm/smp.c +++ b/xen/arch/arm/smp.c @@ -7,7 +7,8 @@ void flush_tlb_mask(const cpumask_t *mask) { - /* No need to IPI other processors on ARM, the processor takes care of it. */ + /* No need to IPI other processors on ARM, the processor takes care of it. + */ flush_tlb_all(); } diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c index 25cd44549c..18ace0d026 100644 --- a/xen/arch/arm/smpboot.c +++ b/xen/arch/arm/smpboot.c @@ -43,18 +43,17 @@ cpumask_t cpu_possible_map; struct cpuinfo_arm cpu_data[NR_CPUS]; /* CPU logical map: map xen cpuid to an MPIDR */ -register_t __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID }; +register_t __cpu_logical_map[NR_CPUS] = {[0 ... NR_CPUS - 1] = MPIDR_INVALID}; /* Fake one node for now. See also include/asm-arm/numa.h */ -nodemask_t __read_mostly node_online_map = { { [0] = 1UL } }; +nodemask_t __read_mostly node_online_map = {{[0] = 1UL}}; /* Xen stack for bringing up the first CPU. 
*/ static unsigned char __initdata cpu0_boot_stack[STACK_SIZE] - __attribute__((__aligned__(STACK_SIZE))); + __attribute__((__aligned__(STACK_SIZE))); /* Boot cpu data */ -struct init_info init_data = -{ +struct init_info init_data = { .stack = cpu0_boot_stack, }; @@ -95,8 +94,7 @@ static void remove_cpu_sibling_map(int cpu) free_cpumask_var(per_cpu(cpu_core_mask, cpu)); } -void __init -smp_clear_cpu_maps (void) +void __init smp_clear_cpu_maps(void) { cpumask_clear(&cpu_possible_map); cpumask_clear(&cpu_online_map); @@ -116,10 +114,8 @@ static void __init dt_smp_init_cpus(void) struct dt_device_node *cpu; unsigned int i, j; unsigned int cpuidx = 1; - static register_t tmp_map[NR_CPUS] __initdata = - { - [0 ... NR_CPUS - 1] = MPIDR_INVALID - }; + static register_t tmp_map[NR_CPUS] __initdata = {[0 ... NR_CPUS - 1] = + MPIDR_INVALID}; bool bootcpu_valid = false; int rc; @@ -128,11 +124,11 @@ static void __init dt_smp_init_cpus(void) if ( !cpus ) { printk(XENLOG_WARNING "WARNING: Can't find /cpus in the device tree.\n" - "Using only 1 CPU\n"); + "Using only 1 CPU\n"); return; } - dt_for_each_child_node( cpus, cpu ) + dt_for_each_child_node(cpus, cpu) { const __be32 *prop; u64 addr; @@ -166,7 +162,7 @@ static void __init dt_smp_init_cpus(void) hwid = addr; if ( hwid != addr ) { - printk(XENLOG_WARNING "cpu node `%s`: hwid overflow %"PRIx64"\n", + printk(XENLOG_WARNING "cpu node `%s`: hwid overflow %" PRIx64 "\n", dt_node_full_name(cpu), addr); continue; } @@ -177,7 +173,8 @@ static void __init dt_smp_init_cpus(void) */ if ( hwid & ~MPIDR_HWID_MASK ) { - printk(XENLOG_WARNING "cpu node `%s`: invalid hwid value (0x%"PRIregister")\n", + printk(XENLOG_WARNING + "cpu node `%s`: invalid hwid value (0x%" PRIregister ")\n", dt_node_full_name(cpu), hwid); continue; } @@ -192,9 +189,11 @@ static void __init dt_smp_init_cpus(void) { if ( tmp_map[j] == hwid ) { - printk(XENLOG_WARNING - "cpu node `%s`: duplicate /cpu reg properties %"PRIregister" in the DT\n", - dt_node_full_name(cpu), hwid); + printk( + XENLOG_WARNING + "cpu node `%s`: duplicate /cpu reg properties %" PRIregister + " in the DT\n", + dt_node_full_name(cpu), hwid); break; } } @@ -228,7 +227,8 @@ static void __init dt_smp_init_cpus(void) if ( (rc = arch_cpu_init(i, cpu)) < 0 ) { - printk("cpu%d init failed (hwid %"PRIregister"): %d\n", i, hwid, rc); + printk("cpu%d init failed (hwid %" PRIregister "): %d\n", i, hwid, + rc); tmp_map[i] = MPIDR_INVALID; } else @@ -238,7 +238,7 @@ static void __init dt_smp_init_cpus(void) if ( !bootcpu_valid ) { printk(XENLOG_WARNING "DT missing boot CPU MPIDR[23:0]\n" - "Using only 1 CPU\n"); + "Using only 1 CPU\n"); return; } @@ -261,7 +261,8 @@ void __init smp_init_cpus(void) if ( (rc = arch_smp_init()) < 0 ) { printk(XENLOG_WARNING "SMP init failed (%d)\n" - "Using only 1 CPU\n", rc); + "Using only 1 CPU\n", + rc); return; } @@ -271,13 +272,13 @@ void __init smp_init_cpus(void) acpi_smp_init_cpus(); if ( opt_hmp_unsafe ) - warning_add("WARNING: HMP COMPUTING HAS BEEN ENABLED.\n" - "It has implications on the security and stability of the system,\n" - "unless the cpu affinity of all domains is specified.\n"); + warning_add( + "WARNING: HMP COMPUTING HAS BEEN ENABLED.\n" + "It has implications on the security and stability of the system,\n" + "unless the cpu affinity of all domains is specified.\n"); } -int __init -smp_get_max_cpus (void) +int __init smp_get_max_cpus(void) { int i, max_cpus = 0; @@ -288,8 +289,7 @@ smp_get_max_cpus (void) return max_cpus; } -void __init -smp_prepare_cpus(void) +void __init 
smp_prepare_cpus(void) { cpumask_copy(&cpu_present_map, &cpu_possible_map); @@ -297,13 +297,12 @@ smp_prepare_cpus(void) } /* Boot the current CPU */ -void start_secondary(unsigned long boot_phys_offset, - unsigned long fdt_paddr, +void start_secondary(unsigned long boot_phys_offset, unsigned long fdt_paddr, unsigned long hwid) { unsigned int cpuid = init_data.cpuid; - memset(get_cpu_info(), 0, sizeof (struct cpu_info)); + memset(get_cpu_info(), 0, sizeof(struct cpu_info)); set_processor_id(cpuid); @@ -322,7 +321,8 @@ void start_secondary(unsigned long boot_phys_offset, if ( !opt_hmp_unsafe && current_cpu_data.midr.bits != boot_cpu_data.midr.bits ) { - printk(XENLOG_ERR "CPU%u MIDR (0x%x) does not match boot CPU MIDR (0x%x),\n" + printk(XENLOG_ERR + "CPU%u MIDR (0x%x) does not match boot CPU MIDR (0x%x),\n" "disable cpu (see big.LITTLE.txt under docs/).\n", smp_processor_id(), current_cpu_data.midr.bits, boot_cpu_data.midr.bits); @@ -331,9 +331,10 @@ void start_secondary(unsigned long boot_phys_offset, if ( dcache_line_bytes != read_dcache_line_bytes() ) { - printk(XENLOG_ERR "CPU%u dcache line size (%zu) does not match the boot CPU (%zu)\n", - smp_processor_id(), read_dcache_line_bytes(), - dcache_line_bytes); + printk( + XENLOG_ERR + "CPU%u dcache line size (%zu) does not match the boot CPU (%zu)\n", + smp_processor_id(), read_dcache_line_bytes(), dcache_line_bytes); stop_cpu(); } @@ -390,7 +391,7 @@ void __cpu_disable(void) BUG(); smp_mb(); - /* Return to caller; eventually the IPI mechanism will unwind and the + /* Return to caller; eventually the IPI mechanism will unwind and the * scheduler will drop to the idle loop, which will call stop_cpu(). */ } @@ -506,12 +507,11 @@ void __cpu_die(unsigned int cpu) } static int cpu_smpboot_callback(struct notifier_block *nfb, - unsigned long action, - void *hcpu) + unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - switch ( action ) + switch (action) { case CPU_DEAD: remove_cpu_sibling_map(cpu); diff --git a/xen/arch/arm/sysctl.c b/xen/arch/arm/sysctl.c index fbfdb44eff..3a950ef827 100644 --- a/xen/arch/arm/sysctl.c +++ b/xen/arch/arm/sysctl.c @@ -12,7 +12,9 @@ #include #include -void arch_do_physinfo(struct xen_sysctl_physinfo *pi) { } +void arch_do_physinfo(struct xen_sysctl_physinfo *pi) +{ +} long arch_do_sysctl(struct xen_sysctl *sysctl, XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) diff --git a/xen/arch/arm/tee/optee.c b/xen/arch/arm/tee/optee.c index c176597500..5c514a8352 100644 --- a/xen/arch/arm/tee/optee.c +++ b/xen/arch/arm/tee/optee.c @@ -59,12 +59,12 @@ * assumptions about OP-TEE heap usage, we limit number of pages * arbitrary. */ -#define MAX_TOTAL_SMH_BUF_PG 16384 +#define MAX_TOTAL_SMH_BUF_PG 16384 #define OPTEE_KNOWN_NSEC_CAPS OPTEE_SMC_NSEC_CAP_UNIPROCESSOR -#define OPTEE_KNOWN_SEC_CAPS (OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM | \ - OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM | \ - OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) +#define OPTEE_KNOWN_SEC_CAPS \ + (OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM | \ + OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM | OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) static unsigned int max_optee_threads = DEF_MAX_OPTEE_THREADS; @@ -72,7 +72,8 @@ static unsigned int max_optee_threads = DEF_MAX_OPTEE_THREADS; * Call context. OP-TEE can issue multiple RPC returns during one call. * We need to preserve context during them. 
*/ -struct optee_std_call { +struct optee_std_call +{ struct list_head list; /* Page where shadowed copy of call arguments is stored */ struct page_info *xen_arg_pg; @@ -88,7 +89,8 @@ struct optee_std_call { }; /* Pre-allocated SHM buffer for RPC commands */ -struct shm_rpc { +struct shm_rpc +{ struct list_head list; struct page_info *guest_page; struct page_info *xen_arg_pg; @@ -98,7 +100,8 @@ struct shm_rpc { }; /* Shared memory buffer for arbitrary data */ -struct optee_shm_buf { +struct optee_shm_buf +{ struct list_head list; uint64_t cookie; unsigned int page_cnt; @@ -116,7 +119,8 @@ struct optee_shm_buf { }; /* Domain context */ -struct optee_domain { +struct optee_domain +{ struct list_head call_list; struct list_head shm_rpc_list; struct list_head optee_shm_buf_list; @@ -202,15 +206,10 @@ static void forward_call(struct cpu_user_regs *regs) { struct arm_smccc_res resp; - arm_smccc_smc(get_user_reg(regs, 0), - get_user_reg(regs, 1), - get_user_reg(regs, 2), - get_user_reg(regs, 3), - get_user_reg(regs, 4), - get_user_reg(regs, 5), - get_user_reg(regs, 6), - OPTEE_CLIENT_ID(current->domain), - &resp); + arm_smccc_smc( + get_user_reg(regs, 0), get_user_reg(regs, 1), get_user_reg(regs, 2), + get_user_reg(regs, 3), get_user_reg(regs, 4), get_user_reg(regs, 5), + get_user_reg(regs, 6), OPTEE_CLIENT_ID(current->domain), &resp); set_user_reg(regs, 0, resp.a0); set_user_reg(regs, 1, resp.a1); @@ -274,8 +273,7 @@ static struct optee_std_call *allocate_std_call(struct optee_domain *ctx) return call; } -static void free_std_call(struct optee_domain *ctx, - struct optee_std_call *call) +static void free_std_call(struct optee_domain *ctx, struct optee_std_call *call) { atomic_dec(&ctx->call_count); @@ -314,13 +312,15 @@ static struct optee_std_call *get_std_call(struct optee_domain *ctx, struct optee_std_call *call; spin_lock(&ctx->lock); - list_for_each_entry( call, &ctx->call_list, list ) + list_for_each_entry (call, &ctx->call_list, list) { if ( call->optee_thread_id == thread_id ) { if ( call->in_flight ) { - gdprintk(XENLOG_WARNING, "Guest tries to execute call which is already in flight\n"); + gdprintk( + XENLOG_WARNING, + "Guest tries to execute call which is already in flight\n"); goto out; } call->in_flight = true; @@ -364,8 +364,8 @@ static struct shm_rpc *allocate_and_pin_shm_rpc(struct optee_domain *ctx, } /* This page will be shared with OP-TEE, so we need to pin it. */ - shm_rpc->guest_page = get_page_from_gfn(current->domain, gfn_x(gfn), &t, - P2M_ALLOC); + shm_rpc->guest_page = + get_page_from_gfn(current->domain, gfn_x(gfn), &t, P2M_ALLOC); if ( !shm_rpc->guest_page || t != p2m_ram_rw ) goto err; shm_rpc->gfn = gfn; @@ -374,12 +374,13 @@ static struct shm_rpc *allocate_and_pin_shm_rpc(struct optee_domain *ctx, spin_lock(&ctx->lock); /* Check if there is existing SHM with the same cookie. 
*/ - list_for_each_entry( shm_rpc_tmp, &ctx->shm_rpc_list, list ) + list_for_each_entry (shm_rpc_tmp, &ctx->shm_rpc_list, list) { if ( shm_rpc_tmp->cookie == cookie ) { spin_unlock(&ctx->lock); - gdprintk(XENLOG_WARNING, "Guest tries to use the same RPC SHM cookie %lx\n", + gdprintk(XENLOG_WARNING, + "Guest tries to use the same RPC SHM cookie %lx\n", cookie); goto err; } @@ -408,7 +409,7 @@ static void free_shm_rpc(struct optee_domain *ctx, uint64_t cookie) spin_lock(&ctx->lock); - list_for_each_entry( shm_rpc, &ctx->shm_rpc_list, list ) + list_for_each_entry (shm_rpc, &ctx->shm_rpc_list, list) { if ( shm_rpc->cookie == cookie ) { @@ -435,12 +436,12 @@ static struct shm_rpc *find_shm_rpc(struct optee_domain *ctx, uint64_t cookie) struct shm_rpc *shm_rpc; spin_lock(&ctx->lock); - list_for_each_entry( shm_rpc, &ctx->shm_rpc_list, list ) + list_for_each_entry (shm_rpc, &ctx->shm_rpc_list, list) { if ( shm_rpc->cookie == cookie ) { - spin_unlock(&ctx->lock); - return shm_rpc; + spin_unlock(&ctx->lock); + return shm_rpc; } } spin_unlock(&ctx->lock); @@ -458,15 +459,13 @@ static struct optee_shm_buf *allocate_optee_shm_buf(struct optee_domain *ctx, int old, new; int err_code; - do - { + do { old = atomic_read(&ctx->optee_shm_buf_pages); new = old + pages_cnt; if ( new >= MAX_TOTAL_SMH_BUF_PG ) return ERR_PTR(-ENOMEM); - } - while ( unlikely(old != atomic_cmpxchg(&ctx->optee_shm_buf_pages, - old, new)) ); + } while ( + unlikely(old != atomic_cmpxchg(&ctx->optee_shm_buf_pages, old, new)) ); optee_shm_buf = xzalloc_bytes(sizeof(struct optee_shm_buf) + pages_cnt * sizeof(struct page *)); @@ -482,12 +481,13 @@ static struct optee_shm_buf *allocate_optee_shm_buf(struct optee_domain *ctx, spin_lock(&ctx->lock); /* Check if there is already SHM with the same cookie */ - list_for_each_entry( optee_shm_buf_tmp, &ctx->optee_shm_buf_list, list ) + list_for_each_entry (optee_shm_buf_tmp, &ctx->optee_shm_buf_list, list) { if ( optee_shm_buf_tmp->cookie == cookie ) { spin_unlock(&ctx->lock); - gdprintk(XENLOG_WARNING, "Guest tries to use the same SHM buffer cookie %lx\n", + gdprintk(XENLOG_WARNING, + "Guest tries to use the same SHM buffer cookie %lx\n", cookie); err_code = -EINVAL; goto err; @@ -522,7 +522,7 @@ static void free_optee_shm_buf(struct optee_domain *ctx, uint64_t cookie) bool found = false; spin_lock(&ctx->lock); - list_for_each_entry( optee_shm_buf, &ctx->optee_shm_buf_list, list ) + list_for_each_entry (optee_shm_buf, &ctx->optee_shm_buf_list, list) { if ( optee_shm_buf->cookie == cookie ) { @@ -554,7 +554,7 @@ static void free_optee_shm_buf_pg_list(struct optee_domain *ctx, bool found = false; spin_lock(&ctx->lock); - list_for_each_entry( optee_shm_buf, &ctx->optee_shm_buf_list, list ) + list_for_each_entry (optee_shm_buf, &ctx->optee_shm_buf_list, list) { if ( optee_shm_buf->cookie == cookie ) { @@ -567,8 +567,10 @@ static void free_optee_shm_buf_pg_list(struct optee_domain *ctx, if ( found ) free_pg_list(optee_shm_buf); else - gdprintk(XENLOG_ERR, "Can't find pagelist for SHM buffer with cookie %lx to free it\n", - cookie); + gdprintk( + XENLOG_ERR, + "Can't find pagelist for SHM buffer with cookie %lx to free it\n", + cookie); } static int optee_relinquish_resources(struct domain *d) @@ -581,20 +583,20 @@ static int optee_relinquish_resources(struct domain *d) if ( !ctx ) return 0; - list_for_each_entry_safe( call, call_tmp, &ctx->call_list, list ) + list_for_each_entry_safe(call, call_tmp, &ctx->call_list, list) free_std_call(ctx, call); if ( hypercall_preempt_check() ) return -ERESTART; 
- list_for_each_entry_safe( shm_rpc, shm_rpc_tmp, &ctx->shm_rpc_list, list ) + list_for_each_entry_safe(shm_rpc, shm_rpc_tmp, &ctx->shm_rpc_list, list) free_shm_rpc(ctx, shm_rpc->cookie); if ( hypercall_preempt_check() ) return -ERESTART; - list_for_each_entry_safe( optee_shm_buf, optee_shm_buf_tmp, - &ctx->optee_shm_buf_list, list ) + list_for_each_entry_safe(optee_shm_buf, optee_shm_buf_tmp, + &ctx->optee_shm_buf_list, list) free_optee_shm_buf(ctx, optee_shm_buf->cookie); return 0; @@ -628,7 +630,7 @@ static void optee_domain_destroy(struct domain *d) XFREE(d->arch.tee); } -#define PAGELIST_ENTRIES_PER_PAGE \ +#define PAGELIST_ENTRIES_PER_PAGE \ ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1) static size_t get_pages_list_size(size_t num_entries) @@ -658,10 +660,11 @@ static int translate_noncontig(struct optee_domain *ctx, * * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h for details. */ - struct { + struct + { uint64_t pages_list[PAGELIST_ENTRIES_PER_PAGE]; uint64_t next_page_data; - } *pages_data_guest, *pages_data_xen; + } * pages_data_guest, *pages_data_xen; /* Offset of user buffer withing page */ page_offset = param->u.tmem.buf_ptr & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1); @@ -686,7 +689,8 @@ static int translate_noncontig(struct optee_domain *ctx, gfn = gaddr_to_gfn(param->u.tmem.buf_ptr & ~(OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1)); - guest_page = get_page_from_gfn(current->domain, gfn_x(gfn), &p2m, P2M_ALLOC); + guest_page = + get_page_from_gfn(current->domain, gfn_x(gfn), &p2m, P2M_ALLOC); if ( !guest_page || p2m != p2m_ram_rw ) return -EINVAL; @@ -696,9 +700,10 @@ static int translate_noncontig(struct optee_domain *ctx, while ( num_pages ) { struct page_info *page; - page = get_page_from_gfn(current->domain, - paddr_to_pfn(pages_data_guest->pages_list[entries_on_page]), - &p2m, P2M_ALLOC); + page = get_page_from_gfn( + current->domain, + paddr_to_pfn(pages_data_guest->pages_list[entries_on_page]), &p2m, + P2M_ALLOC); if ( !page || p2m != p2m_ram_rw ) goto err_unmap; @@ -718,8 +723,8 @@ static int translate_noncontig(struct optee_domain *ctx, unmap_domain_page(pages_data_guest); put_page(guest_page); - guest_page = get_page_from_gfn(current->domain, gfn_x(gfn), &p2m, - P2M_ALLOC); + guest_page = + get_page_from_gfn(current->domain, gfn_x(gfn), &p2m, P2M_ALLOC); if ( !guest_page || p2m != p2m_ram_rw ) return -EINVAL; @@ -735,8 +740,7 @@ static int translate_noncontig(struct optee_domain *ctx, unmap_domain_page(pages_data_xen); put_page(guest_page); - param->u.tmem.buf_ptr = page_to_maddr(optee_shm_buf->pg_list) | - page_offset; + param->u.tmem.buf_ptr = page_to_maddr(optee_shm_buf->pg_list) | page_offset; return 0; @@ -760,7 +764,7 @@ static int translate_params(struct optee_domain *ctx, { attr = call->xen_arg->params[i].attr; - switch ( attr & OPTEE_MSG_ATTR_TYPE_MASK ) + switch (attr & OPTEE_MSG_ATTR_TYPE_MASK) { case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT: case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT: @@ -866,8 +870,8 @@ static void copy_std_request_back(struct optee_domain *ctx, uint32_t attr; page = get_page_from_gfn(current->domain, - gfn_x(gaddr_to_gfn(call->guest_arg_ipa)), - &t, P2M_ALLOC); + gfn_x(gaddr_to_gfn(call->guest_arg_ipa)), &t, + P2M_ALLOC); if ( !page || t != p2m_ram_rw ) { if ( page ) @@ -896,7 +900,7 @@ static void copy_std_request_back(struct optee_domain *ctx, { attr = call->xen_arg->params[i].attr; - switch ( attr & OPTEE_MSG_ATTR_TYPE_MASK ) + switch (attr & OPTEE_MSG_ATTR_TYPE_MASK) { case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT: case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT: 
@@ -910,12 +914,9 @@ static void copy_std_request_back(struct optee_domain *ctx, continue; case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT: case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT: - guest_arg->params[i].u.value.a = - call->xen_arg->params[i].u.value.a; - guest_arg->params[i].u.value.b = - call->xen_arg->params[i].u.value.b; - guest_arg->params[i].u.value.c = - call->xen_arg->params[i].u.value.c; + guest_arg->params[i].u.value.a = call->xen_arg->params[i].u.value.a; + guest_arg->params[i].u.value.b = call->xen_arg->params[i].u.value.b; + guest_arg->params[i].u.value.c = call->xen_arg->params[i].u.value.c; continue; case OPTEE_MSG_ATTR_TYPE_NONE: case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT: @@ -928,15 +929,14 @@ static void copy_std_request_back(struct optee_domain *ctx, put_page(page); } - static void free_shm_buffers(struct optee_domain *ctx, struct optee_msg_arg *arg) { unsigned int i; - for ( i = 0; i < arg->num_params; i ++ ) + for ( i = 0; i < arg->num_params; i++ ) { - switch ( arg->params[i].attr & OPTEE_MSG_ATTR_TYPE_MASK ) + switch (arg->params[i].attr & OPTEE_MSG_ATTR_TYPE_MASK) { case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT: case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT: @@ -985,11 +985,9 @@ static int handle_rpc_return(struct optee_domain *ctx, shm_rpc->xen_arg = __map_domain_page(shm_rpc->xen_arg_pg); - if ( access_guest_memory_by_ipa(current->domain, - gfn_to_gaddr(shm_rpc->gfn), - shm_rpc->xen_arg, - OPTEE_MSG_GET_ARG_SIZE(shm_rpc->xen_arg->num_params), - true) ) + if ( access_guest_memory_by_ipa( + current->domain, gfn_to_gaddr(shm_rpc->gfn), shm_rpc->xen_arg, + OPTEE_MSG_GET_ARG_SIZE(shm_rpc->xen_arg->num_params), true) ) { /* * We were unable to propagate request to guest, so let's return @@ -1033,7 +1031,7 @@ static void execute_std_call(struct optee_domain *ctx, optee_ret = get_user_reg(regs, 0); if ( OPTEE_SMC_RETURN_IS_RPC(optee_ret) ) { - if ( handle_rpc_return(ctx, regs, call) == -ERESTART ) + if ( handle_rpc_return(ctx, regs, call) == -ERESTART ) { set_user_reg(regs, 0, OPTEE_SMC_CALL_RETURN_FROM_RPC); continue; @@ -1047,12 +1045,12 @@ static void execute_std_call(struct optee_domain *ctx, copy_std_request_back(ctx, regs, call); - switch ( call->xen_arg->cmd ) + switch (call->xen_arg->cmd) { case OPTEE_MSG_CMD_REGISTER_SHM: if ( call->xen_arg->ret == 0 ) free_optee_shm_buf_pg_list(ctx, - call->xen_arg->params[0].u.tmem.shm_ref); + call->xen_arg->params[0].u.tmem.shm_ref); else free_optee_shm_buf(ctx, call->xen_arg->params[0].u.tmem.shm_ref); break; @@ -1086,7 +1084,7 @@ static void handle_std_call(struct optee_domain *ctx, if ( !copy_std_request(regs, call) ) goto err; - switch ( call->xen_arg->cmd ) + switch (call->xen_arg->cmd) { case OPTEE_MSG_CMD_OPEN_SESSION: case OPTEE_MSG_CMD_CLOSE_SESSION: @@ -1094,7 +1092,7 @@ static void handle_std_call(struct optee_domain *ctx, case OPTEE_MSG_CMD_CANCEL: case OPTEE_MSG_CMD_REGISTER_SHM: case OPTEE_MSG_CMD_UNREGISTER_SHM: - if( translate_params(ctx, call) ) + if ( translate_params(ctx, call) ) { /* * translate_params() sets xen_arg->ret value to non-zero. 
@@ -1127,8 +1125,8 @@ static void handle_rpc_cmd_alloc(struct optee_domain *ctx, if ( shm_rpc->xen_arg->ret || shm_rpc->xen_arg->num_params != 1 ) return; - if ( shm_rpc->xen_arg->params[0].attr != (OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | - OPTEE_MSG_ATTR_NONCONTIG) ) + if ( shm_rpc->xen_arg->params[0].attr != + (OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | OPTEE_MSG_ATTR_NONCONTIG) ) { gdprintk(XENLOG_WARNING, "Invalid attrs for shared mem buffer: %lx\n", shm_rpc->xen_arg->params[0].attr); @@ -1141,8 +1139,7 @@ static void handle_rpc_cmd_alloc(struct optee_domain *ctx, if ( !translate_noncontig(ctx, call, &shm_rpc->xen_arg->params[0]) ) { - call->rpc_data_cookie = - shm_rpc->xen_arg->params[0].u.tmem.shm_ref; + call->rpc_data_cookie = shm_rpc->xen_arg->params[0].u.tmem.shm_ref; } else { @@ -1183,11 +1180,9 @@ static void handle_rpc_cmd(struct optee_domain *ctx, struct cpu_user_regs *regs, shm_rpc->xen_arg = __map_domain_page(shm_rpc->xen_arg_pg); /* First, copy only header to read number of arguments */ - if ( access_guest_memory_by_ipa(current->domain, - gfn_to_gaddr(shm_rpc->gfn), + if ( access_guest_memory_by_ipa(current->domain, gfn_to_gaddr(shm_rpc->gfn), shm_rpc->xen_arg, - sizeof(struct optee_msg_arg), - false) ) + sizeof(struct optee_msg_arg), false) ) { shm_rpc->xen_arg->ret = TEEC_ERROR_GENERIC; goto out; @@ -1239,7 +1234,8 @@ static void handle_rpc_func_alloc(struct optee_domain *ctx, if ( ptr & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1) ) { - gdprintk(XENLOG_WARNING, "Domain returned invalid RPC command buffer\n"); + gdprintk(XENLOG_WARNING, + "Domain returned invalid RPC command buffer\n"); /* * OP-TEE is waiting for a response to the RPC. We can't just * return error to the guest. We need to provide some invalid @@ -1283,15 +1279,15 @@ static void handle_rpc(struct optee_domain *ctx, struct cpu_user_regs *regs) */ call->optee_thread_id = -1; - switch ( call->rpc_op ) + switch (call->rpc_op) { case OPTEE_SMC_RPC_FUNC_ALLOC: handle_rpc_func_alloc(ctx, regs); break; case OPTEE_SMC_RPC_FUNC_FREE: { - uint64_t cookie = (uint64_t)call->rpc_params[0] << 32 | - (uint32_t)call->rpc_params[1]; + uint64_t cookie = + (uint64_t)call->rpc_params[0] << 32 | (uint32_t)call->rpc_params[1]; free_shm_rpc(ctx, cookie); break; } @@ -1345,7 +1341,7 @@ static bool optee_handle_call(struct cpu_user_regs *regs) if ( !ctx ) return false; - switch ( get_user_reg(regs, 0) ) + switch (get_user_reg(regs, 0)) { case OPTEE_SMC_CALLS_COUNT: case OPTEE_SMC_CALLS_UID: @@ -1374,8 +1370,7 @@ static bool optee_handle_call(struct cpu_user_regs *regs) } } -static const struct tee_mediator_ops optee_ops = -{ +static const struct tee_mediator_ops optee_ops = { .probe = optee_probe, .domain_init = optee_domain_init, .domain_destroy = optee_domain_destroy, diff --git a/xen/arch/arm/time.c b/xen/arch/arm/time.c index bbccee742e..34f913ae6c 100644 --- a/xen/arch/arm/time.c +++ b/xen/arch/arm/time.c @@ -41,7 +41,7 @@ uint64_t __read_mostly boot_count; /* For fine-grained timekeeping, we use the ARM "Generic Timer", a * register-mapped time source in the SoC. */ -unsigned long __read_mostly cpu_khz; /* CPU clock frequency in kHz. */ +unsigned long __read_mostly cpu_khz; /* CPU clock frequency in kHz. 
*/ uint32_t __read_mostly timer_dt_clock_frequency; @@ -106,16 +106,17 @@ static void __init preinit_acpi_xen_time(void) acpi_table_parse(ACPI_SIG_GTDT, arch_timer_acpi_init); } #else -static void __init preinit_acpi_xen_time(void) { } +static void __init preinit_acpi_xen_time(void) +{ +} #endif /* Set up the timer on the boot CPU (early init function) */ static void __init preinit_dt_xen_time(void) { - static const struct dt_device_match timer_ids[] __initconst = - { + static const struct dt_device_match timer_ids[] __initconst = { DT_MATCH_TIMER, - { /* sentinel */ }, + {/* sentinel */}, }; int res; u32 rate; @@ -181,10 +182,8 @@ int __init init_xen_time(void) panic("CPU does not support the Generic Timer v1 interface\n"); printk("Generic Timer IRQ: phys=%u hyp=%u virt=%u Freq: %lu KHz\n", - timer_irq[TIMER_PHYS_NONSECURE_PPI], - timer_irq[TIMER_HYP_PPI], - timer_irq[TIMER_VIRT_PPI], - cpu_khz); + timer_irq[TIMER_PHYS_NONSECURE_PPI], timer_irq[TIMER_HYP_PPI], + timer_irq[TIMER_VIRT_PPI], cpu_khz); return 0; } @@ -262,7 +261,8 @@ static void vtimer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs) current->arch.virt_timer.ctl = READ_SYSREG32(CNTV_CTL_EL0); WRITE_SYSREG32(current->arch.virt_timer.ctl | CNTx_CTL_MASK, CNTV_CTL_EL0); - vgic_inject_irq(current->domain, current, current->arch.virt_timer.irq, true); + vgic_inject_irq(current->domain, current, current->arch.virt_timer.irq, + true); } /* @@ -287,25 +287,25 @@ static void check_timer_irq_cfg(unsigned int irq, const char *which) if ( desc->arch.type & IRQ_TYPE_LEVEL_MASK ) return; - printk(XENLOG_WARNING - "WARNING: %s-timer IRQ%u is not level triggered.\n", which, irq); + printk(XENLOG_WARNING "WARNING: %s-timer IRQ%u is not level triggered.\n", + which, irq); } /* Set up the timer interrupt on this CPU */ void init_timer_interrupt(void) { /* Sensible defaults */ - WRITE_SYSREG64(0, CNTVOFF_EL2); /* No VM-specific offset */ - /* Do not let the VMs program the physical timer, only read the physical counter */ + WRITE_SYSREG64(0, CNTVOFF_EL2); /* No VM-specific offset */ + /* Do not let the VMs program the physical timer, only read the physical + * counter */ WRITE_SYSREG32(CNTHCTL_EL2_EL1PCTEN, CNTHCTL_EL2); - WRITE_SYSREG32(0, CNTP_CTL_EL0); /* Physical timer disabled */ - WRITE_SYSREG32(0, CNTHP_CTL_EL2); /* Hypervisor's timer disabled */ + WRITE_SYSREG32(0, CNTP_CTL_EL0); /* Physical timer disabled */ + WRITE_SYSREG32(0, CNTHP_CTL_EL2); /* Hypervisor's timer disabled */ isb(); - request_irq(timer_irq[TIMER_HYP_PPI], 0, timer_interrupt, - "hyptimer", NULL); - request_irq(timer_irq[TIMER_VIRT_PPI], 0, vtimer_interrupt, - "virtimer", NULL); + request_irq(timer_irq[TIMER_HYP_PPI], 0, timer_interrupt, "hyptimer", NULL); + request_irq(timer_irq[TIMER_VIRT_PPI], 0, vtimer_interrupt, "virtimer", + NULL); request_irq(timer_irq[TIMER_PHYS_NONSECURE_PPI], 0, timer_interrupt, "phytimer", NULL); @@ -320,8 +320,8 @@ void init_timer_interrupt(void) */ static void deinit_timer_interrupt(void) { - WRITE_SYSREG32(0, CNTP_CTL_EL0); /* Disable physical timer */ - WRITE_SYSREG32(0, CNTHP_CTL_EL2); /* Disable hypervisor's timer */ + WRITE_SYSREG32(0, CNTP_CTL_EL0); /* Disable physical timer */ + WRITE_SYSREG32(0, CNTHP_CTL_EL2); /* Disable hypervisor's timer */ isb(); release_irq(timer_irq[TIMER_HYP_PPI], NULL); @@ -332,7 +332,7 @@ static void deinit_timer_interrupt(void) /* Wait a set number of microseconds */ void udelay(unsigned long usecs) { - s_time_t deadline = get_s_time() + 1000 * (s_time_t) usecs; + s_time_t deadline = 
get_s_time() + 1000 * (s_time_t)usecs; while ( get_s_time() - deadline < 0 ) ; dsb(sy); @@ -357,11 +357,10 @@ void domain_set_time_offset(struct domain *d, int64_t time_offset_seconds) /* XXX update guest visible wallclock time */ } -static int cpu_time_callback(struct notifier_block *nfb, - unsigned long action, +static int cpu_time_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { - switch ( action ) + switch (action) { case CPU_DYING: deinit_timer_interrupt(); diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index 8741aa1d59..005a29f17b 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -54,16 +54,17 @@ * that both the kernel half of struct cpu_user_regs (which is pushed in * entry.S) and struct cpu_info (which lives at the bottom of a Xen * stack) must be doubleword-aligned in size. */ -static inline void check_stack_alignment_constraints(void) { +static inline void check_stack_alignment_constraints(void) +{ #ifdef CONFIG_ARM_64 - BUILD_BUG_ON((sizeof (struct cpu_user_regs)) & 0xf); + BUILD_BUG_ON((sizeof(struct cpu_user_regs)) & 0xf); BUILD_BUG_ON((offsetof(struct cpu_user_regs, spsr_el1)) & 0xf); BUILD_BUG_ON((offsetof(struct cpu_user_regs, lr)) & 0xf); - BUILD_BUG_ON((sizeof (struct cpu_info)) & 0xf); + BUILD_BUG_ON((sizeof(struct cpu_info)) & 0xf); #else - BUILD_BUG_ON((sizeof (struct cpu_user_regs)) & 0x7); + BUILD_BUG_ON((sizeof(struct cpu_user_regs)) & 0x7); BUILD_BUG_ON((offsetof(struct cpu_user_regs, sp_usr)) & 0x7); - BUILD_BUG_ON((sizeof (struct cpu_info)) & 0x7); + BUILD_BUG_ON((sizeof(struct cpu_info)) & 0x7); #endif } @@ -78,26 +79,26 @@ static int debug_stack_lines = 40; integer_param("debug_stack_lines", debug_stack_lines); static enum { - TRAP, - NATIVE, + TRAP, + NATIVE, } vwfi; static int __init parse_vwfi(const char *s) { - if ( !strcmp(s, "native") ) - vwfi = NATIVE; - else - vwfi = TRAP; + if ( !strcmp(s, "native") ) + vwfi = NATIVE; + else + vwfi = TRAP; - return 0; + return 0; } custom_param("vwfi", parse_vwfi); register_t get_default_hcr_flags(void) { - return (HCR_PTW|HCR_BSU_INNER|HCR_AMO|HCR_IMO|HCR_FMO|HCR_VM| - (vwfi != NATIVE ? (HCR_TWI|HCR_TWE) : 0) | - HCR_TSC|HCR_TAC|HCR_SWIO|HCR_TIDCP|HCR_FB|HCR_TSW); + return (HCR_PTW | HCR_BSU_INNER | HCR_AMO | HCR_IMO | HCR_FMO | HCR_VM | + (vwfi != NATIVE ? (HCR_TWI | HCR_TWE) : 0) | HCR_TSC | HCR_TAC | + HCR_SWIO | HCR_TIDCP | HCR_FB | HCR_TSW); } static enum { @@ -140,7 +141,7 @@ void init_traps(void) WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2); /* Trap Debug and Performance Monitor accesses */ - WRITE_SYSREG(HDCR_TDRA|HDCR_TDOSA|HDCR_TDA|HDCR_TPM|HDCR_TPMCR, + WRITE_SYSREG(HDCR_TDRA | HDCR_TDOSA | HDCR_TDA | HDCR_TPM | HDCR_TPMCR, MDCR_EL2); /* Trap CP15 c15 used for implementation defined registers */ @@ -213,7 +214,7 @@ static inline bool is_zero_register(int reg) */ static register_t *select_user_reg(struct cpu_user_regs *regs, int reg) { - BUG_ON( !guest_mode(regs) ); + BUG_ON(!guest_mode(regs)); #ifdef CONFIG_ARM_32 /* @@ -223,24 +224,30 @@ static register_t *select_user_reg(struct cpu_user_regs *regs, int reg) */ #define REGOFFS(R) offsetof(struct cpu_user_regs, R) - switch ( reg ) + switch (reg) { case 0 ... 7: /* Unbanked registers */ - BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(register_t) != REGOFFS(r7)); + BUILD_BUG_ON(REGOFFS(r0) + 7 * sizeof(register_t) != REGOFFS(r7)); return ®s->r0 + reg; case 8 ... 
12: /* Register banked in FIQ mode */ - BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(register_t) != REGOFFS(r12_fiq)); + BUILD_BUG_ON(REGOFFS(r8_fiq) + 4 * sizeof(register_t) != + REGOFFS(r12_fiq)); if ( fiq_mode(regs) ) return ®s->r8_fiq + reg - 8; else return ®s->r8 + reg - 8; case 13 ... 14: /* Banked SP + LR registers */ - BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(register_t) != REGOFFS(lr_fiq)); - BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(register_t) != REGOFFS(lr_irq)); - BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(register_t) != REGOFFS(lr_svc)); - BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(register_t) != REGOFFS(lr_abt)); - BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(register_t) != REGOFFS(lr_und)); - switch ( regs->cpsr & PSR_MODE_MASK ) + BUILD_BUG_ON(REGOFFS(sp_fiq) + 1 * sizeof(register_t) != + REGOFFS(lr_fiq)); + BUILD_BUG_ON(REGOFFS(sp_irq) + 1 * sizeof(register_t) != + REGOFFS(lr_irq)); + BUILD_BUG_ON(REGOFFS(sp_svc) + 1 * sizeof(register_t) != + REGOFFS(lr_svc)); + BUILD_BUG_ON(REGOFFS(sp_abt) + 1 * sizeof(register_t) != + REGOFFS(lr_abt)); + BUILD_BUG_ON(REGOFFS(sp_und) + 1 * sizeof(register_t) != + REGOFFS(lr_und)); + switch (regs->cpsr & PSR_MODE_MASK) { case PSR_MODE_USR: case PSR_MODE_SYS: /* Sys regs are the usr regs */ @@ -299,7 +306,7 @@ static const char *decode_fsc(uint32_t fsc, int *level) { const char *msg = NULL; - switch ( fsc & 0x3f ) + switch (fsc & 0x3f) { case FSC_FLT_TRANS ... FSC_FLT_TRANS + 3: msg = "Translation fault"; @@ -354,13 +361,18 @@ static const char *decode_fsc(uint32_t fsc, int *level) static const char *fsc_level_str(int level) { - switch ( level ) + switch (level) { - case -1: return ""; - case 1: return " at level 1"; - case 2: return " at level 2"; - case 3: return " at level 3"; - default: return " (level invalid)"; + case -1: + return ""; + case 1: + return " at level 1"; + case 2: + return " at level 2"; + case 3: + return " at level 3"; + default: + return " (level invalid)"; } } @@ -371,11 +383,9 @@ void panic_PAR(uint64_t par) int stage = par & PAR_STAGE2 ? 2 : 1; int second_in_first = !!(par & PAR_STAGE21); - msg = decode_fsc( (par&PAR_FSC_MASK) >> PAR_FSC_SHIFT, &level); + msg = decode_fsc((par & PAR_FSC_MASK) >> PAR_FSC_SHIFT, &level); - printk("PAR: %016"PRIx64": %s stage %d%s%s\n", - par, msg, - stage, + printk("PAR: %016" PRIx64 ": %s stage %d%s%s\n", par, msg, stage, second_in_first ? " during second stage lookup" : "", fsc_level_str(level)); @@ -386,7 +396,8 @@ static void cpsr_switch_mode(struct cpu_user_regs *regs, int mode) { uint32_t sctlr = READ_SYSREG32(SCTLR_EL1); - regs->cpsr &= ~(PSR_MODE_MASK|PSR_IT_MASK|PSR_JAZELLE|PSR_BIG_ENDIAN|PSR_THUMB); + regs->cpsr &= ~(PSR_MODE_MASK | PSR_IT_MASK | PSR_JAZELLE | PSR_BIG_ENDIAN | + PSR_THUMB); regs->cpsr |= mode; regs->cpsr |= PSR_IRQ_MASK; @@ -420,7 +431,7 @@ static void inject_undef32_exception(struct cpu_user_regs *regs) /* Saved PC points to the instruction past the faulting instruction. */ uint32_t return_offset = is_thumb ? 2 : 4; - BUG_ON( !is_32bit_domain(current->domain) ); + BUG_ON(!is_32bit_domain(current->domain)); /* Update processor mode */ cpsr_switch_mode(regs, PSR_MODE_UND); @@ -438,8 +449,7 @@ static void inject_undef32_exception(struct cpu_user_regs *regs) * adjustments). See TakePrefetchAbortException and * TakeDataAbortException pseudocode in ARM ARM. 
*/ -static void inject_abt32_exception(struct cpu_user_regs *regs, - int prefetch, +static void inject_abt32_exception(struct cpu_user_regs *regs, int prefetch, register_t addr) { uint32_t spsr = regs->cpsr; @@ -448,7 +458,7 @@ static void inject_abt32_exception(struct cpu_user_regs *regs, uint32_t return_offset = is_thumb ? 4 : 0; register_t fsr; - BUG_ON( !is_32bit_domain(current->domain) ); + BUG_ON(!is_32bit_domain(current->domain)); cpsr_switch_mode(regs, PSR_MODE_ABT); @@ -495,14 +505,12 @@ static void inject_abt32_exception(struct cpu_user_regs *regs, } } -static void inject_dabt32_exception(struct cpu_user_regs *regs, - register_t addr) +static void inject_dabt32_exception(struct cpu_user_regs *regs, register_t addr) { inject_abt32_exception(regs, 0, addr); } -static void inject_pabt32_exception(struct cpu_user_regs *regs, - register_t addr) +static void inject_pabt32_exception(struct cpu_user_regs *regs, register_t addr) { inject_abt32_exception(regs, 1, addr); } @@ -518,7 +526,7 @@ static vaddr_t exception_handler64(struct cpu_user_regs *regs, vaddr_t offset) if ( usr_mode(regs) ) base += VECTOR64_LOWER32_BASE; - else if ( psr_mode(regs->cpsr,PSR_MODE_EL0t) ) + else if ( psr_mode(regs->cpsr, PSR_MODE_EL0t) ) base += VECTOR64_LOWER64_BASE; else /* Otherwise must be from kernel mode */ base += VECTOR64_CURRENT_SPx_BASE; @@ -536,25 +544,23 @@ void inject_undef64_exception(struct cpu_user_regs *regs, int instr_len) .ec = HSR_EC_UNKNOWN, }; - BUG_ON( is_32bit_domain(current->domain) ); + BUG_ON(is_32bit_domain(current->domain)); handler = exception_handler64(regs, VECTOR64_SYNC_OFFSET); regs->spsr_el1 = regs->cpsr; regs->elr_el1 = regs->pc; - regs->cpsr = PSR_MODE_EL1h | PSR_ABT_MASK | PSR_FIQ_MASK | \ - PSR_IRQ_MASK | PSR_DBG_MASK; + regs->cpsr = PSR_MODE_EL1h | PSR_ABT_MASK | PSR_FIQ_MASK | PSR_IRQ_MASK | + PSR_DBG_MASK; regs->pc = handler; WRITE_SYSREG32(esr.bits, ESR_EL1); } /* Inject an abort exception into a 64 bit guest */ -static void inject_abt64_exception(struct cpu_user_regs *regs, - int prefetch, - register_t addr, - int instr_len) +static void inject_abt64_exception(struct cpu_user_regs *regs, int prefetch, + register_t addr, int instr_len) { vaddr_t handler; union hsr esr = { @@ -563,37 +569,35 @@ static void inject_abt64_exception(struct cpu_user_regs *regs, }; if ( psr_mode_is_user(regs) ) - esr.ec = prefetch - ? HSR_EC_INSTR_ABORT_LOWER_EL : HSR_EC_DATA_ABORT_LOWER_EL; + esr.ec = + prefetch ? HSR_EC_INSTR_ABORT_LOWER_EL : HSR_EC_DATA_ABORT_LOWER_EL; else - esr.ec = prefetch - ? HSR_EC_INSTR_ABORT_CURR_EL : HSR_EC_DATA_ABORT_CURR_EL; + esr.ec = + prefetch ? 
HSR_EC_INSTR_ABORT_CURR_EL : HSR_EC_DATA_ABORT_CURR_EL; - BUG_ON( is_32bit_domain(current->domain) ); + BUG_ON(is_32bit_domain(current->domain)); handler = exception_handler64(regs, VECTOR64_SYNC_OFFSET); regs->spsr_el1 = regs->cpsr; regs->elr_el1 = regs->pc; - regs->cpsr = PSR_MODE_EL1h | PSR_ABT_MASK | PSR_FIQ_MASK | \ - PSR_IRQ_MASK | PSR_DBG_MASK; + regs->cpsr = PSR_MODE_EL1h | PSR_ABT_MASK | PSR_FIQ_MASK | PSR_IRQ_MASK | + PSR_DBG_MASK; regs->pc = handler; WRITE_SYSREG(addr, FAR_EL1); WRITE_SYSREG32(esr.bits, ESR_EL1); } -static void inject_dabt64_exception(struct cpu_user_regs *regs, - register_t addr, - int instr_len) +static void inject_dabt64_exception(struct cpu_user_regs *regs, register_t addr, + int instr_len) { inject_abt64_exception(regs, 0, addr, instr_len); } -static void inject_iabt64_exception(struct cpu_user_regs *regs, - register_t addr, - int instr_len) +static void inject_iabt64_exception(struct cpu_user_regs *regs, register_t addr, + int instr_len) { inject_abt64_exception(regs, 1, addr, instr_len); } @@ -602,49 +606,47 @@ static void inject_iabt64_exception(struct cpu_user_regs *regs, void inject_undef_exception(struct cpu_user_regs *regs, const union hsr hsr) { - if ( is_32bit_domain(current->domain) ) - inject_undef32_exception(regs); + if ( is_32bit_domain(current->domain) ) + inject_undef32_exception(regs); #ifdef CONFIG_ARM_64 - else - inject_undef64_exception(regs, hsr.len); + else + inject_undef64_exception(regs, hsr.len); #endif } -static void inject_iabt_exception(struct cpu_user_regs *regs, - register_t addr, +static void inject_iabt_exception(struct cpu_user_regs *regs, register_t addr, int instr_len) { - if ( is_32bit_domain(current->domain) ) - inject_pabt32_exception(regs, addr); + if ( is_32bit_domain(current->domain) ) + inject_pabt32_exception(regs, addr); #ifdef CONFIG_ARM_64 - else - inject_iabt64_exception(regs, addr, instr_len); + else + inject_iabt64_exception(regs, addr, instr_len); #endif } -static void inject_dabt_exception(struct cpu_user_regs *regs, - register_t addr, +static void inject_dabt_exception(struct cpu_user_regs *regs, register_t addr, int instr_len) { - if ( is_32bit_domain(current->domain) ) - inject_dabt32_exception(regs, addr); + if ( is_32bit_domain(current->domain) ) + inject_dabt32_exception(regs, addr); #ifdef CONFIG_ARM_64 - else - inject_dabt64_exception(regs, addr, instr_len); + else + inject_dabt64_exception(regs, addr, instr_len); #endif } /* Inject a virtual Abort/SError into the guest. */ static void inject_vabt_exception(struct cpu_user_regs *regs) { - const union hsr hsr = { .bits = regs->hsr }; + const union hsr hsr = {.bits = regs->hsr}; /* * SVC/HVC/SMC already have an adjusted PC (See ARM ARM DDI 0487A.j * D1.10.1 for more details), which we need to correct in order to * return to after having injected the SError. 
*/ - switch ( hsr.ec ) + switch (hsr.ec) { case HSR_EC_SVC32: case HSR_EC_HVC32: @@ -717,7 +719,8 @@ crash_system: do_unexpected_trap("SError", regs); } -struct reg_ctxt { +struct reg_ctxt +{ /* Guest-side state */ uint32_t sctlr_el1; register_t tcr_el1; @@ -739,54 +742,55 @@ static const char *mode_string(uint32_t cpsr) { uint32_t mode; static const char *mode_strings[] = { - [PSR_MODE_USR] = "32-bit Guest USR", - [PSR_MODE_FIQ] = "32-bit Guest FIQ", - [PSR_MODE_IRQ] = "32-bit Guest IRQ", - [PSR_MODE_SVC] = "32-bit Guest SVC", - [PSR_MODE_MON] = "32-bit Monitor", - [PSR_MODE_ABT] = "32-bit Guest ABT", - [PSR_MODE_HYP] = "Hypervisor", - [PSR_MODE_UND] = "32-bit Guest UND", - [PSR_MODE_SYS] = "32-bit Guest SYS", + [PSR_MODE_USR] = "32-bit Guest USR", + [PSR_MODE_FIQ] = "32-bit Guest FIQ", + [PSR_MODE_IRQ] = "32-bit Guest IRQ", + [PSR_MODE_SVC] = "32-bit Guest SVC", + [PSR_MODE_MON] = "32-bit Monitor", + [PSR_MODE_ABT] = "32-bit Guest ABT", + [PSR_MODE_HYP] = "Hypervisor", + [PSR_MODE_UND] = "32-bit Guest UND", + [PSR_MODE_SYS] = "32-bit Guest SYS", #ifdef CONFIG_ARM_64 - [PSR_MODE_EL3h] = "64-bit EL3h (Monitor, handler)", - [PSR_MODE_EL3t] = "64-bit EL3t (Monitor, thread)", - [PSR_MODE_EL2h] = "64-bit EL2h (Hypervisor, handler)", - [PSR_MODE_EL2t] = "64-bit EL2t (Hypervisor, thread)", - [PSR_MODE_EL1h] = "64-bit EL1h (Guest Kernel, handler)", - [PSR_MODE_EL1t] = "64-bit EL1t (Guest Kernel, thread)", - [PSR_MODE_EL0t] = "64-bit EL0t (Guest User)", + [PSR_MODE_EL3h] = "64-bit EL3h (Monitor, handler)", + [PSR_MODE_EL3t] = "64-bit EL3t (Monitor, thread)", + [PSR_MODE_EL2h] = "64-bit EL2h (Hypervisor, handler)", + [PSR_MODE_EL2t] = "64-bit EL2t (Hypervisor, thread)", + [PSR_MODE_EL1h] = "64-bit EL1h (Guest Kernel, handler)", + [PSR_MODE_EL1t] = "64-bit EL1t (Guest Kernel, thread)", + [PSR_MODE_EL0t] = "64-bit EL0t (Guest User)", #endif }; mode = cpsr & PSR_MODE_MASK; if ( mode >= ARRAY_SIZE(mode_strings) ) return "Unknown"; - return mode_strings[mode] ? : "Unknown"; + return mode_strings[mode] ?: "Unknown"; } static void show_registers_32(const struct cpu_user_regs *regs, - const struct reg_ctxt *ctxt, - bool guest_mode, + const struct reg_ctxt *ctxt, bool guest_mode, const struct vcpu *v) { - #ifdef CONFIG_ARM_64 - BUG_ON( ! 
(regs->cpsr & PSR_MODE_BIT) ); - printk("PC: %08"PRIx32"\n", regs->pc32); + BUG_ON(!(regs->cpsr & PSR_MODE_BIT)); + printk("PC: %08" PRIx32 "\n", regs->pc32); #else - printk("PC: %08"PRIx32, regs->pc); + printk("PC: %08" PRIx32, regs->pc); if ( !guest_mode ) printk(" %pS", _p(regs->pc)); printk("\n"); #endif - printk("CPSR: %08"PRIx32" MODE:%s\n", regs->cpsr, + printk("CPSR: %08" PRIx32 " MODE:%s\n", regs->cpsr, mode_string(regs->cpsr)); - printk(" R0: %08"PRIx32" R1: %08"PRIx32" R2: %08"PRIx32" R3: %08"PRIx32"\n", + printk(" R0: %08" PRIx32 " R1: %08" PRIx32 " R2: %08" PRIx32 + " R3: %08" PRIx32 "\n", regs->r0, regs->r1, regs->r2, regs->r3); - printk(" R4: %08"PRIx32" R5: %08"PRIx32" R6: %08"PRIx32" R7: %08"PRIx32"\n", + printk(" R4: %08" PRIx32 " R5: %08" PRIx32 " R6: %08" PRIx32 + " R7: %08" PRIx32 "\n", regs->r4, regs->r5, regs->r6, regs->r7); - printk(" R8: %08"PRIx32" R9: %08"PRIx32" R10:%08"PRIx32" R11:%08"PRIx32" R12:%08"PRIx32"\n", + printk(" R8: %08" PRIx32 " R9: %08" PRIx32 " R10:%08" PRIx32 + " R11:%08" PRIx32 " R12:%08" PRIx32 "\n", regs->r8, regs->r9, regs->r10, #ifdef CONFIG_ARM_64 regs->r11, @@ -797,115 +801,113 @@ static void show_registers_32(const struct cpu_user_regs *regs, if ( guest_mode ) { - printk("USR: SP: %08"PRIx32" LR: %08"PRIregister"\n", - regs->sp_usr, regs->lr); - printk("SVC: SP: %08"PRIx32" LR: %08"PRIx32" SPSR:%08"PRIx32"\n", + printk("USR: SP: %08" PRIx32 " LR: %08" PRIregister "\n", regs->sp_usr, + regs->lr); + printk("SVC: SP: %08" PRIx32 " LR: %08" PRIx32 " SPSR:%08" PRIx32 "\n", regs->sp_svc, regs->lr_svc, regs->spsr_svc); - printk("ABT: SP: %08"PRIx32" LR: %08"PRIx32" SPSR:%08"PRIx32"\n", + printk("ABT: SP: %08" PRIx32 " LR: %08" PRIx32 " SPSR:%08" PRIx32 "\n", regs->sp_abt, regs->lr_abt, regs->spsr_abt); - printk("UND: SP: %08"PRIx32" LR: %08"PRIx32" SPSR:%08"PRIx32"\n", + printk("UND: SP: %08" PRIx32 " LR: %08" PRIx32 " SPSR:%08" PRIx32 "\n", regs->sp_und, regs->lr_und, regs->spsr_und); - printk("IRQ: SP: %08"PRIx32" LR: %08"PRIx32" SPSR:%08"PRIx32"\n", + printk("IRQ: SP: %08" PRIx32 " LR: %08" PRIx32 " SPSR:%08" PRIx32 "\n", regs->sp_irq, regs->lr_irq, regs->spsr_irq); - printk("FIQ: SP: %08"PRIx32" LR: %08"PRIx32" SPSR:%08"PRIx32"\n", + printk("FIQ: SP: %08" PRIx32 " LR: %08" PRIx32 " SPSR:%08" PRIx32 "\n", regs->sp_fiq, regs->lr_fiq, regs->spsr_fiq); - printk("FIQ: R8: %08"PRIx32" R9: %08"PRIx32" R10:%08"PRIx32" R11:%08"PRIx32" R12:%08"PRIx32"\n", - regs->r8_fiq, regs->r9_fiq, regs->r10_fiq, regs->r11_fiq, regs->r11_fiq); + printk("FIQ: R8: %08" PRIx32 " R9: %08" PRIx32 " R10:%08" PRIx32 + " R11:%08" PRIx32 " R12:%08" PRIx32 "\n", + regs->r8_fiq, regs->r9_fiq, regs->r10_fiq, regs->r11_fiq, + regs->r11_fiq); } #ifndef CONFIG_ARM_64 else { - printk("HYP: SP: %08"PRIx32" LR: %08"PRIregister"\n", regs->sp, regs->lr); + printk("HYP: SP: %08" PRIx32 " LR: %08" PRIregister "\n", regs->sp, + regs->lr); } #endif printk("\n"); if ( guest_mode ) { - printk(" SCTLR: %08"PRIx32"\n", ctxt->sctlr_el1); - printk(" TCR: %08"PRIregister"\n", ctxt->tcr_el1); - printk(" TTBR0: %016"PRIx64"\n", ctxt->ttbr0_el1); - printk(" TTBR1: %016"PRIx64"\n", ctxt->ttbr1_el1); - printk(" IFAR: %08"PRIx32", IFSR: %08"PRIx32"\n" - " DFAR: %08"PRIx32", DFSR: %08"PRIx32"\n", + printk(" SCTLR: %08" PRIx32 "\n", ctxt->sctlr_el1); + printk(" TCR: %08" PRIregister "\n", ctxt->tcr_el1); + printk(" TTBR0: %016" PRIx64 "\n", ctxt->ttbr0_el1); + printk(" TTBR1: %016" PRIx64 "\n", ctxt->ttbr1_el1); + printk(" IFAR: %08" PRIx32 ", IFSR: %08" PRIx32 "\n" + " DFAR: %08" PRIx32 ", DFSR: %08" 
PRIx32 "\n", #ifdef CONFIG_ARM_64 - (uint32_t)(ctxt->far >> 32), - ctxt->ifsr32_el2, - (uint32_t)(ctxt->far & 0xffffffff), - ctxt->esr_el1 + (uint32_t)(ctxt->far >> 32), ctxt->ifsr32_el2, + (uint32_t)(ctxt->far & 0xffffffff), ctxt->esr_el1 #else ctxt->ifar, ctxt->ifsr, ctxt->dfar, ctxt->dfsr #endif - ); + ); printk("\n"); } } #ifdef CONFIG_ARM_64 static void show_registers_64(const struct cpu_user_regs *regs, - const struct reg_ctxt *ctxt, - bool guest_mode, + const struct reg_ctxt *ctxt, bool guest_mode, const struct vcpu *v) { + BUG_ON((regs->cpsr & PSR_MODE_BIT)); - BUG_ON( (regs->cpsr & PSR_MODE_BIT) ); - - printk("PC: %016"PRIx64, regs->pc); + printk("PC: %016" PRIx64, regs->pc); if ( !guest_mode ) printk(" %pS", _p(regs->pc)); printk("\n"); - printk("LR: %016"PRIx64"\n", regs->lr); + printk("LR: %016" PRIx64 "\n", regs->lr); if ( guest_mode ) { - printk("SP_EL0: %016"PRIx64"\n", regs->sp_el0); - printk("SP_EL1: %016"PRIx64"\n", regs->sp_el1); + printk("SP_EL0: %016" PRIx64 "\n", regs->sp_el0); + printk("SP_EL1: %016" PRIx64 "\n", regs->sp_el1); } else { - printk("SP: %016"PRIx64"\n", regs->sp); + printk("SP: %016" PRIx64 "\n", regs->sp); } - printk("CPSR: %08"PRIx32" MODE:%s\n", regs->cpsr, + printk("CPSR: %08" PRIx32 " MODE:%s\n", regs->cpsr, mode_string(regs->cpsr)); - printk(" X0: %016"PRIx64" X1: %016"PRIx64" X2: %016"PRIx64"\n", + printk(" X0: %016" PRIx64 " X1: %016" PRIx64 " X2: %016" PRIx64 "\n", regs->x0, regs->x1, regs->x2); - printk(" X3: %016"PRIx64" X4: %016"PRIx64" X5: %016"PRIx64"\n", + printk(" X3: %016" PRIx64 " X4: %016" PRIx64 " X5: %016" PRIx64 "\n", regs->x3, regs->x4, regs->x5); - printk(" X6: %016"PRIx64" X7: %016"PRIx64" X8: %016"PRIx64"\n", + printk(" X6: %016" PRIx64 " X7: %016" PRIx64 " X8: %016" PRIx64 "\n", regs->x6, regs->x7, regs->x8); - printk(" X9: %016"PRIx64" X10: %016"PRIx64" X11: %016"PRIx64"\n", + printk(" X9: %016" PRIx64 " X10: %016" PRIx64 " X11: %016" PRIx64 "\n", regs->x9, regs->x10, regs->x11); - printk(" X12: %016"PRIx64" X13: %016"PRIx64" X14: %016"PRIx64"\n", + printk(" X12: %016" PRIx64 " X13: %016" PRIx64 " X14: %016" PRIx64 "\n", regs->x12, regs->x13, regs->x14); - printk(" X15: %016"PRIx64" X16: %016"PRIx64" X17: %016"PRIx64"\n", + printk(" X15: %016" PRIx64 " X16: %016" PRIx64 " X17: %016" PRIx64 "\n", regs->x15, regs->x16, regs->x17); - printk(" X18: %016"PRIx64" X19: %016"PRIx64" X20: %016"PRIx64"\n", + printk(" X18: %016" PRIx64 " X19: %016" PRIx64 " X20: %016" PRIx64 "\n", regs->x18, regs->x19, regs->x20); - printk(" X21: %016"PRIx64" X22: %016"PRIx64" X23: %016"PRIx64"\n", + printk(" X21: %016" PRIx64 " X22: %016" PRIx64 " X23: %016" PRIx64 "\n", regs->x21, regs->x22, regs->x23); - printk(" X24: %016"PRIx64" X25: %016"PRIx64" X26: %016"PRIx64"\n", + printk(" X24: %016" PRIx64 " X25: %016" PRIx64 " X26: %016" PRIx64 "\n", regs->x24, regs->x25, regs->x26); - printk(" X27: %016"PRIx64" X28: %016"PRIx64" FP: %016"PRIx64"\n", + printk(" X27: %016" PRIx64 " X28: %016" PRIx64 " FP: %016" PRIx64 "\n", regs->x27, regs->x28, regs->fp); printk("\n"); if ( guest_mode ) { - printk(" ELR_EL1: %016"PRIx64"\n", regs->elr_el1); - printk(" ESR_EL1: %08"PRIx32"\n", ctxt->esr_el1); - printk(" FAR_EL1: %016"PRIx64"\n", ctxt->far); + printk(" ELR_EL1: %016" PRIx64 "\n", regs->elr_el1); + printk(" ESR_EL1: %08" PRIx32 "\n", ctxt->esr_el1); + printk(" FAR_EL1: %016" PRIx64 "\n", ctxt->far); printk("\n"); - printk(" SCTLR_EL1: %08"PRIx32"\n", ctxt->sctlr_el1); - printk(" TCR_EL1: %08"PRIregister"\n", ctxt->tcr_el1); - printk(" TTBR0_EL1: 
%016"PRIx64"\n", ctxt->ttbr0_el1); - printk(" TTBR1_EL1: %016"PRIx64"\n", ctxt->ttbr1_el1); + printk(" SCTLR_EL1: %08" PRIx32 "\n", ctxt->sctlr_el1); + printk(" TCR_EL1: %08" PRIregister "\n", ctxt->tcr_el1); + printk(" TTBR0_EL1: %016" PRIx64 "\n", ctxt->ttbr0_el1); + printk(" TTBR1_EL1: %016" PRIx64 "\n", ctxt->ttbr1_el1); printk("\n"); } } #endif static void _show_registers(const struct cpu_user_regs *regs, - const struct reg_ctxt *ctxt, - bool guest_mode, + const struct reg_ctxt *ctxt, bool guest_mode, const struct vcpu *v) { print_xen_info(); @@ -939,22 +941,22 @@ static void _show_registers(const struct cpu_user_regs *regs, show_registers_32(regs, ctxt, guest_mode, v); #endif } - printk(" VTCR_EL2: %08"PRIx32"\n", READ_SYSREG32(VTCR_EL2)); - printk(" VTTBR_EL2: %016"PRIx64"\n", ctxt->vttbr_el2); + printk(" VTCR_EL2: %08" PRIx32 "\n", READ_SYSREG32(VTCR_EL2)); + printk(" VTTBR_EL2: %016" PRIx64 "\n", ctxt->vttbr_el2); printk("\n"); - printk(" SCTLR_EL2: %08"PRIx32"\n", READ_SYSREG32(SCTLR_EL2)); - printk(" HCR_EL2: %016"PRIregister"\n", READ_SYSREG(HCR_EL2)); - printk(" TTBR0_EL2: %016"PRIx64"\n", READ_SYSREG64(TTBR0_EL2)); + printk(" SCTLR_EL2: %08" PRIx32 "\n", READ_SYSREG32(SCTLR_EL2)); + printk(" HCR_EL2: %016" PRIregister "\n", READ_SYSREG(HCR_EL2)); + printk(" TTBR0_EL2: %016" PRIx64 "\n", READ_SYSREG64(TTBR0_EL2)); printk("\n"); - printk(" ESR_EL2: %08"PRIx32"\n", regs->hsr); - printk(" HPFAR_EL2: %016"PRIregister"\n", READ_SYSREG(HPFAR_EL2)); + printk(" ESR_EL2: %08" PRIx32 "\n", regs->hsr); + printk(" HPFAR_EL2: %016" PRIregister "\n", READ_SYSREG(HPFAR_EL2)); #ifdef CONFIG_ARM_32 - printk(" HDFAR: %08"PRIx32"\n", READ_CP32(HDFAR)); - printk(" HIFAR: %08"PRIx32"\n", READ_CP32(HIFAR)); + printk(" HDFAR: %08" PRIx32 "\n", READ_CP32(HDFAR)); + printk(" HIFAR: %08" PRIx32 "\n", READ_CP32(HIFAR)); #else - printk(" FAR_EL2: %016"PRIx64"\n", READ_SYSREG64(FAR_EL2)); + printk(" FAR_EL2: %016" PRIx64 "\n", READ_SYSREG64(FAR_EL2)); #endif printk("\n"); } @@ -1019,7 +1021,7 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs) return; } - switch ( regs->cpsr & PSR_MODE_MASK ) + switch (regs->cpsr & PSR_MODE_MASK) { case PSR_MODE_USR: case PSR_MODE_SYS: @@ -1067,9 +1069,9 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs) return; } - printk("Guest stack trace from sp=%"PRIvaddr":\n ", sp); + printk("Guest stack trace from sp=%" PRIvaddr ":\n ", sp); - if ( sp & ( sizeof(long) - 1 ) ) + if ( sp & (sizeof(long) - 1) ) { printk("Stack is misaligned\n"); return; @@ -1086,7 +1088,7 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs) stack = mapped + (sp & ~PAGE_MASK); - for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ ) + for ( i = 0; i < (debug_stack_lines * stack_words_per_line); i++ ) { if ( (((long)stack - 1) ^ ((long)(stack + 1) - 1)) & PAGE_SIZE ) break; @@ -1103,7 +1105,7 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs) put_page(page); } -#define STACK_BEFORE_EXCEPTION(regs) ((register_t*)(regs)->sp) +#define STACK_BEFORE_EXCEPTION(regs) ((register_t *)(regs)->sp) #ifdef CONFIG_ARM_32 /* Frame pointer points to the return address: * (largest address) @@ -1120,7 +1122,7 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs) * | * v (smallest address, sp) */ -#define STACK_FRAME_BASE(fp) ((register_t*)(fp) - 1) +#define STACK_FRAME_BASE(fp) ((register_t *)(fp)-1) #else /* Frame pointer points to the next frame: * (largest 
address) @@ -1137,7 +1139,7 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs) * | * v (smallest address, sp) */ -#define STACK_FRAME_BASE(fp) ((register_t*)(fp)) +#define STACK_FRAME_BASE(fp) ((register_t *)(fp)) #endif static void show_trace(const struct cpu_user_regs *regs) { @@ -1149,22 +1151,21 @@ static void show_trace(const struct cpu_user_regs *regs) printk(" [<%p>] %pS (LR)\n", _p(regs->lr), _p(regs->lr)); /* Bounds for range of valid frame pointer. */ - low = (register_t)(STACK_BEFORE_EXCEPTION(regs)); - high = (low & ~(STACK_SIZE - 1)) + - (STACK_SIZE - sizeof(struct cpu_info)); + low = (register_t)(STACK_BEFORE_EXCEPTION(regs)); + high = (low & ~(STACK_SIZE - 1)) + (STACK_SIZE - sizeof(struct cpu_info)); /* The initial frame pointer. */ next = regs->fp; - for ( ; ; ) + for ( ;; ) { if ( (next < low) || (next >= high) ) break; /* Ordinary stack frame. */ frame = STACK_FRAME_BASE(next); - next = frame[0]; - addr = frame[1]; + next = frame[0]; + addr = frame[1]; printk(" [<%p>] %pS\n", _p(addr), _p(addr)); @@ -1184,9 +1185,9 @@ void show_stack(const struct cpu_user_regs *regs) printk("Xen stack trace from sp=%p:\n ", stack); - for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ ) + for ( i = 0; i < (debug_stack_lines * stack_words_per_line); i++ ) { - if ( ((long)stack & (STACK_SIZE-BYTES_PER_LONG)) == 0 ) + if ( ((long)stack & (STACK_SIZE - BYTES_PER_LONG)) == 0 ) break; if ( (i != 0) && ((i % stack_words_per_line) == 0) ) printk("\n "); @@ -1209,8 +1210,8 @@ void show_execution_state(const struct cpu_user_regs *regs) void vcpu_show_execution_state(struct vcpu *v) { - printk("*** Dumping Dom%d vcpu#%d state: ***\n", - v->domain->domain_id, v->vcpu_id); + printk("*** Dumping Dom%d vcpu#%d state: ***\n", v->domain->domain_id, + v->vcpu_id); if ( v == current ) { @@ -1261,7 +1262,7 @@ int do_bug_frame(const struct cpu_user_regs *regs, vaddr_t pc) } } } - found: +found: if ( !bug ) return -ENOENT; @@ -1277,7 +1278,7 @@ int do_bug_frame(const struct cpu_user_regs *regs, vaddr_t pc) } lineno = bug_line(bug); - switch ( id ) + switch (id) { case BUGFRAME_warn: printk("Xen WARN at %s%s:%d\n", prefix, filename, lineno); @@ -1299,13 +1300,13 @@ int do_bug_frame(const struct cpu_user_regs *regs, vaddr_t pc) if ( !is_kernel(predicate) ) predicate = ""; - printk("Assertion '%s' failed at %s%s:%d\n", - predicate, prefix, filename, lineno); + printk("Assertion '%s' failed at %s%s:%d\n", predicate, prefix, + filename, lineno); if ( debugger_trap_fatal(TRAP_invalid_op, regs) ) return 0; show_execution_state(regs); - panic("Assertion '%s' failed at %s%s:%d\n", - predicate, prefix, filename, lineno); + panic("Assertion '%s' failed at %s%s:%d\n", predicate, prefix, filename, + lineno); } return -EINVAL; @@ -1319,7 +1320,7 @@ static void do_trap_brk(struct cpu_user_regs *regs, const union hsr hsr) */ BUG_ON(!hyp_mode(regs)); - switch ( hsr.brk.comment ) + switch (hsr.brk.comment) { case BRK_BUG_FRAME_IMM: if ( do_bug_frame(regs, regs->pc) ) @@ -1330,7 +1331,7 @@ static void do_trap_brk(struct cpu_user_regs *regs, const union hsr hsr) break; default: -die: + die: do_unexpected_trap("Undefined Breakpoint Value", regs); } } @@ -1341,45 +1342,44 @@ static register_t do_deprecated_hypercall(void) struct cpu_user_regs *regs = guest_cpu_user_regs(); const register_t op = #ifdef CONFIG_ARM_64 - !is_32bit_domain(current->domain) ? - regs->x16 - : + !is_32bit_domain(current->domain) ? 
regs->x16 : #endif - regs->r12; + regs->r12; - gdprintk(XENLOG_DEBUG, "%pv: deprecated hypercall %lu\n", - current, (unsigned long)op); + gdprintk(XENLOG_DEBUG, "%pv: deprecated hypercall %lu\n", current, + (unsigned long)op); return -ENOSYS; } -typedef register_t (*arm_hypercall_fn_t)( - register_t, register_t, register_t, register_t, register_t); +typedef register_t (*arm_hypercall_fn_t)(register_t, register_t, register_t, + register_t, register_t); -typedef struct { +typedef struct +{ arm_hypercall_fn_t fn; int nr_args; } arm_hypercall_t; -#define HYPERCALL(_name, _nr_args) \ - [ __HYPERVISOR_ ## _name ] = { \ - .fn = (arm_hypercall_fn_t) &do_ ## _name, \ - .nr_args = _nr_args, \ +#define HYPERCALL(_name, _nr_args) \ + [__HYPERVISOR_##_name] = { \ + .fn = (arm_hypercall_fn_t)&do_##_name, \ + .nr_args = _nr_args, \ } -#define HYPERCALL_ARM(_name, _nr_args) \ - [ __HYPERVISOR_ ## _name ] = { \ - .fn = (arm_hypercall_fn_t) &do_arm_ ## _name, \ - .nr_args = _nr_args, \ +#define HYPERCALL_ARM(_name, _nr_args) \ + [__HYPERVISOR_##_name] = { \ + .fn = (arm_hypercall_fn_t)&do_arm_##_name, \ + .nr_args = _nr_args, \ } /* * Only use this for hypercalls which were deprecated (i.e. replaced * by something else) before Xen on ARM was created, i.e. *not* for * hypercalls which are simply not yet used on ARM. */ -#define HYPERCALL_DEPRECATED(_name, _nr_args) \ - [ __HYPERVISOR_##_name ] = { \ - .fn = (arm_hypercall_fn_t) &do_deprecated_hypercall, \ - .nr_args = _nr_args, \ +#define HYPERCALL_DEPRECATED(_name, _nr_args) \ + [__HYPERVISOR_##_name] = { \ + .fn = (arm_hypercall_fn_t)&do_deprecated_hypercall, \ + .nr_args = _nr_args, \ } static arm_hypercall_t arm_hypercall_table[] = { @@ -1412,15 +1412,15 @@ static void do_debug_trap(struct cpu_user_regs *regs, unsigned int code) uint32_t reg; uint32_t domid = current->domain->domain_id; - switch ( code ) + switch (code) { case 0xe0 ... 0xef: reg = code - 0xe0; - printk("DOM%d: R%d = 0x%"PRIregister" at 0x%"PRIvaddr"\n", - domid, reg, get_user_reg(regs, reg), regs->pc); + printk("DOM%d: R%d = 0x%" PRIregister " at 0x%" PRIvaddr "\n", domid, + reg, get_user_reg(regs, reg), regs->pc); break; case 0xfd: - printk("DOM%d: Reached %"PRIvaddr"\n", domid, regs->pc); + printk("DOM%d: Reached %" PRIvaddr "\n", domid, regs->pc); break; case 0xfe: printk("%c", (char)(get_user_reg(regs, 0) & 0xff)); @@ -1459,7 +1459,7 @@ static void do_trap_hypercall(struct cpu_user_regs *regs, register_t *nr, { arm_hypercall_fn_t call = NULL; - BUILD_BUG_ON(NR_hypercalls < ARRAY_SIZE(arm_hypercall_table) ); + BUILD_BUG_ON(NR_hypercalls < ARRAY_SIZE(arm_hypercall_table)); if ( hsr.iss != XEN_HYPERCALL_TAG ) { @@ -1490,14 +1490,20 @@ static void do_trap_hypercall(struct cpu_user_regs *regs, register_t *nr, if ( !current->hcall_preempted ) { /* Deliberately corrupt parameter regs used by this hypercall. 
*/ - switch ( arm_hypercall_table[*nr].nr_args ) { - case 5: HYPERCALL_ARG5(regs) = 0xDEADBEEF; - case 4: HYPERCALL_ARG4(regs) = 0xDEADBEEF; - case 3: HYPERCALL_ARG3(regs) = 0xDEADBEEF; - case 2: HYPERCALL_ARG2(regs) = 0xDEADBEEF; + switch (arm_hypercall_table[*nr].nr_args) + { + case 5: + HYPERCALL_ARG5(regs) = 0xDEADBEEF; + case 4: + HYPERCALL_ARG4(regs) = 0xDEADBEEF; + case 3: + HYPERCALL_ARG3(regs) = 0xDEADBEEF; + case 2: + HYPERCALL_ARG2(regs) = 0xDEADBEEF; case 1: /* Don't clobber x0/r0 -- it's the return value */ break; - default: BUG(); + default: + BUG(); } *nr = 0xDEADBEEF; } @@ -1505,7 +1511,7 @@ static void do_trap_hypercall(struct cpu_user_regs *regs, register_t *nr, /* Ensure the hypercall trap instruction is re-executed. */ if ( current->hcall_preempted ) - regs->pc -= 4; /* re-execute 'hvc #XEN_HYPERCALL_TAG' */ + regs->pc -= 4; /* re-execute 'hvc #XEN_HYPERCALL_TAG' */ } static bool check_multicall_32bit_clean(struct multicall_entry *multi) @@ -1516,7 +1522,8 @@ static bool check_multicall_32bit_clean(struct multicall_entry *multi) { if ( unlikely(multi->args[i] & 0xffffffff00000000ULL) ) { - printk("%pv: multicall argument %d is not 32-bit clean %"PRIx64"\n", + printk("%pv: multicall argument %d is not 32-bit clean %" PRIx64 + "\n", current, i, multi->args[i]); domain_crash(current->domain); return false; @@ -1548,12 +1555,11 @@ enum mc_disposition arch_do_multicall_call(struct mc_state *state) !check_multicall_32bit_clean(multi) ) return mc_continue; - multi->result = call(multi->args[0], multi->args[1], - multi->args[2], multi->args[3], - multi->args[4]); + multi->result = call(multi->args[0], multi->args[1], multi->args[2], + multi->args[3], multi->args[4]); - return likely(!psr_mode_is_user(guest_cpu_user_regs())) - ? mc_continue : mc_preempt; + return likely(!psr_mode_is_user(guest_cpu_user_regs())) ? mc_continue + : mc_preempt; } /* @@ -1565,22 +1571,22 @@ enum mc_disposition arch_do_multicall_call(struct mc_state *state) * bit position in short is condition code: NZCV */ static const unsigned short cc_map[16] = { - 0xF0F0, /* EQ == Z set */ - 0x0F0F, /* NE */ - 0xCCCC, /* CS == C set */ - 0x3333, /* CC */ - 0xFF00, /* MI == N set */ - 0x00FF, /* PL */ - 0xAAAA, /* VS == V set */ - 0x5555, /* VC */ - 0x0C0C, /* HI == C set && Z clear */ - 0xF3F3, /* LS == C clear || Z set */ - 0xAA55, /* GE == (N==V) */ - 0x55AA, /* LT == (N!=V) */ - 0x0A05, /* GT == (!Z && (N==V)) */ - 0xF5FA, /* LE == (Z || (N!=V)) */ - 0xFFFF, /* AL always */ - 0 /* NV */ + 0xF0F0, /* EQ == Z set */ + 0x0F0F, /* NE */ + 0xCCCC, /* CS == C set */ + 0x3333, /* CC */ + 0xFF00, /* MI == N set */ + 0x00FF, /* PL */ + 0xAAAA, /* VS == V set */ + 0x5555, /* VC */ + 0x0C0C, /* HI == C set && Z clear */ + 0xF3F3, /* LS == C clear || Z set */ + 0xAA55, /* GE == (N==V) */ + 0x55AA, /* LT == (N!=V) */ + 0x0A05, /* GT == (!Z && (N==V)) */ + 0xF5FA, /* LE == (Z || (N!=V)) */ + 0xFFFF, /* AL always */ + 0 /* NV */ }; int check_conditional_instr(struct cpu_user_regs *regs, const union hsr hsr) @@ -1623,16 +1629,16 @@ int check_conditional_instr(struct cpu_user_regs *regs, const union hsr hsr) { unsigned long it; - BUG_ON( !psr_mode_is_32bit(regs->cpsr) || !(cpsr&PSR_THUMB) ); + BUG_ON(!psr_mode_is_32bit(regs->cpsr) || !(cpsr & PSR_THUMB)); - it = ( (cpsr >> (10-2)) & 0xfc) | ((cpsr >> 25) & 0x3 ); + it = ((cpsr >> (10 - 2)) & 0xfc) | ((cpsr >> 25) & 0x3); /* it == 0 => unconditional. */ if ( it == 0 ) return 1; /* The cond for this instruction works out as the top 4 bits. 
*/ - cond = ( it >> 4 ); + cond = (it >> 4); } cpsr_cond = cpsr >> 28; @@ -1650,10 +1656,10 @@ void advance_pc(struct cpu_user_regs *regs, const union hsr hsr) unsigned long itbits, cond, cpsr = regs->cpsr; /* PSR_IT_MASK bits can only be set for 32-bit processors in Thumb mode. */ - BUG_ON( (!psr_mode_is_32bit(cpsr)||!(cpsr&PSR_THUMB)) - && (cpsr&PSR_IT_MASK) ); + BUG_ON((!psr_mode_is_32bit(cpsr) || !(cpsr & PSR_THUMB)) && + (cpsr & PSR_IT_MASK)); - if ( cpsr&PSR_IT_MASK ) + if ( cpsr & PSR_IT_MASK ) { /* The ITSTATE[7:0] block is contained in CPSR[15:10],CPSR[26:25] * @@ -1686,11 +1692,8 @@ void advance_pc(struct cpu_user_regs *regs, const union hsr hsr) } /* Read as zero and write ignore */ -void handle_raz_wi(struct cpu_user_regs *regs, - int regidx, - bool read, - const union hsr hsr, - int min_el) +void handle_raz_wi(struct cpu_user_regs *regs, int regidx, bool read, + const union hsr hsr, int min_el) { ASSERT((min_el == 0) || (min_el == 1)); @@ -1705,11 +1708,8 @@ void handle_raz_wi(struct cpu_user_regs *regs, } /* write only as write ignore */ -void handle_wo_wi(struct cpu_user_regs *regs, - int regidx, - bool read, - const union hsr hsr, - int min_el) +void handle_wo_wi(struct cpu_user_regs *regs, int regidx, bool read, + const union hsr hsr, int min_el) { ASSERT((min_el == 0) || (min_el == 1)); @@ -1724,12 +1724,8 @@ void handle_wo_wi(struct cpu_user_regs *regs, } /* Read only as value provided with 'val' argument of this function */ -void handle_ro_read_val(struct cpu_user_regs *regs, - int regidx, - bool read, - const union hsr hsr, - int min_el, - register_t val) +void handle_ro_read_val(struct cpu_user_regs *regs, int regidx, bool read, + const union hsr hsr, int min_el, register_t val) { ASSERT((min_el == 0) || (min_el == 1)); @@ -1745,11 +1741,8 @@ void handle_ro_read_val(struct cpu_user_regs *regs, } /* Read only as read as zero */ -inline void handle_ro_raz(struct cpu_user_regs *regs, - int regidx, - bool read, - const union hsr hsr, - int min_el) +inline void handle_ro_raz(struct cpu_user_regs *regs, int regidx, bool read, + const union hsr hsr, int min_el) { handle_ro_read_val(regs, regidx, read, hsr, min_el, 0); } @@ -1764,10 +1757,10 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr) mfn = gfn_to_mfn(d, gaddr_to_gfn(ttbr0)); - printk("dom%d VA 0x%08"PRIvaddr"\n", d->domain_id, addr); - printk(" TTBCR: 0x%08"PRIregister"\n", ttbcr); - printk(" TTBR0: 0x%016"PRIx64" = 0x%"PRIpaddr"\n", - ttbr0, mfn_to_maddr(mfn)); + printk("dom%d VA 0x%08" PRIvaddr "\n", d->domain_id, addr); + printk(" TTBCR: 0x%08" PRIregister "\n", ttbcr); + printk(" TTBR0: 0x%016" PRIx64 " = 0x%" PRIpaddr "\n", ttbr0, + mfn_to_maddr(mfn)); if ( ttbcr & TTBCR_EAE ) { @@ -1787,11 +1780,10 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr) } first = map_domain_page(mfn); - offset = addr >> (12+8); - printk("1ST[0x%"PRIx32"] (0x%"PRIpaddr") = 0x%08"PRIx32"\n", - offset, mfn_to_maddr(mfn), first[offset]); - if ( !(first[offset] & 0x1) || - (first[offset] & 0x2) ) + offset = addr >> (12 + 8); + printk("1ST[0x%" PRIx32 "] (0x%" PRIpaddr ") = 0x%08" PRIx32 "\n", offset, + mfn_to_maddr(mfn), first[offset]); + if ( !(first[offset] & 0x1) || (first[offset] & 0x2) ) goto done; mfn = gfn_to_mfn(d, gaddr_to_gfn(first[offset])); @@ -1803,12 +1795,14 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr) } second = map_domain_page(mfn); offset = (addr >> 12) & 0x3FF; - printk("2ND[0x%"PRIx32"] (0x%"PRIpaddr") = 0x%08"PRIx32"\n", - offset, mfn_to_maddr(mfn), second[offset]); + printk("2ND[0x%" 
PRIx32 "] (0x%" PRIpaddr ") = 0x%08" PRIx32 "\n", offset, + mfn_to_maddr(mfn), second[offset]); done: - if ( second ) unmap_domain_page(second); - if ( first ) unmap_domain_page(first); + if ( second ) + unmap_domain_page(second); + if ( first ) + unmap_domain_page(first); } /* @@ -1827,7 +1821,7 @@ static inline vaddr_t get_hfar(bool is_data) else gva = READ_CP32(HIFAR); #else - gva = READ_SYSREG(FAR_EL2); + gva = READ_SYSREG(FAR_EL2); #endif return gva; @@ -1934,17 +1928,16 @@ static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs, return; /* Try again */ } - switch ( fsc ) + switch (fsc) { case FSC_FLT_PERM: { - const struct npfec npfec = { - .insn_fetch = !is_data, - .read_access = is_data && !hsr.dabt.write, - .write_access = is_data && hsr.dabt.write, - .gla_valid = 1, - .kind = xabt.s1ptw ? npfec_kind_in_gpt : npfec_kind_with_gla - }; + const struct npfec npfec = {.insn_fetch = !is_data, + .read_access = is_data && !hsr.dabt.write, + .write_access = is_data && hsr.dabt.write, + .gla_valid = 1, + .kind = xabt.s1ptw ? npfec_kind_in_gpt + : npfec_kind_with_gla}; p2m_mem_access_check(gpa, gva, npfec); /* @@ -1964,7 +1957,7 @@ static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs, { enum io_state state = try_handle_mmio(regs, hsr, gpa); - switch ( state ) + switch (state) { case IO_ABORT: goto inject_abt; @@ -1981,8 +1974,7 @@ static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs, * First check if the translation fault can be resolved by the * P2M subsystem. If that's the case nothing else to do. */ - if ( p2m_resolve_translation_fault(current->domain, - gaddr_to_gfn(gpa)) ) + if ( p2m_resolve_translation_fault(current->domain, gaddr_to_gfn(gpa)) ) return; if ( is_data && try_map_mmio(gaddr_to_gfn(gpa)) ) @@ -1990,13 +1982,15 @@ static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs, break; default: - gprintk(XENLOG_WARNING, "Unsupported FSC: HSR=%#x DFSC=%#x\n", - hsr.bits, xabt.fsc); + gprintk(XENLOG_WARNING, "Unsupported FSC: HSR=%#x DFSC=%#x\n", hsr.bits, + xabt.fsc); } inject_abt: - gdprintk(XENLOG_DEBUG, "HSR=0x%x pc=%#"PRIregister" gva=%#"PRIvaddr - " gpa=%#"PRIpaddr"\n", hsr.bits, regs->pc, gva, gpa); + gdprintk(XENLOG_DEBUG, + "HSR=0x%x pc=%#" PRIregister " gva=%#" PRIvaddr " gpa=%#" PRIpaddr + "\n", + hsr.bits, regs->pc, gva, gpa); if ( is_data ) inject_dabt_exception(regs, gva, hsr.len); else @@ -2009,7 +2003,7 @@ static inline bool needs_ssbd_flip(struct vcpu *v) return false; return !(v->arch.cpu_info->flags & CPUINFO_WORKAROUND_2_FLAG) && - cpu_require_ssbd_mitigation(); + cpu_require_ssbd_mitigation(); } static void enter_hypervisor_head(struct cpu_user_regs *regs) @@ -2049,11 +2043,11 @@ static void enter_hypervisor_head(struct cpu_user_regs *regs) void do_trap_guest_sync(struct cpu_user_regs *regs) { - const union hsr hsr = { .bits = regs->hsr }; + const union hsr hsr = {.bits = regs->hsr}; enter_hypervisor_head(regs); - switch ( hsr.ec ) + switch (hsr.ec) { case HSR_EC_WFI_WFE: /* @@ -2067,11 +2061,14 @@ void do_trap_guest_sync(struct cpu_user_regs *regs) advance_pc(regs, hsr); return; } - if ( hsr.wfi_wfe.ti ) { + if ( hsr.wfi_wfe.ti ) + { /* Yield the VCPU for WFE */ perfc_incr(trap_wfe); vcpu_yield(); - } else { + } + else + { /* Block the VCPU for WFI */ perfc_incr(trap_wfi); vcpu_block_unless_event_pending(current); @@ -2176,7 +2173,8 @@ void do_trap_guest_sync(struct cpu_user_regs *regs) default: gprintk(XENLOG_WARNING, - "Unknown Guest Trap. HSR=0x%x EC=0x%x IL=%x Syndrome=0x%"PRIx32"\n", + "Unknown Guest Trap. 
HSR=0x%x EC=0x%x IL=%x Syndrome=0x%" PRIx32 + "\n", hsr.bits, hsr.ec, hsr.len, hsr.iss); inject_undef_exception(regs, hsr); } @@ -2184,11 +2182,11 @@ void do_trap_guest_sync(struct cpu_user_regs *regs) void do_trap_hyp_sync(struct cpu_user_regs *regs) { - const union hsr hsr = { .bits = regs->hsr }; + const union hsr hsr = {.bits = regs->hsr}; enter_hypervisor_head(regs); - switch ( hsr.ec ) + switch (hsr.ec) { #ifdef CONFIG_ARM_64 case HSR_EC_BRK: @@ -2216,7 +2214,8 @@ void do_trap_hyp_sync(struct cpu_user_regs *regs) break; } default: - printk("Hypervisor Trap. HSR=0x%x EC=0x%x IL=%x Syndrome=0x%"PRIx32"\n", + printk("Hypervisor Trap. HSR=0x%x EC=0x%x IL=%x Syndrome=0x%" PRIx32 + "\n", hsr.bits, hsr.ec, hsr.len, hsr.iss); do_unexpected_trap("Hypervisor", regs); } diff --git a/xen/arch/arm/vcpreg.c b/xen/arch/arm/vcpreg.c index cdc91cdf5b..222d6f39d6 100644 --- a/xen/arch/arm/vcpreg.c +++ b/xen/arch/arm/vcpreg.c @@ -48,67 +48,67 @@ */ /* The name is passed from the upper macro to workaround macro expansion. */ -#define TVM_REG(sz, func, reg...) \ -static bool func(struct cpu_user_regs *regs, uint##sz##_t *r, bool read) \ -{ \ - struct vcpu *v = current; \ - bool cache_enabled = vcpu_has_cache_enabled(v); \ - \ - GUEST_BUG_ON(read); \ - WRITE_SYSREG##sz(*r, reg); \ - \ - p2m_toggle_cache(v, cache_enabled); \ - \ - return true; \ -} +#define TVM_REG(sz, func, reg...) \ + static bool func(struct cpu_user_regs *regs, uint##sz##_t *r, bool read) \ + { \ + struct vcpu *v = current; \ + bool cache_enabled = vcpu_has_cache_enabled(v); \ + \ + GUEST_BUG_ON(read); \ + WRITE_SYSREG##sz(*r, reg); \ + \ + p2m_toggle_cache(v, cache_enabled); \ + \ + return true; \ + } #define TVM_REG32(regname, xreg) TVM_REG(32, vreg_emulate_##regname, xreg) #define TVM_REG64(regname, xreg) TVM_REG(64, vreg_emulate_##regname, xreg) #ifdef CONFIG_ARM_32 -#define TVM_REG32_COMBINED(lowreg, hireg, xreg) \ - /* Use TVM_REG directly to workaround macro expansion. */ \ - TVM_REG(32, vreg_emulate_##lowreg, lowreg) \ +#define TVM_REG32_COMBINED(lowreg, hireg, xreg) \ + /* Use TVM_REG directly to workaround macro expansion. */ \ + TVM_REG(32, vreg_emulate_##lowreg, lowreg) \ TVM_REG(32, vreg_emulate_##hireg, hireg) #else /* CONFIG_ARM_64 */ -#define TVM_REG32_COMBINED(lowreg, hireg, xreg) \ -static bool vreg_emulate_##xreg(struct cpu_user_regs *regs, uint32_t *r, \ - bool read, bool hi) \ -{ \ - struct vcpu *v = current; \ - bool cache_enabled = vcpu_has_cache_enabled(v); \ - register_t reg = READ_SYSREG(xreg); \ - \ - GUEST_BUG_ON(read); \ - if ( hi ) /* reg[63:32] is AArch32 register hireg */ \ - { \ - reg &= GENMASK(31, 0); \ - reg |= ((uint64_t)*r) << 32; \ - } \ - else /* reg[31:0] is AArch32 register lowreg. 
*/ \ - { \ - reg &= GENMASK(63, 32); \ - reg |= *r; \ - } \ - WRITE_SYSREG(reg, xreg); \ - \ - p2m_toggle_cache(v, cache_enabled); \ - \ - return true; \ -} \ - \ -static bool vreg_emulate_##lowreg(struct cpu_user_regs *regs, uint32_t *r, \ - bool read) \ -{ \ - return vreg_emulate_##xreg(regs, r, read, false); \ -} \ - \ -static bool vreg_emulate_##hireg(struct cpu_user_regs *regs, uint32_t *r, \ - bool read) \ -{ \ - return vreg_emulate_##xreg(regs, r, read, true); \ -} +#define TVM_REG32_COMBINED(lowreg, hireg, xreg) \ + static bool vreg_emulate_##xreg(struct cpu_user_regs *regs, uint32_t *r, \ + bool read, bool hi) \ + { \ + struct vcpu *v = current; \ + bool cache_enabled = vcpu_has_cache_enabled(v); \ + register_t reg = READ_SYSREG(xreg); \ + \ + GUEST_BUG_ON(read); \ + if ( hi ) /* reg[63:32] is AArch32 register hireg */ \ + { \ + reg &= GENMASK(31, 0); \ + reg |= ((uint64_t)*r) << 32; \ + } \ + else /* reg[31:0] is AArch32 register lowreg. */ \ + { \ + reg &= GENMASK(63, 32); \ + reg |= *r; \ + } \ + WRITE_SYSREG(reg, xreg); \ + \ + p2m_toggle_cache(v, cache_enabled); \ + \ + return true; \ + } \ + \ + static bool vreg_emulate_##lowreg(struct cpu_user_regs *regs, uint32_t *r, \ + bool read) \ + { \ + return vreg_emulate_##xreg(regs, r, read, false); \ + } \ + \ + static bool vreg_emulate_##hireg(struct cpu_user_regs *regs, uint32_t *r, \ + bool read) \ + { \ + return vreg_emulate_##xreg(regs, r, read, true); \ + } #endif /* Defining helpers for emulating co-processor registers. */ @@ -145,14 +145,14 @@ TVM_REG32_COMBINED(AMAIR0, AMAIR1, AMAIR_EL1) TVM_REG32(CONTEXTIDR, CONTEXTIDR_EL1) /* Macro to generate easily case for co-processor emulation. */ -#define GENERATE_CASE(reg, sz) \ - case HSR_CPREG##sz(reg): \ - { \ - bool res; \ - \ - res = vreg_emulate_cp##sz(regs, hsr, vreg_emulate_##reg); \ - ASSERT(res); \ - break; \ +#define GENERATE_CASE(reg, sz) \ + case HSR_CPREG##sz(reg): \ + { \ + bool res; \ + \ + res = vreg_emulate_cp##sz(regs, hsr, vreg_emulate_##reg); \ + ASSERT(res); \ + break; \ } void do_cp15_32(struct cpu_user_regs *regs, const union hsr hsr) @@ -167,7 +167,7 @@ void do_cp15_32(struct cpu_user_regs *regs, const union hsr hsr) return; } - switch ( hsr.bits & HSR_CP32_REGS_MASK ) + switch (hsr.bits & HSR_CP32_REGS_MASK) { /* * !CNTHCTL_EL2.EL1PCEN / !CNTHCTL.PL1PCEN @@ -207,30 +207,30 @@ void do_cp15_32(struct cpu_user_regs *regs, const union hsr hsr) p2m_set_way_flush(current); break; - /* - * HCR_EL2.TVM - * - * ARMv8 (DDI 0487D.a): Table D1-38 - */ - GENERATE_CASE(SCTLR, 32) - GENERATE_CASE(TTBR0_32, 32) - GENERATE_CASE(TTBR1_32, 32) - GENERATE_CASE(TTBCR, 32) - GENERATE_CASE(TTBCR2, 32) - GENERATE_CASE(DACR, 32) - GENERATE_CASE(DFSR, 32) - GENERATE_CASE(IFSR, 32) - GENERATE_CASE(DFAR, 32) - GENERATE_CASE(IFAR, 32) - GENERATE_CASE(ADFSR, 32) - GENERATE_CASE(AIFSR, 32) - /* AKA PRRR */ - GENERATE_CASE(MAIR0, 32) - /* AKA NMRR */ - GENERATE_CASE(MAIR1, 32) - GENERATE_CASE(AMAIR0, 32) - GENERATE_CASE(AMAIR1, 32) - GENERATE_CASE(CONTEXTIDR, 32) + /* + * HCR_EL2.TVM + * + * ARMv8 (DDI 0487D.a): Table D1-38 + */ + GENERATE_CASE(SCTLR, 32) + GENERATE_CASE(TTBR0_32, 32) + GENERATE_CASE(TTBR1_32, 32) + GENERATE_CASE(TTBCR, 32) + GENERATE_CASE(TTBCR2, 32) + GENERATE_CASE(DACR, 32) + GENERATE_CASE(DFSR, 32) + GENERATE_CASE(IFSR, 32) + GENERATE_CASE(DFAR, 32) + GENERATE_CASE(IFAR, 32) + GENERATE_CASE(ADFSR, 32) + GENERATE_CASE(AIFSR, 32) + /* AKA PRRR */ + GENERATE_CASE(MAIR0, 32) + /* AKA NMRR */ + GENERATE_CASE(MAIR1, 32) + GENERATE_CASE(AMAIR0, 32) + GENERATE_CASE(AMAIR1, 
32) + GENERATE_CASE(CONTEXTIDR, 32) /* * MDCR_EL2.TPM @@ -317,9 +317,9 @@ void do_cp15_32(struct cpu_user_regs *regs, const union hsr hsr) */ default: gdprintk(XENLOG_ERR, - "%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n", - cp32.read ? "mrc" : "mcr", - cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc); + "%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%" PRIregister "\n", + cp32.read ? "mrc" : "mcr", cp32.op1, cp32.reg, cp32.crn, + cp32.crm, cp32.op2, regs->pc); gdprintk(XENLOG_ERR, "unhandled 32-bit CP15 access %#x\n", hsr.bits & HSR_CP32_REGS_MASK); inject_undef_exception(regs, hsr); @@ -336,7 +336,7 @@ void do_cp15_64(struct cpu_user_regs *regs, const union hsr hsr) return; } - switch ( hsr.bits & HSR_CP64_REGS_MASK ) + switch (hsr.bits & HSR_CP64_REGS_MASK) { /* * !CNTHCTL_EL2.EL1PCEN / !CNTHCTL.PL1PCEN @@ -361,8 +361,8 @@ void do_cp15_64(struct cpu_user_regs *regs, const union hsr hsr) return inject_undef_exception(regs, hsr); break; - GENERATE_CASE(TTBR0, 64) - GENERATE_CASE(TTBR1, 64) + GENERATE_CASE(TTBR0, 64) + GENERATE_CASE(TTBR1, 64) /* * CPTR_EL2.T{0..9,12..13} @@ -382,18 +382,18 @@ void do_cp15_64(struct cpu_user_regs *regs, const union hsr hsr) * And all other unknown registers. */ default: - { - const struct hsr_cp64 cp64 = hsr.cp64; - - gdprintk(XENLOG_ERR, - "%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n", - cp64.read ? "mrrc" : "mcrr", - cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc); - gdprintk(XENLOG_ERR, "unhandled 64-bit CP15 access %#x\n", - hsr.bits & HSR_CP64_REGS_MASK); - inject_undef_exception(regs, hsr); - return; - } + { + const struct hsr_cp64 cp64 = hsr.cp64; + + gdprintk(XENLOG_ERR, + "%s p15, %d, r%d, r%d, cr%d @ 0x%" PRIregister "\n", + cp64.read ? "mrrc" : "mcrr", cp64.op1, cp64.reg1, cp64.reg2, + cp64.crm, regs->pc); + gdprintk(XENLOG_ERR, "unhandled 64-bit CP15 access %#x\n", + hsr.bits & HSR_CP64_REGS_MASK); + inject_undef_exception(regs, hsr); + return; + } } advance_pc(regs, hsr); } @@ -409,7 +409,7 @@ void do_cp14_32(struct cpu_user_regs *regs, const union hsr hsr) return; } - switch ( hsr.bits & HSR_CP32_REGS_MASK ) + switch (hsr.bits & HSR_CP32_REGS_MASK) { /* * MDCR_EL2.TDOSA @@ -467,7 +467,7 @@ void do_cp14_32(struct cpu_user_regs *regs, const union hsr hsr) */ val = (1 << 24) | (5 << 16); val |= ((current_cpu_data.midr.bits >> 20) & 0xf) | - (current_cpu_data.midr.bits & 0xf); + (current_cpu_data.midr.bits & 0xf); set_user_reg(regs, regidx, val); break; @@ -517,9 +517,9 @@ void do_cp14_32(struct cpu_user_regs *regs, const union hsr hsr) */ default: gdprintk(XENLOG_ERR, - "%s p14, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n", - cp32.read ? "mrc" : "mcr", - cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc); + "%s p14, %d, r%d, cr%d, cr%d, %d @ 0x%" PRIregister "\n", + cp32.read ? "mrc" : "mcr", cp32.op1, cp32.reg, cp32.crn, + cp32.crm, cp32.op2, regs->pc); gdprintk(XENLOG_ERR, "unhandled 32-bit cp14 access %#x\n", hsr.bits & HSR_CP32_REGS_MASK); inject_undef_exception(regs, hsr); @@ -558,10 +558,9 @@ void do_cp14_64(struct cpu_user_regs *regs, const union hsr hsr) * * And all other unknown registers. */ - gdprintk(XENLOG_ERR, - "%s p14, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n", - cp64.read ? "mrrc" : "mcrr", - cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc); + gdprintk(XENLOG_ERR, "%s p14, %d, r%d, r%d, cr%d @ 0x%" PRIregister "\n", + cp64.read ? 
"mrrc" : "mcrr", cp64.op1, cp64.reg1, cp64.reg2, + cp64.crm, regs->pc); gdprintk(XENLOG_ERR, "unhandled 64-bit CP14 access %#x\n", hsr.bits & HSR_CP64_REGS_MASK); inject_undef_exception(regs, hsr); @@ -589,10 +588,9 @@ void do_cp14_dbg(struct cpu_user_regs *regs, const union hsr hsr) * * And all other unknown registers. */ - gdprintk(XENLOG_ERR, - "%s p14, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n", - cp64.read ? "mrrc" : "mcrr", - cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc); + gdprintk(XENLOG_ERR, "%s p14, %d, r%d, r%d, cr%d @ 0x%" PRIregister "\n", + cp64.read ? "mrrc" : "mcrr", cp64.op1, cp64.reg1, cp64.reg2, + cp64.crm, regs->pc); gdprintk(XENLOG_ERR, "unhandled 64-bit CP14 DBG access %#x\n", hsr.bits & HSR_CP64_REGS_MASK); diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c index 64b141fea5..13a970ea59 100644 --- a/xen/arch/arm/vgic-v2.c +++ b/xen/arch/arm/vgic-v2.c @@ -33,7 +33,8 @@ #include #include -static struct { +static struct +{ bool enabled; /* Distributor interface address */ paddr_t dbase; @@ -58,8 +59,8 @@ void vgic_v2_setup_hw(paddr_t dbase, paddr_t cbase, paddr_t csize, vgic_v2_hw.aliased_offset = aliased_offset; } -#define NR_TARGETS_PER_ITARGETSR 4U -#define NR_BITS_PER_TARGET (32U / NR_TARGETS_PER_ITARGETSR) +#define NR_TARGETS_PER_ITARGETSR 4U +#define NR_BITS_PER_TARGET (32U / NR_TARGETS_PER_ITARGETSR) /* * Fetch an ITARGETSR register based on the offset from ITARGETSR0. Only @@ -79,7 +80,8 @@ static uint32_t vgic_fetch_itargetsr(struct vgic_irq_rank *rank, offset &= ~(NR_TARGETS_PER_ITARGETSR - 1); for ( i = 0; i < NR_TARGETS_PER_ITARGETSR; i++, offset++ ) - reg |= (1 << read_atomic(&rank->vcpu[offset])) << (i * NR_BITS_PER_TARGET); + reg |= (1 << read_atomic(&rank->vcpu[offset])) + << (i * NR_BITS_PER_TARGET); return reg; } @@ -121,7 +123,7 @@ static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank, * Don't need to mask as we rely on new_mask to fit for only one * target. */ - BUILD_BUG_ON((sizeof (new_mask) * 8) != NR_BITS_PER_TARGET); + BUILD_BUG_ON((sizeof(new_mask) * 8) != NR_BITS_PER_TARGET); new_mask = itargetsr >> (i * NR_BITS_PER_TARGET); @@ -144,8 +146,9 @@ static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank, if ( !new_target || (new_target > d->max_vcpus) ) { gprintk(XENLOG_WARNING, - "No valid vCPU found for vIRQ%u in the target list (%#x). Skip it\n", - virq, new_mask); + "No valid vCPU found for vIRQ%u in the target list (%#x). " + "Skip it\n", + virq, new_mask); continue; } @@ -157,9 +160,8 @@ static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank, /* Only migrate the vIRQ if the target vCPU has changed */ if ( new_target != old_target ) { - if ( vgic_migrate_irq(d->vcpu[old_target], - d->vcpu[new_target], - virq) ) + if ( vgic_migrate_irq(d->vcpu[old_target], d->vcpu[new_target], + virq) ) write_atomic(&rank->vcpu[offset], new_target); } } @@ -175,10 +177,11 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, perfc_incr(vgicd_reads); - switch ( gicd_reg ) + switch (gicd_reg) { case VREG32(GICD_CTLR): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; vgic_lock(v); *r = vreg_reg32_extract(v->domain->arch.vgic.ctlr, info); vgic_unlock(v); @@ -188,11 +191,12 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, { uint32_t typer; - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; /* No secure world support for guests. 
*/ vgic_lock(v); - typer = ((v->domain->max_vcpus - 1) << GICD_TYPE_CPUS_SHIFT) - | DIV_ROUND_UP(v->domain->arch.vgic.nr_spis, 32); + typer = ((v->domain->max_vcpus - 1) << GICD_TYPE_CPUS_SHIFT) | + DIV_ROUND_UP(v->domain->arch.vgic.nr_spis, 32); vgic_unlock(v); *r = vreg_reg32_extract(typer, info); @@ -201,7 +205,8 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, } case VREG32(GICD_IIDR): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; /* * XXX Do we need a JEP106 manufacturer ID? * Just use the physical h/w value for now @@ -223,18 +228,22 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, goto read_as_zero_32; case VRANGE32(GICD_ISENABLER, GICD_ISENABLERN): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 1, gicd_reg - GICD_ISENABLER, DABT_WORD); - if ( rank == NULL) goto read_as_zero; + if ( rank == NULL ) + goto read_as_zero; vgic_lock_rank(v, rank, flags); *r = vreg_reg32_extract(rank->ienable, info); vgic_unlock_rank(v, rank, flags); return 1; case VRANGE32(GICD_ICENABLER, GICD_ICENABLERN): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 1, gicd_reg - GICD_ICENABLER, DABT_WORD); - if ( rank == NULL) goto read_as_zero; + if ( rank == NULL ) + goto read_as_zero; vgic_lock_rank(v, rank, flags); *r = vreg_reg32_extract(rank->ienable, info); vgic_unlock_rank(v, rank, flags); @@ -255,9 +264,11 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, uint32_t ipriorityr; uint8_t rank_index; - if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 8, gicd_reg - GICD_IPRIORITYR, DABT_WORD); - if ( rank == NULL ) goto read_as_zero; + if ( rank == NULL ) + goto read_as_zero; rank_index = REG_RANK_INDEX(8, gicd_reg - GICD_IPRIORITYR, DABT_WORD); vgic_lock_rank(v, rank, flags); @@ -275,9 +286,11 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, { uint32_t itargetsr; - if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 8, gicd_reg - GICD_ITARGETSR, DABT_WORD); - if ( rank == NULL) goto read_as_zero; + if ( rank == NULL ) + goto read_as_zero; vgic_lock_rank(v, rank, flags); itargetsr = vgic_fetch_itargetsr(rank, gicd_reg - GICD_ITARGETSR); vgic_unlock_rank(v, rank, flags); @@ -293,9 +306,11 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, { uint32_t icfgr; - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 2, gicd_reg - GICD_ICFGR, DABT_WORD); - if ( rank == NULL) goto read_as_zero; + if ( rank == NULL ) + goto read_as_zero; vgic_lock_rank(v, rank, flags); icfgr = rank->icfg[REG_RANK_INDEX(2, gicd_reg - GICD_ICFGR, DABT_WORD)]; vgic_unlock_rank(v, rank, flags); @@ -313,7 +328,8 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, goto read_as_zero_32; case VREG32(GICD_SGIR): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; /* Write only -- read unknown */ *r = 0xdeadbeef; return 1; @@ -333,7 +349,8 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, goto read_impl_defined; case 
VREG32(GICD_ICPIDR2): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; printk(XENLOG_G_ERR "%pv: vGICD: unhandled read from ICPIDR2\n", v); return 0; @@ -341,18 +358,19 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, goto read_impl_defined; default: - printk(XENLOG_G_ERR "%pv: vGICD: unhandled read r%d offset %#08x\n", - v, dabt.reg, gicd_reg); + printk(XENLOG_G_ERR "%pv: vGICD: unhandled read r%d offset %#08x\n", v, + dabt.reg, gicd_reg); return 0; } bad_width: - printk(XENLOG_G_ERR "%pv: vGICD: bad read width %d r%d offset %#08x\n", - v, dabt.size, dabt.reg, gicd_reg); + printk(XENLOG_G_ERR "%pv: vGICD: bad read width %d r%d offset %#08x\n", v, + dabt.size, dabt.reg, gicd_reg); return 0; read_as_zero_32: - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; read_as_zero: *r = 0; return 1; @@ -365,8 +383,7 @@ read_impl_defined: return 1; read_reserved: - printk(XENLOG_G_DEBUG - "%pv: vGICD: RAZ on reserved register offset %#08x\n", + printk(XENLOG_G_DEBUG "%pv: vGICD: RAZ on reserved register offset %#08x\n", v, gicd_reg); *r = 0; return 1; @@ -374,7 +391,6 @@ read_reserved: static bool vgic_v2_to_sgi(struct vcpu *v, register_t sgir) { - int virq; int irqmode; enum gic_sgi_mode sgi_mode; @@ -385,7 +401,7 @@ static bool vgic_v2_to_sgi(struct vcpu *v, register_t sgir) virq = (sgir & GICD_SGI_INTID_MASK); /* Map GIC sgi value to enum value */ - switch ( irqmode ) + switch (irqmode) { case GICD_SGI_TARGET_LIST_VAL: target.list = (sgir & GICD_SGI_TARGET_MASK) >> GICD_SGI_TARGET_SHIFT; @@ -399,7 +415,8 @@ static bool vgic_v2_to_sgi(struct vcpu *v, register_t sgir) break; default: printk(XENLOG_G_DEBUG - "%pv: vGICD: unhandled GICD_SGIR write %"PRIregister" with wrong mode\n", + "%pv: vGICD: unhandled GICD_SGIR write %" PRIregister + " with wrong mode\n", v, sgir); return false; } @@ -418,10 +435,11 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, perfc_incr(vgicd_writes); - switch ( gicd_reg ) + switch (gicd_reg) { case VREG32(GICD_CTLR): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; /* Ignore all but the enable bit */ vgic_lock(v); vreg_reg32_update(&v->domain->arch.vgic.ctlr, r, info); @@ -449,9 +467,11 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, goto write_ignore_32; case VRANGE32(GICD_ISENABLER, GICD_ISENABLERN): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 1, gicd_reg - GICD_ISENABLER, DABT_WORD); - if ( rank == NULL) goto write_ignore; + if ( rank == NULL ) + goto write_ignore; vgic_lock_rank(v, rank, flags); tr = rank->ienable; vreg_reg32_setbits(&rank->ienable, r, info); @@ -460,9 +480,11 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, return 1; case VRANGE32(GICD_ICENABLER, GICD_ICENABLERN): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 1, gicd_reg - GICD_ICENABLER, DABT_WORD); - if ( rank == NULL) goto write_ignore; + if ( rank == NULL ) + goto write_ignore; vgic_lock_rank(v, rank, flags); tr = rank->ienable; vreg_reg32_clearbits(&rank->ienable, r, info); @@ -471,31 +493,34 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, return 1; case VRANGE32(GICD_ISPENDR, GICD_ISPENDRN): - if ( dabt.size != DABT_WORD ) goto bad_width; - printk(XENLOG_G_ERR - "%pv: vGICD: 
unhandled word write %#"PRIregister" to ISPENDR%d\n", + if ( dabt.size != DABT_WORD ) + goto bad_width; + printk(XENLOG_G_ERR "%pv: vGICD: unhandled word write %#" PRIregister + " to ISPENDR%d\n", v, r, gicd_reg - GICD_ISPENDR); return 0; case VRANGE32(GICD_ICPENDR, GICD_ICPENDRN): - if ( dabt.size != DABT_WORD ) goto bad_width; - printk(XENLOG_G_ERR - "%pv: vGICD: unhandled word write %#"PRIregister" to ICPENDR%d\n", + if ( dabt.size != DABT_WORD ) + goto bad_width; + printk(XENLOG_G_ERR "%pv: vGICD: unhandled word write %#" PRIregister + " to ICPENDR%d\n", v, r, gicd_reg - GICD_ICPENDR); return 0; case VRANGE32(GICD_ISACTIVER, GICD_ISACTIVERN): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; if ( r == 0 ) goto write_ignore_32; - printk(XENLOG_G_ERR - "%pv: vGICD: unhandled word write %#"PRIregister" to ISACTIVER%d\n", + printk(XENLOG_G_ERR "%pv: vGICD: unhandled word write %#" PRIregister + " to ISACTIVER%d\n", v, r, gicd_reg - GICD_ISACTIVER); return 0; case VRANGE32(GICD_ICACTIVER, GICD_ICACTIVERN): - printk(XENLOG_G_ERR - "%pv: vGICD: unhandled word write %#"PRIregister" to ICACTIVER%d\n", + printk(XENLOG_G_ERR "%pv: vGICD: unhandled word write %#" PRIregister + " to ICACTIVER%d\n", v, r, gicd_reg - GICD_ICACTIVER); goto write_ignore_32; @@ -503,13 +528,14 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, { uint32_t *ipriorityr, priority; - if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 8, gicd_reg - GICD_IPRIORITYR, DABT_WORD); - if ( rank == NULL) goto write_ignore; + if ( rank == NULL ) + goto write_ignore; vgic_lock_rank(v, rank, flags); - ipriorityr = &rank->ipriorityr[REG_RANK_INDEX(8, - gicd_reg - GICD_IPRIORITYR, - DABT_WORD)]; + ipriorityr = &rank->ipriorityr[REG_RANK_INDEX( + 8, gicd_reg - GICD_IPRIORITYR, DABT_WORD)]; priority = ACCESS_ONCE(*ipriorityr); vreg_reg32_update(&priority, r, info); ACCESS_ONCE(*ipriorityr) = priority; @@ -529,9 +555,11 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, { uint32_t itargetsr; - if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 8, gicd_reg - GICD_ITARGETSR, DABT_WORD); - if ( rank == NULL) goto write_ignore; + if ( rank == NULL ) + goto write_ignore; vgic_lock_rank(v, rank, flags); itargetsr = vgic_fetch_itargetsr(rank, gicd_reg - GICD_ITARGETSR); vreg_reg32_update(&itargetsr, r, info); @@ -552,13 +580,15 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, goto write_ignore_32; case VRANGE32(GICD_ICFGR2, GICD_ICFGRN): /* SPIs */ - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 2, gicd_reg - GICD_ICFGR, DABT_WORD); - if ( rank == NULL) goto write_ignore; + if ( rank == NULL ) + goto write_ignore; vgic_lock_rank(v, rank, flags); - vreg_reg32_update(&rank->icfg[REG_RANK_INDEX(2, gicd_reg - GICD_ICFGR, - DABT_WORD)], - r, info); + vreg_reg32_update( + &rank->icfg[REG_RANK_INDEX(2, gicd_reg - GICD_ICFGR, DABT_WORD)], r, + info); vgic_unlock_rank(v, rank, flags); return 1; @@ -570,23 +600,26 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, goto write_ignore_32; case VREG32(GICD_SGIR): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto 
bad_width; return vgic_v2_to_sgi(v, r); case VRANGE32(0xF04, 0xF0C): goto write_reserved; case VRANGE32(GICD_CPENDSGIR, GICD_CPENDSGIRN): - if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width; - printk(XENLOG_G_ERR - "%pv: vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n", + if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) + goto bad_width; + printk(XENLOG_G_ERR "%pv: vGICD: unhandled %s write %#" PRIregister + " to ICPENDSGIR%d\n", v, dabt.size ? "word" : "byte", r, gicd_reg - GICD_CPENDSGIR); return 0; case VRANGE32(GICD_SPENDSGIR, GICD_SPENDSGIRN): - if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width; - printk(XENLOG_G_ERR - "%pv: vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n", + if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) + goto bad_width; + printk(XENLOG_G_ERR "%pv: vGICD: unhandled %s write %#" PRIregister + " to ISPENDSGIR%d\n", v, dabt.size ? "word" : "byte", r, gicd_reg - GICD_SPENDSGIR); return 0; @@ -605,20 +638,21 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, /* Implementation defined identification registers */ default: - printk(XENLOG_G_ERR - "%pv: vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n", + printk(XENLOG_G_ERR "%pv: vGICD: unhandled write r%d=%" PRIregister + " offset %#08x\n", v, dabt.reg, r, gicd_reg); return 0; } bad_width: - printk(XENLOG_G_ERR - "%pv: vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n", + printk(XENLOG_G_ERR "%pv: vGICD: bad write width %d r%d=%" PRIregister + " offset %#08x\n", v, dabt.size, dabt.reg, r, gicd_reg); return 0; write_ignore_32: - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; write_ignore: return 1; @@ -636,7 +670,7 @@ write_reserved: } static const struct mmio_handler_ops vgic_v2_distr_mmio_handler = { - .read = vgic_v2_distr_mmio_read, + .read = vgic_v2_distr_mmio_read, .write = vgic_v2_distr_mmio_write, }; @@ -720,7 +754,7 @@ static int vgic_v2_lpi_get_priority(struct domain *d, unsigned int vlpi) } static const struct vgic_ops vgic_v2_ops = { - .vcpu_init = vgic_v2_vcpu_init, + .vcpu_init = vgic_v2_vcpu_init, .domain_init = vgic_v2_domain_init, .domain_free = vgic_v2_domain_free, .lpi_to_pending = vgic_v2_lpi_to_pending, @@ -731,8 +765,7 @@ int vgic_v2_init(struct domain *d, int *mmio_count) { if ( !vgic_v2_hw.enabled ) { - printk(XENLOG_G_ERR - "d%d: vGICv2 is not supported on this platform.\n", + printk(XENLOG_G_ERR "d%d: vGICv2 is not supported on this platform.\n", d->domain_id); return -ENODEV; } diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c index 5b73c4ecd7..b5e7245ced 100644 --- a/xen/arch/arm/vgic-v3-its.c +++ b/xen/arch/arm/vgic-v3-its.c @@ -52,19 +52,20 @@ * If both the vcmd_lock and the its_lock are required, the vcmd_lock must * be taken first. */ -struct virt_its { +struct virt_its +{ struct domain *d; struct list_head vits_list; paddr_t doorbell_address; unsigned int devid_bits; unsigned int evid_bits; - spinlock_t vcmd_lock; /* Protects the virtual command buffer, which */ - uint64_t cwriter; /* consists of CWRITER and CREADR and those */ - uint64_t creadr; /* shadow variables cwriter and creadr. */ + spinlock_t vcmd_lock; /* Protects the virtual command buffer, which */ + uint64_t cwriter; /* consists of CWRITER and CREADR and those */ + uint64_t creadr; /* shadow variables cwriter and creadr. */ /* Protects the rest of this structure, including the ITS tables. 
*/ spinlock_t its_lock; uint64_t cbaser; - uint64_t baser_dev, baser_coll; /* BASER0 and BASER1 for the guest */ + uint64_t baser_dev, baser_coll; /* BASER0 and BASER1 for the guest */ unsigned int max_collections; unsigned int max_devices; /* changing "enabled" requires to hold *both* the vcmd_lock and its_lock */ @@ -87,7 +88,7 @@ struct vits_itte * Each entry just contains the VCPU ID of the respective vCPU. */ typedef uint16_t coll_table_entry_t; -#define UNMAPPED_COLLECTION ((coll_table_entry_t)~0) +#define UNMAPPED_COLLECTION ((coll_table_entry_t)~0) /* * Our device table encodings: @@ -96,13 +97,13 @@ typedef uint16_t coll_table_entry_t; * in the lowest 5 bits of the word. */ typedef uint64_t dev_table_entry_t; -#define DEV_TABLE_ITT_ADDR(x) ((x) & GENMASK(51, 8)) -#define DEV_TABLE_ITT_SIZE(x) (BIT(((x) & GENMASK(4, 0)) + 1)) -#define DEV_TABLE_ENTRY(addr, bits) \ - (((addr) & GENMASK(51, 8)) | (((bits) - 1) & GENMASK(4, 0))) +#define DEV_TABLE_ITT_ADDR(x) ((x)&GENMASK(51, 8)) +#define DEV_TABLE_ITT_SIZE(x) (BIT(((x)&GENMASK(4, 0)) + 1)) +#define DEV_TABLE_ENTRY(addr, bits) \ + (((addr)&GENMASK(51, 8)) | (((bits)-1) & GENMASK(4, 0))) -#define GITS_BASER_RO_MASK (GITS_BASER_TYPE_MASK | \ - (0x1fL << GITS_BASER_ENTRY_SIZE_SHIFT)) +#define GITS_BASER_RO_MASK \ + (GITS_BASER_TYPE_MASK | (0x1fL << GITS_BASER_ENTRY_SIZE_SHIFT)) /* * The physical address is encoded slightly differently depending on @@ -112,8 +113,7 @@ typedef uint64_t dev_table_entry_t; static paddr_t get_baser_phys_addr(uint64_t reg) { if ( reg & BIT(9) ) - return (reg & GENMASK(47, 16)) | - ((reg & GENMASK(15, 12)) << 36); + return (reg & GENMASK(47, 16)) | ((reg & GENMASK(15, 12)) << 36); else return reg & GENMASK(47, 12); } @@ -132,9 +132,9 @@ static int its_set_collection(struct virt_its *its, uint16_t collid, if ( collid >= its->max_collections ) return -ENOENT; - return access_guest_memory_by_ipa(its->d, - addr + collid * sizeof(coll_table_entry_t), - &vcpu_id, sizeof(vcpu_id), true); + return access_guest_memory_by_ipa( + its->d, addr + collid * sizeof(coll_table_entry_t), &vcpu_id, + sizeof(vcpu_id), true); } /* Must be called with the ITS lock held. */ @@ -150,9 +150,9 @@ static struct vcpu *get_vcpu_from_collection(struct virt_its *its, if ( collid >= its->max_collections ) return NULL; - ret = access_guest_memory_by_ipa(its->d, - addr + collid * sizeof(coll_table_entry_t), - &vcpu_id, sizeof(coll_table_entry_t), false); + ret = access_guest_memory_by_ipa( + its->d, addr + collid * sizeof(coll_table_entry_t), &vcpu_id, + sizeof(coll_table_entry_t), false); if ( ret ) return NULL; @@ -200,8 +200,8 @@ static int its_get_itt(struct virt_its *its, uint32_t devid, * a device ID and return the address of the ITTE belonging to the event ID * (which is an index into that table). */ -static paddr_t its_get_itte_address(struct virt_its *its, - uint32_t devid, uint32_t evid) +static paddr_t its_get_itte_address(struct virt_its *its, uint32_t devid, + uint32_t evid) { dev_table_entry_t itt; int ret; @@ -256,8 +256,8 @@ static bool read_itte(struct virt_its *its, uint32_t devid, uint32_t evid, * If vcpu_ptr is provided, returns the VCPU belonging to that collection. * Must be called with the ITS lock held. 
*/ -static bool write_itte(struct virt_its *its, uint32_t devid, - uint32_t evid, uint32_t collid, uint32_t vlpi) +static bool write_itte(struct virt_its *its, uint32_t devid, uint32_t evid, + uint32_t collid, uint32_t vlpi) { paddr_t addr; struct vits_itte itte; @@ -287,15 +287,15 @@ static uint64_t its_cmd_mask_field(uint64_t *its_cmd, unsigned int word, return (its_cmd[word] >> shift) & GENMASK(size - 1, 0); } -#define its_cmd_get_command(cmd) its_cmd_mask_field(cmd, 0, 0, 8) -#define its_cmd_get_deviceid(cmd) its_cmd_mask_field(cmd, 0, 32, 32) -#define its_cmd_get_size(cmd) its_cmd_mask_field(cmd, 1, 0, 5) -#define its_cmd_get_id(cmd) its_cmd_mask_field(cmd, 1, 0, 32) -#define its_cmd_get_physical_id(cmd) its_cmd_mask_field(cmd, 1, 32, 32) -#define its_cmd_get_collection(cmd) its_cmd_mask_field(cmd, 2, 0, 16) -#define its_cmd_get_target_addr(cmd) its_cmd_mask_field(cmd, 2, 16, 32) -#define its_cmd_get_validbit(cmd) its_cmd_mask_field(cmd, 2, 63, 1) -#define its_cmd_get_ittaddr(cmd) (its_cmd_mask_field(cmd, 2, 8, 44) << 8) +#define its_cmd_get_command(cmd) its_cmd_mask_field(cmd, 0, 0, 8) +#define its_cmd_get_deviceid(cmd) its_cmd_mask_field(cmd, 0, 32, 32) +#define its_cmd_get_size(cmd) its_cmd_mask_field(cmd, 1, 0, 5) +#define its_cmd_get_id(cmd) its_cmd_mask_field(cmd, 1, 0, 32) +#define its_cmd_get_physical_id(cmd) its_cmd_mask_field(cmd, 1, 32, 32) +#define its_cmd_get_collection(cmd) its_cmd_mask_field(cmd, 2, 0, 16) +#define its_cmd_get_target_addr(cmd) its_cmd_mask_field(cmd, 2, 16, 32) +#define its_cmd_get_validbit(cmd) its_cmd_mask_field(cmd, 2, 63, 1) +#define its_cmd_get_ittaddr(cmd) (its_cmd_mask_field(cmd, 2, 8, 44) << 8) static int its_handle_int(struct virt_its *its, uint64_t *cmdptr) { @@ -360,8 +360,8 @@ static int its_handle_clear(struct virt_its *its, uint64_t *cmdptr) if ( !read_itte(its, devid, eventid, &vcpu, &vlpi) ) goto out_unlock; - p = gicv3_its_get_event_pending_irq(its->d, its->doorbell_address, - devid, eventid); + p = gicv3_its_get_event_pending_irq(its->d, its->doorbell_address, devid, + eventid); /* Protect against an invalid LPI number. 
*/ if ( unlikely(!p) ) goto out_unlock; @@ -416,8 +416,8 @@ static int update_lpi_property(struct domain *d, struct pending_irq *p) addr = d->arch.vgic.rdist_propbase & GENMASK(51, 12); - ret = access_guest_memory_by_ipa(d, addr + p->irq - LPI_OFFSET, - &property, sizeof(property), false); + ret = access_guest_memory_by_ipa(d, addr + p->irq - LPI_OFFSET, &property, + sizeof(property), false); if ( ret ) return ret; @@ -482,8 +482,8 @@ static int its_handle_inv(struct virt_its *its, uint64_t *cmdptr) if ( vlpi == INVALID_LPI ) goto out_unlock_its; - p = gicv3_its_get_event_pending_irq(d, its->doorbell_address, - devid, eventid); + p = gicv3_its_get_event_pending_irq(d, its->doorbell_address, devid, + eventid); if ( unlikely(!p) ) goto out_unlock_its; @@ -517,7 +517,7 @@ static int its_handle_invall(struct virt_its *its, uint64_t *cmdptr) uint32_t collid = its_cmd_get_collection(cmdptr); struct vcpu *vcpu; struct pending_irq *pirqs[16]; - uint64_t vlpi = 0; /* 64-bit to catch overflows */ + uint64_t vlpi = 0; /* 64-bit to catch overflows */ unsigned int nr_lpis, i; unsigned long flags; int ret = 0; @@ -547,13 +547,12 @@ static int its_handle_invall(struct virt_its *its, uint64_t *cmdptr) spin_lock_irqsave(&vcpu->arch.vgic.lock, flags); read_lock(&its->d->arch.vgic.pend_lpi_tree_lock); - do - { + do { int err; - nr_lpis = radix_tree_gang_lookup(&its->d->arch.vgic.pend_lpi_tree, - (void **)pirqs, vlpi, - ARRAY_SIZE(pirqs)); + nr_lpis = + radix_tree_gang_lookup(&its->d->arch.vgic.pend_lpi_tree, + (void **)pirqs, vlpi, ARRAY_SIZE(pirqs)); for ( i = 0; i < nr_lpis; i++ ) { @@ -569,11 +568,11 @@ static int its_handle_invall(struct virt_its *its, uint64_t *cmdptr) else ret = err; } - /* - * Loop over the next gang of pending_irqs until we reached the end of - * a (fully populated) tree or the lookup function returns less LPIs than - * it has been asked for. - */ + /* + * Loop over the next gang of pending_irqs until we reached the end of + * a (fully populated) tree or the lookup function returns less LPIs + * than it has been asked for. + */ } while ( (++vlpi < its->d->arch.vgic.nr_lpis) && (nr_lpis == ARRAY_SIZE(pirqs)) ); @@ -584,8 +583,8 @@ static int its_handle_invall(struct virt_its *its, uint64_t *cmdptr) } /* Must be called with the ITS lock held. */ -static int its_discard_event(struct virt_its *its, - uint32_t vdevid, uint32_t vevid) +static int its_discard_event(struct virt_its *its, uint32_t vdevid, + uint32_t vevid) { struct pending_irq *p; unsigned long flags; @@ -626,8 +625,8 @@ static int its_discard_event(struct virt_its *its, spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags); /* Remove the corresponding host LPI entry */ - return gicv3_remove_guest_event(its->d, its->doorbell_address, - vdevid, vevid); + return gicv3_remove_guest_event(its->d, its->doorbell_address, vdevid, + vevid); } static void its_unmap_device(struct virt_its *its, uint32_t devid) @@ -683,7 +682,6 @@ static int its_handle_mapd(struct virt_its *its, uint64_t *cmdptr) */ if ( is_hardware_domain(its->d) ) { - /* * Dom0's ITSes are mapped 1:1, so both addresses are the same. * Also the device IDs are equal. @@ -755,8 +753,8 @@ static int its_handle_mapti(struct virt_its *its, uint64_t *cmdptr) * determined by the same device ID and event ID on the host side. * This returns us the corresponding, still unused pending_irq. 
*/ - pirq = gicv3_assign_guest_event(its->d, its->doorbell_address, - devid, eventid, intid); + pirq = gicv3_assign_guest_event(its->d, its->doorbell_address, devid, + eventid, intid); if ( !pirq ) goto out_remove_mapping; @@ -830,8 +828,8 @@ static int its_handle_movi(struct virt_its *its, uint64_t *cmdptr) if ( !nvcpu ) goto out_unlock; - p = gicv3_its_get_event_pending_irq(its->d, its->doorbell_address, - devid, eventid); + p = gicv3_its_get_event_pending_irq(its->d, its->doorbell_address, devid, + eventid); if ( unlikely(!p) ) goto out_unlock; @@ -892,14 +890,14 @@ out_unlock: return ret; } -#define ITS_CMD_BUFFER_SIZE(baser) ((((baser) & 0xff) + 1) << 12) -#define ITS_CMD_OFFSET(reg) ((reg) & GENMASK(19, 5)) +#define ITS_CMD_BUFFER_SIZE(baser) ((((baser)&0xff) + 1) << 12) +#define ITS_CMD_OFFSET(reg) ((reg)&GENMASK(19, 5)) static void dump_its_command(uint64_t *command) { gdprintk(XENLOG_WARNING, " cmd 0x%02lx: %016lx %016lx %016lx %016lx\n", - its_cmd_get_command(command), - command[0], command[1], command[2], command[3]); + its_cmd_get_command(command), command[0], command[1], command[2], + command[3]); } /* @@ -921,12 +919,12 @@ static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its) { int ret; - ret = access_guest_memory_by_ipa(d, addr + its->creadr, - command, sizeof(command), false); + ret = access_guest_memory_by_ipa(d, addr + its->creadr, command, + sizeof(command), false); if ( ret ) return ret; - switch ( its_cmd_get_command(command) ) + switch (its_cmd_get_command(command)) { case GITS_CMD_CLEAR: ret = its_handle_clear(its, command); @@ -969,7 +967,7 @@ static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its) } write_u64_atomic(&its->creadr, (its->creadr + ITS_CMD_SIZE) % - ITS_CMD_BUFFER_SIZE(its->cbaser)); + ITS_CMD_BUFFER_SIZE(its->cbaser)); if ( ret ) { @@ -988,7 +986,7 @@ static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its) *****************************/ /* Identifying as an ARM IP, using "X" as the product ID. */ -#define GITS_IIDR_VALUE 0x5800034c +#define GITS_IIDR_VALUE 0x5800034c static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, register_t *r, void *priv) @@ -996,7 +994,7 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, struct virt_its *its = priv; uint64_t reg; - switch ( info->gpa & 0xffff ) + switch (info->gpa & 0xffff) { case VREG32(GITS_CTLR): { @@ -1006,7 +1004,8 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, */ bool have_cmd_lock; - if ( info->dabt.size != DABT_WORD ) goto bad_width; + if ( info->dabt.size != DABT_WORD ) + goto bad_width; have_cmd_lock = spin_trylock(&its->vcmd_lock); reg = its->enabled ? 
GITS_CTLR_ENABLE : 0; @@ -1022,12 +1021,14 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, } case VREG32(GITS_IIDR): - if ( info->dabt.size != DABT_WORD ) goto bad_width; + if ( info->dabt.size != DABT_WORD ) + goto bad_width; *r = vreg_reg32_extract(GITS_IIDR_VALUE, info); break; case VREG64(GITS_TYPER): - if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; + if ( !vgic_reg64_check_access(info->dabt) ) + goto bad_width; reg = GITS_TYPER_PHYSICAL; reg |= (sizeof(struct vits_itte) - 1) << GITS_TYPER_ITT_SIZE_SHIFT; @@ -1044,14 +1045,16 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, goto read_reserved; case VREG64(GITS_CBASER): - if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; + if ( !vgic_reg64_check_access(info->dabt) ) + goto bad_width; spin_lock(&its->its_lock); *r = vreg_reg64_extract(its->cbaser, info); spin_unlock(&its->its_lock); break; case VREG64(GITS_CWRITER): - if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; + if ( !vgic_reg64_check_access(info->dabt) ) + goto bad_width; /* CWRITER is only written by the guest, so no extra locking here. */ reg = its->cwriter; @@ -1059,7 +1062,8 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, break; case VREG64(GITS_CREADR): - if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; + if ( !vgic_reg64_check_access(info->dabt) ) + goto bad_width; /* * Lockless access, to avoid waiting for the whole command queue to be @@ -1074,15 +1078,17 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, case VRANGE64(0x0098, 0x00F8): goto read_reserved; - case VREG64(GITS_BASER0): /* device table */ - if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; + case VREG64(GITS_BASER0): /* device table */ + if ( !vgic_reg64_check_access(info->dabt) ) + goto bad_width; spin_lock(&its->its_lock); *r = vreg_reg64_extract(its->baser_dev, info); spin_unlock(&its->its_lock); break; - case VREG64(GITS_BASER1): /* collection table */ - if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; + case VREG64(GITS_BASER1): /* collection table */ + if ( !vgic_reg64_check_access(info->dabt) ) + goto bad_width; spin_lock(&its->its_lock); *r = vreg_reg64_extract(its->baser_coll, info); spin_unlock(&its->its_lock); @@ -1098,7 +1104,8 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, goto read_impl_defined; case VREG32(GITS_PIDR2): - if ( info->dabt.size != DABT_WORD ) goto bad_width; + if ( info->dabt.size != DABT_WORD ) + goto bad_width; *r = vreg_reg32_extract(GIC_PIDR2_ARCH_GICv3, info); break; @@ -1106,16 +1113,16 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, goto read_impl_defined; default: - printk(XENLOG_G_ERR - "%pv: vGITS: unhandled read r%d offset %#04lx\n", - v, info->dabt.reg, (unsigned long)info->gpa & 0xffff); + printk(XENLOG_G_ERR "%pv: vGITS: unhandled read r%d offset %#04lx\n", v, + info->dabt.reg, (unsigned long)info->gpa & 0xffff); return 0; } return 1; read_as_zero_64: - if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; + if ( !vgic_reg64_check_access(info->dabt) ) + goto bad_width; *r = 0; return 1; @@ -1173,8 +1180,10 @@ static bool vgic_v3_verify_its_status(struct virt_its *its, bool status) !(its->baser_dev & GITS_VALID_BIT) || !(its->baser_coll & GITS_VALID_BIT) ) { - printk(XENLOG_G_WARNING "d%d tried to enable ITS without having the tables configured.\n", - its->d->domain_id); + printk( + XENLOG_G_WARNING + "d%d tried to enable ITS without having the tables 
configured.\n", + its->d->domain_id); return false; } @@ -1198,7 +1207,7 @@ static void sanitize_its_base_reg(uint64_t *reg) uint64_t r = *reg; /* Avoid outer shareable. */ - switch ( (r >> GITS_BASER_SHAREABILITY_SHIFT) & 0x03 ) + switch ((r >> GITS_BASER_SHAREABILITY_SHIFT) & 0x03) { case GIC_BASER_OuterShareable: r &= ~GITS_BASER_SHAREABILITY_MASK; @@ -1209,7 +1218,7 @@ static void sanitize_its_base_reg(uint64_t *reg) } /* Avoid any inner non-cacheable mapping. */ - switch ( (r >> GITS_BASER_INNER_CACHEABILITY_SHIFT) & 0x07 ) + switch ((r >> GITS_BASER_INNER_CACHEABILITY_SHIFT) & 0x07) { case GIC_BASER_CACHE_nCnB: case GIC_BASER_CACHE_nC: @@ -1221,7 +1230,7 @@ static void sanitize_its_base_reg(uint64_t *reg) } /* Only allow non-cacheable or same-as-inner. */ - switch ( (r >> GITS_BASER_OUTER_CACHEABILITY_SHIFT) & 0x07 ) + switch ((r >> GITS_BASER_OUTER_CACHEABILITY_SHIFT) & 0x07) { case GIC_BASER_CACHE_SameAsInner: case GIC_BASER_CACHE_nC: @@ -1243,13 +1252,14 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, uint64_t reg; uint32_t reg32; - switch ( info->gpa & 0xffff ) + switch (info->gpa & 0xffff) { case VREG32(GITS_CTLR): { uint32_t ctlr; - if ( info->dabt.size != DABT_WORD ) goto bad_width; + if ( info->dabt.size != DABT_WORD ) + goto bad_width; /* * We need to take the vcmd_lock to prevent a guest from disabling @@ -1262,8 +1272,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, vreg_reg32_update(®32, r, info); if ( ctlr ^ reg32 ) - its->enabled = vgic_v3_verify_its_status(its, - reg32 & GITS_CTLR_ENABLE); + its->enabled = + vgic_v3_verify_its_status(its, reg32 & GITS_CTLR_ENABLE); spin_unlock(&its->its_lock); spin_unlock(&its->vcmd_lock); return 1; @@ -1283,7 +1293,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, goto write_reserved; case VREG64(GITS_CBASER): - if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; + if ( !vgic_reg64_check_access(info->dabt) ) + goto bad_width; spin_lock(&its->its_lock); /* Changing base registers with the ITS enabled is UNPREDICTABLE. 
*/ @@ -1306,7 +1317,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, return 1; case VREG64(GITS_CWRITER): - if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; + if ( !vgic_reg64_check_access(info->dabt) ) + goto bad_width; spin_lock(&its->vcmd_lock); reg = ITS_CMD_OFFSET(its->cwriter); @@ -1327,8 +1339,9 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, case VRANGE32(0x0098, 0x00FC): goto write_reserved; - case VREG64(GITS_BASER0): /* device table */ - if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; + case VREG64(GITS_BASER0): /* device table */ + if ( !vgic_reg64_check_access(info->dabt) ) + goto bad_width; spin_lock(&its->its_lock); @@ -1339,7 +1352,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, if ( its->enabled ) { spin_unlock(&its->its_lock); - gdprintk(XENLOG_WARNING, "vGITS: tried to change BASER with the ITS enabled.\n"); + gdprintk(XENLOG_WARNING, + "vGITS: tried to change BASER with the ITS enabled.\n"); return 1; } @@ -1366,8 +1380,9 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, spin_unlock(&its->its_lock); return 1; - case VREG64(GITS_BASER1): /* collection table */ - if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; + case VREG64(GITS_BASER1): /* collection table */ + if ( !vgic_reg64_check_access(info->dabt) ) + goto bad_width; spin_lock(&its->its_lock); /* @@ -1377,7 +1392,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, if ( its->enabled ) { spin_unlock(&its->its_lock); - gdprintk(XENLOG_INFO, "vGITS: tried to change BASER with the ITS enabled.\n"); + gdprintk(XENLOG_INFO, + "vGITS: tried to change BASER with the ITS enabled.\n"); return 1; } @@ -1404,18 +1420,17 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, goto write_reserved; case VRANGE32(0xC000, 0xFFCC): goto write_impl_defined; - case VRANGE32(0xFFD0, 0xFFE4): /* IMPDEF identification registers */ + case VRANGE32(0xFFD0, 0xFFE4): /* IMPDEF identification registers */ goto write_impl_defined; case VREG32(GITS_PIDR2): goto write_ignore_32; - case VRANGE32(0xFFEC, 0xFFFC): /* IMPDEF identification registers */ + case VRANGE32(0xFFEC, 0xFFFC): /* IMPDEF identification registers */ goto write_impl_defined; default: - printk(XENLOG_G_ERR - "%pv: vGITS: unhandled write r%d offset %#04lx\n", + printk(XENLOG_G_ERR "%pv: vGITS: unhandled write r%d offset %#04lx\n", v, info->dabt.reg, (unsigned long)info->gpa & 0xffff); return 0; } @@ -1423,11 +1438,13 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, return 1; write_ignore_64: - if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; + if ( !vgic_reg64_check_access(info->dabt) ) + goto bad_width; return 1; write_ignore_32: - if ( info->dabt.size != DABT_WORD ) goto bad_width; + if ( info->dabt.size != DABT_WORD ) + goto bad_width; return 1; write_impl_defined: @@ -1450,7 +1467,7 @@ bad_width: } static const struct mmio_handler_ops vgic_its_mmio_handler = { - .read = vgic_v3_its_mmio_read, + .read = vgic_v3_its_mmio_read, .write = vgic_v3_its_mmio_write, }; @@ -1465,19 +1482,20 @@ static int vgic_v3_its_init_virtual(struct domain *d, paddr_t guest_addr, if ( !its ) return -ENOMEM; - base_attr = GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT; - base_attr |= GIC_BASER_CACHE_SameAsInner << GITS_BASER_OUTER_CACHEABILITY_SHIFT; + base_attr = GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT; + base_attr |= GIC_BASER_CACHE_SameAsInner + << 
GITS_BASER_OUTER_CACHEABILITY_SHIFT; base_attr |= GIC_BASER_CACHE_RaWaWb << GITS_BASER_INNER_CACHEABILITY_SHIFT; - its->cbaser = base_attr; - base_attr |= 0ULL << GITS_BASER_PAGE_SIZE_SHIFT; /* 4K pages */ + its->cbaser = base_attr; + base_attr |= 0ULL << GITS_BASER_PAGE_SIZE_SHIFT; /* 4K pages */ its->baser_dev = GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT; - its->baser_dev |= (sizeof(dev_table_entry_t) - 1) << - GITS_BASER_ENTRY_SIZE_SHIFT; + its->baser_dev |= (sizeof(dev_table_entry_t) - 1) + << GITS_BASER_ENTRY_SIZE_SHIFT; its->baser_dev |= base_attr; - its->baser_coll = GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT; - its->baser_coll |= (sizeof(coll_table_entry_t) - 1) << - GITS_BASER_ENTRY_SIZE_SHIFT; + its->baser_coll = GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT; + its->baser_coll |= (sizeof(coll_table_entry_t) - 1) + << GITS_BASER_ENTRY_SIZE_SHIFT; its->baser_coll |= base_attr; its->d = d; its->doorbell_address = guest_addr + ITS_DOORBELL_OFFSET; @@ -1503,7 +1521,7 @@ unsigned int vgic_v3_its_count(const struct domain *d) if ( !is_hardware_domain(d) ) return 0; - list_for_each_entry(hw_its, &host_its_list, entry) + list_for_each_entry (hw_its, &host_its_list, entry) ret++; return ret; @@ -1525,15 +1543,14 @@ int vgic_v3_its_init_domain(struct domain *d) { struct host_its *hw_its; - list_for_each_entry(hw_its, &host_its_list, entry) + list_for_each_entry (hw_its, &host_its_list, entry) { /* * For each host ITS create a virtual ITS using the same * base and thus doorbell address. * Use the same number of device ID and event ID bits as the host. */ - ret = vgic_v3_its_init_virtual(d, hw_its->addr, - hw_its->devid_bits, + ret = vgic_v3_its_init_virtual(d, hw_its->addr, hw_its->devid_bits, hw_its->evid_bits); if ( ret ) return ret; @@ -1553,7 +1570,7 @@ void vgic_v3_its_free_domain(struct domain *d) if ( list_head_is_null(&d->arch.vgic.vits_list) ) return; - list_for_each_entry_safe( pos, temp, &d->arch.vgic.vits_list, vits_list ) + list_for_each_entry_safe(pos, temp, &d->arch.vgic.vits_list, vits_list) { list_del(&pos->vits_list); xfree(pos); diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c index 474be134c1..6d10663b45 100644 --- a/xen/arch/arm/vgic-v3.c +++ b/xen/arch/arm/vgic-v3.c @@ -42,27 +42,27 @@ * We don't emulate a specific registers scheme so implement the others * bits as RES0 as recommended by the spec (see 8.1.13 in ARM IHI 0069A). 
*/ -#define GICV3_GICD_PIDR2 0x30 -#define GICV3_GICR_PIDR2 GICV3_GICD_PIDR2 +#define GICV3_GICD_PIDR2 0x30 +#define GICV3_GICR_PIDR2 GICV3_GICD_PIDR2 /* * GICD_CTLR default value: * - No GICv2 compatibility => ARE = 1 */ -#define VGICD_CTLR_DEFAULT (GICD_CTLR_ARE_NS) +#define VGICD_CTLR_DEFAULT (GICD_CTLR_ARE_NS) -static struct { +static struct +{ bool enabled; /* Distributor interface address */ paddr_t dbase; /* Re-distributor regions */ unsigned int nr_rdist_regions; const struct rdist_region *regions; - unsigned int intid_bits; /* Number of interrupt ID bits */ + unsigned int intid_bits; /* Number of interrupt ID bits */ } vgic_v3_hw; -void vgic_v3_setup_hw(paddr_t dbase, - unsigned int nr_rdist_regions, +void vgic_v3_setup_hw(paddr_t dbase, unsigned int nr_rdist_regions, const struct rdist_region *regions, unsigned int intid_bits) { @@ -161,12 +161,11 @@ static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank, } static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, - uint32_t gicr_reg, - register_t *r) + uint32_t gicr_reg, register_t *r) { struct hsr_dabt dabt = info->dabt; - switch ( gicr_reg ) + switch (gicr_reg) { case VREG32(GICR_CTLR): { @@ -174,7 +173,8 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, if ( !v->domain->arch.vgic.has_its ) goto read_as_zero_32; - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; spin_lock_irqsave(&v->arch.vgic.lock, flags); *r = vreg_reg32_extract(!!(v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED), @@ -184,7 +184,8 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, } case VREG32(GICR_IIDR): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; *r = vreg_reg32_extract(GICV3_GICR_IIDR_VAL, info); return 1; @@ -192,7 +193,8 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, { uint64_t typer, aff; - if ( !vgic_reg64_check_access(dabt) ) goto bad_width; + if ( !vgic_reg64_check_access(dabt) ) + goto bad_width; aff = (MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 3) << 56 | MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 2) << 48 | MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 1) << 40 | @@ -240,7 +242,8 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, case VREG64(GICR_PROPBASER): if ( !v->domain->arch.vgic.has_its ) goto read_as_zero_64; - if ( !vgic_reg64_check_access(dabt) ) goto bad_width; + if ( !vgic_reg64_check_access(dabt) ) + goto bad_width; vgic_lock(v); *r = vreg_reg64_extract(v->domain->arch.vgic.rdist_propbase, info); @@ -253,11 +256,12 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, if ( !v->domain->arch.vgic.has_its ) goto read_as_zero_64; - if ( !vgic_reg64_check_access(dabt) ) goto bad_width; + if ( !vgic_reg64_check_access(dabt) ) + goto bad_width; spin_lock_irqsave(&v->arch.vgic.lock, flags); *r = vreg_reg64_extract(v->arch.vgic.rdist_pendbase, info); - *r &= ~GICR_PENDBASER_PTZ; /* WO, reads as 0 */ + *r &= ~GICR_PENDBASER_PTZ; /* WO, reads as 0 */ spin_unlock_irqrestore(&v->arch.vgic.lock, flags); return 1; } @@ -280,7 +284,8 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, goto read_reserved; case VREG32(GICR_SYNCR): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; /* RO . 
But when read it always returns busy bito bit[0] */ *r = vreg_reg32_extract(GICR_SYNCR_NOT_BUSY, info); return 1; @@ -305,35 +310,37 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, case 0xFFD0 ... 0xFFE4: /* Implementation defined identification registers */ - goto read_impl_defined; + goto read_impl_defined; case VREG32(GICR_PIDR2): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; *r = vreg_reg32_extract(GICV3_GICR_PIDR2, info); - return 1; + return 1; case 0xFFEC ... 0xFFFC: - /* Implementation defined identification registers */ - goto read_impl_defined; + /* Implementation defined identification registers */ + goto read_impl_defined; default: - printk(XENLOG_G_ERR - "%pv: vGICR: unhandled read r%d offset %#08x\n", - v, dabt.reg, gicr_reg); + printk(XENLOG_G_ERR "%pv: vGICR: unhandled read r%d offset %#08x\n", v, + dabt.reg, gicr_reg); return 0; } bad_width: - printk(XENLOG_G_ERR "%pv vGICR: bad read width %d r%d offset %#08x\n", - v, dabt.size, dabt.reg, gicr_reg); + printk(XENLOG_G_ERR "%pv vGICR: bad read width %d r%d offset %#08x\n", v, + dabt.size, dabt.reg, gicr_reg); return 0; read_as_zero_64: - if ( !vgic_reg64_check_access(dabt) ) goto bad_width; + if ( !vgic_reg64_check_access(dabt) ) + goto bad_width; *r = 0; return 1; read_as_zero_32: - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; *r = 0; return 1; @@ -345,8 +352,7 @@ read_impl_defined: return 1; read_reserved: - printk(XENLOG_G_DEBUG - "%pv: vGICR: RAZ on reserved register offset %#08x\n", + printk(XENLOG_G_DEBUG "%pv: vGICR: RAZ on reserved register offset %#08x\n", v, gicr_reg); *r = 0; return 1; @@ -370,7 +376,7 @@ static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask, /* We want to avoid outer shareable. */ static uint64_t vgic_sanitise_shareability(uint64_t field) { - switch ( field ) + switch (field) { case GIC_BASER_OuterShareable: return GIC_BASER_InnerShareable; @@ -382,7 +388,7 @@ static uint64_t vgic_sanitise_shareability(uint64_t field) /* Avoid any inner non-cacheable mapping. */ static uint64_t vgic_sanitise_inner_cacheability(uint64_t field) { - switch ( field ) + switch (field) { case GIC_BASER_CACHE_nCnB: case GIC_BASER_CACHE_nC: @@ -395,7 +401,7 @@ static uint64_t vgic_sanitise_inner_cacheability(uint64_t field) /* Non-cacheable or same-as-inner are OK. */ static uint64_t vgic_sanitise_outer_cacheability(uint64_t field) { - switch ( field ) + switch (field) { case GIC_BASER_CACHE_SameAsInner: case GIC_BASER_CACHE_nC: @@ -474,13 +480,12 @@ static void vgic_vcpu_enable_lpis(struct vcpu *v) } static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info, - uint32_t gicr_reg, - register_t r) + uint32_t gicr_reg, register_t r) { struct hsr_dabt dabt = info->dabt; uint64_t reg; - switch ( gicr_reg ) + switch (gicr_reg) { case VREG32(GICR_CTLR): { @@ -488,9 +493,10 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info, if ( !v->domain->arch.vgic.has_its ) goto write_ignore_32; - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; - vgic_lock(v); /* protects rdists_enabled */ + vgic_lock(v); /* protects rdists_enabled */ spin_lock_irqsave(&v->arch.vgic.lock, flags); /* LPIs can only be enabled once, but never disabled again. 
*/ @@ -540,7 +546,8 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info, case VREG64(GICR_PROPBASER): if ( !v->domain->arch.vgic.has_its ) goto write_ignore_64; - if ( !vgic_reg64_check_access(dabt) ) goto bad_width; + if ( !vgic_reg64_check_access(dabt) ) + goto bad_width; vgic_lock(v); @@ -566,7 +573,8 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info, if ( !v->domain->arch.vgic.has_its ) goto write_ignore_64; - if ( !vgic_reg64_check_access(dabt) ) goto bad_width; + if ( !vgic_reg64_check_access(dabt) ) + goto bad_width; spin_lock_irqsave(&v->arch.vgic.lock, flags); @@ -625,33 +633,35 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info, case 0xFFD0 ... 0xFFE4: /* Implementation defined identification registers */ - goto write_impl_defined; + goto write_impl_defined; case VREG32(GICR_PIDR2): /* RO */ goto write_ignore_32; case 0xFFEC ... 0xFFFC: - /* Implementation defined identification registers */ - goto write_impl_defined; + /* Implementation defined identification registers */ + goto write_impl_defined; default: - printk(XENLOG_G_ERR "%pv: vGICR: unhandled write r%d offset %#08x\n", - v, dabt.reg, gicr_reg); + printk(XENLOG_G_ERR "%pv: vGICR: unhandled write r%d offset %#08x\n", v, + dabt.reg, gicr_reg); return 0; } bad_width: - printk(XENLOG_G_ERR - "%pv: vGICR: bad write width %d r%d=%"PRIregister" offset %#08x\n", - v, dabt.size, dabt.reg, r, gicr_reg); + printk(XENLOG_G_ERR "%pv: vGICR: bad write width %d r%d=%" PRIregister + " offset %#08x\n", + v, dabt.size, dabt.reg, r, gicr_reg); return 0; write_ignore_64: - if ( vgic_reg64_check_access(dabt) ) goto bad_width; + if ( vgic_reg64_check_access(dabt) ) + goto bad_width; return 1; write_ignore_32: - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; return 1; write_impl_defined: @@ -661,8 +671,7 @@ write_impl_defined: return 1; write_reserved: - printk(XENLOG_G_DEBUG - "%pv: vGICR: WI on reserved register offset %#08x\n", + printk(XENLOG_G_DEBUG "%pv: vGICR: WI on reserved register offset %#08x\n", v, gicr_reg); return 1; } @@ -675,26 +684,31 @@ static int __vgic_v3_distr_common_mmio_read(const char *name, struct vcpu *v, struct vgic_irq_rank *rank; unsigned long flags; - switch ( reg ) + switch (reg) { case VRANGE32(GICD_IGROUPR, GICD_IGROUPRN): /* We do not implement security extensions for guests, read zero */ - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; goto read_as_zero; case VRANGE32(GICD_ISENABLER, GICD_ISENABLERN): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 1, reg - GICD_ISENABLER, DABT_WORD); - if ( rank == NULL ) goto read_as_zero; + if ( rank == NULL ) + goto read_as_zero; vgic_lock_rank(v, rank, flags); *r = vreg_reg32_extract(rank->ienable, info); vgic_unlock_rank(v, rank, flags); return 1; case VRANGE32(GICD_ICENABLER, GICD_ICENABLERN): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 1, reg - GICD_ICENABLER, DABT_WORD); - if ( rank == NULL ) goto read_as_zero; + if ( rank == NULL ) + goto read_as_zero; vgic_lock_rank(v, rank, flags); *r = vreg_reg32_extract(rank->ienable, info); vgic_unlock_rank(v, rank, flags); @@ -715,9 +729,11 @@ static int __vgic_v3_distr_common_mmio_read(const char *name, struct vcpu *v, uint32_t ipriorityr; uint8_t rank_index; - if ( dabt.size != DABT_BYTE && 
dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 8, reg - GICD_IPRIORITYR, DABT_WORD); - if ( rank == NULL ) goto read_as_zero; + if ( rank == NULL ) + goto read_as_zero; rank_index = REG_RANK_INDEX(8, reg - GICD_IPRIORITYR, DABT_WORD); vgic_lock_rank(v, rank, flags); @@ -733,9 +749,11 @@ static int __vgic_v3_distr_common_mmio_read(const char *name, struct vcpu *v, { uint32_t icfgr; - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 2, reg - GICD_ICFGR, DABT_WORD); - if ( rank == NULL ) goto read_as_zero; + if ( rank == NULL ) + goto read_as_zero; vgic_lock_rank(v, rank, flags); icfgr = rank->icfg[REG_RANK_INDEX(2, reg - GICD_ICFGR, DABT_WORD)]; vgic_unlock_rank(v, rank, flags); @@ -746,15 +764,14 @@ static int __vgic_v3_distr_common_mmio_read(const char *name, struct vcpu *v, } default: - printk(XENLOG_G_ERR - "%pv: %s: unhandled read r%d offset %#08x\n", - v, name, dabt.reg, reg); + printk(XENLOG_G_ERR "%pv: %s: unhandled read r%d offset %#08x\n", v, + name, dabt.reg, reg); return 0; } bad_width: - printk(XENLOG_G_ERR "%pv: %s: bad read width %d r%d offset %#08x\n", - v, name, dabt.size, dabt.reg, reg); + printk(XENLOG_G_ERR "%pv: %s: bad read width %d r%d offset %#08x\n", v, + name, dabt.size, dabt.reg, reg); return 0; read_as_zero: @@ -771,16 +788,18 @@ static int __vgic_v3_distr_common_mmio_write(const char *name, struct vcpu *v, uint32_t tr; unsigned long flags; - switch ( reg ) + switch (reg) { case VRANGE32(GICD_IGROUPR, GICD_IGROUPRN): /* We do not implement security extensions for guests, write ignore */ goto write_ignore_32; case VRANGE32(GICD_ISENABLER, GICD_ISENABLERN): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 1, reg - GICD_ISENABLER, DABT_WORD); - if ( rank == NULL ) goto write_ignore; + if ( rank == NULL ) + goto write_ignore; vgic_lock_rank(v, rank, flags); tr = rank->ienable; vreg_reg32_setbits(&rank->ienable, r, info); @@ -789,9 +808,11 @@ static int __vgic_v3_distr_common_mmio_write(const char *name, struct vcpu *v, return 1; case VRANGE32(GICD_ICENABLER, GICD_ICENABLERN): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 1, reg - GICD_ICENABLER, DABT_WORD); - if ( rank == NULL ) goto write_ignore; + if ( rank == NULL ) + goto write_ignore; vgic_lock_rank(v, rank, flags); tr = rank->ienable; vreg_reg32_clearbits(&rank->ienable, r, info); @@ -800,29 +821,32 @@ static int __vgic_v3_distr_common_mmio_write(const char *name, struct vcpu *v, return 1; case VRANGE32(GICD_ISPENDR, GICD_ISPENDRN): - if ( dabt.size != DABT_WORD ) goto bad_width; - printk(XENLOG_G_ERR - "%pv: %s: unhandled word write %#"PRIregister" to ISPENDR%d\n", + if ( dabt.size != DABT_WORD ) + goto bad_width; + printk(XENLOG_G_ERR "%pv: %s: unhandled word write %#" PRIregister + " to ISPENDR%d\n", v, name, r, reg - GICD_ISPENDR); return 0; case VRANGE32(GICD_ICPENDR, GICD_ICPENDRN): - if ( dabt.size != DABT_WORD ) goto bad_width; - printk(XENLOG_G_ERR - "%pv: %s: unhandled word write %#"PRIregister" to ICPENDR%d\n", + if ( dabt.size != DABT_WORD ) + goto bad_width; + printk(XENLOG_G_ERR "%pv: %s: unhandled word write %#" PRIregister + " to ICPENDR%d\n", v, name, r, reg - GICD_ICPENDR); return 0; case VRANGE32(GICD_ISACTIVER, GICD_ISACTIVERN): - if ( dabt.size != DABT_WORD ) goto 
bad_width; - printk(XENLOG_G_ERR - "%pv: %s: unhandled word write %#"PRIregister" to ISACTIVER%d\n", + if ( dabt.size != DABT_WORD ) + goto bad_width; + printk(XENLOG_G_ERR "%pv: %s: unhandled word write %#" PRIregister + " to ISACTIVER%d\n", v, name, r, reg - GICD_ISACTIVER); return 0; case VRANGE32(GICD_ICACTIVER, GICD_ICACTIVERN): - printk(XENLOG_G_ERR - "%pv: %s: unhandled word write %#"PRIregister" to ICACTIVER%d\n", + printk(XENLOG_G_ERR "%pv: %s: unhandled word write %#" PRIregister + " to ICACTIVER%d\n", v, name, r, reg - GICD_ICACTIVER); goto write_ignore_32; @@ -830,9 +854,11 @@ static int __vgic_v3_distr_common_mmio_write(const char *name, struct vcpu *v, { uint32_t *ipriorityr, priority; - if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 8, reg - GICD_IPRIORITYR, DABT_WORD); - if ( rank == NULL ) goto write_ignore; + if ( rank == NULL ) + goto write_ignore; vgic_lock_rank(v, rank, flags); ipriorityr = &rank->ipriorityr[REG_RANK_INDEX(8, reg - GICD_IPRIORITYR, DABT_WORD)]; @@ -849,31 +875,34 @@ static int __vgic_v3_distr_common_mmio_write(const char *name, struct vcpu *v, case VRANGE32(GICD_ICFGR + 4, GICD_ICFGRN): /* PPI + SPIs */ /* ICFGR1 for PPI's, which is implementation defined if ICFGR1 is programmable or not. We chose to program */ - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; rank = vgic_rank_offset(v, 2, reg - GICD_ICFGR, DABT_WORD); - if ( rank == NULL ) goto write_ignore; + if ( rank == NULL ) + goto write_ignore; vgic_lock_rank(v, rank, flags); - vreg_reg32_update(&rank->icfg[REG_RANK_INDEX(2, reg - GICD_ICFGR, - DABT_WORD)], - r, info); + vreg_reg32_update( + &rank->icfg[REG_RANK_INDEX(2, reg - GICD_ICFGR, DABT_WORD)], r, + info); vgic_unlock_rank(v, rank, flags); return 1; default: - printk(XENLOG_G_ERR - "%pv: %s: unhandled write r%d=%"PRIregister" offset %#08x\n", + printk(XENLOG_G_ERR "%pv: %s: unhandled write r%d=%" PRIregister + " offset %#08x\n", v, name, dabt.reg, r, reg); return 0; } bad_width: - printk(XENLOG_G_ERR - "%pv: %s: bad write width %d r%d=%"PRIregister" offset %#08x\n", + printk(XENLOG_G_ERR "%pv: %s: bad write width %d r%d=%" PRIregister + " offset %#08x\n", v, name, dabt.size, dabt.reg, r, reg); return 0; write_ignore_32: - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; write_ignore: return 1; } @@ -883,7 +912,7 @@ static int vgic_v3_rdistr_sgi_mmio_read(struct vcpu *v, mmio_info_t *info, { struct hsr_dabt dabt = info->dabt; - switch ( gicr_reg ) + switch (gicr_reg) { case VREG32(GICR_IGROUPR0): case VREG32(GICR_ISENABLER0): @@ -892,12 +921,12 @@ static int vgic_v3_rdistr_sgi_mmio_read(struct vcpu *v, mmio_info_t *info, case VREG32(GICR_ICACTIVER0): case VRANGE32(GICR_IPRIORITYR0, GICR_IPRIORITYR7): case VRANGE32(GICR_ICFGR0, GICR_ICFGR1): - /* - * Above registers offset are common with GICD. - * So handle in common with GICD handling - */ - return __vgic_v3_distr_common_mmio_read("vGICR: SGI", v, info, - gicr_reg, r); + /* + * Above registers offset are common with GICD. 
+ * So handle in common with GICD handling + */ + return __vgic_v3_distr_common_mmio_read("vGICR: SGI", v, info, gicr_reg, + r); /* Read the pending status of an SGI is via GICR is not supported */ case VREG32(GICR_ISPENDR0): @@ -933,14 +962,15 @@ bad_width: return 0; read_as_zero_32: - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; read_as_zero: *r = 0; return 1; read_impl_defined: - printk(XENLOG_G_DEBUG - "%pv: vGICR: SGI: RAZ on implementation defined register offset %#08x\n", + printk(XENLOG_G_DEBUG "%pv: vGICR: SGI: RAZ on implementation defined " + "register offset %#08x\n", v, gicr_reg); *r = 0; return 1; @@ -951,7 +981,6 @@ read_reserved: v, gicr_reg); *r = 0; return 1; - } static int vgic_v3_rdistr_sgi_mmio_write(struct vcpu *v, mmio_info_t *info, @@ -959,7 +988,7 @@ static int vgic_v3_rdistr_sgi_mmio_write(struct vcpu *v, mmio_info_t *info, { struct hsr_dabt dabt = info->dabt; - switch ( gicr_reg ) + switch (gicr_reg) { case VREG32(GICR_IGROUPR0): case VREG32(GICR_ISENABLER0): @@ -968,24 +997,28 @@ static int vgic_v3_rdistr_sgi_mmio_write(struct vcpu *v, mmio_info_t *info, case VREG32(GICR_ICACTIVER0): case VREG32(GICR_ICFGR1): case VRANGE32(GICR_IPRIORITYR0, GICR_IPRIORITYR7): - /* - * Above registers offset are common with GICD. - * So handle common with GICD handling - */ - return __vgic_v3_distr_common_mmio_write("vGICR: SGI", v, - info, gicr_reg, r); + /* + * Above registers offset are common with GICD. + * So handle common with GICD handling + */ + return __vgic_v3_distr_common_mmio_write("vGICR: SGI", v, info, + gicr_reg, r); case VREG32(GICR_ISPENDR0): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; printk(XENLOG_G_ERR - "%pv: vGICR: SGI: unhandled word write %#"PRIregister" to ISPENDR0\n", + "%pv: vGICR: SGI: unhandled word write %#" PRIregister + " to ISPENDR0\n", v, r); return 0; case VREG32(GICR_ICPENDR0): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; printk(XENLOG_G_ERR - "%pv: vGICR: SGI: unhandled word write %#"PRIregister" to ICPENDR0\n", + "%pv: vGICR: SGI: unhandled word write %#" PRIregister + " to ICPENDR0\n", v, r); return 0; @@ -993,7 +1026,6 @@ static int vgic_v3_rdistr_sgi_mmio_write(struct vcpu *v, mmio_info_t *info, /* We do not implement security extensions for guests, write ignore */ goto write_ignore_32; - case VREG32(GICR_NSACR): /* We do not implement security extensions for guests, write ignore */ goto write_ignore_32; @@ -1006,19 +1038,20 @@ static int vgic_v3_rdistr_sgi_mmio_write(struct vcpu *v, mmio_info_t *info, } bad_width: - printk(XENLOG_G_ERR - "%pv: vGICR: SGI: bad write width %d r%d=%"PRIregister" offset %#08x\n", + printk(XENLOG_G_ERR "%pv: vGICR: SGI: bad write width %d r%d=%" PRIregister + " offset %#08x\n", v, dabt.size, dabt.reg, r, gicr_reg); return 0; write_ignore_32: - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; return 1; } static struct vcpu *get_vcpu_from_rdist(struct domain *d, - const struct vgic_rdist_region *region, - paddr_t gpa, uint32_t *offset) + const struct vgic_rdist_region *region, + paddr_t gpa, uint32_t *offset) { struct vcpu *v; unsigned int vcpu_id; @@ -1048,12 +1081,12 @@ static int vgic_v3_rdistr_mmio_read(struct vcpu *v, mmio_info_t *info, if ( offset < SZ_64K ) return __vgic_v3_rdistr_rd_mmio_read(v, info, offset, r); - else if ( (offset >= SZ_64K) && (offset < 2 * SZ_64K) ) + else if ( (offset >= SZ_64K) && 
(offset < 2 * SZ_64K) ) return vgic_v3_rdistr_sgi_mmio_read(v, info, (offset - SZ_64K), r); else printk(XENLOG_G_WARNING - "%pv: vGICR: unknown gpa read address %"PRIpaddr"\n", - v, info->gpa); + "%pv: vGICR: unknown gpa read address %" PRIpaddr "\n", + v, info->gpa); return 0; } @@ -1072,11 +1105,11 @@ static int vgic_v3_rdistr_mmio_write(struct vcpu *v, mmio_info_t *info, if ( offset < SZ_64K ) return __vgic_v3_rdistr_rd_mmio_write(v, info, offset, r); - else if ( (offset >= SZ_64K) && (offset < 2 * SZ_64K) ) + else if ( (offset >= SZ_64K) && (offset < 2 * SZ_64K) ) return vgic_v3_rdistr_sgi_mmio_write(v, info, (offset - SZ_64K), r); else printk(XENLOG_G_WARNING - "%pv: vGICR: unknown gpa write address %"PRIpaddr"\n", + "%pv: vGICR: unknown gpa write address %" PRIpaddr "\n", v, info->gpa); return 0; @@ -1092,10 +1125,11 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info, perfc_incr(vgicd_reads); - switch ( gicd_reg ) + switch (gicd_reg) { case VREG32(GICD_CTLR): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; vgic_lock(v); *r = vreg_reg32_extract(v->domain->arch.vgic.ctlr, info); vgic_unlock(v); @@ -1114,7 +1148,8 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info, unsigned int ncpus = min_t(unsigned int, v->domain->max_vcpus, 8); uint32_t typer; - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; /* No secure world support for guests. */ typer = ((ncpus - 1) << GICD_TYPE_CPUS_SHIFT | DIV_ROUND_UP(v->domain->arch.vgic.nr_spis, 32)); @@ -1122,7 +1157,8 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info, if ( v->domain->arch.vgic.has_its ) typer |= GICD_TYPE_LPIS; - typer |= (v->domain->arch.vgic.intid_bits - 1) << GICD_TYPE_ID_BITS_SHIFT; + typer |= (v->domain->arch.vgic.intid_bits - 1) + << GICD_TYPE_ID_BITS_SHIFT; *r = vreg_reg32_extract(typer, info); @@ -1130,7 +1166,8 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info, } case VREG32(GICD_IIDR): - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; *r = vreg_reg32_extract(GICV3_GICD_IIDR_VAL, info); return 1; @@ -1216,10 +1253,12 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info, { uint64_t irouter; - if ( !vgic_reg64_check_access(dabt) ) goto bad_width; - rank = vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER, - DABT_DOUBLE_WORD); - if ( rank == NULL ) goto read_as_zero; + if ( !vgic_reg64_check_access(dabt) ) + goto bad_width; + rank = + vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER, DABT_DOUBLE_WORD); + if ( rank == NULL ) + goto read_as_zero; vgic_lock_rank(v, rank, flags); irouter = vgic_fetch_irouter(rank, gicd_reg - GICD_IROUTER); vgic_unlock_rank(v, rank, flags); @@ -1237,31 +1276,33 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info, case VRANGE32(0xFFD0, 0xFFE4): /* Implementation defined identification registers */ - goto read_impl_defined; + goto read_impl_defined; case VREG32(GICD_PIDR2): /* GICv3 identification value */ - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; *r = vreg_reg32_extract(GICV3_GICD_PIDR2, info); return 1; case VRANGE32(0xFFEC, 0xFFFC): - /* Implementation defined identification registers */ - goto read_impl_defined; + /* Implementation defined identification registers */ + goto read_impl_defined; default: - printk(XENLOG_G_ERR "%pv: vGICD: unhandled read r%d offset %#08x\n", - v, 
dabt.reg, gicd_reg); + printk(XENLOG_G_ERR "%pv: vGICD: unhandled read r%d offset %#08x\n", v, + dabt.reg, gicd_reg); return 0; } bad_width: - printk(XENLOG_G_ERR "%pv: vGICD: bad read width %d r%d offset %#08x\n", - v, dabt.size, dabt.reg, gicd_reg); + printk(XENLOG_G_ERR "%pv: vGICD: bad read width %d r%d offset %#08x\n", v, + dabt.size, dabt.reg, gicd_reg); return 0; read_as_zero_32: - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; *r = 0; return 1; @@ -1277,8 +1318,7 @@ read_impl_defined: return 1; read_reserved: - printk(XENLOG_G_DEBUG - "%pv: vGICD: RAZ on reserved register offset %#08x\n", + printk(XENLOG_G_DEBUG "%pv: vGICD: RAZ on reserved register offset %#08x\n", v, gicd_reg); *r = 0; return 1; @@ -1294,13 +1334,14 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info, perfc_incr(vgicd_writes); - switch ( gicd_reg ) + switch (gicd_reg) { case VREG32(GICD_CTLR): { uint32_t ctlr = 0; - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; vgic_lock(v); @@ -1376,8 +1417,7 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info, case VRANGE32(GICD_ICFGR, GICD_ICFGRN): /* Above registers are common with GICR and GICD * Manage in common */ - return __vgic_v3_distr_common_mmio_write("vGICD", v, info, - gicd_reg, r); + return __vgic_v3_distr_common_mmio_write("vGICD", v, info, gicd_reg, r); case VRANGE32(GICD_NSACR, GICD_NSACRN): /* We do not implement security extensions for guests, write ignore */ @@ -1389,12 +1429,14 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info, case VRANGE32(GICD_CPENDSGIR, GICD_CPENDSGIRN): /* Replaced with GICR_ICPENDR0. So ignore write */ - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; return 0; case VRANGE32(GICD_SPENDSGIR, GICD_SPENDSGIRN): /* Replaced with GICR_ISPENDR0. 
So ignore write */ - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; return 0; case VRANGE32(0x0F30, 0x60FC): @@ -1404,10 +1446,12 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info, { uint64_t irouter; - if ( !vgic_reg64_check_access(dabt) ) goto bad_width; - rank = vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER, - DABT_DOUBLE_WORD); - if ( rank == NULL ) goto write_ignore; + if ( !vgic_reg64_check_access(dabt) ) + goto bad_width; + rank = + vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER, DABT_DOUBLE_WORD); + if ( rank == NULL ) + goto write_ignore; vgic_lock_rank(v, rank, flags); irouter = vgic_fetch_irouter(rank, gicd_reg - GICD_IROUTER); vreg_reg64_update(&irouter, r, info); @@ -1424,31 +1468,32 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info, case VRANGE32(0xFFD0, 0xFFE4): /* Implementation defined identification registers */ - goto write_impl_defined; + goto write_impl_defined; case VREG32(GICD_PIDR2): /* RO -- write ignore */ goto write_ignore_32; case VRANGE32(0xFFEC, 0xFFFC): - /* Implementation defined identification registers */ - goto write_impl_defined; + /* Implementation defined identification registers */ + goto write_impl_defined; default: - printk(XENLOG_G_ERR - "%pv: vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n", + printk(XENLOG_G_ERR "%pv: vGICD: unhandled write r%d=%" PRIregister + " offset %#08x\n", v, dabt.reg, r, gicd_reg); return 0; } bad_width: - printk(XENLOG_G_ERR - "%pv: vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n", + printk(XENLOG_G_ERR "%pv: vGICD: bad write width %d r%d=%" PRIregister + " offset %#08x\n", v, dabt.size, dabt.reg, r, gicd_reg); return 0; write_ignore_32: - if ( dabt.size != DABT_WORD ) goto bad_width; + if ( dabt.size != DABT_WORD ) + goto bad_width; return 1; write_ignore: @@ -1461,8 +1506,7 @@ write_impl_defined: return 1; write_reserved: - printk(XENLOG_G_DEBUG - "%pv: vGICD: WI on reserved register offset %#08x\n", + printk(XENLOG_G_DEBUG "%pv: vGICD: WI on reserved register offset %#08x\n", v, gicd_reg); return 1; } @@ -1476,10 +1520,10 @@ static bool vgic_v3_to_sgi(struct vcpu *v, register_t sgir) sgi_target_init(&target); irqmode = (sgir >> ICH_SGI_IRQMODE_SHIFT) & ICH_SGI_IRQMODE_MASK; - virq = (sgir >> ICH_SGI_IRQ_SHIFT ) & ICH_SGI_IRQ_MASK; + virq = (sgir >> ICH_SGI_IRQ_SHIFT) & ICH_SGI_IRQ_MASK; /* Map GIC sgi value to enum value */ - switch ( irqmode ) + switch (irqmode) { case ICH_SGI_TARGET_LIST: /* We assume that only AFF1 is used in ICC_SGI1R_EL1. 
*/ @@ -1515,14 +1559,14 @@ static bool vgic_v3_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr) { struct hsr_sysreg sysreg = hsr.sysreg; - ASSERT (hsr.ec == HSR_EC_SYSREG); + ASSERT(hsr.ec == HSR_EC_SYSREG); if ( sysreg.read ) perfc_incr(vgic_sysreg_reads); else perfc_incr(vgic_sysreg_writes); - switch ( hsr.bits & HSR_SYSREG_REGS_MASK ) + switch (hsr.bits & HSR_SYSREG_REGS_MASK) { case HSR_SYSREG_ICC_SGI1R_EL1: return vreg_emulate_sysreg64(regs, hsr, vgic_v3_emulate_sgi1r); @@ -1541,7 +1585,7 @@ static bool vgic_v3_emulate_cp64(struct cpu_user_regs *regs, union hsr hsr) else perfc_incr(vgic_cp64_writes); - switch ( hsr.bits & HSR_CP64_REGS_MASK ) + switch (hsr.bits & HSR_CP64_REGS_MASK) { case HSR_CPREG64(ICC_SGI1R): return vreg_emulate_cp64(regs, hsr, vgic_v3_emulate_sgi1r); @@ -1564,12 +1608,12 @@ static bool vgic_v3_emulate_reg(struct cpu_user_regs *regs, union hsr hsr) } static const struct mmio_handler_ops vgic_rdistr_mmio_handler = { - .read = vgic_v3_rdistr_mmio_read, + .read = vgic_v3_rdistr_mmio_read, .write = vgic_v3_rdistr_mmio_write, }; static const struct mmio_handler_ops vgic_distr_mmio_handler = { - .read = vgic_v3_distr_mmio_read, + .read = vgic_v3_distr_mmio_read, .write = vgic_v3_distr_mmio_write, }; @@ -1641,8 +1685,8 @@ static inline unsigned int vgic_v3_max_rdist_count(struct domain *d) * However DomU get a constructed memory map, so we can go with * the architected single redistributor region. */ - return is_hardware_domain(d) ? vgic_v3_hw.nr_rdist_regions : - GUEST_GICV3_RDIST_REGIONS; + return is_hardware_domain(d) ? vgic_v3_hw.nr_rdist_regions + : GUEST_GICV3_RDIST_REGIONS; } static int vgic_v3_domain_init(struct domain *d) @@ -1707,7 +1751,8 @@ static int vgic_v3_domain_init(struct domain *d) BUILD_BUG_ON(GUEST_GICV3_RDIST_REGIONS != 1); /* The first redistributor should contain enough space for all CPUs */ - BUILD_BUG_ON((GUEST_GICV3_GICR0_SIZE / GICV3_GICR_SIZE) < MAX_VIRT_CPUS); + BUILD_BUG_ON((GUEST_GICV3_GICR0_SIZE / GICV3_GICR_SIZE) < + MAX_VIRT_CPUS); d->arch.vgic.rdist_regions[0].base = GUEST_GICV3_GICR0_BASE; d->arch.vgic.rdist_regions[0].size = GUEST_GICV3_GICR0_SIZE; d->arch.vgic.rdist_regions[0].first_cpu = 0; @@ -1739,8 +1784,8 @@ static int vgic_v3_domain_init(struct domain *d) { struct vgic_rdist_region *region = &d->arch.vgic.rdist_regions[i]; - register_mmio_handler(d, &vgic_rdistr_mmio_handler, - region->base, region->size, region); + register_mmio_handler(d, &vgic_rdistr_mmio_handler, region->base, + region->size, region); } d->arch.vgic.ctlr = VGICD_CTLR_DEFAULT; @@ -1791,10 +1836,10 @@ static int vgic_v3_lpi_get_priority(struct domain *d, uint32_t vlpi) } static const struct vgic_ops v3_ops = { - .vcpu_init = vgic_v3_vcpu_init, + .vcpu_init = vgic_v3_vcpu_init, .domain_init = vgic_v3_domain_init, .domain_free = vgic_v3_domain_free, - .emulate_reg = vgic_v3_emulate_reg, + .emulate_reg = vgic_v3_emulate_reg, .lpi_to_pending = vgic_v3_lpi_to_pending, .lpi_get_priority = vgic_v3_lpi_get_priority, }; @@ -1803,8 +1848,7 @@ int vgic_v3_init(struct domain *d, int *mmio_count) { if ( !vgic_v3_hw.enabled ) { - printk(XENLOG_G_ERR - "d%d: vGICv3 is not supported on this platform.\n", + printk(XENLOG_G_ERR "d%d: vGICv3 is not supported on this platform.\n", d->domain_id); return -ENODEV; } diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c index 7c3bb499f1..a9f106f922 100644 --- a/xen/arch/arm/vgic.c +++ b/xen/arch/arm/vgic.c @@ -47,8 +47,7 @@ static inline struct vgic_irq_rank *vgic_get_rank(struct vcpu *v, int rank) * Returns rank 
corresponding to a GICD_<FOO><n> register for * GICD_<FOO> with <b>-bits-per-interrupt. */ -struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n, - int s) +struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n, int s) { int rank = REG_RANK_NR(b, (n >> s)); @@ -57,7 +56,7 @@ struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n, struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq) { - int rank = irq/32; + int rank = irq / 32; return vgic_get_rank(v, rank); } @@ -96,12 +95,12 @@ static void vgic_rank_init(struct vgic_irq_rank *rank, uint8_t index, int domain_vgic_register(struct domain *d, int *mmio_count) { - switch ( d->arch.vgic.version ) + switch (d->arch.vgic.version) { #ifdef CONFIG_GICV3 case GIC_V3: if ( vgic_v3_init(d, mmio_count) ) - return -ENODEV; + return -ENODEV; break; #endif case GIC_V2: @@ -109,8 +108,8 @@ int domain_vgic_register(struct domain *d, int *mmio_count) return -ENODEV; break; default: - printk(XENLOG_G_ERR "d%d: Unknown vGIC version %u\n", - d->domain_id, d->arch.vgic.version); + printk(XENLOG_G_ERR "d%d: Unknown vGIC version %u\n", d->domain_id, + d->arch.vgic.version); return -ENODEV; } @@ -149,7 +148,7 @@ int domain_vgic_init(struct domain *d, unsigned int nr_spis) if ( d->arch.vgic.pending_irqs == NULL ) return -ENOMEM; - for (i=0; i<d->arch.vgic.nr_spis; i++) + for ( i = 0; i < d->arch.vgic.nr_spis; i++ ) vgic_init_pending_irq(&d->arch.vgic.pending_irqs[i], i + 32); /* SPIs are routed to VCPU0 by default */ @@ -174,7 +173,7 @@ int domain_vgic_init(struct domain *d, unsigned int nr_spis) void register_vgic_ops(struct domain *d, const struct vgic_ops *ops) { - d->arch.vgic.handler = ops; + d->arch.vgic.handler = ops; } extern const int gsx_irq_num; @@ -196,7 +195,8 @@ void domain_vgic_free(struct domain *d) { ret = release_guest_irq(d, p->irq); if ( ret ) - dprintk(XENLOG_G_WARNING, "d%u: Failed to release virq %u ret = %d\n", + dprintk(XENLOG_G_WARNING, + "d%u: Failed to release virq %u ret = %d\n", d->domain_id, p->irq, ret); } } @@ -214,7 +214,7 @@ int vcpu_vgic_init(struct vcpu *v) v->arch.vgic.private_irqs = xzalloc(struct vgic_irq_rank); if ( v->arch.vgic.private_irqs == NULL ) - return -ENOMEM; + return -ENOMEM; /* SGIs/PPIs are always routed to this VCPU */ vgic_rank_init(v->arch.vgic.private_irqs, 0, v->vcpu_id); @@ -222,7 +222,7 @@ int vcpu_vgic_init(struct vcpu *v) v->domain->arch.vgic.handler->vcpu_init(v); memset(&v->arch.vgic.pending_irqs, 0, sizeof(v->arch.vgic.pending_irqs)); - for (i = 0; i < 32; i++) + for ( i = 0; i < 32; i++ ) vgic_init_pending_irq(&v->arch.vgic.pending_irqs[i], i); INIT_LIST_HEAD(&v->arch.vgic.inflight_irqs); @@ -279,7 +279,8 @@ bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq) /* migration already in progress, no need to do anything */ if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) ) { - gprintk(XENLOG_WARNING, "irq %u migration failed: requested while in progress\n", irq); + gprintk(XENLOG_WARNING, + "irq %u migration failed: requested while in progress\n", irq); spin_unlock_irqrestore(&old->arch.vgic.lock, flags); return false; } @@ -352,7 +353,8 @@ void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n) /* LPIs will never be disabled via this function. 
*/ ASSERT(!is_lpi(32 * n + 31)); - while ( (i = find_next_bit(&mask, 32, i)) < 32 ) { + while ( (i = find_next_bit(&mask, 32, i)) < 32 ) + { irq = i + (32 * n); v_target = vgic_get_target_vcpu(v, irq); @@ -402,13 +404,15 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n) /* LPIs will never be enabled via this function. */ ASSERT(!is_lpi(32 * n + 31)); - while ( (i = find_next_bit(&mask, 32, i)) < 32 ) { + while ( (i = find_next_bit(&mask, 32, i)) < 32 ) + { irq = i + (32 * n); v_target = vgic_get_target_vcpu(v, irq); spin_lock_irqsave(&v_target->arch.vgic.lock, flags); p = irq_to_pending(v_target, irq); set_bit(GIC_IRQ_GUEST_ENABLED, &p->status); - if ( !list_empty(&p->inflight) && !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) ) + if ( !list_empty(&p->inflight) && + !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) ) gic_raise_guest_irq(v_target, irq, p->priority); spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags); if ( p->desc != NULL ) @@ -438,21 +442,21 @@ bool vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode, unsigned int base; unsigned long int bitmap; - ASSERT( virq < 16 ); + ASSERT(virq < 16); - switch ( irqmode ) + switch (irqmode) { case SGI_TARGET_LIST: perfc_incr(vgic_sgi_list); base = target->aff1 << 4; bitmap = target->list; - for_each_set_bit( i, &bitmap, sizeof(target->list) * 8 ) + for_each_set_bit (i, &bitmap, sizeof(target->list) * 8) { vcpuid = base + i; if ( vcpuid >= d->max_vcpus || d->vcpu[vcpuid] == NULL || !is_vcpu_online(d->vcpu[vcpuid]) ) { - gprintk(XENLOG_WARNING, "VGIC: write r=%"PRIregister" \ + gprintk(XENLOG_WARNING, "VGIC: write r=%" PRIregister " \ target->list=%hx, wrong CPUTargetList \n", sgir, target->list); continue; @@ -475,8 +479,9 @@ bool vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode, break; default: gprintk(XENLOG_WARNING, - "vGICD:unhandled GICD_SGIR write %"PRIregister" \ - with wrong mode\n", sgir); + "vGICD:unhandled GICD_SGIR write %" PRIregister " \ + with wrong mode\n", + sgir); return false; } @@ -516,7 +521,7 @@ void vgic_clear_pending_irqs(struct vcpu *v) unsigned long flags; spin_lock_irqsave(&v->arch.vgic.lock, flags); - list_for_each_entry_safe ( p, t, &v->arch.vgic.inflight_irqs, inflight ) + list_for_each_entry_safe(p, t, &v->arch.vgic.inflight_irqs, inflight) list_del_init(&p->inflight); gic_clear_pending_irqs(v); spin_unlock_irqrestore(&v->arch.vgic.lock, flags); @@ -585,7 +590,7 @@ void vgic_inject_irq(struct domain *d, struct vcpu *v, unsigned int virq, if ( test_bit(GIC_IRQ_GUEST_ENABLED, &n->status) ) gic_raise_guest_irq(v, virq, priority); - list_for_each_entry ( iter, &v->arch.vgic.inflight_irqs, inflight ) + list_for_each_entry (iter, &v->arch.vgic.inflight_irqs, inflight) { if ( iter->priority > priority ) { @@ -657,13 +662,11 @@ int vgic_allocate_virq(struct domain *d, bool spi) * There is no spinlock to protect allocated_irqs, therefore * test_and_set_bit may fail. If so retry it. 
*/ - do - { + do { virq = find_next_zero_bit(d->arch.vgic.allocated_irqs, end, first); if ( virq >= end ) return -1; - } - while ( test_and_set_bit(virq, d->arch.vgic.allocated_irqs) ); + } while ( test_and_set_bit(virq, d->arch.vgic.allocated_irqs) ); return virq; } @@ -675,7 +678,7 @@ void vgic_free_virq(struct domain *d, unsigned int virq) unsigned int vgic_max_vcpus(unsigned int domctl_vgic_version) { - switch ( domctl_vgic_version ) + switch (domctl_vgic_version) { case XEN_DOMCTL_CONFIG_GIC_V2: return 8; @@ -698,4 +701,3 @@ unsigned int vgic_max_vcpus(unsigned int domctl_vgic_version) * indent-tabs-mode: nil * End: */ - diff --git a/xen/arch/arm/vgic/vgic-init.c b/xen/arch/arm/vgic/vgic-init.c index 62ae553699..18bcdcb1fe 100644 --- a/xen/arch/arm/vgic/vgic-init.c +++ b/xen/arch/arm/vgic/vgic-init.c @@ -103,7 +103,7 @@ static void vgic_vcpu_early_init(struct vcpu *vcpu) */ int domain_vgic_register(struct domain *d, int *mmio_count) { - switch ( d->arch.vgic.version ) + switch (d->arch.vgic.version) { case GIC_V2: *mmio_count = 1; @@ -142,7 +142,7 @@ int domain_vgic_init(struct domain *d, unsigned int nr_spis) dist->nr_spis = nr_spis; dist->spis = xzalloc_array(struct vgic_irq, nr_spis); if ( !dist->spis ) - return -ENOMEM; + return -ENOMEM; /* * In the following code we do not take the irq struct lock since @@ -215,7 +215,7 @@ int vcpu_vgic_init(struct vcpu *vcpu) void domain_vgic_free(struct domain *d) { struct vgic_dist *dist = &d->arch.vgic; - int i, ret; + int i, ret; for ( i = 0; i < dist->nr_spis; i++ ) { @@ -227,8 +227,8 @@ void domain_vgic_free(struct domain *d) ret = release_guest_irq(d, irq->hwintid); if ( ret ) dprintk(XENLOG_G_WARNING, - "d%u: Failed to release virq %u ret = %d\n", - d->domain_id, 32 + i, ret); + "d%u: Failed to release virq %u ret = %d\n", d->domain_id, + 32 + i, ret); } dist->ready = false; diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c b/xen/arch/arm/vgic/vgic-mmio-v2.c index 2e507b10fe..77e18f1dae 100644 --- a/xen/arch/arm/vgic/vgic-mmio-v2.c +++ b/xen/arch/arm/vgic/vgic-mmio-v2.c @@ -20,24 +20,23 @@ #include "vgic.h" #include "vgic-mmio.h" -static unsigned long vgic_mmio_read_v2_misc(struct vcpu *vcpu, - paddr_t addr, unsigned int len) +static unsigned long vgic_mmio_read_v2_misc(struct vcpu *vcpu, paddr_t addr, + unsigned int len) { uint32_t value; - switch ( addr & 0x0c ) /* filter for the 4 registers handled here */ + switch (addr & 0x0c) /* filter for the 4 registers handled here */ { case GICD_CTLR: value = vcpu->domain->arch.vgic.enabled ? 
GICD_CTL_ENABLE : 0; break; case GICD_TYPER: value = vcpu->domain->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; - value = (value >> 5) - 1; /* stored as multiples of 32 */ + value = (value >> 5) - 1; /* stored as multiples of 32 */ value |= (vcpu->domain->max_vcpus - 1) << GICD_TYPE_CPUS_SHIFT; break; case GICD_IIDR: - value = (PRODUCT_ID_KVM << 24) | - (VARIANT_ID_XEN << 16) | + value = (PRODUCT_ID_KVM << 24) | (VARIANT_ID_XEN << 16) | (IMPLEMENTER_ARM << 0); break; default: @@ -47,14 +46,13 @@ static unsigned long vgic_mmio_read_v2_misc(struct vcpu *vcpu, return value; } -static void vgic_mmio_write_v2_misc(struct vcpu *vcpu, - paddr_t addr, unsigned int len, - unsigned long val) +static void vgic_mmio_write_v2_misc(struct vcpu *vcpu, paddr_t addr, + unsigned int len, unsigned long val) { struct vgic_dist *dist = &vcpu->domain->arch.vgic; bool enabled; - switch ( addr & 0x0c ) /* filter for the 4 registers handled here */ + switch (addr & 0x0c) /* filter for the 4 registers handled here */ { case GICD_CTLR: domain_lock(vcpu->domain); @@ -81,34 +79,33 @@ static void vgic_mmio_write_v2_misc(struct vcpu *vcpu, } } -static void vgic_mmio_write_sgir(struct vcpu *source_vcpu, - paddr_t addr, unsigned int len, - unsigned long val) +static void vgic_mmio_write_sgir(struct vcpu *source_vcpu, paddr_t addr, + unsigned int len, unsigned long val) { struct domain *d = source_vcpu->domain; unsigned int nr_vcpus = d->max_vcpus; unsigned int intid = val & GICD_SGI_INTID_MASK; - unsigned long targets = (val & GICD_SGI_TARGET_MASK) >> - GICD_SGI_TARGET_SHIFT; + unsigned long targets = + (val & GICD_SGI_TARGET_MASK) >> GICD_SGI_TARGET_SHIFT; unsigned int vcpu_id; - switch ( val & GICD_SGI_TARGET_LIST_MASK ) + switch (val & GICD_SGI_TARGET_LIST_MASK) { - case GICD_SGI_TARGET_LIST: /* as specified by targets */ - targets &= GENMASK(nr_vcpus - 1, 0); /* limit to existing VCPUs */ + case GICD_SGI_TARGET_LIST: /* as specified by targets */ + targets &= GENMASK(nr_vcpus - 1, 0); /* limit to existing VCPUs */ break; case GICD_SGI_TARGET_OTHERS: targets = GENMASK(nr_vcpus - 1, 0); /* all, ... 
*/ targets &= ~(1U << source_vcpu->vcpu_id); /* but self */ break; - case GICD_SGI_TARGET_SELF: /* this very vCPU only */ + case GICD_SGI_TARGET_SELF: /* this very vCPU only */ targets = (1U << source_vcpu->vcpu_id); break; - case 0x3: /* reserved */ + case 0x3: /* reserved */ return; } - for_each_set_bit( vcpu_id, &targets, 8 ) + for_each_set_bit (vcpu_id, &targets, 8) { struct vcpu *vcpu = d->vcpu[vcpu_id]; struct vgic_irq *irq = vgic_get_irq(d, vcpu, intid); @@ -124,8 +121,8 @@ static void vgic_mmio_write_sgir(struct vcpu *source_vcpu, } } -static unsigned long vgic_mmio_read_target(struct vcpu *vcpu, - paddr_t addr, unsigned int len) +static unsigned long vgic_mmio_read_target(struct vcpu *vcpu, paddr_t addr, + unsigned int len) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8); uint32_t val = 0; @@ -143,9 +140,8 @@ static unsigned long vgic_mmio_read_target(struct vcpu *vcpu, return val; } -static void vgic_mmio_write_target(struct vcpu *vcpu, - paddr_t addr, unsigned int len, - unsigned long val) +static void vgic_mmio_write_target(struct vcpu *vcpu, paddr_t addr, + unsigned int len, unsigned long val) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8); uint8_t cpu_mask = GENMASK(vcpu->domain->max_vcpus - 1, 0); @@ -181,8 +177,8 @@ static void vgic_mmio_write_target(struct vcpu *vcpu, } } -static unsigned long vgic_mmio_read_sgipend(struct vcpu *vcpu, - paddr_t addr, unsigned int len) +static unsigned long vgic_mmio_read_sgipend(struct vcpu *vcpu, paddr_t addr, + unsigned int len) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8); uint32_t val = 0; @@ -202,9 +198,8 @@ static unsigned long vgic_mmio_read_sgipend(struct vcpu *vcpu, return val; } -static void vgic_mmio_write_sgipendc(struct vcpu *vcpu, - paddr_t addr, unsigned int len, - unsigned long val) +static void vgic_mmio_write_sgipendc(struct vcpu *vcpu, paddr_t addr, + unsigned int len, unsigned long val) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8); unsigned int i; @@ -227,9 +222,8 @@ static void vgic_mmio_write_sgipendc(struct vcpu *vcpu, } } -static void vgic_mmio_write_sgipends(struct vcpu *vcpu, - paddr_t addr, unsigned int len, - unsigned long val) +static void vgic_mmio_write_sgipends(struct vcpu *vcpu, paddr_t addr, + unsigned int len, unsigned long val) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8); unsigned int i; @@ -259,48 +253,45 @@ static void vgic_mmio_write_sgipends(struct vcpu *vcpu, } static const struct vgic_register_region vgic_v2_dist_registers[] = { - REGISTER_DESC_WITH_LENGTH(GICD_CTLR, - vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12, - VGIC_ACCESS_32bit), - REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_IGROUPR, - vgic_mmio_read_rao, vgic_mmio_write_wi, 1, - VGIC_ACCESS_32bit), - REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISENABLER, - vgic_mmio_read_enable, vgic_mmio_write_senable, 1, - VGIC_ACCESS_32bit), - REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICENABLER, - vgic_mmio_read_enable, vgic_mmio_write_cenable, 1, - VGIC_ACCESS_32bit), - REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR, - vgic_mmio_read_pending, vgic_mmio_write_spending, 1, - VGIC_ACCESS_32bit), - REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICPENDR, - vgic_mmio_read_pending, vgic_mmio_write_cpending, 1, - VGIC_ACCESS_32bit), - REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISACTIVER, - vgic_mmio_read_active, vgic_mmio_write_sactive, 1, - VGIC_ACCESS_32bit), - REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICACTIVER, - vgic_mmio_read_active, vgic_mmio_write_cactive, 1, - VGIC_ACCESS_32bit), - REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_IPRIORITYR, - vgic_mmio_read_priority, vgic_mmio_write_priority, 
8, - VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), - REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ITARGETSR, - vgic_mmio_read_target, vgic_mmio_write_target, 8, - VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), - REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICFGR, - vgic_mmio_read_config, vgic_mmio_write_config, 2, - VGIC_ACCESS_32bit), - REGISTER_DESC_WITH_LENGTH(GICD_SGIR, - vgic_mmio_read_raz, vgic_mmio_write_sgir, 4, - VGIC_ACCESS_32bit), - REGISTER_DESC_WITH_LENGTH(GICD_CPENDSGIR, - vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16, - VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), - REGISTER_DESC_WITH_LENGTH(GICD_SPENDSGIR, - vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16, - VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), + REGISTER_DESC_WITH_LENGTH(GICD_CTLR, vgic_mmio_read_v2_misc, + vgic_mmio_write_v2_misc, 12, VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_IGROUPR, vgic_mmio_read_rao, + vgic_mmio_write_wi, 1, VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISENABLER, vgic_mmio_read_enable, + vgic_mmio_write_senable, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICENABLER, vgic_mmio_read_enable, + vgic_mmio_write_cenable, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR, vgic_mmio_read_pending, + vgic_mmio_write_spending, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICPENDR, vgic_mmio_read_pending, + vgic_mmio_write_cpending, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISACTIVER, vgic_mmio_read_active, + vgic_mmio_write_sactive, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICACTIVER, vgic_mmio_read_active, + vgic_mmio_write_cactive, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_IPRIORITYR, vgic_mmio_read_priority, + vgic_mmio_write_priority, 8, + VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ITARGETSR, vgic_mmio_read_target, + vgic_mmio_write_target, 8, + VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICFGR, vgic_mmio_read_config, + vgic_mmio_write_config, 2, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICD_SGIR, vgic_mmio_read_raz, + vgic_mmio_write_sgir, 4, VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICD_CPENDSGIR, vgic_mmio_read_sgipend, + vgic_mmio_write_sgipendc, 16, + VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), + REGISTER_DESC_WITH_LENGTH(GICD_SPENDSGIR, vgic_mmio_read_sgipend, + vgic_mmio_write_sgipends, 16, + VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), }; unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev) diff --git a/xen/arch/arm/vgic/vgic-mmio.c b/xen/arch/arm/vgic/vgic-mmio.c index 5d935a7301..535cfbcb56 100644 --- a/xen/arch/arm/vgic/vgic-mmio.c +++ b/xen/arch/arm/vgic/vgic-mmio.c @@ -21,20 +21,20 @@ #include "vgic.h" #include "vgic-mmio.h" -unsigned long vgic_mmio_read_raz(struct vcpu *vcpu, - paddr_t addr, unsigned int len) +unsigned long vgic_mmio_read_raz(struct vcpu *vcpu, paddr_t addr, + unsigned int len) { return 0; } -unsigned long vgic_mmio_read_rao(struct vcpu *vcpu, - paddr_t addr, unsigned int len) +unsigned long vgic_mmio_read_rao(struct vcpu *vcpu, paddr_t addr, + unsigned int len) { return -1UL; } -void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr, - unsigned int len, unsigned long val) +void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr, unsigned int len, + unsigned long val) { /* Ignore */ } @@ -43,8 +43,8 @@ void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr, * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value * of the enabled bit, so 
there is only one function for both here. */ -unsigned long vgic_mmio_read_enable(struct vcpu *vcpu, - paddr_t addr, unsigned int len) +unsigned long vgic_mmio_read_enable(struct vcpu *vcpu, paddr_t addr, + unsigned int len) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); uint32_t value = 0; @@ -64,14 +64,13 @@ unsigned long vgic_mmio_read_enable(struct vcpu *vcpu, return value; } -void vgic_mmio_write_senable(struct vcpu *vcpu, - paddr_t addr, unsigned int len, +void vgic_mmio_write_senable(struct vcpu *vcpu, paddr_t addr, unsigned int len, unsigned long val) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); unsigned int i; - for_each_set_bit( i, &val, len * 8 ) + for_each_set_bit (i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); unsigned long flags; @@ -79,7 +78,7 @@ void vgic_mmio_write_senable(struct vcpu *vcpu, spin_lock_irqsave(&irq->irq_lock, flags); - if ( irq->enabled ) /* skip already enabled IRQs */ + if ( irq->enabled ) /* skip already enabled IRQs */ { spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->domain, irq); @@ -109,14 +108,13 @@ void vgic_mmio_write_senable(struct vcpu *vcpu, } } -void vgic_mmio_write_cenable(struct vcpu *vcpu, - paddr_t addr, unsigned int len, +void vgic_mmio_write_cenable(struct vcpu *vcpu, paddr_t addr, unsigned int len, unsigned long val) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); unsigned int i; - for_each_set_bit( i, &val, len * 8 ) + for_each_set_bit (i, &val, len * 8) { struct vgic_irq *irq; unsigned long flags; @@ -125,7 +123,7 @@ void vgic_mmio_write_cenable(struct vcpu *vcpu, irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); spin_lock_irqsave(&irq->irq_lock, flags); - if ( !irq->enabled ) /* skip already disabled IRQs */ + if ( !irq->enabled ) /* skip already disabled IRQs */ { spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->domain, irq); @@ -156,8 +154,8 @@ void vgic_mmio_write_cenable(struct vcpu *vcpu, } } -unsigned long vgic_mmio_read_pending(struct vcpu *vcpu, - paddr_t addr, unsigned int len) +unsigned long vgic_mmio_read_pending(struct vcpu *vcpu, paddr_t addr, + unsigned int len) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); uint32_t value = 0; @@ -177,8 +175,7 @@ unsigned long vgic_mmio_read_pending(struct vcpu *vcpu, return value; } -void vgic_mmio_write_spending(struct vcpu *vcpu, - paddr_t addr, unsigned int len, +void vgic_mmio_write_spending(struct vcpu *vcpu, paddr_t addr, unsigned int len, unsigned long val) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); @@ -186,7 +183,7 @@ void vgic_mmio_write_spending(struct vcpu *vcpu, unsigned long flags; irq_desc_t *desc; - for_each_set_bit( i, &val, len * 8 ) + for_each_set_bit (i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); @@ -225,8 +222,7 @@ void vgic_mmio_write_spending(struct vcpu *vcpu, } } -void vgic_mmio_write_cpending(struct vcpu *vcpu, - paddr_t addr, unsigned int len, +void vgic_mmio_write_cpending(struct vcpu *vcpu, paddr_t addr, unsigned int len, unsigned long val) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); @@ -234,7 +230,7 @@ void vgic_mmio_write_cpending(struct vcpu *vcpu, unsigned long flags; irq_desc_t *desc; - for_each_set_bit( i, &val, len * 8 ) + for_each_set_bit (i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); @@ -280,7 +276,6 @@ void vgic_mmio_write_cpending(struct vcpu *vcpu, spin_unlock_irqrestore(&desc->lock, flags); } - vgic_put_irq(vcpu->domain, irq); } } @@ -293,8 +288,8 @@ void 
vgic_mmio_write_cpending(struct vcpu *vcpu, * VCPUs processing any affected vIRQs), so we use a simple implementation * to get the best possible answer. */ -unsigned long vgic_mmio_read_active(struct vcpu *vcpu, - paddr_t addr, unsigned int len) +unsigned long vgic_mmio_read_active(struct vcpu *vcpu, paddr_t addr, + unsigned int len) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); uint32_t value = 0; @@ -321,14 +316,13 @@ unsigned long vgic_mmio_read_active(struct vcpu *vcpu, * and only print our warning in this case. So clearing already non-active * IRQs would not be moaned about in the logs. */ -void vgic_mmio_write_cactive(struct vcpu *vcpu, - paddr_t addr, unsigned int len, +void vgic_mmio_write_cactive(struct vcpu *vcpu, paddr_t addr, unsigned int len, unsigned long val) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); unsigned int i; - for_each_set_bit( i, &val, len * 8 ) + for_each_set_bit (i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); @@ -351,14 +345,13 @@ void vgic_mmio_write_cactive(struct vcpu *vcpu, * We check whether this MMIO access would actually affect any non-active IRQ, * and only print our warning in this case. */ -void vgic_mmio_write_sactive(struct vcpu *vcpu, - paddr_t addr, unsigned int len, +void vgic_mmio_write_sactive(struct vcpu *vcpu, paddr_t addr, unsigned int len, unsigned long val) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); unsigned int i; - for_each_set_bit( i, &val, len * 8 ) + for_each_set_bit (i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); @@ -376,8 +369,8 @@ void vgic_mmio_write_sactive(struct vcpu *vcpu, } } -unsigned long vgic_mmio_read_priority(struct vcpu *vcpu, - paddr_t addr, unsigned int len) +unsigned long vgic_mmio_read_priority(struct vcpu *vcpu, paddr_t addr, + unsigned int len) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8); unsigned int i; @@ -402,8 +395,7 @@ unsigned long vgic_mmio_read_priority(struct vcpu *vcpu, * leading to this interrupt getting presented now to the guest (if it has * been masked by the priority mask before). 
*/ -void vgic_mmio_write_priority(struct vcpu *vcpu, - paddr_t addr, unsigned int len, +void vgic_mmio_write_priority(struct vcpu *vcpu, paddr_t addr, unsigned int len, unsigned long val) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8); @@ -423,8 +415,8 @@ void vgic_mmio_write_priority(struct vcpu *vcpu, } } -unsigned long vgic_mmio_read_config(struct vcpu *vcpu, - paddr_t addr, unsigned int len) +unsigned long vgic_mmio_read_config(struct vcpu *vcpu, paddr_t addr, + unsigned int len) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 2); uint32_t value = 0; @@ -443,8 +435,7 @@ unsigned long vgic_mmio_read_config(struct vcpu *vcpu, return value; } -void vgic_mmio_write_config(struct vcpu *vcpu, - paddr_t addr, unsigned int len, +void vgic_mmio_write_config(struct vcpu *vcpu, paddr_t addr, unsigned int len, unsigned long val) { uint32_t intid = VGIC_ADDR_TO_INTID(addr, 2); @@ -505,7 +496,7 @@ static bool check_region(const struct domain *d, { unsigned int flags, nr_irqs = d->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; - switch ( len ) + switch (len) { case sizeof(uint8_t): flags = VGIC_ACCESS_8bit; @@ -614,7 +605,7 @@ int vgic_register_dist_iodev(struct domain *d, gfn_t dist_base_fn, struct vgic_io_device *io_device = &d->arch.vgic.dist_iodev; unsigned int len; - switch ( type ) + switch (type) { case VGIC_V2: len = vgic_v2_init_dist_iodev(io_device); diff --git a/xen/arch/arm/vgic/vgic-v2.c b/xen/arch/arm/vgic/vgic-v2.c index b5ba4ace87..b23a5dd5ce 100644 --- a/xen/arch/arm/vgic/vgic-v2.c +++ b/xen/arch/arm/vgic/vgic-v2.c @@ -23,12 +23,13 @@ #include "vgic.h" -static struct { +static struct +{ bool enabled; - paddr_t dbase; /* Distributor interface address */ - paddr_t cbase; /* CPU interface address & size */ + paddr_t dbase; /* Distributor interface address */ + paddr_t cbase; /* CPU interface address & size */ paddr_t csize; - paddr_t vbase; /* Virtual CPU interface address */ + paddr_t vbase; /* Virtual CPU interface address */ /* Offset to add to get an 8kB contiguous region if GIC is aliased */ uint32_t aliased_offset; @@ -61,7 +62,7 @@ void vgic_v2_fold_lr_state(struct vcpu *vcpu) unsigned long flags; unsigned int lr; - if ( !used_lrs ) /* No LRs used, so nothing to sync back here. */ + if ( !used_lrs ) /* No LRs used, so nothing to sync back here. */ return; gic_hw_ops->update_hcr_status(GICH_HCR_UIE, false); @@ -295,7 +296,6 @@ int vgic_v2_map_resources(struct domain *d) vbase = gic_v2_hw_data.vbase + gic_v2_hw_data.aliased_offset; } - ret = vgic_register_dist_iodev(d, gaddr_to_gfn(dist->vgic_dist_base), VGIC_V2); if ( ret ) diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c index e2844dcc20..4df94779e6 100644 --- a/xen/arch/arm/vgic/vgic.c +++ b/xen/arch/arm/vgic/vgic.c @@ -64,7 +64,7 @@ static struct vgic_irq *vgic_get_lpi(struct domain *d, uint32_t intid) spin_lock(&dist->lpi_list_lock); - list_for_each_entry( irq, &dist->lpi_list_head, lpi_list ) + list_for_each_entry (irq, &dist->lpi_list_head, lpi_list) { if ( irq->intid != intid ) continue; @@ -170,7 +170,7 @@ static struct vcpu *vgic_target_oracle(struct vgic_irq *irq) /* If the interrupt is active, it must stay on the current vcpu */ if ( irq->active ) - return irq->vcpu ? 
: irq->target_vcpu; + return irq->vcpu ?: irq->target_vcpu; /* * If the IRQ is not active but enabled and pending, we should direct @@ -429,7 +429,7 @@ static void vgic_prune_ap_list(struct vcpu *vcpu) retry: spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); - list_for_each_entry_safe( irq, tmp, &vgic_cpu->ap_list_head, ap_list ) + list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { struct vcpu *target_vcpu, *vcpuA, *vcpuB; @@ -524,8 +524,7 @@ static void vgic_fold_lr_state(struct vcpu *vcpu) } /* Requires the irq_lock to be held. */ -static void vgic_populate_lr(struct vcpu *vcpu, - struct vgic_irq *irq, int lr) +static void vgic_populate_lr(struct vcpu *vcpu, struct vgic_irq *irq, int lr) { ASSERT(spin_is_locked(&irq->irq_lock)); @@ -548,7 +547,7 @@ static int compute_ap_list_depth(struct vcpu *vcpu) ASSERT(spin_is_locked(&vgic_cpu->ap_list_lock)); - list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) + list_for_each_entry (irq, &vgic_cpu->ap_list_head, ap_list) count++; return count; @@ -566,7 +565,7 @@ static void vgic_flush_lr_state(struct vcpu *vcpu) if ( compute_ap_list_depth(vcpu) > gic_get_nr_lrs() ) vgic_sort_ap_list(vcpu); - list_for_each_entry( irq, &vgic_cpu->ap_list_head, ap_list ) + list_for_each_entry (irq, &vgic_cpu->ap_list_head, ap_list) { spin_lock(&irq->irq_lock); @@ -662,7 +661,7 @@ int vgic_vcpu_pending_irq(struct vcpu *vcpu) spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); - list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) + list_for_each_entry (irq, &vgic_cpu->ap_list_head, ap_list) { spin_lock(&irq->irq_lock); ret = irq_is_pending(irq) && irq->enabled; @@ -685,7 +684,7 @@ void vgic_kick_vcpus(struct domain *d) * We've injected an interrupt, time to find out who deserves * a good kick... */ - for_each_vcpu( d, vcpu ) + for_each_vcpu (d, vcpu) { if ( vgic_vcpu_pending_irq(vcpu) ) vcpu_kick(vcpu); @@ -744,8 +743,7 @@ int vgic_allocate_virq(struct domain *d, bool spi) * There is no spinlock to protect allocated_irqs, therefore * test_and_set_bit may fail. If so retry it. */ - do - { + do { virq = find_next_zero_bit(d->arch.vgic.allocated_irqs, end, first); if ( virq >= end ) return -1; @@ -770,14 +768,14 @@ void gic_dump_vgic_info(struct vcpu *v) if ( !list_empty(&vgic_cpu->ap_list_head) ) printk(" active or pending interrupts queued:\n"); - list_for_each_entry ( irq, &vgic_cpu->ap_list_head, ap_list ) + list_for_each_entry (irq, &vgic_cpu->ap_list_head, ap_list) { spin_lock(&irq->irq_lock); printk(" %s %s irq %u: %spending, %sactive, %senabled\n", irq->hw ? "hardware" : "virtual", - irq->config == VGIC_CONFIG_LEVEL ? "level" : "edge", - irq->intid, irq_is_pending(irq) ? "" : "not ", - irq->active ? "" : "not ", irq->enabled ? "" : "not "); + irq->config == VGIC_CONFIG_LEVEL ? "level" : "edge", irq->intid, + irq_is_pending(irq) ? "" : "not ", irq->active ? "" : "not ", + irq->enabled ? "" : "not "); spin_unlock(&irq->irq_lock); } @@ -819,7 +817,7 @@ void arch_move_irqs(struct vcpu *v) spin_lock_irqsave(&irq->irq_lock, flags); /* Only hardware mapped vIRQs that are targeting this vCPU. 
*/ - if ( irq->hw && irq->target_vcpu == v) + if ( irq->hw && irq->target_vcpu == v ) { irq_desc_t *desc = irq_to_desc(irq->hwintid); @@ -879,7 +877,7 @@ int vgic_connect_hw_irq(struct domain *d, struct vcpu *vcpu, spin_lock_irqsave(&irq->irq_lock, flags); - if ( connect ) /* assign a mapped IRQ */ + if ( connect ) /* assign a mapped IRQ */ { /* The VIRQ should not be already enabled by the guest */ if ( !irq->hw && !irq->enabled ) @@ -890,7 +888,7 @@ int vgic_connect_hw_irq(struct domain *d, struct vcpu *vcpu, else ret = -EBUSY; } - else /* remove a mapped IRQ */ + else /* remove a mapped IRQ */ { if ( desc && irq->hwintid != desc->irq ) { @@ -914,8 +912,8 @@ static unsigned int translate_irq_type(bool is_level) return is_level ? IRQ_TYPE_LEVEL_HIGH : IRQ_TYPE_EDGE_RISING; } -void vgic_sync_hardware_irq(struct domain *d, - irq_desc_t *desc, struct vgic_irq *irq) +void vgic_sync_hardware_irq(struct domain *d, irq_desc_t *desc, + struct vgic_irq *irq) { unsigned long flags; @@ -951,7 +949,7 @@ void vgic_sync_hardware_irq(struct domain *d, unsigned int vgic_max_vcpus(unsigned int domctl_vgic_version) { - switch ( domctl_vgic_version ) + switch (domctl_vgic_version) { case XEN_DOMCTL_CONFIG_GIC_V2: return VGIC_V2_MAX_CPUS; @@ -963,8 +961,7 @@ unsigned int vgic_max_vcpus(unsigned int domctl_vgic_version) #ifdef CONFIG_GICV3 /* Dummy implementation to allow building without actual vGICv3 support. */ -void vgic_v3_setup_hw(paddr_t dbase, - unsigned int nr_rdist_regions, +void vgic_v3_setup_hw(paddr_t dbase, unsigned int nr_rdist_regions, const struct rdist_region *regions, unsigned int intid_bits) { diff --git a/xen/arch/arm/vpl011.c b/xen/arch/arm/vpl011.c index 7bc5eeb207..e8d05b398f 100644 --- a/xen/arch/arm/vpl011.c +++ b/xen/arch/arm/vpl011.c @@ -111,8 +111,7 @@ static void vpl011_write_data_xen(struct domain *d, uint8_t data) } else { - if ( intf->out_prod == SBSA_UART_OUT_BUF_SIZE - 2 || - data == '\n' ) + if ( intf->out_prod == SBSA_UART_OUT_BUF_SIZE - 2 || data == '\n' ) { if ( data != '\n' ) intf->out[intf->out_prod++] = '\n'; @@ -154,7 +153,8 @@ static uint8_t vpl011_read_data_xen(struct domain *d) * It is expected that there will be data in the ring buffer when this * function is called since the guest is expected to read the data register * only if the TXFE flag is not set. - * If the guest still does read when TXFE bit is set then 0 will be returned. + * If the guest still does read when TXFE bit is set then 0 will be + * returned. */ if ( xencons_queued(in_prod, in_cons, sizeof(intf->in)) > 0 ) { @@ -213,7 +213,8 @@ static uint8_t vpl011_read_data(struct domain *d) * It is expected that there will be data in the ring buffer when this * function is called since the guest is expected to read the data register * only if the TXFE flag is not set. - * If the guest still does read when TXFE bit is set then 0 will be returned. + * If the guest still does read when TXFE bit is set then 0 will be + * returned. */ if ( xencons_queued(in_prod, in_cons, sizeof(intf->in)) > 0 ) { @@ -299,7 +300,7 @@ static void vpl011_write_data(struct domain *d, uint8_t data) * data will be silently dropped. 
*/ if ( xencons_queued(out_prod, out_cons, sizeof(intf->out)) != - sizeof (intf->out) ) + sizeof(intf->out) ) { unsigned int fifo_level; @@ -341,9 +342,7 @@ static void vpl011_write_data(struct domain *d, uint8_t data) notify_via_xen_event_channel(d, vpl011->evtchn); } -static int vpl011_mmio_read(struct vcpu *v, - mmio_info_t *info, - register_t *r, +static int vpl011_mmio_read(struct vcpu *v, mmio_info_t *info, register_t *r, void *priv) { struct hsr_dabt dabt = info->dabt; @@ -352,10 +351,11 @@ static int vpl011_mmio_read(struct vcpu *v, struct domain *d = v->domain; unsigned long flags; - switch ( vpl011_reg ) + switch (vpl011_reg) { case DR: - if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; + if ( !vpl011_reg32_check_access(dabt) ) + goto bad_width; if ( vpl011->backend_in_domain ) *r = vreg_reg32_extract(vpl011_read_data(d), info); @@ -364,14 +364,16 @@ static int vpl011_mmio_read(struct vcpu *v, return 1; case RSR: - if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; + if ( !vpl011_reg32_check_access(dabt) ) + goto bad_width; /* It always returns 0 as there are no physical errors. */ *r = 0; return 1; case FR: - if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; + if ( !vpl011_reg32_check_access(dabt) ) + goto bad_width; VPL011_LOCK(d, flags); *r = vreg_reg32_extract(vpl011->uartfr, info); @@ -379,7 +381,8 @@ static int vpl011_mmio_read(struct vcpu *v, return 1; case RIS: - if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; + if ( !vpl011_reg32_check_access(dabt) ) + goto bad_width; VPL011_LOCK(d, flags); *r = vreg_reg32_extract(vpl011->uartris, info); @@ -387,16 +390,17 @@ static int vpl011_mmio_read(struct vcpu *v, return 1; case MIS: - if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; + if ( !vpl011_reg32_check_access(dabt) ) + goto bad_width; VPL011_LOCK(d, flags); - *r = vreg_reg32_extract(vpl011->uartris & vpl011->uartimsc, - info); + *r = vreg_reg32_extract(vpl011->uartris & vpl011->uartimsc, info); VPL011_UNLOCK(d, flags); return 1; case IMSC: - if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; + if ( !vpl011_reg32_check_access(dabt) ) + goto bad_width; VPL011_LOCK(d, flags); *r = vreg_reg32_extract(vpl011->uartimsc, info); @@ -404,7 +408,8 @@ static int vpl011_mmio_read(struct vcpu *v, return 1; case ICR: - if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; + if ( !vpl011_reg32_check_access(dabt) ) + goto bad_width; /* Only write is valid. */ return 0; @@ -421,12 +426,9 @@ bad_width: gprintk(XENLOG_ERR, "vpl011: bad read width %d r%d offset %#08x\n", dabt.size, dabt.reg, vpl011_reg); return 0; - } -static int vpl011_mmio_write(struct vcpu *v, - mmio_info_t *info, - register_t r, +static int vpl011_mmio_write(struct vcpu *v, mmio_info_t *info, register_t r, void *priv) { struct hsr_dabt dabt = info->dabt; @@ -435,13 +437,14 @@ static int vpl011_mmio_write(struct vcpu *v, struct domain *d = v->domain; unsigned long flags; - switch ( vpl011_reg ) + switch (vpl011_reg) { case DR: { uint32_t data = 0; - if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; + if ( !vpl011_reg32_check_access(dabt) ) + goto bad_width; vreg_reg32_update(&data, r, info); data &= 0xFF; @@ -453,7 +456,8 @@ static int vpl011_mmio_write(struct vcpu *v, } case RSR: /* Nothing to clear. 
*/ - if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; + if ( !vpl011_reg32_check_access(dabt) ) + goto bad_width; return 1; @@ -463,7 +467,8 @@ static int vpl011_mmio_write(struct vcpu *v, goto write_ignore; case IMSC: - if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; + if ( !vpl011_reg32_check_access(dabt) ) + goto bad_width; VPL011_LOCK(d, flags); vreg_reg32_update(&vpl011->uartimsc, r, info); @@ -472,7 +477,8 @@ static int vpl011_mmio_write(struct vcpu *v, return 1; case ICR: - if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; + if ( !vpl011_reg32_check_access(dabt) ) + goto bad_width; VPL011_LOCK(d, flags); vreg_reg32_clearbits(&vpl011->uartris, r, info); @@ -493,7 +499,6 @@ bad_width: gprintk(XENLOG_ERR, "vpl011: bad write width %d r%d offset %#08x\n", dabt.size, dabt.reg, vpl011_reg); return 0; - } static const struct mmio_handler_ops vpl011_mmio_handler = { @@ -501,8 +506,7 @@ static const struct mmio_handler_ops vpl011_mmio_handler = { .write = vpl011_mmio_write, }; -static void vpl011_data_avail(struct domain *d, - XENCONS_RING_IDX in_fifo_level, +static void vpl011_data_avail(struct domain *d, XENCONS_RING_IDX in_fifo_level, XENCONS_RING_IDX in_size, XENCONS_RING_IDX out_fifo_level, XENCONS_RING_IDX out_size) @@ -569,7 +573,8 @@ void vpl011_rx_char_xen(struct domain *d, char c) in_cons = intf->in_cons; in_prod = intf->in_prod; - if ( xencons_queued(in_prod, in_cons, sizeof(intf->in)) == sizeof(intf->in) ) + if ( xencons_queued(in_prod, in_cons, sizeof(intf->in)) == + sizeof(intf->in) ) { VPL011_UNLOCK(d, flags); return; @@ -578,11 +583,10 @@ void vpl011_rx_char_xen(struct domain *d, char c) intf->in[xencons_mask(in_prod, sizeof(intf->in))] = c; intf->in_prod = ++in_prod; - in_fifo_level = xencons_queued(in_prod, - in_cons, - sizeof(intf->in)); + in_fifo_level = xencons_queued(in_prod, in_cons, sizeof(intf->in)); - vpl011_data_avail(d, in_fifo_level, sizeof(intf->in), 0, SBSA_UART_FIFO_SIZE); + vpl011_data_avail(d, in_fifo_level, sizeof(intf->in), 0, + SBSA_UART_FIFO_SIZE); VPL011_UNLOCK(d, flags); } @@ -604,13 +608,9 @@ static void vpl011_notification(struct vcpu *v, unsigned int port) smp_rmb(); - in_fifo_level = xencons_queued(in_prod, - in_cons, - sizeof(intf->in)); + in_fifo_level = xencons_queued(in_prod, in_cons, sizeof(intf->in)); - out_fifo_level = xencons_queued(out_prod, - out_cons, - sizeof(intf->out)); + out_fifo_level = xencons_queued(out_prod, out_cons, sizeof(intf->out)); vpl011_data_avail(v->domain, in_fifo_level, sizeof(intf->in), out_fifo_level, sizeof(intf->out)); @@ -635,10 +635,9 @@ int domain_vpl011_init(struct domain *d, struct vpl011_init_info *info) vpl011->backend_in_domain = true; /* Map the guest PFN to Xen address space. 
*/ - rc = prepare_ring_for_helper(d, - gfn_x(info->gfn), - &vpl011->backend.dom.ring_page, - &vpl011->backend.dom.ring_buf); + rc = prepare_ring_for_helper(d, gfn_x(info->gfn), + &vpl011->backend.dom.ring_page, + &vpl011->backend.dom.ring_buf); if ( rc < 0 ) goto out; @@ -670,8 +669,8 @@ int domain_vpl011_init(struct domain *d, struct vpl011_init_info *info) spin_lock_init(&vpl011->lock); - register_mmio_handler(d, &vpl011_mmio_handler, - GUEST_PL011_BASE, GUEST_PL011_SIZE, NULL); + register_mmio_handler(d, &vpl011_mmio_handler, GUEST_PL011_BASE, + GUEST_PL011_SIZE, NULL); return 0; diff --git a/xen/arch/arm/vpsci.c b/xen/arch/arm/vpsci.c index 9f4e5b8844..b2b10060bf 100644 --- a/xen/arch/arm/vpsci.c +++ b/xen/arch/arm/vpsci.c @@ -49,7 +49,7 @@ static int do_common_cpu_on(register_t target_cpu, register_t entry_point, vgic_clear_pending_irqs(v); memset(ctxt, 0, sizeof(*ctxt)); - ctxt->user_regs.pc64 = (u64) entry_point; + ctxt->user_regs.pc64 = (u64)entry_point; ctxt->sctlr = SCTLR_GUEST_INIT; ctxt->ttbr0 = 0; ctxt->ttbr1 = 0; @@ -149,19 +149,18 @@ static int32_t do_psci_0_2_cpu_off(void) return do_psci_cpu_off(0); } -static int32_t do_psci_0_2_cpu_on(register_t target_cpu, - register_t entry_point, +static int32_t do_psci_0_2_cpu_on(register_t target_cpu, register_t entry_point, register_t context_id) { return do_common_cpu_on(target_cpu, entry_point, context_id); } static const unsigned long target_affinity_mask[] = { - ( MPIDR_HWID_MASK & AFFINITY_MASK( 0 ) ), - ( MPIDR_HWID_MASK & AFFINITY_MASK( 1 ) ), - ( MPIDR_HWID_MASK & AFFINITY_MASK( 2 ) ) + (MPIDR_HWID_MASK & AFFINITY_MASK(0)), (MPIDR_HWID_MASK & AFFINITY_MASK(1)), + (MPIDR_HWID_MASK & AFFINITY_MASK(2)) #ifdef CONFIG_ARM_64 - ,( MPIDR_HWID_MASK & AFFINITY_MASK( 3 ) ) + , + (MPIDR_HWID_MASK & AFFINITY_MASK(3)) #endif }; @@ -185,8 +184,8 @@ static int32_t do_psci_0_2_affinity_info(register_t target_affinity, { v = d->vcpu[vcpuid]; - if ( ( ( v->arch.vmpidr & tmask ) == target_affinity ) - && ( !test_bit(_VPF_down, &v->pause_flags) ) ) + if ( ((v->arch.vmpidr & tmask) == target_affinity) && + (!test_bit(_VPF_down, &v->pause_flags)) ) return PSCI_0_2_AFFINITY_LEVEL_ON; } @@ -198,22 +197,22 @@ static int32_t do_psci_0_2_migrate_info_type(void) return PSCI_0_2_TOS_MP_OR_NOT_PRESENT; } -static void do_psci_0_2_system_off( void ) +static void do_psci_0_2_system_off(void) { struct domain *d = current->domain; - domain_shutdown(d,SHUTDOWN_poweroff); + domain_shutdown(d, SHUTDOWN_poweroff); } static void do_psci_0_2_system_reset(void) { struct domain *d = current->domain; - domain_shutdown(d,SHUTDOWN_reboot); + domain_shutdown(d, SHUTDOWN_reboot); } static int32_t do_psci_1_0_features(uint32_t psci_func_id) { /* /!\ Ordered by function ID and not name */ - switch ( psci_func_id ) + switch (psci_func_id) { case PSCI_0_2_FN32_PSCI_VERSION: case PSCI_0_2_FN32_CPU_SUSPEND: @@ -249,7 +248,7 @@ static int32_t do_psci_1_0_features(uint32_t psci_func_id) */ bool do_vpsci_0_1_call(struct cpu_user_regs *regs, uint32_t fid) { - switch ( (uint32_t)get_user_reg(regs, 0) ) + switch ((uint32_t)get_user_reg(regs, 0)) { case PSCI_cpu_off: { @@ -284,7 +283,7 @@ bool do_vpsci_0_2_call(struct cpu_user_regs *regs, uint32_t fid) * adding/removing a function. SCCC_SMCCC_*_REVISION should be * updated once per release. 
*/ - switch ( fid ) + switch (fid) { case PSCI_0_2_FN32_PSCI_VERSION: perfc_incr(vpsci_version); diff --git a/xen/arch/arm/vsmc.c b/xen/arch/arm/vsmc.c index f8e350311d..775b01a8ea 100644 --- a/xen/arch/arm/vsmc.c +++ b/xen/arch/arm/vsmc.c @@ -14,7 +14,6 @@ * GNU General Public License for more details. */ - #include #include #include @@ -43,7 +42,7 @@ static bool fill_uid(struct cpu_user_regs *regs, xen_uuid_t uuid) * first byte is stored in low-order bits of a register. * (ARM DEN 0028B page 14) */ - for (n = 0; n < 4; n++) + for ( n = 0; n < 4; n++ ) { const uint8_t *bytes = uuid.a + n * 4; uint32_t r; @@ -60,7 +59,7 @@ static bool fill_uid(struct cpu_user_regs *regs, xen_uuid_t uuid) } static bool fill_revision(struct cpu_user_regs *regs, uint32_t major, - uint32_t minor) + uint32_t minor) { /* * Revision is returned in registers r0 and r1. @@ -90,7 +89,7 @@ static bool handle_arch(struct cpu_user_regs *regs) { uint32_t fid = (uint32_t)get_user_reg(regs, 0); - switch ( fid ) + switch (fid) { case ARM_SMCCC_VERSION_FID: set_user_reg(regs, 0, ARM_SMCCC_VERSION_1_1); @@ -101,14 +100,14 @@ static bool handle_arch(struct cpu_user_regs *regs) uint32_t arch_func_id = get_user_reg(regs, 1); int ret = ARM_SMCCC_NOT_SUPPORTED; - switch ( arch_func_id ) + switch (arch_func_id) { case ARM_SMCCC_ARCH_WORKAROUND_1_FID: if ( cpus_have_cap(ARM_HARDEN_BRANCH_PREDICTOR) ) ret = 0; break; case ARM_SMCCC_ARCH_WORKAROUND_2_FID: - switch ( get_ssbd_state() ) + switch (get_ssbd_state()) { case ARM_SSBD_UNKNOWN: case ARM_SSBD_FORCE_DISABLE: @@ -163,7 +162,7 @@ static bool handle_hypervisor(struct cpu_user_regs *regs) { uint32_t fid = (uint32_t)get_user_reg(regs, 0); - switch ( fid ) + switch (fid) { case ARM_SMCCC_CALL_COUNT_FID(HYPERVISOR): return fill_function_call_count(regs, XEN_SMCCC_FUNCTION_COUNT); @@ -194,7 +193,7 @@ static bool handle_sssc(struct cpu_user_regs *regs) if ( do_vpsci_0_2_call(regs, fid) ) return true; - switch ( fid ) + switch (fid) { case ARM_SMCCC_CALL_COUNT_FID(STANDARD): return fill_function_call_count(regs, SSSC_SMCCC_FUNCTION_COUNT); @@ -219,7 +218,7 @@ static bool handle_sssc(struct cpu_user_regs *regs) static bool vsmccc_handle_call(struct cpu_user_regs *regs) { bool handled = false; - const union hsr hsr = { .bits = regs->hsr }; + const union hsr hsr = {.bits = regs->hsr}; register_t funcid = get_user_reg(regs, 0); /* @@ -229,14 +228,14 @@ static bool vsmccc_handle_call(struct cpu_user_regs *regs) * value we need to disassemble instruction at current pc, which * is expensive. So we will assume that it is 0x0. */ - switch ( hsr.ec ) + switch (hsr.ec) { case HSR_EC_HVC32: #ifdef CONFIG_ARM_64 case HSR_EC_HVC64: case HSR_EC_SMC64: #endif - if ( (hsr.iss & HSR_XXC_IMM_MASK) != 0) + if ( (hsr.iss & HSR_XXC_IMM_MASK) != 0 ) return false; break; case HSR_EC_SMC32: @@ -263,7 +262,7 @@ static bool vsmccc_handle_call(struct cpu_user_regs *regs) handled = handle_existing_apis(regs); else { - switch ( smccc_get_owner(funcid) ) + switch (smccc_get_owner(funcid)) { case ARM_SMCCC_OWNER_ARCH: handled = handle_arch(regs); @@ -286,7 +285,7 @@ static bool vsmccc_handle_call(struct cpu_user_regs *regs) if ( !handled ) { - gprintk(XENLOG_INFO, "Unhandled SMC/HVC: %08"PRIregister"\n", funcid); + gprintk(XENLOG_INFO, "Unhandled SMC/HVC: %08" PRIregister "\n", funcid); /* Inform caller that function is not supported. 
*/ set_user_reg(regs, 0, ARM_SMCCC_ERR_UNKNOWN_FUNCTION); @@ -327,7 +326,7 @@ void do_trap_smc(struct cpu_user_regs *regs, const union hsr hsr) void do_trap_hvc_smccc(struct cpu_user_regs *regs) { - const union hsr hsr = { .bits = regs->hsr }; + const union hsr hsr = {.bits = regs->hsr}; /* * vsmccc_handle_call() will return false if this call is not diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c index c99dd237d1..75f35bea7c 100644 --- a/xen/arch/arm/vtimer.c +++ b/xen/arch/arm/vtimer.c @@ -36,8 +36,8 @@ * CNTKCTL_EL1_ bit name which gates user access */ #define ACCESS_ALLOWED(regs, user_gate) \ - ( !psr_mode_is_user(regs) || \ - (READ_SYSREG(CNTKCTL_EL1) & CNTKCTL_EL1_##user_gate) ) + (!psr_mode_is_user(regs) || \ + (READ_SYSREG(CNTKCTL_EL1) & CNTKCTL_EL1_##user_gate)) static void phys_timer_expired(void *data) { @@ -64,7 +64,8 @@ int domain_vtimer_init(struct domain *d, struct xen_arch_domainconfig *config) { d->arch.phys_timer_base.offset = NOW(); d->arch.virt_timer_base.offset = READ_SYSREG64(CNTPCT_EL0); - d->time_offset_seconds = ticks_to_ns(d->arch.virt_timer_base.offset - boot_count); + d->time_offset_seconds = + ticks_to_ns(d->arch.virt_timer_base.offset - boot_count); do_div(d->time_offset_seconds, 1000000000); config->clock_frequency = timer_dt_clock_frequency; @@ -109,17 +110,14 @@ int vcpu_vtimer_init(struct vcpu *v) init_timer(&t->timer, phys_timer_expired, t, v->processor); t->ctl = 0; t->cval = NOW(); - t->irq = d0 - ? timer_get_irq(TIMER_PHYS_NONSECURE_PPI) - : GUEST_TIMER_PHYS_NS_PPI; + t->irq = + d0 ? timer_get_irq(TIMER_PHYS_NONSECURE_PPI) : GUEST_TIMER_PHYS_NS_PPI; t->v = v; t = &v->arch.virt_timer; init_timer(&t->timer, virt_timer_expired, t, v->processor); t->ctl = 0; - t->irq = d0 - ? timer_get_irq(TIMER_VIRT_PPI) - : GUEST_TIMER_VIRT_PPI; + t->irq = d0 ? 
timer_get_irq(TIMER_VIRT_PPI) : GUEST_TIMER_VIRT_PPI; t->v = v; v->arch.vtimer_initialized = 1; @@ -144,10 +142,12 @@ int virt_timer_save(struct vcpu *v) WRITE_SYSREG32(v->arch.virt_timer.ctl & ~CNTx_CTL_ENABLE, CNTV_CTL_EL0); v->arch.virt_timer.cval = READ_SYSREG64(CNTV_CVAL_EL0); if ( (v->arch.virt_timer.ctl & CNTx_CTL_ENABLE) && - !(v->arch.virt_timer.ctl & CNTx_CTL_MASK)) + !(v->arch.virt_timer.ctl & CNTx_CTL_MASK) ) { - set_timer(&v->arch.virt_timer.timer, ticks_to_ns(v->arch.virt_timer.cval + - v->domain->arch.virt_timer_base.offset - boot_count)); + set_timer(&v->arch.virt_timer.timer, + ticks_to_ns(v->arch.virt_timer.cval + + v->domain->arch.virt_timer_base.offset - + boot_count)); } return 0; } @@ -187,7 +187,8 @@ static bool vtimer_cntp_ctl(struct cpu_user_regs *regs, uint32_t *r, bool read) if ( v->arch.phys_timer.ctl & CNTx_CTL_ENABLE ) { set_timer(&v->arch.phys_timer.timer, - v->arch.phys_timer.cval + v->domain->arch.phys_timer_base.offset); + v->arch.phys_timer.cval + + v->domain->arch.phys_timer_base.offset); } else stop_timer(&v->arch.phys_timer.timer); @@ -195,8 +196,7 @@ static bool vtimer_cntp_ctl(struct cpu_user_regs *regs, uint32_t *r, bool read) return true; } -static bool vtimer_cntp_tval(struct cpu_user_regs *regs, uint32_t *r, - bool read) +static bool vtimer_cntp_tval(struct cpu_user_regs *regs, uint32_t *r, bool read) { struct vcpu *v = current; s_time_t now; @@ -208,7 +208,8 @@ static bool vtimer_cntp_tval(struct cpu_user_regs *regs, uint32_t *r, if ( read ) { - *r = (uint32_t)(ns_to_ticks(v->arch.phys_timer.cval - now) & 0xffffffffull); + *r = (uint32_t)(ns_to_ticks(v->arch.phys_timer.cval - now) & + 0xffffffffull); } else { @@ -218,14 +219,13 @@ static bool vtimer_cntp_tval(struct cpu_user_regs *regs, uint32_t *r, v->arch.phys_timer.ctl &= ~CNTx_CTL_PENDING; set_timer(&v->arch.phys_timer.timer, v->arch.phys_timer.cval + - v->domain->arch.phys_timer_base.offset); + v->domain->arch.phys_timer_base.offset); } } return true; } -static bool vtimer_cntp_cval(struct cpu_user_regs *regs, uint64_t *r, - bool read) +static bool vtimer_cntp_cval(struct cpu_user_regs *regs, uint64_t *r, bool read) { struct vcpu *v = current; @@ -244,7 +244,7 @@ static bool vtimer_cntp_cval(struct cpu_user_regs *regs, uint64_t *r, v->arch.phys_timer.ctl &= ~CNTx_CTL_PENDING; set_timer(&v->arch.phys_timer.timer, v->arch.phys_timer.cval + - v->domain->arch.phys_timer_base.offset); + v->domain->arch.phys_timer_base.offset); } } return true; @@ -259,7 +259,7 @@ static bool vtimer_emulate_cp32(struct cpu_user_regs *regs, union hsr hsr) else perfc_incr(vtimer_cp32_writes); - switch ( hsr.bits & HSR_CP32_REGS_MASK ) + switch (hsr.bits & HSR_CP32_REGS_MASK) { case HSR_CPREG32(CNTP_CTL): return vreg_emulate_cp32(regs, hsr, vtimer_cntp_ctl); @@ -281,7 +281,7 @@ static bool vtimer_emulate_cp64(struct cpu_user_regs *regs, union hsr hsr) else perfc_incr(vtimer_cp64_writes); - switch ( hsr.bits & HSR_CP64_REGS_MASK ) + switch (hsr.bits & HSR_CP64_REGS_MASK) { case HSR_CPREG64(CNTP_CVAL): return vreg_emulate_cp64(regs, hsr, vtimer_cntp_cval); @@ -301,7 +301,7 @@ static bool vtimer_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr) else perfc_incr(vtimer_sysreg_writes); - switch ( hsr.bits & HSR_SYSREG_REGS_MASK ) + switch (hsr.bits & HSR_SYSREG_REGS_MASK) { case HSR_SYSREG_CNTP_CTL_EL0: return vreg_emulate_sysreg32(regs, hsr, vtimer_cntp_ctl); @@ -313,14 +313,13 @@ static bool vtimer_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr) default: return false; } - } #endif bool 
vtimer_emulate(struct cpu_user_regs *regs, union hsr hsr) { - - switch (hsr.ec) { + switch (hsr.ec) + { case HSR_EC_CP15_32: return vtimer_emulate_cp32(regs, hsr); case HSR_EC_CP15_64: diff --git a/xen/arch/arm/vuart.c b/xen/arch/arm/vuart.c index 80d4755d43..35302dac08 100644 --- a/xen/arch/arm/vuart.c +++ b/xen/arch/arm/vuart.c @@ -42,19 +42,19 @@ #define domain_has_vuart(d) ((d)->arch.vuart.info != NULL) -static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info, - register_t *r, void *priv); -static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info, - register_t r, void *priv); +static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info, register_t *r, + void *priv); +static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info, register_t r, + void *priv); static const struct mmio_handler_ops vuart_mmio_handler = { - .read = vuart_mmio_read, + .read = vuart_mmio_read, .write = vuart_mmio_write, }; int domain_vuart_init(struct domain *d) { - ASSERT( is_hardware_domain(d) ); + ASSERT(is_hardware_domain(d)); d->arch.vuart.info = serial_vuart_info(SERHND_DTUART); if ( !d->arch.vuart.info ) @@ -67,10 +67,8 @@ int domain_vuart_init(struct domain *d) if ( !d->arch.vuart.buf ) return -ENOMEM; - register_mmio_handler(d, &vuart_mmio_handler, - d->arch.vuart.info->base_addr, - d->arch.vuart.info->size, - NULL); + register_mmio_handler(d, &vuart_mmio_handler, d->arch.vuart.info->base_addr, + d->arch.vuart.info->size, NULL); return 0; } @@ -90,7 +88,7 @@ static void vuart_print_char(struct vcpu *v, char c) /* Accept only printable characters, newline, and horizontal tab. */ if ( !isprint(c) && (c != '\n') && (c != '\t') ) - return ; + return; spin_lock(&uart->lock); uart->buf[uart->idx++] = c; @@ -105,8 +103,8 @@ static void vuart_print_char(struct vcpu *v, char c) spin_unlock(&uart->lock); } -static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info, - register_t *r, void *priv) +static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info, register_t *r, + void *priv) { struct domain *d = v->domain; paddr_t offset = info->gpa - d->arch.vuart.info->base_addr; @@ -123,8 +121,8 @@ static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info, return 1; } -static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info, - register_t r, void *priv) +static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info, register_t r, + void *priv) { struct domain *d = v->domain; paddr_t offset = info->gpa - d->arch.vuart.info->base_addr; @@ -146,4 +144,3 @@ static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info, * indent-tabs-mode: nil * End: */ - diff --git a/xen/arch/x86/acpi/boot.c b/xen/arch/x86/acpi/boot.c index e6ab9fa398..05b37e3713 100644 --- a/xen/arch/x86/acpi/boot.c +++ b/xen/arch/x86/acpi/boot.c @@ -42,7 +42,7 @@ #include #include -#define PREFIX "ACPI: " +#define PREFIX "ACPI: " bool __initdata acpi_noirq; /* skip ACPI IRQ initialization */ bool __initdata acpi_ht = true; /* enable HT */ @@ -62,450 +62,445 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; static int __init acpi_parse_madt(struct acpi_table_header *table) { - struct acpi_table_madt *madt; + struct acpi_table_madt *madt; - madt = (struct acpi_table_madt *)table; + madt = (struct acpi_table_madt *)table; - if (madt->address) { - acpi_lapic_addr = (u64) madt->address; + if ( madt->address ) + { + acpi_lapic_addr = (u64)madt->address; - printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n", - madt->address); - } + printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n", madt->address); + } - 
acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); + acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); - return 0; + return 0; } -static int __init -acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) +static int __init acpi_parse_x2apic(struct acpi_subtable_header *header, + const unsigned long end) { - struct acpi_madt_local_x2apic *processor = - container_of(header, struct acpi_madt_local_x2apic, header); - bool enabled = false, log = false; - - if (BAD_MADT_ENTRY(processor, end)) - return -EINVAL; - - if ((processor->lapic_flags & ACPI_MADT_ENABLED) || - processor->local_apic_id != 0xffffffff || opt_cpu_info) { - acpi_table_print_madt_entry(header); - log = true; - } - - /* Record local apic id only when enabled and fitting. */ - if (processor->local_apic_id >= MAX_APICS || - processor->uid >= MAX_MADT_ENTRIES) { - if (log) - printk("%sAPIC ID %#x and/or ACPI ID %#x beyond limit" - " - processor ignored\n", - processor->lapic_flags & ACPI_MADT_ENABLED - ? KERN_WARNING "WARNING: " : KERN_INFO, - processor->local_apic_id, processor->uid); - /* - * Must not return an error here, to prevent - * acpi_table_parse_entries() from terminating early. - */ - return 0 /* -ENOSPC */; - } - if (processor->lapic_flags & ACPI_MADT_ENABLED) { - x86_acpiid_to_apicid[processor->uid] = - processor->local_apic_id; - enabled = true; - } - - /* - * We need to register disabled CPU as well to permit - * counting disabled CPUs. This allows us to size - * cpus_possible_map more accurately, to permit - * to not preallocating memory for all NR_CPUS - * when we use CPU hotplug. - */ - mp_register_lapic(processor->local_apic_id, enabled, 0); - - return 0; + struct acpi_madt_local_x2apic *processor = + container_of(header, struct acpi_madt_local_x2apic, header); + bool enabled = false, log = false; + + if ( BAD_MADT_ENTRY(processor, end) ) + return -EINVAL; + + if ( (processor->lapic_flags & ACPI_MADT_ENABLED) || + processor->local_apic_id != 0xffffffff || opt_cpu_info ) + { + acpi_table_print_madt_entry(header); + log = true; + } + + /* Record local apic id only when enabled and fitting. */ + if ( processor->local_apic_id >= MAX_APICS || + processor->uid >= MAX_MADT_ENTRIES ) + { + if ( log ) + printk("%sAPIC ID %#x and/or ACPI ID %#x beyond limit" + " - processor ignored\n", + processor->lapic_flags & ACPI_MADT_ENABLED ? KERN_WARNING + "WARNING: " + : KERN_INFO, + processor->local_apic_id, processor->uid); + /* + * Must not return an error here, to prevent + * acpi_table_parse_entries() from terminating early. + */ + return 0 /* -ENOSPC */; + } + if ( processor->lapic_flags & ACPI_MADT_ENABLED ) + { + x86_acpiid_to_apicid[processor->uid] = processor->local_apic_id; + enabled = true; + } + + /* + * We need to register disabled CPU as well to permit + * counting disabled CPUs. This allows us to size + * cpus_possible_map more accurately, to permit + * to not preallocating memory for all NR_CPUS + * when we use CPU hotplug. 
+ */ + mp_register_lapic(processor->local_apic_id, enabled, 0); + + return 0; } -static int __init -acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end) +static int __init acpi_parse_lapic(struct acpi_subtable_header *header, + const unsigned long end) { - struct acpi_madt_local_apic *processor = - container_of(header, struct acpi_madt_local_apic, header); - bool enabled = false; - - if (BAD_MADT_ENTRY(processor, end)) - return -EINVAL; - - if ((processor->lapic_flags & ACPI_MADT_ENABLED) || - processor->id != 0xff || opt_cpu_info) - acpi_table_print_madt_entry(header); - - /* Record local apic id only when enabled */ - if (processor->lapic_flags & ACPI_MADT_ENABLED) { - x86_acpiid_to_apicid[processor->processor_id] = processor->id; - enabled = true; - } - - /* - * We need to register disabled CPU as well to permit - * counting disabled CPUs. This allows us to size - * cpus_possible_map more accurately, to permit - * to not preallocating memory for all NR_CPUS - * when we use CPU hotplug. - */ - mp_register_lapic(processor->id, enabled, 0); - - return 0; + struct acpi_madt_local_apic *processor = + container_of(header, struct acpi_madt_local_apic, header); + bool enabled = false; + + if ( BAD_MADT_ENTRY(processor, end) ) + return -EINVAL; + + if ( (processor->lapic_flags & ACPI_MADT_ENABLED) || + processor->id != 0xff || opt_cpu_info ) + acpi_table_print_madt_entry(header); + + /* Record local apic id only when enabled */ + if ( processor->lapic_flags & ACPI_MADT_ENABLED ) + { + x86_acpiid_to_apicid[processor->processor_id] = processor->id; + enabled = true; + } + + /* + * We need to register disabled CPU as well to permit + * counting disabled CPUs. This allows us to size + * cpus_possible_map more accurately, to permit + * to not preallocating memory for all NR_CPUS + * when we use CPU hotplug. 
+ */ + mp_register_lapic(processor->id, enabled, 0); + + return 0; } -static int __init -acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, - const unsigned long end) +static int __init acpi_parse_lapic_addr_ovr(struct acpi_subtable_header *header, + const unsigned long end) { - struct acpi_madt_local_apic_override *lapic_addr_ovr = - container_of(header, struct acpi_madt_local_apic_override, - header); + struct acpi_madt_local_apic_override *lapic_addr_ovr = + container_of(header, struct acpi_madt_local_apic_override, header); - if (BAD_MADT_ENTRY(lapic_addr_ovr, end)) - return -EINVAL; + if ( BAD_MADT_ENTRY(lapic_addr_ovr, end) ) + return -EINVAL; - acpi_lapic_addr = lapic_addr_ovr->address; + acpi_lapic_addr = lapic_addr_ovr->address; - return 0; + return 0; } -static int __init -acpi_parse_x2apic_nmi(struct acpi_subtable_header *header, - const unsigned long end) +static int __init acpi_parse_x2apic_nmi(struct acpi_subtable_header *header, + const unsigned long end) { - struct acpi_madt_local_x2apic_nmi *x2apic_nmi = - container_of(header, struct acpi_madt_local_x2apic_nmi, - header); + struct acpi_madt_local_x2apic_nmi *x2apic_nmi = + container_of(header, struct acpi_madt_local_x2apic_nmi, header); - if (BAD_MADT_ENTRY(x2apic_nmi, end)) - return -EINVAL; + if ( BAD_MADT_ENTRY(x2apic_nmi, end) ) + return -EINVAL; - acpi_table_print_madt_entry(header); + acpi_table_print_madt_entry(header); - if (x2apic_nmi->lint != 1) - printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); + if ( x2apic_nmi->lint != 1 ) + printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); - return 0; + return 0; } -static int __init -acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end) +static int __init acpi_parse_lapic_nmi(struct acpi_subtable_header *header, + const unsigned long end) { - struct acpi_madt_local_apic_nmi *lapic_nmi = - container_of(header, struct acpi_madt_local_apic_nmi, header); + struct acpi_madt_local_apic_nmi *lapic_nmi = + container_of(header, struct acpi_madt_local_apic_nmi, header); - if (BAD_MADT_ENTRY(lapic_nmi, end)) - return -EINVAL; + if ( BAD_MADT_ENTRY(lapic_nmi, end) ) + return -EINVAL; - acpi_table_print_madt_entry(header); + acpi_table_print_madt_entry(header); - if (lapic_nmi->lint != 1) - printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); + if ( lapic_nmi->lint != 1 ) + printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); - return 0; + return 0; } -static int __init -acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) +static int __init acpi_parse_ioapic(struct acpi_subtable_header *header, + const unsigned long end) { - struct acpi_madt_io_apic *ioapic = - container_of(header, struct acpi_madt_io_apic, header); + struct acpi_madt_io_apic *ioapic = + container_of(header, struct acpi_madt_io_apic, header); - if (BAD_MADT_ENTRY(ioapic, end)) - return -EINVAL; + if ( BAD_MADT_ENTRY(ioapic, end) ) + return -EINVAL; - acpi_table_print_madt_entry(header); + acpi_table_print_madt_entry(header); - mp_register_ioapic(ioapic->id, - ioapic->address, ioapic->global_irq_base); + mp_register_ioapic(ioapic->id, ioapic->address, ioapic->global_irq_base); - return 0; + return 0; } -static int __init -acpi_parse_int_src_ovr(struct acpi_subtable_header * header, - const unsigned long end) +static int __init acpi_parse_int_src_ovr(struct acpi_subtable_header *header, + const unsigned long end) { - struct acpi_madt_interrupt_override *intsrc = - container_of(header, struct 
acpi_madt_interrupt_override, - header); + struct acpi_madt_interrupt_override *intsrc = + container_of(header, struct acpi_madt_interrupt_override, header); - if (BAD_MADT_ENTRY(intsrc, end)) - return -EINVAL; + if ( BAD_MADT_ENTRY(intsrc, end) ) + return -EINVAL; - acpi_table_print_madt_entry(header); + acpi_table_print_madt_entry(header); - if (acpi_skip_timer_override && - intsrc->source_irq == 0 && intsrc->global_irq == 2) { - printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); - return 0; - } + if ( acpi_skip_timer_override && intsrc->source_irq == 0 && + intsrc->global_irq == 2 ) + { + printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); + return 0; + } - mp_override_legacy_irq(intsrc->source_irq, - ACPI_MADT_GET_POLARITY(intsrc->inti_flags), - ACPI_MADT_GET_TRIGGER(intsrc->inti_flags), - intsrc->global_irq); + mp_override_legacy_irq( + intsrc->source_irq, ACPI_MADT_GET_POLARITY(intsrc->inti_flags), + ACPI_MADT_GET_TRIGGER(intsrc->inti_flags), intsrc->global_irq); - return 0; + return 0; } -static int __init -acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end) +static int __init acpi_parse_nmi_src(struct acpi_subtable_header *header, + const unsigned long end) { - struct acpi_madt_nmi_source *nmi_src = - container_of(header, struct acpi_madt_nmi_source, header); + struct acpi_madt_nmi_source *nmi_src = + container_of(header, struct acpi_madt_nmi_source, header); - if (BAD_MADT_ENTRY(nmi_src, end)) - return -EINVAL; + if ( BAD_MADT_ENTRY(nmi_src, end) ) + return -EINVAL; - acpi_table_print_madt_entry(header); + acpi_table_print_madt_entry(header); - /* TBD: Support nimsrc entries? */ + /* TBD: Support nimsrc entries? */ - return 0; + return 0; } #ifdef CONFIG_HPET_TIMER static int __init acpi_parse_hpet(struct acpi_table_header *table) { - struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table; - - if (hpet_tbl->address.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) { - printk(KERN_WARNING PREFIX "HPET timers must be located in " - "memory.\n"); - return -1; - } - - /* - * Some BIOSes provide multiple HPET tables. Sometimes this is a BIOS - * bug; the intended way of supporting more than 1 HPET is to use AML - * entries. - * - * If someone finds a real system with two genuine HPET tables, perhaps - * they will be kind and implement support. Until then however, warn - * that we will ignore subsequent tables. - */ - if (hpet_address) - { - printk(KERN_WARNING PREFIX - "Found multiple HPET tables. Only using first\n"); - return -1; - } - - hpet_address = hpet_tbl->address.address; - hpet_blockid = hpet_tbl->sequence; - hpet_flags = hpet_tbl->flags; - printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", - hpet_tbl->id, hpet_address); - - return 0; + struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table; + + if ( hpet_tbl->address.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY ) + { + printk(KERN_WARNING PREFIX "HPET timers must be located in " + "memory.\n"); + return -1; + } + + /* + * Some BIOSes provide multiple HPET tables. Sometimes this is a BIOS + * bug; the intended way of supporting more than 1 HPET is to use AML + * entries. + * + * If someone finds a real system with two genuine HPET tables, perhaps + * they will be kind and implement support. Until then however, warn + * that we will ignore subsequent tables. + */ + if ( hpet_address ) + { + printk(KERN_WARNING PREFIX + "Found multiple HPET tables. 
Only using first\n"); + return -1; + } + + hpet_address = hpet_tbl->address.address; + hpet_blockid = hpet_tbl->sequence; + hpet_flags = hpet_tbl->flags; + printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", hpet_tbl->id, + hpet_address); + + return 0; } #else -#define acpi_parse_hpet NULL +#define acpi_parse_hpet NULL #endif static int __init acpi_invalidate_bgrt(struct acpi_table_header *table) { - struct acpi_table_bgrt *bgrt_tbl = - container_of(table, struct acpi_table_bgrt, header); + struct acpi_table_bgrt *bgrt_tbl = + container_of(table, struct acpi_table_bgrt, header); - if (table->length < sizeof(*bgrt_tbl)) - return -1; + if ( table->length < sizeof(*bgrt_tbl) ) + return -1; - if (bgrt_tbl->version == 1 && bgrt_tbl->image_address - && !page_is_ram_type(PFN_DOWN(bgrt_tbl->image_address), - RAM_TYPE_CONVENTIONAL)) - return 0; + if ( bgrt_tbl->version == 1 && bgrt_tbl->image_address && + !page_is_ram_type(PFN_DOWN(bgrt_tbl->image_address), + RAM_TYPE_CONVENTIONAL) ) + return 0; - printk(KERN_INFO PREFIX "BGRT: invalidating v%d image at %#"PRIx64"\n", - bgrt_tbl->version, bgrt_tbl->image_address); - bgrt_tbl->image_address = 0; - bgrt_tbl->status &= ~1; + printk(KERN_INFO PREFIX "BGRT: invalidating v%d image at %#" PRIx64 "\n", + bgrt_tbl->version, bgrt_tbl->image_address); + bgrt_tbl->image_address = 0; + bgrt_tbl->status &= ~1; - return 0; + return 0; } #ifdef CONFIG_ACPI_SLEEP -#define acpi_fadt_copy_address(dst, src, len) do { \ - if (fadt->header.revision >= FADT2_REVISION_ID && \ - fadt->header.length >= ACPI_FADT_V2_SIZE) \ - acpi_sinfo.dst##_blk = fadt->x##src##_block; \ - if (!acpi_sinfo.dst##_blk.address) { \ - acpi_sinfo.dst##_blk.address = fadt->src##_block; \ - acpi_sinfo.dst##_blk.space_id = ACPI_ADR_SPACE_SYSTEM_IO; \ - acpi_sinfo.dst##_blk.bit_width = fadt->len##_length << 3; \ - acpi_sinfo.dst##_blk.bit_offset = 0; \ - acpi_sinfo.dst##_blk.access_width = fadt->len##_length; \ - } \ -} while (0) +#define acpi_fadt_copy_address(dst, src, len) \ + do { \ + if ( fadt->header.revision >= FADT2_REVISION_ID && \ + fadt->header.length >= ACPI_FADT_V2_SIZE ) \ + acpi_sinfo.dst##_blk = fadt->x##src##_block; \ + if ( !acpi_sinfo.dst##_blk.address ) \ + { \ + acpi_sinfo.dst##_blk.address = fadt->src##_block; \ + acpi_sinfo.dst##_blk.space_id = ACPI_ADR_SPACE_SYSTEM_IO; \ + acpi_sinfo.dst##_blk.bit_width = fadt->len##_length << 3; \ + acpi_sinfo.dst##_blk.bit_offset = 0; \ + acpi_sinfo.dst##_blk.access_width = fadt->len##_length; \ + } \ + } while ( 0 ) /* Get pm1x_cnt and pm1x_evt information for ACPI sleep */ -static void __init -acpi_fadt_parse_sleep_info(struct acpi_table_fadt *fadt) +static void __init acpi_fadt_parse_sleep_info(struct acpi_table_fadt *fadt) { - struct acpi_table_facs *facs = NULL; - uint64_t facs_pa; - - if (fadt->header.revision >= 5 && - fadt->header.length >= ACPI_FADT_V5_SIZE) { - acpi_sinfo.sleep_control = fadt->sleep_control; - acpi_sinfo.sleep_status = fadt->sleep_status; - - printk(KERN_INFO PREFIX - "v5 SLEEP INFO: control[%d:%"PRIx64"]," - " status[%d:%"PRIx64"]\n", - acpi_sinfo.sleep_control.space_id, - acpi_sinfo.sleep_control.address, - acpi_sinfo.sleep_status.space_id, - acpi_sinfo.sleep_status.address); - - if ((fadt->sleep_control.address && - (fadt->sleep_control.bit_offset || - fadt->sleep_control.bit_width != - fadt->sleep_control.access_width * 8)) || - (fadt->sleep_status.address && - (fadt->sleep_status.bit_offset || - fadt->sleep_status.bit_width != - fadt->sleep_status.access_width * 8))) { - printk(KERN_WARNING PREFIX - "Invalid 
sleep control/status register data:" - " %#x:%#x:%#x %#x:%#x:%#x\n", - fadt->sleep_control.bit_offset, - fadt->sleep_control.bit_width, - fadt->sleep_control.access_width, - fadt->sleep_status.bit_offset, - fadt->sleep_status.bit_width, - fadt->sleep_status.access_width); - fadt->sleep_control.address = 0; - fadt->sleep_status.address = 0; - } - } - - if (fadt->flags & ACPI_FADT_HW_REDUCED) - goto bad; - - acpi_fadt_copy_address(pm1a_cnt, pm1a_control, pm1_control); - acpi_fadt_copy_address(pm1b_cnt, pm1b_control, pm1_control); - acpi_fadt_copy_address(pm1a_evt, pm1a_event, pm1_event); - acpi_fadt_copy_address(pm1b_evt, pm1b_event, pm1_event); - - printk(KERN_INFO PREFIX - "SLEEP INFO: pm1x_cnt[%d:%"PRIx64",%d:%"PRIx64"], " - "pm1x_evt[%d:%"PRIx64",%d:%"PRIx64"]\n", - acpi_sinfo.pm1a_cnt_blk.space_id, - acpi_sinfo.pm1a_cnt_blk.address, - acpi_sinfo.pm1b_cnt_blk.space_id, - acpi_sinfo.pm1b_cnt_blk.address, - acpi_sinfo.pm1a_evt_blk.space_id, - acpi_sinfo.pm1a_evt_blk.address, - acpi_sinfo.pm1b_evt_blk.space_id, - acpi_sinfo.pm1b_evt_blk.address); - - /* Now FACS... */ - facs_pa = ((fadt->header.revision >= FADT2_REVISION_ID) - ? fadt->Xfacs : (uint64_t)fadt->facs); - if (fadt->facs && ((uint64_t)fadt->facs != facs_pa)) { - printk(KERN_WARNING PREFIX - "32/64X FACS address mismatch in FADT - " - "%08x/%016"PRIx64", using 32\n", - fadt->facs, facs_pa); - facs_pa = (uint64_t)fadt->facs; - } - if (!facs_pa) - goto bad; - - facs = (struct acpi_table_facs *) - __acpi_map_table(facs_pa, sizeof(struct acpi_table_facs)); - if (!facs) - goto bad; - - if (strncmp(facs->signature, "FACS", 4)) { - printk(KERN_ERR PREFIX "Invalid FACS signature %.4s\n", - facs->signature); - goto bad; - } - - if (facs->length < 24) { - printk(KERN_ERR PREFIX "Invalid FACS table length: %#x", - facs->length); - goto bad; - } - - if (facs->length < 64) - printk(KERN_WARNING PREFIX - "FACS is shorter than ACPI spec allow: %#x", - facs->length); - - acpi_sinfo.wakeup_vector = facs_pa + - offsetof(struct acpi_table_facs, firmware_waking_vector); - acpi_sinfo.vector_width = 32; - - printk(KERN_INFO PREFIX - " wakeup_vec[%"PRIx64"], vec_size[%x]\n", - acpi_sinfo.wakeup_vector, acpi_sinfo.vector_width); - return; + struct acpi_table_facs *facs = NULL; + uint64_t facs_pa; + + if ( fadt->header.revision >= 5 && + fadt->header.length >= ACPI_FADT_V5_SIZE ) + { + acpi_sinfo.sleep_control = fadt->sleep_control; + acpi_sinfo.sleep_status = fadt->sleep_status; + + printk( + KERN_INFO PREFIX "v5 SLEEP INFO: control[%d:%" PRIx64 "]," + " status[%d:%" PRIx64 "]\n", + acpi_sinfo.sleep_control.space_id, acpi_sinfo.sleep_control.address, + acpi_sinfo.sleep_status.space_id, acpi_sinfo.sleep_status.address); + + if ( (fadt->sleep_control.address && + (fadt->sleep_control.bit_offset || + fadt->sleep_control.bit_width != + fadt->sleep_control.access_width * 8)) || + (fadt->sleep_status.address && + (fadt->sleep_status.bit_offset || + fadt->sleep_status.bit_width != + fadt->sleep_status.access_width * 8)) ) + { + printk( + KERN_WARNING PREFIX + "Invalid sleep control/status register data:" + " %#x:%#x:%#x %#x:%#x:%#x\n", + fadt->sleep_control.bit_offset, fadt->sleep_control.bit_width, + fadt->sleep_control.access_width, fadt->sleep_status.bit_offset, + fadt->sleep_status.bit_width, fadt->sleep_status.access_width); + fadt->sleep_control.address = 0; + fadt->sleep_status.address = 0; + } + } + + if ( fadt->flags & ACPI_FADT_HW_REDUCED ) + goto bad; + + acpi_fadt_copy_address(pm1a_cnt, pm1a_control, pm1_control); + acpi_fadt_copy_address(pm1b_cnt, 
pm1b_control, pm1_control); + acpi_fadt_copy_address(pm1a_evt, pm1a_event, pm1_event); + acpi_fadt_copy_address(pm1b_evt, pm1b_event, pm1_event); + + printk(KERN_INFO PREFIX "SLEEP INFO: pm1x_cnt[%d:%" PRIx64 ",%d:%" PRIx64 + "], " + "pm1x_evt[%d:%" PRIx64 ",%d:%" PRIx64 "]\n", + acpi_sinfo.pm1a_cnt_blk.space_id, acpi_sinfo.pm1a_cnt_blk.address, + acpi_sinfo.pm1b_cnt_blk.space_id, acpi_sinfo.pm1b_cnt_blk.address, + acpi_sinfo.pm1a_evt_blk.space_id, acpi_sinfo.pm1a_evt_blk.address, + acpi_sinfo.pm1b_evt_blk.space_id, acpi_sinfo.pm1b_evt_blk.address); + + /* Now FACS... */ + facs_pa = + ((fadt->header.revision >= FADT2_REVISION_ID) ? fadt->Xfacs + : (uint64_t)fadt->facs); + if ( fadt->facs && ((uint64_t)fadt->facs != facs_pa) ) + { + printk(KERN_WARNING PREFIX "32/64X FACS address mismatch in FADT - " + "%08x/%016" PRIx64 ", using 32\n", + fadt->facs, facs_pa); + facs_pa = (uint64_t)fadt->facs; + } + if ( !facs_pa ) + goto bad; + + facs = (struct acpi_table_facs *)__acpi_map_table( + facs_pa, sizeof(struct acpi_table_facs)); + if ( !facs ) + goto bad; + + if ( strncmp(facs->signature, "FACS", 4) ) + { + printk(KERN_ERR PREFIX "Invalid FACS signature %.4s\n", + facs->signature); + goto bad; + } + + if ( facs->length < 24 ) + { + printk(KERN_ERR PREFIX "Invalid FACS table length: %#x", facs->length); + goto bad; + } + + if ( facs->length < 64 ) + printk(KERN_WARNING PREFIX "FACS is shorter than ACPI spec allow: %#x", + facs->length); + + acpi_sinfo.wakeup_vector = + facs_pa + offsetof(struct acpi_table_facs, firmware_waking_vector); + acpi_sinfo.vector_width = 32; + + printk(KERN_INFO PREFIX " wakeup_vec[%" PRIx64 + "], vec_size[%x]\n", + acpi_sinfo.wakeup_vector, acpi_sinfo.vector_width); + return; bad: - memset(&acpi_sinfo, 0, - offsetof(struct acpi_sleep_info, sleep_control)); - memset(&acpi_sinfo.sleep_status + 1, 0, - (long)(&acpi_sinfo + 1) - (long)(&acpi_sinfo.sleep_status + 1)); + memset(&acpi_sinfo, 0, offsetof(struct acpi_sleep_info, sleep_control)); + memset(&acpi_sinfo.sleep_status + 1, 0, + (long)(&acpi_sinfo + 1) - (long)(&acpi_sinfo.sleep_status + 1)); } #endif static int __init acpi_parse_fadt(struct acpi_table_header *table) { - struct acpi_table_fadt *fadt = (struct acpi_table_fadt *)table; + struct acpi_table_fadt *fadt = (struct acpi_table_fadt *)table; -#ifdef CONFIG_ACPI_INTERPRETER - /* initialize sci_int early for INT_SRC_OVR MADT parsing */ - acpi_fadt.sci_int = fadt->sci_int; +#ifdef CONFIG_ACPI_INTERPRETER + /* initialize sci_int early for INT_SRC_OVR MADT parsing */ + acpi_fadt.sci_int = fadt->sci_int; - /* initialize rev and apic_phys_dest_mode for x86_64 genapic */ - acpi_fadt.revision = fadt->revision; - acpi_fadt.force_apic_physical_destination_mode = - fadt->force_apic_physical_destination_mode; + /* initialize rev and apic_phys_dest_mode for x86_64 genapic */ + acpi_fadt.revision = fadt->revision; + acpi_fadt.force_apic_physical_destination_mode = + fadt->force_apic_physical_destination_mode; #endif #ifdef CONFIG_X86_PM_TIMER - /* detect the location of the ACPI PM Timer */ - if (fadt->header.revision >= FADT2_REVISION_ID) { - /* FADT rev. 2 */ - if (fadt->xpm_timer_block.space_id == - ACPI_ADR_SPACE_SYSTEM_IO) { - pmtmr_ioport = fadt->xpm_timer_block.address; - pmtmr_width = fadt->xpm_timer_block.bit_width; - } - } - /* - * "X" fields are optional extensions to the original V1.0 - * fields, so we must selectively expand V1.0 fields if the - * corresponding X field is zero. 
- */
- if (!pmtmr_ioport) {
- pmtmr_ioport = fadt->pm_timer_block;
- pmtmr_width = fadt->pm_timer_length == 4 ? 24 : 0;
- }
- if (pmtmr_ioport)
- printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x (%u bits)\n",
- pmtmr_ioport, pmtmr_width);
+ /* detect the location of the ACPI PM Timer */
+ if ( fadt->header.revision >= FADT2_REVISION_ID )
+ {
+ /* FADT rev. 2 */
+ if ( fadt->xpm_timer_block.space_id == ACPI_ADR_SPACE_SYSTEM_IO )
+ {
+ pmtmr_ioport = fadt->xpm_timer_block.address;
+ pmtmr_width = fadt->xpm_timer_block.bit_width;
+ }
+ }
+ /*
+ * "X" fields are optional extensions to the original V1.0
+ * fields, so we must selectively expand V1.0 fields if the
+ * corresponding X field is zero.
+ */
+ if ( !pmtmr_ioport )
+ {
+ pmtmr_ioport = fadt->pm_timer_block;
+ pmtmr_width = fadt->pm_timer_length == 4 ? 24 : 0;
+ }
+ if ( pmtmr_ioport )
+ printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x (%u bits)\n",
+ pmtmr_ioport, pmtmr_width);
 #endif
- acpi_smi_cmd = fadt->smi_command;
- acpi_enable_value = fadt->acpi_enable;
- acpi_disable_value = fadt->acpi_disable;
+ acpi_smi_cmd = fadt->smi_command;
+ acpi_enable_value = fadt->acpi_enable;
+ acpi_disable_value = fadt->acpi_disable;
 #ifdef CONFIG_ACPI_SLEEP
- acpi_fadt_parse_sleep_info(fadt);
+ acpi_fadt_parse_sleep_info(fadt);
 #endif
- return 0;
+ return 0;
 }
 /*
@@ -514,54 +509,55 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
 */
 static int __init acpi_parse_madt_lapic_entries(void)
 {
- int count, x2count;
-
- if (!cpu_has_apic)
- return -ENODEV;
-
- /*
- * Note that the LAPIC address is obtained from the MADT (32-bit value)
- * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
+ */
+
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
+ acpi_parse_lapic_addr_ovr, 0);
+ if ( count < 0 )
+ {
+ printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
+ return count;
+ }
+
+ mp_register_lapic_address(acpi_lapic_addr);
+
+ BUILD_BUG_ON(MAX_APICS != MAX_LOCAL_APIC);
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic,
+ MAX_APICS);
+ x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
+ acpi_parse_x2apic, MAX_APICS);
+ if ( !count && !x2count )
+ {
+ printk(KERN_ERR PREFIX "No LAPIC entries present\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return -ENODEV;
+ }
+ else if ( count < 0 || x2count < 0 )
+ {
+ printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return count < 0 ? count : x2count;
+ }
+
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
 acpi_parse_lapic_nmi, 0);
- x2count =
- acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI,
- acpi_parse_x2apic_nmi, 0);
- if (count < 0 || x2count < 0) {
- printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
- /* TBD: Cleanup to allow fallback to MPS */
- return count < 0 ? count : x2count;
- }
- return 0;
+ x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI,
+ acpi_parse_x2apic_nmi, 0);
+ if ( count < 0 || x2count < 0 )
+ {
+ printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return count < 0 ? count : x2count;
+ }
+ return 0;
 }
 /*
@@ -570,100 +566,106 @@ static int __init acpi_parse_madt_lapic_entries(void)
 */
 static int __init acpi_parse_madt_ioapic_entries(void)
 {
- int count;
-
- /*
- * ACPI interpreter is required to complete interrupt setup,
- * so if it is off, don't enumerate the io-apics with ACPI.
+ * If MPS is present, it will handle them,
+ * otherwise the system will stay in PIC mode
+ */
+ if ( acpi_disabled || acpi_noirq )
+ {
+ return -ENODEV;
+ }
+
+ if ( !cpu_has_apic )
+ return -ENODEV;
+
+ /*
+ * if "noapic" boot option, don't look for IO-APICs
+ */
+ if ( skip_ioapic_setup )
+ {
+ printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
+ "due to 'noapic' option.\n");
+ return -ENODEV;
+ }
+
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
+ MAX_IO_APICS);
+ if ( !count )
+ {
+ printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
+ return -ENODEV;
+ }
+ else if ( count < 0 )
+ {
+ printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
+ return count;
+ }
+
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
 acpi_parse_int_src_ovr, MAX_IRQ_SOURCES);
- if (count < 0) {
- printk(KERN_ERR PREFIX
- "Error parsing interrupt source overrides entry\n");
- /* TBD: Cleanup to allow fallback to MPS */
- return count;
- }
-
- /* Fill in identity legacy mapings where no override */
- mp_config_acpi_legacy_irqs();
-
- count =
- acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
- acpi_parse_nmi_src, MAX_IRQ_SOURCES);
- if (count < 0) {
- printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
- /* TBD: Cleanup to allow fallback to MPS */
- return count;
- }
-
- return 0;
+ if ( count < 0 )
+ {
+ printk(KERN_ERR PREFIX
+ "Error parsing interrupt source overrides entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return count;
+ }
+
+ /* Fill in identity legacy mapings where no override */
+ mp_config_acpi_legacy_irqs();
+
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
+ MAX_IRQ_SOURCES);
+ if ( count < 0 )
+ {
+ printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return count;
+ }
+
+ return 0;
 }
 static void __init acpi_process_madt(void)
 {
- int error;
-
- if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
-
- /*
- * Parse MADT LAPIC entries
- */
- error = acpi_parse_madt_lapic_entries();
- if (!error) {
- acpi_lapic = true;
- generic_bigsmp_probe();
-
- /*
- * Parse MADT IO-APIC entries
- */
- error = acpi_parse_madt_ioapic_entries();
- if (!error) {
- acpi_ioapic = true;
-
- smp_found_config = true;
- clustered_apic_check();
- }
- }
- if (error == -EINVAL) {
- /*
- * Dell Precision Workstation 410, 610 come here.
- */
- printk(KERN_ERR PREFIX
- "Invalid BIOS MADT, disabling ACPI\n");
- disable_acpi();
- }
- }
+ int error;
+
+ if ( !acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt) )
+ {
+ /*
+ * Parse MADT LAPIC entries
+ */
+ error = acpi_parse_madt_lapic_entries();
+ if ( !error )
+ {
+ acpi_lapic = true;
+ generic_bigsmp_probe();
+
+ /*
+ * Parse MADT IO-APIC entries
+ */
+ error = acpi_parse_madt_ioapic_entries();
+ if ( !error )
+ {
+ acpi_ioapic = true;
+
+ smp_found_config = true;
+ clustered_apic_check();
+ }
+ }
+ if ( error == -EINVAL )
+ {
+ /*
+ * Dell Precision Workstation 410, 610 come here.
+ */
+ printk(KERN_ERR PREFIX "Invalid BIOS MADT, disabling ACPI\n");
+ disable_acpi();
+ }
+ }
 }
 /*
@@ -689,57 +691,58 @@ static void __init acpi_process_madt(void)
 int __init acpi_boot_table_init(void)
 {
- int error;
-
- /*
- * If acpi_disabled, bail out
- * One exception: acpi=ht continues far enough to enumerate LAPICs
- */
- if (acpi_disabled && !acpi_ht)
- return 1;
-
- /*
- * Initialize the ACPI boot-time table parser.
- */ - error = acpi_table_init(); - if (error) { - disable_acpi(); - return error; - } - - return 0; + int error; + + /* + * If acpi_disabled, bail out + * One exception: acpi=ht continues far enough to enumerate LAPICs + */ + if ( acpi_disabled && !acpi_ht ) + return 1; + + /* + * Initialize the ACPI boot-time table parser. + */ + error = acpi_table_init(); + if ( error ) + { + disable_acpi(); + return error; + } + + return 0; } int __init acpi_boot_init(void) { - /* - * If acpi_disabled, bail out - * One exception: acpi=ht continues far enough to enumerate LAPICs - */ - if (acpi_disabled && !acpi_ht) - return 1; + /* + * If acpi_disabled, bail out + * One exception: acpi=ht continues far enough to enumerate LAPICs + */ + if ( acpi_disabled && !acpi_ht ) + return 1; - /* - * set sci_int and PM timer address - */ - acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt); + /* + * set sci_int and PM timer address + */ + acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt); - /* - * Process the Multiple APIC Description Table (MADT), if present - */ - acpi_process_madt(); + /* + * Process the Multiple APIC Description Table (MADT), if present + */ + acpi_process_madt(); - acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet); + acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet); - acpi_mmcfg_init(); + acpi_mmcfg_init(); - acpi_dmar_init(); + acpi_dmar_init(); - erst_init(); + erst_init(); - acpi_hest_init(); + acpi_hest_init(); - acpi_table_parse(ACPI_SIG_BGRT, acpi_invalidate_bgrt); + acpi_table_parse(ACPI_SIG_BGRT, acpi_invalidate_bgrt); - return 0; + return 0; } diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c index 14b02789c5..2df9fbbb28 100644 --- a/xen/arch/x86/acpi/cpu_idle.c +++ b/xen/arch/x86/acpi/cpu_idle.c @@ -1,6 +1,6 @@ /* - * cpu_idle - xen idle state module derived from Linux - * drivers/acpi/processor_idle.c & + * cpu_idle - xen idle state module derived from Linux + * drivers/acpi/processor_idle.c & * arch/x86/kernel/acpi/cstate.c * * Copyright (C) 2001, 2002 Andy Grover @@ -60,22 +60,28 @@ /*#define DEBUG_PM_CX*/ #define GET_HW_RES_IN_NS(msr, val) \ - do { rdmsrl(msr, val); val = tsc_ticks2ns(val); } while( 0 ) -#define GET_MC6_RES(val) GET_HW_RES_IN_NS(0x664, val) -#define GET_PC2_RES(val) GET_HW_RES_IN_NS(0x60D, val) /* SNB onwards */ -#define GET_PC3_RES(val) GET_HW_RES_IN_NS(0x3F8, val) -#define GET_PC6_RES(val) GET_HW_RES_IN_NS(0x3F9, val) -#define GET_PC7_RES(val) GET_HW_RES_IN_NS(0x3FA, val) -#define GET_PC8_RES(val) GET_HW_RES_IN_NS(0x630, val) /* some Haswells only */ -#define GET_PC9_RES(val) GET_HW_RES_IN_NS(0x631, val) /* some Haswells only */ -#define GET_PC10_RES(val) GET_HW_RES_IN_NS(0x632, val) /* some Haswells only */ -#define GET_CC1_RES(val) GET_HW_RES_IN_NS(0x660, val) /* Silvermont only */ -#define GET_CC3_RES(val) GET_HW_RES_IN_NS(0x3FC, val) -#define GET_CC6_RES(val) GET_HW_RES_IN_NS(0x3FD, val) -#define GET_CC7_RES(val) GET_HW_RES_IN_NS(0x3FE, val) /* SNB onwards */ -#define PHI_CC6_RES(val) GET_HW_RES_IN_NS(0x3FF, val) /* Xeon Phi only */ - -static void lapic_timer_nop(void) { } + do { \ + rdmsrl(msr, val); \ + val = tsc_ticks2ns(val); \ + } while ( 0 ) +#define GET_MC6_RES(val) GET_HW_RES_IN_NS(0x664, val) +#define GET_PC2_RES(val) GET_HW_RES_IN_NS(0x60D, val) /* SNB onwards */ +#define GET_PC3_RES(val) GET_HW_RES_IN_NS(0x3F8, val) +#define GET_PC6_RES(val) GET_HW_RES_IN_NS(0x3F9, val) +#define GET_PC7_RES(val) GET_HW_RES_IN_NS(0x3FA, val) +#define GET_PC8_RES(val) GET_HW_RES_IN_NS(0x630, val) /* some Haswells only */ +#define 
GET_PC9_RES(val) GET_HW_RES_IN_NS(0x631, val) /* some Haswells only */ +#define GET_PC10_RES(val) GET_HW_RES_IN_NS(0x632, val) /* some Haswells only \ + */ +#define GET_CC1_RES(val) GET_HW_RES_IN_NS(0x660, val) /* Silvermont only */ +#define GET_CC3_RES(val) GET_HW_RES_IN_NS(0x3FC, val) +#define GET_CC6_RES(val) GET_HW_RES_IN_NS(0x3FD, val) +#define GET_CC7_RES(val) GET_HW_RES_IN_NS(0x3FE, val) /* SNB onwards */ +#define PHI_CC6_RES(val) GET_HW_RES_IN_NS(0x3FF, val) /* Xeon Phi only */ + +static void lapic_timer_nop(void) +{ +} void (*__read_mostly lapic_timer_off)(void); void (*__read_mostly lapic_timer_on)(void); @@ -138,7 +144,7 @@ static void do_get_hw_residencies(void *arg) if ( c->x86_vendor != X86_VENDOR_INTEL || c->x86 != 6 ) return; - switch ( c->x86_model ) + switch (c->x86_model) { /* 4th generation Intel Core (Haswell) */ case 0x45: @@ -252,33 +258,38 @@ static void print_hw_residencies(uint32_t cpu) get_hw_residencies(cpu, &hw_res); if ( hw_res.mc0 | hw_res.mc6 ) - printk("MC0[%"PRIu64"] MC6[%"PRIu64"]\n", - hw_res.mc0, hw_res.mc6); - printk("PC2[%"PRIu64"] PC%d[%"PRIu64"] PC6[%"PRIu64"] PC7[%"PRIu64"]\n", - hw_res.pc2, - hw_res.pc4 ? 4 : 3, hw_res.pc4 ?: hw_res.pc3, - hw_res.pc6, hw_res.pc7); + printk("MC0[%" PRIu64 "] MC6[%" PRIu64 "]\n", hw_res.mc0, hw_res.mc6); + printk("PC2[%" PRIu64 "] PC%d[%" PRIu64 "] PC6[%" PRIu64 "] PC7[%" PRIu64 + "]\n", + hw_res.pc2, hw_res.pc4 ? 4 : 3, hw_res.pc4 ?: hw_res.pc3, hw_res.pc6, + hw_res.pc7); if ( hw_res.pc8 | hw_res.pc9 | hw_res.pc10 ) - printk("PC8[%"PRIu64"] PC9[%"PRIu64"] PC10[%"PRIu64"]\n", + printk("PC8[%" PRIu64 "] PC9[%" PRIu64 "] PC10[%" PRIu64 "]\n", hw_res.pc8, hw_res.pc9, hw_res.pc10); - printk("CC%d[%"PRIu64"] CC6[%"PRIu64"] CC7[%"PRIu64"]\n", - hw_res.cc1 ? 1 : 3, hw_res.cc1 ?: hw_res.cc3, - hw_res.cc6, hw_res.cc7); + printk("CC%d[%" PRIu64 "] CC6[%" PRIu64 "] CC7[%" PRIu64 "]\n", + hw_res.cc1 ? 
1 : 3, hw_res.cc1 ?: hw_res.cc3, hw_res.cc6, + hw_res.cc7); } -static char* acpi_cstate_method_name[] = -{ - "NONE", - "SYSIO", - "FFH", - "HALT" -}; +static char *acpi_cstate_method_name[] = {"NONE", "SYSIO", "FFH", "HALT"}; -static uint64_t get_stime_tick(void) { return (uint64_t)NOW(); } -static uint64_t stime_ticks_elapsed(uint64_t t1, uint64_t t2) { return t2 - t1; } -static uint64_t stime_tick_to_ns(uint64_t ticks) { return ticks; } +static uint64_t get_stime_tick(void) +{ + return (uint64_t)NOW(); +} +static uint64_t stime_ticks_elapsed(uint64_t t1, uint64_t t2) +{ + return t2 - t1; +} +static uint64_t stime_tick_to_ns(uint64_t ticks) +{ + return ticks; +} -static uint64_t get_acpi_pm_tick(void) { return (uint64_t)inl(pmtmr_ioport); } +static uint64_t get_acpi_pm_tick(void) +{ + return (uint64_t)inl(pmtmr_ioport); +} static uint64_t acpi_pm_ticks_elapsed(uint64_t t1, uint64_t t2) { if ( t2 >= t1 ) @@ -286,19 +297,19 @@ static uint64_t acpi_pm_ticks_elapsed(uint64_t t1, uint64_t t2) else if ( !(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) ) return (((0x00FFFFFF - t1) + t2 + 1) & 0x00FFFFFF); else - return ((0xFFFFFFFF - t1) + t2 +1); + return ((0xFFFFFFFF - t1) + t2 + 1); } uint64_t (*__read_mostly cpuidle_get_tick)(void) = get_acpi_pm_tick; -static uint64_t (*__read_mostly ticks_elapsed)(uint64_t, uint64_t) - = acpi_pm_ticks_elapsed; +static uint64_t (*__read_mostly ticks_elapsed)(uint64_t, uint64_t) = + acpi_pm_ticks_elapsed; static void print_acpi_power(uint32_t cpu, struct acpi_processor_power *power) { uint64_t idle_res = 0, idle_usage = 0; uint64_t last_state_update_tick, current_tick, current_stime; - uint64_t usage[ACPI_PROCESSOR_MAX_POWER] = { 0 }; - uint64_t res_tick[ACPI_PROCESSOR_MAX_POWER] = { 0 }; + uint64_t usage[ACPI_PROCESSOR_MAX_POWER] = {0}; + uint64_t res_tick[ACPI_PROCESSOR_MAX_POWER] = {0}; unsigned int i; signed int last_state_idx; @@ -321,8 +332,8 @@ static void print_acpi_power(uint32_t cpu, struct acpi_processor_power *power) if ( last_state_idx >= 0 ) { - res_tick[last_state_idx] += ticks_elapsed(last_state_update_tick, - current_tick); + res_tick[last_state_idx] += + ticks_elapsed(last_state_update_tick, current_tick); usage[last_state_idx]++; } @@ -335,12 +346,13 @@ static void print_acpi_power(uint32_t cpu, struct acpi_processor_power *power) printk("C%d:\t", i); printk("type[C%d] ", power->states[i].type); printk("latency[%03d] ", power->states[i].latency); - printk("usage[%08"PRIu64"] ", usage[i]); - printk("method[%5s] ", acpi_cstate_method_name[power->states[i].entry_method]); - printk("duration[%"PRIu64"]\n", tick_to_ns(res_tick[i])); + printk("usage[%08" PRIu64 "] ", usage[i]); + printk("method[%5s] ", + acpi_cstate_method_name[power->states[i].entry_method]); + printk("duration[%" PRIu64 "]\n", tick_to_ns(res_tick[i])); } printk((last_state_idx == 0) ? " *" : " "); - printk("C0:\tusage[%08"PRIu64"] duration[%"PRIu64"]\n", + printk("C0:\tusage[%08" PRIu64 "] duration[%" PRIu64 "]\n", usage[0] + idle_usage, current_stime - idle_res); print_hw_residencies(cpu); @@ -351,8 +363,8 @@ static void dump_cx(unsigned char key) unsigned int cpu; printk("'%c' pressed -> printing ACPI Cx structures\n", key); - for_each_online_cpu ( cpu ) - if (processor_powers[cpu]) + for_each_online_cpu (cpu) + if ( processor_powers[cpu] ) { print_acpi_power(cpu, processor_powers[cpu]); process_pending_softirqs(); @@ -381,7 +393,7 @@ void cpuidle_wakeup_mwait(cpumask_t *mask) cpumask_and(&target, mask, &cpuidle_mwait_flags); /* CPU is MWAITing on the cpuidle_mwait_wakeup flag. 
*/ - for_each_cpu(cpu, &target) + for_each_cpu (cpu, &target) mwait_wakeup(cpu) = 0; cpumask_andnot(mask, mask, &target); @@ -442,7 +454,7 @@ static void acpi_idle_do_entry(struct acpi_processor_cx *cx) { struct cpu_info *info = get_cpu_info(); - switch ( cx->entry_method ) + switch (cx->entry_method) { case ACPI_CSTATE_EM_FFH: /* Call into architectural FFH based C-state */ @@ -482,17 +494,18 @@ static int acpi_idle_bm_check(void) return bm_status; } -static struct { +static struct +{ spinlock_t lock; unsigned int count; -} c3_cpu_status = { .lock = SPIN_LOCK_UNLOCKED }; +} c3_cpu_status = {.lock = SPIN_LOCK_UNLOCKED}; void trace_exit_reason(u32 *irq_traced) { if ( unlikely(tb_init_done) ) { int i, curbit; - u32 irr_status[8] = { 0 }; + u32 irr_status[8] = {0}; /* Get local apic IRR register */ for ( i = 0; i < 8; i++ ) @@ -502,17 +515,18 @@ void trace_exit_reason(u32 *irq_traced) while ( i < 4 && curbit < 256 ) { irq_traced[i++] = curbit; - curbit = find_next_bit((const unsigned long *)irr_status, 256, curbit + 1); + curbit = find_next_bit((const unsigned long *)irr_status, 256, + curbit + 1); } } } /* - * "AAJ72. EOI Transaction May Not be Sent if Software Enters Core C6 During + * "AAJ72. EOI Transaction May Not be Sent if Software Enters Core C6 During * an Interrupt Service Routine" - * - * There was an errata with some Core i7 processors that an EOI transaction - * may not be sent if software enters core C6 during an interrupt service + * + * There was an errata with some Core i7 processors that an EOI transaction + * may not be sent if software enters core C6 during an interrupt service * routine. So we don't enter deep Cx state if there is an EOI pending. */ static bool errata_c6_eoi_workaround(void) @@ -544,8 +558,8 @@ void update_last_cx_stat(struct acpi_processor_power *power, } void update_idle_stats(struct acpi_processor_power *power, - struct acpi_processor_cx *cx, - uint64_t before, uint64_t after) + struct acpi_processor_cx *cx, uint64_t before, + uint64_t after) { int64_t sleep_ticks = ticks_elapsed(before, after); /* Interrupts are disabled */ @@ -571,7 +585,7 @@ static void acpi_processor_idle(void) int next_state; uint64_t t1, t2 = 0; u32 exp = 0, pred = 0; - u32 irq_traced[4] = { 0 }; + u32 irq_traced[4] = {0}; if ( max_cstate > 0 && power && !sched_has_urgent_vcpu() && (next_state = cpuidle_current_governor->select(power)) > 0 ) @@ -622,13 +636,12 @@ static void acpi_processor_idle(void) if ( (cx->type == ACPI_STATE_C3) && errata_c6_eoi_workaround() ) cx = power->safe_state; - /* * Sleep: * ------ * Invoke the current Cx state to put the processor to sleep. */ - switch ( cx->type ) + switch (cx->type) { case ACPI_STATE_C1: case ACPI_STATE_C2: @@ -647,8 +660,8 @@ static void acpi_processor_idle(void) t2 = cpuidle_get_tick(); trace_exit_reason(irq_traced); /* Trace cpu idle exit */ - TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2, - irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]); + TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2, irq_traced[0], + irq_traced[1], irq_traced[2], irq_traced[3]); /* Update statistics */ update_idle_stats(power, cx, t1, t2); /* Re-enable interrupts */ @@ -658,7 +671,7 @@ static void acpi_processor_idle(void) case ACPI_STATE_C3: /* - * Before invoking C3, be aware that TSC/APIC timer may be + * Before invoking C3, be aware that TSC/APIC timer may be * stopped by H/W. Without carefully handling of TSC/APIC stop issues, * deep C state can't work correctly. 
*/ @@ -706,8 +719,8 @@ static void acpi_processor_idle(void) /* Invoke C3 */ acpi_idle_do_entry(cx); - if ( (cx->type == ACPI_STATE_C3) && - power->flags.bm_check && power->flags.bm_control ) + if ( (cx->type == ACPI_STATE_C3) && power->flags.bm_check && + power->flags.bm_control ) { /* Enable bus master arbitration */ spin_lock(&c3_cpu_status.lock); @@ -723,8 +736,8 @@ static void acpi_processor_idle(void) cstate_restore_tsc(); trace_exit_reason(irq_traced); /* Trace cpu idle exit */ - TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2, - irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]); + TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2, irq_traced[0], irq_traced[1], + irq_traced[2], irq_traced[3]); /* Update statistics */ update_idle_stats(power, cx, t1, t2); @@ -781,11 +794,11 @@ void acpi_dead_idle(void) { /* * 1. The CLFLUSH is a workaround for erratum AAI65 for - * the Xeon 7400 series. + * the Xeon 7400 series. * 2. The WBINVD is insufficient due to the spurious-wakeup * case where we return around the loop. - * 3. Unlike wbinvd, clflush is a light weight but not serializing - * instruction, hence memory fence is necessary to make sure all + * 3. Unlike wbinvd, clflush is a light weight but not serializing + * instruction, hence memory fence is necessary to make sure all * load/store visible before flush cache line. */ mb(); @@ -874,8 +887,8 @@ static int acpi_processor_ffh_cstate_probe(xen_processor_cx_t *cx) cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); if ( opt_cpu_info ) - printk(XENLOG_DEBUG "cpuid.MWAIT[eax=%x ebx=%x ecx=%x edx=%x]\n", - eax, ebx, ecx, edx); + printk(XENLOG_DEBUG "cpuid.MWAIT[eax=%x ebx=%x ecx=%x edx=%x]\n", eax, + ebx, ecx, edx); /* Check whether this particular cx_type (in CST) is supported or not */ cstate_type = (cx->reg.address >> MWAIT_SUBSTATE_SIZE) + 1; @@ -905,7 +918,8 @@ static int acpi_processor_ffh_cstate_probe(xen_processor_cx_t *cx) * * This routine is called only after all the CPUs are online */ -static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags) +static void +acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags) { struct cpuinfo_x86 *c = ¤t_cpu_data; @@ -930,19 +944,19 @@ static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flag * P4, Core and beyond CPUs */ if ( c->x86_vendor == X86_VENDOR_INTEL && - (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)) ) - flags->bm_control = 0; + (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)) ) + flags->bm_control = 0; } -#define VENDOR_INTEL (1) -#define NATIVE_CSTATE_BEYOND_HALT (2) +#define VENDOR_INTEL (1) +#define NATIVE_CSTATE_BEYOND_HALT (2) static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx) { static int bm_check_flag = -1; static int bm_control_flag = -1; - switch ( cx->reg.space_id ) + switch (cx->reg.space_id) { case ACPI_ADR_SPACE_SYSTEM_IO: if ( cx->reg.address == 0 ) @@ -950,7 +964,7 @@ static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx) break; case ACPI_ADR_SPACE_FIXED_HARDWARE: - if ( cx->reg.bit_width != VENDOR_INTEL || + if ( cx->reg.bit_width != VENDOR_INTEL || cx->reg.bit_offset != NATIVE_CSTATE_BEYOND_HALT ) return -EINVAL; @@ -963,7 +977,7 @@ static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx) return -ENODEV; } - switch ( cx->type ) + switch (cx->type) { case ACPI_STATE_C2: if ( local_apic_timer_c2_ok ) @@ -991,15 +1005,15 @@ static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx) if ( power->flags.has_cst != 
1 ) { /* bus mastering control is necessary */ - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "C3 support requires BM control\n")); + ACPI_DEBUG_PRINT( + (ACPI_DB_INFO, "C3 support requires BM control\n")); return -EINVAL; } else { /* Here we enter C3 without bus mastering */ - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "C3 support without BM control\n")); + ACPI_DEBUG_PRINT( + (ACPI_DB_INFO, "C3 support without BM control\n")); } } /* @@ -1020,8 +1034,8 @@ static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx) if ( !(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD) ) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Cache invalidation should work properly" - " for C3 to be enabled on SMP systems\n")); + "Cache invalidation should work properly" + " for C3 to be enabled on SMP systems\n")); return -EINVAL; } } @@ -1042,16 +1056,15 @@ static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx) static unsigned int latency_factor = 2; integer_param("idle_latency_factor", latency_factor); -static void set_cx( - struct acpi_processor_power *acpi_power, - xen_processor_cx_t *xen_cx) +static void set_cx(struct acpi_processor_power *acpi_power, + xen_processor_cx_t *xen_cx) { struct acpi_processor_cx *cx; if ( check_cx(acpi_power, xen_cx) != 0 ) return; - switch ( xen_cx->type ) + switch (xen_cx->type) { case ACPI_STATE_C1: cx = &acpi_power->states[1]; @@ -1059,9 +1072,9 @@ static void set_cx( default: if ( acpi_power->count >= ACPI_PROCESSOR_MAX_POWER ) { - case ACPI_STATE_C0: - printk(XENLOG_WARNING "CPU%u: C%d data ignored\n", - acpi_power->cpu, xen_cx->type); + case ACPI_STATE_C0: + printk(XENLOG_WARNING "CPU%u: C%d data ignored\n", acpi_power->cpu, + xen_cx->type); return; } cx = &acpi_power->states[acpi_power->count]; @@ -1071,7 +1084,7 @@ static void set_cx( cx->address = xen_cx->reg.address; - switch ( xen_cx->reg.space_id ) + switch (xen_cx->reg.space_id) { case ACPI_ADR_SPACE_FIXED_HARDWARE: if ( xen_cx->reg.bit_width == VENDOR_INTEL && @@ -1126,7 +1139,7 @@ int get_cpu_id(u32 acpi_id) static void print_cx_pminfo(uint32_t cpu, struct xen_processor_power *power) { XEN_GUEST_HANDLE(xen_processor_cx_t) states; - xen_processor_cx_t state; + xen_processor_cx_t state; XEN_GUEST_HANDLE(xen_processor_csd_t) csd; xen_processor_csd_t dp; uint32_t i; @@ -1137,27 +1150,27 @@ static void print_cx_pminfo(uint32_t cpu, struct xen_processor_power *power) "\t pwr_setup_done[%d], bm_rld_set[%d]\n", power->flags.bm_control, power->flags.bm_check, power->flags.has_cst, power->flags.power_setup_done, power->flags.bm_rld_set); - + states = power->states; - + for ( i = 0; i < power->count; i++ ) { if ( unlikely(copy_from_guest_offset(&state, states, i, 1)) ) return; - + printk("\tstates[%d]:\n", i); printk("\t\treg.space_id = %#x\n", state.reg.space_id); printk("\t\treg.bit_width = %#x\n", state.reg.bit_width); printk("\t\treg.bit_offset = %#x\n", state.reg.bit_offset); printk("\t\treg.access_size = %#x\n", state.reg.access_size); - printk("\t\treg.address = %#"PRIx64"\n", state.reg.address); + printk("\t\treg.address = %#" PRIx64 "\n", state.reg.address); printk("\t\ttype = %d\n", state.type); printk("\t\tlatency = %d\n", state.latency); printk("\t\tpower = %d\n", state.power); csd = state.dp; printk("\t\tdp(@0x%p)\n", csd.p); - + if ( csd.p != NULL ) { if ( unlikely(copy_from_guest(&dp, csd, 1)) ) @@ -1232,7 +1245,7 @@ long set_cx_pminfo(uint32_t acpi_id, struct xen_processor_power *power) dead_idle = acpi_dead_idle; } - + return 0; } @@ -1246,8 +1259,8 @@ int pmstat_get_cx_stat(uint32_t cpuid, struct 
pm_cx_stat *stat) struct acpi_processor_power *power = processor_powers[cpuid]; uint64_t idle_usage = 0, idle_res = 0; uint64_t last_state_update_tick, current_stime, current_tick; - uint64_t usage[ACPI_PROCESSOR_MAX_POWER] = { 0 }; - uint64_t res[ACPI_PROCESSOR_MAX_POWER] = { 0 }; + uint64_t usage[ACPI_PROCESSOR_MAX_POWER] = {0}; + uint64_t res[ACPI_PROCESSOR_MAX_POWER] = {0}; unsigned int i, nr, nr_pc = 0, nr_cc = 0; if ( power == NULL ) @@ -1296,8 +1309,8 @@ int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat) if ( last_state_idx >= 0 ) { usage[last_state_idx]++; - res[last_state_idx] += ticks_elapsed(last_state_update_tick, - current_tick); + res[last_state_idx] += + ticks_elapsed(last_state_update_tick, current_tick); stat->last = last_state_idx; } else @@ -1312,12 +1325,13 @@ int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat) get_hw_residencies(cpuid, &hw_res); -#define PUT_xC(what, n) do { \ - if ( stat->nr_##what >= n && \ +#define PUT_xC(what, n) \ + do { \ + if ( stat->nr_##what >= n && \ copy_to_guest_offset(stat->what, n - 1, &hw_res.what##n, 1) ) \ - return -EFAULT; \ - if ( hw_res.what##n ) \ - nr_##what = n; \ + return -EFAULT; \ + if ( hw_res.what##n ) \ + nr_##what = n; \ } while ( 0 ) #define PUT_PC(n) PUT_xC(pc, n) PUT_PC(2); @@ -1374,14 +1388,14 @@ bool cpuidle_using_deep_cstate(void) return xen_cpuidle && max_cstate > (local_apic_timer_c2_ok ? 2 : 1); } -static int cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; /* Only hook on CPU_ONLINE because a dead cpu may utilize the info to * to enter deep C-state */ - switch ( action ) + switch (action) { case CPU_ONLINE: (void)cpuidle_init_cpu(cpu); @@ -1393,9 +1407,7 @@ static int cpu_callback( return NOTIFY_DONE; } -static struct notifier_block cpu_nfb = { - .notifier_call = cpu_callback -}; +static struct notifier_block cpu_nfb = {.notifier_call = cpu_callback}; static int __init cpuidle_presmp_init(void) { @@ -1410,4 +1422,3 @@ static int __init cpuidle_presmp_init(void) return 0; } presmp_initcall(cpuidle_presmp_init); - diff --git a/xen/arch/x86/acpi/cpufreq/cpufreq.c b/xen/arch/x86/acpi/cpufreq/cpufreq.c index 844ab85cd3..7b8ac8f377 100644 --- a/xen/arch/x86/acpi/cpufreq/cpufreq.c +++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c @@ -43,13 +43,14 @@ #include #include -enum { +enum +{ UNDEFINED_CAPABLE = 0, SYSTEM_INTEL_MSR_CAPABLE, SYSTEM_IO_CAPABLE, }; -#define INTEL_MSR_RANGE (0xffffull) +#define INTEL_MSR_RANGE (0xffffull) struct acpi_cpufreq_data *cpufreq_drv_data[NR_CPUS]; @@ -60,8 +61,8 @@ static int check_est_cpu(unsigned int cpuid) { struct cpuinfo_x86 *cpu = &cpu_data[cpuid]; - if (cpu->x86_vendor != X86_VENDOR_INTEL || - !cpu_has(cpu, X86_FEATURE_EIST)) + if ( cpu->x86_vendor != X86_VENDOR_INTEL || + !cpu_has(cpu, X86_FEATURE_EIST) ) return 0; return 1; @@ -74,8 +75,9 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) perf = data->acpi_data; - for (i=0; istate_count; i++) { - if (value == perf->states[i].status) + for ( i = 0; i < perf->state_count; i++ ) + { + if ( value == perf->states[i].status ) return data->freq_table[i].frequency; } return 0; @@ -89,8 +91,9 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) msr &= INTEL_MSR_RANGE; perf = data->acpi_data; - for (i=0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { - if (msr == perf->states[data->freq_table[i].index].status) + 
for ( i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++ ) + { + if ( msr == perf->states[data->freq_table[i].index].status ) return data->freq_table[i].frequency; } return data->freq_table[0].frequency; @@ -98,7 +101,8 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data) { - switch (data->arch_cpu_flags) { + switch (data->arch_cpu_flags) + { case SYSTEM_INTEL_MSR_CAPABLE: return extract_msr(val, data); case SYSTEM_IO_CAPABLE: @@ -108,11 +112,13 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data) } } -struct msr_addr { +struct msr_addr +{ u32 reg; }; -struct io_addr { +struct io_addr +{ u16 port; u8 bit_width; }; @@ -122,7 +128,8 @@ typedef union { struct io_addr io; } drv_addr_union; -struct drv_cmd { +struct drv_cmd +{ unsigned int type; const cpumask_t *mask; drv_addr_union addr; @@ -135,13 +142,14 @@ static void do_drv_read(void *drvcmd) cmd = (struct drv_cmd *)drvcmd; - switch (cmd->type) { + switch (cmd->type) + { case SYSTEM_INTEL_MSR_CAPABLE: rdmsrl(cmd->addr.msr.reg, cmd->val); break; case SYSTEM_IO_CAPABLE: - acpi_os_read_port((acpi_io_address)cmd->addr.io.port, - &cmd->val, (u32)cmd->addr.io.bit_width); + acpi_os_read_port((acpi_io_address)cmd->addr.io.port, &cmd->val, + (u32)cmd->addr.io.bit_width); break; default: break; @@ -155,16 +163,17 @@ static void do_drv_write(void *drvcmd) cmd = (struct drv_cmd *)drvcmd; - switch (cmd->type) { + switch (cmd->type) + { case SYSTEM_INTEL_MSR_CAPABLE: rdmsrl(cmd->addr.msr.reg, msr_content); - msr_content = (msr_content & ~INTEL_MSR_RANGE) - | (cmd->val & INTEL_MSR_RANGE); + msr_content = + (msr_content & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); wrmsrl(cmd->addr.msr.reg, msr_content); break; case SYSTEM_IO_CAPABLE: - acpi_os_write_port((acpi_io_address)cmd->addr.io.port, - cmd->val, (u32)cmd->addr.io.bit_width); + acpi_os_write_port((acpi_io_address)cmd->addr.io.port, cmd->val, + (u32)cmd->addr.io.bit_width); break; default: break; @@ -178,7 +187,7 @@ static void drv_read(struct drv_cmd *cmd) ASSERT(cpumask_weight(cmd->mask) == 1); /* to reduce IPI for the sake of performance */ - if (likely(cpumask_test_cpu(smp_processor_id(), cmd->mask))) + if ( likely(cpumask_test_cpu(smp_processor_id(), cmd->mask)) ) do_drv_read((void *)cmd); else on_selected_cpus(cmd->mask, do_drv_read, cmd, 1); @@ -186,7 +195,7 @@ static void drv_read(struct drv_cmd *cmd) static void drv_write(struct drv_cmd *cmd) { - if (cpumask_equal(cmd->mask, cpumask_of(smp_processor_id()))) + if ( cpumask_equal(cmd->mask, cpumask_of(smp_processor_id())) ) do_drv_write((void *)cmd); else on_selected_cpus(cmd->mask, do_drv_write, cmd, 1); @@ -199,19 +208,20 @@ static u32 get_cur_val(const cpumask_t *mask) struct drv_cmd cmd; unsigned int cpu = smp_processor_id(); - if (unlikely(cpumask_empty(mask))) + if ( unlikely(cpumask_empty(mask)) ) return 0; - if (!cpumask_test_cpu(cpu, mask)) + if ( !cpumask_test_cpu(cpu, mask) ) cpu = cpumask_first(mask); - if (cpu >= nr_cpu_ids || !cpu_online(cpu)) + if ( cpu >= nr_cpu_ids || !cpu_online(cpu) ) return 0; policy = per_cpu(cpufreq_cpu_policy, cpu); - if (!policy || !cpufreq_drv_data[policy->cpu]) - return 0; + if ( !policy || !cpufreq_drv_data[policy->cpu] ) + return 0; - switch (cpufreq_drv_data[policy->cpu]->arch_cpu_flags) { + switch (cpufreq_drv_data[policy->cpu]->arch_cpu_flags) + { case SYSTEM_INTEL_MSR_CAPABLE: cmd.type = SYSTEM_INTEL_MSR_CAPABLE; cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; @@ -232,9 +242,11 
@@ static u32 get_cur_val(const cpumask_t *mask) return cmd.val; } -struct perf_pair { +struct perf_pair +{ union { - struct { + struct + { uint32_t lo; uint32_t hi; } split; @@ -267,16 +279,16 @@ static void read_measured_perf_ctrs(void *_readin) */ unsigned int get_measured_perf(unsigned int cpu, unsigned int flag) { - struct cpufreq_policy *policy; + struct cpufreq_policy *policy; struct perf_pair readin, cur, *saved; unsigned int perf_percent; unsigned int retval; - if (!cpu_online(cpu)) + if ( !cpu_online(cpu) ) return 0; policy = per_cpu(cpufreq_cpu_policy, cpu); - if (!policy || !policy->aperf_mperf) + if ( !policy || !policy->aperf_mperf ) return 0; switch (flag) @@ -295,11 +307,13 @@ unsigned int get_measured_perf(unsigned int cpu, unsigned int flag) return 0; } - if (cpu == smp_processor_id()) { + if ( cpu == smp_processor_id() ) + { read_measured_perf_ctrs((void *)&readin); - } else { - on_selected_cpus(cpumask_of(cpu), read_measured_perf_ctrs, - &readin, 1); + } + else + { + on_selected_cpus(cpumask_of(cpu), read_measured_perf_ctrs, &readin, 1); } cur.aperf.whole = readin.aperf.whole - saved->aperf.whole; @@ -307,13 +321,14 @@ unsigned int get_measured_perf(unsigned int cpu, unsigned int flag) saved->aperf.whole = readin.aperf.whole; saved->mperf.whole = readin.mperf.whole; - if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) { + if ( unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole) ) + { int shift_count = 7; cur.aperf.whole >>= shift_count; cur.mperf.whole >>= shift_count; } - if (cur.aperf.whole && cur.mperf.whole) + if ( cur.aperf.whole && cur.mperf.whole ) perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole; else perf_percent = 0; @@ -329,16 +344,16 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) struct acpi_cpufreq_data *data; unsigned int freq; - if (!cpu_online(cpu)) + if ( !cpu_online(cpu) ) return 0; policy = per_cpu(cpufreq_cpu_policy, cpu); - if (!policy) + if ( !policy ) return 0; data = cpufreq_drv_data[policy->cpu]; - if (unlikely(data == NULL || - data->acpi_data == NULL || data->freq_table == NULL)) + if ( unlikely(data == NULL || data->acpi_data == NULL || + data->freq_table == NULL) ) return 0; freq = extract_freq(get_cur_val(cpumask_of(cpu)), data); @@ -357,9 +372,10 @@ static void feature_detect(void *info) } eax = cpuid_eax(6); - if (eax & 0x2) { + if ( eax & 0x2 ) + { policy->turbo = CPUFREQ_TURBO_ENABLED; - if (cpufreq_verbose) + if ( cpufreq_verbose ) printk(XENLOG_INFO "CPU%u: Turbo Mode detected and enabled\n", smp_processor_id()); } @@ -371,9 +387,10 @@ static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq, unsigned int cur_freq; unsigned int i; - for (i=0; i<100; i++) { + for ( i = 0; i < 100; i++ ) + { cur_freq = extract_freq(get_cur_val(mask), data); - if (cur_freq == freq) + if ( cur_freq == freq ) return 1; udelay(10); } @@ -388,55 +405,56 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, struct cpufreq_freqs freqs; cpumask_t online_policy_cpus; struct drv_cmd cmd; - unsigned int next_state = 0; /* Index into freq_table */ + unsigned int next_state = 0; /* Index into freq_table */ unsigned int next_perf_state = 0; /* Index into perf table */ unsigned int j; int result = 0; - if (unlikely(data == NULL || - data->acpi_data == NULL || data->freq_table == NULL)) { + if ( unlikely(data == NULL || data->acpi_data == NULL || + data->freq_table == NULL) ) + { return -ENODEV; } - if (policy->turbo == CPUFREQ_TURBO_DISABLED) - if (target_freq > policy->cpuinfo.second_max_freq) + if ( 
policy->turbo == CPUFREQ_TURBO_DISABLED ) + if ( target_freq > policy->cpuinfo.second_max_freq ) target_freq = policy->cpuinfo.second_max_freq; perf = data->acpi_data; - result = cpufreq_frequency_table_target(policy, - data->freq_table, - target_freq, - relation, &next_state); - if (unlikely(result)) + result = cpufreq_frequency_table_target(policy, data->freq_table, + target_freq, relation, &next_state); + if ( unlikely(result) ) return -ENODEV; cpumask_and(&online_policy_cpus, &cpu_online_map, policy->cpus); next_perf_state = data->freq_table[next_state].index; - if (perf->state == next_perf_state) { - if (unlikely(policy->resume)) + if ( perf->state == next_perf_state ) + { + if ( unlikely(policy->resume) ) policy->resume = 0; else return 0; } - switch (data->arch_cpu_flags) { + switch (data->arch_cpu_flags) + { case SYSTEM_INTEL_MSR_CAPABLE: cmd.type = SYSTEM_INTEL_MSR_CAPABLE; cmd.addr.msr.reg = MSR_IA32_PERF_CTL; - cmd.val = (u32) perf->states[next_perf_state].control; + cmd.val = (u32)perf->states[next_perf_state].control; break; case SYSTEM_IO_CAPABLE: cmd.type = SYSTEM_IO_CAPABLE; cmd.addr.io.port = perf->control_register.address; cmd.addr.io.bit_width = perf->control_register.bit_width; - cmd.val = (u32) perf->states[next_perf_state].control; + cmd.val = (u32)perf->states[next_perf_state].control; break; default: return -ENODEV; } - if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) + if ( policy->shared_type != CPUFREQ_SHARED_TYPE_ANY ) cmd.mask = &online_policy_cpus; else cmd.mask = cpumask_of(policy->cpu); @@ -446,12 +464,13 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, drv_write(&cmd); - if (acpi_pstate_strict && !check_freqs(cmd.mask, freqs.new, data)) { + if ( acpi_pstate_strict && !check_freqs(cmd.mask, freqs.new, data) ) + { printk(KERN_WARNING "Fail transfer to new freq %d\n", freqs.new); return -EAGAIN; } - for_each_cpu(j, &online_policy_cpus) + for_each_cpu (j, &online_policy_cpus) cpufreq_statistic_update(j, perf->state, next_perf_state); perf->state = next_perf_state; @@ -465,48 +484,52 @@ static int acpi_cpufreq_verify(struct cpufreq_policy *policy) struct acpi_cpufreq_data *data; struct processor_performance *perf; - if (!policy || !(data = cpufreq_drv_data[policy->cpu]) || - !processor_pminfo[policy->cpu]) + if ( !policy || !(data = cpufreq_drv_data[policy->cpu]) || + !processor_pminfo[policy->cpu] ) return -EINVAL; perf = &processor_pminfo[policy->cpu]->perf; - cpufreq_verify_within_limits(policy, 0, - perf->states[perf->platform_limit].core_frequency * 1000); + cpufreq_verify_within_limits( + policy, 0, perf->states[perf->platform_limit].core_frequency * 1000); return cpufreq_frequency_table_verify(policy, data->freq_table); } -static unsigned long -acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) +static unsigned long acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, + unsigned int cpu) { struct processor_performance *perf = data->acpi_data; - if (cpu_khz) { + if ( cpu_khz ) + { /* search the closest match to cpu_khz */ unsigned int i; unsigned long freq; unsigned long freqn = perf->states[0].core_frequency * 1000; - for (i=0; i<(perf->state_count-1); i++) { + for ( i = 0; i < (perf->state_count - 1); i++ ) + { freq = freqn; - freqn = perf->states[i+1].core_frequency * 1000; - if ((2 * cpu_khz) > (freqn + freq)) { + freqn = perf->states[i + 1].core_frequency * 1000; + if ( (2 * cpu_khz) > (freqn + freq) ) + { perf->state = i; return freq; } } - perf->state = perf->state_count-1; + perf->state = 
perf->state_count - 1; return freqn; - } else { + } + else + { /* assume CPU is at P0... */ perf->state = 0; return perf->states[0].core_frequency * 1000; } } -static int -acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) +static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) { unsigned int i; unsigned int valid_states = 0; @@ -517,7 +540,7 @@ acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) struct processor_performance *perf; data = xzalloc(struct acpi_cpufreq_data); - if (!data) + if ( !data ) return -ENOMEM; cpufreq_drv_data[cpu] = data; @@ -527,18 +550,20 @@ acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) perf = data->acpi_data; policy->shared_type = perf->shared_type; - switch (perf->control_register.space_id) { + switch (perf->control_register.space_id) + { case ACPI_ADR_SPACE_SYSTEM_IO: - if (cpufreq_verbose) + if ( cpufreq_verbose ) printk("xen_pminfo: @acpi_cpufreq_cpu_init," "SYSTEM IO addr space\n"); data->arch_cpu_flags = SYSTEM_IO_CAPABLE; break; case ACPI_ADR_SPACE_FIXED_HARDWARE: - if (cpufreq_verbose) + if ( cpufreq_verbose ) printk("xen_pminfo: @acpi_cpufreq_cpu_init," "HARDWARE addr space\n"); - if (!check_est_cpu(cpu)) { + if ( !check_est_cpu(cpu) ) + { result = -ENODEV; goto err_unreg; } @@ -549,28 +574,31 @@ acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) goto err_unreg; } - data->freq_table = xmalloc_array(struct cpufreq_frequency_table, - (perf->state_count+1)); - if (!data->freq_table) { + data->freq_table = + xmalloc_array(struct cpufreq_frequency_table, (perf->state_count + 1)); + if ( !data->freq_table ) + { result = -ENOMEM; goto err_unreg; } /* detect transition latency */ policy->cpuinfo.transition_latency = 0; - for (i=0; istate_count; i++) { - if ((perf->states[i].transition_latency * 1000) > - policy->cpuinfo.transition_latency) + for ( i = 0; i < perf->state_count; i++ ) + { + if ( (perf->states[i].transition_latency * 1000) > + policy->cpuinfo.transition_latency ) policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000; } - policy->governor = cpufreq_opt_governor ? 
: CPUFREQ_DEFAULT_GOVERNOR; + policy->governor = cpufreq_opt_governor ?: CPUFREQ_DEFAULT_GOVERNOR; /* table init */ - for (i=0; istate_count; i++) { - if (i>0 && perf->states[i].core_frequency >= - data->freq_table[valid_states-1].frequency / 1000) + for ( i = 0; i < perf->state_count; i++ ) + { + if ( i > 0 && perf->states[i].core_frequency >= + data->freq_table[valid_states - 1].frequency / 1000 ) continue; data->freq_table[valid_states].index = i; @@ -582,10 +610,11 @@ acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) perf->state = 0; result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); - if (result) + if ( result ) goto err_freqfree; - switch (perf->control_register.space_id) { + switch (perf->control_register.space_id) + { case ACPI_ADR_SPACE_SYSTEM_IO: /* Current speed is unknown and not detectable by IO port */ policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); @@ -600,7 +629,7 @@ acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) /* Check for APERF/MPERF support in hardware * also check for boost support */ - if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) + if ( c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6 ) on_selected_cpus(cpumask_of(cpu), feature_detect, policy, 1); /* @@ -624,7 +653,8 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = cpufreq_drv_data[policy->cpu]; - if (data) { + if ( data ) + { cpufreq_drv_data[policy->cpu] = NULL; xfree(data->freq_table); xfree(data); @@ -634,22 +664,22 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) } static const struct cpufreq_driver __initconstrel acpi_cpufreq_driver = { - .name = "acpi-cpufreq", + .name = "acpi-cpufreq", .verify = acpi_cpufreq_verify, .target = acpi_cpufreq_target, - .init = acpi_cpufreq_cpu_init, - .exit = acpi_cpufreq_cpu_exit, + .init = acpi_cpufreq_cpu_init, + .exit = acpi_cpufreq_cpu_exit, }; static int __init cpufreq_driver_init(void) { int ret = 0; - if ((cpufreq_controller == FREQCTL_xen) && - (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) + if ( (cpufreq_controller == FREQCTL_xen) && + (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) ) ret = cpufreq_register_driver(&acpi_cpufreq_driver); - else if ((cpufreq_controller == FREQCTL_xen) && - (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)) + else if ( (cpufreq_controller == FREQCTL_xen) && + (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) ) ret = powernow_register_driver(); return ret; @@ -661,8 +691,8 @@ int cpufreq_cpu_init(unsigned int cpuid) int ret; /* Currently we only handle Intel and AMD processor */ - if ( (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ) || - (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ) ) + if ( (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) || + (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) ) ret = cpufreq_add_cpu(cpuid); else ret = -EFAULT; diff --git a/xen/arch/x86/acpi/cpufreq/powernow.c b/xen/arch/x86/acpi/cpufreq/powernow.c index 025b37da03..0587dfbc8f 100644 --- a/xen/arch/x86/acpi/cpufreq/powernow.c +++ b/xen/arch/x86/acpi/cpufreq/powernow.c @@ -37,20 +37,20 @@ #include #include -#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007 -#define CPB_CAPABLE 0x00000200 -#define USE_HW_PSTATE 0x00000080 -#define HW_PSTATE_MASK 0x00000007 -#define HW_PSTATE_VALID_MASK 0x80000000 -#define HW_PSTATE_MAX_MASK 0x000000f0 -#define HW_PSTATE_MAX_SHIFT 4 -#define MSR_PSTATE_DEF_BASE 0xc0010064 /* base of Pstate MSRs */ -#define MSR_PSTATE_STATUS 0xc0010063 /* Pstate Status MSR */ -#define MSR_PSTATE_CTRL 0xc0010062 /* Pstate control MSR 
*/ -#define MSR_PSTATE_CUR_LIMIT 0xc0010061 /* pstate current limit MSR */ -#define MSR_HWCR_CPBDIS_MASK 0x02000000ULL - -#define ARCH_CPU_FLAG_RESUME 1 +#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007 +#define CPB_CAPABLE 0x00000200 +#define USE_HW_PSTATE 0x00000080 +#define HW_PSTATE_MASK 0x00000007 +#define HW_PSTATE_VALID_MASK 0x80000000 +#define HW_PSTATE_MAX_MASK 0x000000f0 +#define HW_PSTATE_MAX_SHIFT 4 +#define MSR_PSTATE_DEF_BASE 0xc0010064 /* base of Pstate MSRs */ +#define MSR_PSTATE_STATUS 0xc0010063 /* Pstate Status MSR */ +#define MSR_PSTATE_CTRL 0xc0010062 /* Pstate control MSR */ +#define MSR_PSTATE_CUR_LIMIT 0xc0010061 /* pstate current limit MSR */ +#define MSR_HWCR_CPBDIS_MASK 0x02000000ULL + +#define ARCH_CPU_FLAG_RESUME 1 static void transition_pstate(void *pstate) { @@ -61,24 +61,24 @@ static void update_cpb(void *data) { struct cpufreq_policy *policy = (struct cpufreq_policy *)data; - if (policy->turbo != CPUFREQ_TURBO_UNSUPPORTED) { + if ( policy->turbo != CPUFREQ_TURBO_UNSUPPORTED ) + { uint64_t msr_content; - + rdmsrl(MSR_K8_HWCR, msr_content); - if (policy->turbo == CPUFREQ_TURBO_ENABLED) + if ( policy->turbo == CPUFREQ_TURBO_ENABLED ) msr_content &= ~MSR_HWCR_CPBDIS_MASK; else - msr_content |= MSR_HWCR_CPBDIS_MASK; + msr_content |= MSR_HWCR_CPBDIS_MASK; wrmsrl(MSR_K8_HWCR, msr_content); } } -static int powernow_cpufreq_update (int cpuid, - struct cpufreq_policy *policy) +static int powernow_cpufreq_update(int cpuid, struct cpufreq_policy *policy) { - if (!cpumask_test_cpu(cpuid, &cpu_online_map)) + if ( !cpumask_test_cpu(cpuid, &cpu_online_map) ) return -EINVAL; on_selected_cpus(cpumask_of(cpuid), update_cpb, policy, 1); @@ -87,53 +87,57 @@ static int powernow_cpufreq_update (int cpuid, } static int powernow_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) + unsigned int target_freq, + unsigned int relation) { struct acpi_cpufreq_data *data = cpufreq_drv_data[policy->cpu]; struct processor_performance *perf; - unsigned int next_state; /* Index into freq_table */ + unsigned int next_state; /* Index into freq_table */ unsigned int next_perf_state; /* Index into perf table */ int result; - if (unlikely(data == NULL || - data->acpi_data == NULL || data->freq_table == NULL)) { + if ( unlikely(data == NULL || data->acpi_data == NULL || + data->freq_table == NULL) ) + { return -ENODEV; } perf = data->acpi_data; - result = cpufreq_frequency_table_target(policy, - data->freq_table, - target_freq, - relation, &next_state); - if (unlikely(result)) + result = cpufreq_frequency_table_target(policy, data->freq_table, + target_freq, relation, &next_state); + if ( unlikely(result) ) return result; next_perf_state = data->freq_table[next_state].index; - if (perf->state == next_perf_state) { - if (unlikely(data->arch_cpu_flags & ARCH_CPU_FLAG_RESUME)) + if ( perf->state == next_perf_state ) + { + if ( unlikely(data->arch_cpu_flags & ARCH_CPU_FLAG_RESUME) ) data->arch_cpu_flags &= ~ARCH_CPU_FLAG_RESUME; else return 0; } - if (policy->shared_type == CPUFREQ_SHARED_TYPE_HW && - likely(policy->cpu == smp_processor_id())) { + if ( policy->shared_type == CPUFREQ_SHARED_TYPE_HW && + likely(policy->cpu == smp_processor_id()) ) + { transition_pstate(&next_perf_state); cpufreq_statistic_update(policy->cpu, perf->state, next_perf_state); - } else { + } + else + { cpumask_t online_policy_cpus; unsigned int cpu; cpumask_and(&online_policy_cpus, policy->cpus, &cpu_online_map); - if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || - 
unlikely(policy->cpu != smp_processor_id())) + if ( policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || + unlikely(policy->cpu != smp_processor_id()) ) on_selected_cpus(&online_policy_cpus, transition_pstate, &next_perf_state, 1); else transition_pstate(&next_perf_state); - for_each_cpu(cpu, &online_policy_cpus) + for_each_cpu (cpu, &online_policy_cpus) cpufreq_statistic_update(cpu, perf->state, next_perf_state); } @@ -149,7 +153,7 @@ static void amd_fixup_frequency(struct xen_processor_px *px) int index = px->control & 0x00000007; const struct cpuinfo_x86 *c = ¤t_cpu_data; - if ((c->x86 != 0x10 || c->x86_model >= 10) && c->x86 != 0x11) + if ( (c->x86 != 0x10 || c->x86_model >= 10) && c->x86 != 0x11 ) return; rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); @@ -157,18 +161,19 @@ static void amd_fixup_frequency(struct xen_processor_px *px) * MSR C001_0064+: * Bit 63: PstateEn. Read-write. If set, the P-state is valid. */ - if (!(hi & (1U << 31))) + if ( !(hi & (1U << 31)) ) return; fid = lo & 0x3f; did = (lo >> 6) & 7; - if (c->x86 == 0x10) + if ( c->x86 == 0x10 ) px->core_frequency = (100 * (fid + 16)) >> did; else px->core_frequency = (100 * (fid + 8)) >> did; } -struct amd_cpu_data { +struct amd_cpu_data +{ struct processor_performance *perf; u32 max_hw_pstate; }; @@ -181,10 +186,10 @@ static void get_cpu_data(void *arg) unsigned int i; rdmsrl(MSR_PSTATE_CUR_LIMIT, msr_content); - data->max_hw_pstate = (msr_content & HW_PSTATE_MAX_MASK) >> - HW_PSTATE_MAX_SHIFT; + data->max_hw_pstate = + (msr_content & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; - for (i = 0; i < perf->state_count && i <= data->max_hw_pstate; i++) + for ( i = 0; i < perf->state_count && i <= data->max_hw_pstate; i++ ) amd_fixup_frequency(&perf->states[i]); } @@ -193,14 +198,14 @@ static int powernow_cpufreq_verify(struct cpufreq_policy *policy) struct acpi_cpufreq_data *data; struct processor_performance *perf; - if (!policy || !(data = cpufreq_drv_data[policy->cpu]) || - !processor_pminfo[policy->cpu]) + if ( !policy || !(data = cpufreq_drv_data[policy->cpu]) || + !processor_pminfo[policy->cpu] ) return -EINVAL; perf = &processor_pminfo[policy->cpu]->perf; - cpufreq_verify_within_limits(policy, 0, - perf->states[perf->platform_limit].core_frequency * 1000); + cpufreq_verify_within_limits( + policy, 0, perf->states[perf->platform_limit].core_frequency * 1000); return cpufreq_frequency_table_verify(policy, data->freq_table); } @@ -217,11 +222,11 @@ static void feature_detect(void *info) } edx = cpuid_edx(CPUID_FREQ_VOLT_CAPABILITIES); - if ((edx & CPB_CAPABLE) == CPB_CAPABLE) { + if ( (edx & CPB_CAPABLE) == CPB_CAPABLE ) + { policy->turbo = CPUFREQ_TURBO_ENABLED; - if (cpufreq_verbose) - printk(XENLOG_INFO - "CPU%u: Core Boost/Turbo detected and enabled\n", + if ( cpufreq_verbose ) + printk(XENLOG_INFO "CPU%u: Core Boost/Turbo detected and enabled\n", smp_processor_id()); } } @@ -238,7 +243,7 @@ static int powernow_cpufreq_cpu_init(struct cpufreq_policy *policy) struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; data = xzalloc(struct acpi_cpufreq_data); - if (!data) + if ( !data ) return -ENOMEM; cpufreq_drv_data[cpu] = data; @@ -248,58 +253,68 @@ static int powernow_cpufreq_cpu_init(struct cpufreq_policy *policy) info.perf = perf = data->acpi_data; policy->shared_type = perf->shared_type; - if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || - policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { + if ( policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || + policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ) + { cpumask_set_cpu(cpu, 
policy->cpus); - if (cpumask_weight(policy->cpus) != 1) { + if ( cpumask_weight(policy->cpus) != 1 ) + { printk(XENLOG_WARNING "Unsupported sharing type %d (%u CPUs)\n", policy->shared_type, cpumask_weight(policy->cpus)); result = -ENODEV; goto err_unreg; } - } else { + } + else + { cpumask_copy(policy->cpus, cpumask_of(cpu)); } /* capability check */ - if (perf->state_count <= 1) { + if ( perf->state_count <= 1 ) + { printk("No P-States\n"); result = -ENODEV; goto err_unreg; } - if (perf->control_register.space_id != perf->status_register.space_id) { + if ( perf->control_register.space_id != perf->status_register.space_id ) + { result = -ENODEV; goto err_unreg; } - data->freq_table = xmalloc_array(struct cpufreq_frequency_table, - (perf->state_count+1)); - if (!data->freq_table) { + data->freq_table = + xmalloc_array(struct cpufreq_frequency_table, (perf->state_count + 1)); + if ( !data->freq_table ) + { result = -ENOMEM; goto err_unreg; } /* detect transition latency */ policy->cpuinfo.transition_latency = 0; - for (i=0; istate_count; i++) { - if ((perf->states[i].transition_latency * 1000) > - policy->cpuinfo.transition_latency) + for ( i = 0; i < perf->state_count; i++ ) + { + if ( (perf->states[i].transition_latency * 1000) > + policy->cpuinfo.transition_latency ) policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000; } - policy->governor = cpufreq_opt_governor ? : CPUFREQ_DEFAULT_GOVERNOR; + policy->governor = cpufreq_opt_governor ?: CPUFREQ_DEFAULT_GOVERNOR; on_selected_cpus(cpumask_of(cpu), get_cpu_data, &info, 1); /* table init */ - for (i = 0; i < perf->state_count && i <= info.max_hw_pstate; i++) { - if (i > 0 && perf->states[i].core_frequency >= - data->freq_table[valid_states-1].frequency / 1000) + for ( i = 0; i < perf->state_count && i <= info.max_hw_pstate; i++ ) + { + if ( i > 0 && perf->states[i].core_frequency >= + data->freq_table[valid_states - 1].frequency / 1000 ) continue; - data->freq_table[valid_states].index = perf->states[i].control & HW_PSTATE_MASK; + data->freq_table[valid_states].index = + perf->states[i].control & HW_PSTATE_MASK; data->freq_table[valid_states].frequency = perf->states[i].core_frequency * 1000; valid_states++; @@ -308,12 +323,12 @@ static int powernow_cpufreq_cpu_init(struct cpufreq_policy *policy) perf->state = 0; result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); - if (result) + if ( result ) goto err_freqfree; - if (c->cpuid_level >= 6) + if ( c->cpuid_level >= 6 ) on_selected_cpus(cpumask_of(cpu), feature_detect, policy, 1); - + /* * the first call to ->target() should result in us actually * writing something to the appropriate registers. 
@@ -336,7 +351,8 @@ static int powernow_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = cpufreq_drv_data[policy->cpu]; - if (data) { + if ( data ) + { cpufreq_drv_data[policy->cpu] = NULL; xfree(data->freq_table); xfree(data); @@ -346,30 +362,30 @@ static int powernow_cpufreq_cpu_exit(struct cpufreq_policy *policy) } static const struct cpufreq_driver __initconstrel powernow_cpufreq_driver = { - .name = "powernow", + .name = "powernow", .verify = powernow_cpufreq_verify, .target = powernow_cpufreq_target, - .init = powernow_cpufreq_cpu_init, - .exit = powernow_cpufreq_cpu_exit, - .update = powernow_cpufreq_update -}; + .init = powernow_cpufreq_cpu_init, + .exit = powernow_cpufreq_cpu_exit, + .update = powernow_cpufreq_update}; unsigned int __init powernow_register_driver() { unsigned int i, ret = 0; - for_each_online_cpu(i) { + for_each_online_cpu (i) + { struct cpuinfo_x86 *c = &cpu_data[i]; - if (c->x86_vendor != X86_VENDOR_AMD) + if ( c->x86_vendor != X86_VENDOR_AMD ) ret = -ENODEV; else { u32 eax, ebx, ecx, edx; cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); - if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE) + if ( (edx & USE_HW_PSTATE) != USE_HW_PSTATE ) ret = -ENODEV; } - if (ret) + if ( ret ) return ret; } diff --git a/xen/arch/x86/acpi/cpuidle_menu.c b/xen/arch/x86/acpi/cpuidle_menu.c index 20f4f5fcf4..9201ab5207 100644 --- a/xen/arch/x86/acpi/cpuidle_menu.c +++ b/xen/arch/x86/acpi/cpuidle_menu.c @@ -1,6 +1,6 @@ /* * cpuidle_menu - menu governor for cpu idle, main idea come from Linux. - * drivers/cpuidle/governors/menu.c + * drivers/cpuidle/governors/menu.c * * Copyright (C) 2006-2007 Adam Belay * Copyright (C) 2007, 2008 Intel Corporation @@ -92,23 +92,24 @@ * measured idle time. */ -struct perf_factor{ - s_time_t time_stamp; - s_time_t duration; +struct perf_factor +{ + s_time_t time_stamp; + s_time_t duration; unsigned int irq_count_stamp; unsigned int irq_sum; }; struct menu_device { - int last_state_idx; - unsigned int expected_us; - u64 predicted_us; - u64 latency_factor; - unsigned int measured_us; - unsigned int exit_us; - unsigned int bucket; - u64 correction_factor[BUCKETS]; + int last_state_idx; + unsigned int expected_us; + u64 predicted_us; + u64 latency_factor; + unsigned int measured_us; + unsigned int exit_us; + unsigned int bucket; + u64 correction_factor[BUCKETS]; struct perf_factor pf; }; @@ -116,19 +117,19 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices); static inline int which_bucket(unsigned int duration) { - int bucket = 0; - - if (duration < 10) - return bucket; - if (duration < 100) - return bucket + 1; - if (duration < 1000) - return bucket + 2; - if (duration < 10000) - return bucket + 3; - if (duration < 100000) - return bucket + 4; - return bucket + 5; + int bucket = 0; + + if ( duration < 10 ) + return bucket; + if ( duration < 100 ) + return bucket + 1; + if ( duration < 1000 ) + return bucket + 2; + if ( duration < 10000 ) + return bucket + 3; + if ( duration < 100000 ) + return bucket + 4; + return bucket + 5; } /* @@ -139,35 +140,37 @@ static inline int which_bucket(unsigned int duration) */ /* 5 milisec sampling period */ -#define SAMPLING_PERIOD 5000000 +#define SAMPLING_PERIOD 5000000 /* for I/O interrupt, we give 8x multiplier compared to C state latency*/ -#define IO_MULTIPLIER 8 +#define IO_MULTIPLIER 8 static inline s_time_t avg_intr_interval_us(void) { struct menu_device *data = &__get_cpu_var(menu_devices); - s_time_t duration, now; - s_time_t avg_interval; + s_time_t duration, now; + 
s_time_t avg_interval; unsigned int irq_sum; now = NOW(); - duration = (data->pf.duration + (now - data->pf.time_stamp) - * (DECAY - 1)) / DECAY; + duration = + (data->pf.duration + (now - data->pf.time_stamp) * (DECAY - 1)) / DECAY; - irq_sum = (data->pf.irq_sum + (this_cpu(irq_count) - data->pf.irq_count_stamp) - * (DECAY - 1)) / DECAY; + irq_sum = (data->pf.irq_sum + + (this_cpu(irq_count) - data->pf.irq_count_stamp) * (DECAY - 1)) / + DECAY; - if (irq_sum == 0) + if ( irq_sum == 0 ) /* no irq recently, so return a big enough interval: 1 sec */ avg_interval = 1000000; else avg_interval = duration / irq_sum / 1000; /* in us */ - if ( duration >= SAMPLING_PERIOD){ + if ( duration >= SAMPLING_PERIOD ) + { data->pf.time_stamp = now; data->pf.duration = duration; - data->pf.irq_count_stamp= this_cpu(irq_count); + data->pf.irq_count_stamp = this_cpu(irq_count); data->pf.irq_sum = irq_sum; } @@ -189,7 +192,7 @@ static int menu_select(struct acpi_processor_power *power) { struct menu_device *data = &__get_cpu_var(menu_devices); int i; - s_time_t io_interval; + s_time_t io_interval; /* TBD: Change to 0 if C0(polling mode) support is added later*/ data->last_state_idx = CPUIDLE_DRIVER_STATE_START; @@ -203,31 +206,30 @@ static int menu_select(struct acpi_processor_power *power) io_interval = avg_intr_interval_us(); data->latency_factor = DIV_ROUND( - data->latency_factor * (DECAY - 1) + data->measured_us, - DECAY); + data->latency_factor * (DECAY - 1) + data->measured_us, DECAY); /* * if the correction factor is 0 (eg first time init or cpu hotplug * etc), we actually want to start out with a unity factor. */ - if (data->correction_factor[data->bucket] == 0) + if ( data->correction_factor[data->bucket] == 0 ) data->correction_factor[data->bucket] = RESOLUTION * DECAY; /* Make sure to round up for half microseconds */ - data->predicted_us = DIV_ROUND( - data->expected_us * data->correction_factor[data->bucket], - RESOLUTION * DECAY); + data->predicted_us = + DIV_ROUND(data->expected_us * data->correction_factor[data->bucket], + RESOLUTION * DECAY); /* find the deepest idle state that satisfies our constraints */ for ( i = CPUIDLE_DRIVER_STATE_START + 1; i < power->count; i++ ) { struct acpi_processor_cx *s = &power->states[i]; - if (s->target_residency > data->predicted_us) + if ( s->target_residency > data->predicted_us ) break; - if (s->latency * IO_MULTIPLIER > io_interval) + if ( s->latency * IO_MULTIPLIER > io_interval ) break; - if (s->latency * LATENCY_MULTIPLIER > data->latency_factor) + if ( s->latency * LATENCY_MULTIPLIER > data->latency_factor ) break; /* TBD: we need to check the QoS requirment in future */ data->exit_us = s->latency; @@ -248,15 +250,14 @@ static void menu_reflect(struct acpi_processor_power *power) * We correct for the exit latency; we are assuming here that the * exit latency happens after the event that we're interested in. 
*/ - if (data->measured_us > data->exit_us) + if ( data->measured_us > data->exit_us ) data->measured_us -= data->exit_us; /* update our correction ratio */ - new_factor = data->correction_factor[data->bucket] - * (DECAY - 1) / DECAY; + new_factor = data->correction_factor[data->bucket] * (DECAY - 1) / DECAY; - if (data->expected_us > 0 && data->measured_us < MAX_INTERESTING) + if ( data->expected_us > 0 && data->measured_us < MAX_INTERESTING ) new_factor += RESOLUTION * data->measured_us / data->expected_us; else /* @@ -269,7 +270,7 @@ static void menu_reflect(struct acpi_processor_power *power) * We don't want 0 as factor; we always want at least * a tiny bit of estimated time. */ - if (new_factor == 0) + if ( new_factor == 0 ) new_factor = 1; data->correction_factor[data->bucket] = new_factor; @@ -277,7 +278,7 @@ static void menu_reflect(struct acpi_processor_power *power) static int menu_enable_device(struct acpi_processor_power *power) { - if (!cpu_online(power->cpu)) + if ( !cpu_online(power->cpu) ) return -1; memset(&per_cpu(menu_devices, power->cpu), 0, sizeof(struct menu_device)); @@ -285,13 +286,12 @@ static int menu_enable_device(struct acpi_processor_power *power) return 0; } -static struct cpuidle_governor menu_governor = -{ - .name = "menu", - .rating = 20, - .enable = menu_enable_device, - .select = menu_select, - .reflect = menu_reflect, +static struct cpuidle_governor menu_governor = { + .name = "menu", + .rating = 20, + .enable = menu_enable_device, + .select = menu_select, + .reflect = menu_reflect, }; struct cpuidle_governor *cpuidle_current_governor = &menu_governor; diff --git a/xen/arch/x86/acpi/lib.c b/xen/arch/x86/acpi/lib.c index 265b9ad819..4bf51a7478 100644 --- a/xen/arch/x86/acpi/lib.c +++ b/xen/arch/x86/acpi/lib.c @@ -29,8 +29,8 @@ u32 __read_mostly acpi_smi_cmd; u8 __read_mostly acpi_enable_value; u8 __read_mostly acpi_disable_value; -u32 __read_mostly x86_acpiid_to_apicid[MAX_MADT_ENTRIES] = - {[0 ... MAX_MADT_ENTRIES - 1] = BAD_APICID }; +u32 __read_mostly x86_acpiid_to_apicid[MAX_MADT_ENTRIES] = { + [0 ... MAX_MADT_ENTRIES - 1] = BAD_APICID}; /* * Important Safety Note: The fixed ACPI page numbers are *subtracted* @@ -39,87 +39,87 @@ u32 __read_mostly x86_acpiid_to_apicid[MAX_MADT_ENTRIES] = */ char *__acpi_map_table(paddr_t phys, unsigned long size) { - unsigned long base, offset, mapped_size; - int idx; - - /* XEN: RAM holes above 1MB are not permanently mapped. */ - if ((phys + size) <= (1 * 1024 * 1024)) - return __va(phys); - - offset = phys & (PAGE_SIZE - 1); - mapped_size = PAGE_SIZE - offset; - set_fixmap(FIX_ACPI_END, phys); - base = __fix_to_virt(FIX_ACPI_END); - - /* - * Most cases can be covered by the below. - */ - idx = FIX_ACPI_END; - while (mapped_size < size) { - if (--idx < FIX_ACPI_BEGIN) - return NULL; /* cannot handle this */ - phys += PAGE_SIZE; - set_fixmap(idx, phys); - mapped_size += PAGE_SIZE; - } - - return ((char *) base + offset); + unsigned long base, offset, mapped_size; + int idx; + + /* XEN: RAM holes above 1MB are not permanently mapped. */ + if ( (phys + size) <= (1 * 1024 * 1024) ) + return __va(phys); + + offset = phys & (PAGE_SIZE - 1); + mapped_size = PAGE_SIZE - offset; + set_fixmap(FIX_ACPI_END, phys); + base = __fix_to_virt(FIX_ACPI_END); + + /* + * Most cases can be covered by the below. 
+ */ + idx = FIX_ACPI_END; + while ( mapped_size < size ) + { + if ( --idx < FIX_ACPI_BEGIN ) + return NULL; /* cannot handle this */ + phys += PAGE_SIZE; + set_fixmap(idx, phys); + mapped_size += PAGE_SIZE; + } + + return ((char *)base + offset); } unsigned int acpi_get_processor_id(unsigned int cpu) { - unsigned int acpiid, apicid; + unsigned int acpiid, apicid; - if ((apicid = x86_cpu_to_apicid[cpu]) == BAD_APICID) - return INVALID_ACPIID; + if ( (apicid = x86_cpu_to_apicid[cpu]) == BAD_APICID ) + return INVALID_ACPIID; - for (acpiid = 0; acpiid < ARRAY_SIZE(x86_acpiid_to_apicid); acpiid++) - if (x86_acpiid_to_apicid[acpiid] == apicid) - return acpiid; + for ( acpiid = 0; acpiid < ARRAY_SIZE(x86_acpiid_to_apicid); acpiid++ ) + if ( x86_acpiid_to_apicid[acpiid] == apicid ) + return acpiid; - return INVALID_ACPIID; + return INVALID_ACPIID; } static void get_mwait_ecx(void *info) { - *(u32 *)info = cpuid_ecx(CPUID_MWAIT_LEAF); + *(u32 *)info = cpuid_ecx(CPUID_MWAIT_LEAF); } int arch_acpi_set_pdc_bits(u32 acpi_id, u32 *pdc, u32 mask) { - unsigned int cpu = get_cpu_id(acpi_id); - struct cpuinfo_x86 *c; - u32 ecx; - - if (!(acpi_id + 1)) - c = &boot_cpu_data; - else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) - return -EINVAL; - else - c = cpu_data + cpu; - - pdc[2] |= ACPI_PDC_C_CAPABILITY_SMP & mask; - - if (cpu_has(c, X86_FEATURE_EIST)) - pdc[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP & mask; - - if (cpu_has(c, X86_FEATURE_ACPI)) - pdc[2] |= ACPI_PDC_T_FFH & mask; - - /* - * If mwait/monitor or its break-on-interrupt extension are - * unsupported, Cx_FFH will be disabled. - */ - if (!cpu_has(c, X86_FEATURE_MONITOR) || - c->cpuid_level < CPUID_MWAIT_LEAF) - ecx = 0; - else if (c == &boot_cpu_data || cpu == smp_processor_id()) - ecx = cpuid_ecx(CPUID_MWAIT_LEAF); - else - on_selected_cpus(cpumask_of(cpu), get_mwait_ecx, &ecx, 1); - if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || - !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) - pdc[2] &= ~(ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH); - - return 0; + unsigned int cpu = get_cpu_id(acpi_id); + struct cpuinfo_x86 *c; + u32 ecx; + + if ( !(acpi_id + 1) ) + c = &boot_cpu_data; + else if ( cpu >= nr_cpu_ids || !cpu_online(cpu) ) + return -EINVAL; + else + c = cpu_data + cpu; + + pdc[2] |= ACPI_PDC_C_CAPABILITY_SMP & mask; + + if ( cpu_has(c, X86_FEATURE_EIST) ) + pdc[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP & mask; + + if ( cpu_has(c, X86_FEATURE_ACPI) ) + pdc[2] |= ACPI_PDC_T_FFH & mask; + + /* + * If mwait/monitor or its break-on-interrupt extension are + * unsupported, Cx_FFH will be disabled. + */ + if ( !cpu_has(c, X86_FEATURE_MONITOR) || c->cpuid_level < CPUID_MWAIT_LEAF ) + ecx = 0; + else if ( c == &boot_cpu_data || cpu == smp_processor_id() ) + ecx = cpuid_ecx(CPUID_MWAIT_LEAF); + else + on_selected_cpus(cpumask_of(cpu), get_mwait_ecx, &ecx, 1); + if ( !(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || + !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ) + pdc[2] &= ~(ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH); + + return 0; } diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c index 93e967fe8f..7461dcc95f 100644 --- a/xen/arch/x86/acpi/power.c +++ b/xen/arch/x86/acpi/power.c @@ -80,7 +80,7 @@ static int device_power_down(void) static void device_power_up(enum dev_power_saved saved) { - switch ( saved ) + switch (saved) { case SAVED_ALL: case SAVED_LAPIC: @@ -119,7 +119,7 @@ static void freeze_domains(void) * first which is required for correctness (as only dom0 can add domains to * the domain list). Otherwise we could miss concurrently-created domains. 
*/ - for_each_domain ( d ) + for_each_domain (d) domain_pause(d); rcu_read_unlock(&domlist_read_lock); } @@ -129,7 +129,7 @@ static void thaw_domains(void) struct domain *d; rcu_read_lock(&domlist_read_lock); - for_each_domain ( d ) + for_each_domain (d) { restore_vcpu_affinity(d); domain_unpause(d); @@ -144,8 +144,8 @@ static void acpi_sleep_prepare(u32 state) if ( state != ACPI_STATE_S3 ) return; - wakeup_vector_va = __acpi_map_table( - acpi_sinfo.wakeup_vector, sizeof(uint64_t)); + wakeup_vector_va = + __acpi_map_table(acpi_sinfo.wakeup_vector, sizeof(uint64_t)); /* TBoot will set resume vector itself (when it is safe to do so). */ if ( tboot_in_measured_env() ) @@ -157,7 +157,9 @@ static void acpi_sleep_prepare(u32 state) *(uint64_t *)wakeup_vector_va = bootsym_phys(wakeup_start); } -static void acpi_sleep_post(u32 state) {} +static void acpi_sleep_post(u32 state) +{ +} /* Main interface to do xen specific suspend/resume */ static int enter_state(u32 state) @@ -219,7 +221,7 @@ static int enter_state(u32 state) ACPI_FLUSH_CPU_CACHE(); - switch ( state ) + switch (state) { case ACPI_STATE_S3: do_suspend_lowlevel(); @@ -262,7 +264,7 @@ static int enter_state(u32 state) ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_ist_wrmsr); spec_ctrl_exit_idle(ci); - done: +done: spin_debug_enable(); local_irq_restore(flags); acpi_sleep_post(state); @@ -270,7 +272,7 @@ static int enter_state(u32 state) BUG(); cpufreq_add_cpu(0); - enable_cpu: +enable_cpu: rcu_barrier(); mtrr_aps_sync_begin(); enable_nonboot_cpus(); @@ -367,11 +369,11 @@ static void tboot_sleep(u8 sleep_state) { uint32_t shutdown_type; -#define TB_COPY_GAS(tbg, g) \ - tbg.space_id = g.space_id; \ - tbg.bit_width = g.bit_width; \ - tbg.bit_offset = g.bit_offset; \ - tbg.access_width = g.access_width; \ +#define TB_COPY_GAS(tbg, g) \ + tbg.space_id = g.space_id; \ + tbg.bit_width = g.bit_width; \ + tbg.bit_offset = g.bit_offset; \ + tbg.access_width = g.access_width; \ tbg.address = g.address; /* sizes are not same (due to packing) so copy each one */ @@ -388,21 +390,21 @@ static void tboot_sleep(u8 sleep_state) g_tboot_shared->acpi_sinfo.wakeup_vector = acpi_sinfo.wakeup_vector; g_tboot_shared->acpi_sinfo.vector_width = acpi_sinfo.vector_width; g_tboot_shared->acpi_sinfo.kernel_s3_resume_vector = - bootsym_phys(wakeup_start); + bootsym_phys(wakeup_start); - switch ( sleep_state ) + switch (sleep_state) { - case ACPI_STATE_S3: - shutdown_type = TB_SHUTDOWN_S3; - break; - case ACPI_STATE_S4: - shutdown_type = TB_SHUTDOWN_S4; - break; - case ACPI_STATE_S5: - shutdown_type = TB_SHUTDOWN_S5; - break; - default: - return; + case ACPI_STATE_S3: + shutdown_type = TB_SHUTDOWN_S3; + break; + case ACPI_STATE_S4: + shutdown_type = TB_SHUTDOWN_S4; + break; + case ACPI_STATE_S5: + shutdown_type = TB_SHUTDOWN_S5; + break; + default: + return; } tboot_shutdown(shutdown_type); @@ -432,7 +434,8 @@ acpi_status acpi_enter_sleep_state(u8 sleep_state) */ u8 sleep_type_value = ((acpi_sinfo.sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) & - ACPI_X_SLEEP_TYPE_MASK) | ACPI_X_SLEEP_ENABLE; + ACPI_X_SLEEP_TYPE_MASK) | + ACPI_X_SLEEP_ENABLE; status = acpi_hw_register_write(ACPI_REGISTER_SLEEP_CONTROL, sleep_type_value); diff --git a/xen/arch/x86/acpi/suspend.c b/xen/arch/x86/acpi/suspend.c index 00e6012f54..46acecb384 100644 --- a/xen/arch/x86/acpi/suspend.c +++ b/xen/arch/x86/acpi/suspend.c @@ -37,7 +37,6 @@ void save_rest_processor_state(void) saved_xcr0 = get_xcr0(); } - void restore_rest_processor_state(void) { load_TR(); diff --git 
a/xen/arch/x86/alternative.c b/xen/arch/x86/alternative.c index b8c819a481..d95cb4915e 100644 --- a/xen/arch/x86/alternative.c +++ b/xen/arch/x86/alternative.c @@ -27,80 +27,68 @@ #include #include -#define MAX_PATCH_LEN (255-1) +#define MAX_PATCH_LEN (255 - 1) extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; #ifdef K8_NOP1 static const unsigned char k8nops[] init_or_livepatch_const = { - K8_NOP1, - K8_NOP2, - K8_NOP3, - K8_NOP4, - K8_NOP5, - K8_NOP6, - K8_NOP7, - K8_NOP8, - K8_NOP9, + K8_NOP1, K8_NOP2, K8_NOP3, K8_NOP4, K8_NOP5, + K8_NOP6, K8_NOP7, K8_NOP8, K8_NOP9, }; -static const unsigned char * const k8_nops[ASM_NOP_MAX+1] init_or_livepatch_constrel = { - NULL, - k8nops, - k8nops + 1, - k8nops + 1 + 2, - k8nops + 1 + 2 + 3, - k8nops + 1 + 2 + 3 + 4, - k8nops + 1 + 2 + 3 + 4 + 5, - k8nops + 1 + 2 + 3 + 4 + 5 + 6, - k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, - k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, +static const unsigned char + *const k8_nops[ASM_NOP_MAX + 1] init_or_livepatch_constrel = { + NULL, + k8nops, + k8nops + 1, + k8nops + 1 + 2, + k8nops + 1 + 2 + 3, + k8nops + 1 + 2 + 3 + 4, + k8nops + 1 + 2 + 3 + 4 + 5, + k8nops + 1 + 2 + 3 + 4 + 5 + 6, + k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, + k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, }; #endif #ifdef P6_NOP1 static const unsigned char p6nops[] init_or_livepatch_const = { - P6_NOP1, - P6_NOP2, - P6_NOP3, - P6_NOP4, - P6_NOP5, - P6_NOP6, - P6_NOP7, - P6_NOP8, - P6_NOP9, + P6_NOP1, P6_NOP2, P6_NOP3, P6_NOP4, P6_NOP5, + P6_NOP6, P6_NOP7, P6_NOP8, P6_NOP9, }; -static const unsigned char * const p6_nops[ASM_NOP_MAX+1] init_or_livepatch_constrel = { - NULL, - p6nops, - p6nops + 1, - p6nops + 1 + 2, - p6nops + 1 + 2 + 3, - p6nops + 1 + 2 + 3 + 4, - p6nops + 1 + 2 + 3 + 4 + 5, - p6nops + 1 + 2 + 3 + 4 + 5 + 6, - p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, - p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, +static const unsigned char + *const p6_nops[ASM_NOP_MAX + 1] init_or_livepatch_constrel = { + NULL, + p6nops, + p6nops + 1, + p6nops + 1 + 2, + p6nops + 1 + 2 + 3, + p6nops + 1 + 2 + 3 + 4, + p6nops + 1 + 2 + 3 + 4 + 5, + p6nops + 1 + 2 + 3 + 4 + 5 + 6, + p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, + p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, }; #endif -static const unsigned char * const *ideal_nops init_or_livepatch_data = p6_nops; +static const unsigned char *const *ideal_nops init_or_livepatch_data = p6_nops; #ifdef HAVE_AS_NOPS_DIRECTIVE /* Nops in .init.rodata to compare against the runtime ideal nops. */ -asm ( ".pushsection .init.rodata, \"a\", @progbits\n\t" - "toolchain_nops: .nops " __stringify(ASM_NOP_MAX) "\n\t" - ".popsection\n\t"); +asm(".pushsection .init.rodata, \"a\", @progbits\n\t" + "toolchain_nops: .nops " __stringify(ASM_NOP_MAX) "\n\t" + ".popsection\n\t"); extern char toolchain_nops[ASM_NOP_MAX]; static bool init_or_livepatch_read_mostly toolchain_nops_are_ideal; #else -# define toolchain_nops_are_ideal false +#define toolchain_nops_are_ideal false #endif static void __init arch_init_ideal_nops(void) { - switch ( boot_cpu_data.x86_vendor ) + switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: /* @@ -111,7 +99,7 @@ static void __init arch_init_ideal_nops(void) if ( boot_cpu_data.x86 != 6 ) break; - switch ( boot_cpu_data.x86_model ) + switch (boot_cpu_data.x86_model) { case 0x0f ... 0x1b: case 0x1d ... 0x25: @@ -165,8 +153,9 @@ void init_or_livepatch add_nops(void *insns, unsigned int len) * "noinline" to cause control flow change and thus invalidate I$ and * cause refetch after modification. 
*/ -static void *init_or_livepatch noinline -text_poke(void *addr, const void *opcode, size_t len) +static void *init_or_livepatch noinline text_poke(void *addr, + const void *opcode, + size_t len) { return memcpy(addr, opcode, len); } diff --git a/xen/arch/x86/apic.c b/xen/arch/x86/apic.c index 2a2432619e..a40a9a80f9 100644 --- a/xen/arch/x86/apic.c +++ b/xen/arch/x86/apic.c @@ -44,7 +44,8 @@ static bool __read_mostly tdt_enabled; static bool __initdata tdt_enable = true; boolean_param("tdt", tdt_enable); -static struct { +static struct +{ int active; /* r/w apic fields */ unsigned int apic_id; @@ -89,8 +90,8 @@ static int modern_apic(void) { unsigned int lvr, version; /* AMD systems use old APIC versions, so check the CPU */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && - boot_cpu_data.x86 >= 0xf) + if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 >= 0xf ) return 1; lvr = apic_read(APIC_LVR); version = GET_APIC_VERSION(lvr); @@ -113,7 +114,7 @@ void ack_bad_irq(unsigned int irq) * completely. * But only ack when the APIC is enabled -AK */ - if (cpu_has_apic) + if ( cpu_has_apic ) ack_APIC_irq(); } @@ -139,7 +140,7 @@ static bool __read_mostly enabled_via_apicbase; int get_physical_broadcast(void) { - if (modern_apic()) + if ( modern_apic() ) return 0xff; else return 0xf; @@ -166,7 +167,8 @@ void clear_local_APIC(void) * Masking an LVT entry on a P6 can trigger a local APIC error * if the vector is zero. Mask LVTERR first to prevent this. */ - if (maxlvt >= 3) { + if ( maxlvt >= 3 ) + { v = ERROR_APIC_VECTOR; /* any non-zero vector will do */ apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); } @@ -180,20 +182,23 @@ void clear_local_APIC(void) apic_write(APIC_LVT0, v | APIC_LVT_MASKED); v = apic_read(APIC_LVT1); apic_write(APIC_LVT1, v | APIC_LVT_MASKED); - if (maxlvt >= 4) { + if ( maxlvt >= 4 ) + { v = apic_read(APIC_LVTPC); apic_write(APIC_LVTPC, v | APIC_LVT_MASKED); } /* lets not touch this if we didn't frob it */ #ifdef CONFIG_X86_MCE_THERMAL - if (maxlvt >= 5) { + if ( maxlvt >= 5 ) + { v = apic_read(APIC_LVTTHMR); apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED); } #endif - if (maxlvt >= 6) { + if ( maxlvt >= 6 ) + { v = apic_read(APIC_CMCI); apic_write(APIC_CMCI, v | APIC_LVT_MASKED); } @@ -203,26 +208,27 @@ void clear_local_APIC(void) apic_write(APIC_LVTT, APIC_LVT_MASKED); apic_write(APIC_LVT0, APIC_LVT_MASKED); apic_write(APIC_LVT1, APIC_LVT_MASKED); - if (maxlvt >= 3) + if ( maxlvt >= 3 ) apic_write(APIC_LVTERR, APIC_LVT_MASKED); - if (maxlvt >= 4) + if ( maxlvt >= 4 ) apic_write(APIC_LVTPC, APIC_LVT_MASKED); #ifdef CONFIG_X86_MCE_THERMAL - if (maxlvt >= 5) + if ( maxlvt >= 5 ) apic_write(APIC_LVTTHMR, APIC_LVT_MASKED); #endif - if (maxlvt >= 6) + if ( maxlvt >= 6 ) apic_write(APIC_CMCI, APIC_LVT_MASKED); - if (maxlvt > 3) /* Due to Pentium errata 3AP and 11AP. */ + if ( maxlvt > 3 ) /* Due to Pentium errata 3AP and 11AP. */ apic_write(APIC_ESR, 0); apic_read(APIC_ESR); } void __init connect_bsp_APIC(void) { - if (pic_mode) { + if ( pic_mode ) + { /* * Do not trust the local APIC being empty at bootup. */ @@ -232,7 +238,7 @@ void __init connect_bsp_APIC(void) * connect BSP's local APIC to INT and NMI lines. 
*/ apic_printk(APIC_VERBOSE, "leaving PIC mode, " - "enabling APIC mode.\n"); + "enabling APIC mode.\n"); outb(0x70, 0x22); outb(0x01, 0x23); } @@ -241,7 +247,8 @@ void __init connect_bsp_APIC(void) void disconnect_bsp_APIC(int virt_wire_setup) { - if (pic_mode) { + if ( pic_mode ) + { /* * Put the board back into PIC mode (has an effect * only on certain older boards). Note that APIC @@ -249,11 +256,12 @@ void disconnect_bsp_APIC(int virt_wire_setup) * this point! The only exception are INIT IPIs. */ apic_printk(APIC_VERBOSE, "disabling APIC mode, " - "entering PIC mode.\n"); + "entering PIC mode.\n"); outb(0x70, 0x22); outb(0x00, 0x23); } - else { + else + { /* Go back to Virtual Wire compatibility mode */ unsigned long value; @@ -264,27 +272,29 @@ void disconnect_bsp_APIC(int virt_wire_setup) value |= 0xf; apic_write(APIC_SPIV, value); - if (!virt_wire_setup) { - /* For LVT0 make it edge triggered, active high, external and enabled */ + if ( !virt_wire_setup ) + { + /* For LVT0 make it edge triggered, active high, external and + * enabled */ value = apic_read(APIC_LVT0); value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | - APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED ); + APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT); apic_write(APIC_LVT0, value); } - else { + else + { /* Disable LVT0 */ apic_write(APIC_LVT0, APIC_LVT_MASKED); } /* For LVT1 make it edge triggered, active high, nmi and enabled */ value = apic_read(APIC_LVT1); - value &= ~( - APIC_MODE_MASK | APIC_SEND_PENDING | - APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | - APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); + value &= + ~(APIC_MODE_MASK | APIC_SEND_PENDING | APIC_INPUT_POLARITY | + APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI); apic_write(APIC_LVT1, value); @@ -301,11 +311,12 @@ void disable_local_APIC(void) */ apic_write(APIC_SPIV, apic_read(APIC_SPIV) & ~APIC_SPIV_APIC_ENABLED); - if (enabled_via_apicbase) { + if ( enabled_via_apicbase ) + { uint64_t msr_content; rdmsrl(MSR_APIC_BASE, msr_content); - wrmsrl(MSR_APIC_BASE, msr_content & - ~(APIC_BASE_ENABLE | APIC_BASE_EXTD)); + wrmsrl(MSR_APIC_BASE, + msr_content & ~(APIC_BASE_ENABLE | APIC_BASE_EXTD)); } if ( kexecing && (current_local_apic_mode() != apic_boot_mode) ) @@ -315,7 +326,7 @@ void disable_local_APIC(void) msr_content &= ~(APIC_BASE_ENABLE | APIC_BASE_EXTD); wrmsrl(MSR_APIC_BASE, msr_content); - switch ( apic_boot_mode ) + switch (apic_boot_mode) { case APIC_MODE_DISABLED: break; /* Nothing to do - we did this above */ @@ -335,7 +346,6 @@ void disable_local_APIC(void) break; } } - } /* @@ -364,17 +374,17 @@ int __init verify_local_APIC(void) * numbers. If the second one is different, then we * poke at a non-APIC. */ - if (reg1 != reg0) + if ( reg1 != reg0 ) return 0; /* * Check if the version looks reasonably. */ reg1 = GET_APIC_VERSION(reg0); - if (reg1 == 0x00 || reg1 == 0xff) + if ( reg1 == 0x00 || reg1 == 0xff ) return 0; reg1 = get_maxlvt(); - if (reg1 < 0x02 || reg1 == 0xff) + if ( reg1 < 0x02 || reg1 == 0xff ) return 0; /* @@ -418,7 +428,7 @@ void __init sync_Arb_IDs(void) { /* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 And not needed on AMD */ - if (modern_apic()) + if ( modern_apic() ) return; /* * Wait for idle. 
@@ -440,23 +450,24 @@ void __init init_bsp_APIC(void) * Don't do the setup now if we have a SMP BIOS as the * through-I/O-APIC virtual wire mode might be active. */ - if (smp_found_config || !cpu_has_apic) + if ( smp_found_config || !cpu_has_apic ) return; /* * Do not trust the local APIC being empty at bootup. */ clear_local_APIC(); - + /* * Enable APIC. */ value = apic_read(APIC_SPIV); value &= ~APIC_VECTOR_MASK; value |= APIC_SPIV_APIC_ENABLED; - + /* This bit is reserved on P4/Xeon and should be cleared */ - if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 15)) + if ( (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && + (boot_cpu_data.x86 == 15) ) value &= ~APIC_SPIV_FOCUS_DISABLED; else value |= APIC_SPIV_FOCUS_DISABLED; @@ -527,7 +538,8 @@ void setup_local_APIC(void) int i, j; /* Pound the ESR really hard over the head with a big hammer - mbligh */ - if (esr_disable) { + if ( esr_disable ) + { apic_write(APIC_ESR, 0); apic_write(APIC_ESR, 0); apic_write(APIC_ESR, 0); @@ -539,7 +551,7 @@ void setup_local_APIC(void) /* * Double-check whether this APIC is really registered. */ - if (!apic_id_registered()) + if ( !apic_id_registered() ) BUG(); /* @@ -565,10 +577,12 @@ void setup_local_APIC(void) * the interrupt. Hence a vector might get locked. It was noticed * for timer irq (vector 0x31). Issue an extra EOI to clear ISR. */ - for (i = APIC_ISR_NR - 1; i >= 0; i--) { - value = apic_read(APIC_ISR + i*0x10); - for (j = 31; j >= 0; j--) { - if (value & (1u << j)) + for ( i = APIC_ISR_NR - 1; i >= 0; i-- ) + { + value = apic_read(APIC_ISR + i * 0x10); + for ( j = 31; j >= 0; j-- ) + { + if ( value & (1u << j) ) ack_APIC_irq(); } } @@ -637,11 +651,14 @@ void setup_local_APIC(void) * TODO: set up through-local-APIC from through-I/O-APIC? --macro */ value = apic_read(APIC_LVT0) & APIC_LVT_MASKED; - if (!smp_processor_id() && (pic_mode || !value)) { + if ( !smp_processor_id() && (pic_mode || !value) ) + { value = APIC_DM_EXTINT; apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", smp_processor_id()); - } else { + } + else + { value = APIC_DM_EXTINT | APIC_LVT_MASKED; apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", smp_processor_id()); @@ -651,31 +668,35 @@ void setup_local_APIC(void) /* * only the BP should see the LINT1 NMI signal, obviously. */ - if (!smp_processor_id()) + if ( !smp_processor_id() ) value = APIC_DM_NMI; else value = APIC_DM_NMI | APIC_LVT_MASKED; apic_write(APIC_LVT1, value); - if (!esr_disable) { + if ( !esr_disable ) + { maxlvt = get_maxlvt(); - if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ + if ( maxlvt > 3 ) /* Due to the Pentium erratum 3AP. */ apic_write(APIC_ESR, 0); oldvalue = apic_read(APIC_ESR); - value = ERROR_APIC_VECTOR; // enables sending errors + value = ERROR_APIC_VECTOR; // enables sending errors apic_write(APIC_LVTERR, value); /* * spec says clear errors after enabling vector. */ - if (maxlvt > 3) + if ( maxlvt > 3 ) apic_write(APIC_ESR, 0); value = apic_read(APIC_ESR); - if (value != oldvalue) - apic_printk(APIC_VERBOSE, "ESR value before enabling " + if ( value != oldvalue ) + apic_printk(APIC_VERBOSE, + "ESR value before enabling " "vector: %#lx after: %#lx\n", oldvalue, value); - } else { + } + else + { /* * Something untraceble is creating bad interrupts on * secondary quads ... 
for the moment, just leave the @@ -685,7 +706,7 @@ void setup_local_APIC(void) printk("Leaving ESR disabled.\n"); } - if (nmi_watchdog == NMI_LOCAL_APIC && smp_processor_id()) + if ( nmi_watchdog == NMI_LOCAL_APIC && smp_processor_id() ) setup_apic_nmi_watchdog(); apic_pm_activate(); } @@ -694,7 +715,7 @@ int lapic_suspend(void) { unsigned long flags; int maxlvt = get_maxlvt(); - if (!apic_pm_state.active) + if ( !apic_pm_state.active ) return 0; apic_pm_state.apic_id = apic_read(APIC_ID); @@ -703,10 +724,11 @@ int lapic_suspend(void) apic_pm_state.apic_dfr = apic_read(APIC_DFR); apic_pm_state.apic_spiv = apic_read(APIC_SPIV); apic_pm_state.apic_lvtt = apic_read(APIC_LVTT); - if (maxlvt >= 4) + if ( maxlvt >= 4 ) apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC); - if (maxlvt >= 6) { + if ( maxlvt >= 6 ) + { apic_pm_state.apic_lvtcmci = apic_read(APIC_CMCI); } @@ -715,7 +737,7 @@ int lapic_suspend(void) apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR); apic_pm_state.apic_tmict = apic_read(APIC_TMICT); apic_pm_state.apic_tdcr = apic_read(APIC_TDCR); - if (maxlvt >= 5) + if ( maxlvt >= 5 ) apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR); local_irq_save(flags); @@ -731,7 +753,7 @@ int lapic_resume(void) unsigned long flags; int maxlvt; - if (!apic_pm_state.active) + if ( !apic_pm_state.active ) return 0; local_irq_save(flags); @@ -746,8 +768,7 @@ int lapic_resume(void) { rdmsrl(MSR_APIC_BASE, msr_content); msr_content &= ~APIC_BASE_ADDR_MASK; - wrmsrl(MSR_APIC_BASE, - msr_content | APIC_BASE_ENABLE | mp_lapic_addr); + wrmsrl(MSR_APIC_BASE, msr_content | APIC_BASE_ENABLE | mp_lapic_addr); } else resume_x2apic(); @@ -761,14 +782,15 @@ int lapic_resume(void) apic_write(APIC_SPIV, apic_pm_state.apic_spiv); apic_write(APIC_LVT0, apic_pm_state.apic_lvt0); apic_write(APIC_LVT1, apic_pm_state.apic_lvt1); - if (maxlvt >= 5) + if ( maxlvt >= 5 ) apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr); - if (maxlvt >= 6) { + if ( maxlvt >= 6 ) + { apic_write(APIC_CMCI, apic_pm_state.apic_lvtcmci); } - if (maxlvt >= 4) + if ( maxlvt >= 4 ) apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc); apic_write(APIC_LVTT, apic_pm_state.apic_lvtt); apic_write(APIC_TDCR, apic_pm_state.apic_tdcr); @@ -782,7 +804,6 @@ int lapic_resume(void) return 0; } - /* * Detect and enable local APICs on non-SMP boards. * Original code written by Keir Fraser. @@ -799,9 +820,9 @@ boolean_param("lapic", enable_local_apic); static int __init apic_set_verbosity(const char *str) { - if (strcmp("debug", str) == 0) + if ( strcmp("debug", str) == 0 ) apic_verbosity = APIC_DEBUG; - else if (strcmp("verbose", str) == 0) + else if ( strcmp("verbose", str) == 0 ) apic_verbosity = APIC_VERBOSE; else return -EINVAL; @@ -810,12 +831,12 @@ static int __init apic_set_verbosity(const char *str) } custom_param("apic_verbosity", apic_set_verbosity); -static int __init detect_init_APIC (void) +static int __init detect_init_APIC(void) { uint64_t msr_content; /* Disabled by kernel option? */ - if (enable_local_apic < 0) + if ( enable_local_apic < 0 ) return -1; if ( rdmsr_safe(MSR_APIC_BASE, msr_content) ) @@ -824,12 +845,14 @@ static int __init detect_init_APIC (void) return -1; } - if (!cpu_has_apic) { + if ( !cpu_has_apic ) + { /* * Over-ride BIOS and try to enable the local * APIC only if "lapic" specified. 
*/ - if (enable_local_apic <= 0) { + if ( enable_local_apic <= 0 ) + { printk("Local APIC disabled by BIOS -- " "you can enable it with \"lapic\"\n"); return -1; @@ -853,7 +876,8 @@ static int __init detect_init_APIC (void) * The APIC feature bit should now be enabled * in `cpuid' */ - if (!(cpuid_edx(1) & cpufeat_mask(X86_FEATURE_APIC))) { + if ( !(cpuid_edx(1) & cpufeat_mask(X86_FEATURE_APIC)) ) + { printk("Could not enable APIC!\n"); return -1; } @@ -865,7 +889,7 @@ static int __init detect_init_APIC (void) if ( msr_content & APIC_BASE_ENABLE ) mp_lapic_addr = msr_content & APIC_BASE_ADDR_MASK; - if (nmi_watchdog != NMI_NONE) + if ( nmi_watchdog != NMI_NONE ) nmi_watchdog = NMI_LOCAL_APIC; printk("Found and enabled local APIC!\n"); @@ -894,7 +918,7 @@ void __init x2apic_bsp_setup(void) { printk("Not enabling x2APIC: disabled by cmdline.\n"); return; - } + } printk("x2APIC: Already enabled by BIOS: Ignoring cmdline disable.\n"); } @@ -905,7 +929,8 @@ void __init x2apic_bsp_setup(void) printk("Not enabling x2APIC: depends on iommu_supports_eim.\n"); return; } - panic("x2APIC: already enabled by BIOS, but iommu_supports_eim failed\n"); + panic( + "x2APIC: already enabled by BIOS, but iommu_supports_eim failed\n"); } if ( (ioapic_entries = alloc_ioapic_entries()) == NULL ) @@ -923,7 +948,7 @@ void __init x2apic_bsp_setup(void) mask_8259A(); mask_IO_APIC_setup(ioapic_entries); - switch ( iommu_enable_x2apic_IR() ) + switch (iommu_enable_x2apic_IR()) { case 0: break; @@ -939,8 +964,9 @@ void __init x2apic_bsp_setup(void) panic("Interrupt remapping could not be enabled while " "x2APIC is already enabled by BIOS\n"); - printk(XENLOG_ERR - "Failed to enable Interrupt Remapping: Will not enable x2APIC.\n"); + printk( + XENLOG_ERR + "Failed to enable Interrupt Remapping: Will not enable x2APIC.\n"); goto restore_out; } @@ -975,10 +1001,12 @@ void __init init_apic_mappings(void) * zeroes page to simulate the local APIC and another * one for the IO-APIC. */ - if (!smp_found_config && detect_init_APIC()) { + if ( !smp_found_config && detect_init_APIC() ) + { apic_phys = __pa(alloc_xenheap_page()); clear_page(__va(apic_phys)); - } else + } + else apic_phys = mp_lapic_addr; set_fixmap_nocache(FIX_APIC_BASE, apic_phys); @@ -990,7 +1018,7 @@ __next: * Fetch the APIC ID of the BSP in case we have a * default configuration (or the MP table is broken). */ - if (boot_cpu_physical_apicid == -1U) + if ( boot_cpu_physical_apicid == -1U ) boot_cpu_physical_apicid = get_apic_id(); x86_cpu_to_apicid[0] = get_apic_id(); @@ -999,15 +1027,15 @@ __next: /***************************************************************************** * APIC calibration - * + * * The APIC is programmed in bus cycles. * Timeout values should specified in real time units. * The "cheapest" time source is the cyclecounter. - * + * * Thus, we need a mappings from: bus cycles <- cycle counter <- system time - * + * * The calibration is currently a bit shoddy since it requires the external - * timer chip to generate periodic timer interupts. + * timer chip to generate periodic timer interupts. 
*****************************************************************************/ /* used for system time scaling */ @@ -1040,17 +1068,17 @@ static unsigned int __init get_8254_timer_count(void) static void __init wait_8254_wraparound(void) { unsigned int curr_count, prev_count; - + curr_count = get_8254_timer_count(); do { prev_count = curr_count; curr_count = get_8254_timer_count(); /* workaround for broken Mercury/Neptune */ - if (prev_count >= curr_count + 0x100) + if ( prev_count >= curr_count + 0x100 ) curr_count = get_8254_timer_count(); - - } while (prev_count >= curr_count); + + } while ( prev_count >= curr_count ); } /* @@ -1086,7 +1114,7 @@ static void __setup_APIC_LVTT(unsigned int clocks) * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized. * According to Intel, MFENCE can do the serialization here. */ - asm volatile( "mfence" : : : "memory" ); + asm volatile("mfence" : : : "memory"); tmp_value = apic_read(APIC_TDCR); apic_write(APIC_TDCR, tmp_value | APIC_TDR_DIV_1); @@ -1102,17 +1130,21 @@ static void setup_APIC_timer(void) local_irq_restore(flags); } -#define DEADLINE_MODEL_MATCH(m, fr) \ - { .vendor = X86_VENDOR_INTEL, .family = 6, .model = (m), \ - .feature = X86_FEATURE_TSC_DEADLINE, \ - .driver_data = (void *)(unsigned long)(fr) } +#define DEADLINE_MODEL_MATCH(m, fr) \ + { \ + .vendor = X86_VENDOR_INTEL, .family = 6, .model = (m), \ + .feature = X86_FEATURE_TSC_DEADLINE, \ + .driver_data = (void *)(unsigned long)(fr) \ + } static unsigned int __init hsx_deadline_rev(void) { - switch ( boot_cpu_data.x86_mask ) + switch (boot_cpu_data.x86_mask) { - case 0x02: return 0x3a; /* EP */ - case 0x04: return 0x0f; /* EX */ + case 0x02: + return 0x3a; /* EP */ + case 0x04: + return 0x0f; /* EX */ } return ~0U; @@ -1120,12 +1152,16 @@ static unsigned int __init hsx_deadline_rev(void) static unsigned int __init bdx_deadline_rev(void) { - switch ( boot_cpu_data.x86_mask ) + switch (boot_cpu_data.x86_mask) { - case 0x02: return 0x00000011; - case 0x03: return 0x0700000e; - case 0x04: return 0x0f00000c; - case 0x05: return 0x0e000003; + case 0x02: + return 0x00000011; + case 0x03: + return 0x0700000e; + case 0x04: + return 0x0f00000c; + case 0x05: + return 0x0e000003; } return ~0U; @@ -1133,11 +1169,14 @@ static unsigned int __init bdx_deadline_rev(void) static unsigned int __init skx_deadline_rev(void) { - switch ( boot_cpu_data.x86_mask ) + switch (boot_cpu_data.x86_mask) { - case 0x00 ... 0x02: return ~0U; - case 0x03: return 0x01000136; - case 0x04: return 0x02000014; + case 0x00 ... 
0x02: + return ~0U; + case 0x03: + return 0x01000136; + case 0x04: + return 0x02000014; } return 0; @@ -1158,11 +1197,10 @@ static const struct x86_cpu_id __initconstrel deadline_match[] = { DEADLINE_MODEL_MATCH(0x55, skx_deadline_rev), /* Skylake X */ DEADLINE_MODEL_MATCH(0x5e, 0xb2), /* Skylake D */ - DEADLINE_MODEL_MATCH(0x8e, 0x52), /* Kabylake M */ - DEADLINE_MODEL_MATCH(0x9e, 0x52), /* Kabylake D */ + DEADLINE_MODEL_MATCH(0x8e, 0x52), /* Kabylake M */ + DEADLINE_MODEL_MATCH(0x9e, 0x52), /* Kabylake D */ - {} -}; + {}}; static void __init check_deadline_errata(void) { @@ -1190,7 +1228,8 @@ static void __init check_deadline_errata(void) setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE); printk(XENLOG_WARNING "TSC_DEADLINE disabled due to Errata; " - "please update microcode to version %#x (or later)\n", rev); + "please update microcode to version %#x (or later)\n", + rev); } static void wait_tick_pvh(void) @@ -1228,7 +1267,7 @@ static int __init calibrate_APIC_clock(void) int i; unsigned long bus_freq; /* KAF: pointer-size avoids compile warns. */ u32 bus_cycle; /* length of one bus cycle in pico-seconds */ - const int LOOPS = HZ/10; + const int LOOPS = HZ / 10; apic_printk(APIC_VERBOSE, "calibrating APIC timer ...\n"); @@ -1258,7 +1297,7 @@ static int __init calibrate_APIC_clock(void) /* * Let's wait LOOPS ticks: */ - for (i = 0; i < LOOPS; i++) + for ( i = 0; i < LOOPS; i++ ) if ( !xen_guest ) wait_8254_wraparound(); else @@ -1275,7 +1314,7 @@ static int __init calibrate_APIC_clock(void) * underflown to be exact, as the timer counts down ;) */ - result = (tt1-tt2)*APIC_DIVISOR/LOOPS; + result = (tt1 - tt2) * APIC_DIVISOR / LOOPS; apic_printk(APIC_VERBOSE, "..... CPU clock speed is %ld.%04ld MHz.\n", ((long)(t2 - t1) / LOOPS) / (1000000 / HZ), @@ -1285,9 +1324,9 @@ static int __init calibrate_APIC_clock(void) result / (1000000 / HZ), result % (1000000 / HZ)); /* set up multipliers for accurate timer code */ - bus_freq = result*HZ; - bus_cycle = (u32) (1000000000000LL/bus_freq); /* in pico seconds */ - bus_scale = (1000*262144)/bus_cycle; + bus_freq = result * HZ; + bus_cycle = (u32)(1000000000000LL / bus_freq); /* in pico seconds */ + bus_scale = (1000 * 262144) / bus_cycle; apic_printk(APIC_VERBOSE, "..... bus_scale = %#x\n", bus_scale); /* reset APIC to zero timeout value */ @@ -1315,7 +1354,7 @@ void __init setup_boot_APIC_clock(void) } setup_APIC_timer(); - + local_irq_restore(flags); } @@ -1326,7 +1365,8 @@ void setup_secondary_APIC_clock(void) void disable_APIC_timer(void) { - if (using_apic_timer) { + if ( using_apic_timer ) + { unsigned long v; /* Work around AMD Erratum 411. This is a nice thing to do anyway. */ @@ -1339,9 +1379,10 @@ void disable_APIC_timer(void) void enable_APIC_timer(void) { - if (using_apic_timer) { + if ( using_apic_timer ) + { unsigned long v; - + v = apic_read(APIC_LVTT); apic_write(APIC_LVTT, v & ~APIC_LVT_MASKED); } @@ -1377,7 +1418,7 @@ int reprogram_timer(s_time_t timeout) return apic_tmict || !timeout; } -void apic_timer_interrupt(struct cpu_user_regs * regs) +void apic_timer_interrupt(struct cpu_user_regs *regs) { ack_APIC_irq(); perfc_incr(apic_timer); @@ -1403,9 +1444,11 @@ void spurious_interrupt(struct cpu_user_regs *regs) * a request to dump local CPU state). Vectored interrupts are ACKed; * spurious interrupts are not. 
*/ - if (apic_isr_read(SPURIOUS_APIC_VECTOR)) { + if ( apic_isr_read(SPURIOUS_APIC_VECTOR) ) + { ack_APIC_irq(); - if (this_cpu(state_dump_pending)) { + if ( this_cpu(state_dump_pending) ) + { this_cpu(state_dump_pending) = false; dump_execstate(regs); return; @@ -1414,7 +1457,8 @@ void spurious_interrupt(struct cpu_user_regs *regs) /* see sw-dev-man vol 3, chapter 7.4.13.5 */ printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should " - "never happen.\n", smp_processor_id()); + "never happen.\n", + smp_processor_id()); } /* @@ -1424,14 +1468,10 @@ void spurious_interrupt(struct cpu_user_regs *regs) void error_interrupt(struct cpu_user_regs *regs) { static const char *const esr_fields[] = { - "Send CS error", - "Receive CS error", - "Send accept error", - "Receive accept error", - "Redirectable IPI", - "Send illegal vector", - "Received illegal vector", - "Illegal register address", + "Send CS error", "Receive CS error", + "Send accept error", "Receive accept error", + "Redirectable IPI", "Send illegal vector", + "Received illegal vector", "Illegal register address", }; unsigned int v, v1; int i; @@ -1442,8 +1482,8 @@ void error_interrupt(struct cpu_user_regs *regs) v1 = apic_read(APIC_ESR); ack_APIC_irq(); - printk(XENLOG_DEBUG "APIC error on CPU%u: %02x(%02x)", - smp_processor_id(), v , v1); + printk(XENLOG_DEBUG "APIC error on CPU%u: %02x(%02x)", smp_processor_id(), + v, v1); for ( i = 7; i >= 0; --i ) if ( v1 & (1 << i) ) printk(", %s", esr_fields[i]); @@ -1464,12 +1504,13 @@ void pmu_apic_interrupt(struct cpu_user_regs *regs) * This initializes the IO-APIC and APIC hardware if this is * a UP kernel. */ -int __init APIC_init_uniprocessor (void) +int __init APIC_init_uniprocessor(void) { - if (enable_local_apic < 0) + if ( enable_local_apic < 0 ) setup_clear_cpu_cap(X86_FEATURE_APIC); - if (!smp_found_config && !cpu_has_apic) { + if ( !smp_found_config && !cpu_has_apic ) + { skip_ioapic_setup = true; return -1; } @@ -1477,7 +1518,8 @@ int __init APIC_init_uniprocessor (void) /* * Complain if the BIOS pretends there is one. 
*/ - if (!cpu_has_apic) { + if ( !cpu_has_apic ) + { printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", boot_cpu_physical_apicid); skip_ioapic_setup = true; @@ -1501,11 +1543,11 @@ int __init APIC_init_uniprocessor (void) setup_local_APIC(); - if (nmi_watchdog == NMI_LOCAL_APIC) + if ( nmi_watchdog == NMI_LOCAL_APIC ) check_nmi_watchdog(); - if (smp_found_config) - if (!skip_ioapic_setup && nr_ioapics) + if ( smp_found_config ) + if ( !skip_ioapic_setup && nr_ioapics ) setup_IO_APIC(); setup_boot_APIC_clock(); @@ -1513,20 +1555,20 @@ int __init APIC_init_uniprocessor (void) return 0; } -static const char * __init apic_mode_to_str(const enum apic_mode mode) +static const char *__init apic_mode_to_str(const enum apic_mode mode) { - switch ( mode ) + switch (mode) { - case APIC_MODE_INVALID: - return "invalid"; - case APIC_MODE_DISABLED: - return "disabled"; - case APIC_MODE_XAPIC: - return "xapic"; - case APIC_MODE_X2APIC: - return "x2apic"; - default: - return "unrecognised"; + case APIC_MODE_INVALID: + return "invalid"; + case APIC_MODE_DISABLED: + return "disabled"; + case APIC_MODE_XAPIC: + return "xapic"; + case APIC_MODE_X2APIC: + return "x2apic"; + default: + return "unrecognised"; } } @@ -1566,7 +1608,6 @@ enum apic_mode current_local_apic_mode(void) return APIC_MODE_DISABLED; } - void check_for_unexpected_msi(unsigned int vector) { BUG_ON(apic_isr_read(vector)); diff --git a/xen/arch/x86/bitops.c b/xen/arch/x86/bitops.c index f6ee71512c..ce28b1678d 100644 --- a/xen/arch/x86/bitops.c +++ b/xen/arch/x86/bitops.c @@ -2,30 +2,30 @@ #include #include -unsigned int __find_first_bit( - const unsigned long *addr, unsigned int size) +unsigned int __find_first_bit(const unsigned long *addr, unsigned int size) { unsigned long d0, d1, res; - asm volatile ( - "1: xor %%eax,%%eax\n\t" /* also ensures ZF==1 if size==0 */ - " repe; scas"__OS"\n\t" - " je 2f\n\t" - " bsf -"STR(BITS_PER_LONG/8)"(%2),%0\n\t" - " jz 1b\n\t" - " lea -"STR(BITS_PER_LONG/8)"(%2),%2\n\t" - "2: sub %%ebx,%%edi\n\t" - " shl $3,%%edi\n\t" - " add %%edi,%%eax" - : "=&a" (res), "=&c" (d0), "=&D" (d1) - : "1" (BITS_TO_LONGS(size)), "2" (addr), "b" ((int)(long)addr) - : "memory" ); + asm volatile("1: xor %%eax,%%eax\n\t" /* also ensures ZF==1 if size==0 */ + " repe; scas" __OS "\n\t" + " je 2f\n\t" + " bsf -" STR(BITS_PER_LONG / + 8) "(%2),%0\n\t" + " jz 1b\n\t" + " lea -" STR(BITS_PER_LONG / + 8) "(%2),%2\n\t" + "2: sub %%ebx,%%edi\n\t" + " shl $3,%%edi\n\t" + " add %%edi,%%eax" + : "=&a"(res), "=&c"(d0), "=&D"(d1) + : "1"(BITS_TO_LONGS(size)), "2"(addr), "b"((int)(long)addr) + : "memory"); return res; } -unsigned int __find_next_bit( - const unsigned long *addr, unsigned int size, unsigned int offset) +unsigned int __find_next_bit(const unsigned long *addr, unsigned int size, + unsigned int offset) { const unsigned long *p = addr + (offset / BITS_PER_LONG); unsigned int set, bit = offset & (BITS_PER_LONG - 1); @@ -50,32 +50,32 @@ unsigned int __find_next_bit( return (offset + set); } -unsigned int __find_first_zero_bit( - const unsigned long *addr, unsigned int size) +unsigned int __find_first_zero_bit(const unsigned long *addr, unsigned int size) { unsigned long d0, d1, d2, res; - asm volatile ( - "1: xor %%eax,%%eax ; not %3\n\t" /* rAX == ~0ul */ - " xor %%edx,%%edx\n\t" /* also ensures ZF==1 if size==0 */ - " repe; scas"__OS"\n\t" - " je 2f\n\t" - " xor -"STR(BITS_PER_LONG/8)"(%2),%3\n\t" - " jz 1b\n\t" - " rep; bsf %3,%0\n\t" - " lea -"STR(BITS_PER_LONG/8)"(%2),%2\n\t" - "2: sub %%ebx,%%edi\n\t" - " shl 
$3,%%edi\n\t" - " add %%edi,%%edx" - : "=&d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) - : "1" (BITS_TO_LONGS(size)), "2" (addr), "b" ((int)(long)addr) - : "memory" ); + asm volatile("1: xor %%eax,%%eax ; not %3\n\t" /* rAX == ~0ul */ + " xor %%edx,%%edx\n\t" /* also ensures ZF==1 if size==0 */ + " repe; scas" __OS "\n\t" + " je 2f\n\t" + " xor -" STR(BITS_PER_LONG / + 8) "(%2),%3\n\t" + " jz 1b\n\t" + " rep; bsf %3,%0\n\t" + " lea -" STR(BITS_PER_LONG / + 8) "(%2),%2\n\t" + "2: sub %%ebx,%%edi\n\t" + " shl $3,%%edi\n\t" + " add %%edi,%%edx" + : "=&d"(res), "=&c"(d0), "=&D"(d1), "=&a"(d2) + : "1"(BITS_TO_LONGS(size)), "2"(addr), "b"((int)(long)addr) + : "memory"); return res; } -unsigned int __find_next_zero_bit( - const unsigned long *addr, unsigned int size, unsigned int offset) +unsigned int __find_next_zero_bit(const unsigned long *addr, unsigned int size, + unsigned int offset) { const unsigned long *p = addr + (offset / BITS_PER_LONG); unsigned int set, bit = offset & (BITS_PER_LONG - 1); diff --git a/xen/arch/x86/boot/cmdline.c b/xen/arch/x86/boot/cmdline.c index 51b0659a04..81232d09ad 100644 --- a/xen/arch/x86/boot/cmdline.c +++ b/xen/arch/x86/boot/cmdline.c @@ -23,19 +23,18 @@ * - 0x4(%esp) = &cmdline, * - 0x8(%esp) = &early_boot_opts. */ -asm ( - " .text \n" +asm(" .text \n" " .globl _start \n" "_start: \n" - " jmp cmdline_parse_early \n" - ); + " jmp cmdline_parse_early \n"); #include #include "defs.h" #include "video.h" /* Keep in sync with trampoline.S:early_boot_opts label! */ -typedef struct __packed { +typedef struct __packed +{ u8 skip_realmode; u8 opt_edd; u8 opt_edid; @@ -53,7 +52,7 @@ typedef struct __packed { */ static const char delim_chars_comma[] = ", \n\r\t"; -#define delim_chars (delim_chars_comma + 1) +#define delim_chars (delim_chars_comma + 1) static size_t strlen(const char *s) { @@ -133,7 +132,7 @@ static unsigned int strtoui(const char *s, const char *stop, const char **next) unsigned long long res = 0; if ( *s == '0' ) - base = (tolower(*++s) == 'x') ? (++s, 16) : 8; + base = (tolower(*++s) == 'x') ? (++s, 16) : 8; for ( ; *s != '\0'; ++s ) { @@ -164,9 +163,9 @@ static unsigned int strtoui(const char *s, const char *stop, const char **next) } } - out: +out: if ( next ) - *next = s; + *next = s; return res; } @@ -187,7 +186,7 @@ static const char *find_opt(const char *cmdline, const char *opt, bool arg) lo = strlen(opt); - for ( ; ; ) + for ( ;; ) { cmdline += strspn(cmdline, delim_chars); @@ -208,7 +207,8 @@ static const char *find_opt(const char *cmdline, const char *opt, bool arg) static bool skip_realmode(const char *cmdline) { - return find_opt(cmdline, "no-real-mode", false) || find_opt(cmdline, "tboot=", true); + return find_opt(cmdline, "no-real-mode", false) || + find_opt(cmdline, "tboot=", true); } static u8 edd_parse(const char *cmdline) @@ -243,7 +243,7 @@ static u8 edid_parse(const char *cmdline) static u16 rows2vmode(unsigned int rows) { - switch ( rows ) + switch (rows) { case 25: return VIDEO_80x25; diff --git a/xen/arch/x86/boot/mkelf32.c b/xen/arch/x86/boot/mkelf32.c index bcbde1a056..31ae4e7eaf 100644 --- a/xen/arch/x86/boot/mkelf32.c +++ b/xen/arch/x86/boot/mkelf32.c @@ -1,10 +1,10 @@ /****************************************************************************** * mkelf32.c - * + * * Usage: elf-prefix - * + * * Converts an Elf64 executable binary into a simple Elf32 - * image comprising a single chunk to be loaded at . + * image comprising a single chunk to be loaded at . 
*/ #include @@ -17,59 +17,61 @@ #include #include -#define u8 uint8_t +#define u8 uint8_t #define u16 uint16_t #define u32 uint32_t #define u64 uint64_t -#define s8 int8_t +#define s8 int8_t #define s16 int16_t #define s32 int32_t #define s64 int64_t #include "../../../include/xen/elfstructs.h" -#define DYNAMICALLY_FILLED 0 -#define RAW_OFFSET 128 +#define DYNAMICALLY_FILLED 0 +#define RAW_OFFSET 128 static Elf32_Ehdr out_ehdr = { - { ELFMAG0, ELFMAG1, ELFMAG2, ELFMAG3, /* EI_MAG{0-3} */ - ELFCLASS32, /* EI_CLASS */ - ELFDATA2LSB, /* EI_DATA */ - EV_CURRENT, /* EI_VERSION */ - 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* e_ident */ - ET_EXEC, /* e_type */ - EM_386, /* e_machine */ - EV_CURRENT, /* e_version */ - DYNAMICALLY_FILLED, /* e_entry */ - sizeof(Elf32_Ehdr), /* e_phoff */ - DYNAMICALLY_FILLED, /* e_shoff */ - 0, /* e_flags */ - sizeof(Elf32_Ehdr), /* e_ehsize */ - sizeof(Elf32_Phdr), /* e_phentsize */ - 1, /* modify based on num_phdrs */ /* e_phnum */ - sizeof(Elf32_Shdr), /* e_shentsize */ - 3, /* modify based on num_phdrs */ /* e_shnum */ - 2 /* e_shstrndx */ + {ELFMAG0, ELFMAG1, ELFMAG2, ELFMAG3, /* EI_MAG{0-3} */ + ELFCLASS32, /* EI_CLASS */ + ELFDATA2LSB, /* EI_DATA */ + EV_CURRENT, /* EI_VERSION */ + 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* e_ident */ + ET_EXEC, /* e_type */ + EM_386, /* e_machine */ + EV_CURRENT, /* e_version */ + DYNAMICALLY_FILLED, /* e_entry */ + sizeof(Elf32_Ehdr), /* e_phoff */ + DYNAMICALLY_FILLED, /* e_shoff */ + 0, /* e_flags */ + sizeof(Elf32_Ehdr), /* e_ehsize */ + sizeof(Elf32_Phdr), /* e_phentsize */ + 1, + /* modify based on num_phdrs */ /* e_phnum */ + sizeof(Elf32_Shdr), /* e_shentsize */ + 3, + /* modify based on num_phdrs */ /* e_shnum */ + 2 /* e_shstrndx */ }; static Elf32_Phdr out_phdr = { - PT_LOAD, /* p_type */ - RAW_OFFSET, /* p_offset */ - DYNAMICALLY_FILLED, /* p_vaddr */ - DYNAMICALLY_FILLED, /* p_paddr */ - DYNAMICALLY_FILLED, /* p_filesz */ - DYNAMICALLY_FILLED, /* p_memsz */ - PF_R|PF_W|PF_X, /* p_flags */ - 64 /* p_align */ + PT_LOAD, /* p_type */ + RAW_OFFSET, /* p_offset */ + DYNAMICALLY_FILLED, /* p_vaddr */ + DYNAMICALLY_FILLED, /* p_paddr */ + DYNAMICALLY_FILLED, /* p_filesz */ + DYNAMICALLY_FILLED, /* p_memsz */ + PF_R | PF_W | PF_X, /* p_flags */ + 64 /* p_align */ }; static Elf32_Phdr note_phdr = { - PT_NOTE, /* p_type */ - DYNAMICALLY_FILLED, /* p_offset */ - DYNAMICALLY_FILLED, /* p_vaddr */ - DYNAMICALLY_FILLED, /* p_paddr */ - DYNAMICALLY_FILLED, /* p_filesz */ - DYNAMICALLY_FILLED, /* p_memsz */ - PF_R, /* p_flags */ - 4 /* p_align */ + PT_NOTE, /* p_type */ + DYNAMICALLY_FILLED, /* p_offset */ + DYNAMICALLY_FILLED, /* p_vaddr */ + DYNAMICALLY_FILLED, /* p_paddr */ + DYNAMICALLY_FILLED, /* p_filesz */ + DYNAMICALLY_FILLED, /* p_memsz */ + PF_R, /* p_flags */ + 4 /* p_align */ }; static u8 out_shstrtab[] = "\0.text\0.shstrtab"; @@ -77,46 +79,47 @@ static u8 out_shstrtab[] = "\0.text\0.shstrtab"; static u8 out_shstrtab_extra[] = ".note\0"; static Elf32_Shdr out_shdr[] = { - { 0 }, - { 1, /* sh_name */ - SHT_PROGBITS, /* sh_type */ - SHF_WRITE|SHF_ALLOC|SHF_EXECINSTR, /* sh_flags */ - DYNAMICALLY_FILLED, /* sh_addr */ - RAW_OFFSET, /* sh_offset */ - DYNAMICALLY_FILLED, /* sh_size */ - 0, /* sh_link */ - 0, /* sh_info */ - 64, /* sh_addralign */ - 0 /* sh_entsize */ + {0}, + { + 1, /* sh_name */ + SHT_PROGBITS, /* sh_type */ + SHF_WRITE | SHF_ALLOC | SHF_EXECINSTR, /* sh_flags */ + DYNAMICALLY_FILLED, /* sh_addr */ + RAW_OFFSET, /* sh_offset */ + DYNAMICALLY_FILLED, /* sh_size */ + 0, /* sh_link */ + 0, /* sh_info */ + 64, /* sh_addralign */ 
+ 0 /* sh_entsize */ }, - { 7, /* sh_name */ - SHT_STRTAB, /* sh_type */ - 0, /* sh_flags */ - 0, /* sh_addr */ - DYNAMICALLY_FILLED, /* sh_offset */ - sizeof(out_shstrtab), /* sh_size */ - 0, /* sh_link */ - 0, /* sh_info */ - 1, /* sh_addralign */ - 0 /* sh_entsize */ - } -}; + { + 7, /* sh_name */ + SHT_STRTAB, /* sh_type */ + 0, /* sh_flags */ + 0, /* sh_addr */ + DYNAMICALLY_FILLED, /* sh_offset */ + sizeof(out_shstrtab), /* sh_size */ + 0, /* sh_link */ + 0, /* sh_info */ + 1, /* sh_addralign */ + 0 /* sh_entsize */ + }}; /* * The 17 points to the '.note' in the out_shstrtab and out_shstrtab_extra * laid out in the file. */ static Elf32_Shdr out_shdr_note = { - 17, /* sh_name */ - SHT_NOTE, /* sh_type */ - 0, /* sh_flags */ - DYNAMICALLY_FILLED, /* sh_addr */ - DYNAMICALLY_FILLED, /* sh_offset */ - DYNAMICALLY_FILLED, /* sh_size */ - 0, /* sh_link */ - 0, /* sh_info */ - 4, /* sh_addralign */ - 0 /* sh_entsize */ + 17, /* sh_name */ + SHT_NOTE, /* sh_type */ + 0, /* sh_flags */ + DYNAMICALLY_FILLED, /* sh_addr */ + DYNAMICALLY_FILLED, /* sh_offset */ + DYNAMICALLY_FILLED, /* sh_size */ + 0, /* sh_link */ + 0, /* sh_info */ + 4, /* sh_addralign */ + 0 /* sh_entsize */ }; /* Some system header files define these macros and pollute our namespace. */ @@ -124,9 +127,11 @@ static Elf32_Shdr out_shdr_note = { #undef swap32 #undef swap64 -#define swap16(_v) ((((u16)(_v)>>8)&0xff)|(((u16)(_v)&0xff)<<8)) -#define swap32(_v) (((u32)swap16((u16)(_v))<<16)|(u32)swap16((u32)((_v)>>16))) -#define swap64(_v) (((u64)swap32((u32)(_v))<<32)|(u64)swap32((u32)((_v)>>32))) +#define swap16(_v) ((((u16)(_v) >> 8) & 0xff) | (((u16)(_v)&0xff) << 8)) +#define swap32(_v) \ + (((u32)swap16((u16)(_v)) << 16) | (u32)swap16((u32)((_v) >> 16))) +#define swap64(_v) \ + (((u64)swap32((u32)(_v)) << 32) | (u64)swap32((u32)((_v) >> 32))) static int big_endian; @@ -134,87 +139,87 @@ static void endianadjust_ehdr32(Elf32_Ehdr *eh) { if ( !big_endian ) return; - eh->e_type = swap16(eh->e_type); - eh->e_machine = swap16(eh->e_machine); - eh->e_version = swap32(eh->e_version); - eh->e_entry = swap32(eh->e_entry); - eh->e_phoff = swap32(eh->e_phoff); - eh->e_shoff = swap32(eh->e_shoff); - eh->e_flags = swap32(eh->e_flags); - eh->e_ehsize = swap16(eh->e_ehsize); + eh->e_type = swap16(eh->e_type); + eh->e_machine = swap16(eh->e_machine); + eh->e_version = swap32(eh->e_version); + eh->e_entry = swap32(eh->e_entry); + eh->e_phoff = swap32(eh->e_phoff); + eh->e_shoff = swap32(eh->e_shoff); + eh->e_flags = swap32(eh->e_flags); + eh->e_ehsize = swap16(eh->e_ehsize); eh->e_phentsize = swap16(eh->e_phentsize); - eh->e_phnum = swap16(eh->e_phnum); + eh->e_phnum = swap16(eh->e_phnum); eh->e_shentsize = swap16(eh->e_shentsize); - eh->e_shnum = swap16(eh->e_shnum); - eh->e_shstrndx = swap16(eh->e_shstrndx); + eh->e_shnum = swap16(eh->e_shnum); + eh->e_shstrndx = swap16(eh->e_shstrndx); } static void endianadjust_ehdr64(Elf64_Ehdr *eh) { if ( !big_endian ) return; - eh->e_type = swap16(eh->e_type); - eh->e_machine = swap16(eh->e_machine); - eh->e_version = swap32(eh->e_version); - eh->e_entry = swap64(eh->e_entry); - eh->e_phoff = swap64(eh->e_phoff); - eh->e_shoff = swap64(eh->e_shoff); - eh->e_flags = swap32(eh->e_flags); - eh->e_ehsize = swap16(eh->e_ehsize); + eh->e_type = swap16(eh->e_type); + eh->e_machine = swap16(eh->e_machine); + eh->e_version = swap32(eh->e_version); + eh->e_entry = swap64(eh->e_entry); + eh->e_phoff = swap64(eh->e_phoff); + eh->e_shoff = swap64(eh->e_shoff); + eh->e_flags = swap32(eh->e_flags); + eh->e_ehsize = 
swap16(eh->e_ehsize); eh->e_phentsize = swap16(eh->e_phentsize); - eh->e_phnum = swap16(eh->e_phnum); + eh->e_phnum = swap16(eh->e_phnum); eh->e_shentsize = swap16(eh->e_shentsize); - eh->e_shnum = swap16(eh->e_shnum); - eh->e_shstrndx = swap16(eh->e_shstrndx); + eh->e_shnum = swap16(eh->e_shnum); + eh->e_shstrndx = swap16(eh->e_shstrndx); } static void endianadjust_phdr32(Elf32_Phdr *ph) { if ( !big_endian ) return; - ph->p_type = swap32(ph->p_type); - ph->p_offset = swap32(ph->p_offset); - ph->p_vaddr = swap32(ph->p_vaddr); - ph->p_paddr = swap32(ph->p_paddr); - ph->p_filesz = swap32(ph->p_filesz); - ph->p_memsz = swap32(ph->p_memsz); - ph->p_flags = swap32(ph->p_flags); - ph->p_align = swap32(ph->p_align); + ph->p_type = swap32(ph->p_type); + ph->p_offset = swap32(ph->p_offset); + ph->p_vaddr = swap32(ph->p_vaddr); + ph->p_paddr = swap32(ph->p_paddr); + ph->p_filesz = swap32(ph->p_filesz); + ph->p_memsz = swap32(ph->p_memsz); + ph->p_flags = swap32(ph->p_flags); + ph->p_align = swap32(ph->p_align); } static void endianadjust_phdr64(Elf64_Phdr *ph) { if ( !big_endian ) return; - ph->p_type = swap32(ph->p_type); - ph->p_flags = swap32(ph->p_flags); - ph->p_offset = swap64(ph->p_offset); - ph->p_vaddr = swap64(ph->p_vaddr); - ph->p_paddr = swap64(ph->p_paddr); - ph->p_filesz = swap64(ph->p_filesz); - ph->p_memsz = swap64(ph->p_memsz); - ph->p_align = swap64(ph->p_align); + ph->p_type = swap32(ph->p_type); + ph->p_flags = swap32(ph->p_flags); + ph->p_offset = swap64(ph->p_offset); + ph->p_vaddr = swap64(ph->p_vaddr); + ph->p_paddr = swap64(ph->p_paddr); + ph->p_filesz = swap64(ph->p_filesz); + ph->p_memsz = swap64(ph->p_memsz); + ph->p_align = swap64(ph->p_align); } static void endianadjust_shdr32(Elf32_Shdr *sh) { if ( !big_endian ) return; - sh->sh_name = swap32(sh->sh_name); - sh->sh_type = swap32(sh->sh_type); - sh->sh_flags = swap32(sh->sh_flags); - sh->sh_addr = swap32(sh->sh_addr); - sh->sh_offset = swap32(sh->sh_offset); - sh->sh_size = swap32(sh->sh_size); - sh->sh_link = swap32(sh->sh_link); - sh->sh_info = swap32(sh->sh_info); + sh->sh_name = swap32(sh->sh_name); + sh->sh_type = swap32(sh->sh_type); + sh->sh_flags = swap32(sh->sh_flags); + sh->sh_addr = swap32(sh->sh_addr); + sh->sh_offset = swap32(sh->sh_offset); + sh->sh_size = swap32(sh->sh_size); + sh->sh_link = swap32(sh->sh_link); + sh->sh_info = swap32(sh->sh_info); sh->sh_addralign = swap32(sh->sh_addralign); - sh->sh_entsize = swap32(sh->sh_entsize); + sh->sh_entsize = swap32(sh->sh_entsize); } static void do_write(int fd, void *data, int len) { - int done, left = len; + int done, left = len; char *p = data; while ( left != 0 ) @@ -223,19 +228,19 @@ static void do_write(int fd, void *data, int len) { if ( errno == EINTR ) continue; - fprintf(stderr, "Error writing output image: %d (%s).\n", - errno, strerror(errno)); + fprintf(stderr, "Error writing output image: %d (%s).\n", errno, + strerror(errno)); exit(1); } left -= done; - p += done; + p += done; } } static void do_read(int fd, void *data, int len) { - int done, left = len; + int done, left = len; char *p = data; while ( left != 0 ) @@ -244,25 +249,25 @@ static void do_read(int fd, void *data, int len) { if ( errno == EINTR ) continue; - fprintf(stderr, "Error reading input image: %d (%s).\n", - errno, strerror(errno)); + fprintf(stderr, "Error reading input image: %d (%s).\n", errno, + strerror(errno)); exit(1); } left -= done; - p += done; + p += done; } } int main(int argc, char **argv) { - u64 final_exec_addr; - u32 loadbase, dat_siz, mem_siz, note_base, 
note_sz, offset; - char *inimage, *outimage; - int infd, outfd; - char buffer[1024] = {}; - int bytes, todo, i = 1; - int num_phdrs = 1; + u64 final_exec_addr; + u32 loadbase, dat_siz, mem_siz, note_base, note_sz, offset; + char *inimage, *outimage; + int infd, outfd; + char buffer[1024] = {}; + int bytes, todo, i = 1; + int num_phdrs = 1; Elf32_Ehdr in32_ehdr; @@ -272,7 +277,7 @@ int main(int argc, char **argv) if ( argc < 5 ) { fprintf(stderr, "Usage: mkelf32 [--notes] " - " \n"); + " \n"); return 1; } @@ -281,7 +286,7 @@ int main(int argc, char **argv) i = 2; num_phdrs = 2; } - inimage = argv[i++]; + inimage = argv[i++]; outimage = argv[i++]; loadbase = strtoul(argv[i++], NULL, 16); final_exec_addr = strtoull(argv[i++], NULL, 16); @@ -289,14 +294,13 @@ int main(int argc, char **argv) infd = open(inimage, O_RDONLY); if ( infd == -1 ) { - fprintf(stderr, "Failed to open input image '%s': %d (%s).\n", - inimage, errno, strerror(errno)); + fprintf(stderr, "Failed to open input image '%s': %d (%s).\n", inimage, + errno, strerror(errno)); return 1; } do_read(infd, &in32_ehdr, sizeof(in32_ehdr)); - if ( !IS_ELF(in32_ehdr) || - (in32_ehdr.e_ident[EI_DATA] != ELFDATA2LSB) ) + if ( !IS_ELF(in32_ehdr) || (in32_ehdr.e_ident[EI_DATA] != ELFDATA2LSB) ) { fprintf(stderr, "Input image must be a little-endian Elf image.\n"); return 1; @@ -344,7 +348,7 @@ int main(int argc, char **argv) offset = in64_phdr.p_offset; note_base = in64_phdr.p_vaddr; - (void)lseek(infd, in64_ehdr.e_phoff+sizeof(in64_phdr), SEEK_SET); + (void)lseek(infd, in64_ehdr.e_phoff + sizeof(in64_phdr), SEEK_SET); do_read(infd, &in64_phdr, sizeof(in64_phdr)); endianadjust_phdr64(&in64_phdr); @@ -355,8 +359,9 @@ int main(int argc, char **argv) if ( in64_phdr.p_offset > dat_siz || offset > in64_phdr.p_offset ) { - fprintf(stderr, "Expected .note section within .text section!\n" \ - "Offset %"PRId64" not within %d!\n", + fprintf(stderr, + "Expected .note section within .text section!\n" + "Offset %" PRId64 " not within %d!\n", in64_phdr.p_offset, dat_siz); return 1; } @@ -373,13 +378,13 @@ int main(int argc, char **argv) out_ehdr.e_entry = loadbase; out_ehdr.e_shoff = RAW_OFFSET + dat_siz; - out_phdr.p_vaddr = loadbase; - out_phdr.p_paddr = loadbase; + out_phdr.p_vaddr = loadbase; + out_phdr.p_paddr = loadbase; out_phdr.p_filesz = dat_siz; - out_phdr.p_memsz = mem_siz; + out_phdr.p_memsz = mem_siz; - out_shdr[1].sh_addr = loadbase; - out_shdr[1].sh_size = dat_siz; + out_shdr[1].sh_addr = loadbase; + out_shdr[1].sh_size = dat_siz; out_shdr[2].sh_offset = RAW_OFFSET + dat_siz + sizeof(out_shdr); if ( num_phdrs > 1 ) @@ -390,11 +395,11 @@ int main(int argc, char **argv) out_ehdr.e_shnum++; /* Fill out the PT_NOTE program header. 
*/ - note_phdr.p_vaddr = note_base; - note_phdr.p_paddr = note_base; - note_phdr.p_filesz = note_sz; - note_phdr.p_memsz = note_sz; - note_phdr.p_offset = RAW_OFFSET + offset; + note_phdr.p_vaddr = note_base; + note_phdr.p_paddr = note_base; + note_phdr.p_filesz = note_sz; + note_phdr.p_memsz = note_sz; + note_phdr.p_offset = RAW_OFFSET + offset; /* Tack on the .note\0 */ out_shdr[2].sh_size += sizeof(out_shstrtab_extra); @@ -407,7 +412,7 @@ int main(int argc, char **argv) out_shdr_note.sh_offset = RAW_OFFSET + offset; } - outfd = open(outimage, O_WRONLY|O_CREAT|O_TRUNC, 0775); + outfd = open(outimage, O_WRONLY | O_CREAT | O_TRUNC, 0775); if ( outfd == -1 ) { fprintf(stderr, "Failed to open output image '%s': %d (%s).\n", @@ -427,7 +432,8 @@ int main(int argc, char **argv) do_write(outfd, ¬e_phdr, sizeof(note_phdr)); } - if ( (bytes = RAW_OFFSET - sizeof(out_ehdr) - (num_phdrs * sizeof(out_phdr)) ) < 0 ) + if ( (bytes = RAW_OFFSET - sizeof(out_ehdr) - + (num_phdrs * sizeof(out_phdr))) < 0 ) { fprintf(stderr, "Header overflow.\n"); return 1; @@ -436,8 +442,8 @@ int main(int argc, char **argv) for ( bytes = 0; bytes < dat_siz; bytes += todo ) { - todo = ((dat_siz - bytes) > sizeof(buffer)) ? - sizeof(buffer) : (dat_siz - bytes); + todo = ((dat_siz - bytes) > sizeof(buffer)) ? sizeof(buffer) + : (dat_siz - bytes); do_read(infd, buffer, todo); do_write(outfd, buffer, todo); } @@ -455,12 +461,15 @@ int main(int argc, char **argv) do_write(outfd, out_shstrtab, sizeof(out_shstrtab)); /* Our .note */ do_write(outfd, out_shstrtab_extra, sizeof(out_shstrtab_extra)); - do_write(outfd, buffer, 4-((sizeof(out_shstrtab)+sizeof(out_shstrtab_extra)+dat_siz)&3)); + do_write( + outfd, buffer, + 4 - ((sizeof(out_shstrtab) + sizeof(out_shstrtab_extra) + dat_siz) & + 3)); } else { do_write(outfd, out_shstrtab, sizeof(out_shstrtab)); - do_write(outfd, buffer, 4-((sizeof(out_shstrtab)+dat_siz)&3)); + do_write(outfd, buffer, 4 - ((sizeof(out_shstrtab) + dat_siz) & 3)); } close(infd); close(outfd); diff --git a/xen/arch/x86/boot/reloc.c b/xen/arch/x86/boot/reloc.c index 4f4039bb7c..e6be07f331 100644 --- a/xen/arch/x86/boot/reloc.c +++ b/xen/arch/x86/boot/reloc.c @@ -18,12 +18,10 @@ * - 0x8(%esp) = INFORMATION_ADDRESS, * - 0xc(%esp) = TOPMOST_LOW_MEMORY_STACK_ADDRESS. */ -asm ( - " .text \n" +asm(" .text \n" " .globl _start \n" "_start: \n" - " jmp reloc \n" - ); + " jmp reloc \n"); #include "defs.h" #include "../../../include/xen/multiboot.h" @@ -32,7 +30,8 @@ asm ( #include "../../../include/xen/kconfig.h" #include -#define get_mb2_data(tag, type, member) (((multiboot2_tag_##type##_t *)(tag))->member) +#define get_mb2_data(tag, type, member) \ + (((multiboot2_tag_##type##_t *)(tag))->member) #define get_mb2_string(tag, type, member) ((u32)get_mb2_data(tag, type, member)) static u32 alloc; @@ -137,10 +136,7 @@ static multiboot_info_t *mbi_reloc(u32 mbi_in) mbi_out->boot_loader_name = copy_string(mbi_out->boot_loader_name); /* Mask features we don't understand or don't relocate. */ - mbi_out->flags &= (MBI_MEMLIMITS | - MBI_CMDLINE | - MBI_MODULES | - MBI_MEMMAP | + mbi_out->flags &= (MBI_MEMLIMITS | MBI_CMDLINE | MBI_MODULES | MBI_MEMMAP | MBI_LOADERNAME); return mbi_out; @@ -181,8 +177,8 @@ static multiboot_info_t *mbi2_reloc(u32 mbi_in) * We have to allocate one more module slot here. At some point * __start_xen() may put Xen image placement into it. 
*/ - mbi_out->mods_addr = alloc_mem((mbi_out->mods_count + 1) * - sizeof(*mbi_out_mods)); + mbi_out->mods_addr = + alloc_mem((mbi_out->mods_count + 1) * sizeof(*mbi_out_mods)); mbi_out_mods = _p(mbi_out->mods_addr); } @@ -192,7 +188,7 @@ static multiboot_info_t *mbi2_reloc(u32 mbi_in) /* Put all needed data into mbi_out. */ for ( tag = _p(ptr); (u32)tag - mbi_in < mbi_fix->total_size; tag = _p(ALIGN_UP((u32)tag + tag->size, MULTIBOOT2_TAG_ALIGN)) ) - switch ( tag->type ) + switch (tag->type) { case MULTIBOOT2_TAG_TYPE_BOOT_LOADER_NAME: mbi_out->flags |= MBI_LOADERNAME; @@ -246,7 +242,8 @@ static multiboot_info_t *mbi2_reloc(u32 mbi_in) if ( mod_idx >= mbi_out->mods_count ) break; - mbi_out_mods[mod_idx].mod_start = get_mb2_data(tag, module, mod_start); + mbi_out_mods[mod_idx].mod_start = + get_mb2_data(tag, module, mod_start); mbi_out_mods[mod_idx].mod_end = get_mb2_data(tag, module, mod_end); ptr = get_mb2_string(tag, module, cmdline); mbi_out_mods[mod_idx].string = copy_string(ptr); @@ -264,11 +261,11 @@ static multiboot_info_t *mbi2_reloc(u32 mbi_in) return mbi_out; } -void * __stdcall reloc(u32 magic, u32 in, u32 trampoline) +void *__stdcall reloc(u32 magic, u32 in, u32 trampoline) { alloc = trampoline; - switch ( magic ) + switch (magic) { case MULTIBOOT_BOOTLOADER_MAGIC: return mbi_reloc(in); diff --git a/xen/arch/x86/bzimage.c b/xen/arch/x86/bzimage.c index ac4fd428be..9d975b5f76 100644 --- a/xen/arch/x86/bzimage.c +++ b/xen/arch/x86/bzimage.c @@ -14,44 +14,45 @@ static __init unsigned long output_length(void *image, unsigned long image_len) return *(uint32_t *)(image + image_len - 4); } -struct __packed setup_header { - uint8_t _pad0[0x1f1]; /* skip uninteresting stuff */ - uint8_t setup_sects; - uint16_t root_flags; - uint32_t syssize; - uint16_t ram_size; - uint16_t vid_mode; - uint16_t root_dev; - uint16_t boot_flag; - uint16_t jump; - uint32_t header; -#define HDR_MAGIC "HdrS" -#define HDR_MAGIC_SZ 4 - uint16_t version; -#define VERSION(h,l) (((h)<<8) | (l)) - uint32_t realmode_swtch; - uint16_t start_sys; - uint16_t kernel_version; - uint8_t type_of_loader; - uint8_t loadflags; - uint16_t setup_move_size; - uint32_t code32_start; - uint32_t ramdisk_image; - uint32_t ramdisk_size; - uint32_t bootsect_kludge; - uint16_t heap_end_ptr; - uint16_t _pad1; - uint32_t cmd_line_ptr; - uint32_t initrd_addr_max; - uint32_t kernel_alignment; - uint8_t relocatable_kernel; - uint8_t _pad2[3]; - uint32_t cmdline_size; - uint32_t hardware_subarch; - uint64_t hardware_subarch_data; - uint32_t payload_offset; - uint32_t payload_length; - }; +struct __packed setup_header +{ + uint8_t _pad0[0x1f1]; /* skip uninteresting stuff */ + uint8_t setup_sects; + uint16_t root_flags; + uint32_t syssize; + uint16_t ram_size; + uint16_t vid_mode; + uint16_t root_dev; + uint16_t boot_flag; + uint16_t jump; + uint32_t header; +#define HDR_MAGIC "HdrS" +#define HDR_MAGIC_SZ 4 + uint16_t version; +#define VERSION(h, l) (((h) << 8) | (l)) + uint32_t realmode_swtch; + uint16_t start_sys; + uint16_t kernel_version; + uint8_t type_of_loader; + uint8_t loadflags; + uint16_t setup_move_size; + uint32_t code32_start; + uint32_t ramdisk_image; + uint32_t ramdisk_size; + uint32_t bootsect_kludge; + uint16_t heap_end_ptr; + uint16_t _pad1; + uint32_t cmd_line_ptr; + uint32_t initrd_addr_max; + uint32_t kernel_alignment; + uint8_t relocatable_kernel; + uint8_t _pad2[3]; + uint32_t cmdline_size; + uint32_t hardware_subarch; + uint64_t hardware_subarch_data; + uint32_t payload_offset; + uint32_t payload_length; +}; static 
__init int bzimage_check(struct setup_header *hdr, unsigned long len) { @@ -61,9 +62,10 @@ static __init int bzimage_check(struct setup_header *hdr, unsigned long len) if ( memcmp(&hdr->header, HDR_MAGIC, HDR_MAGIC_SZ) != 0 ) return 0; - if ( hdr->version < VERSION(2,8) ) { + if ( hdr->version < VERSION(2, 8) ) + { printk("Cannot load bzImage v%d.%02d at least v2.08 is required\n", - hdr->version >> 8, hdr->version & 0xff); + hdr->version >> 8, hdr->version & 0xff); return -EINVAL; } return 1; @@ -93,11 +95,12 @@ unsigned long __init bzimage_headroom(void *image_start, orig_image_len = image_length; headroom = output_length(image_start, image_length); - if (gzip_check(image_start, image_length)) + if ( gzip_check(image_start, image_length) ) { headroom += headroom >> 12; /* Add 8 bytes for every 32K input block */ - headroom += (32768 + 18); /* Add 32K + 18 bytes of extra headroom */ - } else + headroom += (32768 + 18); /* Add 32K + 18 bytes of extra headroom */ + } + else headroom += image_length; headroom = (headroom + 4095) & ~4095; diff --git a/xen/arch/x86/compat.c b/xen/arch/x86/compat.c index a40ec295ae..258b701e99 100644 --- a/xen/arch/x86/compat.c +++ b/xen/arch/x86/compat.c @@ -1,6 +1,6 @@ /****************************************************************************** * compat.c - * + * * Implementations of legacy hypercalls. These call through to the new * hypercall after doing necessary argument munging. */ @@ -35,15 +35,15 @@ long do_sched_op_compat(int cmd, unsigned long arg) typeof(do_sched_op) *fn = (void *)pv_hypercall_table[__HYPERVISOR_sched_op].native; - switch ( cmd ) + switch (cmd) { case SCHEDOP_yield: case SCHEDOP_block: return fn(cmd, guest_handle_from_ptr(NULL, void)); case SCHEDOP_shutdown: - TRACE_3D(TRC_SCHED_SHUTDOWN, - current->domain->domain_id, current->vcpu_id, arg); + TRACE_3D(TRC_SCHED_SHUTDOWN, current->domain->domain_id, + current->vcpu_id, arg); domain_shutdown(current->domain, (u8)arg); break; @@ -64,7 +64,7 @@ long do_event_channel_op_compat(XEN_GUEST_HANDLE_PARAM(evtchn_op_t) uop) if ( unlikely(copy_from_guest(&op, uop, 1) != 0) ) return -EFAULT; - switch ( op.cmd ) + switch (op.cmd) { case EVTCHNOP_bind_interdomain: case EVTCHNOP_bind_virq: diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c index c790416f8d..26f9f77676 100644 --- a/xen/arch/x86/cpu/amd.c +++ b/xen/arch/x86/cpu/amd.c @@ -15,11 +15,11 @@ #include "cpu.h" /* - * Pre-canned values for overriding the CPUID features + * Pre-canned values for overriding the CPUID features * and extended features masks. 
* * Currently supported processors: - * + * * "fam_0f_rev_c" * "fam_0f_rev_d" * "fam_0f_rev_e" @@ -48,90 +48,82 @@ boolean_param("allow_unsafe", opt_allow_unsafe); bool __read_mostly amd_acpi_c1e_quirk; static inline int rdmsr_amd_safe(unsigned int msr, unsigned int *lo, - unsigned int *hi) + unsigned int *hi) { - int err; - - asm volatile("1: rdmsr\n2:\n" - ".section .fixup,\"ax\"\n" - "3: movl %6,%2\n" - " jmp 2b\n" - ".previous\n" - _ASM_EXTABLE(1b, 3b) - : "=a" (*lo), "=d" (*hi), "=r" (err) - : "c" (msr), "D" (0x9c5a203a), "2" (0), "i" (-EFAULT)); - - return err; + int err; + + asm volatile("1: rdmsr\n2:\n" + ".section .fixup,\"ax\"\n" + "3: movl %6,%2\n" + " jmp 2b\n" + ".previous\n" _ASM_EXTABLE(1b, 3b) + : "=a"(*lo), "=d"(*hi), "=r"(err) + : "c"(msr), "D"(0x9c5a203a), "2"(0), "i"(-EFAULT)); + + return err; } static inline int wrmsr_amd_safe(unsigned int msr, unsigned int lo, - unsigned int hi) + unsigned int hi) { - int err; - - asm volatile("1: wrmsr\n2:\n" - ".section .fixup,\"ax\"\n" - "3: movl %6,%0\n" - " jmp 2b\n" - ".previous\n" - _ASM_EXTABLE(1b, 3b) - : "=r" (err) - : "c" (msr), "a" (lo), "d" (hi), "D" (0x9c5a203a), - "0" (0), "i" (-EFAULT)); - - return err; + int err; + + asm volatile("1: wrmsr\n2:\n" + ".section .fixup,\"ax\"\n" + "3: movl %6,%0\n" + " jmp 2b\n" + ".previous\n" _ASM_EXTABLE(1b, 3b) + : "=r"(err) + : "c"(msr), "a"(lo), "d"(hi), "D"(0x9c5a203a), "0"(0), + "i"(-EFAULT)); + + return err; } static void wrmsr_amd(unsigned int msr, uint64_t val) { - asm volatile("wrmsr" :: - "c" (msr), "a" ((uint32_t)val), - "d" (val >> 32), "D" (0x9c5a203a)); + asm volatile("wrmsr" ::"c"(msr), "a"((uint32_t)val), "d"(val >> 32), + "D"(0x9c5a203a)); } -static const struct cpuidmask { - uint16_t fam; - char rev[2]; - unsigned int ecx, edx, ext_ecx, ext_edx; +static const struct cpuidmask +{ + uint16_t fam; + char rev[2]; + unsigned int ecx, edx, ext_ecx, ext_edx; } pre_canned[] __initconst = { -#define CAN(fam, id, rev) { \ - fam, #rev, \ - AMD_FEATURES_##id##_REV_##rev##_ECX, \ - AMD_FEATURES_##id##_REV_##rev##_EDX, \ - AMD_EXTFEATURES_##id##_REV_##rev##_ECX, \ - AMD_EXTFEATURES_##id##_REV_##rev##_EDX \ - } +#define CAN(fam, id, rev) \ + { \ + fam, #rev, AMD_FEATURES_##id##_REV_##rev##_ECX, \ + AMD_FEATURES_##id##_REV_##rev##_EDX, \ + AMD_EXTFEATURES_##id##_REV_##rev##_ECX, \ + AMD_EXTFEATURES_##id##_REV_##rev##_EDX \ + } #define CAN_FAM(fam, rev) CAN(0x##fam, FAM##fam##h, rev) -#define CAN_K8(rev) CAN(0x0f, K8, rev) - CAN_FAM(11, B), - CAN_FAM(10, C), - CAN_FAM(10, B), - CAN_K8(G), - CAN_K8(F), - CAN_K8(E), - CAN_K8(D), - CAN_K8(C) +#define CAN_K8(rev) CAN(0x0f, K8, rev) + CAN_FAM(11, B), CAN_FAM(10, C), CAN_FAM(10, B), CAN_K8(G), CAN_K8(F), + CAN_K8(E), CAN_K8(D), CAN_K8(C) #undef CAN }; static const struct cpuidmask *__init noinline get_cpuidmask(const char *opt) { - unsigned long fam; - char rev; - unsigned int i; - - if (strncmp(opt, "fam_", 4)) - return NULL; - fam = simple_strtoul(opt + 4, &opt, 16); - if (strncmp(opt, "_rev_", 5) || !opt[5] || opt[6]) - return NULL; - rev = toupper(opt[5]); - - for (i = 0; i < ARRAY_SIZE(pre_canned); ++i) - if (fam == pre_canned[i].fam && rev == *pre_canned[i].rev) - return &pre_canned[i]; - - return NULL; + unsigned long fam; + char rev; + unsigned int i; + + if ( strncmp(opt, "fam_", 4) ) + return NULL; + fam = simple_strtoul(opt + 4, &opt, 16); + if ( strncmp(opt, "_rev_", 5) || !opt[5] || opt[6] ) + return NULL; + rev = toupper(opt[5]); + + for ( i = 0; i < ARRAY_SIZE(pre_canned); ++i ) + if ( fam == pre_canned[i].fam && rev == 
*pre_canned[i].rev ) + return &pre_canned[i]; + + return NULL; } /* @@ -141,15 +133,15 @@ static const struct cpuidmask *__init noinline get_cpuidmask(const char *opt) */ static uint64_t __init _probe_mask_msr(unsigned int msr, uint64_t caps) { - unsigned int hi, lo; + unsigned int hi, lo; - expected_levelling_cap |= caps; + expected_levelling_cap |= caps; - if ((rdmsr_amd_safe(msr, &lo, &hi) == 0) && - (wrmsr_amd_safe(msr, lo, hi) == 0)) - levelling_caps |= caps; + if ( (rdmsr_amd_safe(msr, &lo, &hi) == 0) && + (wrmsr_amd_safe(msr, lo, hi) == 0) ) + levelling_caps |= caps; - return ((uint64_t)hi << 32) | lo; + return ((uint64_t)hi << 32) | lo; } /* @@ -158,46 +150,43 @@ static uint64_t __init _probe_mask_msr(unsigned int msr, uint64_t caps) */ static void __init noinline probe_masking_msrs(void) { - const struct cpuinfo_x86 *c = &boot_cpu_data; - - /* - * First, work out which masking MSRs we should have, based on - * revision and cpuid. - */ - - /* Fam11 doesn't support masking at all. */ - if (c->x86 == 0x11) - return; - - cpuidmask_defaults._1cd = - _probe_mask_msr(MSR_K8_FEATURE_MASK, LCAP_1cd); - cpuidmask_defaults.e1cd = - _probe_mask_msr(MSR_K8_EXT_FEATURE_MASK, LCAP_e1cd); - - if (c->cpuid_level >= 7) - cpuidmask_defaults._7ab0 = - _probe_mask_msr(MSR_AMD_L7S0_FEATURE_MASK, LCAP_7ab0); - - if (c->x86 == 0x15 && c->cpuid_level >= 6 && cpuid_ecx(6)) - cpuidmask_defaults._6c = - _probe_mask_msr(MSR_AMD_THRM_FEATURE_MASK, LCAP_6c); - - /* - * Don't bother warning about a mismatch if virtualised. These MSRs - * are not architectural and almost never virtualised. - */ - if ((expected_levelling_cap == levelling_caps) || - cpu_has_hypervisor) - return; - - printk(XENLOG_WARNING "Mismatch between expected (%#x) " - "and real (%#x) levelling caps: missing %#x\n", - expected_levelling_cap, levelling_caps, - (expected_levelling_cap ^ levelling_caps) & levelling_caps); - printk(XENLOG_WARNING "Fam %#x, model %#x level %#x\n", - c->x86, c->x86_model, c->cpuid_level); - printk(XENLOG_WARNING - "If not running virtualised, please report a bug\n"); + const struct cpuinfo_x86 *c = &boot_cpu_data; + + /* + * First, work out which masking MSRs we should have, based on + * revision and cpuid. + */ + + /* Fam11 doesn't support masking at all. */ + if ( c->x86 == 0x11 ) + return; + + cpuidmask_defaults._1cd = _probe_mask_msr(MSR_K8_FEATURE_MASK, LCAP_1cd); + cpuidmask_defaults.e1cd = + _probe_mask_msr(MSR_K8_EXT_FEATURE_MASK, LCAP_e1cd); + + if ( c->cpuid_level >= 7 ) + cpuidmask_defaults._7ab0 = + _probe_mask_msr(MSR_AMD_L7S0_FEATURE_MASK, LCAP_7ab0); + + if ( c->x86 == 0x15 && c->cpuid_level >= 6 && cpuid_ecx(6) ) + cpuidmask_defaults._6c = + _probe_mask_msr(MSR_AMD_THRM_FEATURE_MASK, LCAP_6c); + + /* + * Don't bother warning about a mismatch if virtualised. These MSRs + * are not architectural and almost never virtualised. 
+ */ + if ( (expected_levelling_cap == levelling_caps) || cpu_has_hypervisor ) + return; + + printk(XENLOG_WARNING "Mismatch between expected (%#x) " + "and real (%#x) levelling caps: missing %#x\n", + expected_levelling_cap, levelling_caps, + (expected_levelling_cap ^ levelling_caps) & levelling_caps); + printk(XENLOG_WARNING "Fam %#x, model %#x level %#x\n", c->x86, + c->x86_model, c->cpuid_level); + printk(XENLOG_WARNING "If not running virtualised, please report a bug\n"); } /* @@ -208,43 +197,46 @@ static void __init noinline probe_masking_msrs(void) */ static void amd_ctxt_switch_masking(const struct vcpu *next) { - struct cpuidmasks *these_masks = &this_cpu(cpuidmasks); - const struct domain *nextd = next ? next->domain : NULL; - const struct cpuidmasks *masks = - (nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks) - ? nextd->arch.pv.cpuidmasks : &cpuidmask_defaults; - - if ((levelling_caps & LCAP_1cd) == LCAP_1cd) { - uint64_t val = masks->_1cd; - - /* - * OSXSAVE defaults to 1, which causes fast-forwarding of - * Xen's real setting. Clobber it if disabled by the guest - * kernel. - */ - if (next && is_pv_vcpu(next) && !is_idle_vcpu(next) && - !(next->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE)) - val &= ~((uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE) << 32); - - if (unlikely(these_masks->_1cd != val)) { - wrmsr_amd(MSR_K8_FEATURE_MASK, val); - these_masks->_1cd = val; - } - } - -#define LAZY(cap, msr, field) \ - ({ \ - if (unlikely(these_masks->field != masks->field) && \ - ((levelling_caps & cap) == cap)) \ - { \ - wrmsr_amd(msr, masks->field); \ - these_masks->field = masks->field; \ - } \ - }) - - LAZY(LCAP_e1cd, MSR_K8_EXT_FEATURE_MASK, e1cd); - LAZY(LCAP_7ab0, MSR_AMD_L7S0_FEATURE_MASK, _7ab0); - LAZY(LCAP_6c, MSR_AMD_THRM_FEATURE_MASK, _6c); + struct cpuidmasks *these_masks = &this_cpu(cpuidmasks); + const struct domain *nextd = next ? next->domain : NULL; + const struct cpuidmasks *masks = + (nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks) + ? nextd->arch.pv.cpuidmasks + : &cpuidmask_defaults; + + if ( (levelling_caps & LCAP_1cd) == LCAP_1cd ) + { + uint64_t val = masks->_1cd; + + /* + * OSXSAVE defaults to 1, which causes fast-forwarding of + * Xen's real setting. Clobber it if disabled by the guest + * kernel. 
+ */ + if ( next && is_pv_vcpu(next) && !is_idle_vcpu(next) && + !(next->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE) ) + val &= ~((uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE) << 32); + + if ( unlikely(these_masks->_1cd != val) ) + { + wrmsr_amd(MSR_K8_FEATURE_MASK, val); + these_masks->_1cd = val; + } + } + +#define LAZY(cap, msr, field) \ + ({ \ + if ( unlikely(these_masks->field != masks->field) && \ + ((levelling_caps & cap) == cap) ) \ + { \ + wrmsr_amd(msr, masks->field); \ + these_masks->field = masks->field; \ + } \ + }) + + LAZY(LCAP_e1cd, MSR_K8_EXT_FEATURE_MASK, e1cd); + LAZY(LCAP_7ab0, MSR_AMD_L7S0_FEATURE_MASK, _7ab0); + LAZY(LCAP_6c, MSR_AMD_THRM_FEATURE_MASK, _6c); #undef LAZY } @@ -265,542 +257,576 @@ static void amd_ctxt_switch_masking(const struct vcpu *next) */ static void __init noinline amd_init_levelling(void) { - const struct cpuidmask *m = NULL; - - if (probe_cpuid_faulting()) - return; - - probe_masking_msrs(); - - if (*opt_famrev != '\0') { - m = get_cpuidmask(opt_famrev); - - if (!m) - printk("Invalid processor string: %s\n", opt_famrev); - } + const struct cpuidmask *m = NULL; - if ((levelling_caps & LCAP_1cd) == LCAP_1cd) { - uint32_t ecx, edx, tmp; + if ( probe_cpuid_faulting() ) + return; - cpuid(0x00000001, &tmp, &tmp, &ecx, &edx); + probe_masking_msrs(); - if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx)) { - ecx &= opt_cpuid_mask_ecx; - edx &= opt_cpuid_mask_edx; - } else if (m) { - ecx &= m->ecx; - edx &= m->edx; - } + if ( *opt_famrev != '\0' ) + { + m = get_cpuidmask(opt_famrev); - /* Fast-forward bits - Must be set. */ - if (ecx & cpufeat_mask(X86_FEATURE_XSAVE)) - ecx |= cpufeat_mask(X86_FEATURE_OSXSAVE); - edx |= cpufeat_mask(X86_FEATURE_APIC); + if ( !m ) + printk("Invalid processor string: %s\n", opt_famrev); + } - /* Allow the HYPERVISOR bit to be set via guest policy. */ - ecx |= cpufeat_mask(X86_FEATURE_HYPERVISOR); + if ( (levelling_caps & LCAP_1cd) == LCAP_1cd ) + { + uint32_t ecx, edx, tmp; - cpuidmask_defaults._1cd = ((uint64_t)ecx << 32) | edx; - } + cpuid(0x00000001, &tmp, &tmp, &ecx, &edx); - if ((levelling_caps & LCAP_e1cd) == LCAP_e1cd) { - uint32_t ecx, edx, tmp; - - cpuid(0x80000001, &tmp, &tmp, &ecx, &edx); + if ( ~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx) ) + { + ecx &= opt_cpuid_mask_ecx; + edx &= opt_cpuid_mask_edx; + } + else if ( m ) + { + ecx &= m->ecx; + edx &= m->edx; + } - if (~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx)) { - ecx &= opt_cpuid_mask_ext_ecx; - edx &= opt_cpuid_mask_ext_edx; - } else if (m) { - ecx &= m->ext_ecx; - edx &= m->ext_edx; - } + /* Fast-forward bits - Must be set. */ + if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) ) + ecx |= cpufeat_mask(X86_FEATURE_OSXSAVE); + edx |= cpufeat_mask(X86_FEATURE_APIC); - /* Fast-forward bits - Must be set. */ - edx |= cpufeat_mask(X86_FEATURE_APIC); + /* Allow the HYPERVISOR bit to be set via guest policy. 
*/ + ecx |= cpufeat_mask(X86_FEATURE_HYPERVISOR); - cpuidmask_defaults.e1cd = ((uint64_t)ecx << 32) | edx; - } + cpuidmask_defaults._1cd = ((uint64_t)ecx << 32) | edx; + } - if ((levelling_caps & LCAP_7ab0) == LCAP_7ab0) { - uint32_t eax, ebx, tmp; + if ( (levelling_caps & LCAP_e1cd) == LCAP_e1cd ) + { + uint32_t ecx, edx, tmp; - cpuid(0x00000007, &eax, &ebx, &tmp, &tmp); + cpuid(0x80000001, &tmp, &tmp, &ecx, &edx); - if (~(opt_cpuid_mask_l7s0_eax & opt_cpuid_mask_l7s0_ebx)) { - eax &= opt_cpuid_mask_l7s0_eax; - ebx &= opt_cpuid_mask_l7s0_ebx; - } + if ( ~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx) ) + { + ecx &= opt_cpuid_mask_ext_ecx; + edx &= opt_cpuid_mask_ext_edx; + } + else if ( m ) + { + ecx &= m->ext_ecx; + edx &= m->ext_edx; + } - cpuidmask_defaults._7ab0 &= ((uint64_t)eax << 32) | ebx; - } + /* Fast-forward bits - Must be set. */ + edx |= cpufeat_mask(X86_FEATURE_APIC); - if ((levelling_caps & LCAP_6c) == LCAP_6c) { - uint32_t ecx = cpuid_ecx(6); + cpuidmask_defaults.e1cd = ((uint64_t)ecx << 32) | edx; + } - if (~opt_cpuid_mask_thermal_ecx) - ecx &= opt_cpuid_mask_thermal_ecx; + if ( (levelling_caps & LCAP_7ab0) == LCAP_7ab0 ) + { + uint32_t eax, ebx, tmp; - cpuidmask_defaults._6c &= (~0ULL << 32) | ecx; - } + cpuid(0x00000007, &eax, &ebx, &tmp, &tmp); - if (opt_cpu_info) { - printk(XENLOG_INFO "Levelling caps: %#x\n", levelling_caps); - printk(XENLOG_INFO - "MSR defaults: 1d 0x%08x, 1c 0x%08x, e1d 0x%08x, " - "e1c 0x%08x, 7a0 0x%08x, 7b0 0x%08x, 6c 0x%08x\n", - (uint32_t)cpuidmask_defaults._1cd, - (uint32_t)(cpuidmask_defaults._1cd >> 32), - (uint32_t)cpuidmask_defaults.e1cd, - (uint32_t)(cpuidmask_defaults.e1cd >> 32), - (uint32_t)(cpuidmask_defaults._7ab0 >> 32), - (uint32_t)cpuidmask_defaults._7ab0, - (uint32_t)cpuidmask_defaults._6c); - } + if ( ~(opt_cpuid_mask_l7s0_eax & opt_cpuid_mask_l7s0_ebx) ) + { + eax &= opt_cpuid_mask_l7s0_eax; + ebx &= opt_cpuid_mask_l7s0_ebx; + } - if (levelling_caps) - ctxt_switch_masking = amd_ctxt_switch_masking; + cpuidmask_defaults._7ab0 &= ((uint64_t)eax << 32) | ebx; + } + + if ( (levelling_caps & LCAP_6c) == LCAP_6c ) + { + uint32_t ecx = cpuid_ecx(6); + + if ( ~opt_cpuid_mask_thermal_ecx ) + ecx &= opt_cpuid_mask_thermal_ecx; + + cpuidmask_defaults._6c &= (~0ULL << 32) | ecx; + } + + if ( opt_cpu_info ) + { + printk(XENLOG_INFO "Levelling caps: %#x\n", levelling_caps); + printk(XENLOG_INFO "MSR defaults: 1d 0x%08x, 1c 0x%08x, e1d 0x%08x, " + "e1c 0x%08x, 7a0 0x%08x, 7b0 0x%08x, 6c 0x%08x\n", + (uint32_t)cpuidmask_defaults._1cd, + (uint32_t)(cpuidmask_defaults._1cd >> 32), + (uint32_t)cpuidmask_defaults.e1cd, + (uint32_t)(cpuidmask_defaults.e1cd >> 32), + (uint32_t)(cpuidmask_defaults._7ab0 >> 32), + (uint32_t)cpuidmask_defaults._7ab0, + (uint32_t)cpuidmask_defaults._6c); + } + + if ( levelling_caps ) + ctxt_switch_masking = amd_ctxt_switch_masking; } /* - * Check for the presence of an AMD erratum. Arguments are defined in amd.h + * Check for the presence of an AMD erratum. Arguments are defined in amd.h * for each known erratum. Return 1 if erratum is found. */ int cpu_has_amd_erratum(const struct cpuinfo_x86 *cpu, int osvw_id, ...) 
{ - va_list ap; - u32 range; - u32 ms; - - if (cpu->x86_vendor != X86_VENDOR_AMD) - return 0; - - if (osvw_id >= 0 && cpu_has(cpu, X86_FEATURE_OSVW)) { - u64 osvw_len; - - rdmsrl(MSR_AMD_OSVW_ID_LENGTH, osvw_len); - - if (osvw_id < osvw_len) { - u64 osvw_bits; - - rdmsrl(MSR_AMD_OSVW_STATUS + (osvw_id >> 6), - osvw_bits); - - return (osvw_bits >> (osvw_id & 0x3f)) & 1; - } - } - - /* OSVW unavailable or ID unknown, match family-model-stepping range */ - va_start(ap, osvw_id); - - ms = (cpu->x86_model << 4) | cpu->x86_mask; - while ((range = va_arg(ap, int))) { - if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && - (ms >= AMD_MODEL_RANGE_START(range)) && - (ms <= AMD_MODEL_RANGE_END(range))) { - va_end(ap); - return 1; - } - } - - va_end(ap); - return 0; + va_list ap; + u32 range; + u32 ms; + + if ( cpu->x86_vendor != X86_VENDOR_AMD ) + return 0; + + if ( osvw_id >= 0 && cpu_has(cpu, X86_FEATURE_OSVW) ) + { + u64 osvw_len; + + rdmsrl(MSR_AMD_OSVW_ID_LENGTH, osvw_len); + + if ( osvw_id < osvw_len ) + { + u64 osvw_bits; + + rdmsrl(MSR_AMD_OSVW_STATUS + (osvw_id >> 6), osvw_bits); + + return (osvw_bits >> (osvw_id & 0x3f)) & 1; + } + } + + /* OSVW unavailable or ID unknown, match family-model-stepping range */ + va_start(ap, osvw_id); + + ms = (cpu->x86_model << 4) | cpu->x86_mask; + while ( (range = va_arg(ap, int)) ) + { + if ( (cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && + (ms >= AMD_MODEL_RANGE_START(range)) && + (ms <= AMD_MODEL_RANGE_END(range)) ) + { + va_end(ap); + return 1; + } + } + + va_end(ap); + return 0; } /* * Disable C1-Clock ramping if enabled in PMM7.CpuLowPwrEnh on 8th-generation * cores only. Assume BIOS has setup all Northbridges equivalently. */ -static void disable_c1_ramping(void) +static void disable_c1_ramping(void) { - u8 pmm7; - int node, nr_nodes; - - /* Read the number of nodes from the first Northbridge. */ - nr_nodes = ((pci_conf_read32(0, 0, 0x18, 0x0, 0x60)>>4)&0x07)+1; - for (node = 0; node < nr_nodes; node++) { - /* PMM7: bus=0, dev=0x18+node, function=0x3, register=0x87. */ - pmm7 = pci_conf_read8(0, 0, 0x18+node, 0x3, 0x87); - /* Invalid read means we've updated every Northbridge. */ - if (pmm7 == 0xFF) - break; - pmm7 &= 0xFC; /* clear pmm7[1:0] */ - pci_conf_write8(0, 0, 0x18+node, 0x3, 0x87, pmm7); - printk ("AMD: Disabling C1 Clock Ramping Node #%x\n", node); - } + u8 pmm7; + int node, nr_nodes; + + /* Read the number of nodes from the first Northbridge. */ + nr_nodes = ((pci_conf_read32(0, 0, 0x18, 0x0, 0x60) >> 4) & 0x07) + 1; + for ( node = 0; node < nr_nodes; node++ ) + { + /* PMM7: bus=0, dev=0x18+node, function=0x3, register=0x87. */ + pmm7 = pci_conf_read8(0, 0, 0x18 + node, 0x3, 0x87); + /* Invalid read means we've updated every Northbridge. */ + if ( pmm7 == 0xFF ) + break; + pmm7 &= 0xFC; /* clear pmm7[1:0] */ + pci_conf_write8(0, 0, 0x18 + node, 0x3, 0x87, pmm7); + printk("AMD: Disabling C1 Clock Ramping Node #%x\n", node); + } } static void disable_c1e(void *unused) { - uint64_t msr_content; - - /* - * Disable C1E mode, as the APIC timer stops in that mode. - * The MSR does not exist in all FamilyF CPUs (only Rev F and above), - * but we safely catch the #GP in that case. - */ - if ((rdmsr_safe(MSR_K8_ENABLE_C1E, msr_content) == 0) && - (msr_content & (3ULL << 27)) && - (wrmsr_safe(MSR_K8_ENABLE_C1E, msr_content & ~(3ULL << 27)) != 0)) - printk(KERN_ERR "Failed to disable C1E on CPU#%u (%16"PRIx64")\n", - smp_processor_id(), msr_content); + uint64_t msr_content; + + /* + * Disable C1E mode, as the APIC timer stops in that mode. 
+ * The MSR does not exist in all FamilyF CPUs (only Rev F and above), + * but we safely catch the #GP in that case. + */ + if ( (rdmsr_safe(MSR_K8_ENABLE_C1E, msr_content) == 0) && + (msr_content & (3ULL << 27)) && + (wrmsr_safe(MSR_K8_ENABLE_C1E, msr_content & ~(3ULL << 27)) != 0) ) + printk(KERN_ERR "Failed to disable C1E on CPU#%u (%16" PRIx64 ")\n", + smp_processor_id(), msr_content); } void amd_check_disable_c1e(unsigned int port, u8 value) { - /* C1E is sometimes enabled during entry to ACPI mode. */ - if ((port == acpi_smi_cmd) && (value == acpi_enable_value)) - on_each_cpu(disable_c1e, NULL, 1); + /* C1E is sometimes enabled during entry to ACPI mode. */ + if ( (port == acpi_smi_cmd) && (value == acpi_enable_value) ) + on_each_cpu(disable_c1e, NULL, 1); } /* - * BIOS is expected to clear MtrrFixDramModEn bit. According to AMD BKDG : - * "The MtrrFixDramModEn bit should be set to 1 during BIOS initalization of + * BIOS is expected to clear MtrrFixDramModEn bit. According to AMD BKDG : + * "The MtrrFixDramModEn bit should be set to 1 during BIOS initalization of * the fixed MTRRs, then cleared to 0 for operation." */ static void check_syscfg_dram_mod_en(void) { - uint64_t syscfg; - static bool_t printed = 0; + uint64_t syscfg; + static bool_t printed = 0; - if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && - (boot_cpu_data.x86 >= 0x0f))) - return; + if ( !((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && + (boot_cpu_data.x86 >= 0x0f)) ) + return; - rdmsrl(MSR_K8_SYSCFG, syscfg); - if (!(syscfg & K8_MTRRFIXRANGE_DRAM_MODIFY)) - return; + rdmsrl(MSR_K8_SYSCFG, syscfg); + if ( !(syscfg & K8_MTRRFIXRANGE_DRAM_MODIFY) ) + return; - if (!test_and_set_bool(printed)) - printk(KERN_ERR "MTRR: SYSCFG[MtrrFixDramModEn] not " - "cleared by BIOS, clearing this bit\n"); + if ( !test_and_set_bool(printed) ) + printk(KERN_ERR "MTRR: SYSCFG[MtrrFixDramModEn] not " + "cleared by BIOS, clearing this bit\n"); - syscfg &= ~K8_MTRRFIXRANGE_DRAM_MODIFY; - wrmsrl(MSR_K8_SYSCFG, syscfg); + syscfg &= ~K8_MTRRFIXRANGE_DRAM_MODIFY; + wrmsrl(MSR_K8_SYSCFG, syscfg); } static void amd_get_topology(struct cpuinfo_x86 *c) { - int cpu; - unsigned bits; - - if (c->x86_max_cores <= 1) - return; - /* - * On a AMD multi core setup the lower bits of the APIC id - * distingush the cores. - */ - cpu = smp_processor_id(); - bits = (cpuid_ecx(0x80000008) >> 12) & 0xf; - - if (bits == 0) { - while ((1 << bits) < c->x86_max_cores) - bits++; + int cpu; + unsigned bits; + + if ( c->x86_max_cores <= 1 ) + return; + /* + * On a AMD multi core setup the lower bits of the APIC id + * distingush the cores. 
+ */ + cpu = smp_processor_id(); + bits = (cpuid_ecx(0x80000008) >> 12) & 0xf; + + if ( bits == 0 ) + { + while ( (1 << bits) < c->x86_max_cores ) + bits++; + } + + /* Low order bits define the core id */ + c->cpu_core_id = c->phys_proc_id & ((1 << bits) - 1); + /* Convert local APIC ID into the socket ID */ + c->phys_proc_id >>= bits; + /* Collect compute unit ID if available */ + if ( cpu_has(c, X86_FEATURE_TOPOEXT) ) + { + u32 eax, ebx, ecx, edx; + + cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); + c->x86_num_siblings = ((ebx >> 8) & 0x3) + 1; + + if ( c->x86 < 0x17 ) + c->compute_unit_id = ebx & 0xFF; + else + { + c->cpu_core_id = ebx & 0xFF; + c->x86_max_cores /= c->x86_num_siblings; } - - /* Low order bits define the core id */ - c->cpu_core_id = c->phys_proc_id & ((1<phys_proc_id >>= bits; - /* Collect compute unit ID if available */ - if (cpu_has(c, X86_FEATURE_TOPOEXT)) { - u32 eax, ebx, ecx, edx; - - cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); - c->x86_num_siblings = ((ebx >> 8) & 0x3) + 1; - - if (c->x86 < 0x17) - c->compute_unit_id = ebx & 0xFF; - else { - c->cpu_core_id = ebx & 0xFF; - c->x86_max_cores /= c->x86_num_siblings; - } - } - - if (opt_cpu_info) - printk("CPU %d(%d) -> Processor %d, %s %d\n", - cpu, c->x86_max_cores, c->phys_proc_id, - c->compute_unit_id != INVALID_CUID ? "Compute Unit" - : "Core", - c->compute_unit_id != INVALID_CUID ? c->compute_unit_id - : c->cpu_core_id); + } + + if ( opt_cpu_info ) + printk("CPU %d(%d) -> Processor %d, %s %d\n", cpu, c->x86_max_cores, + c->phys_proc_id, + c->compute_unit_id != INVALID_CUID ? "Compute Unit" : "Core", + c->compute_unit_id != INVALID_CUID ? c->compute_unit_id + : c->cpu_core_id); } static void early_init_amd(struct cpuinfo_x86 *c) { - if (c == &boot_cpu_data) - amd_init_levelling(); + if ( c == &boot_cpu_data ) + amd_init_levelling(); - ctxt_switch_levelling(NULL); + ctxt_switch_levelling(NULL); } static void init_amd(struct cpuinfo_x86 *c) { - u32 l, h; - - unsigned long long value; - - /* Disable TLB flush filter by setting HWCR.FFDIS on K8 - * bit 6 of msr C001_0015 - * - * Errata 63 for SH-B3 steppings - * Errata 122 for all steppings (F+ have it disabled by default) - */ - if (c->x86 == 15) { - rdmsrl(MSR_K7_HWCR, value); - value |= 1 << 6; - wrmsrl(MSR_K7_HWCR, value); - } - - /* - * Some AMD CPUs duplicate the 3DNow bit in base and extended CPUID - * leaves. Unfortunately, this aliases PBE on Intel CPUs. Clobber the - * alias, leaving 3DNow in the extended leaf. - */ - __clear_bit(X86_FEATURE_PBE, c->x86_capability); - - if (c->x86 == 0xf && c->x86_model < 0x14 - && cpu_has(c, X86_FEATURE_LAHF_LM)) { - /* - * Some BIOSes incorrectly force this feature, but only K8 - * revision D (model = 0x14) and later actually support it. - * (AMD Erratum #110, docId: 25759). - */ - __clear_bit(X86_FEATURE_LAHF_LM, c->x86_capability); - if (!rdmsr_amd_safe(0xc001100d, &l, &h)) - wrmsr_amd_safe(0xc001100d, l, h & ~1); - } - - /* - * Attempt to set lfence to be Dispatch Serialising. This MSR almost - * certainly isn't virtualised (and Xen at least will leak the real - * value in but silently discard writes), as well as being per-core - * rather than per-thread, so do a full safe read/write/readback cycle - * in the worst case. - */ - if (c->x86 == 0x0f || c->x86 == 0x11) - /* Always dispatch serialising on this hardare. */ - __set_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability); - else /* Implicily "== 0x10 || >= 0x12" by being 64bit. */ { - if (rdmsr_safe(MSR_AMD64_DE_CFG, value)) - /* Unable to read. 
Assume the safer default. */ - __clear_bit(X86_FEATURE_LFENCE_DISPATCH, - c->x86_capability); - else if (value & AMD64_DE_CFG_LFENCE_SERIALISE) - /* Already dispatch serialising. */ - __set_bit(X86_FEATURE_LFENCE_DISPATCH, - c->x86_capability); - else if (wrmsr_safe(MSR_AMD64_DE_CFG, - value | AMD64_DE_CFG_LFENCE_SERIALISE) || - rdmsr_safe(MSR_AMD64_DE_CFG, value) || - !(value & AMD64_DE_CFG_LFENCE_SERIALISE)) - /* Attempt to set failed. Assume the safer default. */ - __clear_bit(X86_FEATURE_LFENCE_DISPATCH, - c->x86_capability); - else - /* Successfully enabled! */ - __set_bit(X86_FEATURE_LFENCE_DISPATCH, - c->x86_capability); - } - - /* - * If the user has explicitly chosen to disable Memory Disambiguation - * to mitigiate Speculative Store Bypass, poke the appropriate MSR. - */ - if (opt_ssbd) { - int bit = -1; - - switch (c->x86) { - case 0x15: bit = 54; break; - case 0x16: bit = 33; break; - case 0x17: bit = 10; break; - } - - if (bit >= 0 && !rdmsr_safe(MSR_AMD64_LS_CFG, value)) { - value |= 1ull << bit; - wrmsr_safe(MSR_AMD64_LS_CFG, value); - } - } - - /* MFENCE stops RDTSC speculation */ - if (!cpu_has_lfence_dispatch) - __set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability); - - switch(c->x86) - { - case 0xf ... 0x17: - disable_c1e(NULL); - if (acpi_smi_cmd && (acpi_enable_value | acpi_disable_value)) - amd_acpi_c1e_quirk = true; - break; - } - - display_cacheinfo(c); - - if (c->extended_cpuid_level >= 0x80000008) { - c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; - } - - if (c->extended_cpuid_level >= 0x80000007) { - if (cpu_has(c, X86_FEATURE_ITSC)) { - __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); - __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability); - if (c->x86 != 0x11) - __set_bit(X86_FEATURE_TSC_RELIABLE, - c->x86_capability); - } - } - - /* re-enable TopologyExtensions if switched off by BIOS */ - if ((c->x86 == 0x15) && - (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && - !cpu_has(c, X86_FEATURE_TOPOEXT) && - !rdmsr_safe(MSR_K8_EXT_FEATURE_MASK, value)) { - value |= 1ULL << 54; - wrmsr_safe(MSR_K8_EXT_FEATURE_MASK, value); - rdmsrl(MSR_K8_EXT_FEATURE_MASK, value); - if (value & (1ULL << 54)) { - __set_bit(X86_FEATURE_TOPOEXT, c->x86_capability); - printk(KERN_INFO "CPU: Re-enabling disabled " - "Topology Extensions Support\n"); - } - } - - /* - * The way access filter has a performance penalty on some workloads. - * Disable it on the affected CPUs. - */ - if (c->x86 == 0x15 && c->x86_model >= 0x02 && c->x86_model < 0x20 && - !rdmsr_safe(MSR_AMD64_IC_CFG, value) && (value & 0x1e) != 0x1e) - wrmsr_safe(MSR_AMD64_IC_CFG, value | 0x1e); - - amd_get_topology(c); - - /* Pointless to use MWAIT on Family10 as it does not deep sleep. */ - if (c->x86 == 0x10) - __clear_bit(X86_FEATURE_MONITOR, c->x86_capability); - - if (!cpu_has_amd_erratum(c, AMD_ERRATUM_121)) - opt_allow_unsafe = 1; - else if (opt_allow_unsafe < 0) - panic("Xen will not boot on this CPU for security reasons" - "Pass \"allow_unsafe\" if you're trusting all your" - " (PV) guest kernels.\n"); - else if (!opt_allow_unsafe && c == &boot_cpu_data) - printk(KERN_WARNING - "*** Xen will not allow creation of DomU-s on" - " this CPU for security reasons. ***\n" - KERN_WARNING - "*** Pass \"allow_unsafe\" if you're trusting" - " all your (PV) guest kernels. 
***\n"); - - if (c->x86 == 0x16 && c->x86_model <= 0xf) { - if (c == &boot_cpu_data) { - l = pci_conf_read32(0, 0, 0x18, 0x3, 0x58); - h = pci_conf_read32(0, 0, 0x18, 0x3, 0x5c); - if ((l & 0x1f) | (h & 0x1)) - printk(KERN_WARNING - "Applying workaround for erratum 792: %s%s%s\n", - (l & 0x1f) ? "clearing D18F3x58[4:0]" : "", - ((l & 0x1f) && (h & 0x1)) ? " and " : "", - (h & 0x1) ? "clearing D18F3x5C[0]" : ""); - - if (l & 0x1f) - pci_conf_write32(0, 0, 0x18, 0x3, 0x58, - l & ~0x1f); - - if (h & 0x1) - pci_conf_write32(0, 0, 0x18, 0x3, 0x5c, - h & ~0x1); - } - - rdmsrl(MSR_AMD64_LS_CFG, value); - if (!(value & (1 << 15))) { - static bool_t warned; - - if (c == &boot_cpu_data || opt_cpu_info || - !test_and_set_bool(warned)) - printk(KERN_WARNING - "CPU%u: Applying workaround for erratum 793\n", - smp_processor_id()); - wrmsrl(MSR_AMD64_LS_CFG, value | (1 << 15)); - } - } else if (c->x86 == 0x12) { - rdmsrl(MSR_AMD64_DE_CFG, value); - if (!(value & (1U << 31))) { - static bool warned; - - if (c == &boot_cpu_data || opt_cpu_info || - !test_and_set_bool(warned)) - printk(KERN_WARNING - "CPU%u: Applying workaround for erratum 665\n", - smp_processor_id()); - wrmsrl(MSR_AMD64_DE_CFG, value | (1U << 31)); - } - } - - /* AMD CPUs do not support SYSENTER outside of legacy mode. */ - __clear_bit(X86_FEATURE_SEP, c->x86_capability); - - if (c->x86 == 0x10) { - /* do this for boot cpu */ - if (c == &boot_cpu_data) - check_enable_amd_mmconf_dmi(); - - fam10h_check_enable_mmcfg(); - - /* - * On family 10h BIOS may not have properly enabled WC+ - * support, causing it to be converted to CD memtype. This may - * result in performance degradation for certain nested-paging - * guests. Prevent this conversion by clearing bit 24 in - * MSR_F10_BU_CFG2. - */ - rdmsrl(MSR_F10_BU_CFG2, value); - value &= ~(1ULL << 24); - wrmsrl(MSR_F10_BU_CFG2, value); - } - - /* - * Family 0x12 and above processors have APIC timer - * running in deep C states. - */ - if ( opt_arat && c->x86 > 0x11 ) - __set_bit(X86_FEATURE_ARAT, c->x86_capability); - - /* - * Prior to Family 0x14, perf counters are not reset during warm reboot. - * We have to reset them manually. - */ - if (nmi_watchdog != NMI_LOCAL_APIC && c->x86 < 0x14) { - wrmsrl(MSR_K7_PERFCTR0, 0); - wrmsrl(MSR_K7_PERFCTR1, 0); - wrmsrl(MSR_K7_PERFCTR2, 0); - wrmsrl(MSR_K7_PERFCTR3, 0); - } - - if (cpu_has(c, X86_FEATURE_EFRO)) { - rdmsr(MSR_K7_HWCR, l, h); - l |= (1 << 27); /* Enable read-only APERF/MPERF bit */ - wrmsr(MSR_K7_HWCR, l, h); - } - - /* Prevent TSC drift in non single-processor, single-core platforms. */ - if ((smp_processor_id() == 1) && !cpu_has(c, X86_FEATURE_ITSC)) - disable_c1_ramping(); - - check_syscfg_dram_mod_en(); + u32 l, h; + + unsigned long long value; + + /* Disable TLB flush filter by setting HWCR.FFDIS on K8 + * bit 6 of msr C001_0015 + * + * Errata 63 for SH-B3 steppings + * Errata 122 for all steppings (F+ have it disabled by default) + */ + if ( c->x86 == 15 ) + { + rdmsrl(MSR_K7_HWCR, value); + value |= 1 << 6; + wrmsrl(MSR_K7_HWCR, value); + } + + /* + * Some AMD CPUs duplicate the 3DNow bit in base and extended CPUID + * leaves. Unfortunately, this aliases PBE on Intel CPUs. Clobber the + * alias, leaving 3DNow in the extended leaf. + */ + __clear_bit(X86_FEATURE_PBE, c->x86_capability); + + if ( c->x86 == 0xf && c->x86_model < 0x14 && + cpu_has(c, X86_FEATURE_LAHF_LM) ) + { + /* + * Some BIOSes incorrectly force this feature, but only K8 + * revision D (model = 0x14) and later actually support it. + * (AMD Erratum #110, docId: 25759). 
+ */ + __clear_bit(X86_FEATURE_LAHF_LM, c->x86_capability); + if ( !rdmsr_amd_safe(0xc001100d, &l, &h) ) + wrmsr_amd_safe(0xc001100d, l, h & ~1); + } + + /* + * Attempt to set lfence to be Dispatch Serialising. This MSR almost + * certainly isn't virtualised (and Xen at least will leak the real + * value in but silently discard writes), as well as being per-core + * rather than per-thread, so do a full safe read/write/readback cycle + * in the worst case. + */ + if ( c->x86 == 0x0f || c->x86 == 0x11 ) + /* Always dispatch serialising on this hardare. */ + __set_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability); + else /* Implicily "== 0x10 || >= 0x12" by being 64bit. */ + { + if ( rdmsr_safe(MSR_AMD64_DE_CFG, value) ) + /* Unable to read. Assume the safer default. */ + __clear_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability); + else if ( value & AMD64_DE_CFG_LFENCE_SERIALISE ) + /* Already dispatch serialising. */ + __set_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability); + else if ( wrmsr_safe(MSR_AMD64_DE_CFG, + value | AMD64_DE_CFG_LFENCE_SERIALISE) || + rdmsr_safe(MSR_AMD64_DE_CFG, value) || + !(value & AMD64_DE_CFG_LFENCE_SERIALISE) ) + /* Attempt to set failed. Assume the safer default. */ + __clear_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability); + else + /* Successfully enabled! */ + __set_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability); + } + + /* + * If the user has explicitly chosen to disable Memory Disambiguation + * to mitigiate Speculative Store Bypass, poke the appropriate MSR. + */ + if ( opt_ssbd ) + { + int bit = -1; + + switch (c->x86) + { + case 0x15: + bit = 54; + break; + case 0x16: + bit = 33; + break; + case 0x17: + bit = 10; + break; + } + + if ( bit >= 0 && !rdmsr_safe(MSR_AMD64_LS_CFG, value) ) + { + value |= 1ull << bit; + wrmsr_safe(MSR_AMD64_LS_CFG, value); + } + } + + /* MFENCE stops RDTSC speculation */ + if ( !cpu_has_lfence_dispatch ) + __set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability); + + switch (c->x86) + { + case 0xf ... 0x17: + disable_c1e(NULL); + if ( acpi_smi_cmd && (acpi_enable_value | acpi_disable_value) ) + amd_acpi_c1e_quirk = true; + break; + } + + display_cacheinfo(c); + + if ( c->extended_cpuid_level >= 0x80000008 ) + { + c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; + } + + if ( c->extended_cpuid_level >= 0x80000007 ) + { + if ( cpu_has(c, X86_FEATURE_ITSC) ) + { + __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); + __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability); + if ( c->x86 != 0x11 ) + __set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability); + } + } + + /* re-enable TopologyExtensions if switched off by BIOS */ + if ( (c->x86 == 0x15) && (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && + !cpu_has(c, X86_FEATURE_TOPOEXT) && + !rdmsr_safe(MSR_K8_EXT_FEATURE_MASK, value) ) + { + value |= 1ULL << 54; + wrmsr_safe(MSR_K8_EXT_FEATURE_MASK, value); + rdmsrl(MSR_K8_EXT_FEATURE_MASK, value); + if ( value & (1ULL << 54) ) + { + __set_bit(X86_FEATURE_TOPOEXT, c->x86_capability); + printk(KERN_INFO "CPU: Re-enabling disabled " + "Topology Extensions Support\n"); + } + } + + /* + * The way access filter has a performance penalty on some workloads. + * Disable it on the affected CPUs. + */ + if ( c->x86 == 0x15 && c->x86_model >= 0x02 && c->x86_model < 0x20 && + !rdmsr_safe(MSR_AMD64_IC_CFG, value) && (value & 0x1e) != 0x1e ) + wrmsr_safe(MSR_AMD64_IC_CFG, value | 0x1e); + + amd_get_topology(c); + + /* Pointless to use MWAIT on Family10 as it does not deep sleep. 
*/ + if ( c->x86 == 0x10 ) + __clear_bit(X86_FEATURE_MONITOR, c->x86_capability); + + if ( !cpu_has_amd_erratum(c, AMD_ERRATUM_121) ) + opt_allow_unsafe = 1; + else if ( opt_allow_unsafe < 0 ) + panic("Xen will not boot on this CPU for security reasons" + "Pass \"allow_unsafe\" if you're trusting all your" + " (PV) guest kernels.\n"); + else if ( !opt_allow_unsafe && c == &boot_cpu_data ) + printk(KERN_WARNING "*** Xen will not allow creation of DomU-s on" + " this CPU for security reasons. ***\n" KERN_WARNING + "*** Pass \"allow_unsafe\" if you're trusting" + " all your (PV) guest kernels. ***\n"); + + if ( c->x86 == 0x16 && c->x86_model <= 0xf ) + { + if ( c == &boot_cpu_data ) + { + l = pci_conf_read32(0, 0, 0x18, 0x3, 0x58); + h = pci_conf_read32(0, 0, 0x18, 0x3, 0x5c); + if ( (l & 0x1f) | (h & 0x1) ) + printk(KERN_WARNING + "Applying workaround for erratum 792: %s%s%s\n", + (l & 0x1f) ? "clearing D18F3x58[4:0]" : "", + ((l & 0x1f) && (h & 0x1)) ? " and " : "", + (h & 0x1) ? "clearing D18F3x5C[0]" : ""); + + if ( l & 0x1f ) + pci_conf_write32(0, 0, 0x18, 0x3, 0x58, l & ~0x1f); + + if ( h & 0x1 ) + pci_conf_write32(0, 0, 0x18, 0x3, 0x5c, h & ~0x1); + } + + rdmsrl(MSR_AMD64_LS_CFG, value); + if ( !(value & (1 << 15)) ) + { + static bool_t warned; + + if ( c == &boot_cpu_data || opt_cpu_info || + !test_and_set_bool(warned) ) + printk(KERN_WARNING + "CPU%u: Applying workaround for erratum 793\n", + smp_processor_id()); + wrmsrl(MSR_AMD64_LS_CFG, value | (1 << 15)); + } + } + else if ( c->x86 == 0x12 ) + { + rdmsrl(MSR_AMD64_DE_CFG, value); + if ( !(value & (1U << 31)) ) + { + static bool warned; + + if ( c == &boot_cpu_data || opt_cpu_info || + !test_and_set_bool(warned) ) + printk(KERN_WARNING + "CPU%u: Applying workaround for erratum 665\n", + smp_processor_id()); + wrmsrl(MSR_AMD64_DE_CFG, value | (1U << 31)); + } + } + + /* AMD CPUs do not support SYSENTER outside of legacy mode. */ + __clear_bit(X86_FEATURE_SEP, c->x86_capability); + + if ( c->x86 == 0x10 ) + { + /* do this for boot cpu */ + if ( c == &boot_cpu_data ) + check_enable_amd_mmconf_dmi(); + + fam10h_check_enable_mmcfg(); + + /* + * On family 10h BIOS may not have properly enabled WC+ + * support, causing it to be converted to CD memtype. This may + * result in performance degradation for certain nested-paging + * guests. Prevent this conversion by clearing bit 24 in + * MSR_F10_BU_CFG2. + */ + rdmsrl(MSR_F10_BU_CFG2, value); + value &= ~(1ULL << 24); + wrmsrl(MSR_F10_BU_CFG2, value); + } + + /* + * Family 0x12 and above processors have APIC timer + * running in deep C states. + */ + if ( opt_arat && c->x86 > 0x11 ) + __set_bit(X86_FEATURE_ARAT, c->x86_capability); + + /* + * Prior to Family 0x14, perf counters are not reset during warm reboot. + * We have to reset them manually. + */ + if ( nmi_watchdog != NMI_LOCAL_APIC && c->x86 < 0x14 ) + { + wrmsrl(MSR_K7_PERFCTR0, 0); + wrmsrl(MSR_K7_PERFCTR1, 0); + wrmsrl(MSR_K7_PERFCTR2, 0); + wrmsrl(MSR_K7_PERFCTR3, 0); + } + + if ( cpu_has(c, X86_FEATURE_EFRO) ) + { + rdmsr(MSR_K7_HWCR, l, h); + l |= (1 << 27); /* Enable read-only APERF/MPERF bit */ + wrmsr(MSR_K7_HWCR, l, h); + } + + /* Prevent TSC drift in non single-processor, single-core platforms. 
*/ + if ( (smp_processor_id() == 1) && !cpu_has(c, X86_FEATURE_ITSC) ) + disable_c1_ramping(); + + check_syscfg_dram_mod_en(); } static const struct cpu_dev amd_cpu_dev = { - .c_vendor = "AMD", - .c_ident = { "AuthenticAMD" }, - .c_early_init = early_init_amd, - .c_init = init_amd, + .c_vendor = "AMD", + .c_ident = {"AuthenticAMD"}, + .c_early_init = early_init_amd, + .c_init = init_amd, }; int __init amd_init_cpu(void) { - cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev; - return 0; + cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev; + return 0; } diff --git a/xen/arch/x86/cpu/centaur.c b/xen/arch/x86/cpu/centaur.c index 1c760be40d..b610ca9e12 100644 --- a/xen/arch/x86/cpu/centaur.c +++ b/xen/arch/x86/cpu/centaur.c @@ -6,62 +6,66 @@ #include #include "cpu.h" -#define ACE_PRESENT (1 << 6) -#define ACE_ENABLED (1 << 7) -#define ACE_FCR (1 << 28) /* MSR_VIA_FCR */ +#define ACE_PRESENT (1 << 6) +#define ACE_ENABLED (1 << 7) +#define ACE_FCR (1 << 28) /* MSR_VIA_FCR */ -#define RNG_PRESENT (1 << 2) -#define RNG_ENABLED (1 << 3) -#define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */ +#define RNG_PRESENT (1 << 2) +#define RNG_ENABLED (1 << 3) +#define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */ static void init_c3(struct cpuinfo_x86 *c) { - uint64_t msr_content; + uint64_t msr_content; - /* Test for Centaur Extended Feature Flags presence */ - if (cpuid_eax(0xC0000000) >= 0xC0000001) { - u32 tmp = cpuid_edx(0xC0000001); + /* Test for Centaur Extended Feature Flags presence */ + if ( cpuid_eax(0xC0000000) >= 0xC0000001 ) + { + u32 tmp = cpuid_edx(0xC0000001); - /* enable ACE unit, if present and disabled */ - if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) { - rdmsrl(MSR_VIA_FCR, msr_content); - /* enable ACE unit */ - wrmsrl(MSR_VIA_FCR, msr_content | ACE_FCR); - printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n"); - } + /* enable ACE unit, if present and disabled */ + if ( (tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT ) + { + rdmsrl(MSR_VIA_FCR, msr_content); + /* enable ACE unit */ + wrmsrl(MSR_VIA_FCR, msr_content | ACE_FCR); + printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n"); + } - /* enable RNG unit, if present and disabled */ - if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) { - rdmsrl(MSR_VIA_RNG, msr_content); - /* enable RNG unit */ - wrmsrl(MSR_VIA_RNG, msr_content | RNG_ENABLE); - printk(KERN_INFO "CPU: Enabled h/w RNG\n"); - } - } + /* enable RNG unit, if present and disabled */ + if ( (tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT ) + { + rdmsrl(MSR_VIA_RNG, msr_content); + /* enable RNG unit */ + wrmsrl(MSR_VIA_RNG, msr_content | RNG_ENABLE); + printk(KERN_INFO "CPU: Enabled h/w RNG\n"); + } + } - if (c->x86 == 0x6 && c->x86_model >= 0xf) { - c->x86_cache_alignment = c->x86_clflush_size * 2; - __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); - } + if ( c->x86 == 0x6 && c->x86_model >= 0xf ) + { + c->x86_cache_alignment = c->x86_clflush_size * 2; + __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); + } - get_model_name(c); - display_cacheinfo(c); + get_model_name(c); + display_cacheinfo(c); } static void init_centaur(struct cpuinfo_x86 *c) { - if (c->x86 == 6) - init_c3(c); + if ( c->x86 == 6 ) + init_c3(c); } static const struct cpu_dev centaur_cpu_dev = { - .c_vendor = "Centaur", - .c_ident = { "CentaurHauls" }, - .c_init = init_centaur, + .c_vendor = "Centaur", + .c_ident = {"CentaurHauls"}, + .c_init = init_centaur, }; int __init centaur_init_cpu(void) { - cpu_devs[X86_VENDOR_CENTAUR] = ¢aur_cpu_dev; - return 0; + cpu_devs[X86_VENDOR_CENTAUR] = ¢aur_cpu_dev; + return 0; } 
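The init_amd() hunk above performs a full read/modify/write/read-back cycle on MSR_AMD64_DE_CFG because, as its comment notes, writes to that MSR may be silently discarded (for instance when it is not virtualised), so only reading the value back proves the bit actually stuck. As a rough, self-contained sketch of that pattern, using hypothetical rdmsr_mock()/wrmsr_mock() stand-ins in place of Xen's rdmsr_safe()/wrmsr_safe() so it compiles and runs anywhere, the idea looks roughly like this:

/*
 * Minimal sketch of the "safe read / modify / write / read back" pattern.
 * The MSR is mocked (rdmsr_mock/wrmsr_mock are stand-ins, not Xen APIs),
 * with msr_writable == false modelling a write that is silently dropped.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_msr;      /* stands in for the hardware register */
static bool msr_writable;      /* false: writes are silently discarded */

static int rdmsr_mock(uint64_t *val)
{
    *val = fake_msr;
    return 0;                  /* 0 == success, like rdmsr_safe() */
}

static int wrmsr_mock(uint64_t val)
{
    if ( msr_writable )
        fake_msr = val;        /* otherwise drop the write without error */
    return 0;
}

/* Try to set @bit; report whether it is actually set afterwards. */
static bool set_bit_checked(uint64_t bit)
{
    uint64_t val;

    if ( rdmsr_mock(&val) )
        return false;          /* unreadable: assume the safer default */
    if ( val & bit )
        return true;           /* already set, nothing to do */
    if ( wrmsr_mock(val | bit) || rdmsr_mock(&val) )
        return false;
    return val & bit;          /* only the read back proves it stuck */
}

int main(void)
{
    printf("write discarded: bit set = %d\n", set_bit_checked(1ULL << 1));
    msr_writable = true;
    printf("write accepted:  bit set = %d\n", set_bit_checked(1ULL << 1));
    return 0;
}

The point of the structure is that the value returned by the final read, not the apparent success of the write, decides whether a feature flag such as X86_FEATURE_LFENCE_DISPATCH may be set.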
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c index de6c5c903f..4881e28efb 100644 --- a/xen/arch/x86/cpu/common.c +++ b/xen/arch/x86/cpu/common.c @@ -53,139 +53,140 @@ static unsigned int forced_caps[NCAPINTS]; void __init setup_clear_cpu_cap(unsigned int cap) { - const uint32_t *dfs; - unsigned int i; - - if (__test_and_set_bit(cap, cleared_caps)) - return; - - if (test_bit(cap, forced_caps)) - printk("%pS clearing previously forced feature %#x\n", - __builtin_return_address(0), cap); - - __clear_bit(cap, boot_cpu_data.x86_capability); - dfs = x86_cpuid_lookup_deep_deps(cap); - - if (!dfs) - return; - - for (i = 0; i < FSCAPINTS; ++i) { - cleared_caps[i] |= dfs[i]; - boot_cpu_data.x86_capability[i] &= ~dfs[i]; - if (!(forced_caps[i] & dfs[i])) - continue; - printk("%pS implicitly clearing previously forced feature(s) %u:%#x\n", - __builtin_return_address(0), - i, forced_caps[i] & dfs[i]); - } + const uint32_t *dfs; + unsigned int i; + + if ( __test_and_set_bit(cap, cleared_caps) ) + return; + + if ( test_bit(cap, forced_caps) ) + printk("%pS clearing previously forced feature %#x\n", + __builtin_return_address(0), cap); + + __clear_bit(cap, boot_cpu_data.x86_capability); + dfs = x86_cpuid_lookup_deep_deps(cap); + + if ( !dfs ) + return; + + for ( i = 0; i < FSCAPINTS; ++i ) + { + cleared_caps[i] |= dfs[i]; + boot_cpu_data.x86_capability[i] &= ~dfs[i]; + if ( !(forced_caps[i] & dfs[i]) ) + continue; + printk("%pS implicitly clearing previously forced feature(s) %u:%#x\n", + __builtin_return_address(0), i, forced_caps[i] & dfs[i]); + } } void __init setup_force_cpu_cap(unsigned int cap) { - if (__test_and_set_bit(cap, forced_caps)) - return; + if ( __test_and_set_bit(cap, forced_caps) ) + return; - if (test_bit(cap, cleared_caps)) { - printk("%pS tries to force previously cleared feature %#x\n", - __builtin_return_address(0), cap); - return; - } + if ( test_bit(cap, cleared_caps) ) + { + printk("%pS tries to force previously cleared feature %#x\n", + __builtin_return_address(0), cap); + return; + } - __set_bit(cap, boot_cpu_data.x86_capability); + __set_bit(cap, boot_cpu_data.x86_capability); } -static void default_init(struct cpuinfo_x86 * c) +static void default_init(struct cpuinfo_x86 *c) { - /* Not much we can do here... */ - /* Check if at least it has cpuid */ - BUG_ON(c->cpuid_level == -1); - __clear_bit(X86_FEATURE_SEP, c->x86_capability); + /* Not much we can do here... 
*/ + /* Check if at least it has cpuid */ + BUG_ON(c->cpuid_level == -1); + __clear_bit(X86_FEATURE_SEP, c->x86_capability); } static const struct cpu_dev default_cpu = { - .c_init = default_init, - .c_vendor = "Unknown", + .c_init = default_init, + .c_vendor = "Unknown", }; static const struct cpu_dev *this_cpu = &default_cpu; static DEFINE_PER_CPU(uint64_t, msr_misc_features); -void (* __read_mostly ctxt_switch_masking)(const struct vcpu *next); +void (*__read_mostly ctxt_switch_masking)(const struct vcpu *next); bool __init probe_cpuid_faulting(void) { - uint64_t val; - int rc; - - if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0) - raw_msr_policy.plaform_info.cpuid_faulting = - val & MSR_PLATFORM_INFO_CPUID_FAULTING; - - if (rc || - !(val & MSR_PLATFORM_INFO_CPUID_FAULTING) || - rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES, - this_cpu(msr_misc_features))) - { - setup_clear_cpu_cap(X86_FEATURE_CPUID_FAULTING); - return false; - } - - expected_levelling_cap |= LCAP_faulting; - levelling_caps |= LCAP_faulting; - setup_force_cpu_cap(X86_FEATURE_CPUID_FAULTING); - - return true; + uint64_t val; + int rc; + + if ( (rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0 ) + raw_msr_policy.plaform_info.cpuid_faulting = + val & MSR_PLATFORM_INFO_CPUID_FAULTING; + + if ( rc || !(val & MSR_PLATFORM_INFO_CPUID_FAULTING) || + rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES, + this_cpu(msr_misc_features)) ) + { + setup_clear_cpu_cap(X86_FEATURE_CPUID_FAULTING); + return false; + } + + expected_levelling_cap |= LCAP_faulting; + levelling_caps |= LCAP_faulting; + setup_force_cpu_cap(X86_FEATURE_CPUID_FAULTING); + + return true; } static void set_cpuid_faulting(bool enable) { - uint64_t *this_misc_features = &this_cpu(msr_misc_features); - uint64_t val = *this_misc_features; + uint64_t *this_misc_features = &this_cpu(msr_misc_features); + uint64_t val = *this_misc_features; - if (!!(val & MSR_MISC_FEATURES_CPUID_FAULTING) == enable) - return; + if ( !!(val & MSR_MISC_FEATURES_CPUID_FAULTING) == enable ) + return; - val ^= MSR_MISC_FEATURES_CPUID_FAULTING; + val ^= MSR_MISC_FEATURES_CPUID_FAULTING; - wrmsrl(MSR_INTEL_MISC_FEATURES_ENABLES, val); - *this_misc_features = val; + wrmsrl(MSR_INTEL_MISC_FEATURES_ENABLES, val); + *this_misc_features = val; } void ctxt_switch_levelling(const struct vcpu *next) { - const struct domain *nextd = next ? next->domain : NULL; - - if (cpu_has_cpuid_faulting) { - /* - * No need to alter the faulting setting if we are switching - * to idle; it won't affect any code running in idle context. - */ - if (nextd && is_idle_domain(nextd)) - return; - /* - * We *should* be enabling faulting for the control domain. - * - * Unfortunately, the domain builder (having only ever been a - * PV guest) expects to be able to see host cpuid state in a - * native CPUID instruction, to correctly build a CPUID policy - * for HVM guests (notably the xstate leaves). - * - * This logic is fundimentally broken for HVM toolstack - * domains, and faulting causes PV guests to behave like HVM - * guests from their point of view. - * - * Future development plans will move responsibility for - * generating the maximum full cpuid policy into Xen, at which - * this problem will disappear. - */ - set_cpuid_faulting(nextd && !is_control_domain(nextd) && - (is_pv_domain(nextd) || - next->arch.msrs-> - misc_features_enables.cpuid_faulting)); - return; - } - - if (ctxt_switch_masking) - ctxt_switch_masking(next); + const struct domain *nextd = next ? 
next->domain : NULL; + + if ( cpu_has_cpuid_faulting ) + { + /* + * No need to alter the faulting setting if we are switching + * to idle; it won't affect any code running in idle context. + */ + if ( nextd && is_idle_domain(nextd) ) + return; + /* + * We *should* be enabling faulting for the control domain. + * + * Unfortunately, the domain builder (having only ever been a + * PV guest) expects to be able to see host cpuid state in a + * native CPUID instruction, to correctly build a CPUID policy + * for HVM guests (notably the xstate leaves). + * + * This logic is fundimentally broken for HVM toolstack + * domains, and faulting causes PV guests to behave like HVM + * guests from their point of view. + * + * Future development plans will move responsibility for + * generating the maximum full cpuid policy into Xen, at which + * this problem will disappear. + */ + set_cpuid_faulting( + nextd && !is_control_domain(nextd) && + (is_pv_domain(nextd) || + next->arch.msrs->misc_features_enables.cpuid_faulting)); + return; + } + + if ( ctxt_switch_masking ) + ctxt_switch_masking(next); } bool_t opt_cpu_info; @@ -193,93 +194,99 @@ boolean_param("cpuinfo", opt_cpu_info); int get_model_name(struct cpuinfo_x86 *c) { - unsigned int *v; - char *p, *q; - - if (c->extended_cpuid_level < 0x80000004) - return 0; - - v = (unsigned int *) c->x86_model_id; - cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); - cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); - cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); - c->x86_model_id[48] = 0; - - /* Intel chips right-justify this string for some dumb reason; - undo that brain damage */ - p = q = &c->x86_model_id[0]; - while ( *p == ' ' ) - p++; - if ( p != q ) { - while ( *p ) - *q++ = *p++; - while ( q <= &c->x86_model_id[48] ) - *q++ = '\0'; /* Zero-pad the rest */ - } - - return 1; + unsigned int *v; + char *p, *q; + + if ( c->extended_cpuid_level < 0x80000004 ) + return 0; + + v = (unsigned int *)c->x86_model_id; + cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); + cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); + cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); + c->x86_model_id[48] = 0; + + /* Intel chips right-justify this string for some dumb reason; + undo that brain damage */ + p = q = &c->x86_model_id[0]; + while ( *p == ' ' ) + p++; + if ( p != q ) + { + while ( *p ) + *q++ = *p++; + while ( q <= &c->x86_model_id[48] ) + *q++ = '\0'; /* Zero-pad the rest */ + } + + return 1; } - void display_cacheinfo(struct cpuinfo_x86 *c) { - unsigned int dummy, ecx, edx, l2size; - - if (c->extended_cpuid_level >= 0x80000005) { - cpuid(0x80000005, &dummy, &dummy, &ecx, &edx); - if (opt_cpu_info) - printk("CPU: L1 I cache %dK (%d bytes/line)," - " D cache %dK (%d bytes/line)\n", - edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); - c->x86_cache_size=(ecx>>24)+(edx>>24); - } - - if (c->extended_cpuid_level < 0x80000006) /* Some chips just has a large L1. */ - return; - - ecx = cpuid_ecx(0x80000006); - l2size = ecx >> 16; - - c->x86_cache_size = l2size; - - if (opt_cpu_info) - printk("CPU: L2 Cache: %dK (%d bytes/line)\n", - l2size, ecx & 0xFF); + unsigned int dummy, ecx, edx, l2size; + + if ( c->extended_cpuid_level >= 0x80000005 ) + { + cpuid(0x80000005, &dummy, &dummy, &ecx, &edx); + if ( opt_cpu_info ) + printk("CPU: L1 I cache %dK (%d bytes/line)," + " D cache %dK (%d bytes/line)\n", + edx >> 24, edx & 0xFF, ecx >> 24, ecx & 0xFF); + c->x86_cache_size = (ecx >> 24) + (edx >> 24); + } + + if ( c->extended_cpuid_level < + 0x80000006 ) /* Some chips just has a large L1. 
*/ + return; + + ecx = cpuid_ecx(0x80000006); + l2size = ecx >> 16; + + c->x86_cache_size = l2size; + + if ( opt_cpu_info ) + printk("CPU: L2 Cache: %dK (%d bytes/line)\n", l2size, ecx & 0xFF); } int get_cpu_vendor(uint32_t b, uint32_t c, uint32_t d, enum get_cpu_vendor mode) { - int i; - static int printed; - - for (i = 0; i < X86_VENDOR_NUM; i++) { - if (cpu_devs[i]) { - struct { - uint32_t b, d, c; - } *ptr = (void *)cpu_devs[i]->c_ident; - - if (ptr->b == b && ptr->c == c && ptr->d == d) { - if (mode == gcv_host) - this_cpu = cpu_devs[i]; - return i; - } - } - } - if (mode == gcv_guest) - return X86_VENDOR_UNKNOWN; - if (!printed) { - printed++; - printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n"); - printk(KERN_ERR "CPU: Your system may be unstable.\n"); - } - this_cpu = &default_cpu; - - return X86_VENDOR_UNKNOWN; + int i; + static int printed; + + for ( i = 0; i < X86_VENDOR_NUM; i++ ) + { + if ( cpu_devs[i] ) + { + struct + { + uint32_t b, d, c; + } *ptr = (void *)cpu_devs[i]->c_ident; + + if ( ptr->b == b && ptr->c == c && ptr->d == d ) + { + if ( mode == gcv_host ) + this_cpu = cpu_devs[i]; + return i; + } + } + } + if ( mode == gcv_guest ) + return X86_VENDOR_UNKNOWN; + if ( !printed ) + { + printed++; + printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n"); + printk(KERN_ERR "CPU: Your system may be unstable.\n"); + } + this_cpu = &default_cpu; + + return X86_VENDOR_UNKNOWN; } static inline u32 _phys_pkg_id(u32 cpuid_apic, int index_msb) { - return cpuid_apic >> index_msb; + return cpuid_apic >> index_msb; } /* @@ -291,129 +298,131 @@ static inline u32 _phys_pkg_id(u32 cpuid_apic, int index_msb) */ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) { - return _phys_pkg_id(get_apic_id(), index_msb); + return _phys_pkg_id(get_apic_id(), index_msb); } /* Do minimum CPU detection early. - Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment. - The others are not touched to avoid unwanted side effects. + Fields really needed: vendor, cpuid_level, family, model, mask, cache + alignment. The others are not touched to avoid unwanted side effects. WARNING: this function is only called on the BP. Don't add code here that is supposed to run on all CPUs. */ static void __init early_cpu_detect(void) { - struct cpuinfo_x86 *c = &boot_cpu_data; - u32 eax, ebx, ecx, edx; - - c->x86_cache_alignment = 32; - - /* Get vendor name */ - cpuid(0x00000000, &c->cpuid_level, &ebx, &ecx, &edx); - *(u32 *)&c->x86_vendor_id[0] = ebx; - *(u32 *)&c->x86_vendor_id[8] = ecx; - *(u32 *)&c->x86_vendor_id[4] = edx; - - c->x86_vendor = get_cpu_vendor(ebx, ecx, edx, gcv_host); - - cpuid(0x00000001, &eax, &ebx, &ecx, &edx); - c->x86 = get_cpu_family(eax, &c->x86_model, &c->x86_mask); - - edx &= ~cleared_caps[cpufeat_word(X86_FEATURE_FPU)]; - ecx &= ~cleared_caps[cpufeat_word(X86_FEATURE_SSE3)]; - if (edx & cpufeat_mask(X86_FEATURE_CLFLUSH)) - c->x86_cache_alignment = ((ebx >> 8) & 0xff) * 8; - /* Leaf 0x1 capabilities filled in early for Xen. 
*/ - c->x86_capability[cpufeat_word(X86_FEATURE_FPU)] = edx; - c->x86_capability[cpufeat_word(X86_FEATURE_SSE3)] = ecx; - - printk(XENLOG_INFO - "CPU Vendor: %s, Family %u (%#x), Model %u (%#x), Stepping %u (raw %08x)\n", - this_cpu->c_vendor, c->x86, c->x86, - c->x86_model, c->x86_model, c->x86_mask, eax); - - eax = cpuid_eax(0x80000000); - if ((eax >> 16) == 0x8000 && eax >= 0x80000008) { - eax = cpuid_eax(0x80000008); - paddr_bits = eax & 0xff; - if (paddr_bits > PADDR_BITS) - paddr_bits = PADDR_BITS; - vaddr_bits = (eax >> 8) & 0xff; - if (vaddr_bits > VADDR_BITS) - vaddr_bits = VADDR_BITS; - hap_paddr_bits = ((eax >> 16) & 0xff) ?: paddr_bits; - if (hap_paddr_bits > PADDR_BITS) - hap_paddr_bits = PADDR_BITS; - } - - if (c->x86_vendor != X86_VENDOR_AMD) - park_offline_cpus = opt_mce; - - initialize_cpu_data(0); + struct cpuinfo_x86 *c = &boot_cpu_data; + u32 eax, ebx, ecx, edx; + + c->x86_cache_alignment = 32; + + /* Get vendor name */ + cpuid(0x00000000, &c->cpuid_level, &ebx, &ecx, &edx); + *(u32 *)&c->x86_vendor_id[0] = ebx; + *(u32 *)&c->x86_vendor_id[8] = ecx; + *(u32 *)&c->x86_vendor_id[4] = edx; + + c->x86_vendor = get_cpu_vendor(ebx, ecx, edx, gcv_host); + + cpuid(0x00000001, &eax, &ebx, &ecx, &edx); + c->x86 = get_cpu_family(eax, &c->x86_model, &c->x86_mask); + + edx &= ~cleared_caps[cpufeat_word(X86_FEATURE_FPU)]; + ecx &= ~cleared_caps[cpufeat_word(X86_FEATURE_SSE3)]; + if ( edx & cpufeat_mask(X86_FEATURE_CLFLUSH) ) + c->x86_cache_alignment = ((ebx >> 8) & 0xff) * 8; + /* Leaf 0x1 capabilities filled in early for Xen. */ + c->x86_capability[cpufeat_word(X86_FEATURE_FPU)] = edx; + c->x86_capability[cpufeat_word(X86_FEATURE_SSE3)] = ecx; + + printk(XENLOG_INFO "CPU Vendor: %s, Family %u (%#x), Model %u (%#x), " + "Stepping %u (raw %08x)\n", + this_cpu->c_vendor, c->x86, c->x86, c->x86_model, c->x86_model, + c->x86_mask, eax); + + eax = cpuid_eax(0x80000000); + if ( (eax >> 16) == 0x8000 && eax >= 0x80000008 ) + { + eax = cpuid_eax(0x80000008); + paddr_bits = eax & 0xff; + if ( paddr_bits > PADDR_BITS ) + paddr_bits = PADDR_BITS; + vaddr_bits = (eax >> 8) & 0xff; + if ( vaddr_bits > VADDR_BITS ) + vaddr_bits = VADDR_BITS; + hap_paddr_bits = ((eax >> 16) & 0xff) ?: paddr_bits; + if ( hap_paddr_bits > PADDR_BITS ) + hap_paddr_bits = PADDR_BITS; + } + + if ( c->x86_vendor != X86_VENDOR_AMD ) + park_offline_cpus = opt_mce; + + initialize_cpu_data(0); } static void generic_identify(struct cpuinfo_x86 *c) { - u32 eax, ebx, ecx, edx, tmp; - - /* Get vendor name */ - cpuid(0x00000000, &c->cpuid_level, &ebx, &ecx, &edx); - *(u32 *)&c->x86_vendor_id[0] = ebx; - *(u32 *)&c->x86_vendor_id[8] = ecx; - *(u32 *)&c->x86_vendor_id[4] = edx; - - c->x86_vendor = get_cpu_vendor(ebx, ecx, edx, gcv_host); - /* Initialize the standard set of capabilities */ - /* Note that the vendor-specific code below might override */ - - /* Model and family information. */ - cpuid(0x00000001, &eax, &ebx, &ecx, &edx); - c->x86 = get_cpu_family(eax, &c->x86_model, &c->x86_mask); - c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); - c->phys_proc_id = c->apicid; - - if (this_cpu->c_early_init) - this_cpu->c_early_init(c); - - /* c_early_init() may have adjusted cpuid levels/features. Reread. 
*/ - c->cpuid_level = cpuid_eax(0); - cpuid(0x00000001, &eax, &ebx, &ecx, &edx); - c->x86_capability[cpufeat_word(X86_FEATURE_FPU)] = edx; - c->x86_capability[cpufeat_word(X86_FEATURE_SSE3)] = ecx; - - if ( cpu_has(c, X86_FEATURE_CLFLUSH) ) - c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8; - - if ( (c->cpuid_level >= CPUID_PM_LEAF) && - (cpuid_ecx(CPUID_PM_LEAF) & CPUID6_ECX_APERFMPERF_CAPABILITY) ) - set_bit(X86_FEATURE_APERFMPERF, c->x86_capability); - - /* AMD-defined flags: level 0x80000001 */ - c->extended_cpuid_level = cpuid_eax(0x80000000); - if ((c->extended_cpuid_level >> 16) != 0x8000) - c->extended_cpuid_level = 0; - if (c->extended_cpuid_level > 0x80000000) - cpuid(0x80000001, &tmp, &tmp, - &c->x86_capability[cpufeat_word(X86_FEATURE_LAHF_LM)], - &c->x86_capability[cpufeat_word(X86_FEATURE_SYSCALL)]); - if (c == &boot_cpu_data) - bootsym(cpuid_ext_features) = - c->x86_capability[cpufeat_word(X86_FEATURE_NX)]; - - if (c->extended_cpuid_level >= 0x80000004) - get_model_name(c); /* Default name */ - if (c->extended_cpuid_level >= 0x80000007) - c->x86_capability[cpufeat_word(X86_FEATURE_ITSC)] - = cpuid_edx(0x80000007); - if (c->extended_cpuid_level >= 0x80000008) - c->x86_capability[cpufeat_word(X86_FEATURE_CLZERO)] - = cpuid_ebx(0x80000008); - - /* Intel-defined flags: level 0x00000007 */ - if ( c->cpuid_level >= 0x00000007 ) - cpuid_count(0x00000007, 0, &tmp, - &c->x86_capability[cpufeat_word(X86_FEATURE_FSGSBASE)], - &c->x86_capability[cpufeat_word(X86_FEATURE_PKU)], - &c->x86_capability[cpufeat_word(X86_FEATURE_AVX512_4VNNIW)]); + u32 eax, ebx, ecx, edx, tmp; + + /* Get vendor name */ + cpuid(0x00000000, &c->cpuid_level, &ebx, &ecx, &edx); + *(u32 *)&c->x86_vendor_id[0] = ebx; + *(u32 *)&c->x86_vendor_id[8] = ecx; + *(u32 *)&c->x86_vendor_id[4] = edx; + + c->x86_vendor = get_cpu_vendor(ebx, ecx, edx, gcv_host); + /* Initialize the standard set of capabilities */ + /* Note that the vendor-specific code below might override */ + + /* Model and family information. */ + cpuid(0x00000001, &eax, &ebx, &ecx, &edx); + c->x86 = get_cpu_family(eax, &c->x86_model, &c->x86_mask); + c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); + c->phys_proc_id = c->apicid; + + if ( this_cpu->c_early_init ) + this_cpu->c_early_init(c); + + /* c_early_init() may have adjusted cpuid levels/features. Reread. 
*/ + c->cpuid_level = cpuid_eax(0); + cpuid(0x00000001, &eax, &ebx, &ecx, &edx); + c->x86_capability[cpufeat_word(X86_FEATURE_FPU)] = edx; + c->x86_capability[cpufeat_word(X86_FEATURE_SSE3)] = ecx; + + if ( cpu_has(c, X86_FEATURE_CLFLUSH) ) + c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8; + + if ( (c->cpuid_level >= CPUID_PM_LEAF) && + (cpuid_ecx(CPUID_PM_LEAF) & CPUID6_ECX_APERFMPERF_CAPABILITY) ) + set_bit(X86_FEATURE_APERFMPERF, c->x86_capability); + + /* AMD-defined flags: level 0x80000001 */ + c->extended_cpuid_level = cpuid_eax(0x80000000); + if ( (c->extended_cpuid_level >> 16) != 0x8000 ) + c->extended_cpuid_level = 0; + if ( c->extended_cpuid_level > 0x80000000 ) + cpuid(0x80000001, &tmp, &tmp, + &c->x86_capability[cpufeat_word(X86_FEATURE_LAHF_LM)], + &c->x86_capability[cpufeat_word(X86_FEATURE_SYSCALL)]); + if ( c == &boot_cpu_data ) + bootsym(cpuid_ext_features) = + c->x86_capability[cpufeat_word(X86_FEATURE_NX)]; + + if ( c->extended_cpuid_level >= 0x80000004 ) + get_model_name(c); /* Default name */ + if ( c->extended_cpuid_level >= 0x80000007 ) + c->x86_capability[cpufeat_word(X86_FEATURE_ITSC)] = + cpuid_edx(0x80000007); + if ( c->extended_cpuid_level >= 0x80000008 ) + c->x86_capability[cpufeat_word(X86_FEATURE_CLZERO)] = + cpuid_ebx(0x80000008); + + /* Intel-defined flags: level 0x00000007 */ + if ( c->cpuid_level >= 0x00000007 ) + cpuid_count( + 0x00000007, 0, &tmp, + &c->x86_capability[cpufeat_word(X86_FEATURE_FSGSBASE)], + &c->x86_capability[cpufeat_word(X86_FEATURE_PKU)], + &c->x86_capability[cpufeat_word(X86_FEATURE_AVX512_4VNNIW)]); } /* @@ -421,112 +430,116 @@ static void generic_identify(struct cpuinfo_x86 *c) */ void identify_cpu(struct cpuinfo_x86 *c) { - int i; - - c->x86_cache_size = -1; - c->x86_vendor = X86_VENDOR_UNKNOWN; - c->cpuid_level = -1; /* CPUID not detected */ - c->x86_model = c->x86_mask = 0; /* So far unknown... */ - c->x86_vendor_id[0] = '\0'; /* Unset */ - c->x86_model_id[0] = '\0'; /* Unset */ - c->x86_max_cores = 1; - c->x86_num_siblings = 1; - c->x86_clflush_size = 0; - c->phys_proc_id = XEN_INVALID_SOCKET_ID; - c->cpu_core_id = XEN_INVALID_CORE_ID; - c->compute_unit_id = INVALID_CUID; - memset(&c->x86_capability, 0, sizeof c->x86_capability); - - generic_identify(c); + int i; + + c->x86_cache_size = -1; + c->x86_vendor = X86_VENDOR_UNKNOWN; + c->cpuid_level = -1; /* CPUID not detected */ + c->x86_model = c->x86_mask = 0; /* So far unknown... */ + c->x86_vendor_id[0] = '\0'; /* Unset */ + c->x86_model_id[0] = '\0'; /* Unset */ + c->x86_max_cores = 1; + c->x86_num_siblings = 1; + c->x86_clflush_size = 0; + c->phys_proc_id = XEN_INVALID_SOCKET_ID; + c->cpu_core_id = XEN_INVALID_CORE_ID; + c->compute_unit_id = INVALID_CUID; + memset(&c->x86_capability, 0, sizeof c->x86_capability); + + generic_identify(c); #ifdef NOISY_CAPS - printk(KERN_DEBUG "CPU: After vendor identify, caps:"); - for (i = 0; i < NCAPINTS; i++) - printk(" %08x", c->x86_capability[i]); - printk("\n"); + printk(KERN_DEBUG "CPU: After vendor identify, caps:"); + for ( i = 0; i < NCAPINTS; i++ ) + printk(" %08x", c->x86_capability[i]); + printk("\n"); #endif - /* - * Vendor-specific initialization. In this section we - * canonicalize the feature flags, meaning if there are - * features a certain CPU supports which CPUID doesn't - * tell us, CPUID claiming incorrect flags, or other bugs, - * we handle them here. - * - * At the end of this section, c->x86_capability better - * indicate the features this CPU genuinely supports! 
- */ - if (this_cpu->c_init) - this_cpu->c_init(c); - - - if ( !opt_pku ) - setup_clear_cpu_cap(X86_FEATURE_PKU); - - /* - * The vendor-specific functions might have changed features. Now - * we do "generic changes." - */ - for (i = 0; i < FSCAPINTS; ++i) - c->x86_capability[i] &= known_features[i]; - - for (i = 0 ; i < NCAPINTS ; ++i) { - c->x86_capability[i] |= forced_caps[i]; - c->x86_capability[i] &= ~cleared_caps[i]; - } - - /* If the model name is still unset, do table lookup. */ - if ( !c->x86_model_id[0] ) { - /* Last resort... */ - snprintf(c->x86_model_id, sizeof(c->x86_model_id), - "%02x/%02x", c->x86_vendor, c->x86_model); - } - - /* Now the feature flags better reflect actual CPU features! */ - - if ( cpu_has_xsave ) - xstate_init(c); + /* + * Vendor-specific initialization. In this section we + * canonicalize the feature flags, meaning if there are + * features a certain CPU supports which CPUID doesn't + * tell us, CPUID claiming incorrect flags, or other bugs, + * we handle them here. + * + * At the end of this section, c->x86_capability better + * indicate the features this CPU genuinely supports! + */ + if ( this_cpu->c_init ) + this_cpu->c_init(c); + + if ( !opt_pku ) + setup_clear_cpu_cap(X86_FEATURE_PKU); + + /* + * The vendor-specific functions might have changed features. Now + * we do "generic changes." + */ + for ( i = 0; i < FSCAPINTS; ++i ) + c->x86_capability[i] &= known_features[i]; + + for ( i = 0; i < NCAPINTS; ++i ) + { + c->x86_capability[i] |= forced_caps[i]; + c->x86_capability[i] &= ~cleared_caps[i]; + } + + /* If the model name is still unset, do table lookup. */ + if ( !c->x86_model_id[0] ) + { + /* Last resort... */ + snprintf(c->x86_model_id, sizeof(c->x86_model_id), "%02x/%02x", + c->x86_vendor, c->x86_model); + } + + /* Now the feature flags better reflect actual CPU features! */ + + if ( cpu_has_xsave ) + xstate_init(c); #ifdef NOISY_CAPS - printk(KERN_DEBUG "CPU: After all inits, caps:"); - for (i = 0; i < NCAPINTS; i++) - printk(" %08x", c->x86_capability[i]); - printk("\n"); + printk(KERN_DEBUG "CPU: After all inits, caps:"); + for ( i = 0; i < NCAPINTS; i++ ) + printk(" %08x", c->x86_capability[i]); + printk("\n"); #endif - if (system_state == SYS_STATE_resume) - return; - - /* - * On SMP, boot_cpu_data holds the common feature set between - * all CPUs; so make sure that we indicate which features are - * common between the CPUs. The first time this routine gets - * executed, c == &boot_cpu_data. - */ - if ( c != &boot_cpu_data ) { - /* AND the already accumulated flags with these */ - for ( i = 0 ; i < NCAPINTS ; i++ ) - boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; - - mcheck_init(c, false); - } else { - mcheck_init(c, true); - - mtrr_bp_init(); - } + if ( system_state == SYS_STATE_resume ) + return; + + /* + * On SMP, boot_cpu_data holds the common feature set between + * all CPUs; so make sure that we indicate which features are + * common between the CPUs. The first time this routine gets + * executed, c == &boot_cpu_data. 
+ */ + if ( c != &boot_cpu_data ) + { + /* AND the already accumulated flags with these */ + for ( i = 0; i < NCAPINTS; i++ ) + boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; + + mcheck_init(c, false); + } + else + { + mcheck_init(c, true); + + mtrr_bp_init(); + } } /* leaf 0xb SMT level */ -#define SMT_LEVEL 0 +#define SMT_LEVEL 0 /* leaf 0xb sub-leaf types */ -#define INVALID_TYPE 0 -#define SMT_TYPE 1 -#define CORE_TYPE 2 +#define INVALID_TYPE 0 +#define SMT_TYPE 1 +#define CORE_TYPE 2 -#define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff) -#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f) -#define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff) +#define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff) +#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax)&0x1f) +#define LEVEL_MAX_SIBLINGS(ebx) ((ebx)&0xffff) /* * Check for extended topology enumeration cpuid leaf 0xb and if it @@ -534,160 +547,159 @@ void identify_cpu(struct cpuinfo_x86 *c) */ void detect_extended_topology(struct cpuinfo_x86 *c) { - unsigned int eax, ebx, ecx, edx, sub_index; - unsigned int ht_mask_width, core_plus_mask_width; - unsigned int core_select_mask, core_level_siblings; - unsigned int initial_apicid; + unsigned int eax, ebx, ecx, edx, sub_index; + unsigned int ht_mask_width, core_plus_mask_width; + unsigned int core_select_mask, core_level_siblings; + unsigned int initial_apicid; - if ( c->cpuid_level < 0xb ) - return; + if ( c->cpuid_level < 0xb ) + return; - cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx); + cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx); - /* Check if the cpuid leaf 0xb is actually implemented */ - if ( ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE) ) - return; + /* Check if the cpuid leaf 0xb is actually implemented */ + if ( ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE) ) + return; - __set_bit(X86_FEATURE_XTOPOLOGY, c->x86_capability); + __set_bit(X86_FEATURE_XTOPOLOGY, c->x86_capability); - initial_apicid = edx; + initial_apicid = edx; - /* Populate HT related information from sub-leaf level 0 */ - core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); - core_level_siblings = c->x86_num_siblings = 1u << ht_mask_width; + /* Populate HT related information from sub-leaf level 0 */ + core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); + core_level_siblings = c->x86_num_siblings = 1u << ht_mask_width; - sub_index = 1; - do { - cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx); + sub_index = 1; + do { + cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx); - /* Check for the Core type in the implemented sub leaves */ - if ( LEAFB_SUBTYPE(ecx) == CORE_TYPE ) { - core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); - core_level_siblings = 1u << core_plus_mask_width; - break; - } + /* Check for the Core type in the implemented sub leaves */ + if ( LEAFB_SUBTYPE(ecx) == CORE_TYPE ) + { + core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); + core_level_siblings = 1u << core_plus_mask_width; + break; + } - sub_index++; - } while ( LEAFB_SUBTYPE(ecx) != INVALID_TYPE ); + sub_index++; + } while ( LEAFB_SUBTYPE(ecx) != INVALID_TYPE ); - core_select_mask = (~(~0u << core_plus_mask_width)) >> ht_mask_width; + core_select_mask = (~(~0u << core_plus_mask_width)) >> ht_mask_width; - c->cpu_core_id = phys_pkg_id(initial_apicid, ht_mask_width) - & core_select_mask; - c->phys_proc_id = phys_pkg_id(initial_apicid, core_plus_mask_width); + c->cpu_core_id = + phys_pkg_id(initial_apicid, ht_mask_width) & core_select_mask; + c->phys_proc_id = phys_pkg_id(initial_apicid, core_plus_mask_width); - c->apicid 
= phys_pkg_id(initial_apicid, 0); - c->x86_max_cores = (core_level_siblings / c->x86_num_siblings); + c->apicid = phys_pkg_id(initial_apicid, 0); + c->x86_max_cores = (core_level_siblings / c->x86_num_siblings); - if ( opt_cpu_info ) - { - printk("CPU: Physical Processor ID: %d\n", - c->phys_proc_id); - if ( c->x86_max_cores > 1 ) - printk("CPU: Processor Core ID: %d\n", - c->cpu_core_id); - } + if ( opt_cpu_info ) + { + printk("CPU: Physical Processor ID: %d\n", c->phys_proc_id); + if ( c->x86_max_cores > 1 ) + printk("CPU: Processor Core ID: %d\n", c->cpu_core_id); + } } void detect_ht(struct cpuinfo_x86 *c) { - u32 eax, ebx, ecx, edx; - int index_msb, core_bits; + u32 eax, ebx, ecx, edx; + int index_msb, core_bits; - if (!cpu_has(c, X86_FEATURE_HTT) || - cpu_has(c, X86_FEATURE_CMP_LEGACY) || - cpu_has(c, X86_FEATURE_XTOPOLOGY)) - return; + if ( !cpu_has(c, X86_FEATURE_HTT) || cpu_has(c, X86_FEATURE_CMP_LEGACY) || + cpu_has(c, X86_FEATURE_XTOPOLOGY) ) + return; - cpuid(1, &eax, &ebx, &ecx, &edx); - c->x86_num_siblings = (ebx & 0xff0000) >> 16; + cpuid(1, &eax, &ebx, &ecx, &edx); + c->x86_num_siblings = (ebx & 0xff0000) >> 16; - if (c->x86_num_siblings == 1) { - printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); - } else if (c->x86_num_siblings > 1 ) { - index_msb = get_count_order(c->x86_num_siblings); - c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); + if ( c->x86_num_siblings == 1 ) + { + printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); + } + else if ( c->x86_num_siblings > 1 ) + { + index_msb = get_count_order(c->x86_num_siblings); + c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); - if (opt_cpu_info) - printk("CPU: Physical Processor ID: %d\n", - c->phys_proc_id); + if ( opt_cpu_info ) + printk("CPU: Physical Processor ID: %d\n", c->phys_proc_id); - c->x86_num_siblings = c->x86_num_siblings / c->x86_max_cores; + c->x86_num_siblings = c->x86_num_siblings / c->x86_max_cores; - index_msb = get_count_order(c->x86_num_siblings) ; + index_msb = get_count_order(c->x86_num_siblings); - core_bits = get_count_order(c->x86_max_cores); + core_bits = get_count_order(c->x86_max_cores); - c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) & - ((1 << core_bits) - 1); + c->cpu_core_id = + phys_pkg_id((ebx >> 24) & 0xFF, index_msb) & ((1 << core_bits) - 1); - if (opt_cpu_info && c->x86_max_cores > 1) - printk("CPU: Processor Core ID: %d\n", - c->cpu_core_id); - } + if ( opt_cpu_info && c->x86_max_cores > 1 ) + printk("CPU: Processor Core ID: %d\n", c->cpu_core_id); + } } unsigned int __init apicid_to_socket(unsigned int apicid) { - unsigned int dummy; + unsigned int dummy; - if (boot_cpu_has(X86_FEATURE_XTOPOLOGY)) { - unsigned int eax, ecx, sub_index = 1, core_plus_mask_width; + if ( boot_cpu_has(X86_FEATURE_XTOPOLOGY) ) + { + unsigned int eax, ecx, sub_index = 1, core_plus_mask_width; - cpuid_count(0xb, SMT_LEVEL, &eax, &dummy, &dummy, &dummy); - core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); - do { - cpuid_count(0xb, sub_index, &eax, &dummy, &ecx, - &dummy); + cpuid_count(0xb, SMT_LEVEL, &eax, &dummy, &dummy, &dummy); + core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); + do { + cpuid_count(0xb, sub_index, &eax, &dummy, &ecx, &dummy); - if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) { - core_plus_mask_width = - BITS_SHIFT_NEXT_LEVEL(eax); - break; - } + if ( LEAFB_SUBTYPE(ecx) == CORE_TYPE ) + { + core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); + break; + } - sub_index++; - } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE); + sub_index++; + } while ( 
LEAFB_SUBTYPE(ecx) != INVALID_TYPE ); - return _phys_pkg_id(apicid, core_plus_mask_width); - } + return _phys_pkg_id(apicid, core_plus_mask_width); + } - if (boot_cpu_has(X86_FEATURE_HTT) && - !boot_cpu_has(X86_FEATURE_CMP_LEGACY)) { - unsigned int num_siblings = (cpuid_ebx(1) & 0xff0000) >> 16; + if ( boot_cpu_has(X86_FEATURE_HTT) && + !boot_cpu_has(X86_FEATURE_CMP_LEGACY) ) + { + unsigned int num_siblings = (cpuid_ebx(1) & 0xff0000) >> 16; - if (num_siblings) - return _phys_pkg_id(apicid, - get_count_order(num_siblings)); - } + if ( num_siblings ) + return _phys_pkg_id(apicid, get_count_order(num_siblings)); + } - return apicid; + return apicid; } void print_cpu_info(unsigned int cpu) { - const struct cpuinfo_x86 *c = cpu_data + cpu; - const char *vendor = NULL; + const struct cpuinfo_x86 *c = cpu_data + cpu; + const char *vendor = NULL; - if (!opt_cpu_info) - return; + if ( !opt_cpu_info ) + return; - printk("CPU%u: ", cpu); + printk("CPU%u: ", cpu); - if (c->x86_vendor < X86_VENDOR_NUM) - vendor = this_cpu->c_vendor; - else - vendor = c->x86_vendor_id; + if ( c->x86_vendor < X86_VENDOR_NUM ) + vendor = this_cpu->c_vendor; + else + vendor = c->x86_vendor_id; - if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor))) - printk("%s ", vendor); + if ( vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)) ) + printk("%s ", vendor); - if (!c->x86_model_id[0]) - printk("%d86", c->x86); - else - printk("%s", c->x86_model_id); + if ( !c->x86_model_id[0] ) + printk("%d86", c->x86); + else + printk("%s", c->x86_model_id); - printk(" stepping %02x\n", c->x86_mask); + printk(" stepping %02x\n", c->x86_mask); } static cpumask_t cpu_initialized; @@ -702,11 +714,11 @@ static cpumask_t cpu_initialized; void __init early_cpu_init(void) { - intel_cpu_init(); - amd_init_cpu(); - centaur_init_cpu(); - shanghai_init_cpu(); - early_cpu_detect(); + intel_cpu_init(); + amd_init_cpu(); + centaur_init_cpu(); + shanghai_init_cpu(); + early_cpu_detect(); } /* @@ -719,76 +731,72 @@ void __init early_cpu_init(void) */ void load_system_tables(void) { - unsigned int cpu = smp_processor_id(); - unsigned long stack_bottom = get_stack_bottom(), - stack_top = stack_bottom & ~(STACK_SIZE - 1); - - struct tss_struct *tss = &this_cpu(init_tss); - seg_desc_t *gdt = - this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY; - seg_desc_t *compat_gdt = - this_cpu(compat_gdt_table) - FIRST_RESERVED_GDT_ENTRY; - - const struct desc_ptr gdtr = { - .base = (unsigned long)gdt, - .limit = LAST_RESERVED_GDT_BYTE, - }; - const struct desc_ptr idtr = { - .base = (unsigned long)idt_tables[cpu], - .limit = (IDT_ENTRIES * sizeof(idt_entry_t)) - 1, - }; - - *tss = (struct tss_struct){ - /* Main stack for interrupts/exceptions. */ - .rsp0 = stack_bottom, - - /* Ring 1 and 2 stacks poisoned. */ - .rsp1 = 0x8600111111111111ul, - .rsp2 = 0x8600111111111111ul, - - /* - * MCE, NMI and Double Fault handlers get their own stacks. - * All others poisoned. - */ - .ist = { - [IST_MCE - 1] = stack_top + IST_MCE * PAGE_SIZE, - [IST_DF - 1] = stack_top + IST_DF * PAGE_SIZE, - [IST_NMI - 1] = stack_top + IST_NMI * PAGE_SIZE, - [IST_DB - 1] = stack_top + IST_DB * PAGE_SIZE, - - [IST_MAX ... 
ARRAY_SIZE(tss->ist) - 1] = - 0x8600111111111111ul, - }, - - .bitmap = IOBMP_INVALID_OFFSET, - }; - - _set_tssldt_desc( - gdt + TSS_ENTRY, - (unsigned long)tss, - offsetof(struct tss_struct, __cacheline_filler) - 1, - SYS_DESC_tss_avail); - _set_tssldt_desc( - compat_gdt + TSS_ENTRY, - (unsigned long)tss, - offsetof(struct tss_struct, __cacheline_filler) - 1, - SYS_DESC_tss_busy); - - lgdt(&gdtr); - lidt(&idtr); - ltr(TSS_ENTRY << 3); - lldt(0); - - enable_each_ist(idt_tables[cpu]); - - /* - * Bottom-of-stack must be 16-byte aligned! - * - * Defer checks until exception support is sufficiently set up. - */ - BUILD_BUG_ON((sizeof(struct cpu_info) - - offsetof(struct cpu_info, guest_cpu_user_regs.es)) & 0xf); - BUG_ON(system_state != SYS_STATE_early_boot && (stack_bottom & 0xf)); + unsigned int cpu = smp_processor_id(); + unsigned long stack_bottom = get_stack_bottom(), + stack_top = stack_bottom & ~(STACK_SIZE - 1); + + struct tss_struct *tss = &this_cpu(init_tss); + seg_desc_t *gdt = this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY; + seg_desc_t *compat_gdt = + this_cpu(compat_gdt_table) - FIRST_RESERVED_GDT_ENTRY; + + const struct desc_ptr gdtr = { + .base = (unsigned long)gdt, + .limit = LAST_RESERVED_GDT_BYTE, + }; + const struct desc_ptr idtr = { + .base = (unsigned long)idt_tables[cpu], + .limit = (IDT_ENTRIES * sizeof(idt_entry_t)) - 1, + }; + + *tss = (struct tss_struct){ + /* Main stack for interrupts/exceptions. */ + .rsp0 = stack_bottom, + + /* Ring 1 and 2 stacks poisoned. */ + .rsp1 = 0x8600111111111111ul, + .rsp2 = 0x8600111111111111ul, + + /* + * MCE, NMI and Double Fault handlers get their own stacks. + * All others poisoned. + */ + .ist = + { + [IST_MCE - 1] = stack_top + IST_MCE * PAGE_SIZE, + [IST_DF - 1] = stack_top + IST_DF * PAGE_SIZE, + [IST_NMI - 1] = stack_top + IST_NMI * PAGE_SIZE, + [IST_DB - 1] = stack_top + IST_DB * PAGE_SIZE, + + [IST_MAX... ARRAY_SIZE(tss->ist) - 1] = 0x8600111111111111ul, + }, + + .bitmap = IOBMP_INVALID_OFFSET, + }; + + _set_tssldt_desc(gdt + TSS_ENTRY, (unsigned long)tss, + offsetof(struct tss_struct, __cacheline_filler) - 1, + SYS_DESC_tss_avail); + _set_tssldt_desc(compat_gdt + TSS_ENTRY, (unsigned long)tss, + offsetof(struct tss_struct, __cacheline_filler) - 1, + SYS_DESC_tss_busy); + + lgdt(&gdtr); + lidt(&idtr); + ltr(TSS_ENTRY << 3); + lldt(0); + + enable_each_ist(idt_tables[cpu]); + + /* + * Bottom-of-stack must be 16-byte aligned! + * + * Defer checks until exception support is sufficiently set up. + */ + BUILD_BUG_ON((sizeof(struct cpu_info) - + offsetof(struct cpu_info, guest_cpu_user_regs.es)) & + 0xf); + BUG_ON(system_state != SYS_STATE_early_boot && (stack_bottom & 0xf)); } /* @@ -799,38 +807,40 @@ void load_system_tables(void) */ void cpu_init(void) { - int cpu = smp_processor_id(); - - if (cpumask_test_and_set_cpu(cpu, &cpu_initialized)) { - printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); - for (;;) local_irq_enable(); - } - if (opt_cpu_info) - printk("Initializing CPU#%d\n", cpu); - - wrmsrl(MSR_IA32_CR_PAT, XEN_MSR_PAT); - - /* Install correct page table. */ - write_ptbase(current); - - /* Ensure FPU gets initialised for each domain. */ - stts(); - - /* Reset debug registers: */ - write_debugreg(0, 0); - write_debugreg(1, 0); - write_debugreg(2, 0); - write_debugreg(3, 0); - write_debugreg(6, X86_DR6_DEFAULT); - write_debugreg(7, X86_DR7_DEFAULT); - - /* Enable NMIs. Our loader (e.g. Tboot) may have left them disabled. 
*/ - enable_nmis(); + int cpu = smp_processor_id(); + + if ( cpumask_test_and_set_cpu(cpu, &cpu_initialized) ) + { + printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); + for ( ;; ) + local_irq_enable(); + } + if ( opt_cpu_info ) + printk("Initializing CPU#%d\n", cpu); + + wrmsrl(MSR_IA32_CR_PAT, XEN_MSR_PAT); + + /* Install correct page table. */ + write_ptbase(current); + + /* Ensure FPU gets initialised for each domain. */ + stts(); + + /* Reset debug registers: */ + write_debugreg(0, 0); + write_debugreg(1, 0); + write_debugreg(2, 0); + write_debugreg(3, 0); + write_debugreg(6, X86_DR6_DEFAULT); + write_debugreg(7, X86_DR7_DEFAULT); + + /* Enable NMIs. Our loader (e.g. Tboot) may have left them disabled. */ + enable_nmis(); } void cpu_uninit(unsigned int cpu) { - cpumask_clear_cpu(cpu, &cpu_initialized); + cpumask_clear_cpu(cpu, &cpu_initialized); } /* @@ -854,19 +864,20 @@ features are */ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id table[]) { - const struct x86_cpu_id *m; - const struct cpuinfo_x86 *c = &boot_cpu_data; - - for (m = table; m->vendor | m->family | m->model | m->feature; m++) { - if (c->x86_vendor != m->vendor) - continue; - if (c->x86 != m->family) - continue; - if (c->x86_model != m->model) - continue; - if (!cpu_has(c, m->feature)) - continue; - return m; - } - return NULL; + const struct x86_cpu_id *m; + const struct cpuinfo_x86 *c = &boot_cpu_data; + + for ( m = table; m->vendor | m->family | m->model | m->feature; m++ ) + { + if ( c->x86_vendor != m->vendor ) + continue; + if ( c->x86 != m->family ) + continue; + if ( c->x86_model != m->model ) + continue; + if ( !cpu_has(c, m->feature) ) + continue; + return m; + } + return NULL; } diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c index 65fa3d611f..462abfeab6 100644 --- a/xen/arch/x86/cpu/intel.c +++ b/xen/arch/x86/cpu/intel.c @@ -21,21 +21,21 @@ */ static uint64_t __init _probe_mask_msr(unsigned int *msr, uint64_t caps) { - uint64_t val = 0; + uint64_t val = 0; - expected_levelling_cap |= caps; + expected_levelling_cap |= caps; - if (rdmsr_safe(*msr, val) || wrmsr_safe(*msr, val)) - *msr = 0; - else - levelling_caps |= caps; + if ( rdmsr_safe(*msr, val) || wrmsr_safe(*msr, val) ) + *msr = 0; + else + levelling_caps |= caps; - return val; + return val; } /* Indices of the masking MSRs, or 0 if unavailable. */ static unsigned int __read_mostly msr_basic, __read_mostly msr_ext, - __read_mostly msr_xsave; + __read_mostly msr_xsave; /* * Probe for the existance of the expected masking MSRs. They might easily @@ -43,69 +43,68 @@ static unsigned int __read_mostly msr_basic, __read_mostly msr_ext, */ static void __init probe_masking_msrs(void) { - const struct cpuinfo_x86 *c = &boot_cpu_data; - unsigned int exp_msr_basic, exp_msr_ext, exp_msr_xsave; - - /* Only family 6 supports this feature. */ - if (c->x86 != 6) - return; - - switch (c->x86_model) { - case 0x17: /* Yorkfield, Wolfdale, Penryn, Harpertown(DP) */ - case 0x1d: /* Dunnington(MP) */ - msr_basic = MSR_INTEL_MASK_V1_CPUID1; - break; - - case 0x1a: /* Bloomfield, Nehalem-EP(Gainestown) */ - case 0x1e: /* Clarksfield, Lynnfield, Jasper Forest */ - case 0x1f: /* Something Nehalem-based - perhaps Auburndale/Havendale? 
*/ - case 0x25: /* Arrandale, Clarksdale */ - case 0x2c: /* Gulftown, Westmere-EP */ - case 0x2e: /* Nehalem-EX(Beckton) */ - case 0x2f: /* Westmere-EX */ - msr_basic = MSR_INTEL_MASK_V2_CPUID1; - msr_ext = MSR_INTEL_MASK_V2_CPUID80000001; - break; - - case 0x2a: /* SandyBridge */ - case 0x2d: /* SandyBridge-E, SandyBridge-EN, SandyBridge-EP */ - msr_basic = MSR_INTEL_MASK_V3_CPUID1; - msr_ext = MSR_INTEL_MASK_V3_CPUID80000001; - msr_xsave = MSR_INTEL_MASK_V3_CPUIDD_01; - break; - } - - exp_msr_basic = msr_basic; - exp_msr_ext = msr_ext; - exp_msr_xsave = msr_xsave; - - if (msr_basic) - cpuidmask_defaults._1cd = _probe_mask_msr(&msr_basic, LCAP_1cd); - - if (msr_ext) - cpuidmask_defaults.e1cd = _probe_mask_msr(&msr_ext, LCAP_e1cd); - - if (msr_xsave) - cpuidmask_defaults.Da1 = _probe_mask_msr(&msr_xsave, LCAP_Da1); - - /* - * Don't bother warning about a mismatch if virtualised. These MSRs - * are not architectural and almost never virtualised. - */ - if ((expected_levelling_cap == levelling_caps) || - cpu_has_hypervisor) - return; - - printk(XENLOG_WARNING "Mismatch between expected (%#x) " - "and real (%#x) levelling caps: missing %#x\n", - expected_levelling_cap, levelling_caps, - (expected_levelling_cap ^ levelling_caps) & levelling_caps); - printk(XENLOG_WARNING "Fam %#x, model %#x expected (%#x/%#x/%#x), " - "got (%#x/%#x/%#x)\n", c->x86, c->x86_model, - exp_msr_basic, exp_msr_ext, exp_msr_xsave, - msr_basic, msr_ext, msr_xsave); - printk(XENLOG_WARNING - "If not running virtualised, please report a bug\n"); + const struct cpuinfo_x86 *c = &boot_cpu_data; + unsigned int exp_msr_basic, exp_msr_ext, exp_msr_xsave; + + /* Only family 6 supports this feature. */ + if ( c->x86 != 6 ) + return; + + switch (c->x86_model) + { + case 0x17: /* Yorkfield, Wolfdale, Penryn, Harpertown(DP) */ + case 0x1d: /* Dunnington(MP) */ + msr_basic = MSR_INTEL_MASK_V1_CPUID1; + break; + + case 0x1a: /* Bloomfield, Nehalem-EP(Gainestown) */ + case 0x1e: /* Clarksfield, Lynnfield, Jasper Forest */ + case 0x1f: /* Something Nehalem-based - perhaps Auburndale/Havendale? */ + case 0x25: /* Arrandale, Clarksdale */ + case 0x2c: /* Gulftown, Westmere-EP */ + case 0x2e: /* Nehalem-EX(Beckton) */ + case 0x2f: /* Westmere-EX */ + msr_basic = MSR_INTEL_MASK_V2_CPUID1; + msr_ext = MSR_INTEL_MASK_V2_CPUID80000001; + break; + + case 0x2a: /* SandyBridge */ + case 0x2d: /* SandyBridge-E, SandyBridge-EN, SandyBridge-EP */ + msr_basic = MSR_INTEL_MASK_V3_CPUID1; + msr_ext = MSR_INTEL_MASK_V3_CPUID80000001; + msr_xsave = MSR_INTEL_MASK_V3_CPUIDD_01; + break; + } + + exp_msr_basic = msr_basic; + exp_msr_ext = msr_ext; + exp_msr_xsave = msr_xsave; + + if ( msr_basic ) + cpuidmask_defaults._1cd = _probe_mask_msr(&msr_basic, LCAP_1cd); + + if ( msr_ext ) + cpuidmask_defaults.e1cd = _probe_mask_msr(&msr_ext, LCAP_e1cd); + + if ( msr_xsave ) + cpuidmask_defaults.Da1 = _probe_mask_msr(&msr_xsave, LCAP_Da1); + + /* + * Don't bother warning about a mismatch if virtualised. These MSRs + * are not architectural and almost never virtualised. 
+ */ + if ( (expected_levelling_cap == levelling_caps) || cpu_has_hypervisor ) + return; + + printk(XENLOG_WARNING "Mismatch between expected (%#x) " + "and real (%#x) levelling caps: missing %#x\n", + expected_levelling_cap, levelling_caps, + (expected_levelling_cap ^ levelling_caps) & levelling_caps); + printk(XENLOG_WARNING "Fam %#x, model %#x expected (%#x/%#x/%#x), " + "got (%#x/%#x/%#x)\n", + c->x86, c->x86_model, exp_msr_basic, exp_msr_ext, exp_msr_xsave, + msr_basic, msr_ext, msr_xsave); + printk(XENLOG_WARNING "If not running virtualised, please report a bug\n"); } /* @@ -116,42 +115,44 @@ static void __init probe_masking_msrs(void) */ static void intel_ctxt_switch_masking(const struct vcpu *next) { - struct cpuidmasks *these_masks = &this_cpu(cpuidmasks); - const struct domain *nextd = next ? next->domain : NULL; - const struct cpuidmasks *masks = - (nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks) - ? nextd->arch.pv.cpuidmasks : &cpuidmask_defaults; - - if (msr_basic) { - uint64_t val = masks->_1cd; - - /* - * OSXSAVE defaults to 1, which causes fast-forwarding of - * Xen's real setting. Clobber it if disabled by the guest - * kernel. - */ - if (next && is_pv_vcpu(next) && !is_idle_vcpu(next) && - !(next->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE)) - val &= ~(uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE); - - if (unlikely(these_masks->_1cd != val)) { - wrmsrl(msr_basic, val); - these_masks->_1cd = val; - } + struct cpuidmasks *these_masks = &this_cpu(cpuidmasks); + const struct domain *nextd = next ? next->domain : NULL; + const struct cpuidmasks *masks = + (nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks) + ? nextd->arch.pv.cpuidmasks + : &cpuidmask_defaults; + + if ( msr_basic ) + { + uint64_t val = masks->_1cd; + + /* + * OSXSAVE defaults to 1, which causes fast-forwarding of + * Xen's real setting. Clobber it if disabled by the guest + * kernel. + */ + if ( next && is_pv_vcpu(next) && !is_idle_vcpu(next) && + !(next->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE) ) + val &= ~(uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE); + + if ( unlikely(these_masks->_1cd != val) ) + { + wrmsrl(msr_basic, val); + these_masks->_1cd = val; } + } -#define LAZY(msr, field) \ - ({ \ - if (unlikely(these_masks->field != masks->field) && \ - (msr)) \ - { \ - wrmsrl((msr), masks->field); \ - these_masks->field = masks->field; \ - } \ - }) +#define LAZY(msr, field) \ + ({ \ + if ( unlikely(these_masks->field != masks->field) && (msr) ) \ + { \ + wrmsrl((msr), masks->field); \ + these_masks->field = masks->field; \ + } \ + }) - LAZY(msr_ext, e1cd); - LAZY(msr_xsave, Da1); + LAZY(msr_ext, e1cd); + LAZY(msr_xsave, Da1); #undef LAZY } @@ -164,102 +165,106 @@ static void intel_ctxt_switch_masking(const struct vcpu *next) */ static void __init noinline intel_init_levelling(void) { - if (probe_cpuid_faulting()) - return; + if ( probe_cpuid_faulting() ) + return; - probe_masking_msrs(); + probe_masking_msrs(); - if (msr_basic) { - uint32_t ecx, edx, tmp; + if ( msr_basic ) + { + uint32_t ecx, edx, tmp; - cpuid(0x00000001, &tmp, &tmp, &ecx, &edx); + cpuid(0x00000001, &tmp, &tmp, &ecx, &edx); - ecx &= opt_cpuid_mask_ecx; - edx &= opt_cpuid_mask_edx; + ecx &= opt_cpuid_mask_ecx; + edx &= opt_cpuid_mask_edx; - /* Fast-forward bits - Must be set. */ - if (ecx & cpufeat_mask(X86_FEATURE_XSAVE)) - ecx |= cpufeat_mask(X86_FEATURE_OSXSAVE); - edx |= cpufeat_mask(X86_FEATURE_APIC); + /* Fast-forward bits - Must be set. 
*/ + if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) ) + ecx |= cpufeat_mask(X86_FEATURE_OSXSAVE); + edx |= cpufeat_mask(X86_FEATURE_APIC); - cpuidmask_defaults._1cd &= ((u64)edx << 32) | ecx; - } + cpuidmask_defaults._1cd &= ((u64)edx << 32) | ecx; + } - if (msr_ext) { - uint32_t ecx, edx, tmp; + if ( msr_ext ) + { + uint32_t ecx, edx, tmp; - cpuid(0x80000001, &tmp, &tmp, &ecx, &edx); + cpuid(0x80000001, &tmp, &tmp, &ecx, &edx); - ecx &= opt_cpuid_mask_ext_ecx; - edx &= opt_cpuid_mask_ext_edx; + ecx &= opt_cpuid_mask_ext_ecx; + edx &= opt_cpuid_mask_ext_edx; - cpuidmask_defaults.e1cd &= ((u64)edx << 32) | ecx; - } + cpuidmask_defaults.e1cd &= ((u64)edx << 32) | ecx; + } - if (msr_xsave) { - uint32_t eax, tmp; + if ( msr_xsave ) + { + uint32_t eax, tmp; - cpuid_count(0x0000000d, 1, &eax, &tmp, &tmp, &tmp); + cpuid_count(0x0000000d, 1, &eax, &tmp, &tmp, &tmp); - eax &= opt_cpuid_mask_xsave_eax; + eax &= opt_cpuid_mask_xsave_eax; - cpuidmask_defaults.Da1 &= (~0ULL << 32) | eax; - } + cpuidmask_defaults.Da1 &= (~0ULL << 32) | eax; + } - if (opt_cpu_info) { - printk(XENLOG_INFO "Levelling caps: %#x\n", levelling_caps); + if ( opt_cpu_info ) + { + printk(XENLOG_INFO "Levelling caps: %#x\n", levelling_caps); - if (!cpu_has_cpuid_faulting) - printk(XENLOG_INFO - "MSR defaults: 1d 0x%08x, 1c 0x%08x, e1d 0x%08x, " - "e1c 0x%08x, Da1 0x%08x\n", - (uint32_t)(cpuidmask_defaults._1cd >> 32), - (uint32_t)cpuidmask_defaults._1cd, - (uint32_t)(cpuidmask_defaults.e1cd >> 32), - (uint32_t)cpuidmask_defaults.e1cd, - (uint32_t)cpuidmask_defaults.Da1); - } + if ( !cpu_has_cpuid_faulting ) + printk(XENLOG_INFO + "MSR defaults: 1d 0x%08x, 1c 0x%08x, e1d 0x%08x, " + "e1c 0x%08x, Da1 0x%08x\n", + (uint32_t)(cpuidmask_defaults._1cd >> 32), + (uint32_t)cpuidmask_defaults._1cd, + (uint32_t)(cpuidmask_defaults.e1cd >> 32), + (uint32_t)cpuidmask_defaults.e1cd, + (uint32_t)cpuidmask_defaults.Da1); + } - if (levelling_caps) - ctxt_switch_masking = intel_ctxt_switch_masking; + if ( levelling_caps ) + ctxt_switch_masking = intel_ctxt_switch_masking; } static void early_init_intel(struct cpuinfo_x86 *c) { - u64 misc_enable, disable; - - /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ - if (c->x86 == 15 && c->x86_cache_alignment == 64) - c->x86_cache_alignment = 128; - - /* Unmask CPUID levels and NX if masked: */ - rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); - - disable = misc_enable & (MSR_IA32_MISC_ENABLE_LIMIT_CPUID | - MSR_IA32_MISC_ENABLE_XD_DISABLE); - if (disable) { - wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable & ~disable); - bootsym(trampoline_misc_enable_off) |= disable; - } - - if (disable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) - printk(KERN_INFO "revised cpuid level: %d\n", - cpuid_eax(0)); - if (disable & MSR_IA32_MISC_ENABLE_XD_DISABLE) { - write_efer(read_efer() | EFER_NX); - printk(KERN_INFO - "re-enabled NX (Execute Disable) protection\n"); - } - - /* CPUID workaround for Intel 0F33/0F34 CPU */ - if (boot_cpu_data.x86 == 0xF && boot_cpu_data.x86_model == 3 && - (boot_cpu_data.x86_mask == 3 || boot_cpu_data.x86_mask == 4)) - paddr_bits = 36; - - if (c == &boot_cpu_data) - intel_init_levelling(); - - ctxt_switch_levelling(NULL); + u64 misc_enable, disable; + + /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ + if ( c->x86 == 15 && c->x86_cache_alignment == 64 ) + c->x86_cache_alignment = 128; + + /* Unmask CPUID levels and NX if masked: */ + rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); + + disable = misc_enable & (MSR_IA32_MISC_ENABLE_LIMIT_CPUID | + 
MSR_IA32_MISC_ENABLE_XD_DISABLE); + if ( disable ) + { + wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable & ~disable); + bootsym(trampoline_misc_enable_off) |= disable; + } + + if ( disable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID ) + printk(KERN_INFO "revised cpuid level: %d\n", cpuid_eax(0)); + if ( disable & MSR_IA32_MISC_ENABLE_XD_DISABLE ) + { + write_efer(read_efer() | EFER_NX); + printk(KERN_INFO "re-enabled NX (Execute Disable) protection\n"); + } + + /* CPUID workaround for Intel 0F33/0F34 CPU */ + if ( boot_cpu_data.x86 == 0xF && boot_cpu_data.x86_model == 3 && + (boot_cpu_data.x86_mask == 3 || boot_cpu_data.x86_mask == 4) ) + paddr_bits = 36; + + if ( c == &boot_cpu_data ) + intel_init_levelling(); + + ctxt_switch_levelling(NULL); } /* @@ -271,92 +276,94 @@ static void early_init_intel(struct cpuinfo_x86 *c) */ static void Intel_errata_workarounds(struct cpuinfo_x86 *c) { - unsigned long lo, hi; - - if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { - rdmsr (MSR_IA32_MISC_ENABLE, lo, hi); - if ((lo & (1<<9)) == 0) { - printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); - printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); - lo |= (1<<9); /* Disable hw prefetching */ - wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); - } - } - - if (c->x86 == 6 && cpu_has_clflush && - (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) - __set_bit(X86_FEATURE_CLFLUSH_MONITOR, c->x86_capability); -} + unsigned long lo, hi; + + if ( (c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1) ) + { + rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); + if ( (lo & (1 << 9)) == 0 ) + { + printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); + printk(KERN_INFO + "CPU: Disabling hardware prefetching (Errata 037)\n"); + lo |= (1 << 9); /* Disable hw prefetching */ + wrmsr(MSR_IA32_MISC_ENABLE, lo, hi); + } + } + if ( c->x86 == 6 && cpu_has_clflush && + (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47) ) + __set_bit(X86_FEATURE_CLFLUSH_MONITOR, c->x86_capability); +} /* * find out the number of processor cores on the die */ static int num_cpu_cores(struct cpuinfo_x86 *c) { - unsigned int eax, ebx, ecx, edx; + unsigned int eax, ebx, ecx, edx; - if (c->cpuid_level < 4) - return 1; + if ( c->cpuid_level < 4 ) + return 1; - /* Intel has a non-standard dependency on %ecx for this CPUID level. */ - cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); - if (eax & 0x1f) - return ((eax >> 26) + 1); - else - return 1; + /* Intel has a non-standard dependency on %ecx for this CPUID level. 
*/ + cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); + if ( eax & 0x1f ) + return ((eax >> 26) + 1); + else + return 1; } static void init_intel(struct cpuinfo_x86 *c) { - unsigned int l2 = 0; - - /* Detect the extended topology information if available */ - detect_extended_topology(c); - - l2 = init_intel_cacheinfo(c); - if (c->cpuid_level > 9) { - unsigned eax = cpuid_eax(10); - /* Check for version and the number of counters */ - if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) - __set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability); - } - - if ( !cpu_has(c, X86_FEATURE_XTOPOLOGY) ) - { - c->x86_max_cores = num_cpu_cores(c); - detect_ht(c); - } - - /* Work around errata */ - Intel_errata_workarounds(c); - - if ((c->x86 == 0xf && c->x86_model >= 0x03) || - (c->x86 == 0x6 && c->x86_model >= 0x0e)) - __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); - if (cpu_has(c, X86_FEATURE_ITSC)) { - __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); - __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability); - __set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability); - } - if ( opt_arat && - ( c->cpuid_level >= 0x00000006 ) && - ( cpuid_eax(0x00000006) & (1u<<2) ) ) - __set_bit(X86_FEATURE_ARAT, c->x86_capability); + unsigned int l2 = 0; + + /* Detect the extended topology information if available */ + detect_extended_topology(c); + + l2 = init_intel_cacheinfo(c); + if ( c->cpuid_level > 9 ) + { + unsigned eax = cpuid_eax(10); + /* Check for version and the number of counters */ + if ( (eax & 0xff) && (((eax >> 8) & 0xff) > 1) ) + __set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability); + } + + if ( !cpu_has(c, X86_FEATURE_XTOPOLOGY) ) + { + c->x86_max_cores = num_cpu_cores(c); + detect_ht(c); + } + + /* Work around errata */ + Intel_errata_workarounds(c); + + if ( (c->x86 == 0xf && c->x86_model >= 0x03) || + (c->x86 == 0x6 && c->x86_model >= 0x0e) ) + __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); + if ( cpu_has(c, X86_FEATURE_ITSC) ) + { + __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); + __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability); + __set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability); + } + if ( opt_arat && (c->cpuid_level >= 0x00000006) && + (cpuid_eax(0x00000006) & (1u << 2)) ) + __set_bit(X86_FEATURE_ARAT, c->x86_capability); } static const struct cpu_dev intel_cpu_dev = { - .c_vendor = "Intel", - .c_ident = { "GenuineIntel" }, - .c_early_init = early_init_intel, - .c_init = init_intel, + .c_vendor = "Intel", + .c_ident = {"GenuineIntel"}, + .c_early_init = early_init_intel, + .c_init = init_intel, }; int __init intel_cpu_init(void) { - cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev; - return 0; + cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev; + return 0; } // arch_initcall(intel_cpu_init); - diff --git a/xen/arch/x86/cpu/intel_cacheinfo.c b/xen/arch/x86/cpu/intel_cacheinfo.c index 88b61fddfe..34997cf91e 100644 --- a/xen/arch/x86/cpu/intel_cacheinfo.c +++ b/xen/arch/x86/cpu/intel_cacheinfo.c @@ -12,256 +12,276 @@ #include #include -#define LVL_1_INST 1 -#define LVL_1_DATA 2 -#define LVL_2 3 -#define LVL_3 4 -#define LVL_TRACE 5 +#define LVL_1_INST 1 +#define LVL_1_DATA 2 +#define LVL_2 3 +#define LVL_3 4 +#define LVL_TRACE 5 struct _cache_table { - unsigned char descriptor; - char cache_type; - short size; + unsigned char descriptor; + char cache_type; + short size; }; -/* all the cache descriptor types we care about (no TLB or trace cache entries) */ -static const struct _cache_table cache_table[] = -{ - { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ - 
{ 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ - { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */ - { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */ - { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ - { 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */ - { 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */ - { 0x29, LVL_3, 4096 }, /* 8-way set assoc, sectored cache, 64 byte line size */ - { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */ - { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */ - { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */ - { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */ - { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */ - { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */ - { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */ - { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ - { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */ - { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */ - { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */ - { 0x44, LVL_2, 1024 }, /* 4-way set assoc, 32 byte line size */ - { 0x45, LVL_2, 2048 }, /* 4-way set assoc, 32 byte line size */ - { 0x46, LVL_3, 4096 }, /* 4-way set assoc, 64 byte line size */ - { 0x47, LVL_3, 8192 }, /* 8-way set assoc, 64 byte line size */ - { 0x49, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */ - { 0x4a, LVL_3, 6144 }, /* 12-way set assoc, 64 byte line size */ - { 0x4b, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */ - { 0x4c, LVL_3, 12288 }, /* 12-way set assoc, 64 byte line size */ - { 0x4d, LVL_3, 16384 }, /* 16-way set assoc, 64 byte line size */ - { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */ - { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */ - { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */ - { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */ - { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */ - { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */ - { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */ - { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */ - { 0x78, LVL_2, 1024 }, /* 4-way set assoc, 64 byte line size */ - { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */ - { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */ - { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */ - { 0x7c, LVL_2, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */ - { 0x7d, LVL_2, 2048 }, /* 8-way set assoc, 64 byte line size */ - { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */ - { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */ - { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */ - { 0x84, LVL_2, 1024 }, /* 8-way set assoc, 32 byte line size */ - { 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */ - { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */ - { 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */ - { 0x00, 0, 0} -}; +/* all the cache descriptor types we care about (no TLB or trace cache entries) + */ +static const struct _cache_table cache_table[] 
= { + {0x06, LVL_1_INST, 8}, /* 4-way set assoc, 32 byte line size */ + {0x08, LVL_1_INST, 16}, /* 4-way set assoc, 32 byte line size */ + {0x0a, LVL_1_DATA, 8}, /* 2 way set assoc, 32 byte line size */ + {0x0c, LVL_1_DATA, 16}, /* 4-way set assoc, 32 byte line size */ + {0x22, LVL_3, 512}, /* 4-way set assoc, sectored cache, 64 byte line size */ + {0x23, LVL_3, + 1024}, /* 8-way set assoc, sectored cache, 64 byte line size */ + {0x25, LVL_3, + 2048}, /* 8-way set assoc, sectored cache, 64 byte line size */ + {0x29, LVL_3, + 4096}, /* 8-way set assoc, sectored cache, 64 byte line size */ + {0x2c, LVL_1_DATA, 32}, /* 8-way set assoc, 64 byte line size */ + {0x30, LVL_1_INST, 32}, /* 8-way set assoc, 64 byte line size */ + {0x39, LVL_2, 128}, /* 4-way set assoc, sectored cache, 64 byte line size */ + {0x3a, LVL_2, 192}, /* 6-way set assoc, sectored cache, 64 byte line size */ + {0x3b, LVL_2, 128}, /* 2-way set assoc, sectored cache, 64 byte line size */ + {0x3c, LVL_2, 256}, /* 4-way set assoc, sectored cache, 64 byte line size */ + {0x3d, LVL_2, 384}, /* 6-way set assoc, sectored cache, 64 byte line size */ + {0x3e, LVL_2, 512}, /* 4-way set assoc, sectored cache, 64 byte line size */ + {0x41, LVL_2, 128}, /* 4-way set assoc, 32 byte line size */ + {0x42, LVL_2, 256}, /* 4-way set assoc, 32 byte line size */ + {0x43, LVL_2, 512}, /* 4-way set assoc, 32 byte line size */ + {0x44, LVL_2, 1024}, /* 4-way set assoc, 32 byte line size */ + {0x45, LVL_2, 2048}, /* 4-way set assoc, 32 byte line size */ + {0x46, LVL_3, 4096}, /* 4-way set assoc, 64 byte line size */ + {0x47, LVL_3, 8192}, /* 8-way set assoc, 64 byte line size */ + {0x49, LVL_3, 4096}, /* 16-way set assoc, 64 byte line size */ + {0x4a, LVL_3, 6144}, /* 12-way set assoc, 64 byte line size */ + {0x4b, LVL_3, 8192}, /* 16-way set assoc, 64 byte line size */ + {0x4c, LVL_3, 12288}, /* 12-way set assoc, 64 byte line size */ + {0x4d, LVL_3, 16384}, /* 16-way set assoc, 64 byte line size */ + {0x60, LVL_1_DATA, + 16}, /* 8-way set assoc, sectored cache, 64 byte line size */ + {0x66, LVL_1_DATA, + 8}, /* 4-way set assoc, sectored cache, 64 byte line size */ + {0x67, LVL_1_DATA, + 16}, /* 4-way set assoc, sectored cache, 64 byte line size */ + {0x68, LVL_1_DATA, + 32}, /* 4-way set assoc, sectored cache, 64 byte line size */ + {0x70, LVL_TRACE, 12}, /* 8-way set assoc */ + {0x71, LVL_TRACE, 16}, /* 8-way set assoc */ + {0x72, LVL_TRACE, 32}, /* 8-way set assoc */ + {0x73, LVL_TRACE, 64}, /* 8-way set assoc */ + {0x78, LVL_2, 1024}, /* 4-way set assoc, 64 byte line size */ + {0x79, LVL_2, 128}, /* 8-way set assoc, sectored cache, 64 byte line size */ + {0x7a, LVL_2, 256}, /* 8-way set assoc, sectored cache, 64 byte line size */ + {0x7b, LVL_2, 512}, /* 8-way set assoc, sectored cache, 64 byte line size */ + {0x7c, LVL_2, + 1024}, /* 8-way set assoc, sectored cache, 64 byte line size */ + {0x7d, LVL_2, 2048}, /* 8-way set assoc, 64 byte line size */ + {0x7f, LVL_2, 512}, /* 2-way set assoc, 64 byte line size */ + {0x82, LVL_2, 256}, /* 8-way set assoc, 32 byte line size */ + {0x83, LVL_2, 512}, /* 8-way set assoc, 32 byte line size */ + {0x84, LVL_2, 1024}, /* 8-way set assoc, 32 byte line size */ + {0x85, LVL_2, 2048}, /* 8-way set assoc, 32 byte line size */ + {0x86, LVL_2, 512}, /* 4-way set assoc, 64 byte line size */ + {0x87, LVL_2, 1024}, /* 8-way set assoc, 64 byte line size */ + {0x00, 0, 0}}; int cpuid4_cache_lookup(int index, struct cpuid4_info *this_leaf) { - union _cpuid4_leaf_eax eax; - union _cpuid4_leaf_ebx ebx; - union 
_cpuid4_leaf_ecx ecx; - unsigned edx; - - cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); - if (eax.split.type == CACHE_TYPE_NULL) - return -EIO; /* better error ? */ - - this_leaf->eax = eax; - this_leaf->ebx = ebx; - this_leaf->ecx = ecx; - this_leaf->size = (ecx.split.number_of_sets + 1) * - (ebx.split.coherency_line_size + 1) * - (ebx.split.physical_line_partition + 1) * - (ebx.split.ways_of_associativity + 1); - return 0; + union _cpuid4_leaf_eax eax; + union _cpuid4_leaf_ebx ebx; + union _cpuid4_leaf_ecx ecx; + unsigned edx; + + cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); + if ( eax.split.type == CACHE_TYPE_NULL ) + return -EIO; /* better error ? */ + + this_leaf->eax = eax; + this_leaf->ebx = ebx; + this_leaf->ecx = ecx; + this_leaf->size = (ecx.split.number_of_sets + 1) * + (ebx.split.coherency_line_size + 1) * + (ebx.split.physical_line_partition + 1) * + (ebx.split.ways_of_associativity + 1); + return 0; } static int find_num_cache_leaves(void) { - unsigned int eax, ebx, ecx, edx; - union _cpuid4_leaf_eax cache_eax; - int i = -1; - - do { - ++i; - /* Do cpuid(4) loop to find out num_cache_leaves */ - cpuid_count(4, i, &eax, &ebx, &ecx, &edx); - cache_eax.full = eax; - } while (cache_eax.split.type != CACHE_TYPE_NULL); - return i; + unsigned int eax, ebx, ecx, edx; + union _cpuid4_leaf_eax cache_eax; + int i = -1; + + do { + ++i; + /* Do cpuid(4) loop to find out num_cache_leaves */ + cpuid_count(4, i, &eax, &ebx, &ecx, &edx); + cache_eax.full = eax; + } while ( cache_eax.split.type != CACHE_TYPE_NULL ); + return i; } unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c) { - unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ - unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ - unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ - static unsigned int num_cache_leaves; - - if (c->cpuid_level > 3) { - static int is_initialized; - - if (is_initialized == 0) { - /* Init num_cache_leaves from boot CPU */ - num_cache_leaves = find_num_cache_leaves(); - is_initialized++; - } - - /* - * Whenever possible use cpuid(4), deterministic cache - * parameters cpuid leaf to find the cache details - */ - for (i = 0; i < num_cache_leaves; i++) { - struct cpuid4_info this_leaf; - - int retval; - - retval = cpuid4_cache_lookup(i, &this_leaf); - if (retval >= 0) { - switch(this_leaf.eax.split.level) { - case 1: - if (this_leaf.eax.split.type == - CACHE_TYPE_DATA) - new_l1d = this_leaf.size/1024; - else if (this_leaf.eax.split.type == - CACHE_TYPE_INST) - new_l1i = this_leaf.size/1024; - break; - case 2: - new_l2 = this_leaf.size/1024; - break; - case 3: - new_l3 = this_leaf.size/1024; - break; - default: - break; - } - } - } - } - /* - * Don't use cpuid2 if cpuid4 is supported. 
For P4, we use cpuid2 for - * trace cache - */ - if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1 && - c->x86_vendor != X86_VENDOR_SHANGHAI) - { - /* supports eax=2 call */ - int i, j, n; - int regs[4]; - unsigned char *dp = (unsigned char *)regs; - int only_trace = 0; - - if (num_cache_leaves != 0 && c->x86 == 15) - only_trace = 1; - - /* Number of times to iterate */ - n = cpuid_eax(2) & 0xFF; - - for ( i = 0 ; i < n ; i++ ) { - cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); - - /* If bit 31 is set, this is an unknown format */ - for ( j = 0 ; j < 3 ; j++ ) { - if ( regs[j] < 0 ) regs[j] = 0; - } - - /* Byte 0 is level count, not a descriptor */ - for ( j = 1 ; j < 16 ; j++ ) { - unsigned char des = dp[j]; - unsigned char k = 0; - - /* look up this descriptor in the table */ - while (cache_table[k].descriptor != 0) - { - if (cache_table[k].descriptor == des) { - if (only_trace && cache_table[k].cache_type != LVL_TRACE) - break; - switch (cache_table[k].cache_type) { - case LVL_1_INST: - l1i += cache_table[k].size; - break; - case LVL_1_DATA: - l1d += cache_table[k].size; - break; - case LVL_2: - l2 += cache_table[k].size; - break; - case LVL_3: - l3 += cache_table[k].size; - break; - case LVL_TRACE: - trace += cache_table[k].size; - break; - } - - break; - } - - k++; - } - } - } - } - - if (new_l1d) - l1d = new_l1d; - - if (new_l1i) - l1i = new_l1i; - - if (new_l2) { - l2 = new_l2; - } - - if (new_l3) { - l3 = new_l3; - } - - if (opt_cpu_info) { - if (trace) - printk("CPU: Trace cache: %dK uops", trace); - else if ( l1i ) - printk("CPU: L1 I cache: %dK", l1i); - - if (l1d) - printk(", L1 D cache: %dK\n", l1d); - else - printk("\n"); - - if (l2) - printk("CPU: L2 cache: %dK\n", l2); - - if (l3) - printk("CPU: L3 cache: %dK\n", l3); - } - - c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d)); - - return l2; + unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ + unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ + unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ + static unsigned int num_cache_leaves; + + if ( c->cpuid_level > 3 ) + { + static int is_initialized; + + if ( is_initialized == 0 ) + { + /* Init num_cache_leaves from boot CPU */ + num_cache_leaves = find_num_cache_leaves(); + is_initialized++; + } + + /* + * Whenever possible use cpuid(4), deterministic cache + * parameters cpuid leaf to find the cache details + */ + for ( i = 0; i < num_cache_leaves; i++ ) + { + struct cpuid4_info this_leaf; + + int retval; + + retval = cpuid4_cache_lookup(i, &this_leaf); + if ( retval >= 0 ) + { + switch (this_leaf.eax.split.level) + { + case 1: + if ( this_leaf.eax.split.type == CACHE_TYPE_DATA ) + new_l1d = this_leaf.size / 1024; + else if ( this_leaf.eax.split.type == CACHE_TYPE_INST ) + new_l1i = this_leaf.size / 1024; + break; + case 2: + new_l2 = this_leaf.size / 1024; + break; + case 3: + new_l3 = this_leaf.size / 1024; + break; + default: + break; + } + } + } + } + /* + * Don't use cpuid2 if cpuid4 is supported.
For P4, we use cpuid2 for + * trace cache + */ + if ( (num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1 && + c->x86_vendor != X86_VENDOR_SHANGHAI ) + { + /* supports eax=2 call */ + int i, j, n; + int regs[4]; + unsigned char *dp = (unsigned char *)regs; + int only_trace = 0; + + if ( num_cache_leaves != 0 && c->x86 == 15 ) + only_trace = 1; + + /* Number of times to iterate */ + n = cpuid_eax(2) & 0xFF; + + for ( i = 0; i < n; i++ ) + { + cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); + + /* If bit 31 is set, this is an unknown format */ + for ( j = 0; j < 3; j++ ) + { + if ( regs[j] < 0 ) + regs[j] = 0; + } + + /* Byte 0 is level count, not a descriptor */ + for ( j = 1; j < 16; j++ ) + { + unsigned char des = dp[j]; + unsigned char k = 0; + + /* look up this descriptor in the table */ + while ( cache_table[k].descriptor != 0 ) + { + if ( cache_table[k].descriptor == des ) + { + if ( only_trace && + cache_table[k].cache_type != LVL_TRACE ) + break; + switch (cache_table[k].cache_type) + { + case LVL_1_INST: + l1i += cache_table[k].size; + break; + case LVL_1_DATA: + l1d += cache_table[k].size; + break; + case LVL_2: + l2 += cache_table[k].size; + break; + case LVL_3: + l3 += cache_table[k].size; + break; + case LVL_TRACE: + trace += cache_table[k].size; + break; + } + + break; + } + + k++; + } + } + } + } + + if ( new_l1d ) + l1d = new_l1d; + + if ( new_l1i ) + l1i = new_l1i; + + if ( new_l2 ) + { + l2 = new_l2; + } + + if ( new_l3 ) + { + l3 = new_l3; + } + + if ( opt_cpu_info ) + { + if ( trace ) + printk("CPU: Trace cache: %dK uops", trace); + else if ( l1i ) + printk("CPU: L1 I cache: %dK", l1i); + + if ( l1d ) + printk(", L1 D cache: %dK\n", l1d); + else + printk("\n"); + + if ( l2 ) + printk("CPU: L2 cache: %dK\n", l2); + + if ( l3 ) + printk("CPU: L3 cache: %dK\n", l3); + } + + c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i + l1d)); + + return l2; } diff --git a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c index 222f539b1e..6746aea6b0 100644 --- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c +++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c @@ -16,7 +16,6 @@ * along with this program; If not, see . */ - /* K8 common MCA documentation published at * * AMD64 Architecture Programmer's Manual Volume 2: @@ -67,8 +66,8 @@ static struct timer mce_timer; #define MCE_PERIOD MILLISECS(10000) -#define MCE_MIN MILLISECS(2000) -#define MCE_MAX MILLISECS(30000) +#define MCE_MIN MILLISECS(2000) +#define MCE_MAX MILLISECS(30000) static s_time_t period = MCE_PERIOD; static int hw_threshold = 0; @@ -81,43 +80,50 @@ static int variable_period = 1; */ static void mce_amd_checkregs(void *info) { - mctelem_cookie_t mctc; - struct mca_summary bs; - - mctc = mcheck_mca_logout(MCA_POLLER, mca_allbanks, &bs, NULL); - - if (bs.errcnt && mctc != NULL) { - static uint64_t dumpcount = 0; - - /* If Dom0 enabled the VIRQ_MCA event, then notify it. - * Otherwise, if dom0 has had plenty of time to register - * the virq handler but still hasn't then dump telemetry - * to the Xen console. The call count may be incremented - * on multiple cpus at once and is indicative only - just - * a simple-minded attempt to avoid spamming the console - * for corrected errors in early startup.
*/ - - if (dom0_vmce_enabled()) { - mctelem_commit(mctc); - send_global_virq(VIRQ_MCA); - } else if (++dumpcount >= 10) { - x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc)); - mctelem_dismiss(mctc); - } else { - mctelem_dismiss(mctc); - } - - } else if (mctc != NULL) { - mctelem_dismiss(mctc); - } - - /* adjust is global and all cpus may attempt to increment it without - * synchronisation, so they race and the final adjust count - * (number of cpus seeing any error) is approximate. We can - * guarantee that if any cpu observes an error that the - * adjust count is at least 1. */ - if (bs.errcnt) - adjust++; + mctelem_cookie_t mctc; + struct mca_summary bs; + + mctc = mcheck_mca_logout(MCA_POLLER, mca_allbanks, &bs, NULL); + + if ( bs.errcnt && mctc != NULL ) + { + static uint64_t dumpcount = 0; + + /* If Dom0 enabled the VIRQ_MCA event, then notify it. + * Otherwise, if dom0 has had plenty of time to register + * the virq handler but still hasn't then dump telemetry + * to the Xen console. The call count may be incremented + * on multiple cpus at once and is indicative only - just + * a simple-minded attempt to avoid spamming the console + * for corrected errors in early startup. */ + + if ( dom0_vmce_enabled() ) + { + mctelem_commit(mctc); + send_global_virq(VIRQ_MCA); + } + else if ( ++dumpcount >= 10 ) + { + x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc)); + mctelem_dismiss(mctc); + } + else + { + mctelem_dismiss(mctc); + } + } + else if ( mctc != NULL ) + { + mctelem_dismiss(mctc); + } + + /* adjust is global and all cpus may attempt to increment it without + * synchronisation, so they race and the final adjust count + * (number of cpus seeing any error) is approximate. We can + * guarantee that if any cpu observes an error that the + * adjust count is at least 1. */ + if ( bs.errcnt ) + adjust++; } /* polling service routine invoker: @@ -129,118 +135,135 @@ static void mce_amd_checkregs(void *info) */ static void mce_amd_work_fn(void *data) { - on_each_cpu(mce_amd_checkregs, data, 1); - - if (adjust > 0) { - if (!dom0_vmce_enabled()) { - /* Dom0 did not enable VIRQ_MCA, so Xen is reporting. */ - printk("MCE: polling routine found correctable error. " - " Use mcelog to parse above error output.\n"); - } - } - - if (hw_threshold) { - uint64_t value; - uint32_t counter; - - value = mca_rdmsr(MSR_IA32_MCx_MISC(4)); - /* Only the error counter field is of interest - * Bit field is described in AMD K8 BKDG chapter 6.4.5.5 - */ - counter = (value & 0xFFF00000000ULL) >> 32U; - - /* HW does not count *all* kinds of correctable errors. - * Thus it is possible, that the polling routine finds an - * correctable error even if the HW reports nothing. */ - if (counter > 0) { - /* HW reported correctable errors, - * the polling routine did not find... - */ - if (adjust == 0) { - printk("CPU counter reports %"PRIu32 - " correctable hardware error%s that %s" - " not reported by the status MSRs\n", - counter, - (counter == 1 ? "" : "s"), - (counter == 1 ? 
"was" : "were")); - } - /* subtract 1 to not double count the error - * from the polling service routine */ - adjust += (counter - 1); - - /* Restart counter */ - /* No interrupt, reset counter value */ - value &= ~(0x60FFF00000000ULL); - /* Counter enable */ - value |= (1ULL << 51); - mca_wrmsr(MSR_IA32_MCx_MISC(4), value); - } - } - - if (variable_period && adjust > 0) { - /* Increase polling frequency */ - adjust++; /* adjust == 1 must have an effect */ - period /= adjust; - } else if (variable_period) { - /* Decrease polling frequency */ - period *= 2; - } - if (variable_period && period > MCE_MAX) { - /* limit: Poll at least every 30s */ - period = MCE_MAX; - } - if (variable_period && period < MCE_MIN) { - /* limit: Poll every 2s. - * When this is reached an uncorrectable error - * is expected to happen, if Dom0 does nothing. - */ - period = MCE_MIN; - } - - set_timer(&mce_timer, NOW() + period); - adjust = 0; + on_each_cpu(mce_amd_checkregs, data, 1); + + if ( adjust > 0 ) + { + if ( !dom0_vmce_enabled() ) + { + /* Dom0 did not enable VIRQ_MCA, so Xen is reporting. */ + printk("MCE: polling routine found correctable error. " + " Use mcelog to parse above error output.\n"); + } + } + + if ( hw_threshold ) + { + uint64_t value; + uint32_t counter; + + value = mca_rdmsr(MSR_IA32_MCx_MISC(4)); + /* Only the error counter field is of interest + * Bit field is described in AMD K8 BKDG chapter 6.4.5.5 + */ + counter = (value & 0xFFF00000000ULL) >> 32U; + + /* HW does not count *all* kinds of correctable errors. + * Thus it is possible, that the polling routine finds an + * correctable error even if the HW reports nothing. */ + if ( counter > 0 ) + { + /* HW reported correctable errors, + * the polling routine did not find... + */ + if ( adjust == 0 ) + { + printk("CPU counter reports %" PRIu32 + " correctable hardware error%s that %s" + " not reported by the status MSRs\n", + counter, (counter == 1 ? "" : "s"), + (counter == 1 ? "was" : "were")); + } + /* subtract 1 to not double count the error + * from the polling service routine */ + adjust += (counter - 1); + + /* Restart counter */ + /* No interrupt, reset counter value */ + value &= ~(0x60FFF00000000ULL); + /* Counter enable */ + value |= (1ULL << 51); + mca_wrmsr(MSR_IA32_MCx_MISC(4), value); + } + } + + if ( variable_period && adjust > 0 ) + { + /* Increase polling frequency */ + adjust++; /* adjust == 1 must have an effect */ + period /= adjust; + } + else if ( variable_period ) + { + /* Decrease polling frequency */ + period *= 2; + } + if ( variable_period && period > MCE_MAX ) + { + /* limit: Poll at least every 30s */ + period = MCE_MAX; + } + if ( variable_period && period < MCE_MIN ) + { + /* limit: Poll every 2s. + * When this is reached an uncorrectable error + * is expected to happen, if Dom0 does nothing. + */ + period = MCE_MIN; + } + + set_timer(&mce_timer, NOW() + period); + adjust = 0; } void __init amd_nonfatal_mcheck_init(struct cpuinfo_x86 *c) { - if (c->x86_vendor != X86_VENDOR_AMD) - return; - - /* Assume we are on K8 or newer AMD CPU here */ - - /* The threshold bitfields in MSR_IA32_MC4_MISC has - * been introduced along with the SVME feature bit. */ - if (variable_period && cpu_has(c, X86_FEATURE_SVM)) { - uint64_t value; - - /* hw threshold registers present */ - hw_threshold = 1; - rdmsrl(MSR_IA32_MCx_MISC(4), value); - - if (value & (1ULL << 61)) { /* Locked bit */ - /* Locked by BIOS. 
Not available for use */ - hw_threshold = 0; - } - if (!(value & (1ULL << 63))) { /* Valid bit */ - /* No CtrP present */ - hw_threshold = 0; - } else { - if (!(value & (1ULL << 62))) { /* Counter Bit */ - /* No counter field present */ - hw_threshold = 0; - } - } - - if (hw_threshold) { - /* No interrupt, reset counter value */ - value &= ~(0x60FFF00000000ULL); - /* Counter enable */ - value |= (1ULL << 51); - wrmsrl(MSR_IA32_MCx_MISC(4), value); - printk(XENLOG_INFO "MCA: Use hw thresholding to adjust polling frequency\n"); - } - } - - init_timer(&mce_timer, mce_amd_work_fn, NULL, 0); - set_timer(&mce_timer, NOW() + period); + if ( c->x86_vendor != X86_VENDOR_AMD ) + return; + + /* Assume we are on K8 or newer AMD CPU here */ + + /* The threshold bitfields in MSR_IA32_MC4_MISC has + * been introduced along with the SVME feature bit. */ + if ( variable_period && cpu_has(c, X86_FEATURE_SVM) ) + { + uint64_t value; + + /* hw threshold registers present */ + hw_threshold = 1; + rdmsrl(MSR_IA32_MCx_MISC(4), value); + + if ( value & (1ULL << 61) ) + { /* Locked bit */ + /* Locked by BIOS. Not available for use */ + hw_threshold = 0; + } + if ( !(value & (1ULL << 63)) ) + { /* Valid bit */ + /* No CtrP present */ + hw_threshold = 0; + } + else + { + if ( !(value & (1ULL << 62)) ) + { /* Counter Bit */ + /* No counter field present */ + hw_threshold = 0; + } + } + + if ( hw_threshold ) + { + /* No interrupt, reset counter value */ + value &= ~(0x60FFF00000000ULL); + /* Counter enable */ + value |= (1ULL << 51); + wrmsrl(MSR_IA32_MCx_MISC(4), value); + printk(XENLOG_INFO + "MCA: Use hw thresholding to adjust polling frequency\n"); + } + } + + init_timer(&mce_timer, mce_amd_work_fn, NULL, 0); + set_timer(&mce_timer, NOW() + period); } diff --git a/xen/arch/x86/cpu/mcheck/barrier.c b/xen/arch/x86/cpu/mcheck/barrier.c index a7e5b19a44..7b5253cbf2 100644 --- a/xen/arch/x86/cpu/mcheck/barrier.c +++ b/xen/arch/x86/cpu/mcheck/barrier.c @@ -29,8 +29,8 @@ void mce_barrier_enter(struct mce_softirq_barrier *bar, bool wait) while ( atomic_read(&bar->val) != num_online_cpus() && atomic_read(&bar->outgen) == gen ) { - smp_mb(); - mce_panic_check(); + smp_mb(); + mce_panic_check(); } } @@ -44,11 +44,10 @@ void mce_barrier_exit(struct mce_softirq_barrier *bar, bool wait) gen = atomic_read(&bar->ingen); smp_mb(); atomic_dec(&bar->val); - while ( atomic_read(&bar->val) != 0 && - atomic_read(&bar->ingen) == gen ) + while ( atomic_read(&bar->val) != 0 && atomic_read(&bar->ingen) == gen ) { - smp_mb(); - mce_panic_check(); + smp_mb(); + mce_panic_check(); } } diff --git a/xen/arch/x86/cpu/mcheck/mcaction.c b/xen/arch/x86/cpu/mcheck/mcaction.c index e42267414e..c69ebe8708 100644 --- a/xen/arch/x86/cpu/mcheck/mcaction.c +++ b/xen/arch/x86/cpu/mcheck/mcaction.c @@ -4,9 +4,10 @@ #include "vmce.h" #include "mce.h" -static struct mcinfo_recovery * -mci_action_add_pageoffline(int bank, struct mc_info *mi, - uint64_t mfn, uint32_t status) +static struct mcinfo_recovery *mci_action_add_pageoffline(int bank, + struct mc_info *mi, + uint64_t mfn, + uint32_t status) { struct mcinfo_recovery *rec; @@ -34,10 +35,8 @@ void mce_register_addrcheck(mce_check_addr_t cbfunc) mc_check_addr = cbfunc; } -void -mc_memerr_dhandler(struct mca_binfo *binfo, - enum mce_result *result, - const struct cpu_user_regs *regs) +void mc_memerr_dhandler(struct mca_binfo *binfo, enum mce_result *result, + const struct cpu_user_regs *regs) { struct mcinfo_bank *bank = binfo->mib; struct mcinfo_global *global = binfo->mig; @@ -57,8 +56,8 @@ 
mc_memerr_dhandler(struct mca_binfo *binfo, mfn = bank->mc_addr >> PAGE_SHIFT; if ( offline_page(mfn, 1, &status) ) { - dprintk(XENLOG_WARNING, - "Failed to offline page %lx for MCE error\n", mfn); + dprintk(XENLOG_WARNING, "Failed to offline page %lx for MCE error\n", + mfn); return; } @@ -75,14 +74,16 @@ mc_memerr_dhandler(struct mca_binfo *binfo, if ( status & PG_OFFLINE_OWNED ) { bank->mc_domid = status >> PG_OFFLINE_OWNER_SHIFT; - mce_printk(MCE_QUIET, "MCE: This error page is ownded" - " by DOM %d\n", bank->mc_domid); + mce_printk(MCE_QUIET, + "MCE: This error page is ownded" + " by DOM %d\n", + bank->mc_domid); /* * XXX: Cannot handle shared pages yet * (this should identify all domains and gfn mapping to * the mfn in question) */ - BUG_ON( bank->mc_domid == DOMID_COW ); + BUG_ON(bank->mc_domid == DOMID_COW); if ( bank->mc_domid != DOMID_XEN ) { d = get_domain_by_id(bank->mc_domid); @@ -91,8 +92,8 @@ mc_memerr_dhandler(struct mca_binfo *binfo, if ( unmmap_broken_page(d, _mfn(mfn), gfn) ) { - printk("Unmap broken memory %lx for DOM%d failed\n", - mfn, d->domain_id); + printk("Unmap broken memory %lx for DOM%d failed\n", mfn, + d->domain_id); goto vmce_failed; } @@ -113,20 +114,24 @@ mc_memerr_dhandler(struct mca_binfo *binfo, else vmce_vcpuid = mc_vcpuid; - bank->mc_addr = gfn << PAGE_SHIFT | - (bank->mc_addr & (PAGE_SIZE - 1)); + bank->mc_addr = + gfn << PAGE_SHIFT | (bank->mc_addr & (PAGE_SIZE - 1)); if ( fill_vmsr_data(bank, d, global->mc_gstatus, vmce_vcpuid) ) { - mce_printk(MCE_QUIET, "Fill vMCE# data for DOM%d " - "failed\n", bank->mc_domid); + mce_printk(MCE_QUIET, + "Fill vMCE# data for DOM%d " + "failed\n", + bank->mc_domid); goto vmce_failed; } /* We will inject vMCE to DOMU */ if ( inject_vmce(d, vmce_vcpuid) < 0 ) { - mce_printk(MCE_QUIET, "inject vMCE to DOM%d" - " failed\n", d->domain_id); + mce_printk(MCE_QUIET, + "inject vMCE to DOM%d" + " failed\n", + d->domain_id); goto vmce_failed; } @@ -140,7 +145,7 @@ mc_memerr_dhandler(struct mca_binfo *binfo, put_domain(d); return; -vmce_failed: + vmce_failed: put_domain(d); domain_crash(d); } diff --git a/xen/arch/x86/cpu/mcheck/mce-apei.c b/xen/arch/x86/cpu/mcheck/mce-apei.c index 53b6735896..56cae3cce9 100644 --- a/xen/arch/x86/cpu/mcheck/mce-apei.c +++ b/xen/arch/x86/cpu/mcheck/mce-apei.c @@ -36,94 +36,96 @@ #include "mce.h" -#define CPER_CREATOR_MCE \ - UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \ - 0x64, 0x90, 0xb8, 0x9d) -#define CPER_SECTION_TYPE_MCE \ - UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \ - 0x04, 0x4a, 0x38, 0xfc) +#define CPER_CREATOR_MCE \ + UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, 0x64, 0x90, \ + 0xb8, 0x9d) +#define CPER_SECTION_TYPE_MCE \ + UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, 0x04, 0x4a, \ + 0x38, 0xfc) /* * CPER specification (in UEFI specification 2.3 appendix N) requires * byte-packed. 
*/ -struct __packed cper_mce_record { - struct cper_record_header hdr; - struct cper_section_descriptor sec_hdr; - struct mce mce; +struct __packed cper_mce_record +{ + struct cper_record_header hdr; + struct cper_section_descriptor sec_hdr; + struct mce mce; }; int apei_write_mce(struct mce *m) { - struct cper_mce_record rcd; - - if (!m) - return -EINVAL; - - memset(&rcd, 0, sizeof(rcd)); - memcpy(rcd.hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE); - rcd.hdr.revision = CPER_RECORD_REV; - rcd.hdr.signature_end = CPER_SIG_END; - rcd.hdr.section_count = 1; - rcd.hdr.error_severity = CPER_SER_FATAL; - /* timestamp, platform_id, partition_id are all invalid */ - rcd.hdr.validation_bits = 0; - rcd.hdr.record_length = sizeof(rcd); - rcd.hdr.creator_id = CPER_CREATOR_MCE; - rcd.hdr.notification_type = CPER_NOTIFY_MCE; - rcd.hdr.record_id = cper_next_record_id(); - rcd.hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR; - - rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd; - rcd.sec_hdr.section_length = sizeof(rcd.mce); - rcd.sec_hdr.revision = CPER_SEC_REV; - /* fru_id and fru_text is invalid */ - rcd.sec_hdr.validation_bits = 0; - rcd.sec_hdr.flags = CPER_SEC_PRIMARY; - rcd.sec_hdr.section_type = CPER_SECTION_TYPE_MCE; - rcd.sec_hdr.section_severity = CPER_SER_FATAL; - - memcpy(&rcd.mce, m, sizeof(*m)); - - return erst_write(&rcd.hdr); + struct cper_mce_record rcd; + + if ( !m ) + return -EINVAL; + + memset(&rcd, 0, sizeof(rcd)); + memcpy(rcd.hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE); + rcd.hdr.revision = CPER_RECORD_REV; + rcd.hdr.signature_end = CPER_SIG_END; + rcd.hdr.section_count = 1; + rcd.hdr.error_severity = CPER_SER_FATAL; + /* timestamp, platform_id, partition_id are all invalid */ + rcd.hdr.validation_bits = 0; + rcd.hdr.record_length = sizeof(rcd); + rcd.hdr.creator_id = CPER_CREATOR_MCE; + rcd.hdr.notification_type = CPER_NOTIFY_MCE; + rcd.hdr.record_id = cper_next_record_id(); + rcd.hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR; + + rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd; + rcd.sec_hdr.section_length = sizeof(rcd.mce); + rcd.sec_hdr.revision = CPER_SEC_REV; + /* fru_id and fru_text is invalid */ + rcd.sec_hdr.validation_bits = 0; + rcd.sec_hdr.flags = CPER_SEC_PRIMARY; + rcd.sec_hdr.section_type = CPER_SECTION_TYPE_MCE; + rcd.sec_hdr.section_severity = CPER_SER_FATAL; + + memcpy(&rcd.mce, m, sizeof(*m)); + + return erst_write(&rcd.hdr); } #ifndef NDEBUG /* currently dead code */ ssize_t apei_read_mce(struct mce *m, u64 *record_id) { - struct cper_mce_record rcd; - ssize_t len; - - if (!m || !record_id) - return -EINVAL; - - len = erst_read_next(&rcd.hdr, sizeof(rcd)); - if (len <= 0) - return len; - /* Can not skip other records in storage via ERST unless clear them */ - else if (len != sizeof(rcd) || - uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE)) { - printk(KERN_WARNING - "MCE-APEI: Can not skip the unknown record in ERST"); - return -EIO; - } - - memcpy(m, &rcd.mce, sizeof(*m)); - *record_id = rcd.hdr.record_id; - - return sizeof(*m); + struct cper_mce_record rcd; + ssize_t len; + + if ( !m || !record_id ) + return -EINVAL; + + len = erst_read_next(&rcd.hdr, sizeof(rcd)); + if ( len <= 0 ) + return len; + /* Can not skip other records in storage via ERST unless clear them */ + else if ( len != sizeof(rcd) || + uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE) ) + { + printk(KERN_WARNING + "MCE-APEI: Can not skip the unknown record in ERST"); + return -EIO; + } + + memcpy(m, &rcd.mce, sizeof(*m)); + *record_id = rcd.hdr.record_id; + + return sizeof(*m); 
} /* Check whether there is record in ERST */ bool apei_check_mce(void) { - return erst_get_record_count() > 0; + return erst_get_record_count() > 0; } int apei_clear_mce(u64 record_id) { - return erst_clear(record_id); + return erst_clear(record_id); } #endif /* currently dead code */ diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c index 30cdb06401..9b3efdb8a9 100644 --- a/xen/arch/x86/cpu/mcheck/mce.c +++ b/xen/arch/x86/cpu/mcheck/mce.c @@ -46,16 +46,16 @@ static void intpose_init(void); static void mcinfo_clear(struct mc_info *); struct mca_banks *mca_allbanks; -#define SEG_PL(segsel) ((segsel) & 0x3) +#define SEG_PL(segsel) ((segsel)&0x3) #define _MC_MSRINJ_F_REQ_HWCR_WREN (1 << 16) #if 0 -#define x86_mcerr(fmt, err, args...) \ - ({ \ - int _err = (err); \ - gdprintk(XENLOG_WARNING, "x86_mcerr: " fmt ", returning %d\n", \ - ## args, _err); \ - _err; \ +#define x86_mcerr(fmt, err, args...) \ + ({ \ + int _err = (err); \ + gdprintk(XENLOG_WARNING, "x86_mcerr: " fmt ", returning %d\n", ##args, \ + _err); \ + _err; \ }) #else #define x86_mcerr(fmt, err, args...) (err) @@ -128,8 +128,7 @@ struct mca_banks *mcabanks_alloc(void) if ( !mb ) return NULL; - mb->bank_map = xzalloc_array(unsigned long, - BITS_TO_LONGS(nr_mce_banks)); + mb->bank_map = xzalloc_array(unsigned long, BITS_TO_LONGS(nr_mce_banks)); if ( !mb->bank_map ) { xfree(mb); @@ -249,9 +248,9 @@ static int mca_init_global(uint32_t flags, struct mcinfo_global *mig) mig->mc_flags = flags; cpu_nr = smp_processor_id(); /* Retrieve detector information */ - x86_mc_get_cpu_info(cpu_nr, &mig->mc_socketid, - &mig->mc_coreid, &mig->mc_core_threadid, - &mig->mc_apicid, NULL, NULL, NULL); + x86_mc_get_cpu_info(cpu_nr, &mig->mc_socketid, &mig->mc_coreid, + &mig->mc_core_threadid, &mig->mc_apicid, NULL, NULL, + NULL); if ( curr != INVALID_VCPU ) { @@ -276,9 +275,10 @@ static int mca_init_global(uint32_t flags, struct mcinfo_global *mig) * For Intel latest CPU, whether to clear the error bank status needs to * be judged by the callback function defined above. 
*/ -mctelem_cookie_t -mcheck_mca_logout(enum mca_source who, struct mca_banks *bankmask, - struct mca_summary *sp, struct mca_banks *clear_bank) +mctelem_cookie_t mcheck_mca_logout(enum mca_source who, + struct mca_banks *bankmask, + struct mca_summary *sp, + struct mca_banks *clear_bank) { uint64_t gstatus, status; struct mcinfo_global *mig = NULL; /* on stack */ @@ -291,7 +291,7 @@ mcheck_mca_logout(enum mca_source who, struct mca_banks *bankmask, int i; gstatus = mca_rdmsr(MSR_IA32_MCG_STATUS); - switch ( who ) + switch (who) { case MCA_MCE_SCAN: mc_flags = MC_FLAG_MCE; @@ -538,8 +538,8 @@ void mcheck_cmn_handler(const struct cpu_user_regs *regs) char ebuf[96]; snprintf(ebuf, sizeof(ebuf), - "MCE: Fatal error happened on CPUs %*pb", - nr_cpu_ids, cpumask_bits(&mce_fatal_cpus)); + "MCE: Fatal error happened on CPUs %*pb", nr_cpu_ids, + cpumask_bits(&mce_fatal_cpus)); mc_panic(ebuf); } @@ -585,8 +585,8 @@ bool mce_available(const struct cpuinfo_x86 *c) */ unsigned int mce_firstbank(struct cpuinfo_x86 *c) { - return c->x86 == 6 && - c->x86_vendor == X86_VENDOR_INTEL && c->x86_model < 0x1a; + return c->x86 == 6 && c->x86_vendor == X86_VENDOR_INTEL && + c->x86_model < 0x1a; } int show_mca_info(int inited, struct cpuinfo_x86 *c) @@ -596,26 +596,24 @@ int show_mca_info(int inited, struct cpuinfo_x86 *c) if ( inited != g_type ) { char prefix[20]; - static const char *const type_str[] = { - [mcheck_amd_famXX] = "AMD", - [mcheck_amd_k8] = "AMD K8", - [mcheck_intel] = "Intel" - }; + static const char *const type_str[] = {[mcheck_amd_famXX] = "AMD", + [mcheck_amd_k8] = "AMD K8", + [mcheck_intel] = "Intel"}; snprintf(prefix, ARRAY_SIZE(prefix), "%sCPU%u: ", g_type != mcheck_unset ? XENLOG_WARNING : XENLOG_INFO, smp_processor_id()); BUG_ON(inited >= ARRAY_SIZE(type_str)); - switch ( inited ) + switch (inited) { default: - printk("%s%s machine check reporting enabled\n", - prefix, type_str[inited]); + printk("%s%s machine check reporting enabled\n", prefix, + type_str[inited]); break; case mcheck_amd_famXX: - printk("%s%s Fam%xh machine check reporting enabled\n", - prefix, type_str[inited], c->x86); + printk("%s%s Fam%xh machine check reporting enabled\n", prefix, + type_str[inited], c->x86); break; case mcheck_none: @@ -671,7 +669,8 @@ int mca_cap_init(void) if ( !nr_mce_banks ) { printk(XENLOG_INFO "CPU%u: No MCE banks present. " - "Machine check support disabled\n", smp_processor_id()); + "Machine check support disabled\n", + smp_processor_id()); return -ENODEV; } @@ -717,13 +716,13 @@ static int cpu_bank_alloc(unsigned int cpu) return 0; } -static int cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; int rc = 0; - switch ( action ) + switch (action) { case CPU_UP_PREPARE: rc = cpu_bank_alloc(cpu); @@ -744,9 +743,7 @@ static int cpu_callback( return !rc ? 
NOTIFY_DONE : notifier_from_errno(rc); } -static struct notifier_block cpu_nfb = { - .notifier_call = cpu_callback -}; +static struct notifier_block cpu_nfb = {.notifier_call = cpu_callback}; /* This has to be run for each processor */ void mcheck_init(struct cpuinfo_x86 *c, bool bsp) @@ -775,14 +772,14 @@ void mcheck_init(struct cpuinfo_x86 *c, bool bsp) if ( bsp && cpu_bank_alloc(smp_processor_id()) ) BUG(); - switch ( c->x86_vendor ) + switch (c->x86_vendor) { case X86_VENDOR_AMD: inited = amd_mcheck_init(c); break; case X86_VENDOR_INTEL: - switch ( c->x86 ) + switch (c->x86) { case 6: case 15: @@ -813,7 +810,7 @@ void mcheck_init(struct cpuinfo_x86 *c, bool bsp) set_poll_bankmask(c); return; - out: +out: if ( bsp ) { cpu_bank_free(smp_processor_id()); @@ -828,8 +825,8 @@ static void mcinfo_clear(struct mc_info *mi) x86_mcinfo_nentries(mi) = 0; } -void *x86_mcinfo_reserve(struct mc_info *mi, - unsigned int size, unsigned int type) +void *x86_mcinfo_reserve(struct mc_info *mi, unsigned int size, + unsigned int type) { int i; unsigned long end1, end2; @@ -847,8 +844,7 @@ void *x86_mcinfo_reserve(struct mc_info *mi, if ( end1 < end2 ) { - mce_printk(MCE_CRITICAL, - "mcinfo_add: No space left in mc_info\n"); + mce_printk(MCE_CRITICAL, "mcinfo_add: No space left in mc_info\n"); return NULL; } @@ -862,8 +858,8 @@ void *x86_mcinfo_reserve(struct mc_info *mi, return mic_index; } -static void x86_mcinfo_apei_save( - struct mcinfo_global *mc_global, struct mcinfo_bank *mc_bank) +static void x86_mcinfo_apei_save(struct mcinfo_global *mc_global, + struct mcinfo_bank *mc_bank) { struct mce m; @@ -901,8 +897,7 @@ void x86_mcinfo_dump(struct mc_info *mi) return; mc_global = (struct mcinfo_global *)mic; if ( mc_global->mc_flags & MC_FLAG_MCE ) - printk(XENLOG_WARNING - "CPU%d: Machine Check Exception: %16"PRIx64"\n", + printk(XENLOG_WARNING "CPU%d: Machine Check Exception: %16" PRIx64 "\n", mc_global->mc_coreid, mc_global->mc_gstatus); else if ( mc_global->mc_flags & MC_FLAG_CMCI ) printk(XENLOG_WARNING "CMCI occurred on CPU %d.\n", @@ -921,19 +916,18 @@ void x86_mcinfo_dump(struct mc_info *mi) mc_bank = (struct mcinfo_bank *)mic; - printk(XENLOG_WARNING "Bank %d: %16"PRIx64, - mc_bank->mc_bank, + printk(XENLOG_WARNING "Bank %d: %16" PRIx64, mc_bank->mc_bank, mc_bank->mc_status); if ( mc_bank->mc_status & MCi_STATUS_MISCV ) - printk("[%16"PRIx64"]", mc_bank->mc_misc); + printk("[%16" PRIx64 "]", mc_bank->mc_misc); if ( mc_bank->mc_status & MCi_STATUS_ADDRV ) - printk(" at %16"PRIx64, mc_bank->mc_addr); + printk(" at %16" PRIx64, mc_bank->mc_addr); printk("\n"); if ( is_mc_panic ) x86_mcinfo_apei_save(mc_global, mc_bank); - next: + next: mic = x86_mcinfo_next(mic); /* next entry */ if ( (mic == NULL) || (mic->size == 0) ) break; @@ -966,9 +960,8 @@ static void do_mc_get_cpu_info(void *v) xcp = &log_cpus[cindex]; c = &cpu_data[cpu]; xcp->mc_cpunr = cpu; - x86_mc_get_cpu_info(cpu, &xcp->mc_chipid, - &xcp->mc_coreid, &xcp->mc_threadid, - &xcp->mc_apicid, &xcp->mc_ncores, + x86_mc_get_cpu_info(cpu, &xcp->mc_chipid, &xcp->mc_coreid, + &xcp->mc_threadid, &xcp->mc_apicid, &xcp->mc_ncores, &xcp->mc_ncores_active, &xcp->mc_nthreads); xcp->mc_cpuid_level = c->cpuid_level; xcp->mc_family = c->x86; @@ -998,9 +991,8 @@ static void do_mc_get_cpu_info(void *v) } void x86_mc_get_cpu_info(unsigned cpu, uint32_t *chipid, uint16_t *coreid, - uint16_t *threadid, uint32_t *apicid, - unsigned *ncores, unsigned *ncores_active, - unsigned *nthreads) + uint16_t *threadid, uint32_t *apicid, unsigned *ncores, + unsigned 
*ncores_active, unsigned *nthreads) { struct cpuinfo_x86 *c; @@ -1037,7 +1029,8 @@ void x86_mc_get_cpu_info(unsigned cpu, uint32_t *chipid, uint16_t *coreid, #define INTPOSE_NENT 50 -static struct intpose_ent { +static struct intpose_ent +{ unsigned int cpu_nr; uint64_t msr; uint64_t val; @@ -1053,7 +1046,6 @@ static void intpose_init(void) for ( i = 0; i < INTPOSE_NENT; i++ ) intpose_arr[i].cpu_nr = -1; - } struct intpose_ent *intpose_lookup(unsigned int cpu_nr, uint64_t msr, @@ -1110,10 +1102,9 @@ bool intpose_inval(unsigned int cpu_nr, uint64_t msr) return true; } -#define IS_MCA_BANKREG(r) \ - ((r) >= MSR_IA32_MC0_CTL && \ - (r) <= MSR_IA32_MCx_MISC(nr_mce_banks - 1) && \ - ((r) - MSR_IA32_MC0_CTL) % 4 != 0) /* excludes MCi_CTL */ +#define IS_MCA_BANKREG(r) \ + ((r) >= MSR_IA32_MC0_CTL && (r) <= MSR_IA32_MCx_MISC(nr_mce_banks - 1) && \ + ((r)-MSR_IA32_MC0_CTL) % 4 != 0) /* excludes MCi_CTL */ static bool x86_mc_msrinject_verify(struct xen_mc_msrinject *mci) { @@ -1139,8 +1130,7 @@ static bool x86_mc_msrinject_verify(struct xen_mc_msrinject *mci) * is necessary and set it as a courtesy to * avoid #GP in the hypervisor. */ - mci->mcinj_flags |= - _MC_MSRINJ_F_REQ_HWCR_WREN; + mci->mcinj_flags |= _MC_MSRINJ_F_REQ_HWCR_WREN; continue; } else @@ -1155,7 +1145,7 @@ static bool x86_mc_msrinject_verify(struct xen_mc_msrinject *mci) } else { - switch ( reg ) + switch (reg) { /* MSRs acceptable on all x86 cpus */ case MSR_IA32_MCG_STATUS: @@ -1232,9 +1222,8 @@ static void x86_mc_msrinject(void *data) for ( i = 0, msr = &mci->mcinj_msr[0]; i < mci->mcinj_count; i++, msr++ ) { printk("HV MSR INJECT (%s) target %u actual %u MSR %#Lx <-- %#Lx\n", - intpose ? "interpose" : "hardware", - mci->mcinj_cpunr, smp_processor_id(), - (unsigned long long)msr->reg, + intpose ? 
"interpose" : "hardware", mci->mcinj_cpunr, + smp_processor_id(), (unsigned long long)msr->reg, (unsigned long long)msr->value); if ( intpose ) @@ -1265,55 +1254,55 @@ static void x86_mc_mceinject(void *data) #error BITS_PER_LONG definition absent #endif -# include +#include -# define xen_mcinfo_msr mcinfo_msr +#define xen_mcinfo_msr mcinfo_msr CHECK_mcinfo_msr; -# undef xen_mcinfo_msr -# undef CHECK_mcinfo_msr -# define CHECK_mcinfo_msr struct mcinfo_msr +#undef xen_mcinfo_msr +#undef CHECK_mcinfo_msr +#define CHECK_mcinfo_msr struct mcinfo_msr -# define xen_mcinfo_common mcinfo_common +#define xen_mcinfo_common mcinfo_common CHECK_mcinfo_common; -# undef xen_mcinfo_common -# undef CHECK_mcinfo_common -# define CHECK_mcinfo_common struct mcinfo_common +#undef xen_mcinfo_common +#undef CHECK_mcinfo_common +#define CHECK_mcinfo_common struct mcinfo_common CHECK_FIELD_(struct, mc_fetch, flags); CHECK_FIELD_(struct, mc_fetch, fetch_id); -# define CHECK_compat_mc_fetch struct mc_fetch +#define CHECK_compat_mc_fetch struct mc_fetch CHECK_FIELD_(struct, mc_physcpuinfo, ncpus); -# define CHECK_compat_mc_physcpuinfo struct mc_physcpuinfo +#define CHECK_compat_mc_physcpuinfo struct mc_physcpuinfo -#define CHECK_compat_mc_inject_v2 struct mc_inject_v2 +#define CHECK_compat_mc_inject_v2 struct mc_inject_v2 CHECK_mc; -# undef CHECK_compat_mc_fetch -# undef CHECK_compat_mc_physcpuinfo +#undef CHECK_compat_mc_fetch +#undef CHECK_compat_mc_physcpuinfo -# define xen_mc_info mc_info +#define xen_mc_info mc_info CHECK_mc_info; -# undef xen_mc_info +#undef xen_mc_info -# define xen_mcinfo_global mcinfo_global +#define xen_mcinfo_global mcinfo_global CHECK_mcinfo_global; -# undef xen_mcinfo_global +#undef xen_mcinfo_global -# define xen_mcinfo_bank mcinfo_bank +#define xen_mcinfo_bank mcinfo_bank CHECK_mcinfo_bank; -# undef xen_mcinfo_bank +#undef xen_mcinfo_bank -# define xen_mcinfo_extended mcinfo_extended +#define xen_mcinfo_extended mcinfo_extended CHECK_mcinfo_extended; -# undef xen_mcinfo_extended +#undef xen_mcinfo_extended -# define xen_mcinfo_recovery mcinfo_recovery -# define xen_cpu_offline_action cpu_offline_action -# define xen_page_offline_action page_offline_action +#define xen_mcinfo_recovery mcinfo_recovery +#define xen_cpu_offline_action cpu_offline_action +#define xen_page_offline_action page_offline_action CHECK_mcinfo_recovery; -# undef xen_cpu_offline_action -# undef xen_page_offline_action -# undef xen_mcinfo_recovery +#undef xen_cpu_offline_action +#undef xen_page_offline_action +#undef xen_mcinfo_recovery /* Machine Check Architecture Hypercall */ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc) @@ -1348,13 +1337,13 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc) if ( op->interface_version != XEN_MCA_INTERFACE_VERSION ) return x86_mcerr("do_mca: interface version mismatch", -EACCES); - switch ( op->cmd ) + switch (op->cmd) { case XEN_MC_fetch: mc_fetch.nat = &op->u.mc_fetch; cmdflags = mc_fetch.nat->flags; - switch ( cmdflags & (XEN_MC_NONURGENT | XEN_MC_URGENT) ) + switch (cmdflags & (XEN_MC_NONURGENT | XEN_MC_URGENT)) { case XEN_MC_NONURGENT: which = MC_NONURGENT; @@ -1378,18 +1367,19 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc) else { if ( !is_pv_32bit_vcpu(v) - ? guest_handle_is_null(mc_fetch.nat->data) - : compat_handle_is_null(mc_fetch.cmp->data) ) + ? 
guest_handle_is_null(mc_fetch.nat->data) + : compat_handle_is_null(mc_fetch.cmp->data) ) return x86_mcerr("do_mca fetch: guest buffer " - "invalid", -EINVAL); + "invalid", + -EINVAL); mctc = mctelem_consume_oldest_begin(which); if ( mctc ) { struct mc_info *mcip = mctelem_dataptr(mctc); if ( !is_pv_32bit_vcpu(v) - ? copy_to_guest(mc_fetch.nat->data, mcip, 1) - : copy_to_compat(mc_fetch.cmp->data, mcip, 1) ) + ? copy_to_guest(mc_fetch.nat->data, mcip, 1) + : copy_to_compat(mc_fetch.cmp->data, mcip, 1) ) { ret = -EFAULT; flags |= XEN_MC_FETCHFAILED; @@ -1407,7 +1397,7 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc) } mc_fetch.nat->flags = flags; - if (copy_to_guest(u_xen_mc, op, 1) != 0) + if ( copy_to_guest(u_xen_mc, op, 1) != 0 ) ret = -EFAULT; } @@ -1421,20 +1411,20 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc) nlcpu = num_online_cpus(); if ( !is_pv_32bit_vcpu(v) - ? !guest_handle_is_null(mc_physcpuinfo.nat->info) - : !compat_handle_is_null(mc_physcpuinfo.cmp->info) ) + ? !guest_handle_is_null(mc_physcpuinfo.nat->info) + : !compat_handle_is_null(mc_physcpuinfo.cmp->info) ) { if ( mc_physcpuinfo.nat->ncpus <= 0 ) - return x86_mcerr("do_mca cpuinfo: ncpus <= 0", - -EINVAL); + return x86_mcerr("do_mca cpuinfo: ncpus <= 0", -EINVAL); nlcpu = min(nlcpu, (int)mc_physcpuinfo.nat->ncpus); log_cpus = xmalloc_array(xen_mc_logical_cpu_t, nlcpu); if ( log_cpus == NULL ) return x86_mcerr("do_mca cpuinfo", -ENOMEM); on_each_cpu(do_mc_get_cpu_info, log_cpus, 1); if ( !is_pv_32bit_vcpu(v) - ? copy_to_guest(mc_physcpuinfo.nat->info, log_cpus, nlcpu) - : copy_to_compat(mc_physcpuinfo.cmp->info, log_cpus, nlcpu) ) + ? copy_to_guest(mc_physcpuinfo.nat->info, log_cpus, nlcpu) + : copy_to_compat(mc_physcpuinfo.cmp->info, log_cpus, + nlcpu) ) ret = -EFAULT; xfree(log_cpus); } @@ -1457,8 +1447,7 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc) return x86_mcerr("do_mca inject: bad target", -EINVAL); if ( !cpu_online(target) ) - return x86_mcerr("do_mca inject: target offline", - -EINVAL); + return x86_mcerr("do_mca inject: target offline", -EINVAL); if ( mc_msrinject->mcinj_count == 0 ) return 0; @@ -1473,8 +1462,9 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc) unsigned long gfn, mfn; p2m_type_t t; - domid = (mc_msrinject->mcinj_domid == DOMID_SELF) ? - current->domain->domain_id : mc_msrinject->mcinj_domid; + domid = (mc_msrinject->mcinj_domid == DOMID_SELF) + ? 
current->domain->domain_id + : mc_msrinject->mcinj_domid; if ( domid >= DOMID_FIRST_RESERVED ) return x86_mcerr("do_mca inject: incompatible flag " "MC_MSRINJ_F_GPADDR with domain %d", @@ -1482,12 +1472,11 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc) d = get_domain_by_id(domid); if ( d == NULL ) - return x86_mcerr("do_mca inject: bad domain id %d", - -EINVAL, domid); + return x86_mcerr("do_mca inject: bad domain id %d", -EINVAL, + domid); for ( i = 0, msr = &mc_msrinject->mcinj_msr[0]; - i < mc_msrinject->mcinj_count; - i++, msr++ ) + i < mc_msrinject->mcinj_count; i++, msr++ ) { gaddr = msr->value; gfn = PFN_DOWN(gaddr); @@ -1514,8 +1503,7 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc) add_taint(TAINT_ERROR_INJECT); - on_selected_cpus(cpumask_of(target), x86_mc_msrinject, - mc_msrinject, 1); + on_selected_cpus(cpumask_of(target), x86_mc_msrinject, mc_msrinject, 1); break; @@ -1537,8 +1525,8 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc) if ( mce_broadcast ) on_each_cpu(x86_mc_mceinject, mc_mceinject, 1); else - on_selected_cpus(cpumask_of(target), x86_mc_mceinject, - mc_mceinject, 1); + on_selected_cpus(cpumask_of(target), x86_mc_mceinject, mc_mceinject, + 1); break; case XEN_MC_inject_v2: @@ -1565,15 +1553,13 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc) break; } if ( !cpumask_subset(cpumap, &cpu_online_map) ) - dprintk(XENLOG_INFO, - "Not all required CPUs are online\n"); + dprintk(XENLOG_INFO, "Not all required CPUs are online\n"); } - switch ( op->u.mc_inject_v2.flags & XEN_MC_INJECT_TYPE_MASK ) + switch (op->u.mc_inject_v2.flags & XEN_MC_INJECT_TYPE_MASK) { case XEN_MC_INJECT_TYPE_MCE: - if ( mce_broadcast && - !cpumask_equal(cpumap, &cpu_online_map) ) + if ( mce_broadcast && !cpumask_equal(cpumap, &cpu_online_map) ) printk("Not trigger MCE on all CPUs, may HANG!\n"); on_selected_cpus(cpumap, x86_mc_mceinject, NULL, 1); break; @@ -1603,8 +1589,8 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc) /* Ensure at most one CPU is specified. 
*/ if ( nr_cpu_ids > cpumask_next(cpumask_first(cpumap), cpumap) ) { - ret = x86_mcerr("More than one CPU specified for LMCE", - -EINVAL); + ret = + x86_mcerr("More than one CPU specified for LMCE", -EINVAL); break; } on_selected_cpus(cpumap, x86_mc_mceinject, NULL, 1); @@ -1645,7 +1631,7 @@ static void mc_panic_dump(void) int cpu; dprintk(XENLOG_ERR, "Begin dump mc_info\n"); - for_each_online_cpu(cpu) + for_each_online_cpu (cpu) mctelem_process_deferred(cpu, x86_mcinfo_dump_panic, mctelem_has_deferred_lmce(cpu)); dprintk(XENLOG_ERR, "End dump mc_info, %x mcinfo dumped\n", mcinfo_dumpped); @@ -1753,7 +1739,7 @@ static int mce_delayed_action(mctelem_cookie_t mctc) result = mce_action(NULL, mctc); - switch ( result ) + switch (result) { case MCER_RESET: dprintk(XENLOG_ERR, "MCE delayed action failed\n"); @@ -1769,7 +1755,7 @@ static int mce_delayed_action(mctelem_cookie_t mctc) case MCER_CONTINUE: dprintk(XENLOG_INFO, "MCE: Error can't be recovered, " - "system is tainted\n"); + "system is tainted\n"); x86_mcinfo_dump(mctelem_dataptr(mctc)); ret = 1; break; @@ -1818,7 +1804,6 @@ static void mce_softirq(void) /* We choose severity_cpu for further processing */ if ( lmce || atomic_read(&severity_cpu) == cpu ) { - mce_printk(MCE_VERBOSE, "CPU%d handling errors\n", cpu); /* @@ -1829,7 +1814,7 @@ static void mce_softirq(void) if ( lmce ) mctelem_process_deferred(cpu, mce_delayed_action, true); else - for_each_online_cpu(workcpu) + for_each_online_cpu (workcpu) mctelem_process_deferred(workcpu, mce_delayed_action, false); /* Step2: Send Log to DOM0 through vIRQ */ diff --git a/xen/arch/x86/cpu/mcheck/mce_amd.c b/xen/arch/x86/cpu/mcheck/mce_amd.c index d125bc1611..3e0654688a 100644 --- a/xen/arch/x86/cpu/mcheck/mce_amd.c +++ b/xen/arch/x86/cpu/mcheck/mce_amd.c @@ -24,7 +24,7 @@ * Issue Date: October 2013 * * URL: - * http://support.amd.com/TechDocs/24593.pdf + * http://support.amd.com/TechDocs/24593.pdf */ /* The related documentation for K8 Revisions A - E is: @@ -35,7 +35,7 @@ * Issue Date: February 2006 * * URL: - * http://support.amd.com/TechDocs/26094.PDF + * http://support.amd.com/TechDocs/26094.PDF */ /* The related documentation for K8 Revisions F - G is: @@ -46,7 +46,7 @@ * Issue Date: July 2007 * * URL: - * http://support.amd.com/TechDocs/32559.pdf + * http://support.amd.com/TechDocs/32559.pdf */ /* Family10 MCA documentation published at @@ -57,7 +57,7 @@ * Isse Date: January 11, 2013 * * URL: - * http://support.amd.com/TechDocs/31116.pdf + * http://support.amd.com/TechDocs/31116.pdf */ #include @@ -76,21 +76,21 @@ #define ANY -1 static const struct mce_quirkdata mce_amd_quirks[] = { - { 0xf /* cpu family */, ANY /* all models */, ANY /* all steppings */, - MCEQUIRK_K8_GART }, - { 0x10 /* cpu family */, ANY /* all models */, ANY /* all steppings */, - MCEQUIRK_F10_GART }, + {0xf /* cpu family */, ANY /* all models */, ANY /* all steppings */, + MCEQUIRK_K8_GART}, + {0x10 /* cpu family */, ANY /* all models */, ANY /* all steppings */, + MCEQUIRK_F10_GART}, }; /* Error Code Types */ -enum mc_ec_type { +enum mc_ec_type +{ MC_EC_TLB_TYPE = 0x0010, MC_EC_MEM_TYPE = 0x0100, MC_EC_BUS_TYPE = 0x0800, }; -enum mc_ec_type -mc_ec2type(uint16_t errorcode) +enum mc_ec_type mc_ec2type(uint16_t errorcode) { if ( errorcode & MC_EC_BUS_TYPE ) return MC_EC_BUS_TYPE; @@ -115,14 +115,14 @@ bool mc_amd_recoverable_scan(uint64_t status) errorcode = status & (MCi_STATUS_MCA | MCi_STATUS_MSEC); ectype = mc_ec2type(errorcode); - switch ( ectype ) + switch (ectype) { case MC_EC_BUS_TYPE: /* value in addr MSR is 
physical */ /* should run cpu offline action */ break; case MC_EC_MEM_TYPE: /* value in addr MSR is physical */ - ret = true; /* run memory page offline action */ + ret = true; /* run memory page offline action */ break; case MC_EC_TLB_TYPE: /* value in addr MSR is virtual */ @@ -141,7 +141,7 @@ bool mc_amd_addrcheck(uint64_t status, uint64_t misc, int addrtype) errorcode = status & (MCi_STATUS_MCA | MCi_STATUS_MSEC); ectype = mc_ec2type(errorcode); - switch ( ectype ) + switch (ectype) { case MC_EC_BUS_TYPE: /* value in addr MSR is physical */ case MC_EC_MEM_TYPE: /* value in addr MSR is physical */ @@ -157,8 +157,7 @@ bool mc_amd_addrcheck(uint64_t status, uint64_t misc, int addrtype) } /* MC quirks */ -enum mcequirk_amd_flags -mcequirk_lookup_amd_quirkdata(struct cpuinfo_x86 *c) +enum mcequirk_amd_flags mcequirk_lookup_amd_quirkdata(struct cpuinfo_x86 *c) { int i; @@ -173,7 +172,7 @@ mcequirk_lookup_amd_quirkdata(struct cpuinfo_x86 *c) continue; if ( (mce_amd_quirks[i].cpu_stepping != ANY) && (mce_amd_quirks[i].cpu_stepping != c->x86_mask) ) - continue; + continue; return mce_amd_quirks[i].quirk; } return 0; @@ -183,7 +182,7 @@ int mcequirk_amd_apply(enum mcequirk_amd_flags flags) { uint64_t val; - switch ( flags ) + switch (flags) { case MCEQUIRK_K8_GART: /* @@ -197,15 +196,15 @@ int mcequirk_amd_apply(enum mcequirk_amd_flags flags) case MCEQUIRK_F10_GART: if ( rdmsr_safe(MSR_AMD64_MCx_MASK(4), val) == 0 ) - wrmsr_safe(MSR_AMD64_MCx_MASK(4), val | (1 << 10)); + wrmsr_safe(MSR_AMD64_MCx_MASK(4), val | (1 << 10)); break; } return 0; } -static struct mcinfo_extended * -amd_f10_handler(struct mc_info *mi, uint16_t bank, uint64_t status) +static struct mcinfo_extended *amd_f10_handler(struct mc_info *mi, + uint16_t bank, uint64_t status) { struct mcinfo_extended *mc_ext; @@ -259,7 +258,7 @@ static bool amd_need_clearbank_scan(enum mca_source who, uint64_t status) int vmce_amd_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) { /* Do nothing as we don't emulate this MC bank currently */ - mce_printk(MCE_VERBOSE, "MCE: wr msr %#"PRIx64"\n", val); + mce_printk(MCE_VERBOSE, "MCE: wr msr %#" PRIx64 "\n", val); return 1; } @@ -270,8 +269,7 @@ int vmce_amd_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) return 1; } -enum mcheck_type -amd_mcheck_init(struct cpuinfo_x86 *ci) +enum mcheck_type amd_mcheck_init(struct cpuinfo_x86 *ci) { uint32_t i; enum mcequirk_amd_flags quirkflag = mcequirk_lookup_amd_quirkdata(ci); diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c index 4474a34e34..aaea2b3681 100644 --- a/xen/arch/x86/cpu/mcheck/mce_intel.c +++ b/xen/arch/x86/cpu/mcheck/mce_intel.c @@ -41,18 +41,18 @@ bool __read_mostly lmce_support; * 2). L3 explicit writeback error, error code = 0x17A */ #define INTEL_SRAO_MEM_SCRUB 0xC0 ... 0xCF -#define INTEL_SRAO_L3_EWB 0x17A +#define INTEL_SRAO_L3_EWB 0x17A /* * Currently Intel SDM define 2 kinds of srar errors: * 1). Data Load error, error code = 0x134 * 2). 
Instruction Fetch error, error code = 0x150 */ -#define INTEL_SRAR_DATA_LOAD 0x134 -#define INTEL_SRAR_INSTR_FETCH 0x150 +#define INTEL_SRAR_DATA_LOAD 0x134 +#define INTEL_SRAR_INSTR_FETCH 0x150 #ifdef CONFIG_X86_MCE_THERMAL -#define MCE_RING 0x1 +#define MCE_RING 0x1 static DEFINE_PER_CPU(int, last_state); static void intel_thermal_interrupt(struct cpu_user_regs *regs) @@ -78,7 +78,8 @@ static void intel_thermal_interrupt(struct cpu_user_regs *regs) printk(KERN_EMERG "CPU%u: Temperature above threshold\n", cpu); printk(KERN_EMERG "CPU%u: Running in modulated clock mode\n", cpu); add_taint(TAINT_MACHINE_CHECK); - } else + } + else printk(KERN_INFO "CPU%u: Temperature/speed normal\n", cpu); } @@ -133,12 +134,11 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) * BIOS has programmed on AP based on BSP's info we saved (since BIOS * is required to set the same value for all threads/cores). */ - if ( (val & APIC_MODE_MASK) != APIC_DM_FIXED - || (val & APIC_VECTOR_MASK) > 0xf ) + if ( (val & APIC_MODE_MASK) != APIC_DM_FIXED || + (val & APIC_VECTOR_MASK) > 0xf ) apic_write(APIC_LVTTHMR, val); - if ( (msr_content & (1ULL<<3)) - && (val & APIC_MODE_MASK) == APIC_DM_SMI ) + if ( (msr_content & (1ULL << 3)) && (val & APIC_MODE_MASK) == APIC_DM_SMI ) { if ( c == &boot_cpu_data ) printk(KERN_DEBUG "Thermal monitoring handled by SMI\n"); @@ -160,20 +160,20 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) alloc_direct_apic_vector(&thermal_apic_vector, intel_thermal_interrupt); /* The temperature transition interrupt handler setup */ - val = thermal_apic_vector; /* our delivery vector */ - val |= (APIC_DM_FIXED | APIC_LVT_MASKED); /* we'll mask till we're ready */ + val = thermal_apic_vector; /* our delivery vector */ + val |= (APIC_DM_FIXED | APIC_LVT_MASKED); /* we'll mask till we're ready */ apic_write(APIC_LVTTHMR, val); rdmsrl(MSR_IA32_THERM_INTERRUPT, msr_content); wrmsrl(MSR_IA32_THERM_INTERRUPT, msr_content | 0x03); rdmsrl(MSR_IA32_MISC_ENABLE, msr_content); - wrmsrl(MSR_IA32_MISC_ENABLE, msr_content | (1ULL<<3)); + wrmsrl(MSR_IA32_MISC_ENABLE, msr_content | (1ULL << 3)); apic_write(APIC_LVTTHMR, val & ~APIC_LVT_MASKED); if ( opt_cpu_info ) - printk(KERN_INFO "CPU%u: Thermal monitoring enabled (%s)\n", - cpu, tm2 ? "TM2" : "TM1"); + printk(KERN_INFO "CPU%u: Thermal monitoring enabled (%s)\n", cpu, + tm2 ? 
"TM2" : "TM1"); return; } #endif /* CONFIG_X86_MCE_THERMAL */ @@ -181,8 +181,8 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) /* Intel MCE handler */ static inline void intel_get_extended_msr(struct mcinfo_extended *ext, u32 msr) { - if ( ext->mc_msrs < ARRAY_SIZE(ext->mc_msr) - && msr < MSR_IA32_MCG_EAX + nr_intel_ext_msrs ) + if ( ext->mc_msrs < ARRAY_SIZE(ext->mc_msr) && + msr < MSR_IA32_MCG_EAX + nr_intel_ext_msrs ) { ext->mc_msr[ext->mc_msrs].reg = msr; rdmsrl(msr, ext->mc_msr[ext->mc_msrs].value); @@ -190,9 +190,8 @@ static inline void intel_get_extended_msr(struct mcinfo_extended *ext, u32 msr) } } - -struct mcinfo_extended * -intel_get_extended_msrs(struct mcinfo_global *mig, struct mc_info *mi) +struct mcinfo_extended *intel_get_extended_msrs(struct mcinfo_global *mig, + struct mc_info *mi) { struct mcinfo_extended *mc_ext; int i; @@ -201,7 +200,7 @@ intel_get_extended_msrs(struct mcinfo_global *mig, struct mc_info *mi) * According to spec, processor _support_ 64 bit will always * have MSR beyond IA32_MCG_MISC */ - if ( !mi|| !mig || nr_intel_ext_msrs == 0 || + if ( !mi || !mig || nr_intel_ext_msrs == 0 || !(mig->mc_gstatus & MCG_STATUS_EIPV) ) return NULL; @@ -254,7 +253,8 @@ static enum intel_mce_type intel_check_mce_type(uint64_t status) return intel_mce_fatal; else return intel_mce_ucr_srar; - } else + } + else return intel_mce_ucr_srao; } else @@ -264,10 +264,9 @@ static enum intel_mce_type intel_check_mce_type(uint64_t status) return intel_mce_fatal; } -static void intel_memerr_dhandler( - struct mca_binfo *binfo, - enum mce_result *result, - const struct cpu_user_regs *regs) +static void intel_memerr_dhandler(struct mca_binfo *binfo, + enum mce_result *result, + const struct cpu_user_regs *regs) { mce_printk(MCE_VERBOSE, "MCE: Enter UCR recovery action\n"); mc_memerr_dhandler(binfo, result, regs); @@ -280,8 +279,7 @@ static bool intel_srar_check(uint64_t status) static bool intel_checkaddr(uint64_t status, uint64_t misc, int addrtype) { - if ( !(status & MCi_STATUS_ADDRV) || - !(status & MCi_STATUS_MISCV) || + if ( !(status & MCi_STATUS_ADDRV) || !(status & MCi_STATUS_MISCV) || ((misc & MCi_MISC_ADDRMOD_MASK) != MCi_MISC_PHYSMOD) ) /* addr is virtual */ return (addrtype == MC_ADDR_VIRTUAL); @@ -289,17 +287,16 @@ static bool intel_checkaddr(uint64_t status, uint64_t misc, int addrtype) return (addrtype == MC_ADDR_PHYSICAL); } -static void intel_srar_dhandler( - struct mca_binfo *binfo, - enum mce_result *result, - const struct cpu_user_regs *regs) +static void intel_srar_dhandler(struct mca_binfo *binfo, + enum mce_result *result, + const struct cpu_user_regs *regs) { uint64_t status = binfo->mib->mc_status; /* For unknown srar error code, reset system */ *result = MCER_RESET; - switch ( status & INTEL_MCCOD_MASK ) + switch (status & INTEL_MCCOD_MASK) { case INTEL_SRAR_DATA_LOAD: case INTEL_SRAR_INSTR_FETCH: @@ -313,10 +310,9 @@ static bool intel_srao_check(uint64_t status) return (intel_check_mce_type(status) == intel_mce_ucr_srao); } -static void intel_srao_dhandler( - struct mca_binfo *binfo, - enum mce_result *result, - const struct cpu_user_regs *regs) +static void intel_srao_dhandler(struct mca_binfo *binfo, + enum mce_result *result, + const struct cpu_user_regs *regs) { uint64_t status = binfo->mib->mc_status; @@ -325,7 +321,7 @@ static void intel_srao_dhandler( if ( status & MCi_STATUS_VAL ) { - switch ( status & INTEL_MCCOD_MASK ) + switch (status & INTEL_MCCOD_MASK) { case INTEL_SRAO_MEM_SCRUB: case INTEL_SRAO_L3_EWB: @@ -340,10 +336,9 @@ static bool 
intel_default_check(uint64_t status) return true; } -static void intel_default_mce_dhandler( - struct mca_binfo *binfo, - enum mce_result *result, - const struct cpu_user_regs * regs) +static void intel_default_mce_dhandler(struct mca_binfo *binfo, + enum mce_result *result, + const struct cpu_user_regs *regs) { uint64_t status = binfo->mib->mc_status; enum intel_mce_type type; @@ -359,20 +354,18 @@ static void intel_default_mce_dhandler( static const struct mca_error_handler intel_mce_dhandlers[] = { {intel_srao_check, intel_srao_dhandler}, {intel_srar_check, intel_srar_dhandler}, - {intel_default_check, intel_default_mce_dhandler} -}; + {intel_default_check, intel_default_mce_dhandler}}; -static void intel_default_mce_uhandler( - struct mca_binfo *binfo, - enum mce_result *result, - const struct cpu_user_regs *regs) +static void intel_default_mce_uhandler(struct mca_binfo *binfo, + enum mce_result *result, + const struct cpu_user_regs *regs) { uint64_t status = binfo->mib->mc_status; enum intel_mce_type type; type = intel_check_mce_type(status); - switch ( type ) + switch (type) { case intel_mce_fatal: *result = MCER_RESET; @@ -385,8 +378,7 @@ static void intel_default_mce_uhandler( } static const struct mca_error_handler intel_mce_uhandlers[] = { - {intel_default_check, intel_default_mce_uhandler} -}; + {intel_default_check, intel_default_mce_uhandler}}; /* According to MCA OS writer guide, CMCI handler need to clear bank when * 1) CE (UC = 0) @@ -406,16 +398,17 @@ static bool intel_need_clearbank_scan(enum mca_source who, u64 status) if ( !(status & MCi_STATUS_UC) ) return true; /* Spurious need clear bank */ - else if ( ser_support && !(status & MCi_STATUS_OVER) - && !(status & MCi_STATUS_EN) ) + else if ( ser_support && !(status & MCi_STATUS_OVER) && + !(status & MCi_STATUS_EN) ) return true; /* UCNA OVER = 0 need clear bank */ - else if ( ser_support && !(status & MCi_STATUS_OVER) - && !(status & MCi_STATUS_PCC) && !(status & MCi_STATUS_S) - && !(status & MCi_STATUS_AR) ) + else if ( ser_support && !(status & MCi_STATUS_OVER) && + !(status & MCi_STATUS_PCC) && !(status & MCi_STATUS_S) && + !(status & MCi_STATUS_AR) ) return true; /* Only Log, no clear */ - else return false; + else + return false; } else if ( who == MCA_MCE_SCAN ) { @@ -428,17 +421,16 @@ static bool intel_need_clearbank_scan(enum mca_source who, u64 status) if ( (status & MCi_STATUS_UC) && (status & MCi_STATUS_PCC) ) return false; /* Spurious need clear bank */ - else if ( !(status & MCi_STATUS_OVER) - && (status & MCi_STATUS_UC) && !(status & MCi_STATUS_EN) ) + else if ( !(status & MCi_STATUS_OVER) && (status & MCi_STATUS_UC) && + !(status & MCi_STATUS_EN) ) return true; /* SRAR OVER=0 clear bank. 
OVER = 1 have caused reset */ - else if ( (status & MCi_STATUS_UC) - && (status & MCi_STATUS_S) && (status & MCi_STATUS_AR) - && !(status & MCi_STATUS_OVER) ) + else if ( (status & MCi_STATUS_UC) && (status & MCi_STATUS_S) && + (status & MCi_STATUS_AR) && !(status & MCi_STATUS_OVER) ) return true; /* SRAO need clear bank */ - else if ( !(status & MCi_STATUS_AR) - && (status & MCi_STATUS_S) && (status & MCi_STATUS_UC) ) + else if ( !(status & MCi_STATUS_AR) && (status & MCi_STATUS_S) && + (status & MCi_STATUS_UC) ) return true; else return false; @@ -457,26 +449,25 @@ static bool intel_need_clearbank_scan(enum mca_source who, u64 status) */ static bool intel_recoverable_scan(uint64_t status) { - - if ( !(status & MCi_STATUS_UC ) ) + if ( !(status & MCi_STATUS_UC) ) return true; - else if ( ser_support && !(status & MCi_STATUS_EN) - && !(status & MCi_STATUS_OVER) ) + else if ( ser_support && !(status & MCi_STATUS_EN) && + !(status & MCi_STATUS_OVER) ) return true; /* SRAR error */ - else if ( ser_support && !(status & MCi_STATUS_OVER) - && !(status & MCi_STATUS_PCC) && (status & MCi_STATUS_S) - && (status & MCi_STATUS_AR) && (status & MCi_STATUS_EN) ) + else if ( ser_support && !(status & MCi_STATUS_OVER) && + !(status & MCi_STATUS_PCC) && (status & MCi_STATUS_S) && + (status & MCi_STATUS_AR) && (status & MCi_STATUS_EN) ) return true; /* SRAO error */ - else if ( ser_support && !(status & MCi_STATUS_PCC) - && (status & MCi_STATUS_S) && !(status & MCi_STATUS_AR) - && (status & MCi_STATUS_EN) ) + else if ( ser_support && !(status & MCi_STATUS_PCC) && + (status & MCi_STATUS_S) && !(status & MCi_STATUS_AR) && + (status & MCi_STATUS_EN) ) return true; /* UCNA error */ - else if ( ser_support && !(status & MCi_STATUS_OVER) - && (status & MCi_STATUS_EN) && !(status & MCi_STATUS_PCC) - && !(status & MCi_STATUS_S) && !(status & MCi_STATUS_AR) ) + else if ( ser_support && !(status & MCi_STATUS_OVER) && + (status & MCi_STATUS_EN) && !(status & MCi_STATUS_PCC) && + !(status & MCi_STATUS_S) && !(status & MCi_STATUS_AR) ) return true; return false; } @@ -520,9 +511,10 @@ static int do_cmci_discover(int i) threshold = cmci_threshold; if ( threshold > max_threshold ) { - mce_printk(MCE_QUIET, - "CMCI: threshold %#x too large for CPU%u bank %u, using %#x\n", - threshold, smp_processor_id(), i, max_threshold); + mce_printk( + MCE_QUIET, + "CMCI: threshold %#x too large for CPU%u bank %u, using %#x\n", + threshold, smp_processor_id(), i, max_threshold); threshold = max_threshold; } wrmsrl(msr, (val & ~CMCI_THRESHOLD_MASK) | CMCI_EN | threshold); @@ -556,8 +548,8 @@ static void cmci_discover(void) * the CMCI interrupt will never be triggered again. 
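/*
 * Worked summary of the MCi_STATUS bit patterns accepted by
 * intel_recoverable_scan() above, derived from the conditions in this hunk
 * (rows other than "corrected" are only reachable when ser_support is set;
 * "-" means the bit is not tested):
 *
 *              UC  OVER  EN  PCC   S  AR
 *   corrected   0    -    -    -   -   -
 *   spurious    1    0    0    -   -   -
 *   SRAR        1    0    1    0   1   1
 *   SRAO        1    -    1    0   1   0
 *   UCNA        1    0    1    0   0   0
 *
 * Every other combination is treated as unrecoverable.
 */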
*/ - mctc = mcheck_mca_logout( - MCA_CMCI_HANDLER, __get_cpu_var(mce_banks_owned), &bs, NULL); + mctc = mcheck_mca_logout(MCA_CMCI_HANDLER, __get_cpu_var(mce_banks_owned), + &bs, NULL); if ( bs.errcnt && mctc != NULL ) { @@ -628,8 +620,8 @@ static void clear_cmci(void) if ( !mcabanks_test(i, __get_cpu_var(mce_banks_owned)) ) continue; rdmsrl(msr, val); - if ( val & (CMCI_EN|CMCI_THRESHOLD_MASK) ) - wrmsrl(msr, val & ~(CMCI_EN|CMCI_THRESHOLD_MASK)); + if ( val & (CMCI_EN | CMCI_THRESHOLD_MASK) ) + wrmsrl(msr, val & ~(CMCI_EN | CMCI_THRESHOLD_MASK)); mcabanks_clear(i, __get_cpu_var(mce_banks_owned)); } } @@ -647,8 +639,8 @@ static void cmci_interrupt(struct cpu_user_regs *regs) ack_APIC_irq(); - mctc = mcheck_mca_logout( - MCA_CMCI_HANDLER, __get_cpu_var(mce_banks_owned), &bs, NULL); + mctc = mcheck_mca_logout(MCA_CMCI_HANDLER, __get_cpu_var(mce_banks_owned), + &bs, NULL); if ( bs.errcnt && mctc != NULL ) { @@ -684,7 +676,7 @@ static void intel_init_cmci(struct cpuinfo_x86 *c) if ( apic & APIC_VECTOR_MASK ) { mce_printk(MCE_QUIET, "CPU%d CMCI LVT vector (%#x) already installed\n", - cpu, ( apic & APIC_VECTOR_MASK )); + cpu, (apic & APIC_VECTOR_MASK)); return; } @@ -773,11 +765,8 @@ static void intel_init_mca(struct cpuinfo_x86 *c) { dprintk(XENLOG_INFO, "MCA Capability: firstbank %d, extended MCE MSR %d%s%s%s%s\n", - first, ext_num, - CAP(broadcast, "BCAST"), - CAP(ser, "SER"), - CAP(cmci, "CMCI"), - CAP(lmce, "LMCE")); + first, ext_num, CAP(broadcast, "BCAST"), CAP(ser, "SER"), + CAP(cmci, "CMCI"), CAP(lmce, "LMCE")); mce_broadcast = broadcast; cmci_support = cmci; @@ -787,18 +776,14 @@ static void intel_init_mca(struct cpuinfo_x86 *c) firstbank = first; } else if ( cmci != cmci_support || ser != ser_support || - broadcast != mce_broadcast || - first != firstbank || ext_num != nr_intel_ext_msrs || - lmce != lmce_support ) + broadcast != mce_broadcast || first != firstbank || + ext_num != nr_intel_ext_msrs || lmce != lmce_support ) dprintk(XENLOG_WARNING, "CPU%u has different MCA capability " "(firstbank %d, extended MCE MSR %d%s%s%s%s)" " than BSP, may cause undetermined result!!!\n", - smp_processor_id(), first, ext_num, - CAP(broadcast, "BCAST"), - CAP(ser, "SER"), - CAP(cmci, "CMCI"), - CAP(lmce, "LMCE")); + smp_processor_id(), first, ext_num, CAP(broadcast, "BCAST"), + CAP(ser, "SER"), CAP(cmci, "CMCI"), CAP(lmce, "LMCE")); #undef CAP } @@ -877,19 +862,19 @@ static int cpu_mcabank_alloc(unsigned int cpu) per_cpu(last_state, cpu) = -1; return 0; - out: +out: mcabanks_free(cmci); mcabanks_free(owned); return -ENOMEM; } -static int cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; int rc = 0; - switch ( action ) + switch (action) { case CPU_UP_PREPARE: rc = cpu_mcabank_alloc(cpu); @@ -909,9 +894,7 @@ static int cpu_callback( return !rc ? 
NOTIFY_DONE : notifier_from_errno(rc); } -static struct notifier_block cpu_nfb = { - .notifier_call = cpu_callback -}; +static struct notifier_block cpu_nfb = {.notifier_call = cpu_callback}; /* p4/p6 family have similar MCA initialization process */ enum mcheck_type intel_mcheck_init(struct cpuinfo_x86 *c, bool bsp) @@ -947,7 +930,7 @@ int vmce_intel_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) if ( bank < GUEST_MC_BANK_NUM ) { v->arch.vmce.bank[bank].mci_ctl2 = val; - mce_printk(MCE_VERBOSE, "MCE: wr MC%u_CTL2 %#"PRIx64"\n", bank, val); + mce_printk(MCE_VERBOSE, "MCE: wr MC%u_CTL2 %#" PRIx64 "\n", bank, val); } return 1; @@ -960,7 +943,7 @@ int vmce_intel_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) if ( bank < GUEST_MC_BANK_NUM ) { *val = v->arch.vmce.bank[bank].mci_ctl2; - mce_printk(MCE_VERBOSE, "MCE: rd MC%u_CTL2 %#"PRIx64"\n", bank, *val); + mce_printk(MCE_VERBOSE, "MCE: rd MC%u_CTL2 %#" PRIx64 "\n", bank, *val); } return 1; diff --git a/xen/arch/x86/cpu/mcheck/mctelem.c b/xen/arch/x86/cpu/mcheck/mctelem.c index 3bb13e5265..ac90f9092d 100644 --- a/xen/arch/x86/cpu/mcheck/mctelem.c +++ b/xen/arch/x86/cpu/mcheck/mctelem.c @@ -28,104 +28,110 @@ #include "mce.h" -struct mctelem_ent { - struct mctelem_ent *mcte_next; /* next in chronological order */ - struct mctelem_ent *mcte_prev; /* previous in chronological order */ - uint32_t mcte_flags; /* See MCTE_F_* below */ - uint32_t mcte_refcnt; /* Reference count */ - void *mcte_data; /* corresponding data payload */ +struct mctelem_ent +{ + struct mctelem_ent *mcte_next; /* next in chronological order */ + struct mctelem_ent *mcte_prev; /* previous in chronological order */ + uint32_t mcte_flags; /* See MCTE_F_* below */ + uint32_t mcte_refcnt; /* Reference count */ + void *mcte_data; /* corresponding data payload */ }; -#define MCTE_F_CLASS_URGENT 0x0001U /* in use - urgent errors */ -#define MCTE_F_CLASS_NONURGENT 0x0002U /* in use - nonurgent errors */ -#define MCTE_F_STATE_FREE 0x0010U /* on a freelist */ -#define MCTE_F_STATE_UNCOMMITTED 0x0020U /* reserved; on no list */ -#define MCTE_F_STATE_COMMITTED 0x0040U /* on a committed list */ -#define MCTE_F_STATE_PROCESSING 0x0080U /* on a processing list */ - -#define MCTE_F_MASK_CLASS (MCTE_F_CLASS_URGENT | MCTE_F_CLASS_NONURGENT) -#define MCTE_F_MASK_STATE (MCTE_F_STATE_FREE | \ - MCTE_F_STATE_UNCOMMITTED | \ - MCTE_F_STATE_COMMITTED | \ - MCTE_F_STATE_PROCESSING) - -#define MCTE_CLASS(tep) ((tep)->mcte_flags & MCTE_F_MASK_CLASS) -#define MCTE_SET_CLASS(tep, new) do { \ - (tep)->mcte_flags &= ~MCTE_F_MASK_CLASS; \ - (tep)->mcte_flags |= MCTE_F_CLASS_##new; } while (0) - -#define MCTE_STATE(tep) ((tep)->mcte_flags & MCTE_F_MASK_STATE) -#define MCTE_TRANSITION_STATE(tep, old, new) do { \ - BUG_ON(MCTE_STATE(tep) != (MCTE_F_STATE_##old)); \ - (tep)->mcte_flags &= ~MCTE_F_MASK_STATE; \ - (tep)->mcte_flags |= (MCTE_F_STATE_##new); } while (0) - -#define MC_URGENT_NENT 10 -#define MC_NONURGENT_NENT 20 +#define MCTE_F_CLASS_URGENT 0x0001U /* in use - urgent errors */ +#define MCTE_F_CLASS_NONURGENT 0x0002U /* in use - nonurgent errors */ +#define MCTE_F_STATE_FREE 0x0010U /* on a freelist */ +#define MCTE_F_STATE_UNCOMMITTED 0x0020U /* reserved; on no list */ +#define MCTE_F_STATE_COMMITTED 0x0040U /* on a committed list */ +#define MCTE_F_STATE_PROCESSING 0x0080U /* on a processing list */ + +#define MCTE_F_MASK_CLASS (MCTE_F_CLASS_URGENT | MCTE_F_CLASS_NONURGENT) +#define MCTE_F_MASK_STATE \ + (MCTE_F_STATE_FREE | MCTE_F_STATE_UNCOMMITTED | MCTE_F_STATE_COMMITTED | \ + 
MCTE_F_STATE_PROCESSING) + +#define MCTE_CLASS(tep) ((tep)->mcte_flags & MCTE_F_MASK_CLASS) +#define MCTE_SET_CLASS(tep, new) \ + do { \ + (tep)->mcte_flags &= ~MCTE_F_MASK_CLASS; \ + (tep)->mcte_flags |= MCTE_F_CLASS_##new; \ + } while ( 0 ) + +#define MCTE_STATE(tep) ((tep)->mcte_flags & MCTE_F_MASK_STATE) +#define MCTE_TRANSITION_STATE(tep, old, new) \ + do { \ + BUG_ON(MCTE_STATE(tep) != (MCTE_F_STATE_##old)); \ + (tep)->mcte_flags &= ~MCTE_F_MASK_STATE; \ + (tep)->mcte_flags |= (MCTE_F_STATE_##new); \ + } while ( 0 ) + +#define MC_URGENT_NENT 10 +#define MC_NONURGENT_NENT 20 #define MC_NENT (MC_URGENT_NENT + MC_NONURGENT_NENT) -#define MC_NCLASSES (MC_NONURGENT + 1) - -#define COOKIE2MCTE(c) ((struct mctelem_ent *)(c)) -#define MCTE2COOKIE(tep) ((mctelem_cookie_t)(tep)) - -static struct mc_telem_ctl { - /* Linked lists that thread the array members together. - * - * The free lists is a bit array where bit 1 means free. - * This as element number is quite small and is easy to - * atomically allocate that way. - * - * The committed list grows at the head and we do not maintain a - * tail pointer; insertions are performed atomically. The head - * thus has the most-recently committed telemetry, i.e. the - * list is in reverse chronological order. The committed list - * is singly-linked via mcte_prev pointers, and mcte_next is NULL. - * When we move telemetry from the committed list to the processing - * list we atomically unlink the committed list and keep a pointer - * to the head of that list; we then traverse the list following - * mcte_prev and fill in mcte_next to doubly-link the list, and then - * append the tail of the list onto the processing list. If we panic - * during this manipulation of the committed list we still have - * the pointer to its head so we can recover all entries during - * the panic flow (albeit in reverse chronological order). - * - * The processing list is updated in a controlled context, and - * we can lock it for updates. The head of the processing list - * always has the oldest telemetry, and we append (as above) - * at the tail of the processing list. */ - DECLARE_BITMAP(mctc_free, MC_NENT); - struct mctelem_ent *mctc_committed[MC_NCLASSES]; - struct mctelem_ent *mctc_processing_head[MC_NCLASSES]; - struct mctelem_ent *mctc_processing_tail[MC_NCLASSES]; - /* - * Telemetry array - */ - struct mctelem_ent *mctc_elems; +#define MC_NCLASSES (MC_NONURGENT + 1) + +#define COOKIE2MCTE(c) ((struct mctelem_ent *)(c)) +#define MCTE2COOKIE(tep) ((mctelem_cookie_t)(tep)) + +static struct mc_telem_ctl +{ + /* Linked lists that thread the array members together. + * + * The free lists is a bit array where bit 1 means free. + * This as element number is quite small and is easy to + * atomically allocate that way. + * + * The committed list grows at the head and we do not maintain a + * tail pointer; insertions are performed atomically. The head + * thus has the most-recently committed telemetry, i.e. the + * list is in reverse chronological order. The committed list + * is singly-linked via mcte_prev pointers, and mcte_next is NULL. + * When we move telemetry from the committed list to the processing + * list we atomically unlink the committed list and keep a pointer + * to the head of that list; we then traverse the list following + * mcte_prev and fill in mcte_next to doubly-link the list, and then + * append the tail of the list onto the processing list. 
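/*
 * Side note on the do { ... } while ( 0 ) wrapper used by MCTE_SET_CLASS()
 * and MCTE_TRANSITION_STATE() above: it turns the multi-statement macro
 * into a single statement, so an unbraced caller - exactly the pattern
 * used later in mctelem_reserve() - still parses as intended and the
 * trailing semicolon binds correctly:
 *
 *     if ( which == MC_URGENT )
 *         MCTE_SET_CLASS(tep, URGENT);
 *     else
 *         MCTE_SET_CLASS(tep, NONURGENT);
 */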
If we panic + * during this manipulation of the committed list we still have + * the pointer to its head so we can recover all entries during + * the panic flow (albeit in reverse chronological order). + * + * The processing list is updated in a controlled context, and + * we can lock it for updates. The head of the processing list + * always has the oldest telemetry, and we append (as above) + * at the tail of the processing list. */ + DECLARE_BITMAP(mctc_free, MC_NENT); + struct mctelem_ent *mctc_committed[MC_NCLASSES]; + struct mctelem_ent *mctc_processing_head[MC_NCLASSES]; + struct mctelem_ent *mctc_processing_tail[MC_NCLASSES]; + /* + * Telemetry array + */ + struct mctelem_ent *mctc_elems; } mctctl; -struct mc_telem_cpu_ctl { - /* - * Per-CPU processing lists, used for deferred (softirq) - * processing of telemetry. - * - * The two pending lists @lmce_pending and @pending grow at - * the head in the reverse chronological order. - * - * @pending and @lmce_pending on the same CPU are mutually - * exclusive, i.e. deferred MCE on a CPU are either all in - * @lmce_pending or all in @pending. In the former case, all - * deferred MCE are LMCE. In the latter case, both LMCE and - * non-local MCE can be in @pending, and @pending contains at - * least one non-local MCE if it's not empty. - * - * Changes to @pending and @lmce_pending should be performed - * via mctelem_process_deferred() and mctelem_defer(), in order - * to guarantee the above mutual exclusivity. - */ - struct mctelem_ent *pending, *lmce_pending; - struct mctelem_ent *processing; +struct mc_telem_cpu_ctl +{ + /* + * Per-CPU processing lists, used for deferred (softirq) + * processing of telemetry. + * + * The two pending lists @lmce_pending and @pending grow at + * the head in the reverse chronological order. + * + * @pending and @lmce_pending on the same CPU are mutually + * exclusive, i.e. deferred MCE on a CPU are either all in + * @lmce_pending or all in @pending. In the former case, all + * deferred MCE are LMCE. In the latter case, both LMCE and + * non-local MCE can be in @pending, and @pending contains at + * least one non-local MCE if it's not empty. + * + * Changes to @pending and @lmce_pending should be performed + * via mctelem_process_deferred() and mctelem_defer(), in order + * to guarantee the above mutual exclusivity. 
+ */ + struct mctelem_ent *pending, *lmce_pending; + struct mctelem_ent *processing; }; static DEFINE_PER_CPU(struct mc_telem_cpu_ctl, mctctl); @@ -133,16 +139,17 @@ static DEFINE_PER_CPU(struct mc_telem_cpu_ctl, mctctl); static DEFINE_SPINLOCK(processing_lock); static void mctelem_xchg_head(struct mctelem_ent **headp, - struct mctelem_ent **linkp, - struct mctelem_ent *new) + struct mctelem_ent **linkp, + struct mctelem_ent *new) { - for (;;) { - struct mctelem_ent *old; - - *linkp = old = *headp; - if (cmpxchgptr(headp, old, new) == old) - break; - } + for ( ;; ) + { + struct mctelem_ent *old; + + *linkp = old = *headp; + if ( cmpxchgptr(headp, old, new) == old ) + break; + } } /** @@ -170,30 +177,30 @@ static void mctelem_xchg_head(struct mctelem_ent **headp, */ void mctelem_defer(mctelem_cookie_t cookie, bool lmce) { - struct mctelem_ent *tep = COOKIE2MCTE(cookie); - struct mc_telem_cpu_ctl *mctctl = &this_cpu(mctctl); - - ASSERT(mctctl->pending == NULL || mctctl->lmce_pending == NULL); - - if (mctctl->pending) - mctelem_xchg_head(&mctctl->pending, &tep->mcte_next, tep); - else if (lmce) - mctelem_xchg_head(&mctctl->lmce_pending, &tep->mcte_next, tep); - else { - /* - * LMCE is supported on Skylake-server and later CPUs, on - * which mce_broadcast is always true. Therefore, non-empty - * mctctl->lmce_pending in this branch implies a broadcasting - * MC# is being handled, every CPU is in the exception - * context, and no one is consuming mctctl->pending at this - * moment. As a result, the following two exchanges together - * can be treated as atomic. - */ - if (mctctl->lmce_pending) - mctelem_xchg_head(&mctctl->lmce_pending, - &mctctl->pending, NULL); - mctelem_xchg_head(&mctctl->pending, &tep->mcte_next, tep); - } + struct mctelem_ent *tep = COOKIE2MCTE(cookie); + struct mc_telem_cpu_ctl *mctctl = &this_cpu(mctctl); + + ASSERT(mctctl->pending == NULL || mctctl->lmce_pending == NULL); + + if ( mctctl->pending ) + mctelem_xchg_head(&mctctl->pending, &tep->mcte_next, tep); + else if ( lmce ) + mctelem_xchg_head(&mctctl->lmce_pending, &tep->mcte_next, tep); + else + { + /* + * LMCE is supported on Skylake-server and later CPUs, on + * which mce_broadcast is always true. Therefore, non-empty + * mctctl->lmce_pending in this branch implies a broadcasting + * MC# is being handled, every CPU is in the exception + * context, and no one is consuming mctctl->pending at this + * moment. As a result, the following two exchanges together + * can be treated as atomic. + */ + if ( mctctl->lmce_pending ) + mctelem_xchg_head(&mctctl->lmce_pending, &mctctl->pending, NULL); + mctelem_xchg_head(&mctctl->pending, &tep->mcte_next, tep); + } } /** @@ -210,78 +217,79 @@ void mctelem_defer(mctelem_cookie_t cookie, bool lmce) * @fn: the function to handle the deferred MCE * @lmce: indicate which pending list on @cpu is handled */ -void mctelem_process_deferred(unsigned int cpu, - int (*fn)(mctelem_cookie_t), - bool lmce) +void mctelem_process_deferred(unsigned int cpu, int (*fn)(mctelem_cookie_t), + bool lmce) { - struct mctelem_ent *tep; - struct mctelem_ent *head, *prev; - struct mc_telem_cpu_ctl *mctctl = &per_cpu(mctctl, cpu); - int ret; - - /* - * First, unhook the list of telemetry structures, and - * hook it up to the processing list head for this CPU. - * - * If @lmce is true and a non-local MC# occurs before the - * following atomic exchange, @lmce will not hold after - * resumption, because all telemetries in @lmce_pending on - * @cpu are moved to @pending on @cpu in mcheck_cmn_handler(). 
- * In such a case, no telemetries will be handled in this - * function after resumption. Another round of MCE softirq, - * which was raised by above mcheck_cmn_handler(), will handle - * those moved telemetries in @pending on @cpu. - * - * Any MC# occurring after the following atomic exchange will be - * handled by another round of MCE softirq. - */ - mctelem_xchg_head(lmce ? &mctctl->lmce_pending : &mctctl->pending, - &this_cpu(mctctl.processing), NULL); - - head = this_cpu(mctctl.processing); - - /* - * Then, fix up the list to include prev pointers, to make - * things a little easier, as the list must be traversed in - * chronological order, which is backward from the order they - * are in. - */ - for (tep = head, prev = NULL; tep != NULL; tep = tep->mcte_next) { - tep->mcte_prev = prev; - prev = tep; - } - - /* - * Now walk the list of telemetry structures, handling each - * one of them. Unhooking the structure here does not need to - * be atomic, as this list is only accessed from a softirq - * context; the MCE handler does not touch it. - */ - for (tep = prev; tep != NULL; tep = prev) { - prev = tep->mcte_prev; - tep->mcte_next = tep->mcte_prev = NULL; - - ret = fn(MCTE2COOKIE(tep)); - if (prev != NULL) - prev->mcte_next = NULL; - tep->mcte_prev = tep->mcte_next = NULL; - if (ret != 0) - mctelem_commit(MCTE2COOKIE(tep)); - else - mctelem_dismiss(MCTE2COOKIE(tep)); - } + struct mctelem_ent *tep; + struct mctelem_ent *head, *prev; + struct mc_telem_cpu_ctl *mctctl = &per_cpu(mctctl, cpu); + int ret; + + /* + * First, unhook the list of telemetry structures, and + * hook it up to the processing list head for this CPU. + * + * If @lmce is true and a non-local MC# occurs before the + * following atomic exchange, @lmce will not hold after + * resumption, because all telemetries in @lmce_pending on + * @cpu are moved to @pending on @cpu in mcheck_cmn_handler(). + * In such a case, no telemetries will be handled in this + * function after resumption. Another round of MCE softirq, + * which was raised by above mcheck_cmn_handler(), will handle + * those moved telemetries in @pending on @cpu. + * + * Any MC# occurring after the following atomic exchange will be + * handled by another round of MCE softirq. + */ + mctelem_xchg_head(lmce ? &mctctl->lmce_pending : &mctctl->pending, + &this_cpu(mctctl.processing), NULL); + + head = this_cpu(mctctl.processing); + + /* + * Then, fix up the list to include prev pointers, to make + * things a little easier, as the list must be traversed in + * chronological order, which is backward from the order they + * are in. + */ + for ( tep = head, prev = NULL; tep != NULL; tep = tep->mcte_next ) + { + tep->mcte_prev = prev; + prev = tep; + } + + /* + * Now walk the list of telemetry structures, handling each + * one of them. Unhooking the structure here does not need to + * be atomic, as this list is only accessed from a softirq + * context; the MCE handler does not touch it. 
+ */ + for ( tep = prev; tep != NULL; tep = prev ) + { + prev = tep->mcte_prev; + tep->mcte_next = tep->mcte_prev = NULL; + + ret = fn(MCTE2COOKIE(tep)); + if ( prev != NULL ) + prev->mcte_next = NULL; + tep->mcte_prev = tep->mcte_next = NULL; + if ( ret != 0 ) + mctelem_commit(MCTE2COOKIE(tep)); + else + mctelem_dismiss(MCTE2COOKIE(tep)); + } } bool mctelem_has_deferred(unsigned int cpu) { - if (per_cpu(mctctl.pending, cpu) != NULL) - return true; - return false; + if ( per_cpu(mctctl.pending, cpu) != NULL ) + return true; + return false; } bool mctelem_has_deferred_lmce(unsigned int cpu) { - return per_cpu(mctctl.lmce_pending, cpu) != NULL; + return per_cpu(mctctl.lmce_pending, cpu) != NULL; } /* Free an entry to its native free list; the entry must not be linked on @@ -289,14 +297,14 @@ bool mctelem_has_deferred_lmce(unsigned int cpu) */ static void mctelem_free(struct mctelem_ent *tep) { - BUG_ON(tep->mcte_refcnt != 0); - BUG_ON(MCTE_STATE(tep) != MCTE_F_STATE_FREE); + BUG_ON(tep->mcte_refcnt != 0); + BUG_ON(MCTE_STATE(tep) != MCTE_F_STATE_FREE); - tep->mcte_prev = NULL; - tep->mcte_next = NULL; + tep->mcte_prev = NULL; + tep->mcte_next = NULL; - /* set free in array */ - set_bit(tep - mctctl.mctc_elems, mctctl.mctc_free); + /* set free in array */ + set_bit(tep - mctctl.mctc_elems, mctctl.mctc_free); } /* Increment the reference count of an entry that is not linked on to @@ -304,7 +312,7 @@ static void mctelem_free(struct mctelem_ent *tep) */ static void mctelem_hold(struct mctelem_ent *tep) { - tep->mcte_refcnt++; + tep->mcte_refcnt++; } /* Increment the reference count on an entry that is linked at the head of @@ -312,11 +320,11 @@ static void mctelem_hold(struct mctelem_ent *tep) */ static void mctelem_processing_hold(struct mctelem_ent *tep) { - int which = MCTE_CLASS(tep) == MCTE_F_CLASS_URGENT ? - MC_URGENT : MC_NONURGENT; + int which = + MCTE_CLASS(tep) == MCTE_F_CLASS_URGENT ? MC_URGENT : MC_NONURGENT; - BUG_ON(tep != mctctl.mctc_processing_head[which]); - tep->mcte_refcnt++; + BUG_ON(tep != mctctl.mctc_processing_head[which]); + tep->mcte_refcnt++; } /* Decrement the reference count on an entry that is linked at the head of @@ -324,46 +332,49 @@ static void mctelem_processing_hold(struct mctelem_ent *tep) */ static void mctelem_processing_release(struct mctelem_ent *tep) { - int which = MCTE_CLASS(tep) == MCTE_F_CLASS_URGENT ? - MC_URGENT : MC_NONURGENT; - - BUG_ON(tep != mctctl.mctc_processing_head[which]); - if (--tep->mcte_refcnt == 0) { - MCTE_TRANSITION_STATE(tep, PROCESSING, FREE); - mctctl.mctc_processing_head[which] = tep->mcte_next; - mctelem_free(tep); - } + int which = + MCTE_CLASS(tep) == MCTE_F_CLASS_URGENT ? 
MC_URGENT : MC_NONURGENT; + + BUG_ON(tep != mctctl.mctc_processing_head[which]); + if ( --tep->mcte_refcnt == 0 ) + { + MCTE_TRANSITION_STATE(tep, PROCESSING, FREE); + mctctl.mctc_processing_head[which] = tep->mcte_next; + mctelem_free(tep); + } } void __init mctelem_init(unsigned int datasz) { - char *datarr; - unsigned int i; - - BUILD_BUG_ON(MC_URGENT != 0 || MC_NONURGENT != 1 || MC_NCLASSES != 2); - - datasz = (datasz & ~0xf) + 0x10; /* 16 byte roundup */ - - if ((mctctl.mctc_elems = xmalloc_array(struct mctelem_ent, - MC_NENT)) == NULL || - (datarr = xmalloc_bytes(MC_NENT * datasz)) == NULL) { - xfree(mctctl.mctc_elems); - printk("Allocations for MCA telemetry failed\n"); - return; - } - - for (i = 0; i < MC_NENT; i++) { - struct mctelem_ent *tep; - - tep = mctctl.mctc_elems + i; - tep->mcte_flags = MCTE_F_STATE_FREE; - tep->mcte_refcnt = 0; - tep->mcte_data = datarr + i * datasz; - - __set_bit(i, mctctl.mctc_free); - tep->mcte_next = NULL; - tep->mcte_prev = NULL; - } + char *datarr; + unsigned int i; + + BUILD_BUG_ON(MC_URGENT != 0 || MC_NONURGENT != 1 || MC_NCLASSES != 2); + + datasz = (datasz & ~0xf) + 0x10; /* 16 byte roundup */ + + if ( (mctctl.mctc_elems = xmalloc_array(struct mctelem_ent, MC_NENT)) == + NULL || + (datarr = xmalloc_bytes(MC_NENT * datasz)) == NULL ) + { + xfree(mctctl.mctc_elems); + printk("Allocations for MCA telemetry failed\n"); + return; + } + + for ( i = 0; i < MC_NENT; i++ ) + { + struct mctelem_ent *tep; + + tep = mctctl.mctc_elems + i; + tep->mcte_flags = MCTE_F_STATE_FREE; + tep->mcte_refcnt = 0; + tep->mcte_data = datarr + i * datasz; + + __set_bit(i, mctctl.mctc_free); + tep->mcte_next = NULL; + tep->mcte_prev = NULL; + } } /* incremented non-atomically when reserve fails */ @@ -375,40 +386,43 @@ static int mctelem_drop_count; */ mctelem_cookie_t mctelem_reserve(mctelem_class_t which) { - unsigned bit; - unsigned start_bit = (which == MC_URGENT) ? 0 : MC_URGENT_NENT; - - for (;;) { - bit = find_next_bit(mctctl.mctc_free, MC_NENT, start_bit); - - if (bit >= MC_NENT) { - mctelem_drop_count++; - return (NULL); - } - - /* try to allocate, atomically clear free bit */ - if (test_and_clear_bit(bit, mctctl.mctc_free)) { - /* return element we got */ - struct mctelem_ent *tep = mctctl.mctc_elems + bit; - - mctelem_hold(tep); - MCTE_TRANSITION_STATE(tep, FREE, UNCOMMITTED); - tep->mcte_next = NULL; - tep->mcte_prev = NULL; - if (which == MC_URGENT) - MCTE_SET_CLASS(tep, URGENT); - else - MCTE_SET_CLASS(tep, NONURGENT); - return MCTE2COOKIE(tep); - } - } + unsigned bit; + unsigned start_bit = (which == MC_URGENT) ? 
0 : MC_URGENT_NENT; + + for ( ;; ) + { + bit = find_next_bit(mctctl.mctc_free, MC_NENT, start_bit); + + if ( bit >= MC_NENT ) + { + mctelem_drop_count++; + return (NULL); + } + + /* try to allocate, atomically clear free bit */ + if ( test_and_clear_bit(bit, mctctl.mctc_free) ) + { + /* return element we got */ + struct mctelem_ent *tep = mctctl.mctc_elems + bit; + + mctelem_hold(tep); + MCTE_TRANSITION_STATE(tep, FREE, UNCOMMITTED); + tep->mcte_next = NULL; + tep->mcte_prev = NULL; + if ( which == MC_URGENT ) + MCTE_SET_CLASS(tep, URGENT); + else + MCTE_SET_CLASS(tep, NONURGENT); + return MCTE2COOKIE(tep); + } + } } void *mctelem_dataptr(mctelem_cookie_t cookie) { - struct mctelem_ent *tep = COOKIE2MCTE(cookie); + struct mctelem_ent *tep = COOKIE2MCTE(cookie); - return tep->mcte_data; + return tep->mcte_data; } /* Release a previously reserved entry back to the freelist without @@ -417,11 +431,11 @@ void *mctelem_dataptr(mctelem_cookie_t cookie) */ void mctelem_dismiss(mctelem_cookie_t cookie) { - struct mctelem_ent *tep = COOKIE2MCTE(cookie); + struct mctelem_ent *tep = COOKIE2MCTE(cookie); - tep->mcte_refcnt--; - MCTE_TRANSITION_STATE(tep, UNCOMMITTED, FREE); - mctelem_free(tep); + tep->mcte_refcnt--; + MCTE_TRANSITION_STATE(tep, UNCOMMITTED, FREE); + mctelem_free(tep); } /* Commit an entry with completed telemetry for logging. The caller must @@ -431,14 +445,14 @@ void mctelem_dismiss(mctelem_cookie_t cookie) */ void mctelem_commit(mctelem_cookie_t cookie) { - struct mctelem_ent *tep = COOKIE2MCTE(cookie); - mctelem_class_t target = MCTE_CLASS(tep) == MCTE_F_CLASS_URGENT ? - MC_URGENT : MC_NONURGENT; + struct mctelem_ent *tep = COOKIE2MCTE(cookie); + mctelem_class_t target = + MCTE_CLASS(tep) == MCTE_F_CLASS_URGENT ? MC_URGENT : MC_NONURGENT; - BUG_ON(tep->mcte_next != NULL || tep->mcte_prev != NULL); - MCTE_TRANSITION_STATE(tep, UNCOMMITTED, COMMITTED); + BUG_ON(tep->mcte_next != NULL || tep->mcte_prev != NULL); + MCTE_TRANSITION_STATE(tep, UNCOMMITTED, COMMITTED); - mctelem_xchg_head(&mctctl.mctc_committed[target], &tep->mcte_prev, tep); + mctelem_xchg_head(&mctctl.mctc_committed[target], &tep->mcte_prev, tep); } /* Move telemetry from committed list to processing list, reversing the @@ -458,94 +472,96 @@ static struct mctelem_ent *dangling[MC_NCLASSES]; static void mctelem_append_processing(mctelem_class_t which) { - mctelem_class_t target = which == MC_URGENT ? - MC_URGENT : MC_NONURGENT; - struct mctelem_ent **commlp = &mctctl.mctc_committed[target]; - struct mctelem_ent **proclhp = &mctctl.mctc_processing_head[target]; - struct mctelem_ent **procltp = &mctctl.mctc_processing_tail[target]; - struct mctelem_ent *tep, *ltep; - - /* Check for an empty list; no race since we hold the processing lock */ - if (*commlp == NULL) - return; - - /* Atomically unlink the committed list, and keep a pointer to - * the list we unlink in a well-known location so it can be - * picked up in panic code should we panic between this unlink - * and the append to the processing list. */ - mctelem_xchg_head(commlp, &dangling[target], NULL); - - if (dangling[target] == NULL) - return; - - /* Traverse the list following the previous pointers (reverse - * chronological order). For each entry fill in the next pointer - * and transition the element state. 
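/*
 * Illustrative producer-side use of the telemetry API defined above - a
 * minimal sketch, not from the Xen tree; fill_payload() is a hypothetical
 * helper standing in for whatever populates the reserved buffer.
 */
extern bool fill_payload(void *data); /* hypothetical */

static void mctelem_usage_example(void)
{
    mctelem_cookie_t cookie = mctelem_reserve(MC_URGENT);

    if ( cookie == NULL )
        return;                      /* no free entry; the error is dropped */

    if ( fill_payload(mctelem_dataptr(cookie)) )
        mctelem_commit(cookie);      /* queue for later consumption/logging */
    else
        mctelem_dismiss(cookie);     /* abandon the reservation */
}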
*/ - for (tep = dangling[target], ltep = NULL; tep != NULL; - tep = tep->mcte_prev) { - MCTE_TRANSITION_STATE(tep, COMMITTED, PROCESSING); - tep->mcte_next = ltep; - ltep = tep; - } - - /* ltep points to the head of a chronologically ordered linked - * list of telemetry entries ending at the most recent entry - * dangling[target] if mcte_next is followed; tack this on to - * the processing list. - */ - if (*proclhp == NULL) { - *proclhp = ltep; - *procltp = dangling[target]; - } else { - (*procltp)->mcte_next = ltep; - ltep->mcte_prev = *procltp; - *procltp = dangling[target]; - } - smp_wmb(); - dangling[target] = NULL; - smp_wmb(); + mctelem_class_t target = which == MC_URGENT ? MC_URGENT : MC_NONURGENT; + struct mctelem_ent **commlp = &mctctl.mctc_committed[target]; + struct mctelem_ent **proclhp = &mctctl.mctc_processing_head[target]; + struct mctelem_ent **procltp = &mctctl.mctc_processing_tail[target]; + struct mctelem_ent *tep, *ltep; + + /* Check for an empty list; no race since we hold the processing lock */ + if ( *commlp == NULL ) + return; + + /* Atomically unlink the committed list, and keep a pointer to + * the list we unlink in a well-known location so it can be + * picked up in panic code should we panic between this unlink + * and the append to the processing list. */ + mctelem_xchg_head(commlp, &dangling[target], NULL); + + if ( dangling[target] == NULL ) + return; + + /* Traverse the list following the previous pointers (reverse + * chronological order). For each entry fill in the next pointer + * and transition the element state. */ + for ( tep = dangling[target], ltep = NULL; tep != NULL; + tep = tep->mcte_prev ) + { + MCTE_TRANSITION_STATE(tep, COMMITTED, PROCESSING); + tep->mcte_next = ltep; + ltep = tep; + } + + /* ltep points to the head of a chronologically ordered linked + * list of telemetry entries ending at the most recent entry + * dangling[target] if mcte_next is followed; tack this on to + * the processing list. + */ + if ( *proclhp == NULL ) + { + *proclhp = ltep; + *procltp = dangling[target]; + } + else + { + (*procltp)->mcte_next = ltep; + ltep->mcte_prev = *procltp; + *procltp = dangling[target]; + } + smp_wmb(); + dangling[target] = NULL; + smp_wmb(); } mctelem_cookie_t mctelem_consume_oldest_begin(mctelem_class_t which) { - mctelem_class_t target = (which == MC_URGENT) ? - MC_URGENT : MC_NONURGENT; - struct mctelem_ent *tep; - - spin_lock(&processing_lock); - mctelem_append_processing(target); - if ((tep = mctctl.mctc_processing_head[target]) == NULL) { - spin_unlock(&processing_lock); - return NULL; - } - - mctelem_processing_hold(tep); - spin_unlock(&processing_lock); - return MCTE2COOKIE(tep); + mctelem_class_t target = (which == MC_URGENT) ? MC_URGENT : MC_NONURGENT; + struct mctelem_ent *tep; + + spin_lock(&processing_lock); + mctelem_append_processing(target); + if ( (tep = mctctl.mctc_processing_head[target]) == NULL ) + { + spin_unlock(&processing_lock); + return NULL; + } + + mctelem_processing_hold(tep); + spin_unlock(&processing_lock); + return MCTE2COOKIE(tep); } void mctelem_consume_oldest_end(mctelem_cookie_t cookie) { - struct mctelem_ent *tep = COOKIE2MCTE(cookie); + struct mctelem_ent *tep = COOKIE2MCTE(cookie); - spin_lock(&processing_lock); - mctelem_processing_release(tep); - spin_unlock(&processing_lock); + spin_lock(&processing_lock); + mctelem_processing_release(tep); + spin_unlock(&processing_lock); } void mctelem_ack(mctelem_class_t which, mctelem_cookie_t cookie) { - mctelem_class_t target = (which == MC_URGENT) ? 
- MC_URGENT : MC_NONURGENT; - struct mctelem_ent *tep = COOKIE2MCTE(cookie); + mctelem_class_t target = (which == MC_URGENT) ? MC_URGENT : MC_NONURGENT; + struct mctelem_ent *tep = COOKIE2MCTE(cookie); - if (tep == NULL) - return; + if ( tep == NULL ) + return; - spin_lock(&processing_lock); - if (tep == mctctl.mctc_processing_head[target]) - mctelem_processing_release(tep); - spin_unlock(&processing_lock); + spin_lock(&processing_lock); + if ( tep == mctctl.mctc_processing_head[target] ) + mctelem_processing_release(tep); + spin_unlock(&processing_lock); } /* diff --git a/xen/arch/x86/cpu/mcheck/non-fatal.c b/xen/arch/x86/cpu/mcheck/non-fatal.c index d12e8f23ba..ddce64050a 100644 --- a/xen/arch/x86/cpu/mcheck/non-fatal.c +++ b/xen/arch/x86/cpu/mcheck/non-fatal.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include @@ -32,86 +32,97 @@ static uint64_t period = MCE_PERIOD; static int adjust = 0; static int variable_period = 1; -static void mce_checkregs (void *info) +static void mce_checkregs(void *info) { - mctelem_cookie_t mctc; - struct mca_summary bs; - static uint64_t dumpcount = 0; - - mctc = mcheck_mca_logout(MCA_POLLER, __get_cpu_var(poll_bankmask), &bs, NULL); - - if (bs.errcnt && mctc != NULL) { - adjust++; - - /* If Dom0 enabled the VIRQ_MCA event, then notify it. - * Otherwise, if dom0 has had plenty of time to register - * the virq handler but still hasn't then dump telemetry - * to the Xen console. The call count may be incremented - * on multiple cpus at once and is indicative only - just - * a simple-minded attempt to avoid spamming the console - * for corrected errors in early startup. - */ - - if (dom0_vmce_enabled()) { - mctelem_commit(mctc); - send_global_virq(VIRQ_MCA); - } else if (++dumpcount >= 10) { - x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc)); - mctelem_dismiss(mctc); - } else { - mctelem_dismiss(mctc); - } - } else if (mctc != NULL) { - mctelem_dismiss(mctc); - } + mctelem_cookie_t mctc; + struct mca_summary bs; + static uint64_t dumpcount = 0; + + mctc = + mcheck_mca_logout(MCA_POLLER, __get_cpu_var(poll_bankmask), &bs, NULL); + + if ( bs.errcnt && mctc != NULL ) + { + adjust++; + + /* If Dom0 enabled the VIRQ_MCA event, then notify it. + * Otherwise, if dom0 has had plenty of time to register + * the virq handler but still hasn't then dump telemetry + * to the Xen console. The call count may be incremented + * on multiple cpus at once and is indicative only - just + * a simple-minded attempt to avoid spamming the console + * for corrected errors in early startup. 
+ */ + + if ( dom0_vmce_enabled() ) + { + mctelem_commit(mctc); + send_global_virq(VIRQ_MCA); + } + else if ( ++dumpcount >= 10 ) + { + x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc)); + mctelem_dismiss(mctc); + } + else + { + mctelem_dismiss(mctc); + } + } + else if ( mctc != NULL ) + { + mctelem_dismiss(mctc); + } } static void mce_work_fn(void *data) -{ - on_each_cpu(mce_checkregs, NULL, 1); - - if (variable_period) { - if (adjust) - period /= (adjust + 1); - else - period *= 2; - if (period > MCE_PERIOD_MAX) - period = MCE_PERIOD_MAX; - if (period < MCE_PERIOD_MIN) - period = MCE_PERIOD_MIN; - } - - set_timer(&mce_timer, NOW() + period); - adjust = 0; +{ + on_each_cpu(mce_checkregs, NULL, 1); + + if ( variable_period ) + { + if ( adjust ) + period /= (adjust + 1); + else + period *= 2; + if ( period > MCE_PERIOD_MAX ) + period = MCE_PERIOD_MAX; + if ( period < MCE_PERIOD_MIN ) + period = MCE_PERIOD_MIN; + } + + set_timer(&mce_timer, NOW() + period); + adjust = 0; } static int __init init_nonfatal_mce_checker(void) { - struct cpuinfo_x86 *c = &boot_cpu_data; - - /* Check for MCE support */ - if (!opt_mce || !mce_available(c)) - return -ENODEV; - - if (__get_cpu_var(poll_bankmask) == NULL) - return -EINVAL; - - /* - * Check for non-fatal errors every MCE_RATE s - */ - switch (c->x86_vendor) { - case X86_VENDOR_AMD: - /* Assume we are on K8 or newer AMD CPU here */ - amd_nonfatal_mcheck_init(c); - break; - - case X86_VENDOR_INTEL: - init_timer(&mce_timer, mce_work_fn, NULL, 0); - set_timer(&mce_timer, NOW() + MCE_PERIOD); - break; - } - - printk(KERN_INFO "mcheck_poll: Machine check polling timer started.\n"); - return 0; + struct cpuinfo_x86 *c = &boot_cpu_data; + + /* Check for MCE support */ + if ( !opt_mce || !mce_available(c) ) + return -ENODEV; + + if ( __get_cpu_var(poll_bankmask) == NULL ) + return -EINVAL; + + /* + * Check for non-fatal errors every MCE_RATE s + */ + switch (c->x86_vendor) + { + case X86_VENDOR_AMD: + /* Assume we are on K8 or newer AMD CPU here */ + amd_nonfatal_mcheck_init(c); + break; + + case X86_VENDOR_INTEL: + init_timer(&mce_timer, mce_work_fn, NULL, 0); + set_timer(&mce_timer, NOW() + MCE_PERIOD); + break; + } + + printk(KERN_INFO "mcheck_poll: Machine check polling timer started.\n"); + return 0; } __initcall(init_nonfatal_mce_checker); diff --git a/xen/arch/x86/cpu/mcheck/util.c b/xen/arch/x86/cpu/mcheck/util.c index eaedee4ae8..c95587f832 100644 --- a/xen/arch/x86/cpu/mcheck/util.c +++ b/xen/arch/x86/cpu/mcheck/util.c @@ -8,7 +8,7 @@ void mce_panic_check(void) if ( is_mc_panic ) { local_irq_enable(); - for ( ; ; ) + for ( ;; ) halt(); } } diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c index f15835e9f6..d3518fcd3c 100644 --- a/xen/arch/x86/cpu/mcheck/vmce.c +++ b/xen/arch/x86/cpu/mcheck/vmce.c @@ -44,10 +44,8 @@ * MCG_CMCI_P: expose CMCI capability but never really inject it to guest, * for sake of performance since guest not polling periodically */ -#define INTEL_GUEST_MCG_CAP (MCG_SER_P | \ - MCG_TES_P | \ - MCG_CMCI_P | \ - GUEST_MC_BANK_NUM) +#define INTEL_GUEST_MCG_CAP \ + (MCG_SER_P | MCG_TES_P | MCG_CMCI_P | GUEST_MC_BANK_NUM) #define AMD_GUEST_MCG_CAP GUEST_MC_BANK_NUM @@ -81,10 +79,11 @@ int vmce_restore_vcpu(struct vcpu *v, const struct hvm_vmce_vcpu *ctxt) if ( ctxt->caps & ~guest_mcg_cap & ~MCG_CAP_COUNT & ~MCG_CTL_P ) { - dprintk(XENLOG_G_ERR, "%s restore: unsupported MCA capabilities" + dprintk(XENLOG_G_ERR, + "%s restore: unsupported MCA capabilities" " %#" PRIx64 " for %pv (supported: %#Lx)\n", - 
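/*
 * Worked example of the adaptive poll interval in mce_work_fn() above,
 * kept symbolic in terms of the MCE_PERIOD_* bounds from this file:
 * starting from MCE_PERIOD, a run that logged errors on one CPU
 * (adjust == 1) divides the next interval by 2, an error-free run doubles
 * it, and the result is always clamped to [MCE_PERIOD_MIN, MCE_PERIOD_MAX].
 * Under a burst of corrected errors the poller therefore converges on
 * MCE_PERIOD_MIN, and in the quiet case it backs off to MCE_PERIOD_MAX.
 */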
is_hvm_vcpu(v) ? "HVM" : "PV", ctxt->caps, - v, guest_mcg_cap & ~MCG_CAP_COUNT); + is_hvm_vcpu(v) ? "HVM" : "PV", ctxt->caps, v, + guest_mcg_cap & ~MCG_CAP_COUNT); return -EPERM; } @@ -107,13 +106,13 @@ static int bank_mce_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) *val = 0; - switch ( msr & (-MSR_IA32_MC0_CTL | 3) ) + switch (msr & (-MSR_IA32_MC0_CTL | 3)) { case MSR_IA32_MC0_CTL: /* stick all 1's to MCi_CTL */ *val = ~0UL; - mce_printk(MCE_VERBOSE, "MCE: %pv: rd MC%u_CTL %#"PRIx64"\n", - v, bank, *val); + mce_printk(MCE_VERBOSE, "MCE: %pv: rd MC%u_CTL %#" PRIx64 "\n", v, bank, + *val); break; case MSR_IA32_MC0_STATUS: @@ -121,8 +120,9 @@ static int bank_mce_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) { *val = v->arch.vmce.bank[bank].mci_status; if ( *val ) - mce_printk(MCE_VERBOSE, "MCE: %pv: rd MC%u_STATUS %#"PRIx64"\n", - v, bank, *val); + mce_printk(MCE_VERBOSE, + "MCE: %pv: rd MC%u_STATUS %#" PRIx64 "\n", v, bank, + *val); } break; @@ -131,7 +131,7 @@ static int bank_mce_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) { *val = v->arch.vmce.bank[bank].mci_addr; if ( *val ) - mce_printk(MCE_VERBOSE, "MCE: %pv: rd MC%u_ADDR %#"PRIx64"\n", + mce_printk(MCE_VERBOSE, "MCE: %pv: rd MC%u_ADDR %#" PRIx64 "\n", v, bank, *val); } break; @@ -141,13 +141,13 @@ static int bank_mce_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) { *val = v->arch.vmce.bank[bank].mci_misc; if ( *val ) - mce_printk(MCE_VERBOSE, "MCE: %pv: rd MC%u_MISC %#"PRIx64"\n", + mce_printk(MCE_VERBOSE, "MCE: %pv: rd MC%u_MISC %#" PRIx64 "\n", v, bank, *val); } break; default: - switch ( boot_cpu_data.x86_vendor ) + switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: ret = vmce_intel_rdmsr(v, msr, val); @@ -181,24 +181,26 @@ int vmce_rdmsr(uint32_t msr, uint64_t *val) spin_lock(&cur->arch.vmce.lock); - switch ( msr ) + switch (msr) { case MSR_IA32_MCG_STATUS: *val = cur->arch.vmce.mcg_status; if ( *val ) - mce_printk(MCE_VERBOSE, - "MCE: %pv: rd MCG_STATUS %#"PRIx64"\n", cur, *val); + mce_printk(MCE_VERBOSE, "MCE: %pv: rd MCG_STATUS %#" PRIx64 "\n", + cur, *val); break; case MSR_IA32_MCG_CAP: *val = cur->arch.vmce.mcg_cap; - mce_printk(MCE_VERBOSE, "MCE: %pv: rd MCG_CAP %#"PRIx64"\n", cur, *val); + mce_printk(MCE_VERBOSE, "MCE: %pv: rd MCG_CAP %#" PRIx64 "\n", cur, + *val); break; case MSR_IA32_MCG_CTL: if ( cur->arch.vmce.mcg_cap & MCG_CTL_P ) *val = ~0ULL; - mce_printk(MCE_VERBOSE, "MCE: %pv: rd MCG_CTL %#"PRIx64"\n", cur, *val); + mce_printk(MCE_VERBOSE, "MCE: %pv: rd MCG_CTL %#" PRIx64 "\n", cur, + *val); break; case MSR_IA32_MCG_EXT_CTL: @@ -210,7 +212,7 @@ int vmce_rdmsr(uint32_t msr, uint64_t *val) if ( cur->arch.vmce.mcg_cap & MCG_LMCE_P ) { *val = cur->arch.vmce.mcg_ext_ctl; - mce_printk(MCE_VERBOSE, "MCE: %pv: rd MCG_EXT_CTL %#"PRIx64"\n", + mce_printk(MCE_VERBOSE, "MCE: %pv: rd MCG_EXT_CTL %#" PRIx64 "\n", cur, *val); } else @@ -240,7 +242,7 @@ static int bank_mce_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) int ret = 1; unsigned int bank = (msr - MSR_IA32_MC0_CTL) / 4; - switch ( msr & (-MSR_IA32_MC0_CTL | 3) ) + switch (msr & (-MSR_IA32_MC0_CTL | 3)) { case MSR_IA32_MC0_CTL: /* @@ -250,8 +252,8 @@ static int bank_mce_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) break; case MSR_IA32_MC0_STATUS: - mce_printk(MCE_VERBOSE, "MCE: %pv: wr MC%u_STATUS %#"PRIx64"\n", - v, bank, val); + mce_printk(MCE_VERBOSE, "MCE: %pv: wr MC%u_STATUS %#" PRIx64 "\n", v, + bank, val); if ( val ) ret = -1; else if ( bank < GUEST_MC_BANK_NUM ) @@ -259,8 +261,8 @@ static int 
bank_mce_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) break; case MSR_IA32_MC0_ADDR: - mce_printk(MCE_VERBOSE, "MCE: %pv: wr MC%u_ADDR %#"PRIx64"\n", - v, bank, val); + mce_printk(MCE_VERBOSE, "MCE: %pv: wr MC%u_ADDR %#" PRIx64 "\n", v, + bank, val); if ( val ) ret = -1; else if ( bank < GUEST_MC_BANK_NUM ) @@ -268,8 +270,8 @@ static int bank_mce_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) break; case MSR_IA32_MC0_MISC: - mce_printk(MCE_VERBOSE, "MCE: %pv: wr MC%u_MISC %#"PRIx64"\n", - v, bank, val); + mce_printk(MCE_VERBOSE, "MCE: %pv: wr MC%u_MISC %#" PRIx64 "\n", v, + bank, val); if ( val ) ret = -1; else if ( bank < GUEST_MC_BANK_NUM ) @@ -277,7 +279,7 @@ static int bank_mce_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) break; default: - switch ( boot_cpu_data.x86_vendor ) + switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: ret = vmce_intel_wrmsr(v, msr, val); @@ -309,7 +311,7 @@ int vmce_wrmsr(uint32_t msr, uint64_t val) spin_lock(&cur->arch.vmce.lock); - switch ( msr ) + switch (msr) { case MSR_IA32_MCG_CTL: /* If MCG_CTL exists then stick to all 1's, else ignore. */ @@ -317,8 +319,8 @@ int vmce_wrmsr(uint32_t msr, uint64_t val) case MSR_IA32_MCG_STATUS: cur->arch.vmce.mcg_status = val; - mce_printk(MCE_VERBOSE, "MCE: %pv: wr MCG_STATUS %"PRIx64"\n", - cur, val); + mce_printk(MCE_VERBOSE, "MCE: %pv: wr MCG_STATUS %" PRIx64 "\n", cur, + val); break; case MSR_IA32_MCG_CAP: @@ -336,8 +338,8 @@ int vmce_wrmsr(uint32_t msr, uint64_t val) cur->arch.vmce.mcg_ext_ctl = val; else ret = -1; - mce_printk(MCE_VERBOSE, "MCE: %pv: wr MCG_EXT_CTL %"PRIx64"%s\n", - cur, val, (ret == -1) ? ", not supported" : ""); + mce_printk(MCE_VERBOSE, "MCE: %pv: wr MCG_EXT_CTL %" PRIx64 "%s\n", cur, + val, (ret == -1) ? ", not supported" : ""); break; default: @@ -381,8 +383,8 @@ static int vmce_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h) return err ?: vmce_restore_vcpu(v, &ctxt); } -HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt, - vmce_load_vcpu_ctxt, 1, HVMSR_PER_VCPU); +HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt, vmce_load_vcpu_ctxt, + 1, HVMSR_PER_VCPU); #endif /* @@ -399,7 +401,7 @@ int inject_vmce(struct domain *d, int vcpu) struct vcpu *v; int ret = -ESRCH; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { if ( vcpu != VMCE_INJECT_BROADCAST && vcpu != v->vcpu_id ) continue; @@ -436,8 +438,10 @@ static int vcpu_fill_mc_msrs(struct vcpu *v, uint64_t mcg_status, { if ( v->arch.vmce.mcg_status & MCG_STATUS_MCIP ) { - mce_printk(MCE_QUIET, "MCE: %pv: guest has not handled previous" - " vMCE yet!\n", v); + mce_printk(MCE_QUIET, + "MCE: %pv: guest has not handled previous" + " vMCE yet!\n", + v); return -EBUSY; } @@ -483,15 +487,15 @@ int fill_vmsr_data(struct mcinfo_bank *mc_bank, struct domain *d, * the severest error on vCPU0, the less severe errors on other * vCPUs will not prevent guest from recovering on those vCPUs. 
*/ - ret = vcpu_fill_mc_msrs(v, gstatus, mc_bank->mc_status, - mc_bank->mc_addr, mc_bank->mc_misc); + ret = vcpu_fill_mc_msrs(v, gstatus, mc_bank->mc_status, mc_bank->mc_addr, + mc_bank->mc_misc); if ( broadcast ) - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { if ( !v->vcpu_id ) continue; - err = vcpu_fill_mc_msrs(v, MCG_STATUS_MCIP | MCG_STATUS_RIPV, - 0, 0, 0); + err = vcpu_fill_mc_msrs(v, MCG_STATUS_MCIP | MCG_STATUS_RIPV, 0, 0, + 0); if ( err ) ret = err; } @@ -500,10 +504,9 @@ int fill_vmsr_data(struct mcinfo_bank *mc_bank, struct domain *d, } /* It's said some ram is setup as mmio_direct for UC cache attribute */ -#define P2M_UNMAP_TYPES (p2m_to_mask(p2m_ram_rw) \ - | p2m_to_mask(p2m_ram_logdirty) \ - | p2m_to_mask(p2m_ram_ro) \ - | p2m_to_mask(p2m_mmio_direct)) +#define P2M_UNMAP_TYPES \ + (p2m_to_mask(p2m_ram_rw) | p2m_to_mask(p2m_ram_logdirty) | \ + p2m_to_mask(p2m_ram_ro) | p2m_to_mask(p2m_mmio_direct)) /* * Currently all CPUs are redenzevous at the MCE softirq handler, no @@ -530,7 +533,7 @@ int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn) rc = -1; r_mfn = get_gfn_query(d, gfn, &pt); - if ( p2m_to_mask(pt) & P2M_UNMAP_TYPES) + if ( p2m_to_mask(pt) & P2M_UNMAP_TYPES ) { ASSERT(mfn_eq(r_mfn, mfn)); rc = p2m_change_type_one(d, gfn, pt, p2m_ram_broken); @@ -551,7 +554,7 @@ int vmce_enable_mca_cap(struct domain *d, uint64_t cap) { if ( !lmce_support ) return -EINVAL; - for_each_vcpu(d, v) + for_each_vcpu (d, v) v->arch.vmce.mcg_cap |= MCG_LMCE_P; } diff --git a/xen/arch/x86/cpu/mtrr/generic.c b/xen/arch/x86/cpu/mtrr/generic.c index 8f9cf1b1d7..983e06273d 100644 --- a/xen/arch/x86/cpu/mtrr/generic.c +++ b/xen/arch/x86/cpu/mtrr/generic.c @@ -1,5 +1,5 @@ /* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong - because MTRRs can span upto 40 bits (36bits on most modern x86) */ + because MTRRs can span upto 40 bits (36bits on most modern x86) */ #include #include #include @@ -13,42 +13,41 @@ #include #include "mtrr.h" -static const struct fixed_range_block { - uint32_t base_msr; /* start address of an MTRR block */ - unsigned int ranges; /* number of MTRRs in this block */ +static const struct fixed_range_block +{ + uint32_t base_msr; /* start address of an MTRR block */ + unsigned int ranges; /* number of MTRRs in this block */ } fixed_range_blocks[] = { - { MSR_MTRRfix64K_00000, (0x80000 - 0x00000) >> (16 + 3) }, - { MSR_MTRRfix16K_80000, (0xC0000 - 0x80000) >> (14 + 3) }, - { MSR_MTRRfix4K_C0000, (0x100000 - 0xC0000) >> (12 + 3) }, - {} -}; + {MSR_MTRRfix64K_00000, (0x80000 - 0x00000) >> (16 + 3)}, + {MSR_MTRRfix16K_80000, (0xC0000 - 0x80000) >> (14 + 3)}, + {MSR_MTRRfix4K_C0000, (0x100000 - 0xC0000) >> (12 + 3)}, + {}}; static unsigned long smp_changes_mask; struct mtrr_state mtrr_state = {}; /* Get the MSR pair relating to a var range */ -static void -get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) +static void get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) { - rdmsrl(MSR_IA32_MTRR_PHYSBASE(index), vr->base); - rdmsrl(MSR_IA32_MTRR_PHYSMASK(index), vr->mask); + rdmsrl(MSR_IA32_MTRR_PHYSBASE(index), vr->base); + rdmsrl(MSR_IA32_MTRR_PHYSMASK(index), vr->mask); } -static void -get_fixed_ranges(mtrr_type * frs) +static void get_fixed_ranges(mtrr_type *frs) { - uint64_t *p = (uint64_t *) frs; - const struct fixed_range_block *block; + uint64_t *p = (uint64_t *)frs; + const struct fixed_range_block *block; - if (!mtrr_state.have_fixed) - return; + if ( !mtrr_state.have_fixed ) + return; - for (block = 
fixed_range_blocks; block->ranges; ++block) { - unsigned int i; + for ( block = fixed_range_blocks; block->ranges; ++block ) + { + unsigned int i; - for (i = 0; i < block->ranges; ++i, ++p) - rdmsrl(block->base_msr + i, *p); - } + for ( i = 0; i < block->ranges; ++i, ++p ) + rdmsrl(block->base_msr + i, *p); + } } bool is_var_mtrr_overlapped(const struct mtrr_state *m) @@ -85,38 +84,39 @@ bool is_var_mtrr_overlapped(const struct mtrr_state *m) void mtrr_save_fixed_ranges(void *info) { - get_fixed_ranges(mtrr_state.fixed_ranges); + get_fixed_ranges(mtrr_state.fixed_ranges); } /* Grab all of the MTRR state for this CPU into *state */ void __init get_mtrr_state(void) { - unsigned int i; - struct mtrr_var_range *vrs; - uint64_t msr_content; - - if (!mtrr_state.var_ranges) { - mtrr_state.var_ranges = xmalloc_array(struct mtrr_var_range, - num_var_ranges); - if (!mtrr_state.var_ranges) - return; - } - vrs = mtrr_state.var_ranges; - - rdmsrl(MSR_MTRRcap, msr_content); - mtrr_state.have_fixed = (msr_content >> 8) & 1; - - for (i = 0; i < num_var_ranges; i++) - get_mtrr_var_range(i, &vrs[i]); - get_fixed_ranges(mtrr_state.fixed_ranges); - - rdmsrl(MSR_MTRRdefType, msr_content); - mtrr_state.def_type = (msr_content & 0xff); - mtrr_state.enabled = MASK_EXTR(msr_content, MTRRdefType_E); - mtrr_state.fixed_enabled = MASK_EXTR(msr_content, MTRRdefType_FE); - - /* Store mtrr_cap for HVM MTRR virtualisation. */ - rdmsrl(MSR_MTRRcap, mtrr_state.mtrr_cap); + unsigned int i; + struct mtrr_var_range *vrs; + uint64_t msr_content; + + if ( !mtrr_state.var_ranges ) + { + mtrr_state.var_ranges = + xmalloc_array(struct mtrr_var_range, num_var_ranges); + if ( !mtrr_state.var_ranges ) + return; + } + vrs = mtrr_state.var_ranges; + + rdmsrl(MSR_MTRRcap, msr_content); + mtrr_state.have_fixed = (msr_content >> 8) & 1; + + for ( i = 0; i < num_var_ranges; i++ ) + get_mtrr_var_range(i, &vrs[i]); + get_fixed_ranges(mtrr_state.fixed_ranges); + + rdmsrl(MSR_MTRRdefType, msr_content); + mtrr_state.def_type = (msr_content & 0xff); + mtrr_state.enabled = MASK_EXTR(msr_content, MTRRdefType_E); + mtrr_state.fixed_enabled = MASK_EXTR(msr_content, MTRRdefType_FE); + + /* Store mtrr_cap for HVM MTRR virtualisation. */ + rdmsrl(MSR_MTRRcap, mtrr_state.mtrr_cap); } static bool_t __initdata mtrr_show; @@ -124,16 +124,15 @@ boolean_param("mtrr.show", mtrr_show); static const char *__init mtrr_attrib_to_str(mtrr_type x) { - static const char __initconst strings[MTRR_NUM_TYPES][16] = - { - [MTRR_TYPE_UNCACHABLE] = "uncachable", - [MTRR_TYPE_WRCOMB] = "write-combining", - [MTRR_TYPE_WRTHROUGH] = "write-through", - [MTRR_TYPE_WRPROT] = "write-protect", - [MTRR_TYPE_WRBACK] = "write-back", - }; - - return (x < ARRAY_SIZE(strings) && strings[x][0]) ? strings[x] : "?"; + static const char __initconst strings[MTRR_NUM_TYPES][16] = { + [MTRR_TYPE_UNCACHABLE] = "uncachable", + [MTRR_TYPE_WRCOMB] = "write-combining", + [MTRR_TYPE_WRTHROUGH] = "write-through", + [MTRR_TYPE_WRPROT] = "write-protect", + [MTRR_TYPE_WRBACK] = "write-back", + }; + + return (x < ARRAY_SIZE(strings) && strings[x][0]) ? 
strings[x] : "?"; } static unsigned int __initdata last_fixed_start; @@ -142,113 +141,124 @@ static mtrr_type __initdata last_fixed_type; static void __init print_fixed_last(const char *level) { - if (!last_fixed_end) - return; + if ( !last_fixed_end ) + return; - printk("%s %05x-%05x %s\n", level, last_fixed_start, - last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type)); + printk("%s %05x-%05x %s\n", level, last_fixed_start, last_fixed_end - 1, + mtrr_attrib_to_str(last_fixed_type)); - last_fixed_end = 0; + last_fixed_end = 0; } static void __init update_fixed_last(unsigned int base, unsigned int end, - mtrr_type type) + mtrr_type type) { - last_fixed_start = base; - last_fixed_end = end; - last_fixed_type = type; + last_fixed_start = base; + last_fixed_end = end; + last_fixed_type = type; } static void __init print_fixed(unsigned int base, unsigned int step, - const mtrr_type *types, const char *level) + const mtrr_type *types, const char *level) { - unsigned i; - - for (i = 0; i < 8; ++i, ++types, base += step) { - if (last_fixed_end == 0) { - update_fixed_last(base, base + step, *types); - continue; - } - if (last_fixed_end == base && last_fixed_type == *types) { - last_fixed_end = base + step; - continue; - } - /* new segments: gap or different type */ - print_fixed_last(level); - update_fixed_last(base, base + step, *types); - } + unsigned i; + + for ( i = 0; i < 8; ++i, ++types, base += step ) + { + if ( last_fixed_end == 0 ) + { + update_fixed_last(base, base + step, *types); + continue; + } + if ( last_fixed_end == base && last_fixed_type == *types ) + { + last_fixed_end = base + step; + continue; + } + /* new segments: gap or different type */ + print_fixed_last(level); + update_fixed_last(base, base + step, *types); + } } static void __init print_mtrr_state(const char *level) { - unsigned int i; - int width; - - printk("%sMTRR default type: %s\n", level, - mtrr_attrib_to_str(mtrr_state.def_type)); - if (mtrr_state.have_fixed) { - const mtrr_type *fr = mtrr_state.fixed_ranges; - const struct fixed_range_block *block = fixed_range_blocks; - unsigned int base = 0, step = 0x10000; - - printk("%sMTRR fixed ranges %sabled:\n", level, - mtrr_state.fixed_enabled ? "en" : "dis"); - for (; block->ranges; ++block, step >>= 2) { - for (i = 0; i < block->ranges; ++i, fr += 8) { - print_fixed(base, step, fr, level); - base += 8 * step; - } - } - print_fixed_last(level); - } - printk("%sMTRR variable ranges %sabled:\n", level, - mtrr_state.enabled ? "en" : "dis"); - width = (paddr_bits - PAGE_SHIFT + 3) / 4; - - for (i = 0; i < num_var_ranges; ++i) { - if (mtrr_state.var_ranges[i].mask & MTRR_PHYSMASK_VALID) - printk("%s %u base %0*"PRIx64"000 mask %0*"PRIx64"000 %s\n", - level, i, - width, mtrr_state.var_ranges[i].base >> 12, - width, mtrr_state.var_ranges[i].mask >> 12, - mtrr_attrib_to_str(mtrr_state.var_ranges[i].base & - MTRR_PHYSBASE_TYPE_MASK)); - else - printk("%s %u disabled\n", level, i); - } - - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD - && boot_cpu_data.x86 >= 0xf) { - uint64_t syscfg, tom2; - - rdmsrl(MSR_K8_SYSCFG, syscfg); - if (syscfg & (1 << 21)) { - rdmsrl(MSR_K8_TOP_MEM2, tom2); - printk("%sTOM2: %012"PRIx64"%s\n", level, tom2, - syscfg & (1 << 22) ? 
" (WB)" : ""); - } - } + unsigned int i; + int width; + + printk("%sMTRR default type: %s\n", level, + mtrr_attrib_to_str(mtrr_state.def_type)); + if ( mtrr_state.have_fixed ) + { + const mtrr_type *fr = mtrr_state.fixed_ranges; + const struct fixed_range_block *block = fixed_range_blocks; + unsigned int base = 0, step = 0x10000; + + printk("%sMTRR fixed ranges %sabled:\n", level, + mtrr_state.fixed_enabled ? "en" : "dis"); + for ( ; block->ranges; ++block, step >>= 2 ) + { + for ( i = 0; i < block->ranges; ++i, fr += 8 ) + { + print_fixed(base, step, fr, level); + base += 8 * step; + } + } + print_fixed_last(level); + } + printk("%sMTRR variable ranges %sabled:\n", level, + mtrr_state.enabled ? "en" : "dis"); + width = (paddr_bits - PAGE_SHIFT + 3) / 4; + + for ( i = 0; i < num_var_ranges; ++i ) + { + if ( mtrr_state.var_ranges[i].mask & MTRR_PHYSMASK_VALID ) + printk("%s %u base %0*" PRIx64 "000 mask %0*" PRIx64 "000 %s\n", + level, i, width, mtrr_state.var_ranges[i].base >> 12, width, + mtrr_state.var_ranges[i].mask >> 12, + mtrr_attrib_to_str(mtrr_state.var_ranges[i].base & + MTRR_PHYSBASE_TYPE_MASK)); + else + printk("%s %u disabled\n", level, i); + } + + if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 >= 0xf ) + { + uint64_t syscfg, tom2; + + rdmsrl(MSR_K8_SYSCFG, syscfg); + if ( syscfg & (1 << 21) ) + { + rdmsrl(MSR_K8_TOP_MEM2, tom2); + printk("%sTOM2: %012" PRIx64 "%s\n", level, tom2, + syscfg & (1 << 22) ? " (WB)" : ""); + } + } } /* Some BIOS's are fucked and don't set all MTRRs the same! */ void __init mtrr_state_warn(void) { - unsigned long mask = smp_changes_mask; - - if (mtrr_show) - print_mtrr_state(mask ? KERN_WARNING : ""); - if (!mask) - return; - if (mask & MTRR_CHANGE_MASK_FIXED) - printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n"); - if (mask & MTRR_CHANGE_MASK_VARIABLE) - printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n"); - if (mask & MTRR_CHANGE_MASK_DEFTYPE) - printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n"); - printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n"); - printk(KERN_INFO "mtrr: corrected configuration.\n"); - if (!mtrr_show) - print_mtrr_state(KERN_INFO); + unsigned long mask = smp_changes_mask; + + if ( mtrr_show ) + print_mtrr_state(mask ? KERN_WARNING : ""); + if ( !mask ) + return; + if ( mask & MTRR_CHANGE_MASK_FIXED ) + printk(KERN_WARNING + "mtrr: your CPUs had inconsistent fixed MTRR settings\n"); + if ( mask & MTRR_CHANGE_MASK_VARIABLE ) + printk(KERN_WARNING + "mtrr: your CPUs had inconsistent variable MTRR settings\n"); + if ( mask & MTRR_CHANGE_MASK_DEFTYPE ) + printk(KERN_WARNING + "mtrr: your CPUs had inconsistent MTRRdefType settings\n"); + printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n"); + printk(KERN_INFO "mtrr: corrected configuration.\n"); + if ( !mtrr_show ) + print_mtrr_state(KERN_INFO); } /* Doesn't attempt to pass an error out to MTRR users @@ -256,12 +266,11 @@ void __init mtrr_state_warn(void) worth it because the best error handling is to ignore it. */ static void mtrr_wrmsr(unsigned int msr, uint64_t msr_content) { - if (wrmsr_safe(msr, msr_content) < 0) - printk(KERN_ERR - "MTRR: CPU %u: Writing MSR %x to %"PRIx64" failed\n", - smp_processor_id(), msr, msr_content); - /* Cache overlap status for efficient HVM MTRR virtualisation. 
*/ - mtrr_state.overlapped = is_var_mtrr_overlapped(&mtrr_state); + if ( wrmsr_safe(msr, msr_content) < 0 ) + printk(KERN_ERR "MTRR: CPU %u: Writing MSR %x to %" PRIx64 " failed\n", + smp_processor_id(), msr, msr_content); + /* Cache overlap status for efficient HVM MTRR virtualisation. */ + mtrr_state.overlapped = is_var_mtrr_overlapped(&mtrr_state); } /** @@ -274,63 +283,67 @@ static void mtrr_wrmsr(unsigned int msr, uint64_t msr_content) */ static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords) { - uint64_t msr_content, val; + uint64_t msr_content, val; - rdmsrl(msr, msr_content); - val = ((uint64_t)msrwords[1] << 32) | msrwords[0]; + rdmsrl(msr, msr_content); + val = ((uint64_t)msrwords[1] << 32) | msrwords[0]; - if (msr_content != val) { - mtrr_wrmsr(msr, val); - *changed = true; - } + if ( msr_content != val ) + { + mtrr_wrmsr(msr, val); + *changed = true; + } } -int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg) +int generic_get_free_region(unsigned long base, unsigned long size, + int replace_reg) /* [SUMMARY] Get a free MTRR. The starting (base) address of the region. The size (in bytes) of the region. [RETURNS] The index of the region on success, else -1 on error. */ { - int i, max; - mtrr_type ltype; - unsigned long lbase, lsize; - - max = num_var_ranges; - if (replace_reg >= 0 && replace_reg < max) - return replace_reg; - for (i = 0; i < max; ++i) { - mtrr_if->get(i, &lbase, &lsize, <ype); - if (lsize == 0) - return i; - } - return -ENOSPC; + int i, max; + mtrr_type ltype; + unsigned long lbase, lsize; + + max = num_var_ranges; + if ( replace_reg >= 0 && replace_reg < max ) + return replace_reg; + for ( i = 0; i < max; ++i ) + { + mtrr_if->get(i, &lbase, &lsize, <ype); + if ( lsize == 0 ) + return i; + } + return -ENOSPC; } static void generic_get_mtrr(unsigned int reg, unsigned long *base, - unsigned long *size, mtrr_type *type) + unsigned long *size, mtrr_type *type) { - uint64_t _mask, _base; - - rdmsrl(MSR_IA32_MTRR_PHYSMASK(reg), _mask); - if (!(_mask & MTRR_PHYSMASK_VALID)) { - /* Invalid (i.e. free) range */ - *base = 0; - *size = 0; - *type = 0; - return; - } - - rdmsrl(MSR_IA32_MTRR_PHYSBASE(reg), _base); - - /* Work out the shifted address mask. */ - _mask = size_or_mask | (_mask >> PAGE_SHIFT); - - /* This works correctly if size is a power of two, i.e. a - contiguous range. */ - *size = -(uint32_t)_mask; - *base = _base >> PAGE_SHIFT; - *type = _base & 0xff; + uint64_t _mask, _base; + + rdmsrl(MSR_IA32_MTRR_PHYSMASK(reg), _mask); + if ( !(_mask & MTRR_PHYSMASK_VALID) ) + { + /* Invalid (i.e. free) range */ + *base = 0; + *size = 0; + *type = 0; + return; + } + + rdmsrl(MSR_IA32_MTRR_PHYSBASE(reg), _base); + + /* Work out the shifted address mask. */ + _mask = size_or_mask | (_mask >> PAGE_SHIFT); + + /* This works correctly if size is a power of two, i.e. a + contiguous range. 
*/ + *size = -(uint32_t)_mask; + *base = _base >> PAGE_SHIFT; + *type = _base & 0xff; } /** @@ -339,58 +352,60 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, */ static bool set_fixed_ranges(mtrr_type *frs) { - unsigned long long *saved = (unsigned long long *) frs; - bool changed = false; - int block=-1, range; + unsigned long long *saved = (unsigned long long *)frs; + bool changed = false; + int block = -1, range; - while (fixed_range_blocks[++block].ranges) - for (range=0; range < fixed_range_blocks[block].ranges; range++) - set_fixed_range(fixed_range_blocks[block].base_msr + range, - &changed, (unsigned int *) saved++); + while ( fixed_range_blocks[++block].ranges ) + for ( range = 0; range < fixed_range_blocks[block].ranges; range++ ) + set_fixed_range(fixed_range_blocks[block].base_msr + range, + &changed, (unsigned int *)saved++); - return changed; + return changed; } /* Set the MSR pair relating to a var range. Returns true if changes are made */ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) { - uint32_t lo, hi, base_lo, base_hi, mask_lo, mask_hi; - uint64_t msr_content; - bool changed = false; - - rdmsrl(MSR_IA32_MTRR_PHYSBASE(index), msr_content); - lo = (uint32_t)msr_content; - hi = (uint32_t)(msr_content >> 32); - base_lo = (uint32_t)vr->base; - base_hi = (uint32_t)(vr->base >> 32); - - lo &= 0xfffff0ffUL; - base_lo &= 0xfffff0ffUL; - hi &= size_and_mask >> (32 - PAGE_SHIFT); - base_hi &= size_and_mask >> (32 - PAGE_SHIFT); - - if ((base_lo != lo) || (base_hi != hi)) { - mtrr_wrmsr(MSR_IA32_MTRR_PHYSBASE(index), vr->base); - changed = true; - } - - rdmsrl(MSR_IA32_MTRR_PHYSMASK(index), msr_content); - lo = (uint32_t)msr_content; - hi = (uint32_t)(msr_content >> 32); - mask_lo = (uint32_t)vr->mask; - mask_hi = (uint32_t)(vr->mask >> 32); - - lo &= 0xfffff800UL; - mask_lo &= 0xfffff800UL; - hi &= size_and_mask >> (32 - PAGE_SHIFT); - mask_hi &= size_and_mask >> (32 - PAGE_SHIFT); - - if ((mask_lo != lo) || (mask_hi != hi)) { - mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(index), vr->mask); - changed = true; - } - return changed; + uint32_t lo, hi, base_lo, base_hi, mask_lo, mask_hi; + uint64_t msr_content; + bool changed = false; + + rdmsrl(MSR_IA32_MTRR_PHYSBASE(index), msr_content); + lo = (uint32_t)msr_content; + hi = (uint32_t)(msr_content >> 32); + base_lo = (uint32_t)vr->base; + base_hi = (uint32_t)(vr->base >> 32); + + lo &= 0xfffff0ffUL; + base_lo &= 0xfffff0ffUL; + hi &= size_and_mask >> (32 - PAGE_SHIFT); + base_hi &= size_and_mask >> (32 - PAGE_SHIFT); + + if ( (base_lo != lo) || (base_hi != hi) ) + { + mtrr_wrmsr(MSR_IA32_MTRR_PHYSBASE(index), vr->base); + changed = true; + } + + rdmsrl(MSR_IA32_MTRR_PHYSMASK(index), msr_content); + lo = (uint32_t)msr_content; + hi = (uint32_t)(msr_content >> 32); + mask_lo = (uint32_t)vr->mask; + mask_hi = (uint32_t)(vr->mask >> 32); + + lo &= 0xfffff800UL; + mask_lo &= 0xfffff800UL; + hi &= size_and_mask >> (32 - PAGE_SHIFT); + mask_hi &= size_and_mask >> (32 - PAGE_SHIFT); + + if ( (mask_lo != lo) || (mask_hi != hi) ) + { + mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(index), vr->mask); + changed = true; + } + return changed; } static uint64_t deftype; @@ -403,30 +418,30 @@ static unsigned long set_mtrr_state(void) [RETURNS] 0 if no changes made, else a mask indication what was changed. 
*/ { - unsigned int i; - unsigned long change_mask = 0; - - for (i = 0; i < num_var_ranges; i++) - if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) - change_mask |= MTRR_CHANGE_MASK_VARIABLE; - - if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges)) - change_mask |= MTRR_CHANGE_MASK_FIXED; - - /* Set_mtrr_restore restores the old value of MTRRdefType, - so to set it we fiddle with the saved value */ - if ((deftype & 0xff) != mtrr_state.def_type - || MASK_EXTR(deftype, MTRRdefType_E) != mtrr_state.enabled - || MASK_EXTR(deftype, MTRRdefType_FE) != mtrr_state.fixed_enabled) { - deftype = (deftype & ~0xcff) | mtrr_state.def_type | - MASK_INSR(mtrr_state.enabled, MTRRdefType_E) | - MASK_INSR(mtrr_state.fixed_enabled, MTRRdefType_FE); - change_mask |= MTRR_CHANGE_MASK_DEFTYPE; - } - - return change_mask; -} + unsigned int i; + unsigned long change_mask = 0; + + for ( i = 0; i < num_var_ranges; i++ ) + if ( set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]) ) + change_mask |= MTRR_CHANGE_MASK_VARIABLE; + if ( mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges) ) + change_mask |= MTRR_CHANGE_MASK_FIXED; + + /* Set_mtrr_restore restores the old value of MTRRdefType, + so to set it we fiddle with the saved value */ + if ( (deftype & 0xff) != mtrr_state.def_type || + MASK_EXTR(deftype, MTRRdefType_E) != mtrr_state.enabled || + MASK_EXTR(deftype, MTRRdefType_FE) != mtrr_state.fixed_enabled ) + { + deftype = (deftype & ~0xcff) | mtrr_state.def_type | + MASK_INSR(mtrr_state.enabled, MTRRdefType_E) | + MASK_INSR(mtrr_state.fixed_enabled, MTRRdefType_FE); + change_mask |= MTRR_CHANGE_MASK_DEFTYPE; + } + + return change_mask; +} static DEFINE_SPINLOCK(set_atomicity_lock); @@ -439,79 +454,80 @@ static DEFINE_SPINLOCK(set_atomicity_lock); static bool prepare_set(void) { - unsigned long cr4; + unsigned long cr4; - /* Note that this is not ideal, since the cache is only flushed/disabled - for this CPU while the MTRRs are changed, but changing this requires - more invasive changes to the way the kernel boots */ + /* Note that this is not ideal, since the cache is only flushed/disabled + for this CPU while the MTRRs are changed, but changing this requires + more invasive changes to the way the kernel boots */ - spin_lock(&set_atomicity_lock); + spin_lock(&set_atomicity_lock); - /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ - write_cr0(read_cr0() | X86_CR0_CD); - wbinvd(); + /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. 
*/ + write_cr0(read_cr0() | X86_CR0_CD); + wbinvd(); - cr4 = read_cr4(); - if (cr4 & X86_CR4_PGE) - write_cr4(cr4 & ~X86_CR4_PGE); - else if (use_invpcid) - invpcid_flush_all(); - else - write_cr3(read_cr3()); + cr4 = read_cr4(); + if ( cr4 & X86_CR4_PGE ) + write_cr4(cr4 & ~X86_CR4_PGE); + else if ( use_invpcid ) + invpcid_flush_all(); + else + write_cr3(read_cr3()); - /* Save MTRR state */ - rdmsrl(MSR_MTRRdefType, deftype); + /* Save MTRR state */ + rdmsrl(MSR_MTRRdefType, deftype); - /* Disable MTRRs, and set the default type to uncached */ - mtrr_wrmsr(MSR_MTRRdefType, deftype & ~0xcff); + /* Disable MTRRs, and set the default type to uncached */ + mtrr_wrmsr(MSR_MTRRdefType, deftype & ~0xcff); - return cr4 & X86_CR4_PGE; + return cr4 & X86_CR4_PGE; } static void post_set(bool pge) { - /* Intel (P6) standard MTRRs */ - mtrr_wrmsr(MSR_MTRRdefType, deftype); + /* Intel (P6) standard MTRRs */ + mtrr_wrmsr(MSR_MTRRdefType, deftype); - /* Enable caches */ - write_cr0(read_cr0() & ~X86_CR0_CD); + /* Enable caches */ + write_cr0(read_cr0() & ~X86_CR0_CD); - /* Reenable CR4.PGE (also flushes the TLB) */ - if (pge) - write_cr4(read_cr4() | X86_CR4_PGE); - else if (use_invpcid) - invpcid_flush_all(); - else - write_cr3(read_cr3()); + /* Reenable CR4.PGE (also flushes the TLB) */ + if ( pge ) + write_cr4(read_cr4() | X86_CR4_PGE); + else if ( use_invpcid ) + invpcid_flush_all(); + else + write_cr3(read_cr3()); - spin_unlock(&set_atomicity_lock); + spin_unlock(&set_atomicity_lock); } static void generic_set_all(void) { - unsigned long mask, count; - unsigned long flags; - bool pge; + unsigned long mask, count; + unsigned long flags; + bool pge; - local_irq_save(flags); - pge = prepare_set(); + local_irq_save(flags); + pge = prepare_set(); - /* Actually set the state */ - mask = set_mtrr_state(); + /* Actually set the state */ + mask = set_mtrr_state(); - post_set(pge); - local_irq_restore(flags); + post_set(pge); + local_irq_restore(flags); - /* Use the atomic bitops to update the global mask */ - for (count = 0; count < sizeof mask * 8; ++count) { - if (mask & 0x01) - set_bit(count, &smp_changes_mask); - mask >>= 1; - } + /* Use the atomic bitops to update the global mask */ + for ( count = 0; count < sizeof mask * 8; ++count ) + { + if ( mask & 0x01 ) + set_bit(count, &smp_changes_mask); + mask >>= 1; + } } static void generic_set_mtrr(unsigned int reg, unsigned long base, - unsigned long size, mtrr_type type) + unsigned long size, mtrr_type type) /* [SUMMARY] Set variable MTRR register on the local CPU. The register to set. The base address of the region. @@ -522,88 +538,98 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base, [RETURNS] Nothing. */ { - unsigned long flags; - struct mtrr_var_range *vr; - bool pge; - - vr = &mtrr_state.var_ranges[reg]; - - local_irq_save(flags); - pge = prepare_set(); - - if (size == 0) { - /* The invalid bit is kept in the mask, so we simply clear the - relevant mask register to disable a range. 
*/ - memset(vr, 0, sizeof(*vr)); - mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), 0); - } else { - uint32_t base_lo, base_hi, mask_lo, mask_hi; - - base_lo = base << PAGE_SHIFT | type; - base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT); - mask_lo = (-size << PAGE_SHIFT) | MTRR_PHYSMASK_VALID; - mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT); - vr->base = ((uint64_t)base_hi << 32) | base_lo; - vr->mask = ((uint64_t)mask_hi << 32) | mask_lo; - - mtrr_wrmsr(MSR_IA32_MTRR_PHYSBASE(reg), vr->base); - mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), vr->mask); - } - - post_set(pge); - local_irq_restore(flags); + unsigned long flags; + struct mtrr_var_range *vr; + bool pge; + + vr = &mtrr_state.var_ranges[reg]; + + local_irq_save(flags); + pge = prepare_set(); + + if ( size == 0 ) + { + /* The invalid bit is kept in the mask, so we simply clear the + relevant mask register to disable a range. */ + memset(vr, 0, sizeof(*vr)); + mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), 0); + } + else + { + uint32_t base_lo, base_hi, mask_lo, mask_hi; + + base_lo = base << PAGE_SHIFT | type; + base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT); + mask_lo = (-size << PAGE_SHIFT) | MTRR_PHYSMASK_VALID; + mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT); + vr->base = ((uint64_t)base_hi << 32) | base_lo; + vr->mask = ((uint64_t)mask_hi << 32) | mask_lo; + + mtrr_wrmsr(MSR_IA32_MTRR_PHYSBASE(reg), vr->base); + mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), vr->mask); + } + + post_set(pge); + local_irq_restore(flags); } -int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type) +int generic_validate_add_page(unsigned long base, unsigned long size, + unsigned int type) { - unsigned long lbase, last; - - /* For Intel PPro stepping <= 7, must be 4 MiB aligned - and not touch 0x70000000->0x7003FFFF */ - if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && - boot_cpu_data.x86_model == 1 && - boot_cpu_data.x86_mask <= 7) { - if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { - printk(KERN_WARNING "mtrr: base(%#lx000) is not 4 MiB aligned\n", base); - return -EINVAL; - } - if (!(base + size < 0x70000 || base > 0x7003F) && - (type == MTRR_TYPE_WRCOMB - || type == MTRR_TYPE_WRBACK)) { - printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); - return -EINVAL; - } - } - - /* Check upper bits of base and last are equal and lower bits are 0 - for base and 1 for last */ - last = base + size - 1; - for (lbase = base; !(lbase & 1) && (last & 1); - lbase = lbase >> 1, last = last >> 1) ; - if (lbase != last) { - printk(KERN_WARNING "mtrr: base(%#lx000) is not aligned on a size(%#lx000) boundary\n", - base, size); - return -EINVAL; - } - return 0; -} + unsigned long lbase, last; + /* For Intel PPro stepping <= 7, must be 4 MiB aligned + and not touch 0x70000000->0x7003FFFF */ + if ( is_cpu(INTEL) && boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model == 1 && boot_cpu_data.x86_mask <= 7 ) + { + if ( base & ((1 << (22 - PAGE_SHIFT)) - 1) ) + { + printk(KERN_WARNING "mtrr: base(%#lx000) is not 4 MiB aligned\n", + base); + return -EINVAL; + } + if ( !(base + size < 0x70000 || base > 0x7003F) && + (type == MTRR_TYPE_WRCOMB || type == MTRR_TYPE_WRBACK) ) + { + printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and " + "0x7003FFFF may hang the CPU.\n"); + return -EINVAL; + } + } + + /* Check upper bits of base and last are equal and lower bits are 0 + for base and 1 for last */ + last = base + size - 1; + for ( lbase = base; !(lbase & 1) && (last & 1); + lbase = lbase >> 1, last 
= last >> 1 ) + ; + if ( lbase != last ) + { + printk( + KERN_WARNING + "mtrr: base(%#lx000) is not aligned on a size(%#lx000) boundary\n", + base, size); + return -EINVAL; + } + return 0; +} static int generic_have_wrcomb(void) { - unsigned long config; - rdmsrl(MSR_MTRRcap, config); - return (config & (1ULL << 10)); + unsigned long config; + rdmsrl(MSR_MTRRcap, config); + return (config & (1ULL << 10)); } /* generic structure... */ const struct mtrr_ops generic_mtrr_ops = { - .use_intel_if = true, - .set_all = generic_set_all, - .get = generic_get_mtrr, - .get_free_region = generic_get_free_region, - .set = generic_set_mtrr, - .validate_add_page = generic_validate_add_page, - .have_wrcomb = generic_have_wrcomb, + .use_intel_if = true, + .set_all = generic_set_all, + .get = generic_get_mtrr, + .get_free_region = generic_get_free_region, + .set = generic_set_mtrr, + .validate_add_page = generic_validate_add_page, + .have_wrcomb = generic_have_wrcomb, }; diff --git a/xen/arch/x86/cpu/mtrr/main.c b/xen/arch/x86/cpu/mtrr/main.c index e9df53f00d..4b6c9f66a6 100644 --- a/xen/arch/x86/cpu/mtrr/main.c +++ b/xen/arch/x86/cpu/mtrr/main.c @@ -24,9 +24,9 @@ Operating System Writer's Guide" (Intel document number 242692), section 11.11.7 - This was cleaned and made readable by Patrick Mochel - on 6-7 March 2002. - Source: Intel Architecture Software Developers Manual, Volume 3: + This was cleaned and made readable by Patrick Mochel + on 6-7 March 2002. + Source: Intel Architecture Software Developers Manual, Volume 3: System Programming Guide; Section 9.11. (1997 edition - PPro). */ @@ -46,8 +46,10 @@ #define mutex_lock(_m) spin_lock(_m) #define mutex_unlock(_m) spin_unlock(_m) #define dump_stack() ((void)0) -#define get_cpu() smp_processor_id() -#define put_cpu() do {} while(0) +#define get_cpu() smp_processor_id() +#define put_cpu() \ + do { \ + } while ( 0 ) u32 __read_mostly num_var_ranges = 0; @@ -59,74 +61,77 @@ u64 __read_mostly size_and_mask; const struct mtrr_ops *__read_mostly mtrr_if = NULL; -static void set_mtrr(unsigned int reg, unsigned long base, - unsigned long size, mtrr_type type); - -static const char *const mtrr_strings[MTRR_NUM_TYPES] = -{ - "uncachable", /* 0 */ - "write-combining", /* 1 */ - "?", /* 2 */ - "?", /* 3 */ - "write-through", /* 4 */ - "write-protect", /* 5 */ - "write-back", /* 6 */ +static void set_mtrr(unsigned int reg, unsigned long base, unsigned long size, + mtrr_type type); + +static const char *const mtrr_strings[MTRR_NUM_TYPES] = { + "uncachable", /* 0 */ + "write-combining", /* 1 */ + "?", /* 2 */ + "?", /* 3 */ + "write-through", /* 4 */ + "write-protect", /* 5 */ + "write-back", /* 6 */ }; static const char *mtrr_attrib_to_str(int x) { - return (x <= 6) ? mtrr_strings[x] : "?"; + return (x <= 6) ? mtrr_strings[x] : "?"; } /* Returns non-zero if we have the write-combining memory type */ static int have_wrcomb(void) { - return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0); + return (mtrr_if->have_wrcomb ? 
mtrr_if->have_wrcomb() : 0); } /* This function returns the number of variable MTRRs */ static void __init set_num_var_ranges(void) { - unsigned long config = 0; - - if (use_intel()) { - rdmsrl(MSR_MTRRcap, config); - } else if (is_cpu(AMD)) - config = 2; - else if (is_cpu(CENTAUR)) - config = 8; - num_var_ranges = MASK_EXTR(config, MTRRcap_VCNT); + unsigned long config = 0; + + if ( use_intel() ) + { + rdmsrl(MSR_MTRRcap, config); + } + else if ( is_cpu(AMD) ) + config = 2; + else if ( is_cpu(CENTAUR) ) + config = 8; + num_var_ranges = MASK_EXTR(config, MTRRcap_VCNT); } static void __init init_table(void) { - int i, max; - - max = num_var_ranges; - if ((usage_table = xmalloc_array(unsigned int, max)) == NULL) { - printk(KERN_ERR "mtrr: could not allocate\n"); - return; - } - for (i = 0; i < max; i++) - usage_table[i] = 1; + int i, max; + + max = num_var_ranges; + if ( (usage_table = xmalloc_array(unsigned int, max)) == NULL ) + { + printk(KERN_ERR "mtrr: could not allocate\n"); + return; + } + for ( i = 0; i < max; i++ ) + usage_table[i] = 1; } -struct set_mtrr_data { - atomic_t count; - atomic_t gate; - unsigned long smp_base; - unsigned long smp_size; - unsigned int smp_reg; - mtrr_type smp_type; +struct set_mtrr_data +{ + atomic_t count; + atomic_t gate; + unsigned long smp_base; + unsigned long smp_size; + unsigned int smp_reg; + mtrr_type smp_type; }; /* As per the IA32 SDM vol-3: 10.11.8 MTRR Considerations in MP Systems section * MTRRs updates must to be synchronized across all the processors. * This flags avoids multiple cpu synchronization while booting each cpu. * At the boot & resume time, this flag is turned on in mtrr_aps_sync_begin(). - * Using this flag the mtrr initialization (and the all cpus sync up) in the - * mtrr_ap_init() is avoided while booting each cpu. - * After all the cpus have came up, then mtrr_aps_sync_end() synchronizes all + * Using this flag the mtrr initialization (and the all cpus sync up) in the + * mtrr_ap_init() is avoided while booting each cpu. + * After all the cpus have came up, then mtrr_aps_sync_end() synchronizes all * the cpus and updates mtrrs on all of them. Then this flag is turned off. */ int hold_mtrr_updates_on_aps; @@ -136,37 +141,37 @@ static void ipi_handler(void *info) [RETURNS] Nothing. */ { - struct set_mtrr_data *data = info; - unsigned long flags; - - local_irq_save(flags); - - atomic_dec(&data->count); - while(!atomic_read(&data->gate)) - cpu_relax(); - - /* The master has cleared me to execute */ - if (data->smp_reg == ~0U) /* update all mtrr registers */ - /* At the cpu hot-add time this will reinitialize mtrr - * registres on the existing cpus. It is ok. */ - mtrr_if->set_all(); - else /* single mtrr register update */ - mtrr_if->set(data->smp_reg, data->smp_base, - data->smp_size, data->smp_type); - - atomic_dec(&data->count); - while(atomic_read(&data->gate)) - cpu_relax(); - - atomic_dec(&data->count); - local_irq_restore(flags); + struct set_mtrr_data *data = info; + unsigned long flags; + + local_irq_save(flags); + + atomic_dec(&data->count); + while ( !atomic_read(&data->gate) ) + cpu_relax(); + + /* The master has cleared me to execute */ + if ( data->smp_reg == ~0U ) /* update all mtrr registers */ + /* At the cpu hot-add time this will reinitialize mtrr + * registres on the existing cpus. It is ok. 
*/ + mtrr_if->set_all(); + else /* single mtrr register update */ + mtrr_if->set(data->smp_reg, data->smp_base, data->smp_size, + data->smp_type); + + atomic_dec(&data->count); + while ( atomic_read(&data->gate) ) + cpu_relax(); + + atomic_dec(&data->count); + local_irq_restore(flags); } -static inline int types_compatible(mtrr_type type1, mtrr_type type2) { - return type1 == MTRR_TYPE_UNCACHABLE || - type2 == MTRR_TYPE_UNCACHABLE || - (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) || - (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH); +static inline int types_compatible(mtrr_type type1, mtrr_type type2) +{ + return type1 == MTRR_TYPE_UNCACHABLE || type2 == MTRR_TYPE_UNCACHABLE || + (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) || + (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH); } /** @@ -177,10 +182,10 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) { * @type: mtrr type * * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly: - * + * * 1. Send IPI to do the following: * 2. Disable Interrupts - * 3. Wait for all procs to do so + * 3. Wait for all procs to do so * 4. Enter no-fill cache mode * 5. Flush caches * 6. Clear PGE bit @@ -190,87 +195,87 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) { * 10. Enable all range registers * 11. Flush all TLBs and caches again * 12. Enter normal cache mode and reenable caching - * 13. Set PGE + * 13. Set PGE * 14. Wait for buddies to catch up * 15. Enable interrupts. - * + * * What does that mean for us? Well, first we set data.count to the number * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait * until it hits 0 and proceed. We set the data.gate flag and reset data.count. - * Meanwhile, they are waiting for that flag to be set. Once it's set, each - * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it - * differently, so we call mtrr_if->set() callback and let them take care of it. - * When they're done, they again decrement data->count and wait for data.gate to - * be reset. - * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag. - * Everyone then enables interrupts and we all continue on. + * Meanwhile, they are waiting for that flag to be set. Once it's set, each + * CPU goes through the transition of updating MTRRs. The CPU vendors may each + * do it differently, so we call mtrr_if->set() callback and let them take care + * of it. When they're done, they again decrement data->count and wait for + * data.gate to be reset. When we finish, we wait for data.count to hit 0 and + * toggle the data.gate flag. Everyone then enables interrupts and we all + * continue on. * * Note that the mechanism is the same for UP systems, too; all the SMP stuff * becomes nops. 
*/ -static void set_mtrr(unsigned int reg, unsigned long base, - unsigned long size, mtrr_type type) +static void set_mtrr(unsigned int reg, unsigned long base, unsigned long size, + mtrr_type type) { - cpumask_t allbutself; - unsigned int nr_cpus; - struct set_mtrr_data data; - unsigned long flags; - - cpumask_andnot(&allbutself, &cpu_online_map, - cpumask_of(smp_processor_id())); - nr_cpus = cpumask_weight(&allbutself); - - data.smp_reg = reg; - data.smp_base = base; - data.smp_size = size; - data.smp_type = type; - atomic_set(&data.count, nr_cpus); - atomic_set(&data.gate,0); - - /* Start the ball rolling on other CPUs */ - on_selected_cpus(&allbutself, ipi_handler, &data, 0); - - local_irq_save(flags); - - while (atomic_read(&data.count)) - cpu_relax(); - - /* ok, reset count and toggle gate */ - atomic_set(&data.count, nr_cpus); - smp_wmb(); - atomic_set(&data.gate,1); - - /* do our MTRR business */ - - /* HACK! - * We use this same function to initialize the mtrrs on boot. - * The state of the boot cpu's mtrrs has been saved, and we want - * to replicate across all the APs. - * If we're doing that @reg is set to something special... - */ - if (reg == ~0U) /* update all mtrr registers */ - /* at boot or resume time, this will reinitialize the mtrrs on - * the bp. It is ok. */ - mtrr_if->set_all(); - else /* update the single mtrr register */ - mtrr_if->set(reg,base,size,type); - - /* wait for the others */ - while (atomic_read(&data.count)) - cpu_relax(); - - atomic_set(&data.count, nr_cpus); - smp_wmb(); - atomic_set(&data.gate,0); - - /* - * Wait here for everyone to have seen the gate change - * So we're the last ones to touch 'data' - */ - while (atomic_read(&data.count)) - cpu_relax(); - - local_irq_restore(flags); + cpumask_t allbutself; + unsigned int nr_cpus; + struct set_mtrr_data data; + unsigned long flags; + + cpumask_andnot(&allbutself, &cpu_online_map, + cpumask_of(smp_processor_id())); + nr_cpus = cpumask_weight(&allbutself); + + data.smp_reg = reg; + data.smp_base = base; + data.smp_size = size; + data.smp_type = type; + atomic_set(&data.count, nr_cpus); + atomic_set(&data.gate, 0); + + /* Start the ball rolling on other CPUs */ + on_selected_cpus(&allbutself, ipi_handler, &data, 0); + + local_irq_save(flags); + + while ( atomic_read(&data.count) ) + cpu_relax(); + + /* ok, reset count and toggle gate */ + atomic_set(&data.count, nr_cpus); + smp_wmb(); + atomic_set(&data.gate, 1); + + /* do our MTRR business */ + + /* HACK! + * We use this same function to initialize the mtrrs on boot. + * The state of the boot cpu's mtrrs has been saved, and we want + * to replicate across all the APs. + * If we're doing that @reg is set to something special... + */ + if ( reg == ~0U ) /* update all mtrr registers */ + /* at boot or resume time, this will reinitialize the mtrrs on + * the bp. It is ok. */ + mtrr_if->set_all(); + else /* update the single mtrr register */ + mtrr_if->set(reg, base, size, type); + + /* wait for the others */ + while ( atomic_read(&data.count) ) + cpu_relax(); + + atomic_set(&data.count, nr_cpus); + smp_wmb(); + atomic_set(&data.gate, 0); + + /* + * Wait here for everyone to have seen the gate change + * So we're the last ones to touch 'data' + */ + while ( atomic_read(&data.count) ) + cpu_relax(); + + local_irq_restore(flags); } /** @@ -283,7 +288,7 @@ static void set_mtrr(unsigned int reg, unsigned long base, * Memory type region registers control the caching on newer Intel and * non Intel processors. 
This function allows drivers to request an * MTRR is added. The details and hardware specifics of each processor's - * implementation are hidden from the caller, but nevertheless the + * implementation are hidden from the caller, but nevertheless the * caller should expect to need to provide a power of two size on an * equivalent power of two boundary. * @@ -309,113 +314,125 @@ static void set_mtrr(unsigned int reg, unsigned long base, * failures and do not wish system log messages to be sent. */ -int mtrr_add_page(unsigned long base, unsigned long size, - unsigned int type, char increment) +int mtrr_add_page(unsigned long base, unsigned long size, unsigned int type, + char increment) { - int i, replace, error; - mtrr_type ltype; - unsigned long lbase, lsize; - - if (!mtrr_if) - return -ENXIO; - - if ((error = mtrr_if->validate_add_page(base,size,type))) - return error; - - if (type >= MTRR_NUM_TYPES) { - printk(KERN_WARNING "mtrr: type: %u invalid\n", type); - return -EINVAL; - } - - /* If the type is WC, check that this processor supports it */ - if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { - printk(KERN_WARNING - "mtrr: your processor doesn't support write-combining\n"); - return -EOPNOTSUPP; - } - - if (!size) { - printk(KERN_WARNING "mtrr: zero sized request\n"); - return -EINVAL; - } - - if ((base | (base + size - 1)) >> (paddr_bits - PAGE_SHIFT)) { - printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n"); - return -EINVAL; - } - - error = -EINVAL; - replace = -1; - - /* Search for existing MTRR */ - mutex_lock(&mtrr_mutex); - for (i = 0; i < num_var_ranges; ++i) { - mtrr_if->get(i, &lbase, &lsize, <ype); - if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase) - continue; - /* At this point we know there is some kind of overlap/enclosure */ - if (base < lbase || base + size - 1 > lbase + lsize - 1) { - if (base <= lbase && base + size - 1 >= lbase + lsize - 1) { - /* New region encloses an existing region */ - if (type == ltype) { - replace = replace == -1 ? 
i : -2; - continue; - } - else if (types_compatible(type, ltype)) - continue; - } - printk(KERN_WARNING - "mtrr: %#lx000,%#lx000 overlaps existing" - " %#lx000,%#lx000\n", base, size, lbase, - lsize); - goto out; - } - /* New region is enclosed by an existing region */ - if (ltype != type) { - if (types_compatible(type, ltype)) - continue; - printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", - base, size, mtrr_attrib_to_str(ltype), - mtrr_attrib_to_str(type)); - goto out; - } - if (increment) - ++usage_table[i]; - error = i; - goto out; - } - /* Search for an empty MTRR */ - i = mtrr_if->get_free_region(base, size, replace); - if (i >= 0) { - set_mtrr(i, base, size, type); - if (likely(replace < 0)) - usage_table[i] = 1; - else { - usage_table[i] = usage_table[replace] + !!increment; - if (unlikely(replace != i)) { - set_mtrr(replace, 0, 0, 0); - usage_table[replace] = 0; - } - } - } else - printk(KERN_INFO "mtrr: no more MTRRs available\n"); - error = i; - out: - mutex_unlock(&mtrr_mutex); - return error; + int i, replace, error; + mtrr_type ltype; + unsigned long lbase, lsize; + + if ( !mtrr_if ) + return -ENXIO; + + if ( (error = mtrr_if->validate_add_page(base, size, type)) ) + return error; + + if ( type >= MTRR_NUM_TYPES ) + { + printk(KERN_WARNING "mtrr: type: %u invalid\n", type); + return -EINVAL; + } + + /* If the type is WC, check that this processor supports it */ + if ( (type == MTRR_TYPE_WRCOMB) && !have_wrcomb() ) + { + printk(KERN_WARNING + "mtrr: your processor doesn't support write-combining\n"); + return -EOPNOTSUPP; + } + + if ( !size ) + { + printk(KERN_WARNING "mtrr: zero sized request\n"); + return -EINVAL; + } + + if ( (base | (base + size - 1)) >> (paddr_bits - PAGE_SHIFT) ) + { + printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n"); + return -EINVAL; + } + + error = -EINVAL; + replace = -1; + + /* Search for existing MTRR */ + mutex_lock(&mtrr_mutex); + for ( i = 0; i < num_var_ranges; ++i ) + { + mtrr_if->get(i, &lbase, &lsize, <ype); + if ( !lsize || base > lbase + lsize - 1 || base + size - 1 < lbase ) + continue; + /* At this point we know there is some kind of overlap/enclosure */ + if ( base < lbase || base + size - 1 > lbase + lsize - 1 ) + { + if ( base <= lbase && base + size - 1 >= lbase + lsize - 1 ) + { + /* New region encloses an existing region */ + if ( type == ltype ) + { + replace = replace == -1 ? 
i : -2; + continue; + } + else if ( types_compatible(type, ltype) ) + continue; + } + printk(KERN_WARNING "mtrr: %#lx000,%#lx000 overlaps existing" + " %#lx000,%#lx000\n", + base, size, lbase, lsize); + goto out; + } + /* New region is enclosed by an existing region */ + if ( ltype != type ) + { + if ( types_compatible(type, ltype) ) + continue; + printk(KERN_WARNING + "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", + base, size, mtrr_attrib_to_str(ltype), + mtrr_attrib_to_str(type)); + goto out; + } + if ( increment ) + ++usage_table[i]; + error = i; + goto out; + } + /* Search for an empty MTRR */ + i = mtrr_if->get_free_region(base, size, replace); + if ( i >= 0 ) + { + set_mtrr(i, base, size, type); + if ( likely(replace < 0) ) + usage_table[i] = 1; + else + { + usage_table[i] = usage_table[replace] + !!increment; + if ( unlikely(replace != i) ) + { + set_mtrr(replace, 0, 0, 0); + usage_table[replace] = 0; + } + } + } + else + printk(KERN_INFO "mtrr: no more MTRRs available\n"); + error = i; +out: + mutex_unlock(&mtrr_mutex); + return error; } static int mtrr_check(unsigned long base, unsigned long size) { - if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { - printk(KERN_WARNING - "mtrr: size and base must be multiples of 4 kiB\n"); - printk(KERN_DEBUG - "mtrr: size: %#lx base: %#lx\n", size, base); - dump_stack(); - return -1; - } - return 0; + if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) ) + { + printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n"); + printk(KERN_DEBUG "mtrr: size: %#lx base: %#lx\n", size, base); + dump_stack(); + return -1; + } + return 0; } /** @@ -428,7 +445,7 @@ static int mtrr_check(unsigned long base, unsigned long size) * Memory type region registers control the caching on newer Intel and * non Intel processors. This function allows drivers to request an * MTRR is added. The details and hardware specifics of each processor's - * implementation are hidden from the caller, but nevertheless the + * implementation are hidden from the caller, but nevertheless the * caller should expect to need to provide a power of two size on an * equivalent power of two boundary. * @@ -454,14 +471,13 @@ static int mtrr_check(unsigned long base, unsigned long size) * failures and do not wish system log messages to be sent. */ -int __init -mtrr_add(unsigned long base, unsigned long size, unsigned int type, - char increment) +int __init mtrr_add(unsigned long base, unsigned long size, unsigned int type, + char increment) { - if (mtrr_check(base, size)) - return -EINVAL; - return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, - increment); + if ( mtrr_check(base, size) ) + return -EINVAL; + return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, + increment); } /** @@ -473,7 +489,7 @@ mtrr_add(unsigned long base, unsigned long size, unsigned int type, * If register is supplied then base and size are ignored. This is * how drivers should call it. * - * Releases an MTRR region. If the usage count drops to zero the + * Releases an MTRR region. If the usage count drops to zero the * register is freed and the region returns to default state. * On success the register is returned, on failure a negative error * code. 
@@ -481,50 +497,57 @@ mtrr_add(unsigned long base, unsigned long size, unsigned int type, int mtrr_del_page(int reg, unsigned long base, unsigned long size) { - int i, max; - mtrr_type ltype; - unsigned long lbase, lsize; - int error = -EINVAL; - - if (!mtrr_if) - return -ENXIO; - - max = num_var_ranges; - mutex_lock(&mtrr_mutex); - if (reg < 0) { - /* Search for existing MTRR */ - for (i = 0; i < max; ++i) { - mtrr_if->get(i, &lbase, &lsize, <ype); - if (lbase == base && lsize == size) { - reg = i; - break; - } - } - if (reg < 0) { - printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base, - size); - goto out; - } - } - if (reg >= max) { - printk(KERN_WARNING "mtrr: register: %d too big\n", reg); - goto out; - } - mtrr_if->get(reg, &lbase, &lsize, <ype); - if (lsize < 1) { - printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg); - goto out; - } - if (usage_table[reg] < 1) { - printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg); - goto out; - } - if (--usage_table[reg] < 1) - set_mtrr(reg, 0, 0, 0); - error = reg; - out: - mutex_unlock(&mtrr_mutex); - return error; + int i, max; + mtrr_type ltype; + unsigned long lbase, lsize; + int error = -EINVAL; + + if ( !mtrr_if ) + return -ENXIO; + + max = num_var_ranges; + mutex_lock(&mtrr_mutex); + if ( reg < 0 ) + { + /* Search for existing MTRR */ + for ( i = 0; i < max; ++i ) + { + mtrr_if->get(i, &lbase, &lsize, <ype); + if ( lbase == base && lsize == size ) + { + reg = i; + break; + } + } + if ( reg < 0 ) + { + printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base, + size); + goto out; + } + } + if ( reg >= max ) + { + printk(KERN_WARNING "mtrr: register: %d too big\n", reg); + goto out; + } + mtrr_if->get(reg, &lbase, &lsize, <ype); + if ( lsize < 1 ) + { + printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg); + goto out; + } + if ( usage_table[reg] < 1 ) + { + printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg); + goto out; + } + if ( --usage_table[reg] < 1 ) + set_mtrr(reg, 0, 0, 0); + error = reg; +out: + mutex_unlock(&mtrr_mutex); + return error; } /** * mtrr_del - delete a memory type region @@ -535,65 +558,67 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) * If register is supplied then base and size are ignored. This is * how drivers should call it. * - * Releases an MTRR region. If the usage count drops to zero the + * Releases an MTRR region. If the usage count drops to zero the * register is freed and the region returns to default state. * On success the register is returned, on failure a negative error * code. */ -int __init -mtrr_del(int reg, unsigned long base, unsigned long size) +int __init mtrr_del(int reg, unsigned long base, unsigned long size) { - if (mtrr_check(base, size)) - return -EINVAL; - return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); + if ( mtrr_check(base, size) ) + return -EINVAL; + return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); } /* The suspend/resume methods are only for CPU without MTRR. CPU using generic * MTRR driver doesn't require this */ -struct mtrr_value { - mtrr_type ltype; - unsigned long lbase; - unsigned long lsize; +struct mtrr_value +{ + mtrr_type ltype; + unsigned long lbase; + unsigned long lsize; }; /** * mtrr_bp_init - initialize mtrrs on the boot CPU * - * This needs to be called early; before any of the other CPUs are + * This needs to be called early; before any of the other CPUs are * initialized (i.e. before smp_init()). 
- * + * */ void __init mtrr_bp_init(void) { - if (cpu_has_mtrr) { - mtrr_if = &generic_mtrr_ops; - size_or_mask = ~((1ULL << (paddr_bits - PAGE_SHIFT)) - 1); - size_and_mask = ~size_or_mask & 0xfffff00000ULL; - } - - if (mtrr_if) { - set_num_var_ranges(); - init_table(); - if (use_intel()) - get_mtrr_state(); - } + if ( cpu_has_mtrr ) + { + mtrr_if = &generic_mtrr_ops; + size_or_mask = ~((1ULL << (paddr_bits - PAGE_SHIFT)) - 1); + size_and_mask = ~size_or_mask & 0xfffff00000ULL; + } + + if ( mtrr_if ) + { + set_num_var_ranges(); + init_table(); + if ( use_intel() ) + get_mtrr_state(); + } } void mtrr_ap_init(void) { - if (!mtrr_if || !use_intel() || hold_mtrr_updates_on_aps) - return; - /* - * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed, - * but this routine will be called in cpu boot time, holding the lock - * breaks it. This routine is called in two cases: 1.very earily time - * of software resume, when there absolutely isn't mtrr entry changes; - * 2.cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug lock to - * prevent mtrr entry changes - */ - set_mtrr(~0U, 0, 0, 0); + if ( !mtrr_if || !use_intel() || hold_mtrr_updates_on_aps ) + return; + /* + * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed, + * but this routine will be called in cpu boot time, holding the lock + * breaks it. This routine is called in two cases: 1.very earily time + * of software resume, when there absolutely isn't mtrr entry changes; + * 2.cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug lock to + * prevent mtrr entry changes + */ + set_mtrr(~0U, 0, 0, 0); } /** @@ -601,43 +626,43 @@ void mtrr_ap_init(void) */ void mtrr_save_state(void) { - int cpu = get_cpu(); + int cpu = get_cpu(); - if (cpu == 0) - mtrr_save_fixed_ranges(NULL); - else - on_selected_cpus(cpumask_of(0), mtrr_save_fixed_ranges, NULL, 1); - put_cpu(); + if ( cpu == 0 ) + mtrr_save_fixed_ranges(NULL); + else + on_selected_cpus(cpumask_of(0), mtrr_save_fixed_ranges, NULL, 1); + put_cpu(); } void mtrr_aps_sync_begin(void) { - if (!use_intel()) - return; - hold_mtrr_updates_on_aps = 1; + if ( !use_intel() ) + return; + hold_mtrr_updates_on_aps = 1; } void mtrr_aps_sync_end(void) { - if (!use_intel()) - return; - set_mtrr(~0U, 0, 0, 0); - hold_mtrr_updates_on_aps = 0; + if ( !use_intel() ) + return; + set_mtrr(~0U, 0, 0, 0); + hold_mtrr_updates_on_aps = 0; } void mtrr_bp_restore(void) { - if (!use_intel()) - return; - mtrr_if->set_all(); + if ( !use_intel() ) + return; + mtrr_if->set_all(); } static int __init mtrr_init_finialize(void) { - if (!mtrr_if) - return 0; - if (use_intel()) - mtrr_state_warn(); - return 0; + if ( !mtrr_if ) + return 0; + if ( use_intel() ) + mtrr_state_warn(); + return 0; } __initcall(mtrr_init_finialize); diff --git a/xen/arch/x86/cpu/mwait-idle.c b/xen/arch/x86/cpu/mwait-idle.c index f89c52f256..3ae2fcedfe 100644 --- a/xen/arch/x86/cpu/mwait-idle.c +++ b/xen/arch/x86/cpu/mwait-idle.c @@ -66,9 +66,9 @@ #define PREFIX "mwait-idle: " #ifdef DEBUG -# define pr_debug(fmt...) printk(KERN_DEBUG fmt) +#define pr_debug(fmt...) printk(KERN_DEBUG fmt) #else -# define pr_debug(fmt...) +#define pr_debug(fmt...) #endif static __initdata bool_t opt_mwait_idle = 1; @@ -80,35 +80,37 @@ static unsigned int mwait_substates; /* Reliable LAPIC Timer States, bit 1 for C1 etc. Default to only C1. */ static unsigned int lapic_timer_reliable_states = (1 << 1); -struct idle_cpu { - const struct cpuidle_state *state_table; - - /* - * Hardware C-state auto-demotion may not always be optimal. 
- * Indicate which enable bits to clear here. - */ - unsigned long auto_demotion_disable_flags; - bool_t byt_auto_demotion_disable_flag; - bool_t disable_promotion_to_c1e; +struct idle_cpu +{ + const struct cpuidle_state *state_table; + + /* + * Hardware C-state auto-demotion may not always be optimal. + * Indicate which enable bits to clear here. + */ + unsigned long auto_demotion_disable_flags; + bool_t byt_auto_demotion_disable_flag; + bool_t disable_promotion_to_c1e; }; static const struct idle_cpu *icpu; -static const struct cpuidle_state { - char name[16]; - unsigned int flags; - unsigned int exit_latency; /* in US */ - unsigned int target_residency; /* in US */ -} *cpuidle_state_table; +static const struct cpuidle_state +{ + char name[16]; + unsigned int flags; + unsigned int exit_latency; /* in US */ + unsigned int target_residency; /* in US */ +} * cpuidle_state_table; -#define CPUIDLE_FLAG_DISABLED 0x1 +#define CPUIDLE_FLAG_DISABLED 0x1 /* * Set this flag for states where the HW flushes the TLB for us * and so we don't need cross-calls to keep it consistent. * If this flag is set, SW flushes the TLB, so even if the * HW doesn't do the flushing, this flag is safe to use. */ -#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000 +#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000 /* * MWAIT takes an 8-bit "hint" in EAX "suggesting" @@ -119,8 +121,9 @@ static const struct cpuidle_state { */ #define flg2MWAIT(flags) (((flags) >> 24) & 0xFF) #define MWAIT2flg(eax) ((eax & 0xFF) << 24) -#define MWAIT_HINT2CSTATE(hint) (((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) -#define MWAIT_HINT2SUBSTATE(hint) ((hint) & MWAIT_CSTATE_MASK) +#define MWAIT_HINT2CSTATE(hint) \ + (((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) +#define MWAIT_HINT2SUBSTATE(hint) ((hint)&MWAIT_CSTATE_MASK) /* * States are indexed by the cstate number, @@ -128,643 +131,628 @@ static const struct cpuidle_state { * Thus C0 is a dummy. 
*/ static const struct cpuidle_state nehalem_cstates[] = { - { - .name = "C1-NHM", - .flags = MWAIT2flg(0x00), - .exit_latency = 3, - .target_residency = 6, - }, - { - .name = "C1E-NHM", - .flags = MWAIT2flg(0x01), - .exit_latency = 10, - .target_residency = 20, - }, - { - .name = "C3-NHM", - .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 20, - .target_residency = 80, - }, - { - .name = "C6-NHM", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 200, - .target_residency = 800, - }, - {} -}; + { + .name = "C1-NHM", + .flags = MWAIT2flg(0x00), + .exit_latency = 3, + .target_residency = 6, + }, + { + .name = "C1E-NHM", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, + .target_residency = 20, + }, + { + .name = "C3-NHM", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 20, + .target_residency = 80, + }, + { + .name = "C6-NHM", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 200, + .target_residency = 800, + }, + {}}; static const struct cpuidle_state snb_cstates[] = { - { - .name = "C1-SNB", - .flags = MWAIT2flg(0x00), - .exit_latency = 2, - .target_residency = 2, - }, - { - .name = "C1E-SNB", - .flags = MWAIT2flg(0x01), - .exit_latency = 10, - .target_residency = 20, - }, - { - .name = "C3-SNB", - .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 80, - .target_residency = 211, - }, - { - .name = "C6-SNB", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 104, - .target_residency = 345, - }, - { - .name = "C7-SNB", - .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 109, - .target_residency = 345, - }, - {} -}; + { + .name = "C1-SNB", + .flags = MWAIT2flg(0x00), + .exit_latency = 2, + .target_residency = 2, + }, + { + .name = "C1E-SNB", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, + .target_residency = 20, + }, + { + .name = "C3-SNB", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 80, + .target_residency = 211, + }, + { + .name = "C6-SNB", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 104, + .target_residency = 345, + }, + { + .name = "C7-SNB", + .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 109, + .target_residency = 345, + }, + {}}; static const struct cpuidle_state byt_cstates[] = { - { - .name = "C1-BYT", - .flags = MWAIT2flg(0x00), - .exit_latency = 1, - .target_residency = 1, - }, - { - .name = "C6N-BYT", - .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 300, - .target_residency = 275, - }, - { - .name = "C6S-BYT", - .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 500, - .target_residency = 560, - }, - { - .name = "C7-BYT", - .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 1200, - .target_residency = 4000, - }, - { - .name = "C7S-BYT", - .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 10000, - .target_residency = 20000, - }, - {} -}; + { + .name = "C1-BYT", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 1, + }, + { + .name = "C6N-BYT", + .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 300, + .target_residency = 275, + }, + { + .name = "C6S-BYT", + .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 500, + .target_residency = 560, + }, + { + .name = "C7-BYT", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 1200, + .target_residency = 4000, + }, + { + .name = 
"C7S-BYT", + .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 10000, + .target_residency = 20000, + }, + {}}; static const struct cpuidle_state cht_cstates[] = { - { - .name = "C1-CHT", - .flags = MWAIT2flg(0x00), - .exit_latency = 1, - .target_residency = 1, - }, - { - .name = "C6N-CHT", - .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 80, - .target_residency = 275, - }, - { - .name = "C6S-CHT", - .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 200, - .target_residency = 560, - }, - { - .name = "C7-CHT", - .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 1200, - .target_residency = 4000, - }, - { - .name = "C7S-CHT", - .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 10000, - .target_residency = 20000, - }, - {} -}; + { + .name = "C1-CHT", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 1, + }, + { + .name = "C6N-CHT", + .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 80, + .target_residency = 275, + }, + { + .name = "C6S-CHT", + .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 200, + .target_residency = 560, + }, + { + .name = "C7-CHT", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 1200, + .target_residency = 4000, + }, + { + .name = "C7S-CHT", + .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 10000, + .target_residency = 20000, + }, + {}}; static const struct cpuidle_state ivb_cstates[] = { - { - .name = "C1-IVB", - .flags = MWAIT2flg(0x00), - .exit_latency = 1, - .target_residency = 1, - }, - { - .name = "C1E-IVB", - .flags = MWAIT2flg(0x01), - .exit_latency = 10, - .target_residency = 20, - }, - { - .name = "C3-IVB", - .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 59, - .target_residency = 156, - }, - { - .name = "C6-IVB", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 80, - .target_residency = 300, - }, - { - .name = "C7-IVB", - .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 87, - .target_residency = 300, - }, - {} -}; + { + .name = "C1-IVB", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 1, + }, + { + .name = "C1E-IVB", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, + .target_residency = 20, + }, + { + .name = "C3-IVB", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 59, + .target_residency = 156, + }, + { + .name = "C6-IVB", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 80, + .target_residency = 300, + }, + { + .name = "C7-IVB", + .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 87, + .target_residency = 300, + }, + {}}; static const struct cpuidle_state ivt_cstates[] = { - { - .name = "C1-IVT", - .flags = MWAIT2flg(0x00), - .exit_latency = 1, - .target_residency = 1, - }, - { - .name = "C1E-IVT", - .flags = MWAIT2flg(0x01), - .exit_latency = 10, - .target_residency = 80, - }, - { - .name = "C3-IVT", - .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 59, - .target_residency = 156, - }, - { - .name = "C6-IVT", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 82, - .target_residency = 300, - }, - {} -}; + { + .name = "C1-IVT", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 1, + }, + { + .name = "C1E-IVT", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, + .target_residency = 80, + }, + { + .name = "C3-IVT", + 
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 59, + .target_residency = 156, + }, + { + .name = "C6-IVT", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 82, + .target_residency = 300, + }, + {}}; static const struct cpuidle_state ivt_cstates_4s[] = { - { - .name = "C1-IVT-4S", - .flags = MWAIT2flg(0x00), - .exit_latency = 1, - .target_residency = 1, - }, - { - .name = "C1E-IVT-4S", - .flags = MWAIT2flg(0x01), - .exit_latency = 10, - .target_residency = 250, - }, - { - .name = "C3-IVT-4S", - .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 59, - .target_residency = 300, - }, - { - .name = "C6-IVT-4S", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 84, - .target_residency = 400, - }, - {} -}; + { + .name = "C1-IVT-4S", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 1, + }, + { + .name = "C1E-IVT-4S", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, + .target_residency = 250, + }, + { + .name = "C3-IVT-4S", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 59, + .target_residency = 300, + }, + { + .name = "C6-IVT-4S", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 84, + .target_residency = 400, + }, + {}}; static const struct cpuidle_state ivt_cstates_8s[] = { - { - .name = "C1-IVT-8S", - .flags = MWAIT2flg(0x00), - .exit_latency = 1, - .target_residency = 1, - }, - { - .name = "C1E-IVT-8S", - .flags = MWAIT2flg(0x01), - .exit_latency = 10, - .target_residency = 500, - }, - { - .name = "C3-IVT-8S", - .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 59, - .target_residency = 600, - }, - { - .name = "C6-IVT-8S", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 88, - .target_residency = 700, - }, - {} -}; + { + .name = "C1-IVT-8S", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 1, + }, + { + .name = "C1E-IVT-8S", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, + .target_residency = 500, + }, + { + .name = "C3-IVT-8S", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 59, + .target_residency = 600, + }, + { + .name = "C6-IVT-8S", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 88, + .target_residency = 700, + }, + {}}; static const struct cpuidle_state hsw_cstates[] = { - { - .name = "C1-HSW", - .flags = MWAIT2flg(0x00), - .exit_latency = 2, - .target_residency = 2, - }, - { - .name = "C1E-HSW", - .flags = MWAIT2flg(0x01), - .exit_latency = 10, - .target_residency = 20, - }, - { - .name = "C3-HSW", - .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 33, - .target_residency = 100, - }, - { - .name = "C6-HSW", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 133, - .target_residency = 400, - }, - { - .name = "C7s-HSW", - .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 166, - .target_residency = 500, - }, - { - .name = "C8-HSW", - .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 300, - .target_residency = 900, - }, - { - .name = "C9-HSW", - .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 600, - .target_residency = 1800, - }, - { - .name = "C10-HSW", - .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 2600, - .target_residency = 7700, - }, - {} -}; + { + .name = "C1-HSW", + .flags = MWAIT2flg(0x00), + .exit_latency = 2, + .target_residency = 2, + }, + { + .name = "C1E-HSW", + 
.flags = MWAIT2flg(0x01), + .exit_latency = 10, + .target_residency = 20, + }, + { + .name = "C3-HSW", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 33, + .target_residency = 100, + }, + { + .name = "C6-HSW", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 133, + .target_residency = 400, + }, + { + .name = "C7s-HSW", + .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 166, + .target_residency = 500, + }, + { + .name = "C8-HSW", + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 300, + .target_residency = 900, + }, + { + .name = "C9-HSW", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 600, + .target_residency = 1800, + }, + { + .name = "C10-HSW", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 2600, + .target_residency = 7700, + }, + {}}; static const struct cpuidle_state bdw_cstates[] = { - { - .name = "C1-BDW", - .flags = MWAIT2flg(0x00), - .exit_latency = 2, - .target_residency = 2, - }, - { - .name = "C1E-BDW", - .flags = MWAIT2flg(0x01), - .exit_latency = 10, - .target_residency = 20, - }, - { - .name = "C3-BDW", - .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 40, - .target_residency = 100, - }, - { - .name = "C6-BDW", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 133, - .target_residency = 400, - }, - { - .name = "C7s-BDW", - .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 166, - .target_residency = 500, - }, - { - .name = "C8-BDW", - .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 300, - .target_residency = 900, - }, - { - .name = "C9-BDW", - .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 600, - .target_residency = 1800, - }, - { - .name = "C10-BDW", - .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 2600, - .target_residency = 7700, - }, - {} -}; + { + .name = "C1-BDW", + .flags = MWAIT2flg(0x00), + .exit_latency = 2, + .target_residency = 2, + }, + { + .name = "C1E-BDW", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, + .target_residency = 20, + }, + { + .name = "C3-BDW", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 40, + .target_residency = 100, + }, + { + .name = "C6-BDW", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 133, + .target_residency = 400, + }, + { + .name = "C7s-BDW", + .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 166, + .target_residency = 500, + }, + { + .name = "C8-BDW", + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 300, + .target_residency = 900, + }, + { + .name = "C9-BDW", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 600, + .target_residency = 1800, + }, + { + .name = "C10-BDW", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 2600, + .target_residency = 7700, + }, + {}}; static struct cpuidle_state skl_cstates[] = { - { - .name = "C1-SKL", - .flags = MWAIT2flg(0x00), - .exit_latency = 2, - .target_residency = 2, - }, - { - .name = "C1E-SKL", - .flags = MWAIT2flg(0x01), - .exit_latency = 10, - .target_residency = 20, - }, - { - .name = "C3-SKL", - .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 70, - .target_residency = 100, - }, - { - .name = "C6-SKL", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 85, - .target_residency = 200, - }, - { - .name = "C7s-SKL", - .flags = 
MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 124, - .target_residency = 800, - }, - { - .name = "C8-SKL", - .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 200, - .target_residency = 800, - }, - { - .name = "C9-SKL", - .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 480, - .target_residency = 5000, - }, - { - .name = "C10-SKL", - .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 890, - .target_residency = 5000, - }, - {} -}; + { + .name = "C1-SKL", + .flags = MWAIT2flg(0x00), + .exit_latency = 2, + .target_residency = 2, + }, + { + .name = "C1E-SKL", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, + .target_residency = 20, + }, + { + .name = "C3-SKL", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 70, + .target_residency = 100, + }, + { + .name = "C6-SKL", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 85, + .target_residency = 200, + }, + { + .name = "C7s-SKL", + .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 124, + .target_residency = 800, + }, + { + .name = "C8-SKL", + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 200, + .target_residency = 800, + }, + { + .name = "C9-SKL", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 480, + .target_residency = 5000, + }, + { + .name = "C10-SKL", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 890, + .target_residency = 5000, + }, + {}}; static const struct cpuidle_state skx_cstates[] = { - { - .name = "C1-SKX", - .flags = MWAIT2flg(0x00), - .exit_latency = 2, - .target_residency = 2, - }, - { - .name = "C1E-SKX", - .flags = MWAIT2flg(0x01), - .exit_latency = 10, - .target_residency = 20, - }, - { - .name = "C6-SKX", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 133, - .target_residency = 600, - }, - {} -}; + { + .name = "C1-SKX", + .flags = MWAIT2flg(0x00), + .exit_latency = 2, + .target_residency = 2, + }, + { + .name = "C1E-SKX", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, + .target_residency = 20, + }, + { + .name = "C6-SKX", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 133, + .target_residency = 600, + }, + {}}; static const struct cpuidle_state atom_cstates[] = { - { - .name = "C1E-ATM", - .flags = MWAIT2flg(0x00), - .exit_latency = 10, - .target_residency = 20, - }, - { - .name = "C2-ATM", - .flags = MWAIT2flg(0x10), - .exit_latency = 20, - .target_residency = 80, - }, - { - .name = "C4-ATM", - .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 100, - .target_residency = 400, - }, - { - .name = "C6-ATM", - .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 140, - .target_residency = 560, - }, - {} -}; + { + .name = "C1E-ATM", + .flags = MWAIT2flg(0x00), + .exit_latency = 10, + .target_residency = 20, + }, + { + .name = "C2-ATM", + .flags = MWAIT2flg(0x10), + .exit_latency = 20, + .target_residency = 80, + }, + { + .name = "C4-ATM", + .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 100, + .target_residency = 400, + }, + { + .name = "C6-ATM", + .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 140, + .target_residency = 560, + }, + {}}; static const struct cpuidle_state tangier_cstates[] = { - { - .name = "C1-TNG", - .flags = MWAIT2flg(0x00), - .exit_latency = 1, - .target_residency = 4, - }, - { - .name = "C4-TNG", - .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, 
- .exit_latency = 100, - .target_residency = 400, - }, - { - .name = "C6-TNG", - .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 140, - .target_residency = 560, - }, - { - .name = "C7-TNG", - .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 1200, - .target_residency = 4000, - }, - { - .name = "C9-TNG", - .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 10000, - .target_residency = 20000, - }, - {} -}; + { + .name = "C1-TNG", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 4, + }, + { + .name = "C4-TNG", + .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 100, + .target_residency = 400, + }, + { + .name = "C6-TNG", + .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 140, + .target_residency = 560, + }, + { + .name = "C7-TNG", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 1200, + .target_residency = 4000, + }, + { + .name = "C9-TNG", + .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 10000, + .target_residency = 20000, + }, + {}}; static const struct cpuidle_state avn_cstates[] = { - { - .name = "C1-AVN", - .flags = MWAIT2flg(0x00), - .exit_latency = 2, - .target_residency = 2, - }, - { - .name = "C6-AVN", - .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 15, - .target_residency = 45, - }, - {} -}; + { + .name = "C1-AVN", + .flags = MWAIT2flg(0x00), + .exit_latency = 2, + .target_residency = 2, + }, + { + .name = "C6-AVN", + .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 15, + .target_residency = 45, + }, + {}}; static const struct cpuidle_state knl_cstates[] = { - { - .name = "C1-KNL", - .flags = MWAIT2flg(0x00), - .exit_latency = 1, - .target_residency = 2, - }, - { - .name = "C6-KNL", - .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 120, - .target_residency = 500, - }, - {} -}; + { + .name = "C1-KNL", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 2, + }, + { + .name = "C6-KNL", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 120, + .target_residency = 500, + }, + {}}; static struct cpuidle_state bxt_cstates[] = { - { - .name = "C1-BXT", - .flags = MWAIT2flg(0x00), - .exit_latency = 2, - .target_residency = 2, - }, - { - .name = "C1E-BXT", - .flags = MWAIT2flg(0x01), - .exit_latency = 10, - .target_residency = 20, - }, - { - .name = "C6-BXT", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 133, - .target_residency = 133, - }, - { - .name = "C7s-BXT", - .flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 155, - .target_residency = 155, - }, - { - .name = "C8-BXT", - .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 1000, - .target_residency = 1000, - }, - { - .name = "C9-BXT", - .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 2000, - .target_residency = 2000, - }, - { - .name = "C10-BXT", - .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 10000, - .target_residency = 10000, - }, - {} -}; + { + .name = "C1-BXT", + .flags = MWAIT2flg(0x00), + .exit_latency = 2, + .target_residency = 2, + }, + { + .name = "C1E-BXT", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, + .target_residency = 20, + }, + { + .name = "C6-BXT", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 133, + .target_residency = 133, + }, + { + .name = "C7s-BXT", + .flags = MWAIT2flg(0x31) | 
CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 155, + .target_residency = 155, + }, + { + .name = "C8-BXT", + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 1000, + .target_residency = 1000, + }, + { + .name = "C9-BXT", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 2000, + .target_residency = 2000, + }, + { + .name = "C10-BXT", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 10000, + .target_residency = 10000, + }, + {}}; static const struct cpuidle_state dnv_cstates[] = { - { - .name = "C1-DNV", - .flags = MWAIT2flg(0x00), - .exit_latency = 2, - .target_residency = 2, - }, - { - .name = "C1E-DNV", - .flags = MWAIT2flg(0x01), - .exit_latency = 10, - .target_residency = 20, - }, - { - .name = "C6-DNV", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 50, - .target_residency = 500, - }, - {} -}; + { + .name = "C1-DNV", + .flags = MWAIT2flg(0x00), + .exit_latency = 2, + .target_residency = 2, + }, + { + .name = "C1E-DNV", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, + .target_residency = 20, + }, + { + .name = "C6-DNV", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 50, + .target_residency = 500, + }, + {}}; static void mwait_idle(void) { - unsigned int cpu = smp_processor_id(); - struct acpi_processor_power *power = processor_powers[cpu]; - struct acpi_processor_cx *cx = NULL; - unsigned int eax, next_state, cstate; - u64 before, after; - u32 exp = 0, pred = 0, irq_traced[4] = { 0 }; - - if (max_cstate > 0 && power && !sched_has_urgent_vcpu() && - (next_state = cpuidle_current_governor->select(power)) > 0) { - do { - cx = &power->states[next_state]; - } while (cx->type > max_cstate && --next_state); - if (!next_state) - cx = NULL; - menu_get_trace_data(&exp, &pred); - } - if (!cx) { - if (pm_idle_save) - pm_idle_save(); - else - { - struct cpu_info *info = get_cpu_info(); - - spec_ctrl_enter_idle(info); - safe_halt(); - spec_ctrl_exit_idle(info); - } - return; - } - - cpufreq_dbs_timer_suspend(); - - sched_tick_suspend(); - /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */ - process_pending_softirqs(); - - /* Interrupts must be disabled for C2 and higher transitions. */ - local_irq_disable(); - - if (!cpu_is_haltable(cpu)) { - local_irq_enable(); - sched_tick_resume(); - cpufreq_dbs_timer_resume(); - return; - } - - eax = cx->address; - cstate = ((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; + unsigned int cpu = smp_processor_id(); + struct acpi_processor_power *power = processor_powers[cpu]; + struct acpi_processor_cx *cx = NULL; + unsigned int eax, next_state, cstate; + u64 before, after; + u32 exp = 0, pred = 0, irq_traced[4] = {0}; + + if ( max_cstate > 0 && power && !sched_has_urgent_vcpu() && + (next_state = cpuidle_current_governor->select(power)) > 0 ) + { + do { + cx = &power->states[next_state]; + } while ( cx->type > max_cstate && --next_state ); + if ( !next_state ) + cx = NULL; + menu_get_trace_data(&exp, &pred); + } + if ( !cx ) + { + if ( pm_idle_save ) + pm_idle_save(); + else + { + struct cpu_info *info = get_cpu_info(); + + spec_ctrl_enter_idle(info); + safe_halt(); + spec_ctrl_exit_idle(info); + } + return; + } + + cpufreq_dbs_timer_suspend(); + + sched_tick_suspend(); + /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */ + process_pending_softirqs(); + + /* Interrupts must be disabled for C2 and higher transitions. 
*/ + local_irq_disable(); + + if ( !cpu_is_haltable(cpu) ) + { + local_irq_enable(); + sched_tick_resume(); + cpufreq_dbs_timer_resume(); + return; + } + + eax = cx->address; + cstate = ((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; #if 0 /* XXX Can we/do we need to do something similar on Xen? */ /* @@ -775,189 +763,190 @@ static void mwait_idle(void) leave_mm(cpu); #endif - if (!(lapic_timer_reliable_states & (1 << cstate))) - lapic_timer_off(); + if ( !(lapic_timer_reliable_states & (1 << cstate)) ) + lapic_timer_off(); - before = cpuidle_get_tick(); - TRACE_4D(TRC_PM_IDLE_ENTRY, cx->type, before, exp, pred); + before = cpuidle_get_tick(); + TRACE_4D(TRC_PM_IDLE_ENTRY, cx->type, before, exp, pred); - update_last_cx_stat(power, cx, before); + update_last_cx_stat(power, cx, before); - if (cpu_is_haltable(cpu)) - mwait_idle_with_hints(eax, MWAIT_ECX_INTERRUPT_BREAK); + if ( cpu_is_haltable(cpu) ) + mwait_idle_with_hints(eax, MWAIT_ECX_INTERRUPT_BREAK); - after = cpuidle_get_tick(); + after = cpuidle_get_tick(); - cstate_restore_tsc(); - trace_exit_reason(irq_traced); - TRACE_6D(TRC_PM_IDLE_EXIT, cx->type, after, - irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]); + cstate_restore_tsc(); + trace_exit_reason(irq_traced); + TRACE_6D(TRC_PM_IDLE_EXIT, cx->type, after, irq_traced[0], irq_traced[1], + irq_traced[2], irq_traced[3]); - /* Now back in C0. */ - update_idle_stats(power, cx, before, after); - local_irq_enable(); + /* Now back in C0. */ + update_idle_stats(power, cx, before, after); + local_irq_enable(); - if (!(lapic_timer_reliable_states & (1 << cstate))) - lapic_timer_on(); + if ( !(lapic_timer_reliable_states & (1 << cstate)) ) + lapic_timer_on(); - sched_tick_resume(); - cpufreq_dbs_timer_resume(); + sched_tick_resume(); + cpufreq_dbs_timer_resume(); - if ( cpuidle_current_governor->reflect ) - cpuidle_current_governor->reflect(power); + if ( cpuidle_current_governor->reflect ) + cpuidle_current_governor->reflect(power); } static void auto_demotion_disable(void *dummy) { - u64 msr_bits; + u64 msr_bits; - rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); - msr_bits &= ~(icpu->auto_demotion_disable_flags); - wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); + rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); + msr_bits &= ~(icpu->auto_demotion_disable_flags); + wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); } static void byt_auto_demotion_disable(void *dummy) { - wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0); - wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0); + wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0); + wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0); } static void c1e_promotion_disable(void *dummy) { - u64 msr_bits; + u64 msr_bits; - rdmsrl(MSR_IA32_POWER_CTL, msr_bits); - msr_bits &= ~0x2; - wrmsrl(MSR_IA32_POWER_CTL, msr_bits); + rdmsrl(MSR_IA32_POWER_CTL, msr_bits); + msr_bits &= ~0x2; + wrmsrl(MSR_IA32_POWER_CTL, msr_bits); } static const struct idle_cpu idle_cpu_nehalem = { - .state_table = nehalem_cstates, - .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, - .disable_promotion_to_c1e = 1, + .state_table = nehalem_cstates, + .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, + .disable_promotion_to_c1e = 1, }; static const struct idle_cpu idle_cpu_atom = { - .state_table = atom_cstates, + .state_table = atom_cstates, }; static const struct idle_cpu idle_cpu_tangier = { - .state_table = tangier_cstates, + .state_table = tangier_cstates, }; static const struct idle_cpu idle_cpu_lincroft = { - .state_table = atom_cstates, - 
.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE, + .state_table = atom_cstates, + .auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE, }; static const struct idle_cpu idle_cpu_snb = { - .state_table = snb_cstates, - .disable_promotion_to_c1e = 1, + .state_table = snb_cstates, + .disable_promotion_to_c1e = 1, }; static const struct idle_cpu idle_cpu_byt = { - .state_table = byt_cstates, - .disable_promotion_to_c1e = 1, - .byt_auto_demotion_disable_flag = 1, + .state_table = byt_cstates, + .disable_promotion_to_c1e = 1, + .byt_auto_demotion_disable_flag = 1, }; static const struct idle_cpu idle_cpu_cht = { - .state_table = cht_cstates, - .disable_promotion_to_c1e = 1, - .byt_auto_demotion_disable_flag = 1, + .state_table = cht_cstates, + .disable_promotion_to_c1e = 1, + .byt_auto_demotion_disable_flag = 1, }; static const struct idle_cpu idle_cpu_ivb = { - .state_table = ivb_cstates, - .disable_promotion_to_c1e = 1, + .state_table = ivb_cstates, + .disable_promotion_to_c1e = 1, }; static const struct idle_cpu idle_cpu_ivt = { - .state_table = ivt_cstates, - .disable_promotion_to_c1e = 1, + .state_table = ivt_cstates, + .disable_promotion_to_c1e = 1, }; static const struct idle_cpu idle_cpu_hsw = { - .state_table = hsw_cstates, - .disable_promotion_to_c1e = 1, + .state_table = hsw_cstates, + .disable_promotion_to_c1e = 1, }; static const struct idle_cpu idle_cpu_bdw = { - .state_table = bdw_cstates, - .disable_promotion_to_c1e = 1, + .state_table = bdw_cstates, + .disable_promotion_to_c1e = 1, }; static const struct idle_cpu idle_cpu_skl = { - .state_table = skl_cstates, - .disable_promotion_to_c1e = 1, + .state_table = skl_cstates, + .disable_promotion_to_c1e = 1, }; static const struct idle_cpu idle_cpu_skx = { - .state_table = skx_cstates, - .disable_promotion_to_c1e = 1, + .state_table = skx_cstates, + .disable_promotion_to_c1e = 1, }; static const struct idle_cpu idle_cpu_avn = { - .state_table = avn_cstates, - .disable_promotion_to_c1e = 1, + .state_table = avn_cstates, + .disable_promotion_to_c1e = 1, }; static const struct idle_cpu idle_cpu_knl = { - .state_table = knl_cstates, + .state_table = knl_cstates, }; static const struct idle_cpu idle_cpu_bxt = { - .state_table = bxt_cstates, - .disable_promotion_to_c1e = 1, + .state_table = bxt_cstates, + .disable_promotion_to_c1e = 1, }; static const struct idle_cpu idle_cpu_dnv = { - .state_table = dnv_cstates, - .disable_promotion_to_c1e = 1, + .state_table = dnv_cstates, + .disable_promotion_to_c1e = 1, }; -#define ICPU(model, cpu) \ - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ALWAYS, &idle_cpu_##cpu} +#define ICPU(model, cpu) \ + { \ + X86_VENDOR_INTEL, 6, model, X86_FEATURE_ALWAYS, &idle_cpu_##cpu \ + } static const struct x86_cpu_id intel_idle_ids[] __initconstrel = { - ICPU(0x1a, nehalem), - ICPU(0x1e, nehalem), - ICPU(0x1f, nehalem), - ICPU(0x25, nehalem), - ICPU(0x2c, nehalem), - ICPU(0x2e, nehalem), - ICPU(0x2f, nehalem), - ICPU(0x1c, atom), - ICPU(0x26, lincroft), - ICPU(0x2a, snb), - ICPU(0x2d, snb), - ICPU(0x36, atom), - ICPU(0x37, byt), - ICPU(0x4a, tangier), - ICPU(0x4c, cht), - ICPU(0x3a, ivb), - ICPU(0x3e, ivt), - ICPU(0x3c, hsw), - ICPU(0x3f, hsw), - ICPU(0x45, hsw), - ICPU(0x46, hsw), - ICPU(0x4d, avn), - ICPU(0x3d, bdw), - ICPU(0x47, bdw), - ICPU(0x4f, bdw), - ICPU(0x56, bdw), - ICPU(0x4e, skl), - ICPU(0x5e, skl), - ICPU(0x8e, skl), - ICPU(0x9e, skl), - ICPU(0x55, skx), - ICPU(0x57, knl), - ICPU(0x85, knl), - ICPU(0x5c, bxt), - ICPU(0x7a, bxt), - ICPU(0x5f, dnv), - {} -}; + ICPU(0x1a, nehalem), + ICPU(0x1e, 
nehalem), + ICPU(0x1f, nehalem), + ICPU(0x25, nehalem), + ICPU(0x2c, nehalem), + ICPU(0x2e, nehalem), + ICPU(0x2f, nehalem), + ICPU(0x1c, atom), + ICPU(0x26, lincroft), + ICPU(0x2a, snb), + ICPU(0x2d, snb), + ICPU(0x36, atom), + ICPU(0x37, byt), + ICPU(0x4a, tangier), + ICPU(0x4c, cht), + ICPU(0x3a, ivb), + ICPU(0x3e, ivt), + ICPU(0x3c, hsw), + ICPU(0x3f, hsw), + ICPU(0x45, hsw), + ICPU(0x46, hsw), + ICPU(0x4d, avn), + ICPU(0x3d, bdw), + ICPU(0x47, bdw), + ICPU(0x4f, bdw), + ICPU(0x56, bdw), + ICPU(0x4e, skl), + ICPU(0x5e, skl), + ICPU(0x8e, skl), + ICPU(0x9e, skl), + ICPU(0x55, skx), + ICPU(0x57, knl), + ICPU(0x85, knl), + ICPU(0x5c, bxt), + ICPU(0x7a, bxt), + ICPU(0x5f, dnv), + {}}; /* * ivt_idle_state_table_update(void) @@ -967,23 +956,26 @@ static const struct x86_cpu_id intel_idle_ids[] __initconstrel = { */ static void __init ivt_idle_state_table_update(void) { - /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */ - unsigned int cpu, max_apicid = boot_cpu_physical_apicid; - - for_each_present_cpu(cpu) - if (max_apicid < x86_cpu_to_apicid[cpu]) - max_apicid = x86_cpu_to_apicid[cpu]; - switch (apicid_to_socket(max_apicid)) { - case 0: case 1: - /* 1 and 2 socket systems use default ivt_cstates */ - break; - case 2: case 3: - cpuidle_state_table = ivt_cstates_4s; - break; - default: - cpuidle_state_table = ivt_cstates_8s; - break; - } + /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */ + unsigned int cpu, max_apicid = boot_cpu_physical_apicid; + + for_each_present_cpu (cpu) + if ( max_apicid < x86_cpu_to_apicid[cpu] ) + max_apicid = x86_cpu_to_apicid[cpu]; + switch (apicid_to_socket(max_apicid)) + { + case 0: + case 1: + /* 1 and 2 socket systems use default ivt_cstates */ + break; + case 2: + case 3: + cpuidle_state_table = ivt_cstates_4s; + break; + default: + cpuidle_state_table = ivt_cstates_8s; + break; + } } /* @@ -991,18 +983,18 @@ static void __init ivt_idle_state_table_update(void) */ static const unsigned int __initconst irtl_ns_units[] = { - 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 }; + 1, 32, 1024, 32768, 1048576, 33554432, 0, 0}; static unsigned long long __init irtl_2_usec(unsigned long long irtl) { - unsigned long long ns; + unsigned long long ns; - if (!irtl) - return 0; + if ( !irtl ) + return 0; - ns = irtl_ns_units[(irtl >> 10) & 0x7]; + ns = irtl_ns_units[(irtl >> 10) & 0x7]; - return (irtl & 0x3FF) * ns / 1000; + return (irtl & 0x3FF) * ns / 1000; } /* * bxt_idle_state_table_update(void) @@ -1012,43 +1004,48 @@ static unsigned long long __init irtl_2_usec(unsigned long long irtl) */ static void __init bxt_idle_state_table_update(void) { - unsigned long long msr; - unsigned int usec; - - rdmsrl(MSR_PKGC6_IRTL, msr); - usec = irtl_2_usec(msr); - if (usec) { - bxt_cstates[2].exit_latency = usec; - bxt_cstates[2].target_residency = usec; - } - - rdmsrl(MSR_PKGC7_IRTL, msr); - usec = irtl_2_usec(msr); - if (usec) { - bxt_cstates[3].exit_latency = usec; - bxt_cstates[3].target_residency = usec; - } - - rdmsrl(MSR_PKGC8_IRTL, msr); - usec = irtl_2_usec(msr); - if (usec) { - bxt_cstates[4].exit_latency = usec; - bxt_cstates[4].target_residency = usec; - } - - rdmsrl(MSR_PKGC9_IRTL, msr); - usec = irtl_2_usec(msr); - if (usec) { - bxt_cstates[5].exit_latency = usec; - bxt_cstates[5].target_residency = usec; - } - - rdmsrl(MSR_PKGC10_IRTL, msr); - usec = irtl_2_usec(msr); - if (usec) { - bxt_cstates[6].exit_latency = usec; - bxt_cstates[6].target_residency = usec; - } + unsigned long long msr; + unsigned int usec; + + rdmsrl(MSR_PKGC6_IRTL, msr); + 
usec = irtl_2_usec(msr); + if ( usec ) + { + bxt_cstates[2].exit_latency = usec; + bxt_cstates[2].target_residency = usec; + } + + rdmsrl(MSR_PKGC7_IRTL, msr); + usec = irtl_2_usec(msr); + if ( usec ) + { + bxt_cstates[3].exit_latency = usec; + bxt_cstates[3].target_residency = usec; + } + + rdmsrl(MSR_PKGC8_IRTL, msr); + usec = irtl_2_usec(msr); + if ( usec ) + { + bxt_cstates[4].exit_latency = usec; + bxt_cstates[4].target_residency = usec; + } + + rdmsrl(MSR_PKGC9_IRTL, msr); + usec = irtl_2_usec(msr); + if ( usec ) + { + bxt_cstates[5].exit_latency = usec; + bxt_cstates[5].target_residency = usec; + } + + rdmsrl(MSR_PKGC10_IRTL, msr); + usec = irtl_2_usec(msr); + if ( usec ) + { + bxt_cstates[6].exit_latency = usec; + bxt_cstates[6].target_residency = usec; + } } /* @@ -1059,33 +1056,34 @@ static void __init bxt_idle_state_table_update(void) */ static void __init sklh_idle_state_table_update(void) { - u64 msr; + u64 msr; - /* if PC10 disabled via cmdline max_cstate=7 or shallower */ - if (max_cstate <= 7) - return; + /* if PC10 disabled via cmdline max_cstate=7 or shallower */ + if ( max_cstate <= 7 ) + return; - /* if PC10 not present in CPUID.MWAIT.EDX */ - if ((mwait_substates & (MWAIT_CSTATE_MASK << 28)) == 0) - return; + /* if PC10 not present in CPUID.MWAIT.EDX */ + if ( (mwait_substates & (MWAIT_CSTATE_MASK << 28)) == 0 ) + return; - rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr); + rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr); - /* PC10 is not enabled in PKG C-state limit */ - if ((msr & 0xF) != 8) - return; + /* PC10 is not enabled in PKG C-state limit */ + if ( (msr & 0xF) != 8 ) + return; - /* if SGX is present */ - if (boot_cpu_has(X86_FEATURE_SGX)) { - rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); + /* if SGX is present */ + if ( boot_cpu_has(X86_FEATURE_SGX) ) + { + rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); - /* if SGX is enabled */ - if (msr & IA32_FEATURE_CONTROL_SGX_ENABLE) - return; - } + /* if SGX is enabled */ + if ( msr & IA32_FEATURE_CONTROL_SGX_ENABLE ) + return; + } - skl_cstates[5].flags |= CPUIDLE_FLAG_DISABLED; /* C8-SKL */ - skl_cstates[6].flags |= CPUIDLE_FLAG_DISABLED; /* C9-SKL */ + skl_cstates[5].flags |= CPUIDLE_FLAG_DISABLED; /* C8-SKL */ + skl_cstates[6].flags |= CPUIDLE_FLAG_DISABLED; /* C9-SKL */ } /* @@ -1095,177 +1093,184 @@ static void __init sklh_idle_state_table_update(void) */ static void __init mwait_idle_state_table_update(void) { - switch (boot_cpu_data.x86_model) { - case 0x3e: /* IVT */ - ivt_idle_state_table_update(); - break; - case 0x5c: /* BXT */ - case 0x7a: - bxt_idle_state_table_update(); - break; - case 0x5e: /* SKL-H */ - sklh_idle_state_table_update(); - break; - } + switch (boot_cpu_data.x86_model) + { + case 0x3e: /* IVT */ + ivt_idle_state_table_update(); + break; + case 0x5c: /* BXT */ + case 0x7a: + bxt_idle_state_table_update(); + break; + case 0x5e: /* SKL-H */ + sklh_idle_state_table_update(); + break; + } } static int __init mwait_idle_probe(void) { - unsigned int eax, ebx, ecx; - const struct x86_cpu_id *id = x86_match_cpu(intel_idle_ids); + unsigned int eax, ebx, ecx; + const struct x86_cpu_id *id = x86_match_cpu(intel_idle_ids); - if (!id) { - pr_debug(PREFIX "does not run on family %d model %d\n", - boot_cpu_data.x86, boot_cpu_data.x86_model); - return -ENODEV; - } + if ( !id ) + { + pr_debug(PREFIX "does not run on family %d model %d\n", + boot_cpu_data.x86, boot_cpu_data.x86_model); + return -ENODEV; + } - if (!boot_cpu_has(X86_FEATURE_MONITOR)) { - pr_debug(PREFIX "Please enable MWAIT in BIOS SETUP\n"); - return -ENODEV; - } + if ( 
!boot_cpu_has(X86_FEATURE_MONITOR) ) + { + pr_debug(PREFIX "Please enable MWAIT in BIOS SETUP\n"); + return -ENODEV; + } - if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) - return -ENODEV; + if ( boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF ) + return -ENODEV; - cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); + cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); - if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || - !(ecx & CPUID5_ECX_INTERRUPT_BREAK) || - !mwait_substates) - return -ENODEV; + if ( !(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || + !(ecx & CPUID5_ECX_INTERRUPT_BREAK) || !mwait_substates ) + return -ENODEV; - if (!max_cstate || !opt_mwait_idle) { - pr_debug(PREFIX "disabled\n"); - return -EPERM; - } + if ( !max_cstate || !opt_mwait_idle ) + { + pr_debug(PREFIX "disabled\n"); + return -EPERM; + } - pr_debug(PREFIX "MWAIT substates: %#x\n", mwait_substates); + pr_debug(PREFIX "MWAIT substates: %#x\n", mwait_substates); - icpu = id->driver_data; - cpuidle_state_table = icpu->state_table; + icpu = id->driver_data; + cpuidle_state_table = icpu->state_table; - if (boot_cpu_has(X86_FEATURE_ARAT)) - lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE; + if ( boot_cpu_has(X86_FEATURE_ARAT) ) + lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE; - pr_debug(PREFIX "v" MWAIT_IDLE_VERSION " model %#x\n", - boot_cpu_data.x86_model); + pr_debug(PREFIX "v" MWAIT_IDLE_VERSION " model %#x\n", + boot_cpu_data.x86_model); - pr_debug(PREFIX "lapic_timer_reliable_states %#x\n", - lapic_timer_reliable_states); + pr_debug(PREFIX "lapic_timer_reliable_states %#x\n", + lapic_timer_reliable_states); - mwait_idle_state_table_update(); + mwait_idle_state_table_update(); - return 0; + return 0; } -static int mwait_idle_cpu_init(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int mwait_idle_cpu_init(struct notifier_block *nfb, unsigned long action, + void *hcpu) { - unsigned int cpu = (unsigned long)hcpu, cstate; - struct acpi_processor_power *dev = processor_powers[cpu]; - - switch (action) { - default: - return NOTIFY_DONE; - - case CPU_UP_PREPARE: - cpuidle_init_cpu(cpu); - return NOTIFY_DONE; - - case CPU_ONLINE: - if (!dev) - return NOTIFY_DONE; - break; - } - - dev->count = 1; - - for (cstate = 0; cpuidle_state_table[cstate].target_residency; ++cstate) { - unsigned int num_substates, hint, state; - struct acpi_processor_cx *cx; - - hint = flg2MWAIT(cpuidle_state_table[cstate].flags); - state = MWAIT_HINT2CSTATE(hint) + 1; - - if (state > max_cstate) { - printk(PREFIX "max C-state %u reached\n", max_cstate); - break; - } - - /* Number of sub-states for this state in CPUID.MWAIT. */ - num_substates = (mwait_substates >> (state * 4)) - & MWAIT_SUBSTATE_MASK; - /* If NO sub-states for this state in CPUID, skip it. 
*/ - if (num_substates == 0) - continue; - - /* if state marked as disabled, skip it */ - if (cpuidle_state_table[cstate].flags & - CPUIDLE_FLAG_DISABLED) { - printk(XENLOG_DEBUG PREFIX "state %s is disabled", - cpuidle_state_table[cstate].name); - continue; - } - - if (dev->count >= ACPI_PROCESSOR_MAX_POWER) { - printk(PREFIX "max C-state count of %u reached\n", - ACPI_PROCESSOR_MAX_POWER); - break; - } - - if (state > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) && - !pm_idle_save) - setup_clear_cpu_cap(X86_FEATURE_TSC_RELIABLE); - - cx = dev->states + dev->count; - cx->type = state; - cx->address = hint; - cx->entry_method = ACPI_CSTATE_EM_FFH; - cx->latency = cpuidle_state_table[cstate].exit_latency; - cx->target_residency = - cpuidle_state_table[cstate].target_residency; - - dev->count++; - } - - if (icpu->auto_demotion_disable_flags) - on_selected_cpus(cpumask_of(cpu), auto_demotion_disable, NULL, 1); - - if (icpu->byt_auto_demotion_disable_flag) - on_selected_cpus(cpumask_of(cpu), byt_auto_demotion_disable, NULL, 1); - - if (icpu->disable_promotion_to_c1e) - on_selected_cpus(cpumask_of(cpu), c1e_promotion_disable, NULL, 1); - - return NOTIFY_DONE; + unsigned int cpu = (unsigned long)hcpu, cstate; + struct acpi_processor_power *dev = processor_powers[cpu]; + + switch (action) + { + default: + return NOTIFY_DONE; + + case CPU_UP_PREPARE: + cpuidle_init_cpu(cpu); + return NOTIFY_DONE; + + case CPU_ONLINE: + if ( !dev ) + return NOTIFY_DONE; + break; + } + + dev->count = 1; + + for ( cstate = 0; cpuidle_state_table[cstate].target_residency; ++cstate ) + { + unsigned int num_substates, hint, state; + struct acpi_processor_cx *cx; + + hint = flg2MWAIT(cpuidle_state_table[cstate].flags); + state = MWAIT_HINT2CSTATE(hint) + 1; + + if ( state > max_cstate ) + { + printk(PREFIX "max C-state %u reached\n", max_cstate); + break; + } + + /* Number of sub-states for this state in CPUID.MWAIT. */ + num_substates = (mwait_substates >> (state * 4)) & MWAIT_SUBSTATE_MASK; + /* If NO sub-states for this state in CPUID, skip it. 
*/ + if ( num_substates == 0 ) + continue; + + /* if state marked as disabled, skip it */ + if ( cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_DISABLED ) + { + printk(XENLOG_DEBUG PREFIX "state %s is disabled", + cpuidle_state_table[cstate].name); + continue; + } + + if ( dev->count >= ACPI_PROCESSOR_MAX_POWER ) + { + printk(PREFIX "max C-state count of %u reached\n", + ACPI_PROCESSOR_MAX_POWER); + break; + } + + if ( state > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) && + !pm_idle_save ) + setup_clear_cpu_cap(X86_FEATURE_TSC_RELIABLE); + + cx = dev->states + dev->count; + cx->type = state; + cx->address = hint; + cx->entry_method = ACPI_CSTATE_EM_FFH; + cx->latency = cpuidle_state_table[cstate].exit_latency; + cx->target_residency = cpuidle_state_table[cstate].target_residency; + + dev->count++; + } + + if ( icpu->auto_demotion_disable_flags ) + on_selected_cpus(cpumask_of(cpu), auto_demotion_disable, NULL, 1); + + if ( icpu->byt_auto_demotion_disable_flag ) + on_selected_cpus(cpumask_of(cpu), byt_auto_demotion_disable, NULL, 1); + + if ( icpu->disable_promotion_to_c1e ) + on_selected_cpus(cpumask_of(cpu), c1e_promotion_disable, NULL, 1); + + return NOTIFY_DONE; } int __init mwait_idle_init(struct notifier_block *nfb) { - int err; - - if (pm_idle_save) - return -ENODEV; - - err = mwait_idle_probe(); - if (!err && !boot_cpu_has(X86_FEATURE_ARAT)) { - hpet_broadcast_init(); - if (xen_cpuidle < 0 && !hpet_broadcast_is_available()) - err = -ENODEV; - else if(!lapic_timer_init()) - err = -EINVAL; - if (err) - pr_debug(PREFIX "not used (%d)\n", err); - } - if (!err) { - nfb->notifier_call = mwait_idle_cpu_init; - mwait_idle_cpu_init(nfb, CPU_UP_PREPARE, NULL); - - pm_idle_save = pm_idle; - pm_idle = mwait_idle; - dead_idle = acpi_dead_idle; - } - - return err; + int err; + + if ( pm_idle_save ) + return -ENODEV; + + err = mwait_idle_probe(); + if ( !err && !boot_cpu_has(X86_FEATURE_ARAT) ) + { + hpet_broadcast_init(); + if ( xen_cpuidle < 0 && !hpet_broadcast_is_available() ) + err = -ENODEV; + else if ( !lapic_timer_init() ) + err = -EINVAL; + if ( err ) + pr_debug(PREFIX "not used (%d)\n", err); + } + if ( !err ) + { + nfb->notifier_call = mwait_idle_cpu_init; + mwait_idle_cpu_init(nfb, CPU_UP_PREPARE, NULL); + + pm_idle_save = pm_idle; + pm_idle = mwait_idle; + dead_idle = acpi_dead_idle; + } + + return err; } diff --git a/xen/arch/x86/cpu/shanghai.c b/xen/arch/x86/cpu/shanghai.c index 9156c850fe..ea3009dffa 100644 --- a/xen/arch/x86/cpu/shanghai.c +++ b/xen/arch/x86/cpu/shanghai.c @@ -16,9 +16,9 @@ static void init_shanghai(struct cpuinfo_x86 *c) } static const struct cpu_dev shanghai_cpu_dev = { - .c_vendor = " Shang", - .c_ident = {" Shanghai "}, - .c_init = init_shanghai, + .c_vendor = " Shang", + .c_ident = {" Shanghai "}, + .c_init = init_shanghai, }; int __init shanghai_init_cpu(void) diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c index 8324d62f11..e1e7fbdbae 100644 --- a/xen/arch/x86/cpu/vpmu.c +++ b/xen/arch/x86/cpu/vpmu.c @@ -108,8 +108,8 @@ void vpmu_lvtpc_update(uint32_t val) apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc); } -int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, - uint64_t supported, bool_t is_write) +int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, uint64_t supported, + bool_t is_write) { struct vcpu *curr = current; struct vpmu_struct *vpmu; @@ -121,9 +121,8 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, * profiling the whole system. 
*/ if ( likely(vpmu_mode == XENPMU_MODE_OFF) || - ((vpmu_mode & XENPMU_MODE_ALL) && - !is_hardware_domain(curr->domain)) ) - goto nop; + ((vpmu_mode & XENPMU_MODE_ALL) && !is_hardware_domain(curr->domain)) ) + goto nop; vpmu = vcpu_vpmu(curr); ops = vpmu->arch_vpmu_ops; @@ -143,7 +142,7 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, * (and unload) it again. */ if ( !has_vlapic(curr->domain) && vpmu->xenpmu_data && - vpmu_is_set(vpmu, VPMU_CACHED) ) + vpmu_is_set(vpmu, VPMU_CACHED) ) { vpmu_set(vpmu, VPMU_CONTEXT_SAVE); ops->arch_vpmu_save(curr, 0); @@ -152,7 +151,7 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, return ret; - nop: +nop: if ( !is_write && (msr != MSR_IA32_MISC_ENABLE) ) *msr_content = 0; @@ -318,7 +317,7 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs) vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC); - switch ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) ) + switch (GET_APIC_DELIVERY_MODE(vlapic_lvtpc)) { case APIC_MODE_FIXED: vlapic_set_irq(vlapic, vlapic_lvtpc & APIC_VECTOR_MASK, 0); @@ -354,7 +353,7 @@ void vpmu_save(struct vcpu *v) int pcpu = smp_processor_id(); if ( !vpmu_are_all_set(vpmu, VPMU_CONTEXT_ALLOCATED | VPMU_CONTEXT_LOADED) ) - return; + return; vpmu->last_pcpu = pcpu; per_cpu(last_vcpu, pcpu) = v; @@ -387,11 +386,11 @@ int vpmu_load(struct vcpu *v, bool_t from_guest) */ if ( vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) ) { - on_selected_cpus(cpumask_of(vpmu->last_pcpu), - vpmu_save_force, (void *)v, 1); + on_selected_cpus(cpumask_of(vpmu->last_pcpu), vpmu_save_force, + (void *)v, 1); vpmu_reset(vpmu, VPMU_CONTEXT_LOADED); } - } + } /* Prevent forced context save from remote CPU */ local_irq_disable(); @@ -414,7 +413,7 @@ int vpmu_load(struct vcpu *v, bool_t from_guest) /* Only when PMU is counting, we load PMU context immediately. */ if ( !vpmu_is_set(vpmu, VPMU_RUNNING) || (!has_vlapic(vpmu_vcpu(vpmu)->domain) && - vpmu_is_set(vpmu, VPMU_CACHED)) ) + vpmu_is_set(vpmu, VPMU_CACHED)) ) return 0; if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load ) @@ -450,7 +449,7 @@ static int vpmu_arch_initialise(struct vcpu *v) if ( !vpmu_available(v) ) return 0; - switch ( vendor ) + switch (vendor) { case X86_VENDOR_AMD: ret = svm_vpmu_initialise(v); @@ -464,7 +463,8 @@ static int vpmu_arch_initialise(struct vcpu *v) if ( vpmu_mode != XENPMU_MODE_OFF ) { printk(XENLOG_G_WARNING "VPMU: Unknown CPU vendor %d. " - "Disabling VPMU\n", vendor); + "Disabling VPMU\n", + vendor); opt_vpmu_enabled = 0; vpmu_mode = XENPMU_MODE_OFF; } @@ -490,13 +490,12 @@ static void get_vpmu(struct vcpu *v) * so we don't need to include it in the count. 
*/ if ( !is_hardware_domain(v->domain) && - (vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV)) ) + (vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV)) ) { vpmu_count++; vpmu_set(vcpu_vpmu(v), VPMU_AVAILABLE); } - else if ( is_hardware_domain(v->domain) && - (vpmu_mode != XENPMU_MODE_OFF) ) + else if ( is_hardware_domain(v->domain) && (vpmu_mode != XENPMU_MODE_OFF) ) vpmu_set(vcpu_vpmu(v), VPMU_AVAILABLE); spin_unlock(&vpmu_lock); @@ -515,11 +514,10 @@ static void put_vpmu(struct vcpu *v) vpmu_count--; vpmu_reset(vcpu_vpmu(v), VPMU_AVAILABLE); } - else if ( is_hardware_domain(v->domain) && - (vpmu_mode != XENPMU_MODE_OFF) ) + else if ( is_hardware_domain(v->domain) && (vpmu_mode != XENPMU_MODE_OFF) ) vpmu_reset(vcpu_vpmu(v), VPMU_AVAILABLE); - out: +out: spin_unlock(&vpmu_lock); } @@ -557,8 +555,7 @@ static void vpmu_arch_destroy(struct vcpu *v) */ if ( cpu_online(vpmu->last_pcpu) && per_cpu(last_vcpu, vpmu->last_pcpu) == v ) - on_selected_cpus(cpumask_of(vpmu->last_pcpu), - vpmu_clear_last, v, 1); + on_selected_cpus(cpumask_of(vpmu->last_pcpu), vpmu_clear_last, v, 1); if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy ) { @@ -570,7 +567,7 @@ static void vpmu_arch_destroy(struct vcpu *v) on_selected_cpus(cpumask_of(vcpu_vpmu(v)->last_pcpu), vpmu_save_force, v, 1); - vpmu->arch_vpmu_ops->arch_vpmu_destroy(v); + vpmu->arch_vpmu_ops->arch_vpmu_destroy(v); } } @@ -692,7 +689,7 @@ long do_xenpmu_op(unsigned int op, XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg) return ret; /* Check major version when parameters are specified */ - switch ( op ) + switch (op) { case XENPMU_mode_set: case XENPMU_feature_set: @@ -705,7 +702,7 @@ long do_xenpmu_op(unsigned int op, XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg) return -EINVAL; } - switch ( op ) + switch (op) { case XENPMU_mode_set: { @@ -725,9 +722,8 @@ long do_xenpmu_op(unsigned int op, XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg) * We can always safely switch between XENPMU_MODE_SELF and * XENPMU_MODE_HV while other VPMUs are active. 
*/ - if ( (vpmu_count == 0) || - ((vpmu_mode ^ pmu_params.val) == - (XENPMU_MODE_SELF | XENPMU_MODE_HV)) ) + if ( (vpmu_count == 0) || ((vpmu_mode ^ pmu_params.val) == + (XENPMU_MODE_SELF | XENPMU_MODE_HV)) ) vpmu_mode = pmu_params.val; else if ( vpmu_mode != pmu_params.val ) { @@ -754,9 +750,9 @@ long do_xenpmu_op(unsigned int op, XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg) break; case XENPMU_feature_set: - if ( pmu_params.val & ~(XENPMU_FEATURE_INTEL_BTS | - XENPMU_FEATURE_IPC_ONLY | - XENPMU_FEATURE_ARCH_ONLY)) + if ( pmu_params.val & + ~(XENPMU_FEATURE_INTEL_BTS | XENPMU_FEATURE_IPC_ONLY | + XENPMU_FEATURE_ARCH_ONLY) ) return -EINVAL; spin_lock(&vpmu_lock); @@ -812,7 +808,7 @@ long do_xenpmu_op(unsigned int op, XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg) vpmu_set(vpmu, VPMU_CACHED); ret = -EIO; } - break ; + break; default: ret = -EINVAL; @@ -821,8 +817,8 @@ long do_xenpmu_op(unsigned int op, XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg) return ret; } -static int cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; struct vcpu *vcpu = per_cpu(last_vcpu, cpu); @@ -844,9 +840,7 @@ static int cpu_callback( return NOTIFY_DONE; } -static struct notifier_block cpu_nfb = { - .notifier_call = cpu_callback -}; +static struct notifier_block cpu_nfb = {.notifier_call = cpu_callback}; static int __init vpmu_init(void) { @@ -867,19 +861,20 @@ static int __init vpmu_init(void) return 0; } - switch ( vendor ) + switch (vendor) { case X86_VENDOR_AMD: if ( amd_vpmu_init() ) - vpmu_mode = XENPMU_MODE_OFF; + vpmu_mode = XENPMU_MODE_OFF; break; case X86_VENDOR_INTEL: if ( core2_vpmu_init() ) - vpmu_mode = XENPMU_MODE_OFF; + vpmu_mode = XENPMU_MODE_OFF; break; default: printk(XENLOG_WARNING "VPMU: Unknown CPU vendor: %d. " - "Turning VPMU off.\n", vendor); + "Turning VPMU off.\n", + vendor); vpmu_mode = XENPMU_MODE_OFF; break; } @@ -887,8 +882,8 @@ static int __init vpmu_init(void) if ( vpmu_mode != XENPMU_MODE_OFF ) { register_cpu_notifier(&cpu_nfb); - printk(XENLOG_INFO "VPMU: version " __stringify(XENPMU_VER_MAJ) "." - __stringify(XENPMU_VER_MIN) "\n"); + printk(XENLOG_INFO "VPMU: version " __stringify( + XENPMU_VER_MAJ) "." 
__stringify(XENPMU_VER_MIN) "\n"); } else opt_vpmu_enabled = 0; diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c index 5efc39b4eb..430c616ef4 100644 --- a/xen/arch/x86/cpu/vpmu_amd.c +++ b/xen/arch/x86/cpu/vpmu_amd.c @@ -30,14 +30,14 @@ #include #include -#define MSR_F10H_EVNTSEL_GO_SHIFT 40 -#define MSR_F10H_EVNTSEL_EN_SHIFT 22 -#define MSR_F10H_COUNTER_LENGTH 48 +#define MSR_F10H_EVNTSEL_GO_SHIFT 40 +#define MSR_F10H_EVNTSEL_EN_SHIFT 22 +#define MSR_F10H_COUNTER_LENGTH 48 #define is_guest_mode(msr) ((msr) & (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT)) #define is_pmu_enabled(msr) ((msr) & (1ULL << MSR_F10H_EVNTSEL_EN_SHIFT)) #define set_guest_mode(msr) (msr |= (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT)) -#define is_overflowed(msr) (!((msr) & (1ULL << (MSR_F10H_COUNTER_LENGTH-1)))) +#define is_overflowed(msr) (!((msr) & (1ULL << (MSR_F10H_COUNTER_LENGTH - 1)))) static unsigned int __read_mostly num_counters; static const u32 __read_mostly *counters; @@ -47,56 +47,41 @@ static bool_t __read_mostly k7_counters_mirrored; /* Total size of PMU registers block (copied to/from PV(H) guest) */ static unsigned int __read_mostly regs_sz; -#define F10H_NUM_COUNTERS 4 -#define F15H_NUM_COUNTERS 6 -#define MAX_NUM_COUNTERS F15H_NUM_COUNTERS +#define F10H_NUM_COUNTERS 4 +#define F15H_NUM_COUNTERS 6 +#define MAX_NUM_COUNTERS F15H_NUM_COUNTERS /* PMU Counter MSRs. */ -static const u32 AMD_F10H_COUNTERS[] = { - MSR_K7_PERFCTR0, - MSR_K7_PERFCTR1, - MSR_K7_PERFCTR2, - MSR_K7_PERFCTR3 -}; +static const u32 AMD_F10H_COUNTERS[] = {MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, + MSR_K7_PERFCTR2, MSR_K7_PERFCTR3}; /* PMU Control MSRs. */ -static const u32 AMD_F10H_CTRLS[] = { - MSR_K7_EVNTSEL0, - MSR_K7_EVNTSEL1, - MSR_K7_EVNTSEL2, - MSR_K7_EVNTSEL3 -}; +static const u32 AMD_F10H_CTRLS[] = {MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, + MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3}; static const u32 AMD_F15H_COUNTERS[] = { - MSR_AMD_FAM15H_PERFCTR0, - MSR_AMD_FAM15H_PERFCTR1, - MSR_AMD_FAM15H_PERFCTR2, - MSR_AMD_FAM15H_PERFCTR3, - MSR_AMD_FAM15H_PERFCTR4, - MSR_AMD_FAM15H_PERFCTR5 -}; + MSR_AMD_FAM15H_PERFCTR0, MSR_AMD_FAM15H_PERFCTR1, MSR_AMD_FAM15H_PERFCTR2, + MSR_AMD_FAM15H_PERFCTR3, MSR_AMD_FAM15H_PERFCTR4, MSR_AMD_FAM15H_PERFCTR5}; static const u32 AMD_F15H_CTRLS[] = { - MSR_AMD_FAM15H_EVNTSEL0, - MSR_AMD_FAM15H_EVNTSEL1, - MSR_AMD_FAM15H_EVNTSEL2, - MSR_AMD_FAM15H_EVNTSEL3, - MSR_AMD_FAM15H_EVNTSEL4, - MSR_AMD_FAM15H_EVNTSEL5 -}; + MSR_AMD_FAM15H_EVNTSEL0, MSR_AMD_FAM15H_EVNTSEL1, MSR_AMD_FAM15H_EVNTSEL2, + MSR_AMD_FAM15H_EVNTSEL3, MSR_AMD_FAM15H_EVNTSEL4, MSR_AMD_FAM15H_EVNTSEL5}; /* Bits [63:42], [39:36], 21 and 19 are reserved */ -#define CTRL_RSVD_MASK ((-1ULL & (~((1ULL << 42) - 1))) | \ - (0xfULL << 36) | (1ULL << 21) | (1ULL << 19)) +#define CTRL_RSVD_MASK \ + ((-1ULL & (~((1ULL << 42) - 1))) | (0xfULL << 36) | (1ULL << 21) | \ + (1ULL << 19)) static uint64_t __read_mostly ctrl_rsvd[MAX_NUM_COUNTERS]; /* Use private context as a flag for MSR bitmap */ -#define msr_bitmap_on(vpmu) do { \ - (vpmu)->priv_context = (void *)-1L; \ - } while (0) -#define msr_bitmap_off(vpmu) do { \ - (vpmu)->priv_context = NULL; \ - } while (0) +#define msr_bitmap_on(vpmu) \ + do { \ + (vpmu)->priv_context = (void *)-1L; \ + } while ( 0 ) +#define msr_bitmap_off(vpmu) \ + do { \ + (vpmu)->priv_context = NULL; \ + } while ( 0 ) #define is_msr_bitmap_on(vpmu) ((vpmu)->priv_context != NULL) static inline int get_pmu_reg_type(u32 addr, unsigned int *idx) @@ -114,10 +99,10 @@ static inline int get_pmu_reg_type(u32 addr, unsigned int *idx) } if ( (addr >= 
MSR_AMD_FAM15H_EVNTSEL0) && - (addr <= MSR_AMD_FAM15H_PERFCTR5 ) ) + (addr <= MSR_AMD_FAM15H_PERFCTR5) ) { *idx = (addr - MSR_AMD_FAM15H_EVNTSEL0) >> 1; - if (addr & 1) + if ( addr & 1 ) return MSR_TYPE_COUNTER; else return MSR_TYPE_CTRL; @@ -129,7 +114,7 @@ static inline int get_pmu_reg_type(u32 addr, unsigned int *idx) static inline u32 get_fam15h_addr(u32 addr) { - switch ( addr ) + switch (addr) { case MSR_K7_PERFCTR0: return MSR_AMD_FAM15H_PERFCTR0; @@ -285,7 +270,7 @@ static inline void context_save(struct vcpu *v) rdmsrl(counters[i], counter_regs[i]); } -static int amd_vpmu_save(struct vcpu *v, bool_t to_guest) +static int amd_vpmu_save(struct vcpu *v, bool_t to_guest) { struct vpmu_struct *vpmu = vcpu_vpmu(v); unsigned int i; @@ -332,19 +317,19 @@ static void context_update(unsigned int msr, u64 msr_content) uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls); if ( k7_counters_mirrored && - ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)) ) + ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)) ) { msr = get_fam15h_addr(msr); } for ( i = 0; i < num_counters; i++ ) { - if ( msr == ctrls[i] ) - { - ctrl_regs[i] = msr_content; - return; - } - else if (msr == counters[i] ) + if ( msr == ctrls[i] ) + { + ctrl_regs[i] = msr_content; + return; + } + else if ( msr == counters[i] ) { counter_regs[i] = msr_content; return; @@ -362,7 +347,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content, ASSERT(!supported); - if ( (type == MSR_TYPE_CTRL ) && + if ( (type == MSR_TYPE_CTRL) && ((msr_content & CTRL_RSVD_MASK) != ctrl_rsvd[idx]) ) return -EINVAL; @@ -374,29 +359,29 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content, } /* check if the first counter is enabled */ - if ( (type == MSR_TYPE_CTRL) && - is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu, VPMU_RUNNING) ) + if ( (type == MSR_TYPE_CTRL) && is_pmu_enabled(msr_content) && + !vpmu_is_set(vpmu, VPMU_RUNNING) ) { if ( !acquire_pmu_ownership(PMU_OWNER_HVM) ) return 0; vpmu_set(vpmu, VPMU_RUNNING); if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) ) - amd_vpmu_set_msr_bitmap(v); + amd_vpmu_set_msr_bitmap(v); } /* stop saving & restore if guest stops first counter */ - if ( (type == MSR_TYPE_CTRL) && - (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) ) + if ( (type == MSR_TYPE_CTRL) && (is_pmu_enabled(msr_content) == 0) && + vpmu_is_set(vpmu, VPMU_RUNNING) ) { vpmu_reset(vpmu, VPMU_RUNNING); if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) ) - amd_vpmu_unset_msr_bitmap(v); + amd_vpmu_unset_msr_bitmap(v); release_pmu_ownership(PMU_OWNER_HVM); } - if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) - || vpmu_is_set(vpmu, VPMU_FROZEN) ) + if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) || + vpmu_is_set(vpmu, VPMU_FROZEN) ) { context_load(v); vpmu_set(vpmu, VPMU_CONTEXT_LOADED); @@ -416,8 +401,8 @@ static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content) struct vcpu *v = current; struct vpmu_struct *vpmu = vcpu_vpmu(v); - if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) - || vpmu_is_set(vpmu, VPMU_FROZEN) ) + if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) || + vpmu_is_set(vpmu, VPMU_FROZEN) ) { context_load(v); vpmu_set(vpmu, VPMU_CONTEXT_LOADED); @@ -458,8 +443,8 @@ static void amd_vpmu_dump(const struct vcpu *v) printk(" VPMU state: 0x%x ", vpmu->flags); if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) ) { - printk("\n"); - return; + printk("\n"); + return; } printk("("); @@ -482,8 +467,8 @@ static void amd_vpmu_dump(const struct vcpu *v) rdmsrl(ctrls[i], ctrl); rdmsrl(counters[i], cntr); 
printk(" %#x: %#lx (%#lx in HW) %#x: %#lx (%#lx in HW)\n", - ctrls[i], ctrl_regs[i], ctrl, - counters[i], counter_regs[i], cntr); + ctrls[i], ctrl_regs[i], ctrl, counters[i], counter_regs[i], + cntr); } } @@ -494,8 +479,7 @@ static const struct arch_vpmu_ops amd_vpmu_ops = { .arch_vpmu_destroy = amd_vpmu_destroy, .arch_vpmu_save = amd_vpmu_save, .arch_vpmu_load = amd_vpmu_load, - .arch_vpmu_dump = amd_vpmu_dump -}; + .arch_vpmu_dump = amd_vpmu_dump}; int svm_vpmu_initialise(struct vcpu *v) { @@ -511,7 +495,8 @@ int svm_vpmu_initialise(struct vcpu *v) ctxt = xmalloc_bytes(sizeof(*ctxt) + regs_sz); if ( !ctxt ) { - printk(XENLOG_G_WARNING "Insufficient memory for PMU, " + printk(XENLOG_G_WARNING + "Insufficient memory for PMU, " " PMU feature is unavailable on domain %d vcpu %d.\n", v->vcpu_id, v->domain->domain_id); return -ENOMEM; @@ -542,7 +527,7 @@ int __init amd_vpmu_init(void) { unsigned int i; - switch ( current_cpu_data.x86 ) + switch (current_cpu_data.x86) { case 0x15: num_counters = F15H_NUM_COUNTERS; @@ -565,8 +550,8 @@ int __init amd_vpmu_init(void) return -EINVAL; } - if ( sizeof(struct xen_pmu_data) + - 2 * sizeof(uint64_t) * num_counters > PAGE_SIZE ) + if ( sizeof(struct xen_pmu_data) + 2 * sizeof(uint64_t) * num_counters > + PAGE_SIZE ) { printk(XENLOG_WARNING "VPMU: Register bank does not fit into VPMU shared page\n"); @@ -585,4 +570,3 @@ int __init amd_vpmu_init(void) return 0; } - diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c index 6e27f6ec8e..0dd9dbddbd 100644 --- a/xen/arch/x86/cpu/vpmu_intel.c +++ b/xen/arch/x86/cpu/vpmu_intel.c @@ -43,28 +43,31 @@ * cpuid 0xa - Architectural Performance Monitoring Leaf * Register eax */ -#define PMU_VERSION_SHIFT 0 /* Version ID */ -#define PMU_VERSION_BITS 8 /* 8 bits 0..7 */ -#define PMU_VERSION_MASK (((1 << PMU_VERSION_BITS) - 1) << PMU_VERSION_SHIFT) - -#define PMU_GENERAL_NR_SHIFT 8 /* Number of general pmu registers */ -#define PMU_GENERAL_NR_BITS 8 /* 8 bits 8..15 */ -#define PMU_GENERAL_NR_MASK (((1 << PMU_GENERAL_NR_BITS) - 1) << PMU_GENERAL_NR_SHIFT) - -#define PMU_GENERAL_WIDTH_SHIFT 16 /* Width of general pmu registers */ -#define PMU_GENERAL_WIDTH_BITS 8 /* 8 bits 16..23 */ -#define PMU_GENERAL_WIDTH_MASK (((1 << PMU_GENERAL_WIDTH_BITS) - 1) << PMU_GENERAL_WIDTH_SHIFT) +#define PMU_VERSION_SHIFT 0 /* Version ID */ +#define PMU_VERSION_BITS 8 /* 8 bits 0..7 */ +#define PMU_VERSION_MASK (((1 << PMU_VERSION_BITS) - 1) << PMU_VERSION_SHIFT) + +#define PMU_GENERAL_NR_SHIFT 8 /* Number of general pmu registers */ +#define PMU_GENERAL_NR_BITS 8 /* 8 bits 8..15 */ +#define PMU_GENERAL_NR_MASK \ + (((1 << PMU_GENERAL_NR_BITS) - 1) << PMU_GENERAL_NR_SHIFT) + +#define PMU_GENERAL_WIDTH_SHIFT 16 /* Width of general pmu registers */ +#define PMU_GENERAL_WIDTH_BITS 8 /* 8 bits 16..23 */ +#define PMU_GENERAL_WIDTH_MASK \ + (((1 << PMU_GENERAL_WIDTH_BITS) - 1) << PMU_GENERAL_WIDTH_SHIFT) /* Register edx */ -#define PMU_FIXED_NR_SHIFT 0 /* Number of fixed pmu registers */ -#define PMU_FIXED_NR_BITS 5 /* 5 bits 0..4 */ -#define PMU_FIXED_NR_MASK (((1 << PMU_FIXED_NR_BITS) -1) << PMU_FIXED_NR_SHIFT) +#define PMU_FIXED_NR_SHIFT 0 /* Number of fixed pmu registers */ +#define PMU_FIXED_NR_BITS 5 /* 5 bits 0..4 */ +#define PMU_FIXED_NR_MASK (((1 << PMU_FIXED_NR_BITS) - 1) << PMU_FIXED_NR_SHIFT) -#define PMU_FIXED_WIDTH_SHIFT 5 /* Width of fixed pmu registers */ -#define PMU_FIXED_WIDTH_BITS 8 /* 8 bits 5..12 */ -#define PMU_FIXED_WIDTH_MASK (((1 << PMU_FIXED_WIDTH_BITS) -1) << PMU_FIXED_WIDTH_SHIFT) +#define 
PMU_FIXED_WIDTH_SHIFT 5 /* Width of fixed pmu registers */ +#define PMU_FIXED_WIDTH_BITS 8 /* 8 bits 5..12 */ +#define PMU_FIXED_WIDTH_MASK \ + (((1 << PMU_FIXED_WIDTH_BITS) - 1) << PMU_FIXED_WIDTH_SHIFT) /* Alias registers (0x4c1) for full-width writes to PMCs */ -#define MSR_PMC_ALIAS_MASK (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_A_PERFCTR0)) +#define MSR_PMC_ALIAS_MASK (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_A_PERFCTR0)) static bool_t __read_mostly full_width_write; /* @@ -75,14 +78,15 @@ static bool_t __read_mostly full_width_write; #define FIXED_CTR_CTRL_MASK ((1 << FIXED_CTR_CTRL_BITS) - 1) #define FIXED_CTR_CTRL_ANYTHREAD_MASK 0x4 -#define ARCH_CNTR_ENABLED (1ULL << 22) +#define ARCH_CNTR_ENABLED (1ULL << 22) #define ARCH_CNTR_PIN_CONTROL (1ULL << 19) /* Number of general-purpose and fixed performance counters */ static unsigned int __read_mostly arch_pmc_cnt, fixed_pmc_cnt; /* Masks used for testing whether and MSR is valid */ -#define ARCH_CTRL_MASK (~((1ull << 32) - 1) | (1ull << 21) | ARCH_CNTR_PIN_CONTROL) +#define ARCH_CTRL_MASK \ + (~((1ull << 32) - 1) | (1ull << 21) | ARCH_CNTR_PIN_CONTROL) static uint64_t __read_mostly fixed_ctrl_mask, fixed_counters_mask; static uint64_t __read_mostly global_ovf_ctrl_mask, global_ctrl_mask; @@ -90,8 +94,8 @@ static uint64_t __read_mostly global_ovf_ctrl_mask, global_ctrl_mask; static unsigned int __read_mostly regs_sz; /* Offset into context of the beginning of PMU register block */ static const unsigned int regs_off = - sizeof(((struct xen_pmu_intel_ctxt *)0)->fixed_counters) + - sizeof(((struct xen_pmu_intel_ctxt *)0)->arch_counters); + sizeof(((struct xen_pmu_intel_ctxt *)0)->fixed_counters) + + sizeof(((struct xen_pmu_intel_ctxt *)0)->arch_counters); /* * QUIRK to workaround an issue on various family 6 cpus. @@ -110,7 +114,7 @@ static void check_pmc_quirk(void) if ( current_cpu_data.x86 == 6 ) is_pmc_quirk = 1; else - is_pmc_quirk = 0; + is_pmc_quirk = 0; } static void handle_pmc_quirk(u64 msr_content) @@ -181,7 +185,7 @@ static int is_core2_vpmu_msr(u32 msr_index, int *type, int *index) { u32 msr_index_pmc; - switch ( msr_index ) + switch (msr_index) { case MSR_CORE_PERF_FIXED_CTR_CTRL: case MSR_IA32_DS_AREA: @@ -391,9 +395,8 @@ static int core2_vpmu_verify(struct vcpu *v) } if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) && - !(is_hvm_vcpu(v) - ? is_canonical_address(core2_vpmu_cxt->ds_area) - : __addr_ok(core2_vpmu_cxt->ds_area)) ) + !(is_hvm_vcpu(v) ? 
is_canonical_address(core2_vpmu_cxt->ds_area) + : __addr_ok(core2_vpmu_cxt->ds_area)) ) return -EINVAL; if ( (core2_vpmu_cxt->global_ctrl & enabled_cntrs) || @@ -461,17 +464,16 @@ static int core2_vpmu_alloc_resource(struct vcpu *v) goto out_err; } - core2_vpmu_cxt = xzalloc_bytes(sizeof(*core2_vpmu_cxt) + - sizeof(uint64_t) * fixed_pmc_cnt + - sizeof(struct xen_pmu_cntr_pair) * - arch_pmc_cnt); + core2_vpmu_cxt = xzalloc_bytes( + sizeof(*core2_vpmu_cxt) + sizeof(uint64_t) * fixed_pmc_cnt + + sizeof(struct xen_pmu_cntr_pair) * arch_pmc_cnt); p = xzalloc(uint64_t); if ( !core2_vpmu_cxt || !p ) goto out_err; core2_vpmu_cxt->fixed_counters = sizeof(*core2_vpmu_cxt); - core2_vpmu_cxt->arch_counters = core2_vpmu_cxt->fixed_counters + - sizeof(uint64_t) * fixed_pmc_cnt; + core2_vpmu_cxt->arch_counters = + core2_vpmu_cxt->fixed_counters + sizeof(uint64_t) * fixed_pmc_cnt; vpmu->context = core2_vpmu_cxt; vpmu->priv_context = p; @@ -543,7 +545,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content, core2_vpmu_cxt = vpmu->context; enabled_cntrs = vpmu->priv_context; - switch ( msr ) + switch (msr) { case MSR_CORE_PERF_GLOBAL_OVF_CTRL: if ( msr_content & global_ovf_ctrl_mask ) @@ -553,11 +555,11 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content, return 0; case MSR_CORE_PERF_GLOBAL_STATUS: gdprintk(XENLOG_INFO, "Can not write readonly MSR: " - "MSR_PERF_GLOBAL_STATUS(0x38E)!\n"); + "MSR_PERF_GLOBAL_STATUS(0x38E)!\n"); return -EINVAL; case MSR_IA32_PEBS_ENABLE: - if ( vpmu_features & (XENPMU_FEATURE_IPC_ONLY | - XENPMU_FEATURE_ARCH_ONLY) ) + if ( vpmu_features & + (XENPMU_FEATURE_IPC_ONLY | XENPMU_FEATURE_ARCH_ONLY) ) return -EINVAL; if ( msr_content ) /* PEBS is reported as unavailable in MSR_IA32_MISC_ENABLE */ @@ -622,11 +624,11 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content, return -EINVAL; /* PMC filters */ - if ( vpmu_features & (XENPMU_FEATURE_IPC_ONLY | - XENPMU_FEATURE_ARCH_ONLY) ) + if ( vpmu_features & + (XENPMU_FEATURE_IPC_ONLY | XENPMU_FEATURE_ARCH_ONLY) ) { blocked = 1; - switch ( umaskevent ) + switch (umaskevent) { /* * See the Pre-Defined Architectural Performance Events table @@ -634,9 +636,9 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content, * Developer's Manual, Volume 3B, System Programming Guide, * Part 2. */ - case 0x003c: /* UnHalted Core Cycles */ - case 0x013c: /* UnHalted Reference Cycles */ - case 0x00c0: /* Instructions Retired */ + case 0x003c: /* UnHalted Core Cycles */ + case 0x013c: /* UnHalted Reference Cycles */ + case 0x00c0: /* Instructions Retired */ blocked = 0; break; } @@ -645,15 +647,15 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content, if ( vpmu_features & XENPMU_FEATURE_ARCH_ONLY ) { /* Additional counters beyond IPC only; blocked already set. 
*/ - switch ( umaskevent ) + switch (umaskevent) { - case 0x4f2e: /* Last Level Cache References */ - case 0x412e: /* Last Level Cache Misses */ - case 0x00c4: /* Branch Instructions Retired */ - case 0x00c5: /* All Branch Mispredict Retired */ + case 0x4f2e: /* Last Level Cache References */ + case 0x412e: /* Last Level Cache Misses */ + case 0x00c4: /* Branch Instructions Retired */ + case 0x00c5: /* All Branch Mispredict Retired */ blocked = 0; break; - } + } } if ( blocked ) @@ -703,7 +705,7 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content) if ( core2_vpmu_msr_common_check(msr, &type, &index) ) { core2_vpmu_cxt = vpmu->context; - switch ( msr ) + switch (msr) { case MSR_CORE_PERF_GLOBAL_OVF_CTRL: *msr_content = 0; @@ -743,7 +745,7 @@ static void core2_vpmu_dump(const struct vcpu *v) struct xen_pmu_cntr_pair *cntr_pair; if ( !core2_vpmu_cxt || !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) ) - return; + return; if ( !vpmu_is_set(vpmu, VPMU_RUNNING) ) { @@ -761,8 +763,8 @@ static void core2_vpmu_dump(const struct vcpu *v) /* Print the contents of the counter and its configuration msr. */ for ( i = 0; i < arch_pmc_cnt; i++ ) - printk(" general_%d: 0x%016lx ctrl: 0x%016lx\n", - i, cntr_pair[i].counter, cntr_pair[i].control); + printk(" general_%d: 0x%016lx ctrl: 0x%016lx\n", i, + cntr_pair[i].counter, cntr_pair[i].control); /* * The configuration of the fixed counter is 4 bits each in the @@ -771,8 +773,7 @@ static void core2_vpmu_dump(const struct vcpu *v) val = core2_vpmu_cxt->fixed_ctrl; for ( i = 0; i < fixed_pmc_cnt; i++ ) { - printk(" fixed_%d: 0x%016lx ctrl: %#lx\n", - i, fixed_counters[i], + printk(" fixed_%d: 0x%016lx ctrl: %#lx\n", i, fixed_counters[i], val & FIXED_CTR_CTRL_MASK); val >>= FIXED_CTR_CTRL_BITS; } @@ -826,8 +827,7 @@ static const struct arch_vpmu_ops core2_vpmu_ops = { .arch_vpmu_destroy = core2_vpmu_destroy, .arch_vpmu_save = core2_vpmu_save, .arch_vpmu_load = core2_vpmu_load, - .arch_vpmu_dump = core2_vpmu_dump -}; + .arch_vpmu_dump = core2_vpmu_dump}; int vmx_vpmu_initialise(struct vcpu *v) { @@ -854,7 +854,7 @@ int vmx_vpmu_initialise(struct vcpu *v) { if ( !ds_warned ) printk(XENLOG_G_WARNING "CPU doesn't support 64-bit DS Area" - " - Debug Store disabled for guests\n"); + " - Debug Store disabled for guests\n"); break; } vpmu_set(vpmu, VPMU_CPU_HAS_DS); @@ -865,7 +865,7 @@ int vmx_vpmu_initialise(struct vcpu *v) vpmu_reset(vpmu, VPMU_CPU_HAS_DS); if ( !ds_warned ) printk(XENLOG_G_WARNING "CPU has set BTS_UNAVAIL" - " - Debug Store disabled for guests\n"); + " - Debug Store disabled for guests\n"); break; } @@ -887,7 +887,7 @@ int vmx_vpmu_initialise(struct vcpu *v) break; } ds_warned = 1; - func_out: +func_out: /* PV domains can allocate resources immediately */ if ( is_pv_vcpu(v) && !core2_vpmu_alloc_resource(v) ) @@ -907,11 +907,11 @@ int __init core2_vpmu_init(void) if ( current_cpu_data.cpuid_level >= 0xa ) version = MASK_EXTR(cpuid_eax(0xa), PMU_VERSION_MASK); - switch ( version ) + switch (version) { case 4: printk(XENLOG_INFO "VPMU: PMU version 4 is not fully supported. 
" - "Emulating version 3\n"); + "Emulating version 3\n"); /* FALLTHROUGH */ case 2: @@ -937,16 +937,16 @@ int __init core2_vpmu_init(void) fixed_ctrl_mask = ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1); /* mask .AnyThread bits for all fixed counters */ - for( i = 0; i < fixed_pmc_cnt; i++ ) - fixed_ctrl_mask |= - (FIXED_CTR_CTRL_ANYTHREAD_MASK << (FIXED_CTR_CTRL_BITS * i)); + for ( i = 0; i < fixed_pmc_cnt; i++ ) + fixed_ctrl_mask |= + (FIXED_CTR_CTRL_ANYTHREAD_MASK << (FIXED_CTR_CTRL_BITS * i)); fixed_counters_mask = ~((1ull << core2_get_bitwidth_fix_count()) - 1); - global_ctrl_mask = ~((((1ULL << fixed_pmc_cnt) - 1) << 32) | - ((1ULL << arch_pmc_cnt) - 1)); - global_ovf_ctrl_mask = ~(0xC000000000000000 | - (((1ULL << fixed_pmc_cnt) - 1) << 32) | - ((1ULL << arch_pmc_cnt) - 1)); + global_ctrl_mask = + ~((((1ULL << fixed_pmc_cnt) - 1) << 32) | ((1ULL << arch_pmc_cnt) - 1)); + global_ovf_ctrl_mask = + ~(0xC000000000000000 | (((1ULL << fixed_pmc_cnt) - 1) << 32) | + ((1ULL << arch_pmc_cnt) - 1)); if ( version > 2 ) /* * Even though we don't support Uncore counters guests should be @@ -961,7 +961,8 @@ int __init core2_vpmu_init(void) check_pmc_quirk(); if ( sizeof(struct xen_pmu_data) + sizeof(uint64_t) * fixed_pmc_cnt + - sizeof(struct xen_pmu_cntr_pair) * arch_pmc_cnt > PAGE_SIZE ) + sizeof(struct xen_pmu_cntr_pair) * arch_pmc_cnt > + PAGE_SIZE ) { printk(XENLOG_WARNING "VPMU: Register bank does not fit into VPMU share page\n"); @@ -971,4 +972,3 @@ int __init core2_vpmu_init(void) return 0; } - diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c index ab0aab678c..140020dc52 100644 --- a/xen/arch/x86/cpuid.c +++ b/xen/arch/x86/cpuid.c @@ -65,22 +65,21 @@ static int __init parse_xen_cpuid(const char *s) custom_param("cpuid", parse_xen_cpuid); #define EMPTY_LEAF ((struct cpuid_leaf){}) -static void zero_leaves(struct cpuid_leaf *l, - unsigned int first, unsigned int last) +static void zero_leaves(struct cpuid_leaf *l, unsigned int first, + unsigned int last) { memset(&l[first], 0, sizeof(*l) * (last - first + 1)); } struct cpuid_policy __read_mostly raw_cpuid_policy, - __read_mostly host_cpuid_policy, - __read_mostly pv_max_cpuid_policy, + __read_mostly host_cpuid_policy, __read_mostly pv_max_cpuid_policy, __read_mostly hvm_max_cpuid_policy; static void sanitise_featureset(uint32_t *fs) { /* for_each_set_bit() uses unsigned longs. Extend with zeroes. 
*/ - uint32_t disabled_features[ - ROUNDUP(FSCAPINTS, sizeof(unsigned long)/sizeof(uint32_t))] = {}; + uint32_t disabled_features[ROUNDUP(FSCAPINTS, sizeof(unsigned long) / + sizeof(uint32_t))] = {}; unsigned int i; for ( i = 0; i < FSCAPINTS; ++i ) @@ -95,8 +94,8 @@ static void sanitise_featureset(uint32_t *fs) disabled_features[i] = ~fs[i] & deep_features[i]; } - for_each_set_bit(i, (void *)disabled_features, - sizeof(disabled_features) * 8) + for_each_set_bit (i, (void *)disabled_features, + sizeof(disabled_features) * 8) { const uint32_t *dfs = x86_cpuid_lookup_deep_deps(i); unsigned int j; @@ -129,52 +128,47 @@ static void recalculate_xstate(struct cpuid_policy *p) if ( p->basic.avx ) { xstates |= X86_XCR0_YMM; - xstate_size = max(xstate_size, - xstate_offsets[X86_XCR0_YMM_POS] + - xstate_sizes[X86_XCR0_YMM_POS]); + xstate_size = max(xstate_size, xstate_offsets[X86_XCR0_YMM_POS] + + xstate_sizes[X86_XCR0_YMM_POS]); } if ( p->feat.mpx ) { xstates |= X86_XCR0_BNDREGS | X86_XCR0_BNDCSR; - xstate_size = max(xstate_size, - xstate_offsets[X86_XCR0_BNDCSR_POS] + - xstate_sizes[X86_XCR0_BNDCSR_POS]); + xstate_size = max(xstate_size, xstate_offsets[X86_XCR0_BNDCSR_POS] + + xstate_sizes[X86_XCR0_BNDCSR_POS]); } if ( p->feat.avx512f ) { xstates |= X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM; - xstate_size = max(xstate_size, - xstate_offsets[X86_XCR0_HI_ZMM_POS] + - xstate_sizes[X86_XCR0_HI_ZMM_POS]); + xstate_size = max(xstate_size, xstate_offsets[X86_XCR0_HI_ZMM_POS] + + xstate_sizes[X86_XCR0_HI_ZMM_POS]); } if ( p->feat.pku ) { xstates |= X86_XCR0_PKRU; - xstate_size = max(xstate_size, - xstate_offsets[X86_XCR0_PKRU_POS] + - xstate_sizes[X86_XCR0_PKRU_POS]); + xstate_size = max(xstate_size, xstate_offsets[X86_XCR0_PKRU_POS] + + xstate_sizes[X86_XCR0_PKRU_POS]); } if ( p->extd.lwp ) { xstates |= X86_XCR0_LWP; - xstate_size = max(xstate_size, - xstate_offsets[X86_XCR0_LWP_POS] + - xstate_sizes[X86_XCR0_LWP_POS]); + xstate_size = max(xstate_size, xstate_offsets[X86_XCR0_LWP_POS] + + xstate_sizes[X86_XCR0_LWP_POS]); } - p->xstate.max_size = xstate_size; - p->xstate.xcr0_low = xstates & ~XSTATE_XSAVES_ONLY; + p->xstate.max_size = xstate_size; + p->xstate.xcr0_low = xstates & ~XSTATE_XSAVES_ONLY; p->xstate.xcr0_high = (xstates & ~XSTATE_XSAVES_ONLY) >> 32; p->xstate.Da1 = Da1; if ( p->xstate.xsaves ) { - p->xstate.xss_low = xstates & XSTATE_XSAVES_ONLY; - p->xstate.xss_high = (xstates & XSTATE_XSAVES_ONLY) >> 32; + p->xstate.xss_low = xstates & XSTATE_XSAVES_ONLY; + p->xstate.xss_high = (xstates & XSTATE_XSAVES_ONLY) >> 32; } else xstates &= ~XSTATE_XSAVES_ONLY; @@ -186,10 +180,10 @@ static void recalculate_xstate(struct cpuid_policy *p) if ( !(xstates & curr_xstate) ) continue; - p->xstate.comp[i].size = xstate_sizes[i]; + p->xstate.comp[i].size = xstate_sizes[i]; p->xstate.comp[i].offset = xstate_offsets[i]; - p->xstate.comp[i].xss = curr_xstate & XSTATE_XSAVES_ONLY; - p->xstate.comp[i].align = curr_xstate & xstate_align; + p->xstate.comp[i].xss = curr_xstate & XSTATE_XSAVES_ONLY; + p->xstate.comp[i].align = curr_xstate & xstate_align; } } @@ -200,7 +194,7 @@ static void recalculate_xstate(struct cpuid_policy *p) static void recalculate_misc(struct cpuid_policy *p) { p->basic.raw_fms &= 0x0fff0fff; /* Clobber Processor Type on Intel. */ - p->basic.apic_id = 0; /* Dynamic. */ + p->basic.apic_id = 0; /* Dynamic. */ p->basic.raw[0x5] = EMPTY_LEAF; /* MONITOR not exposed to guests. */ p->basic.raw[0x6] = EMPTY_LEAF; /* Therm/Power not exposed to guests. 
*/ @@ -219,10 +213,10 @@ static void recalculate_misc(struct cpuid_policy *p) p->extd.raw[0x8].d = 0; - switch ( p->x86_vendor ) + switch (p->x86_vendor) { case X86_VENDOR_INTEL: - p->basic.l2_nr_queries = 1; /* Fixed to 1 query. */ + p->basic.l2_nr_queries = 1; /* Fixed to 1 query. */ p->basic.raw[0x3] = EMPTY_LEAF; /* PSN - always hidden. */ p->basic.raw[0x9] = EMPTY_LEAF; /* DCA - always hidden. */ @@ -282,7 +276,7 @@ static void __init calculate_host_policy(void) *p = raw_cpuid_policy; p->basic.max_leaf = - min_t(uint32_t, p->basic.max_leaf, ARRAY_SIZE(p->basic.raw) - 1); + min_t(uint32_t, p->basic.max_leaf, ARRAY_SIZE(p->basic.raw) - 1); p->feat.max_subleaf = min_t(uint32_t, p->feat.max_subleaf, ARRAY_SIZE(p->feat.raw) - 1); p->extd.max_leaf = 0x80000000 | min_t(uint32_t, p->extd.max_leaf & 0xffff, @@ -299,14 +293,13 @@ static void __init calculate_host_policy(void) if ( p->extd.svm ) { /* Clamp to implemented features which require hardware support. */ - p->extd.raw[0xa].d &= ((1u << SVM_FEATURE_NPT) | - (1u << SVM_FEATURE_LBRV) | - (1u << SVM_FEATURE_NRIPS) | - (1u << SVM_FEATURE_PAUSEFILTER) | - (1u << SVM_FEATURE_DECODEASSISTS)); + p->extd.raw[0xa].d &= + ((1u << SVM_FEATURE_NPT) | (1u << SVM_FEATURE_LBRV) | + (1u << SVM_FEATURE_NRIPS) | (1u << SVM_FEATURE_PAUSEFILTER) | + (1u << SVM_FEATURE_DECODEASSISTS)); /* Enable features which are always emulated. */ - p->extd.raw[0xa].d |= ((1u << SVM_FEATURE_VMCBCLEAN) | - (1u << SVM_FEATURE_TSCRATEMSR)); + p->extd.raw[0xa].d |= + ((1u << SVM_FEATURE_VMCBCLEAN) | (1u << SVM_FEATURE_TSCRATEMSR)); } } @@ -373,8 +366,8 @@ static void __init calculate_hvm_max_policy(void) *p = host_cpuid_policy; cpuid_policy_to_featureset(p, hvm_featureset); - hvm_featuremask = hvm_hap_supported() ? - hvm_hap_featuremask : hvm_shadow_featuremask; + hvm_featuremask = + hvm_hap_supported() ? hvm_hap_featuremask : hvm_shadow_featuremask; for ( i = 0; i < ARRAY_SIZE(hvm_featureset); ++i ) hvm_featureset[i] &= hvm_featuremask[i]; @@ -443,8 +436,8 @@ bool recheck_cpu_features(unsigned int cpu) if ( !(~c.x86_capability[i] & bsp->x86_capability[i]) ) continue; - printk(XENLOG_ERR "CPU%u: cap[%2u] is %08x (expected %08x)\n", - cpu, i, c.x86_capability[i], bsp->x86_capability[i]); + printk(XENLOG_ERR "CPU%u: cap[%2u] is %08x (expected %08x)\n", cpu, i, + c.x86_capability[i], bsp->x86_capability[i]); okay = false; } @@ -462,12 +455,13 @@ void recalculate_cpuid_policy(struct domain *d) p->x86_vendor = get_cpu_vendor(p->basic.vendor_ebx, p->basic.vendor_ecx, p->basic.vendor_edx, gcv_guest); - p->basic.max_leaf = min(p->basic.max_leaf, max->basic.max_leaf); + p->basic.max_leaf = min(p->basic.max_leaf, max->basic.max_leaf); p->feat.max_subleaf = min(p->feat.max_subleaf, max->feat.max_subleaf); - p->extd.max_leaf = 0x80000000 | min(p->extd.max_leaf & 0xffff, - (p->x86_vendor == X86_VENDOR_AMD - ? CPUID_GUEST_NR_EXTD_AMD - : CPUID_GUEST_NR_EXTD_INTEL) - 1); + p->extd.max_leaf = 0x80000000 | min(p->extd.max_leaf & 0xffff, + (p->x86_vendor == X86_VENDOR_AMD + ? CPUID_GUEST_NR_EXTD_AMD + : CPUID_GUEST_NR_EXTD_INTEL) - + 1); cpuid_policy_to_featureset(p, fs); cpuid_policy_to_featureset(max, max_fs); @@ -530,8 +524,8 @@ void recalculate_cpuid_policy(struct domain *d) /* Fold host's FDP_EXCP_ONLY and NO_FPU_SEL into guest's view. 
*/ fs[FEATURESET_7b0] &= ~special_features[FEATURESET_7b0]; - fs[FEATURESET_7b0] |= (host_cpuid_policy.feat._7b0 & - special_features[FEATURESET_7b0]); + fs[FEATURESET_7b0] |= + (host_cpuid_policy.feat._7b0 & special_features[FEATURESET_7b0]); cpuid_featureset_to_policy(fs, p); @@ -539,8 +533,8 @@ void recalculate_cpuid_policy(struct domain *d) p->basic.clflush_size = max->basic.clflush_size; p->extd.maxphysaddr = min(p->extd.maxphysaddr, max->extd.maxphysaddr); - p->extd.maxphysaddr = min_t(uint8_t, p->extd.maxphysaddr, - paging_max_paddr_bits(d)); + p->extd.maxphysaddr = + min_t(uint8_t, p->extd.maxphysaddr, paging_max_paddr_bits(d)); p->extd.maxphysaddr = max_t(uint8_t, p->extd.maxphysaddr, (p->basic.pae || p->basic.pse36) ? 36 : 32); @@ -551,8 +545,7 @@ void recalculate_cpuid_policy(struct domain *d) for ( i = 0; i < ARRAY_SIZE(p->cache.raw); ++i ) { - if ( p->cache.subleaf[i].type >= 1 && - p->cache.subleaf[i].type <= 3 ) + if ( p->cache.subleaf[i].type >= 1 && p->cache.subleaf[i].type <= 3 ) { /* Subleaf has a valid cache type. Zero reserved fields. */ p->cache.raw[i].a &= 0xffffc3ffu; @@ -585,8 +578,7 @@ void recalculate_cpuid_policy(struct domain *d) int init_domain_cpuid_policy(struct domain *d) { struct cpuid_policy *p = - xmemdup(is_pv_domain(d) ? &pv_max_cpuid_policy - : &hvm_max_cpuid_policy); + xmemdup(is_pv_domain(d) ? &pv_max_cpuid_policy : &hvm_max_cpuid_policy); if ( !p ) return -ENOMEM; @@ -601,8 +593,8 @@ int init_domain_cpuid_policy(struct domain *d) return 0; } -void guest_cpuid(const struct vcpu *v, uint32_t leaf, - uint32_t subleaf, struct cpuid_leaf *res) +void guest_cpuid(const struct vcpu *v, uint32_t leaf, uint32_t subleaf, + struct cpuid_leaf *res) { const struct domain *d = v->domain; const struct cpuid_policy *p = d->arch.cpuid; @@ -616,15 +608,15 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf, * - Fill in *res with static data. * - Dispatch the virtualised leaves to their respective handlers. */ - switch ( leaf ) + switch (leaf) { case 0 ... CPUID_GUEST_NR_BASIC - 1: ASSERT(p->basic.max_leaf < ARRAY_SIZE(p->basic.raw)); - if ( leaf > min_t(uint32_t, p->basic.max_leaf, - ARRAY_SIZE(p->basic.raw) - 1) ) + if ( leaf > + min_t(uint32_t, p->basic.max_leaf, ARRAY_SIZE(p->basic.raw) - 1) ) return; - switch ( leaf ) + switch (leaf) { case 0x4: if ( subleaf >= ARRAY_SIZE(p->cache.raw) ) @@ -703,7 +695,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf, * Second pass: * - Dynamic adjustments */ - switch ( leaf ) + switch (leaf) { const struct cpu_user_regs *regs; @@ -714,8 +706,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf, res->b |= (v->vcpu_id * 2) << 24; /* TODO: Rework vPMU control in terms of toolstack choices. */ - if ( vpmu_available(v) && - vpmu_is_set(vcpu_vpmu(v), VPMU_CPU_HAS_DS) ) + if ( vpmu_available(v) && vpmu_is_set(vcpu_vpmu(v), VPMU_CPU_HAS_DS) ) { res->d |= cpufeat_mask(X86_FEATURE_DS); if ( cpu_has(¤t_cpu_data, X86_FEATURE_DTES64) ) @@ -852,13 +843,13 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf, break; case 0x7: - switch ( subleaf ) + switch (subleaf) { case 0: /* OSPKE clear in policy. Fast-forward CR4 back in. */ - if ( (is_pv_domain(d) - ? v->arch.pv.ctrlreg[4] - : v->arch.hvm.guest_cr[4]) & X86_CR4_PKE ) + if ( (is_pv_domain(d) ? 
v->arch.pv.ctrlreg[4] + : v->arch.hvm.guest_cr[4]) & + X86_CR4_PKE ) res->c |= cpufeat_mask(X86_FEATURE_OSPKE); break; } @@ -895,7 +886,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf, break; case XSTATE_CPUID: - switch ( subleaf ) + switch (subleaf) { case 1: if ( p->xstate.xsaves ) @@ -912,7 +903,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf, * Read CPUID[0xD,0/1].EBX from hardware. They vary with * enabled XSTATE, and appropraite XCR0|XSS are in context. */ - case 0: + case 0: res->b = cpuid_count_ebx(leaf, subleaf); } break; @@ -921,8 +912,8 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf, case 0x80000001: /* SYSCALL is hidden outside of long mode on Intel. */ - if ( p->x86_vendor == X86_VENDOR_INTEL && - is_hvm_domain(d) && !hvm_long_mode_active(v) ) + if ( p->x86_vendor == X86_VENDOR_INTEL && is_hvm_domain(d) && + !hvm_long_mode_active(v) ) res->d &= ~cpufeat_mask(X86_FEATURE_SYSCALL); common_leaf1_adjustments: diff --git a/xen/arch/x86/crash.c b/xen/arch/x86/crash.c index 60c98b6175..a466b5e38b 100644 --- a/xen/arch/x86/crash.c +++ b/xen/arch/x86/crash.c @@ -86,15 +86,15 @@ static void noreturn do_nmi_crash(const struct cpu_user_regs *regs) * non-fatal MCE), the LAPIC will force us back here rather than * wandering back into regular Xen code. */ - switch ( current_local_apic_mode() ) + switch (current_local_apic_mode()) { u32 apic_id; case APIC_MODE_X2APIC: apic_id = apic_rdmsr(APIC_ID); - apic_wrmsr(APIC_ICR, APIC_DM_NMI | APIC_DEST_PHYSICAL - | ((u64)apic_id << 32)); + apic_wrmsr(APIC_ICR, + APIC_DM_NMI | APIC_DEST_PHYSICAL | ((u64)apic_id << 32)); break; case APIC_MODE_XAPIC: @@ -111,7 +111,7 @@ static void noreturn do_nmi_crash(const struct cpu_user_regs *regs) break; } - for ( ; ; ) + for ( ;; ) halt(); } @@ -132,8 +132,8 @@ static void nmi_shootdown_cpus(void) * Disable IST for MCEs to avoid stack corruption race conditions, and * change the NMI handler to a nop to avoid deviation from this codepath. */ - _set_gate_lower(&idt_tables[cpu][TRAP_nmi], - SYS_DESC_irq_gate, 0, &trap_nop); + _set_gate_lower(&idt_tables[cpu][TRAP_nmi], SYS_DESC_irq_gate, 0, + &trap_nop); set_ist(&idt_tables[cpu][TRAP_machine_check], IST_NONE); /* @@ -159,8 +159,8 @@ static void nmi_shootdown_cpus(void) if ( cpumask_empty(&waiting_to_crash) ) printk("Shot down all CPUs\n"); else - printk("Failed to shoot down CPUs {%*pbl}\n", - nr_cpu_ids, cpumask_bits(&waiting_to_crash)); + printk("Failed to shoot down CPUs {%*pbl}\n", nr_cpu_ids, + cpumask_bits(&waiting_to_crash)); /* Crash shutdown any IOMMU functionality as the crashdump kernel is not * happy when booting if interrupt/dma remapping is still enabled */ diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c index a500df01ac..babc413d24 100644 --- a/xen/arch/x86/debug.c +++ b/xen/arch/x86/debug.c @@ -22,7 +22,7 @@ #include #include -/* +/* * This file for general routines common to more than one debugger, like kdb, * gdbsx, etc.. */ @@ -30,9 +30,18 @@ #ifdef XEN_KDB_CONFIG #include "../kdb/include/kdbdefs.h" #include "../kdb/include/kdbproto.h" -#define DBGP(...) {(kdbdbg) ? kdbp(__VA_ARGS__):0;} -#define DBGP1(...) {(kdbdbg>1) ? kdbp(__VA_ARGS__):0;} -#define DBGP2(...) {(kdbdbg>2) ? kdbp(__VA_ARGS__):0;} +#define DBGP(...) \ + { \ + (kdbdbg) ? kdbp(__VA_ARGS__) : 0; \ + } +#define DBGP1(...) \ + { \ + (kdbdbg > 1) ? kdbp(__VA_ARGS__) : 0; \ + } +#define DBGP2(...) \ + { \ + (kdbdbg > 2) ? kdbp(__VA_ARGS__) : 0; \ + } #else #define DBGP1(...) ((void)0) #define DBGP2(...) 
((void)0) @@ -42,8 +51,8 @@ typedef unsigned long dbgva_t; typedef unsigned char dbgbyte_t; /* Returns: mfn for the given (hvm guest) vaddr */ -static mfn_t -dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr, gfn_t *gfn) +static mfn_t dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr, + gfn_t *gfn) { mfn_t mfn; uint32_t pfec = PFEC_page_present; @@ -65,8 +74,8 @@ dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr, gfn_t *gfn) mfn = INVALID_MFN; } else - DBGP2("X: vaddr:%lx domid:%d mfn:%#"PRI_mfn"\n", - vaddr, dp->domain_id, mfn_x(mfn)); + DBGP2("X: vaddr:%lx domid:%d mfn:%#" PRI_mfn "\n", vaddr, dp->domain_id, + mfn_x(mfn)); if ( mfn_eq(mfn, INVALID_MFN) ) { @@ -77,21 +86,20 @@ dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr, gfn_t *gfn) return mfn; } -/* +/* * pgd3val: this is the value of init_mm.pgd[3] in a PV guest. It is optional. - * This to assist debug of modules in the guest. The kernel address - * space seems is always mapped, but modules are not necessarily - * mapped in any arbitraty guest cr3 that we pick if pgd3val is 0. - * Modules should always be addressible if we use cr3 from init_mm. - * Since pgd3val is already a pgd value, cr3->pgd[3], we just need to + * This to assist debug of modules in the guest. The kernel address + * space seems is always mapped, but modules are not necessarily + * mapped in any arbitraty guest cr3 that we pick if pgd3val is 0. + * Modules should always be addressible if we use cr3 from init_mm. + * Since pgd3val is already a pgd value, cr3->pgd[3], we just need to * do 2 level lookups. * * NOTE: 4 level paging works for 32 PAE guests also because cpu runs in IA32-e * mode. - * Returns: mfn for the given (pv guest) vaddr + * Returns: mfn for the given (pv guest) vaddr */ -static mfn_t -dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val) +static mfn_t dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val) { l4_pgentry_t l4e, *l4t; l3_pgentry_t l3e, *l3t; @@ -100,8 +108,8 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val) unsigned long cr3 = (pgd3val ? 
pgd3val : dp->vcpu[0]->arch.cr3); mfn_t mfn = maddr_to_mfn(cr3_pa(cr3)); - DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id, - cr3, pgd3val); + DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id, cr3, + pgd3val); if ( pgd3val == 0 ) { @@ -109,7 +117,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val) l4e = l4t[l4_table_offset(vaddr)]; unmap_domain_page(l4t); mfn = l4e_get_mfn(l4e); - DBGP2("l4t:%p l4to:%lx l4e:%lx mfn:%#"PRI_mfn"\n", l4t, + DBGP2("l4t:%p l4to:%lx l4e:%lx mfn:%#" PRI_mfn "\n", l4t, l4_table_offset(vaddr), l4e, mfn_x(mfn)); if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) ) { @@ -121,7 +129,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val) l3e = l3t[l3_table_offset(vaddr)]; unmap_domain_page(l3t); mfn = l3e_get_mfn(l3e); - DBGP2("l3t:%p l3to:%lx l3e:%lx mfn:%#"PRI_mfn"\n", l3t, + DBGP2("l3t:%p l3to:%lx l3e:%lx mfn:%#" PRI_mfn "\n", l3t, l3_table_offset(vaddr), l3e, mfn_x(mfn)); if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || (l3e_get_flags(l3e) & _PAGE_PSE) ) @@ -135,8 +143,8 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val) l2e = l2t[l2_table_offset(vaddr)]; unmap_domain_page(l2t); mfn = l2e_get_mfn(l2e); - DBGP2("l2t:%p l2to:%lx l2e:%lx mfn:%#"PRI_mfn"\n", - l2t, l2_table_offset(vaddr), l2e, mfn_x(mfn)); + DBGP2("l2t:%p l2to:%lx l2e:%lx mfn:%#" PRI_mfn "\n", l2t, + l2_table_offset(vaddr), l2e, mfn_x(mfn)); if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || (l2e_get_flags(l2e) & _PAGE_PSE) ) { @@ -147,15 +155,15 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val) l1e = l1t[l1_table_offset(vaddr)]; unmap_domain_page(l1t); mfn = l1e_get_mfn(l1e); - DBGP2("l1t:%p l1to:%lx l1e:%lx mfn:%#"PRI_mfn"\n", l1t, l1_table_offset(vaddr), - l1e, mfn_x(mfn)); + DBGP2("l1t:%p l1to:%lx l1e:%lx mfn:%#" PRI_mfn "\n", l1t, + l1_table_offset(vaddr), l1e, mfn_x(mfn)); return mfn_valid(mfn) ? mfn : INVALID_MFN; } /* Returns: number of bytes remaining to be copied */ -static unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr, - void * __user buf, unsigned int len, +static unsigned int dbg_rw_guest_mem(struct domain *dp, void *__user gaddr, + void *__user buf, unsigned int len, bool toaddr, uint64_t pgd3) { while ( len > 0 ) @@ -168,23 +176,22 @@ static unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr, pagecnt = min_t(long, PAGE_SIZE - (addr & ~PAGE_MASK), len); - mfn = (is_hvm_domain(dp) - ? dbg_hvm_va2mfn(addr, dp, toaddr, &gfn) - : dbg_pv_va2mfn(addr, dp, pgd3)); + mfn = (is_hvm_domain(dp) ? dbg_hvm_va2mfn(addr, dp, toaddr, &gfn) + : dbg_pv_va2mfn(addr, dp, pgd3)); if ( mfn_eq(mfn, INVALID_MFN) ) break; va = map_domain_page(mfn); - va = va + (addr & (PAGE_SIZE-1)); + va = va + (addr & (PAGE_SIZE - 1)); if ( toaddr ) { - copy_from_user(va, buf, pagecnt); /* va = buf */ + copy_from_user(va, buf, pagecnt); /* va = buf */ paging_mark_dirty(dp, mfn); } else { - copy_to_user(buf, va, pagecnt); /* buf = va */ + copy_to_user(buf, va, pagecnt); /* buf = va */ } unmap_domain_page(va); @@ -199,19 +206,18 @@ static unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr, return len; } -/* +/* * addr is hypervisor addr if domid == DOMID_IDLE, else it's guest addr * buf is debugger buffer. * if toaddr, then addr = buf (write to addr), else buf = addr (rd from guest) * pgd3: value of init_mm.pgd[3] in guest. see above. - * Returns: number of bytes remaining to be copied. + * Returns: number of bytes remaining to be copied. 
*/ -unsigned int dbg_rw_mem(void * __user addr, void * __user buf, - unsigned int len, domid_t domid, bool toaddr, - uint64_t pgd3) +unsigned int dbg_rw_mem(void *__user addr, void *__user buf, unsigned int len, + domid_t domid, bool toaddr, uint64_t pgd3) { - DBGP2("gmem:addr:%lx buf:%p len:$%u domid:%d toaddr:%x\n", - addr, buf, len, domid, toaddr); + DBGP2("gmem:addr:%lx buf:%p len:$%u domid:%d toaddr:%x\n", addr, buf, len, + domid, toaddr); if ( domid == DOMID_IDLE ) { diff --git a/xen/arch/x86/delay.c b/xen/arch/x86/delay.c index 2662c26272..a1ad305b6a 100644 --- a/xen/arch/x86/delay.c +++ b/xen/arch/x86/delay.c @@ -21,9 +21,8 @@ void __udelay(unsigned long usecs) unsigned long s, e; s = rdtsc_ordered(); - do - { + do { rep_nop(); e = rdtsc_ordered(); - } while ((e-s) < ticks); + } while ( (e - s) < ticks ); } diff --git a/xen/arch/x86/dmi_scan.c b/xen/arch/x86/dmi_scan.c index fcdf2d3952..bf2bf3bcbf 100644 --- a/xen/arch/x86/dmi_scan.c +++ b/xen/arch/x86/dmi_scan.c @@ -12,95 +12,99 @@ #include #include -#define bt_ioremap(b,l) ((void *)__acpi_map_table(b,l)) -#define bt_iounmap(b,l) ((void)0) -#define memcpy_fromio memcpy +#define bt_ioremap(b, l) ((void *)__acpi_map_table(b, l)) +#define bt_iounmap(b, l) ((void)0) +#define memcpy_fromio memcpy #define alloc_bootmem(l) xmalloc_bytes(l) -struct __packed dmi_eps { - char anchor[5]; /* "_DMI_" */ - u8 checksum; - u16 size; - u32 address; - u16 num_structures; - u8 revision; +struct __packed dmi_eps +{ + char anchor[5]; /* "_DMI_" */ + u8 checksum; + u16 size; + u32 address; + u16 num_structures; + u8 revision; }; -struct __packed smbios_eps { - char anchor[4]; /* "_SM_" */ - u8 checksum; - u8 length; - u8 major, minor; - u16 max_size; - u8 revision; - u8 _rsrvd_[5]; - struct dmi_eps dmi; +struct __packed smbios_eps +{ + char anchor[4]; /* "_SM_" */ + u8 checksum; + u8 length; + u8 major, minor; + u16 max_size; + u8 revision; + u8 _rsrvd_[5]; + struct dmi_eps dmi; }; -struct __packed smbios3_eps { - char anchor[5]; /* "_SM3_" */ - u8 checksum; - u8 length; - u8 major, minor; - u8 docrev; - u8 revision; - u8 _rsrvd_; - u32 max_size; - u64 address; +struct __packed smbios3_eps +{ + char anchor[5]; /* "_SM3_" */ + u8 checksum; + u8 length; + u8 major, minor; + u8 docrev; + u8 revision; + u8 _rsrvd_; + u32 max_size; + u64 address; }; struct dmi_header { - u8 type; - u8 length; - u16 handle; + u8 type; + u8 length; + u16 handle; }; -enum dmi_entry_type { - DMI_ENTRY_BIOS = 0, - DMI_ENTRY_SYSTEM, - DMI_ENTRY_BASEBOARD, - DMI_ENTRY_CHASSIS, - DMI_ENTRY_PROCESSOR, - DMI_ENTRY_MEM_CONTROLLER, - DMI_ENTRY_MEM_MODULE, - DMI_ENTRY_CACHE, - DMI_ENTRY_PORT_CONNECTOR, - DMI_ENTRY_SYSTEM_SLOT, - DMI_ENTRY_ONBOARD_DEVICE, - DMI_ENTRY_OEMSTRINGS, - DMI_ENTRY_SYSCONF, - DMI_ENTRY_BIOS_LANG, - DMI_ENTRY_GROUP_ASSOC, - DMI_ENTRY_SYSTEM_EVENT_LOG, - DMI_ENTRY_PHYS_MEM_ARRAY, - DMI_ENTRY_MEM_DEVICE, - DMI_ENTRY_32_MEM_ERROR, - DMI_ENTRY_MEM_ARRAY_MAPPED_ADDR, - DMI_ENTRY_MEM_DEV_MAPPED_ADDR, - DMI_ENTRY_BUILTIN_POINTING_DEV, - DMI_ENTRY_PORTABLE_BATTERY, - DMI_ENTRY_SYSTEM_RESET, - DMI_ENTRY_HW_SECURITY, - DMI_ENTRY_SYSTEM_POWER_CONTROLS, - DMI_ENTRY_VOLTAGE_PROBE, - DMI_ENTRY_COOLING_DEV, - DMI_ENTRY_TEMP_PROBE, - DMI_ENTRY_ELECTRICAL_CURRENT_PROBE, - DMI_ENTRY_OOB_REMOTE_ACCESS, - DMI_ENTRY_BIS_ENTRY, - DMI_ENTRY_SYSTEM_BOOT, - DMI_ENTRY_MGMT_DEV, - DMI_ENTRY_MGMT_DEV_COMPONENT, - DMI_ENTRY_MGMT_DEV_THRES, - DMI_ENTRY_MEM_CHANNEL, - DMI_ENTRY_IPMI_DEV, - DMI_ENTRY_SYS_POWER_SUPPLY, - DMI_ENTRY_ADDITIONAL, - DMI_ENTRY_ONBOARD_DEV_EXT, - 
DMI_ENTRY_MGMT_CONTROLLER_HOST, - DMI_ENTRY_INACTIVE = 126, - DMI_ENTRY_END_OF_TABLE = 127, +enum dmi_entry_type +{ + DMI_ENTRY_BIOS = 0, + DMI_ENTRY_SYSTEM, + DMI_ENTRY_BASEBOARD, + DMI_ENTRY_CHASSIS, + DMI_ENTRY_PROCESSOR, + DMI_ENTRY_MEM_CONTROLLER, + DMI_ENTRY_MEM_MODULE, + DMI_ENTRY_CACHE, + DMI_ENTRY_PORT_CONNECTOR, + DMI_ENTRY_SYSTEM_SLOT, + DMI_ENTRY_ONBOARD_DEVICE, + DMI_ENTRY_OEMSTRINGS, + DMI_ENTRY_SYSCONF, + DMI_ENTRY_BIOS_LANG, + DMI_ENTRY_GROUP_ASSOC, + DMI_ENTRY_SYSTEM_EVENT_LOG, + DMI_ENTRY_PHYS_MEM_ARRAY, + DMI_ENTRY_MEM_DEVICE, + DMI_ENTRY_32_MEM_ERROR, + DMI_ENTRY_MEM_ARRAY_MAPPED_ADDR, + DMI_ENTRY_MEM_DEV_MAPPED_ADDR, + DMI_ENTRY_BUILTIN_POINTING_DEV, + DMI_ENTRY_PORTABLE_BATTERY, + DMI_ENTRY_SYSTEM_RESET, + DMI_ENTRY_HW_SECURITY, + DMI_ENTRY_SYSTEM_POWER_CONTROLS, + DMI_ENTRY_VOLTAGE_PROBE, + DMI_ENTRY_COOLING_DEV, + DMI_ENTRY_TEMP_PROBE, + DMI_ENTRY_ELECTRICAL_CURRENT_PROBE, + DMI_ENTRY_OOB_REMOTE_ACCESS, + DMI_ENTRY_BIS_ENTRY, + DMI_ENTRY_SYSTEM_BOOT, + DMI_ENTRY_MGMT_DEV, + DMI_ENTRY_MGMT_DEV_COMPONENT, + DMI_ENTRY_MGMT_DEV_THRES, + DMI_ENTRY_MEM_CHANNEL, + DMI_ENTRY_IPMI_DEV, + DMI_ENTRY_SYS_POWER_SUPPLY, + DMI_ENTRY_ADDITIONAL, + DMI_ENTRY_ONBOARD_DEV_EXT, + DMI_ENTRY_MGMT_CONTROLLER_HOST, + DMI_ENTRY_INACTIVE = 126, + DMI_ENTRY_END_OF_TABLE = 127, }; #undef DMI_DEBUG @@ -111,88 +115,88 @@ enum dmi_entry_type { #define dmi_printk(x) #endif -static char * __init dmi_string(struct dmi_header *dm, u8 s) +static char *__init dmi_string(struct dmi_header *dm, u8 s) { - char *bp=(char *)dm; - bp+=dm->length; - if(!s) - return ""; - s--; - while(s>0 && *bp) - { - bp+=strlen(bp); - bp++; - s--; - } - return bp; + char *bp = (char *)dm; + bp += dm->length; + if ( !s ) + return ""; + s--; + while ( s > 0 && *bp ) + { + bp += strlen(bp); + bp++; + s--; + } + return bp; } /* * We have to be cautious here. We have seen BIOSes with DMI pointers * pointing to completely the wrong place for example */ - + static int __init dmi_table(paddr_t base, u32 len, int num, - void (*decode)(struct dmi_header *)) + void (*decode)(struct dmi_header *)) { - u8 *buf; - struct dmi_header *dm; - u8 *data; - int i=0; - - buf = bt_ioremap(base, len); - if(buf==NULL) - return -1; - - data = buf; - - /* - * Stop when we have seen all the items the table claimed to have - * (SMBIOS < 3.0 only) OR we reach an end-of-table marker (SMBIOS - * >= 3.0 only) OR we run off the end of the table (should never - * happen but sometimes does on bogus implementations.) - */ - while((num < 0 || i < num) && data-buf+sizeof(struct dmi_header)<=len) - { - dm=(struct dmi_header *)data; - /* - * We want to know the total length (formated area and strings) - * before decoding to make sure we won't run off the table in - * dmi_decode or dmi_string - */ - data+=dm->length; - while(data-buftype == DMI_ENTRY_END_OF_TABLE) - break; - data+=2; - i++; - } - bt_iounmap(buf, len); - return 0; + u8 *buf; + struct dmi_header *dm; + u8 *data; + int i = 0; + + buf = bt_ioremap(base, len); + if ( buf == NULL ) + return -1; + + data = buf; + + /* + * Stop when we have seen all the items the table claimed to have + * (SMBIOS < 3.0 only) OR we reach an end-of-table marker (SMBIOS + * >= 3.0 only) OR we run off the end of the table (should never + * happen but sometimes does on bogus implementations.) 
+ */ + while ( (num < 0 || i < num) && + data - buf + sizeof(struct dmi_header) <= len ) + { + dm = (struct dmi_header *)data; + /* + * We want to know the total length (formated area and strings) + * before decoding to make sure we won't run off the table in + * dmi_decode or dmi_string + */ + data += dm->length; + while ( data - buf < len - 1 && (data[0] || data[1]) ) + data++; + if ( data - buf < len - 1 ) + decode(dm); + /* + * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0] + * For tables behind a 64-bit entry point, we have no item + * count and no exact table length, so stop on end-of-table + * marker. For tables behind a 32-bit entry point, we have + * seen OEM structures behind the end-of-table marker on + * some systems, so don't trust it. + */ + if ( num < 0 && dm->type == DMI_ENTRY_END_OF_TABLE ) + break; + data += 2; + i++; + } + bt_iounmap(buf, len); + return 0; } - static inline bool __init dmi_checksum(const void __iomem *buf, unsigned int len) { - u8 sum = 0; - const u8 *p = buf; - unsigned int a; - - for (a = 0; a < len; a++) - sum += p[a]; - return sum == 0; + u8 sum = 0; + const u8 *p = buf; + unsigned int a; + + for ( a = 0; a < len; a++ ) + sum += p[a]; + return sum == 0; } static u32 __initdata efi_dmi_address; @@ -208,223 +212,235 @@ static u32 __initdata efi_smbios3_size; */ void __init dmi_efi_get_table(const void *smbios, const void *smbios3) { - const struct smbios_eps *eps = smbios; - const struct smbios3_eps *eps3 = smbios3; - - if (eps3 && memcmp(eps3->anchor, "_SM3_", 5) == 0 && - eps3->length >= sizeof(*eps3) && - dmi_checksum(eps3, eps3->length)) { - efi_smbios3_address = eps3->address; - efi_smbios3_size = eps3->max_size; - return; - } - - if (eps && memcmp(eps->anchor, "_SM_", 4) == 0 && - eps->length >= sizeof(*eps) && - dmi_checksum(eps, eps->length)) { - efi_smbios_address = (u32)(long)eps; - efi_smbios_size = eps->length; - - if (memcmp(eps->dmi.anchor, "_DMI_", 5) == 0 && - dmi_checksum(&eps->dmi, sizeof(eps->dmi))) { - efi_dmi_address = eps->dmi.address; - efi_dmi_size = eps->dmi.size; - } - } + const struct smbios_eps *eps = smbios; + const struct smbios3_eps *eps3 = smbios3; + + if ( eps3 && memcmp(eps3->anchor, "_SM3_", 5) == 0 && + eps3->length >= sizeof(*eps3) && dmi_checksum(eps3, eps3->length) ) + { + efi_smbios3_address = eps3->address; + efi_smbios3_size = eps3->max_size; + return; + } + + if ( eps && memcmp(eps->anchor, "_SM_", 4) == 0 && + eps->length >= sizeof(*eps) && dmi_checksum(eps, eps->length) ) + { + efi_smbios_address = (u32)(long)eps; + efi_smbios_size = eps->length; + + if ( memcmp(eps->dmi.anchor, "_DMI_", 5) == 0 && + dmi_checksum(&eps->dmi, sizeof(eps->dmi)) ) + { + efi_dmi_address = eps->dmi.address; + efi_dmi_size = eps->dmi.size; + } + } } const char *__init dmi_get_table(paddr_t *base, u32 *len) { - static unsigned int __initdata instance; - - if (efi_enabled(EFI_BOOT)) { - if (efi_smbios3_size && !(instance & 1)) { - *base = efi_smbios3_address; - *len = efi_smbios3_size; - instance |= 1; - return "SMBIOSv3"; - } - if (efi_dmi_size && !(instance & 2)) { - *base = efi_dmi_address; - *len = efi_dmi_size; - instance |= 2; - return "DMI"; - } - if (efi_smbios_size && !(instance & 4)) { - *base = efi_smbios_address; - *len = efi_smbios_size; - instance |= 4; - return "SMBIOS"; - } - } else { - char __iomem *p = maddr_to_virt(0xF0000), *q; - union { - struct dmi_eps dmi; - struct smbios3_eps smbios3; - } eps; - - for (q = p; q <= p + 0x10000 - sizeof(eps.dmi); q += 16) { - memcpy_fromio(&eps, q, sizeof(eps.dmi)); 
- if (!(instance & 1) && - memcmp(eps.dmi.anchor, "_DMI_", 5) == 0 && - dmi_checksum(&eps.dmi, sizeof(eps.dmi))) { - *base = eps.dmi.address; - *len = eps.dmi.size; - instance |= 1; - return "DMI"; - } - - BUILD_BUG_ON(sizeof(eps.smbios3) <= sizeof(eps.dmi)); - if ((instance & 2) || - q > p + 0x10000 - sizeof(eps.smbios3)) - continue; - memcpy_fromio(&eps.dmi + 1, q + sizeof(eps.dmi), - sizeof(eps.smbios3) - sizeof(eps.dmi)); - if (!memcmp(eps.smbios3.anchor, "_SM3_", 5) && - eps.smbios3.length >= sizeof(eps.smbios3) && - q <= p + 0x10000 - eps.smbios3.length && - dmi_checksum(q, eps.smbios3.length)) { - *base = eps.smbios3.address; - *len = eps.smbios3.max_size; - instance |= 2; - return "SMBIOSv3"; - } - } - } - return NULL; + static unsigned int __initdata instance; + + if ( efi_enabled(EFI_BOOT) ) + { + if ( efi_smbios3_size && !(instance & 1) ) + { + *base = efi_smbios3_address; + *len = efi_smbios3_size; + instance |= 1; + return "SMBIOSv3"; + } + if ( efi_dmi_size && !(instance & 2) ) + { + *base = efi_dmi_address; + *len = efi_dmi_size; + instance |= 2; + return "DMI"; + } + if ( efi_smbios_size && !(instance & 4) ) + { + *base = efi_smbios_address; + *len = efi_smbios_size; + instance |= 4; + return "SMBIOS"; + } + } + else + { + char __iomem *p = maddr_to_virt(0xF0000), *q; + union { + struct dmi_eps dmi; + struct smbios3_eps smbios3; + } eps; + + for ( q = p; q <= p + 0x10000 - sizeof(eps.dmi); q += 16 ) + { + memcpy_fromio(&eps, q, sizeof(eps.dmi)); + if ( !(instance & 1) && memcmp(eps.dmi.anchor, "_DMI_", 5) == 0 && + dmi_checksum(&eps.dmi, sizeof(eps.dmi)) ) + { + *base = eps.dmi.address; + *len = eps.dmi.size; + instance |= 1; + return "DMI"; + } + + BUILD_BUG_ON(sizeof(eps.smbios3) <= sizeof(eps.dmi)); + if ( (instance & 2) || q > p + 0x10000 - sizeof(eps.smbios3) ) + continue; + memcpy_fromio(&eps.dmi + 1, q + sizeof(eps.dmi), + sizeof(eps.smbios3) - sizeof(eps.dmi)); + if ( !memcmp(eps.smbios3.anchor, "_SM3_", 5) && + eps.smbios3.length >= sizeof(eps.smbios3) && + q <= p + 0x10000 - eps.smbios3.length && + dmi_checksum(q, eps.smbios3.length) ) + { + *base = eps.smbios3.address; + *len = eps.smbios3.max_size; + instance |= 2; + return "SMBIOSv3"; + } + } + } + return NULL; } typedef union { - const struct smbios_eps __iomem *legacy; - const struct smbios3_eps __iomem *v3; + const struct smbios_eps __iomem *legacy; + const struct smbios3_eps __iomem *v3; } smbios_eps_u __attribute__((transparent_union)); static int __init _dmi_iterate(const struct dmi_eps *dmi, - const smbios_eps_u smbios, - void (*decode)(struct dmi_header *)) + const smbios_eps_u smbios, + void (*decode)(struct dmi_header *)) { - int num; - u32 len; - paddr_t base; - - if (!dmi) { - num = -1; - len = smbios.v3->max_size; - base = smbios.v3->address; - printk(KERN_INFO "SMBIOS %d.%d present.\n", - smbios.v3->major, smbios.v3->minor); - dmi_printk((KERN_INFO "SMBIOS v3 table at 0x%"PRIpaddr".\n", base)); - } else { - num = dmi->num_structures; - len = dmi->size; - base = dmi->address; - - /* - * DMI version 0.0 means that the real version is taken from - * the SMBIOS version, which we may not know at this point. 
- */ - if (dmi->revision) - printk(KERN_INFO "DMI %d.%d present.\n", - dmi->revision >> 4, dmi->revision & 0x0f); - else if (!smbios.legacy) - printk(KERN_INFO "DMI present.\n"); - dmi_printk((KERN_INFO "%d structures occupying %u bytes.\n", - num, len)); - dmi_printk((KERN_INFO "DMI table at 0x%08X.\n", (u32)base)); - } - return dmi_table(base, len, num, decode); + int num; + u32 len; + paddr_t base; + + if ( !dmi ) + { + num = -1; + len = smbios.v3->max_size; + base = smbios.v3->address; + printk(KERN_INFO "SMBIOS %d.%d present.\n", smbios.v3->major, + smbios.v3->minor); + dmi_printk((KERN_INFO "SMBIOS v3 table at 0x%" PRIpaddr ".\n", base)); + } + else + { + num = dmi->num_structures; + len = dmi->size; + base = dmi->address; + + /* + * DMI version 0.0 means that the real version is taken from + * the SMBIOS version, which we may not know at this point. + */ + if ( dmi->revision ) + printk(KERN_INFO "DMI %d.%d present.\n", dmi->revision >> 4, + dmi->revision & 0x0f); + else if ( !smbios.legacy ) + printk(KERN_INFO "DMI present.\n"); + dmi_printk((KERN_INFO "%d structures occupying %u bytes.\n", num, len)); + dmi_printk((KERN_INFO "DMI table at 0x%08X.\n", (u32)base)); + } + return dmi_table(base, len, num, decode); } static int __init dmi_iterate(void (*decode)(struct dmi_header *)) { - struct dmi_eps dmi; - struct smbios3_eps smbios3; - char __iomem *p, *q; - - dmi.size = 0; - smbios3.length = 0; - - p = maddr_to_virt(0xF0000); - for (q = p; q < p + 0x10000; q += 16) { - if (!dmi.size) { - memcpy_fromio(&dmi, q, sizeof(dmi)); - if (memcmp(dmi.anchor, "_DMI_", 5) || - !dmi_checksum(&dmi, sizeof(dmi))) - dmi.size = 0; - } - if (!smbios3.length && - q <= p + 0x10000 - sizeof(smbios3)) { - memcpy_fromio(&smbios3, q, sizeof(smbios3)); - if (memcmp(smbios3.anchor, "_SM3_", 5) || - smbios3.length < sizeof(smbios3) || - q < p + 0x10000 - smbios3.length || - !dmi_checksum(q, smbios3.length)) - smbios3.length = 0; - } - } - - if (smbios3.length) - return _dmi_iterate(NULL, &smbios3, decode); - if (dmi.size) - return _dmi_iterate(&dmi, NULL, decode); - return -1; + struct dmi_eps dmi; + struct smbios3_eps smbios3; + char __iomem *p, *q; + + dmi.size = 0; + smbios3.length = 0; + + p = maddr_to_virt(0xF0000); + for ( q = p; q < p + 0x10000; q += 16 ) + { + if ( !dmi.size ) + { + memcpy_fromio(&dmi, q, sizeof(dmi)); + if ( memcmp(dmi.anchor, "_DMI_", 5) || + !dmi_checksum(&dmi, sizeof(dmi)) ) + dmi.size = 0; + } + if ( !smbios3.length && q <= p + 0x10000 - sizeof(smbios3) ) + { + memcpy_fromio(&smbios3, q, sizeof(smbios3)); + if ( memcmp(smbios3.anchor, "_SM3_", 5) || + smbios3.length < sizeof(smbios3) || + q < p + 0x10000 - smbios3.length || + !dmi_checksum(q, smbios3.length) ) + smbios3.length = 0; + } + } + + if ( smbios3.length ) + return _dmi_iterate(NULL, &smbios3, decode); + if ( dmi.size ) + return _dmi_iterate(&dmi, NULL, decode); + return -1; } static int __init dmi_efi_iterate(void (*decode)(struct dmi_header *)) { - int ret = -1; - - while (efi.smbios3 != EFI_INVALID_TABLE_ADDR) { - struct smbios3_eps eps; - const struct smbios3_eps __iomem *p; - - p = bt_ioremap(efi.smbios3, sizeof(eps)); - if (!p) - break; - memcpy_fromio(&eps, p, sizeof(eps)); - bt_iounmap(p, sizeof(eps)); - - if (memcmp(eps.anchor, "_SM3_", 5) || - eps.length < sizeof(eps)) - break; - - p = bt_ioremap(efi.smbios3, eps.length); - if (!p) - break; - if (dmi_checksum(p, eps.length)) - ret = _dmi_iterate(NULL, p, decode); - bt_iounmap(p, eps.length); - break; - } - - if (ret != 0 && efi.smbios != EFI_INVALID_TABLE_ADDR) 
{ - struct smbios_eps eps; - const struct smbios_eps __iomem *p; - - p = bt_ioremap(efi.smbios, sizeof(eps)); - if (!p) - return -1; - memcpy_fromio(&eps, p, sizeof(eps)); - bt_iounmap(p, sizeof(eps)); - - if (memcmp(eps.anchor, "_SM_", 4) || - eps.length < sizeof(eps)) - return -1; - - p = bt_ioremap(efi.smbios, eps.length); - if (!p) - return -1; - if (dmi_checksum(p, eps.length) && - memcmp(eps.dmi.anchor, "_DMI_", 5) == 0 && - dmi_checksum(&eps.dmi, sizeof(eps.dmi))) { - printk(KERN_INFO "SMBIOS %d.%d present.\n", - eps.major, eps.minor); - ret = _dmi_iterate(&eps.dmi, p, decode); - } - bt_iounmap(p, eps.length); - } - - return ret; + int ret = -1; + + while ( efi.smbios3 != EFI_INVALID_TABLE_ADDR ) + { + struct smbios3_eps eps; + const struct smbios3_eps __iomem *p; + + p = bt_ioremap(efi.smbios3, sizeof(eps)); + if ( !p ) + break; + memcpy_fromio(&eps, p, sizeof(eps)); + bt_iounmap(p, sizeof(eps)); + + if ( memcmp(eps.anchor, "_SM3_", 5) || eps.length < sizeof(eps) ) + break; + + p = bt_ioremap(efi.smbios3, eps.length); + if ( !p ) + break; + if ( dmi_checksum(p, eps.length) ) + ret = _dmi_iterate(NULL, p, decode); + bt_iounmap(p, eps.length); + break; + } + + if ( ret != 0 && efi.smbios != EFI_INVALID_TABLE_ADDR ) + { + struct smbios_eps eps; + const struct smbios_eps __iomem *p; + + p = bt_ioremap(efi.smbios, sizeof(eps)); + if ( !p ) + return -1; + memcpy_fromio(&eps, p, sizeof(eps)); + bt_iounmap(p, sizeof(eps)); + + if ( memcmp(eps.anchor, "_SM_", 4) || eps.length < sizeof(eps) ) + return -1; + + p = bt_ioremap(efi.smbios, eps.length); + if ( !p ) + return -1; + if ( dmi_checksum(p, eps.length) && + memcmp(eps.dmi.anchor, "_DMI_", 5) == 0 && + dmi_checksum(&eps.dmi, sizeof(eps.dmi)) ) + { + printk(KERN_INFO "SMBIOS %d.%d present.\n", eps.major, eps.minor); + ret = _dmi_iterate(&eps.dmi, p, decode); + } + bt_iounmap(p, eps.length); + } + + return ret; } static char *__initdata dmi_ident[DMI_STRING_MAX]; @@ -432,28 +448,31 @@ static char *__initdata dmi_ident[DMI_STRING_MAX]; /* * Save a DMI string */ - + static void __init dmi_save_ident(struct dmi_header *dm, int slot, int string) { - char *d = (char*)dm; - char *p = dmi_string(dm, d[string]); - if(p==NULL || *p == 0) - return; - if (dmi_ident[slot]) - return; - dmi_ident[slot] = alloc_bootmem(strlen(p)+1); - if(dmi_ident[slot]) - strlcpy(dmi_ident[slot], p, strlen(p)+1); - else - printk(KERN_ERR "dmi_save_ident: out of memory.\n"); + char *d = (char *)dm; + char *p = dmi_string(dm, d[string]); + if ( p == NULL || *p == 0 ) + return; + if ( dmi_ident[slot] ) + return; + dmi_ident[slot] = alloc_bootmem(strlen(p) + 1); + if ( dmi_ident[slot] ) + strlcpy(dmi_ident[slot], p, strlen(p) + 1); + else + printk(KERN_ERR "dmi_save_ident: out of memory.\n"); } /* * Ugly compatibility crap. */ -#define dmi_blacklist dmi_system_id -#define NO_MATCH { DMI_NONE, NULL} -#define MATCH DMI_MATCH +#define dmi_blacklist dmi_system_id +#define NO_MATCH \ + { \ + DMI_NONE, NULL \ + } +#define MATCH DMI_MATCH /* * Toshiba keyboard likes to repeat keys when they are not repeated. @@ -461,8 +480,11 @@ static void __init dmi_save_ident(struct dmi_header *dm, int slot, int string) static __init int broken_toshiba_keyboard(struct dmi_blacklist *d) { - printk(KERN_WARNING "Toshiba with broken keyboard detected. If your keyboard sometimes generates 3 keypresses instead of one, see http://davyd.ucc.asn.au/projects/toshiba/README\n"); - return 0; + printk(KERN_WARNING + "Toshiba with broken keyboard detected. 
If your keyboard sometimes " + "generates 3 keypresses instead of one, see " + "http://davyd.ucc.asn.au/projects/toshiba/README\n"); + return 0; } static int __init ich10_bios_quirk(struct dmi_system_id *d) @@ -472,7 +494,8 @@ static int __init ich10_bios_quirk(struct dmi_system_id *d) if ( pci_conf_read16(0, 0, 0x1f, 0, PCI_VENDOR_ID) != 0x8086 ) return 0; - switch ( pci_conf_read16(0, 0, 0x1f, 0, PCI_DEVICE_ID) ) { + switch (pci_conf_read16(0, 0, 0x1f, 0, PCI_DEVICE_ID)) + { case 0x3a14: case 0x3a16: case 0x3a18: @@ -491,155 +514,160 @@ static int __init ich10_bios_quirk(struct dmi_system_id *d) #ifdef CONFIG_ACPI_SLEEP static __init int reset_videomode_after_s3(struct dmi_blacklist *d) { - /* See acpi_wakeup.S */ - acpi_video_flags |= 2; - return 0; + /* See acpi_wakeup.S */ + acpi_video_flags |= 2; + return 0; } #endif -static __init int dmi_disable_acpi(struct dmi_blacklist *d) -{ - if (!acpi_force) { - printk(KERN_NOTICE "%s detected: acpi off\n",d->ident); - disable_acpi(); - } else { - printk(KERN_NOTICE - "Warning: DMI blacklist says broken, but acpi forced\n"); - } - return 0; -} +static __init int dmi_disable_acpi(struct dmi_blacklist *d) +{ + if ( !acpi_force ) + { + printk(KERN_NOTICE "%s detected: acpi off\n", d->ident); + disable_acpi(); + } + else + { + printk(KERN_NOTICE + "Warning: DMI blacklist says broken, but acpi forced\n"); + } + return 0; +} /* * Limit ACPI to CPU enumeration for HT */ -static __init int force_acpi_ht(struct dmi_blacklist *d) -{ - if (!acpi_force) { - printk(KERN_NOTICE "%s detected: force use of acpi=ht\n", d->ident); - disable_acpi(); - acpi_ht = 1; - } else { - printk(KERN_NOTICE - "Warning: acpi=force overrules DMI blacklist: acpi=ht\n"); - } - return 0; -} +static __init int force_acpi_ht(struct dmi_blacklist *d) +{ + if ( !acpi_force ) + { + printk(KERN_NOTICE "%s detected: force use of acpi=ht\n", d->ident); + disable_acpi(); + acpi_ht = 1; + } + else + { + printk(KERN_NOTICE + "Warning: acpi=force overrules DMI blacklist: acpi=ht\n"); + } + return 0; +} /* * Process the DMI blacklists */ - /* - * This will be expanded over time to force things like the APM + * This will be expanded over time to force things like the APM * interrupt mask settings according to the laptop */ - -static __initdata struct dmi_blacklist dmi_blacklist[]={ - { broken_toshiba_keyboard, "Toshiba Satellite 4030cdt", { /* Keyboard generates spurious repeats */ - MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"), - NO_MATCH, NO_MATCH, NO_MATCH - } }, +static __initdata struct dmi_blacklist dmi_blacklist[] = { + + {broken_toshiba_keyboard, + "Toshiba Satellite 4030cdt", + {/* Keyboard generates spurious repeats */ + MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"), NO_MATCH, NO_MATCH, NO_MATCH}}, #ifdef CONFIG_ACPI_SLEEP - { reset_videomode_after_s3, "Toshiba Satellite 4030cdt", { /* Reset video mode after returning from ACPI S3 sleep */ - MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"), - NO_MATCH, NO_MATCH, NO_MATCH - } }, + {reset_videomode_after_s3, + "Toshiba Satellite 4030cdt", + {/* Reset video mode after returning from ACPI S3 sleep */ + MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"), NO_MATCH, NO_MATCH, NO_MATCH}}, #endif - { ich10_bios_quirk, "Intel board & BIOS", - /* - * BIOS leaves legacy USB emulation enabled while - * SMM can't properly handle it. 
- */ - { - MATCH(DMI_BOARD_VENDOR, "Intel Corp"), - MATCH(DMI_BIOS_VENDOR, "Intel Corp"), - NO_MATCH, NO_MATCH - } - }, - - /* - * If your system is blacklisted here, but you find that acpi=force - * works for you, please contact acpi-devel@sourceforge.net - */ - - /* - * Boxes that need ACPI disabled - */ - - { dmi_disable_acpi, "IBM Thinkpad", { - MATCH(DMI_BOARD_VENDOR, "IBM"), - MATCH(DMI_BOARD_NAME, "2629H1G"), - NO_MATCH, NO_MATCH }}, - - /* - * Boxes that need acpi=ht - */ - - { force_acpi_ht, "FSC Primergy T850", { - MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), - MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"), - NO_MATCH, NO_MATCH }}, - - { force_acpi_ht, "DELL GX240", { - MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"), - MATCH(DMI_BOARD_NAME, "OptiPlex GX240"), - NO_MATCH, NO_MATCH }}, - - { force_acpi_ht, "HP VISUALIZE NT Workstation", { - MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"), - NO_MATCH, NO_MATCH }}, - - { force_acpi_ht, "Compaq Workstation W8000", { - MATCH(DMI_SYS_VENDOR, "Compaq"), - MATCH(DMI_PRODUCT_NAME, "Workstation W8000"), - NO_MATCH, NO_MATCH }}, - - { force_acpi_ht, "ASUS P4B266", { - MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), - MATCH(DMI_BOARD_NAME, "P4B266"), - NO_MATCH, NO_MATCH }}, - - { force_acpi_ht, "ASUS P2B-DS", { - MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), - MATCH(DMI_BOARD_NAME, "P2B-DS"), - NO_MATCH, NO_MATCH }}, - - { force_acpi_ht, "ASUS CUR-DLS", { - MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), - MATCH(DMI_BOARD_NAME, "CUR-DLS"), - NO_MATCH, NO_MATCH }}, - - { force_acpi_ht, "ABIT i440BX-W83977", { - MATCH(DMI_BOARD_VENDOR, "ABIT "), - MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"), - NO_MATCH, NO_MATCH }}, - - { force_acpi_ht, "IBM Bladecenter", { - MATCH(DMI_BOARD_VENDOR, "IBM"), - MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"), - NO_MATCH, NO_MATCH }}, - - { force_acpi_ht, "IBM eServer xSeries 360", { - MATCH(DMI_BOARD_VENDOR, "IBM"), - MATCH(DMI_BOARD_NAME, "eServer xSeries 360"), - NO_MATCH, NO_MATCH }}, - - { force_acpi_ht, "IBM eserver xSeries 330", { - MATCH(DMI_BOARD_VENDOR, "IBM"), - MATCH(DMI_BOARD_NAME, "eserver xSeries 330"), - NO_MATCH, NO_MATCH }}, - - { force_acpi_ht, "IBM eserver xSeries 440", { - MATCH(DMI_BOARD_VENDOR, "IBM"), - MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"), - NO_MATCH, NO_MATCH }}, - - { NULL, } -}; + {ich10_bios_quirk, + "Intel board & BIOS", + /* + * BIOS leaves legacy USB emulation enabled while + * SMM can't properly handle it. 
+ */ + {MATCH(DMI_BOARD_VENDOR, "Intel Corp"), + MATCH(DMI_BIOS_VENDOR, "Intel Corp"), NO_MATCH, NO_MATCH}}, + + /* + * If your system is blacklisted here, but you find that acpi=force + * works for you, please contact acpi-devel@sourceforge.net + */ + + /* + * Boxes that need ACPI disabled + */ + + {dmi_disable_acpi, + "IBM Thinkpad", + {MATCH(DMI_BOARD_VENDOR, "IBM"), MATCH(DMI_BOARD_NAME, "2629H1G"), + NO_MATCH, NO_MATCH}}, + + /* + * Boxes that need acpi=ht + */ + + {force_acpi_ht, + "FSC Primergy T850", + {MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"), NO_MATCH, NO_MATCH}}, + + {force_acpi_ht, + "DELL GX240", + {MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"), + MATCH(DMI_BOARD_NAME, "OptiPlex GX240"), NO_MATCH, NO_MATCH}}, + + {force_acpi_ht, + "HP VISUALIZE NT Workstation", + {MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"), NO_MATCH, + NO_MATCH}}, + + {force_acpi_ht, + "Compaq Workstation W8000", + {MATCH(DMI_SYS_VENDOR, "Compaq"), + MATCH(DMI_PRODUCT_NAME, "Workstation W8000"), NO_MATCH, NO_MATCH}}, + + {force_acpi_ht, + "ASUS P4B266", + {MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + MATCH(DMI_BOARD_NAME, "P4B266"), NO_MATCH, NO_MATCH}}, + + {force_acpi_ht, + "ASUS P2B-DS", + {MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + MATCH(DMI_BOARD_NAME, "P2B-DS"), NO_MATCH, NO_MATCH}}, + + {force_acpi_ht, + "ASUS CUR-DLS", + {MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + MATCH(DMI_BOARD_NAME, "CUR-DLS"), NO_MATCH, NO_MATCH}}, + + {force_acpi_ht, + "ABIT i440BX-W83977", + {MATCH(DMI_BOARD_VENDOR, "ABIT "), + MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"), NO_MATCH, NO_MATCH}}, + + {force_acpi_ht, + "IBM Bladecenter", + {MATCH(DMI_BOARD_VENDOR, "IBM"), + MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"), NO_MATCH, + NO_MATCH}}, + + {force_acpi_ht, + "IBM eServer xSeries 360", + {MATCH(DMI_BOARD_VENDOR, "IBM"), + MATCH(DMI_BOARD_NAME, "eServer xSeries 360"), NO_MATCH, NO_MATCH}}, + + {force_acpi_ht, + "IBM eserver xSeries 330", + {MATCH(DMI_BOARD_VENDOR, "IBM"), + MATCH(DMI_BOARD_NAME, "eserver xSeries 330"), NO_MATCH, NO_MATCH}}, + + {force_acpi_ht, + "IBM eserver xSeries 440", + {MATCH(DMI_BOARD_VENDOR, "IBM"), + MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"), NO_MATCH, NO_MATCH}}, + + { + NULL, + }}; /* * Process a DMI table entry. 
Right now all we care about are the BIOS @@ -650,59 +678,48 @@ static __initdata struct dmi_blacklist dmi_blacklist[]={ static void __init dmi_decode(struct dmi_header *dm) { #ifdef DMI_DEBUG - u8 *data = (u8 *)dm; + u8 *data = (u8 *)dm; #endif - - switch(dm->type) - { - case DMI_ENTRY_BIOS: - dmi_printk(("BIOS Vendor: %s\n", - dmi_string(dm, data[4]))); - dmi_save_ident(dm, DMI_BIOS_VENDOR, 4); - dmi_printk(("BIOS Version: %s\n", - dmi_string(dm, data[5]))); - dmi_save_ident(dm, DMI_BIOS_VERSION, 5); - dmi_printk(("BIOS Release: %s\n", - dmi_string(dm, data[8]))); - dmi_save_ident(dm, DMI_BIOS_DATE, 8); - break; - case DMI_ENTRY_SYSTEM: - dmi_printk(("System Vendor: %s\n", - dmi_string(dm, data[4]))); - dmi_save_ident(dm, DMI_SYS_VENDOR, 4); - dmi_printk(("Product Name: %s\n", - dmi_string(dm, data[5]))); - dmi_save_ident(dm, DMI_PRODUCT_NAME, 5); - dmi_printk(("Version: %s\n", - dmi_string(dm, data[6]))); - dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6); - dmi_printk(("Serial Number: %s\n", - dmi_string(dm, data[7]))); - break; - case DMI_ENTRY_BASEBOARD: - dmi_printk(("Board Vendor: %s\n", - dmi_string(dm, data[4]))); - dmi_save_ident(dm, DMI_BOARD_VENDOR, 4); - dmi_printk(("Board Name: %s\n", - dmi_string(dm, data[5]))); - dmi_save_ident(dm, DMI_BOARD_NAME, 5); - dmi_printk(("Board Version: %s\n", - dmi_string(dm, data[6]))); - dmi_save_ident(dm, DMI_BOARD_VERSION, 6); - break; - } + + switch (dm->type) + { + case DMI_ENTRY_BIOS: + dmi_printk(("BIOS Vendor: %s\n", dmi_string(dm, data[4]))); + dmi_save_ident(dm, DMI_BIOS_VENDOR, 4); + dmi_printk(("BIOS Version: %s\n", dmi_string(dm, data[5]))); + dmi_save_ident(dm, DMI_BIOS_VERSION, 5); + dmi_printk(("BIOS Release: %s\n", dmi_string(dm, data[8]))); + dmi_save_ident(dm, DMI_BIOS_DATE, 8); + break; + case DMI_ENTRY_SYSTEM: + dmi_printk(("System Vendor: %s\n", dmi_string(dm, data[4]))); + dmi_save_ident(dm, DMI_SYS_VENDOR, 4); + dmi_printk(("Product Name: %s\n", dmi_string(dm, data[5]))); + dmi_save_ident(dm, DMI_PRODUCT_NAME, 5); + dmi_printk(("Version: %s\n", dmi_string(dm, data[6]))); + dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6); + dmi_printk(("Serial Number: %s\n", dmi_string(dm, data[7]))); + break; + case DMI_ENTRY_BASEBOARD: + dmi_printk(("Board Vendor: %s\n", dmi_string(dm, data[4]))); + dmi_save_ident(dm, DMI_BOARD_VENDOR, 4); + dmi_printk(("Board Name: %s\n", dmi_string(dm, data[5]))); + dmi_save_ident(dm, DMI_BOARD_NAME, 5); + dmi_printk(("Board Version: %s\n", dmi_string(dm, data[6]))); + dmi_save_ident(dm, DMI_BOARD_VERSION, 6); + break; + } } void __init dmi_scan_machine(void) { - if ((!efi_enabled(EFI_BOOT) ? dmi_iterate(dmi_decode) : - dmi_efi_iterate(dmi_decode)) == 0) - dmi_check_system(dmi_blacklist); - else - printk(KERN_INFO "DMI not present.\n"); + if ( (!efi_enabled(EFI_BOOT) ? 
dmi_iterate(dmi_decode) + : dmi_efi_iterate(dmi_decode)) == 0 ) + dmi_check_system(dmi_blacklist); + else + printk(KERN_INFO "DMI not present.\n"); } - /** * dmi_check_system - check system DMI data * @list: array of dmi_system_id structures to match against @@ -713,26 +730,29 @@ void __init dmi_scan_machine(void) */ int __init dmi_check_system(struct dmi_system_id *list) { - int i, count = 0; - struct dmi_system_id *d = list; - - while (d->ident) { - for (i = 0; i < ARRAY_SIZE(d->matches); i++) { - int s = d->matches[i].slot; - if (s == DMI_NONE) - continue; - if (dmi_ident[s] && strstr(dmi_ident[s], d->matches[i].substr)) - continue; - /* No match */ - goto fail; - } - if (d->callback && d->callback(d)) - break; - count++; -fail: d++; - } - - return count; + int i, count = 0; + struct dmi_system_id *d = list; + + while ( d->ident ) + { + for ( i = 0; i < ARRAY_SIZE(d->matches); i++ ) + { + int s = d->matches[i].slot; + if ( s == DMI_NONE ) + continue; + if ( dmi_ident[s] && strstr(dmi_ident[s], d->matches[i].substr) ) + continue; + /* No match */ + goto fail; + } + if ( d->callback && d->callback(d) ) + break; + count++; + fail: + d++; + } + + return count; } /** @@ -755,54 +775,56 @@ fail: d++; */ bool __init dmi_get_date(int field, int *yearp, int *monthp, int *dayp) { - int year = 0, month = 0, day = 0; - bool exists; - const char *s, *e, *y; - - s = field < DMI_STRING_MAX ? dmi_ident[field] : NULL; - exists = !!s; - if (!exists) - goto out; - - /* - * Determine year first. We assume the date string resembles - * mm/dd/yy[yy] but the original code extracted only the year - * from the end. Keep the behavior in the spirit of no - * surprises. - */ - y = strrchr(s, '/'); - if (!y) - goto out; - - y++; - year = simple_strtoul(y, &e, 10); - if (y != e && year < 100) { /* 2-digit year */ - year += 1900; - if (year < 1996) /* no dates < spec 1.0 */ - year += 100; - } - if (year > 9999) /* year should fit in %04d */ - year = 0; - - /* parse the mm and dd */ - month = simple_strtoul(s, &e, 10); - if (s == e || *e != '/' || !month || month > 12) { - month = 0; - goto out; - } - - s = e + 1; - day = simple_strtoul(s, &e, 10); - if (s == y || s == e || *e != '/' || day > 31) - day = 0; + int year = 0, month = 0, day = 0; + bool exists; + const char *s, *e, *y; + + s = field < DMI_STRING_MAX ? dmi_ident[field] : NULL; + exists = !!s; + if ( !exists ) + goto out; + + /* + * Determine year first. We assume the date string resembles + * mm/dd/yy[yy] but the original code extracted only the year + * from the end. Keep the behavior in the spirit of no + * surprises. 
+ */ + y = strrchr(s, '/'); + if ( !y ) + goto out; + + y++; + year = simple_strtoul(y, &e, 10); + if ( y != e && year < 100 ) + { /* 2-digit year */ + year += 1900; + if ( year < 1996 ) /* no dates < spec 1.0 */ + year += 100; + } + if ( year > 9999 ) /* year should fit in %04d */ + year = 0; + + /* parse the mm and dd */ + month = simple_strtoul(s, &e, 10); + if ( s == e || *e != '/' || !month || month > 12 ) + { + month = 0; + goto out; + } + + s = e + 1; + day = simple_strtoul(s, &e, 10); + if ( s == y || s == e || *e != '/' || day > 31 ) + day = 0; out: - if (yearp) - *yearp = year; - if (monthp) - *monthp = month; - if (dayp) - *dayp = day; - return exists; + if ( yearp ) + *yearp = year; + if ( monthp ) + *monthp = month; + if ( dayp ) + *dayp = day; + return exists; } void __init dmi_end_boot(void) diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c index 6ebe36766b..4a419769ea 100644 --- a/xen/arch/x86/dom0_build.c +++ b/xen/arch/x86/dom0_build.c @@ -1,6 +1,6 @@ /****************************************************************************** * dom0_build.c - * + * * Copyright (c) 2002-2005, K A Fraser */ @@ -20,7 +20,8 @@ #include #include -struct memsize { +struct memsize +{ long nr_pages; unsigned int percent; bool minus; @@ -28,7 +29,7 @@ struct memsize { static struct memsize __initdata dom0_size; static struct memsize __initdata dom0_min_size; -static struct memsize __initdata dom0_max_size = { .nr_pages = LONG_MAX }; +static struct memsize __initdata dom0_max_size = {.nr_pages = LONG_MAX}; static bool __initdata dom0_mem_set; static bool __init memsize_gt_zero(const struct memsize *sz) @@ -73,15 +74,14 @@ static unsigned long __init get_memsize(const struct memsize *sz, static int __init parse_amt(const char *s, const char **ps, struct memsize *sz) { unsigned long val; - struct memsize tmp = { }; + struct memsize tmp = {}; unsigned int items = 0; tmp.minus = (*s == '-'); if ( tmp.minus ) s++; - do - { + do { if ( !isdigit(*s) ) return -EINVAL; @@ -145,14 +145,14 @@ static unsigned int __initdata opt_dom0_max_vcpus_max = UINT_MAX; static int __init parse_dom0_max_vcpus(const char *s) { - if ( *s == '-' ) /* -M */ + if ( *s == '-' ) /* -M */ opt_dom0_max_vcpus_max = simple_strtoul(s + 1, &s, 0); - else /* N, N-, or N-M */ + else /* N, N-, or N-M */ { opt_dom0_max_vcpus_min = simple_strtoul(s, &s, 0); if ( opt_dom0_max_vcpus_min == 0 ) opt_dom0_max_vcpus_min = 1; - if ( !*s ) /* N */ + if ( !*s ) /* N */ opt_dom0_max_vcpus_max = opt_dom0_max_vcpus_min; else if ( *s++ == '-' && *s ) /* N-M */ opt_dom0_max_vcpus_max = simple_strtoul(s, &s, 0); @@ -163,8 +163,8 @@ static int __init parse_dom0_max_vcpus(const char *s) custom_param("dom0_max_vcpus", parse_dom0_max_vcpus); static __initdata unsigned int dom0_nr_pxms; -static __initdata unsigned int dom0_pxms[MAX_NUMNODES] = - { [0 ... MAX_NUMNODES - 1] = ~0 }; +static __initdata unsigned int dom0_pxms[MAX_NUMNODES] = { + [0 ... 
MAX_NUMNODES - 1] = ~0}; static __initdata bool dom0_affinity_relaxed; static int __init parse_dom0_nodes(const char *s) @@ -198,8 +198,7 @@ custom_param("dom0_nodes", parse_dom0_nodes); static cpumask_t __initdata dom0_cpus; -struct vcpu *__init dom0_setup_vcpu(struct domain *d, - unsigned int vcpu_id, +struct vcpu *__init dom0_setup_vcpu(struct domain *d, unsigned int vcpu_id, unsigned int prev_cpu) { unsigned int cpu = cpumask_cycle(prev_cpu, &dom0_cpus); @@ -243,14 +242,13 @@ unsigned int __init dom0_max_vcpus(void) return nr_cpu_ids; } - for ( i = 0; i < dom0_nr_pxms; ++i ) if ( (node = pxm_to_node(dom0_pxms[i])) != NUMA_NO_NODE ) node_set(node, dom0_nodes); nodes_and(dom0_nodes, dom0_nodes, node_online_map); if ( nodes_empty(dom0_nodes) ) dom0_nodes = node_online_map; - for_each_node_mask ( node, dom0_nodes ) + for_each_node_mask(node, dom0_nodes) cpumask_or(&dom0_cpus, &dom0_cpus, &node_to_cpumask(node)); cpumask_and(&dom0_cpus, &dom0_cpus, cpupool0->cpu_valid); if ( cpumask_empty(&dom0_cpus) ) @@ -273,8 +271,9 @@ struct vcpu *__init alloc_dom0_vcpu0(struct domain *dom0) dom0->node_affinity = dom0_nodes; dom0->auto_node_affinity = !dom0_nr_pxms; - return dom0_setup_vcpu(dom0, 0, - cpumask_last(&dom0_cpus) /* so it wraps around to first pcpu */); + return dom0_setup_vcpu( + dom0, 0, + cpumask_last(&dom0_cpus) /* so it wraps around to first pcpu */); } #ifdef CONFIG_SHADOW_PAGING @@ -321,7 +320,7 @@ string_param("dom0_ioports_disable", opt_dom0_ioports_disable); static bool __initdata ro_hpet = true; boolean_param("ro-hpet", ro_hpet); -unsigned int __initdata dom0_memflags = MEMF_no_dma|MEMF_exact_node; +unsigned int __initdata dom0_memflags = MEMF_no_dma | MEMF_exact_node; unsigned long __init dom0_paging_pages(const struct domain *d, unsigned long nr_pages) @@ -334,8 +333,9 @@ unsigned long __init dom0_paging_pages(const struct domain *d, return ((memkb + 1023) / 1024) << (20 - PAGE_SHIFT); } -unsigned long __init dom0_compute_nr_pages( - struct domain *d, struct elf_dom_parms *parms, unsigned long initrd_len) +unsigned long __init dom0_compute_nr_pages(struct domain *d, + struct elf_dom_parms *parms, + unsigned long initrd_len) { nodeid_t node; unsigned long avail = 0, nr_pages, min_pages, max_pages; @@ -344,13 +344,11 @@ unsigned long __init dom0_compute_nr_pages( if ( !dom0_mem_set && CONFIG_DOM0_MEM[0] ) parse_dom0_mem(CONFIG_DOM0_MEM); - for_each_node_mask ( node, dom0_nodes ) - avail += avail_domheap_pages_region(node, 0, 0) + - initial_images_nrpages(node); + for_each_node_mask(node, dom0_nodes) avail += + avail_domheap_pages_region(node, 0, 0) + initial_images_nrpages(node); /* Reserve memory for further dom0 vcpu-struct allocations... */ - avail -= (d->max_vcpus - 1UL) - << get_order_from_bytes(sizeof(struct vcpu)); + avail -= (d->max_vcpus - 1UL) << get_order_from_bytes(sizeof(struct vcpu)); /* ...and compat_l4's, if needed. */ if ( is_pv_32bit_domain(d) ) avail -= d->max_vcpus - 1; @@ -364,9 +362,9 @@ unsigned long __init dom0_compute_nr_pages( avail -= max_pdx >> s; } - need_paging = is_hvm_domain(d) && - (!iommu_hap_pt_share || !paging_mode_hap(d)); - for ( ; ; need_paging = false ) + need_paging = + is_hvm_domain(d) && (!iommu_hap_pt_share || !paging_mode_hap(d)); + for ( ;; need_paging = false ) { nr_pages = get_memsize(&dom0_size, avail); min_pages = get_memsize(&dom0_min_size, avail); @@ -379,18 +377,20 @@ unsigned long __init dom0_compute_nr_pages( */ if ( !nr_pages ) { - nr_pages = avail - (pv_shim ? pv_shim_mem(avail) + nr_pages = + avail - (pv_shim ? 
pv_shim_mem(avail) : min(avail / 16, 128UL << (20 - PAGE_SHIFT))); if ( is_hvm_domain(d) && !need_paging ) /* * Temporary workaround message until internal (paging) memory * accounting required to build a pvh dom0 is improved. */ - printk("WARNING: PVH dom0 without dom0_mem set is still unstable. " - "If you get crashes during boot, try adding a dom0_mem parameter\n"); + printk( + "WARNING: PVH dom0 without dom0_mem set is still unstable. " + "If you get crashes during boot, try adding a dom0_mem " + "parameter\n"); } - /* Clamp according to min/max limits and available memory. */ nr_pages = max(nr_pages, min_pages); nr_pages = min(nr_pages, max_pages); @@ -403,8 +403,8 @@ unsigned long __init dom0_compute_nr_pages( avail -= dom0_paging_pages(d, nr_pages); } - if ( is_pv_domain(d) && - (parms->p2m_base == UNSET_ADDR) && !memsize_gt_zero(&dom0_size) && + if ( is_pv_domain(d) && (parms->p2m_base == UNSET_ADDR) && + !memsize_gt_zero(&dom0_size) && (!memsize_gt_zero(&dom0_min_size) || (nr_pages > min_pages)) ) { /* @@ -458,7 +458,8 @@ static void __init process_dom0_ioports_disable(struct domain *dom0) { parse_error: printk("Invalid ioport range <%s> " - "in dom0_ioports_disable, skipping\n", t); + "in dom0_ioports_disable, skipping\n", + t); continue; } @@ -472,8 +473,8 @@ static void __init process_dom0_ioports_disable(struct domain *dom0) if ( (*u != '\0') || (io_to < io_from) || (io_to >= 65536) ) goto parse_error; - printk("Disabling dom0 access to ioport range %04lx-%04lx\n", - io_from, io_to); + printk("Disabling dom0 access to ioport range %04lx-%04lx\n", io_from, + io_to); if ( ioports_deny_access(dom0, io_from, io_to) != 0 ) BUG(); @@ -538,9 +539,9 @@ int __init dom0_setup_permissions(struct domain *d) rc |= iomem_deny_access(d, mfn, mfn); } /* MSI range. */ - rc |= iomem_deny_access(d, paddr_to_pfn(MSI_ADDR_BASE_LO), - paddr_to_pfn(MSI_ADDR_BASE_LO + - MSI_ADDR_DEST_ID_MASK)); + rc |= iomem_deny_access( + d, paddr_to_pfn(MSI_ADDR_BASE_LO), + paddr_to_pfn(MSI_ADDR_BASE_LO + MSI_ADDR_DEST_ID_MASK)); /* HyperTransport range. */ if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD ) rc |= iomem_deny_access(d, paddr_to_pfn(0xfdULL << 32), @@ -552,8 +553,7 @@ int __init dom0_setup_permissions(struct domain *d) unsigned long sfn, efn; sfn = max_t(unsigned long, paddr_to_pfn(e820.map[i].addr), 0x100ul); efn = paddr_to_pfn(e820.map[i].addr + e820.map[i].size - 1); - if ( (e820.map[i].type == E820_UNUSABLE) && - (e820.map[i].size != 0) && + if ( (e820.map[i].type == E820_UNUSABLE) && (e820.map[i].size != 0) && (sfn <= efn) ) rc |= iomem_deny_access(d, sfn, efn); } diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index a1cd38fb70..8f42bf0043 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -73,8 +73,8 @@ DEFINE_PER_CPU(struct vcpu *, curr_vcpu); static void default_idle(void); -void (*pm_idle) (void) __read_mostly = default_idle; -void (*dead_idle) (void) __read_mostly = default_dead_idle; +void (*pm_idle)(void) __read_mostly = default_idle; +void (*dead_idle)(void) __read_mostly = default_dead_idle; static void default_idle(void) { @@ -100,7 +100,7 @@ void default_dead_idle(void) */ spec_ctrl_enter_idle(get_cpu_info()); wbinvd(); - for ( ; ; ) + for ( ;; ) halt(); } @@ -114,7 +114,7 @@ static void play_dead(void) * this case, heap corruption or #PF can occur (when heap debugging is * enabled). For example, even printk() can involve tasklet scheduling, * which touches per-cpu vars. - * + * * Consider very carefully when adding code to *dead_idle. 
Most hypervisor * subsystems are unsafe to call. */ @@ -127,7 +127,7 @@ static void idle_loop(void) { unsigned int cpu = smp_processor_id(); - for ( ; ; ) + for ( ;; ) { if ( cpu_is_offline(cpu) ) play_dead(); @@ -140,8 +140,8 @@ static void idle_loop(void) * and then, after it is done, whether softirqs became pending * while we were scrubbing. */ - else if ( !softirq_pending(cpu) && !scrub_free_pages() && - !softirq_pending(cpu) ) + else if ( !softirq_pending(cpu) && !scrub_free_pages() && + !softirq_pending(cpu) ) pm_idle(); do_softirq(); /* @@ -183,14 +183,14 @@ void dump_pageframe_info(struct domain *d) unsigned long total[MASK_EXTR(PGT_type_mask, PGT_type_mask) + 1] = {}; spin_lock(&d->page_alloc_lock); - page_list_for_each ( page, &d->page_list ) + page_list_for_each (page, &d->page_list) { - unsigned int index = MASK_EXTR(page->u.inuse.type_info, - PGT_type_mask); + unsigned int index = + MASK_EXTR(page->u.inuse.type_info, PGT_type_mask); if ( ++total[index] > 16 ) { - switch ( page->u.inuse.type_info & PGT_type_mask ) + switch (page->u.inuse.type_info & PGT_type_mask) { case PGT_none: case PGT_writable_page: @@ -198,8 +198,8 @@ void dump_pageframe_info(struct domain *d) } } printk(" DomPage %p: caf=%08lx, taf=%" PRtype_info "\n", - _p(mfn_x(page_to_mfn(page))), - page->count_info, page->u.inuse.type_info); + _p(mfn_x(page_to_mfn(page))), page->count_info, + page->u.inuse.type_info); } spin_unlock(&d->page_alloc_lock); } @@ -208,11 +208,11 @@ void dump_pageframe_info(struct domain *d) p2m_pod_dump_data(d); spin_lock(&d->page_alloc_lock); - page_list_for_each ( page, &d->xenpage_list ) + page_list_for_each (page, &d->xenpage_list) { printk(" XenPage %p: caf=%08lx, taf=%" PRtype_info "\n", - _p(mfn_x(page_to_mfn(page))), - page->count_info, page->u.inuse.type_info); + _p(mfn_x(page_to_mfn(page))), page->count_info, + page->u.inuse.type_info); } spin_unlock(&d->page_alloc_lock); } @@ -280,10 +280,9 @@ struct domain *alloc_domain_struct(void) static unsigned int __read_mostly bits; if ( unlikely(!bits) ) - bits = _domain_struct_bits(); + bits = _domain_struct_bits(); #endif - #ifndef CONFIG_LOCK_PROFILE BUILD_BUG_ON(sizeof(*d) > PAGE_SIZE); #endif @@ -392,7 +391,7 @@ int arch_vcpu_create(struct vcpu *v) return rc; - fail: +fail: vcpu_destroy_fpu(v); xfree(v->arch.msrs); v->arch.msrs = NULL; @@ -468,8 +467,7 @@ static bool emulation_flags_ok(const struct domain *d, uint32_t emflags) return true; } -int arch_domain_create(struct domain *d, - struct xen_domctl_createdomain *config) +int arch_domain_create(struct domain *d, struct xen_domctl_createdomain *config) { bool paging_initialised = false; uint32_t emflags; @@ -486,7 +484,7 @@ int arch_domain_create(struct domain *d, { static const struct arch_csw idle_csw = { .from = paravirt_ctxt_switch_from, - .to = paravirt_ctxt_switch_to, + .to = paravirt_ctxt_switch_to, .tail = continue_idle_domain, }; @@ -510,11 +508,10 @@ int arch_domain_create(struct domain *d, if ( !opt_allow_unsafe ) { printk(XENLOG_G_ERR "Xen does not allow DomU creation on this CPU" - " for security reasons.\n"); + " for security reasons.\n"); return -EPERM; } - printk(XENLOG_G_WARNING - "Dom%d may compromise security on this CPU.\n", + printk(XENLOG_G_WARNING "Dom%d may compromise security on this CPU.\n", d->domain_id); } @@ -535,7 +532,7 @@ int arch_domain_create(struct domain *d, if ( !emulation_flags_ok(d, emflags) ) { printk(XENLOG_G_ERR "d%d: Xen does not allow %s domain creation " - "with the current selection of emulators: %#x\n", + "with the current selection 
of emulators: %#x\n", d->domain_id, is_hvm_domain(d) ? "HVM" : "PV", emflags); return -EOPNOTSUPP; } @@ -615,7 +612,7 @@ int arch_domain_create(struct domain *d, return 0; - fail: +fail: d->is_dying = DOMDYING_dead; psr_domain_free(d); iommu_domain_destroy(d); @@ -690,7 +687,7 @@ int arch_domain_soft_reset(struct domain *d) hvm_domain_soft_reset(d); spin_lock(&d->event_lock); - for ( i = 0; i < d->nr_pirqs ; i++ ) + for ( i = 0; i < d->nr_pirqs; i++ ) { if ( domain_pirq_to_emuirq(d, i) != IRQ_UNBOUND ) { @@ -710,7 +707,7 @@ int arch_domain_soft_reset(struct domain *d) */ owner = page_get_owner_and_reference(page); - ASSERT( owner == d ); + ASSERT(owner == d); mfn = page_to_mfn(page); gfn = mfn_to_gmfn(d, mfn_x(mfn)); @@ -734,7 +731,8 @@ int arch_domain_soft_reset(struct domain *d) if ( !new_page ) { printk(XENLOG_G_ERR "Failed to alloc a page to replace" - " Dom%d's shared_info frame %lx\n", d->domain_id, gfn); + " Dom%d's shared_info frame %lx\n", + d->domain_id, gfn); ret = -ENOMEM; goto exit_put_gfn; } @@ -753,12 +751,13 @@ int arch_domain_soft_reset(struct domain *d) if ( ret ) { printk(XENLOG_G_ERR "Failed to add a page to replace" - " Dom%d's shared_info frame %lx\n", d->domain_id, gfn); + " Dom%d's shared_info frame %lx\n", + d->domain_id, gfn); free_domheap_page(new_page); } - exit_put_gfn: +exit_put_gfn: put_gfn(d, gfn); - exit_put_page: +exit_put_page: put_page(page); return ret; @@ -775,8 +774,7 @@ CHECK_FIELD_(struct, vcpu_guest_context, fpu_ctxt); #undef xen_vcpu_guest_context /* Called by XEN_DOMCTL_setvcpucontext and VCPUOP_initialise. */ -int arch_set_info_guest( - struct vcpu *v, vcpu_guest_context_u c) +int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_u c) { struct domain *d = v->domain; unsigned int i; @@ -838,13 +836,12 @@ int arch_set_info_guest( } /* LDT safety checks. */ - if ( ((c(ldt_base) & (PAGE_SIZE - 1)) != 0) || - (c(ldt_ents) > 8192) ) + if ( ((c(ldt_base) & (PAGE_SIZE - 1)) != 0) || (c(ldt_ents) > 8192) ) return -EINVAL; } v->arch.flags &= ~TF_kernel_mode; - if ( (flags & VGCF_in_kernel) || is_hvm_domain(d)/*???*/ ) + if ( (flags & VGCF_in_kernel) || is_hvm_domain(d) /*???*/ ) v->arch.flags |= TF_kernel_mode; v->arch.vgc_flags = flags; @@ -866,8 +863,7 @@ int arch_set_info_guest( if ( is_pv_domain(d) ) { for ( i = 0; i < ARRAY_SIZE(c.cmp->trap_ctxt); ++i ) - XLAT_trap_info(v->arch.pv.trap_ctxt + i, - c.cmp->trap_ctxt + i); + XLAT_trap_info(v->arch.pv.trap_ctxt + i, c.cmp->trap_ctxt + i); } } @@ -913,7 +909,9 @@ int arch_set_info_guest( pfn = pagetable_get_pfn(v->arch.guest_table_user); fail |= xen_pfn_to_cr3(pfn) != c.nat->ctrlreg[1]; } - } else { + } + else + { l4_pgentry_t *l4tab = map_domain_page(_mfn(pfn)); pfn = l4e_get_pfn(*l4tab); @@ -929,7 +927,7 @@ int arch_set_info_guest( fail |= v->arch.pv.ldt_ents != c(ldt_ents); if ( fail ) - return -EOPNOTSUPP; + return -EOPNOTSUPP; } v->arch.pv.kernel_ss = c(kernel_ss); @@ -943,8 +941,7 @@ int arch_set_info_guest( { v->arch.pv.syscall_callback_eip = c.nat->syscall_callback_eip; /* non-nul selector kills fs_base */ - v->arch.pv.fs_base = - !(v->arch.user_regs.fs & ~3) ? c.nat->fs_base : 0; + v->arch.pv.fs_base = !(v->arch.user_regs.fs & ~3) ? 
c.nat->fs_base : 0; v->arch.pv.gs_base_kernel = c.nat->gs_base_kernel; /* non-nul selector kills gs_base_user */ v->arch.pv.gs_base_user = @@ -991,7 +988,7 @@ int arch_set_info_guest( spin_lock_recursive(&d->page_alloc_lock); - for ( i = 0; ; ) + for ( i = 0;; ) { struct page_info *page = page_list_remove_head(&d->page_list); @@ -1063,10 +1060,9 @@ int arch_set_info_guest( if ( !compat ) rc = put_old_guest_table(v); if ( !rc ) - rc = get_page_type_preemptible(cr3_page, - !compat ? PGT_root_page_table - : PGT_l3_page_table); - switch ( rc ) + rc = get_page_type_preemptible( + cr3_page, !compat ? PGT_root_page_table : PGT_l3_page_table); + switch (rc) { case -EINTR: rc = -ERESTART; @@ -1098,7 +1094,7 @@ int arch_set_info_guest( else if ( !paging_mode_refcounts(d) ) { rc = get_page_type_preemptible(cr3_page, PGT_root_page_table); - switch ( rc ) + switch (rc) { case -EINTR: rc = -ERESTART; @@ -1120,7 +1116,7 @@ int arch_set_info_guest( } } if ( !rc ) - v->arch.guest_table_user = pagetable_from_page(cr3_page); + v->arch.guest_table_user = pagetable_from_page(cr3_page); } } else @@ -1128,8 +1124,9 @@ int arch_set_info_guest( l4_pgentry_t *l4tab; l4tab = map_domain_page(pagetable_get_mfn(v->arch.guest_table)); - *l4tab = l4e_from_mfn(page_to_mfn(cr3_page), - _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED); + *l4tab = l4e_from_mfn(page_to_mfn(cr3_page), _PAGE_PRESENT | _PAGE_RW | + _PAGE_USER | + _PAGE_ACCESSED); unmap_domain_page(l4tab); } if ( rc ) @@ -1154,7 +1151,7 @@ int arch_set_info_guest( update_cr3(v); #endif /* CONFIG_PV */ - out: +out: if ( flags & VGCF_online ) clear_bit(_VPF_down, &v->pause_flags); else @@ -1197,13 +1194,11 @@ int arch_vcpu_reset(struct vcpu *v) return 0; } -long -arch_do_vcpu_op( - int cmd, struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg) +long arch_do_vcpu_op(int cmd, struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg) { long rc = 0; - switch ( cmd ) + switch (cmd) { case VCPUOP_register_vcpu_time_memory_area: { @@ -1232,9 +1227,8 @@ arch_do_vcpu_op( if ( !is_pinned_vcpu(v) ) break; - cpu_id.phys_id = - (uint64_t)x86_cpu_to_apicid[v->vcpu_id] | - ((uint64_t)acpi_get_processor_id(v->vcpu_id) << 32); + cpu_id.phys_id = (uint64_t)x86_cpu_to_apicid[v->vcpu_id] | + ((uint64_t)acpi_get_processor_id(v->vcpu_id) << 32); rc = -EFAULT; if ( copy_to_guest(arg, &cpu_id, 1) ) @@ -1257,25 +1251,25 @@ arch_do_vcpu_op( * the safe side and re-initialize both to flat segment values before loading * a nul selector. 
*/ -#define preload_segment(seg, value) do { \ - if ( !((value) & ~3) && \ - boot_cpu_data.x86_vendor == X86_VENDOR_AMD ) \ - asm volatile ( "movl %k0, %%" #seg \ - :: "r" (FLAT_USER_DS32) ); \ -} while ( false ) - -#define loadsegment(seg,value) ({ \ - int __r = 1; \ - asm volatile ( \ - "1: movl %k1,%%" #seg "\n2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: xorl %k0,%k0\n" \ - " movl %k0,%%" #seg "\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b, 3b) \ - : "=r" (__r) : "r" (value), "0" (__r) );\ - __r; }) +#define preload_segment(seg, value) \ + do { \ + if ( !((value) & ~3) && boot_cpu_data.x86_vendor == X86_VENDOR_AMD ) \ + asm volatile("movl %k0, %%" #seg ::"r"(FLAT_USER_DS32)); \ + } while ( false ) + +#define loadsegment(seg, value) \ + ({ \ + int __r = 1; \ + asm volatile("1: movl %k1,%%" #seg "\n2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: xorl %k0,%k0\n" \ + " movl %k0,%%" #seg "\n" \ + " jmp 2b\n" \ + ".previous\n" _ASM_EXTABLE(1b, 3b) \ + : "=r"(__r) \ + : "r"(value), "0"(__r)); \ + __r; \ + }) /* * save_segments() writes a mask of segments which are dirty (non-zero), @@ -1283,12 +1277,12 @@ arch_do_vcpu_op( * MSR writes. */ static DEFINE_PER_CPU(unsigned int, dirty_segment_mask); -#define DIRTY_DS 0x01 -#define DIRTY_ES 0x02 -#define DIRTY_FS 0x04 -#define DIRTY_GS 0x08 -#define DIRTY_FS_BASE 0x10 -#define DIRTY_GS_BASE 0x20 +#define DIRTY_DS 0x01 +#define DIRTY_ES 0x02 +#define DIRTY_FS 0x04 +#define DIRTY_GS 0x08 +#define DIRTY_FS_BASE 0x10 +#define DIRTY_GS_BASE 0x20 static void load_segments(struct vcpu *n) { @@ -1306,13 +1300,15 @@ static void load_segments(struct vcpu *n) !(read_cr4() & X86_CR4_FSGSBASE) && !((uregs->fs | uregs->gs) & ~3) ) { unsigned long gsb = n->arch.flags & TF_kernel_mode - ? n->arch.pv.gs_base_kernel : n->arch.pv.gs_base_user; + ? n->arch.pv.gs_base_kernel + : n->arch.pv.gs_base_user; unsigned long gss = n->arch.flags & TF_kernel_mode - ? n->arch.pv.gs_base_user : n->arch.pv.gs_base_kernel; + ? n->arch.pv.gs_base_user + : n->arch.pv.gs_base_kernel; - fs_gs_done = svm_load_segs(n->arch.pv.ldt_ents, LDT_VIRT_START(n), - uregs->fs, n->arch.pv.fs_base, - uregs->gs, gsb, gss); + fs_gs_done = + svm_load_segs(n->arch.pv.ldt_ents, LDT_VIRT_START(n), uregs->fs, + n->arch.pv.fs_base, uregs->gs, gsb, gss); } #endif if ( !fs_gs_done ) @@ -1364,59 +1360,56 @@ static void load_segments(struct vcpu *n) wrgsshadow(n->arch.pv.gs_base_kernel); /* This can only be non-zero if selector is NULL. */ - if ( n->arch.pv.gs_base_user | - (dirty_segment_mask & DIRTY_GS_BASE) ) + if ( n->arch.pv.gs_base_user | (dirty_segment_mask & DIRTY_GS_BASE) ) wrgsbase(n->arch.pv.gs_base_user); /* If in kernel mode then switch the GS bases around. */ if ( (n->arch.flags & TF_kernel_mode) ) - asm volatile ( "swapgs" ); + asm volatile("swapgs"); } if ( unlikely(!all_segs_okay) ) { struct pv_vcpu *pv = &n->arch.pv; struct cpu_user_regs *regs = guest_cpu_user_regs(); - unsigned long *rsp = - (unsigned long *)(((n->arch.flags & TF_kernel_mode) - ? regs->rsp : pv->kernel_sp) & ~0xf); + unsigned long *rsp = (unsigned long *)(((n->arch.flags & TF_kernel_mode) + ? regs->rsp + : pv->kernel_sp) & + ~0xf); unsigned long cs_and_mask, rflags; /* Fold upcall mask and architectural IOPL into RFLAGS.IF. 
*/ - rflags = regs->rflags & ~(X86_EFLAGS_IF|X86_EFLAGS_IOPL); + rflags = regs->rflags & ~(X86_EFLAGS_IF | X86_EFLAGS_IOPL); rflags |= !vcpu_info(n, evtchn_upcall_mask) << 9; if ( VM_ASSIST(n->domain, architectural_iopl) ) rflags |= n->arch.pv.iopl; if ( is_pv_32bit_vcpu(n) ) { - unsigned int *esp = ring_1(regs) ? - (unsigned int *)regs->rsp : - (unsigned int *)pv->kernel_sp; + unsigned int *esp = ring_1(regs) ? (unsigned int *)regs->rsp + : (unsigned int *)pv->kernel_sp; int ret = 0; /* CS longword also contains full evtchn_upcall_mask. */ - cs_and_mask = (unsigned short)regs->cs | + cs_and_mask = + (unsigned short)regs->cs | ((unsigned int)vcpu_info(n, evtchn_upcall_mask) << 16); if ( !ring_1(regs) ) { - ret = put_user(regs->ss, esp-1); - ret |= put_user(regs->esp, esp-2); + ret = put_user(regs->ss, esp - 1); + ret |= put_user(regs->esp, esp - 2); esp -= 2; } - if ( ret | - put_user(rflags, esp-1) | - put_user(cs_and_mask, esp-2) | - put_user(regs->eip, esp-3) | - put_user(uregs->gs, esp-4) | - put_user(uregs->fs, esp-5) | - put_user(uregs->es, esp-6) | - put_user(uregs->ds, esp-7) ) + if ( ret | put_user(rflags, esp - 1) | + put_user(cs_and_mask, esp - 2) | put_user(regs->eip, esp - 3) | + put_user(uregs->gs, esp - 4) | put_user(uregs->fs, esp - 5) | + put_user(uregs->es, esp - 6) | put_user(uregs->ds, esp - 7) ) { - gprintk(XENLOG_ERR, - "error while creating compat failsafe callback frame\n"); + gprintk( + XENLOG_ERR, + "error while creating compat failsafe callback frame\n"); domain_crash(n->domain); } @@ -1424,12 +1417,12 @@ static void load_segments(struct vcpu *n) vcpu_info(n, evtchn_upcall_mask) = 1; regs->entry_vector |= TRAP_syscall; - regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT| - X86_EFLAGS_IOPL|X86_EFLAGS_TF); - regs->ss = FLAT_COMPAT_KERNEL_SS; - regs->esp = (unsigned long)(esp-7); - regs->cs = FLAT_COMPAT_KERNEL_CS; - regs->eip = pv->failsafe_callback_eip; + regs->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_RF | X86_EFLAGS_NT | + X86_EFLAGS_IOPL | X86_EFLAGS_TF); + regs->ss = FLAT_COMPAT_KERNEL_SS; + regs->esp = (unsigned long)(esp - 7); + regs->cs = FLAT_COMPAT_KERNEL_CS; + regs->eip = pv->failsafe_callback_eip; return; } @@ -1440,19 +1433,14 @@ static void load_segments(struct vcpu *n) /* CS longword also contains full evtchn_upcall_mask. 
*/ cs_and_mask = (unsigned long)regs->cs | - ((unsigned long)vcpu_info(n, evtchn_upcall_mask) << 32); - - if ( put_user(regs->ss, rsp- 1) | - put_user(regs->rsp, rsp- 2) | - put_user(rflags, rsp- 3) | - put_user(cs_and_mask, rsp- 4) | - put_user(regs->rip, rsp- 5) | - put_user(uregs->gs, rsp- 6) | - put_user(uregs->fs, rsp- 7) | - put_user(uregs->es, rsp- 8) | - put_user(uregs->ds, rsp- 9) | - put_user(regs->r11, rsp-10) | - put_user(regs->rcx, rsp-11) ) + ((unsigned long)vcpu_info(n, evtchn_upcall_mask) << 32); + + if ( put_user(regs->ss, rsp - 1) | put_user(regs->rsp, rsp - 2) | + put_user(rflags, rsp - 3) | put_user(cs_and_mask, rsp - 4) | + put_user(regs->rip, rsp - 5) | put_user(uregs->gs, rsp - 6) | + put_user(uregs->fs, rsp - 7) | put_user(uregs->es, rsp - 8) | + put_user(uregs->ds, rsp - 9) | put_user(regs->r11, rsp - 10) | + put_user(regs->rcx, rsp - 11) ) { gprintk(XENLOG_ERR, "error while creating failsafe callback frame\n"); @@ -1463,12 +1451,12 @@ static void load_segments(struct vcpu *n) vcpu_info(n, evtchn_upcall_mask) = 1; regs->entry_vector |= TRAP_syscall; - regs->rflags &= ~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF| - X86_EFLAGS_NT|X86_EFLAGS_IOPL|X86_EFLAGS_TF); - regs->ss = FLAT_KERNEL_SS; - regs->rsp = (unsigned long)(rsp-11); - regs->cs = FLAT_KERNEL_CS; - regs->rip = pv->failsafe_callback_eip; + regs->rflags &= ~(X86_EFLAGS_AC | X86_EFLAGS_VM | X86_EFLAGS_RF | + X86_EFLAGS_NT | X86_EFLAGS_IOPL | X86_EFLAGS_TF); + regs->ss = FLAT_KERNEL_SS; + regs->rsp = (unsigned long)(rsp - 11); + regs->cs = FLAT_KERNEL_CS; + regs->rip = pv->failsafe_callback_eip; } } @@ -1541,9 +1529,8 @@ void paravirt_ctxt_switch_to(struct vcpu *v) root_pgentry_t *root_pgt = this_cpu(root_pgt); if ( root_pgt ) - root_pgt[root_table_offset(PERDOMAIN_VIRT_START)] = - l4e_from_page(v->domain->arch.perdomain_l3_pg, - __PAGE_HYPERVISOR_RW); + root_pgt[root_table_offset(PERDOMAIN_VIRT_START)] = l4e_from_page( + v->domain->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW); if ( unlikely(v->arch.dr7 & DR7_ACTIVE_MASK) ) activate_debugregs(v); @@ -1555,7 +1542,7 @@ void paravirt_ctxt_switch_to(struct vcpu *v) /* Update per-VCPU guest runstate shared memory area (if registered). */ void update_runstate_area(struct vcpu *v) { - struct guest_memory_policy policy = { .nested_guest_mode = false }; + struct guest_memory_policy policy = {.nested_guest_mode = false}; if ( !v->runstate_guest ) return; @@ -1603,12 +1590,12 @@ static inline bool need_full_gdt(const struct domain *d) static void __context_switch(void) { struct cpu_user_regs *stack_regs = guest_cpu_user_regs(); - unsigned int cpu = smp_processor_id(); - struct vcpu *p = per_cpu(curr_vcpu, cpu); - struct vcpu *n = current; - struct domain *pd = p->domain, *nd = n->domain; - seg_desc_t *gdt; - struct desc_ptr gdt_desc; + unsigned int cpu = smp_processor_id(); + struct vcpu *p = per_cpu(curr_vcpu, cpu); + struct vcpu *n = current; + struct domain *pd = p->domain, *nd = n->domain; + seg_desc_t *gdt; + struct desc_ptr gdt_desc; ASSERT(p != n); ASSERT(!vcpu_cpu_dirty(n)); @@ -1648,8 +1635,8 @@ static void __context_switch(void) psr_ctxt_switch_to(nd); - gdt = !is_pv_32bit_domain(nd) ? per_cpu(gdt_table, cpu) : - per_cpu(compat_gdt_table, cpu); + gdt = !is_pv_32bit_domain(nd) ? 
per_cpu(gdt_table, cpu) + : per_cpu(compat_gdt_table, cpu); if ( need_full_gdt(nd) ) { unsigned long mfn = virt_to_mfn(gdt); @@ -1665,7 +1652,7 @@ static void __context_switch(void) ((p->vcpu_id != n->vcpu_id) || !need_full_gdt(nd)) ) { gdt_desc.limit = LAST_RESERVED_GDT_BYTE; - gdt_desc.base = (unsigned long)(gdt - FIRST_RESERVED_GDT_ENTRY); + gdt_desc.base = (unsigned long)(gdt - FIRST_RESERVED_GDT_ENTRY); lgdt(&gdt_desc); } @@ -1695,7 +1682,6 @@ static void __context_switch(void) per_cpu(curr_vcpu, cpu) = n; } - void context_switch(struct vcpu *prev, struct vcpu *next) { unsigned int cpu = smp_processor_id(); @@ -1839,12 +1825,12 @@ void sync_vcpu_execstate(struct vcpu *v) } } -static int relinquish_memory( - struct domain *d, struct page_list_head *list, unsigned long type) +static int relinquish_memory(struct domain *d, struct page_list_head *list, + unsigned long type) { - struct page_info *page; - unsigned long x, y; - int ret = 0; + struct page_info *page; + unsigned long x, y; + int ret = 0; /* Use a recursive lock, as we may enter 'free_domheap_page'. */ spin_lock_recursive(&d->page_alloc_lock); @@ -1861,7 +1847,7 @@ static int relinquish_memory( if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) ) ret = put_page_and_type_preemptible(page); - switch ( ret ) + switch (ret) { case 0: break; @@ -1888,19 +1874,20 @@ static int relinquish_memory( * reference or partially validated. */ y = page->u.inuse.type_info; - for ( ; ; ) + for ( ;; ) { x = y; if ( likely((x & PGT_type_mask) != type) || - likely(!(x & (PGT_validated|PGT_partial))) ) + likely(!(x & (PGT_validated | PGT_partial))) ) break; y = cmpxchg(&page->u.inuse.type_info, x, - x & ~(PGT_validated|PGT_partial)); + x & ~(PGT_validated | PGT_partial)); if ( likely(y == x) ) { - /* No need for atomic update of type_info here: noone else updates it. */ - switch ( ret = free_page_type(page, x, 1) ) + /* No need for atomic update of type_info here: noone else + * updates it. */ + switch (ret = free_page_type(page, x, 1)) { case 0: break; @@ -1944,7 +1931,7 @@ static int relinquish_memory( /* list is empty at this point. */ page_list_move(list, &d->arch.relmem_list); - out: +out: spin_unlock_recursive(&d->page_alloc_lock); return ret; } @@ -1961,7 +1948,7 @@ int domain_relinquish_resources(struct domain *d) * logic implements a co-routine, stashing state in struct domain across * hypercall continuation boundaries. */ - switch ( d->arch.rel_priv ) + switch (d->arch.rel_priv) { /* * Record the current progress. Subsequent hypercall continuations @@ -1973,10 +1960,12 @@ int domain_relinquish_resources(struct domain *d) * To avoid redundant work, there should be a marker before each * function which may return -ERESTART. */ -#define PROGRESS(x) \ - d->arch.rel_priv = PROG_ ## x; /* Fallthrough */ case PROG_ ## x +#define PROGRESS(x) \ + d->arch.rel_priv = PROG_##x; /* Fallthrough */ \ + case PROG_##x - enum { + enum + { PROG_paging = 1, PROG_vcpu_pagetables, PROG_shared, @@ -1992,17 +1981,19 @@ int domain_relinquish_resources(struct domain *d) if ( ret ) return ret; - PROGRESS(paging): + PROGRESS(paging) + : - /* Tear down paging-assistance stuff. */ - ret = paging_teardown(d); + /* Tear down paging-assistance stuff. */ + ret = paging_teardown(d); if ( ret ) return ret; - PROGRESS(vcpu_pagetables): + PROGRESS(vcpu_pagetables) + : - /* Drop the in-use references to page-table bases. */ - for_each_vcpu ( d, v ) + /* Drop the in-use references to page-table bases. 
*/ + for_each_vcpu (d, v) { ret = vcpu_destroy_pagetables(v); if ( ret ) @@ -2011,13 +2002,13 @@ int domain_relinquish_resources(struct domain *d) if ( altp2m_active(d) ) { - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) altp2m_vcpu_disable_ve(v); } if ( is_pv_domain(d) ) { - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { /* Relinquish GDT/LDT mappings. */ pv_destroy_ldt(v); @@ -2033,9 +2024,10 @@ int domain_relinquish_resources(struct domain *d) d->arch.auto_unmask = 0; } - PROGRESS(shared): + PROGRESS(shared) + : - if ( is_hvm_domain(d) ) + if ( is_hvm_domain(d) ) { /* If the domain has shared pages, relinquish them allowing * for preemption. */ @@ -2049,32 +2041,35 @@ int domain_relinquish_resources(struct domain *d) INIT_PAGE_LIST_HEAD(&d->arch.relmem_list); spin_unlock(&d->page_alloc_lock); - PROGRESS(xen): + PROGRESS(xen) + : - ret = relinquish_memory(d, &d->xenpage_list, ~0UL); + ret = relinquish_memory(d, &d->xenpage_list, ~0UL); if ( ret ) return ret; - PROGRESS(l4): + PROGRESS(l4) + : - ret = relinquish_memory(d, &d->page_list, PGT_l4_page_table); + ret = relinquish_memory(d, &d->page_list, PGT_l4_page_table); if ( ret ) return ret; - PROGRESS(l3): + PROGRESS(l3) + : - ret = relinquish_memory(d, &d->page_list, PGT_l3_page_table); + ret = relinquish_memory(d, &d->page_list, PGT_l3_page_table); if ( ret ) return ret; - PROGRESS(l2): + PROGRESS(l2) + : - ret = relinquish_memory(d, &d->page_list, PGT_l2_page_table); + ret = relinquish_memory(d, &d->page_list, PGT_l2_page_table); if ( ret ) return ret; - PROGRESS(done): - break; + PROGRESS(done): break; #undef PROGRESS @@ -2119,7 +2114,7 @@ void vcpu_kick(struct vcpu *v) * pending flag. These values may fluctuate (after all, we hold no * locks) but the key insight is that each change will cause * evtchn_upcall_pending to be polled. - * + * * NB2. We save the running flag across the unblock to avoid a needless * IPI for domains that we IPI'd to unblock. */ @@ -2160,7 +2155,6 @@ static int __init init_vcpu_kick_softirq(void) } __initcall(init_vcpu_kick_softirq); - /* * Local variables: * mode: C diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c index 4a07cfb18e..01e0f2bd7f 100644 --- a/xen/arch/x86/domain_page.c +++ b/xen/arch/x86/domain_page.c @@ -168,7 +168,7 @@ void *map_domain_page(mfn_t mfn) l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_mfn(mfn, __PAGE_HYPERVISOR_RW)); - out: +out: local_irq_restore(flags); return (void *)MAPCACHE_VIRT_START + pfn_to_paddr(idx); } @@ -209,8 +209,7 @@ void unmap_domain_page(const void *ptr) if ( hashent->idx != MAPHASHENT_NOTINUSE ) { /* /First/, zap the PTE. */ - ASSERT(l1e_get_pfn(MAPCACHE_L1ENT(hashent->idx)) == - hashent->mfn); + ASSERT(l1e_get_pfn(MAPCACHE_L1ENT(hashent->idx)) == hashent->mfn); l1e_write(&MAPCACHE_L1ENT(hashent->idx), l1e_empty()); /* /Second/, mark as garbage. 
*/ set_bit(hashent->idx, dcache->garbage); @@ -243,19 +242,21 @@ int mapcache_domain_init(struct domain *d) return 0; #endif - BUILD_BUG_ON(MAPCACHE_VIRT_END + PAGE_SIZE * (3 + - 2 * PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long))) > + BUILD_BUG_ON(MAPCACHE_VIRT_END + + PAGE_SIZE * + (3 + 2 * PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * + sizeof(long))) > MAPCACHE_VIRT_START + (PERDOMAIN_SLOT_MBYTES << 20)); bitmap_pages = PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long)); dcache->inuse = (void *)MAPCACHE_VIRT_END + PAGE_SIZE; - dcache->garbage = dcache->inuse + - (bitmap_pages + 1) * PAGE_SIZE / sizeof(long); + dcache->garbage = + dcache->inuse + (bitmap_pages + 1) * PAGE_SIZE / sizeof(long); spin_lock_init(&dcache->lock); return create_perdomain_mapping(d, (unsigned long)dcache->inuse, - 2 * bitmap_pages + 1, - NIL(l1_pgentry_t *), NULL); + 2 * bitmap_pages + 1, NIL(l1_pgentry_t *), + NULL); } int mapcache_vcpu_init(struct vcpu *v) @@ -277,11 +278,11 @@ int mapcache_vcpu_init(struct vcpu *v) /* Populate bit maps. */ if ( !rc ) - rc = create_perdomain_mapping(d, (unsigned long)dcache->inuse, - nr, NULL, NIL(struct page_info *)); + rc = create_perdomain_mapping(d, (unsigned long)dcache->inuse, nr, + NULL, NIL(struct page_info *)); if ( !rc ) - rc = create_perdomain_mapping(d, (unsigned long)dcache->garbage, - nr, NULL, NIL(struct page_info *)); + rc = create_perdomain_mapping(d, (unsigned long)dcache->garbage, nr, + NULL, NIL(struct page_info *)); if ( rc ) return rc; @@ -304,10 +305,9 @@ int mapcache_vcpu_init(struct vcpu *v) void *map_domain_page_global(mfn_t mfn) { - ASSERT(!in_irq() && - ((system_state >= SYS_STATE_boot && - system_state < SYS_STATE_active) || - local_irq_is_enabled())); + ASSERT(!in_irq() && ((system_state >= SYS_STATE_boot && + system_state < SYS_STATE_active) || + local_irq_is_enabled())); #ifdef NDEBUG if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) ) diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c index 9bf2d0820f..894bdc8e35 100644 --- a/xen/arch/x86/domctl.c +++ b/xen/arch/x86/domctl.c @@ -24,7 +24,7 @@ #include #include #include -#include /* for hvm_acpi_power_button */ +#include /* for hvm_acpi_power_button */ #include /* for arch_do_domctl */ #include #include @@ -38,10 +38,10 @@ static int gdbsx_guest_mem_io(domid_t domid, struct xen_domctl_gdbsx_memio *iop) { - void * __user gva = (void *)iop->gva, * __user uva = (void *)iop->uva; + void *__user gva = (void *)iop->gva, *__user uva = (void *)iop->uva; - iop->remain = dbg_rw_mem(gva, uva, iop->len, domid, - !!iop->gwr, iop->pgd3val); + iop->remain = + dbg_rw_mem(gva, uva, iop->len, domid, !!iop->gwr, iop->pgd3val); return iop->remain ? -EFAULT : 0; } @@ -50,7 +50,7 @@ static int update_domain_cpuid_info(struct domain *d, const struct xen_domctl_cpuid *ctl) { struct cpuid_policy *p = d->arch.cpuid; - const struct cpuid_leaf leaf = { ctl->eax, ctl->ebx, ctl->ecx, ctl->edx }; + const struct cpuid_leaf leaf = {ctl->eax, ctl->ebx, ctl->ecx, ctl->edx}; int old_vendor = p->x86_vendor; unsigned int old_7d0 = p->feat.raw[0].d, old_e8b = p->extd.raw[8].b; bool call_policy_changed = false; /* Avoid for_each_vcpu() unnecessarily */ @@ -59,28 +59,27 @@ static int update_domain_cpuid_info(struct domain *d, * Skip update for leaves we don't care about, to avoid the overhead of * recalculate_cpuid_policy(). */ - switch ( ctl->input[0] ) + switch (ctl->input[0]) { case 0x00000000 ... 
ARRAY_SIZE(p->basic.raw) - 1: - if ( ctl->input[0] == 4 && - ctl->input[1] >= ARRAY_SIZE(p->cache.raw) ) + if ( ctl->input[0] == 4 && ctl->input[1] >= ARRAY_SIZE(p->cache.raw) ) return 0; - if ( ctl->input[0] == 7 && - ctl->input[1] >= ARRAY_SIZE(p->feat.raw) ) + if ( ctl->input[0] == 7 && ctl->input[1] >= ARRAY_SIZE(p->feat.raw) ) return 0; - if ( ctl->input[0] == 0xb && - ctl->input[1] >= ARRAY_SIZE(p->topo.raw) ) + if ( ctl->input[0] == 0xb && ctl->input[1] >= ARRAY_SIZE(p->topo.raw) ) return 0; BUILD_BUG_ON(ARRAY_SIZE(p->xstate.raw) < 2); if ( ctl->input[0] == XSTATE_CPUID && - ctl->input[1] != 1 ) /* Everything else automatically calculated. */ + ctl->input[1] != + 1 ) /* Everything else automatically calculated. */ return 0; break; - case 0x40000000: case 0x40000100: + case 0x40000000: + case 0x40000100: /* Only care about the max_leaf limit. */ case 0x80000000 ... 0x80000000 + ARRAY_SIZE(p->extd.raw) - 1: @@ -91,10 +90,10 @@ static int update_domain_cpuid_info(struct domain *d, } /* Insert ctl data into cpuid_policy. */ - switch ( ctl->input[0] ) + switch (ctl->input[0]) { case 0x00000000 ... ARRAY_SIZE(p->basic.raw) - 1: - switch ( ctl->input[0] ) + switch (ctl->input[0]) { case 4: p->cache.raw[ctl->input[1]] = leaf; @@ -133,7 +132,7 @@ static int update_domain_cpuid_info(struct domain *d, recalculate_cpuid_policy(d); - switch ( ctl->input[0] ) + switch (ctl->input[0]) { case 0: call_policy_changed = (p->x86_vendor != old_vendor); @@ -156,7 +155,7 @@ static int update_domain_cpuid_info(struct domain *d, if ( cpu_has_htt ) edx |= cpufeat_mask(X86_FEATURE_HTT); - switch ( boot_cpu_data.x86_vendor ) + switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: /* @@ -230,10 +229,10 @@ static int update_domain_cpuid_info(struct domain *d, * If the IBRS/IBPB policy has changed, we need to recalculate the MSR * interception bitmaps. */ - call_policy_changed = (is_hvm_domain(d) && - ((old_7d0 ^ p->feat.raw[0].d) & - (cpufeat_mask(X86_FEATURE_IBRSB) | - cpufeat_mask(X86_FEATURE_L1D_FLUSH)))); + call_policy_changed = + (is_hvm_domain(d) && ((old_7d0 ^ p->feat.raw[0].d) & + (cpufeat_mask(X86_FEATURE_IBRSB) | + cpufeat_mask(X86_FEATURE_L1D_FLUSH)))); break; case 0xa: @@ -245,7 +244,7 @@ static int update_domain_cpuid_info(struct domain *d, { struct vcpu *v; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) vpmu_destroy(v); } break; @@ -285,7 +284,7 @@ static int update_domain_cpuid_info(struct domain *d, if ( p->x86_vendor != X86_VENDOR_AMD ) edx &= ~CPUID_COMMON_1D_FEATURES; - switch ( boot_cpu_data.x86_vendor ) + switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: mask &= ((uint64_t)edx << 32) | ecx; @@ -314,9 +313,9 @@ static int update_domain_cpuid_info(struct domain *d, * If the IBPB policy has changed, we need to recalculate the MSR * interception bitmaps. 
*/ - call_policy_changed = (is_hvm_domain(d) && - ((old_e8b ^ p->extd.raw[8].b) & - cpufeat_mask(X86_FEATURE_IBPB))); + call_policy_changed = + (is_hvm_domain(d) && + ((old_e8b ^ p->extd.raw[8].b) & cpufeat_mask(X86_FEATURE_IBPB))); break; } @@ -324,7 +323,7 @@ static int update_domain_cpuid_info(struct domain *d, { struct vcpu *v; - for_each_vcpu( d, v ) + for_each_vcpu (d, v) cpuid_policy_updated(v); } @@ -349,7 +348,7 @@ static int vcpu_set_vmce(struct vcpu *v, }; #undef VMCE_SIZE - struct hvm_vmce_vcpu vmce = { }; + struct hvm_vmce_vcpu vmce = {}; unsigned int evc_vmce_size = min(evc->size - offsetof(typeof(*evc), vmce), sizeof(evc->vmce)); unsigned int i = 0; @@ -380,9 +379,8 @@ void arch_get_domain_info(const struct domain *d, #define MAX_IOPORTS 0x10000 -long arch_do_domctl( - struct xen_domctl *domctl, struct domain *d, - XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) +long arch_do_domctl(struct xen_domctl *domctl, struct domain *d, + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) { struct vcpu *curr = current; struct domain *currd = curr->domain; @@ -390,14 +388,13 @@ long arch_do_domctl( bool copyback = false; unsigned long i; - switch ( domctl->cmd ) + switch (domctl->cmd) { - case XEN_DOMCTL_shadow_op: ret = paging_domctl(d, &domctl->u.shadow_op, u_domctl, 0); if ( ret == -ERESTART ) - return hypercall_create_continuation(__HYPERVISOR_arch_1, - "h", u_domctl); + return hypercall_create_continuation(__HYPERVISOR_arch_1, "h", + u_domctl); copyback = true; break; @@ -450,8 +447,7 @@ long arch_do_domctl( page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC); - if ( unlikely(!page) || - unlikely(is_xen_heap_page(page)) ) + if ( unlikely(!page) || unlikely(is_xen_heap_page(page)) ) { if ( unlikely(p2m_is_broken(t)) ) type = XEN_DOMCTL_PFINFO_BROKEN; @@ -460,7 +456,7 @@ long arch_do_domctl( } else { - switch( page->u.inuse.type_info & PGT_type_mask ) + switch (page->u.inuse.type_info & PGT_type_mask) { case PGT_l1_page_table: type = XEN_DOMCTL_PFINFO_L1TAB; @@ -526,7 +522,7 @@ long arch_do_domctl( case XEN_DOMCTL_sethvmcontext: { - struct hvm_domain_context c = { .size = domctl->u.hvmcontext.size }; + struct hvm_domain_context c = {.size = domctl->u.hvmcontext.size}; ret = -EINVAL; if ( (d == currd) || /* no domain_pause() */ @@ -552,7 +548,7 @@ long arch_do_domctl( case XEN_DOMCTL_gethvmcontext: { - struct hvm_domain_context c = { 0 }; + struct hvm_domain_context c = {0}; ret = -EINVAL; if ( (d == currd) || /* no domain_pause() */ @@ -613,7 +609,8 @@ long arch_do_domctl( ret = -EOPNOTSUPP; else if ( is_pv_domain(d) ) { - if ( ((domctl->u.address_size.size == 64) && !d->arch.is_32bit_pv) || + if ( ((domctl->u.address_size.size == 64) && + !d->arch.is_32bit_pv) || ((domctl->u.address_size.size == 32) && d->arch.is_32bit_pv) ) ret = 0; else if ( domctl->u.address_size.size == 32 ) @@ -659,7 +656,7 @@ long arch_do_domctl( (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL ) break; - switch ( domctl->u.sendtrigger.trigger ) + switch (domctl->u.sendtrigger.trigger) { case XEN_DOMCTL_SENDTRIGGER_NMI: ret = 0; @@ -770,7 +767,7 @@ long arch_do_domctl( ret = -EINVAL; if ( ((fgp | fmp | (np - 1)) >= MAX_IOPORTS) || - ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) ) + ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) ) { printk(XENLOG_G_ERR "ioport_map:invalid:dom%d gport=%x mport=%x nr=%x\n", @@ -793,8 +790,8 @@ long arch_do_domctl( "ioport_map:add: dom%d gport=%x mport=%x nr=%x\n", d->domain_id, fgp, fmp, np); - list_for_each_entry(g2m_ioport, &hvm->g2m_ioport_list, list) - if 
(g2m_ioport->mport == fmp ) + list_for_each_entry (g2m_ioport, &hvm->g2m_ioport_list, list) + if ( g2m_ioport->mport == fmp ) { g2m_ioport->gport = fgp; g2m_ioport->np = np; @@ -827,7 +824,7 @@ long arch_do_domctl( printk(XENLOG_G_INFO "ioport_map:remove: dom%d gport=%x mport=%x nr=%x\n", d->domain_id, fgp, fmp, np); - list_for_each_entry(g2m_ioport, &hvm->g2m_ioport_list, list) + list_for_each_entry (g2m_ioport, &hvm->g2m_ioport_list, list) if ( g2m_ioport->mport == fmp ) { list_del(&g2m_ioport->list); @@ -836,9 +833,10 @@ long arch_do_domctl( } ret = ioports_deny_access(d, fmp, fmp + np - 1); if ( ret && is_hardware_domain(currd) ) - printk(XENLOG_ERR - "ioport_map: error %ld denying dom%d access to [%x,%x]\n", - ret, d->domain_id, fmp, fmp + np - 1); + printk( + XENLOG_ERR + "ioport_map: error %ld denying dom%d access to [%x,%x]\n", + ret, d->domain_id, fmp, fmp + np - 1); } if ( !ret ) memory_type_changed(d); @@ -852,8 +850,7 @@ long arch_do_domctl( struct vcpu *v; ret = -ESRCH; - if ( (evc->vcpu >= d->max_vcpus) || - ((v = d->vcpu[evc->vcpu]) == NULL) ) + if ( (evc->vcpu >= d->max_vcpus) || ((v = d->vcpu[evc->vcpu]) == NULL) ) break; if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext ) @@ -867,26 +864,22 @@ long arch_do_domctl( if ( is_pv_domain(d) ) { - evc->sysenter_callback_cs = - v->arch.pv.sysenter_callback_cs; - evc->sysenter_callback_eip = - v->arch.pv.sysenter_callback_eip; - evc->sysenter_disables_events = + evc->sysenter_callback_cs = v->arch.pv.sysenter_callback_cs; + evc->sysenter_callback_eip = v->arch.pv.sysenter_callback_eip; + evc->sysenter_disables_events = v->arch.pv.sysenter_disables_events; - evc->syscall32_callback_cs = - v->arch.pv.syscall32_callback_cs; - evc->syscall32_callback_eip = - v->arch.pv.syscall32_callback_eip; + evc->syscall32_callback_cs = v->arch.pv.syscall32_callback_cs; + evc->syscall32_callback_eip = v->arch.pv.syscall32_callback_eip; evc->syscall32_disables_events = v->arch.pv.syscall32_disables_events; } else { - evc->sysenter_callback_cs = 0; - evc->sysenter_callback_eip = 0; - evc->sysenter_disables_events = 0; - evc->syscall32_callback_cs = 0; - evc->syscall32_callback_eip = 0; + evc->sysenter_callback_cs = 0; + evc->sysenter_callback_eip = 0; + evc->sysenter_disables_events = 0; + evc->syscall32_callback_cs = 0; + evc->syscall32_callback_eip = 0; evc->syscall32_disables_events = 0; } evc->vmce.caps = v->arch.vmce.mcg_cap; @@ -912,17 +905,13 @@ long arch_do_domctl( break; domain_pause(d); fixup_guest_code_selector(d, evc->sysenter_callback_cs); - v->arch.pv.sysenter_callback_cs = - evc->sysenter_callback_cs; - v->arch.pv.sysenter_callback_eip = - evc->sysenter_callback_eip; + v->arch.pv.sysenter_callback_cs = evc->sysenter_callback_cs; + v->arch.pv.sysenter_callback_eip = evc->sysenter_callback_eip; v->arch.pv.sysenter_disables_events = evc->sysenter_disables_events; fixup_guest_code_selector(d, evc->syscall32_callback_cs); - v->arch.pv.syscall32_callback_cs = - evc->syscall32_callback_cs; - v->arch.pv.syscall32_callback_eip = - evc->syscall32_callback_eip; + v->arch.pv.syscall32_callback_cs = evc->syscall32_callback_cs; + v->arch.pv.syscall32_callback_eip = evc->syscall32_callback_eip; v->arch.pv.syscall32_disables_events = evc->syscall32_disables_events; } @@ -975,10 +964,9 @@ long arch_do_domctl( else { domain_pause(d); - ret = tsc_set_info(d, domctl->u.tsc_info.tsc_mode, - domctl->u.tsc_info.elapsed_nsec, - domctl->u.tsc_info.gtsc_khz, - domctl->u.tsc_info.incarnation); + ret = tsc_set_info( + d, domctl->u.tsc_info.tsc_mode, 
domctl->u.tsc_info.elapsed_nsec, + domctl->u.tsc_info.gtsc_khz, domctl->u.tsc_info.incarnation); domain_unpause(d); } break; @@ -1011,7 +999,7 @@ long arch_do_domctl( domctl->u.gdbsx_guest_memio.remain = domctl->u.gdbsx_guest_memio.len; ret = gdbsx_guest_mem_io(domctl->domain, &domctl->u.gdbsx_guest_memio); if ( !ret ) - copyback = true; + copyback = true; break; case XEN_DOMCTL_gdbsx_pausevcpu: @@ -1056,7 +1044,7 @@ long arch_do_domctl( domctl->u.gdbsx_domstatus.paused = d->controller_pause_count > 0; if ( domctl->u.gdbsx_domstatus.paused ) { - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { if ( v->arch.gdbsx_vcpu_event ) { @@ -1083,8 +1071,7 @@ long arch_do_domctl( #define PV_XSAVE_SIZE(xcr0) (PV_XSAVE_HDR_SIZE + xstate_ctxt_size(xcr0)) ret = -ESRCH; - if ( (evc->vcpu >= d->max_vcpus) || - ((v = d->vcpu[evc->vcpu]) == NULL) ) + if ( (evc->vcpu >= d->max_vcpus) || ((v = d->vcpu[evc->vcpu]) == NULL) ) goto vcpuextstate_out; ret = -EINVAL; @@ -1147,9 +1134,9 @@ long arch_do_domctl( if ( copy_to_guest_offset(evc->buffer, offset, xsave_area, xsave_size) ) - ret = -EFAULT; + ret = -EFAULT; xfree(xsave_area); - } + } vcpu_unpause(v); @@ -1174,8 +1161,9 @@ long arch_do_domctl( ret = -ENOMEM; goto vcpuextstate_out; } - if ( copy_from_guest_offset(receive_buf, domctl->u.vcpuextstate.buffer, - offset, evc->size) ) + if ( copy_from_guest_offset(receive_buf, + domctl->u.vcpuextstate.buffer, offset, + evc->size) ) { ret = -EFAULT; xfree(receive_buf); @@ -1241,8 +1229,7 @@ long arch_do_domctl( ret = -EPERM; else { - audit_p2m(d, - &domctl->u.audit_p2m.orphans, + audit_p2m(d, &domctl->u.audit_p2m.orphans, &domctl->u.audit_p2m.m2p_bad, &domctl->u.audit_p2m.p2m_bad); copyback = true; @@ -1298,7 +1285,8 @@ long arch_do_domctl( if ( domctl->cmd == XEN_DOMCTL_get_vcpu_msrs ) { - ret = 0; copyback = true; + ret = 0; + copyback = true; /* NULL guest handle is a request for max size. 
*/ if ( guest_handle_is_null(vmsrs->msrs) ) @@ -1370,7 +1358,7 @@ long arch_do_domctl( if ( msr.reserved ) break; - switch ( msr.index ) + switch (msr.index) { case MSR_SPEC_CTRL: case MSR_INTEL_MISC_FEATURES_ENABLES: @@ -1404,7 +1392,7 @@ long arch_do_domctl( break; } - switch ( domctl->u.psr_cmt_op.cmd ) + switch (domctl->u.psr_cmt_op.cmd) { case XEN_DOMCTL_PSR_CMT_OP_ATTACH: ret = psr_alloc_rmid(d); @@ -1429,47 +1417,42 @@ long arch_do_domctl( break; case XEN_DOMCTL_psr_alloc: - switch ( domctl->u.psr_alloc.cmd ) + switch (domctl->u.psr_alloc.cmd) { case XEN_DOMCTL_PSR_SET_L3_CBM: ret = psr_set_val(d, domctl->u.psr_alloc.target, - domctl->u.psr_alloc.data, - PSR_TYPE_L3_CBM); + domctl->u.psr_alloc.data, PSR_TYPE_L3_CBM); break; case XEN_DOMCTL_PSR_SET_L3_CODE: ret = psr_set_val(d, domctl->u.psr_alloc.target, - domctl->u.psr_alloc.data, - PSR_TYPE_L3_CODE); + domctl->u.psr_alloc.data, PSR_TYPE_L3_CODE); break; case XEN_DOMCTL_PSR_SET_L3_DATA: ret = psr_set_val(d, domctl->u.psr_alloc.target, - domctl->u.psr_alloc.data, - PSR_TYPE_L3_DATA); + domctl->u.psr_alloc.data, PSR_TYPE_L3_DATA); break; case XEN_DOMCTL_PSR_SET_L2_CBM: ret = psr_set_val(d, domctl->u.psr_alloc.target, - domctl->u.psr_alloc.data, - PSR_TYPE_L2_CBM); + domctl->u.psr_alloc.data, PSR_TYPE_L2_CBM); break; case XEN_DOMCTL_PSR_SET_MBA_THRTL: ret = psr_set_val(d, domctl->u.psr_alloc.target, - domctl->u.psr_alloc.data, - PSR_TYPE_MBA_THRTL); + domctl->u.psr_alloc.data, PSR_TYPE_MBA_THRTL); break; -#define domctl_psr_get_val(d, domctl, type, copyback) ({ \ - uint32_t v_; \ - int r_ = psr_get_val((d), (domctl)->u.psr_alloc.target, \ - &v_, (type)); \ - \ - (domctl)->u.psr_alloc.data = v_; \ - (copyback) = true; \ - r_; \ -}) +#define domctl_psr_get_val(d, domctl, type, copyback) \ + ({ \ + uint32_t v_; \ + int r_ = psr_get_val((d), (domctl)->u.psr_alloc.target, &v_, (type)); \ + \ + (domctl)->u.psr_alloc.data = v_; \ + (copyback) = true; \ + r_; \ + }) case XEN_DOMCTL_PSR_GET_L3_CBM: ret = domctl_psr_get_val(d, domctl, PSR_TYPE_L3_CBM, copyback); @@ -1510,8 +1493,7 @@ long arch_do_domctl( if ( guest_handle_is_null(domctl->u.cpu_policy.cpuid_policy) ) domctl->u.cpu_policy.nr_leaves = CPUID_MAX_SERIALISED_LEAVES; else if ( (ret = x86_cpuid_copy_to_buffer( - d->arch.cpuid, - domctl->u.cpu_policy.cpuid_policy, + d->arch.cpuid, domctl->u.cpu_policy.cpuid_policy, &domctl->u.cpu_policy.nr_leaves)) ) break; @@ -1519,8 +1501,7 @@ long arch_do_domctl( if ( guest_handle_is_null(domctl->u.cpu_policy.msr_policy) ) domctl->u.cpu_policy.nr_msrs = MSR_MAX_SERIALISED_ENTRIES; else if ( (ret = x86_msr_copy_to_buffer( - d->arch.msr, - domctl->u.cpu_policy.msr_policy, + d->arch.msr, domctl->u.cpu_policy.msr_policy, &domctl->u.cpu_policy.nr_msrs)) ) break; @@ -1552,7 +1533,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c) #define c(fld) (!compat ? 
(c.nat->fld) : (c.cmp->fld)) memcpy(&c.nat->fpu_ctxt, v->arch.fpu_ctxt, sizeof(c.nat->fpu_ctxt)); - c(flags = v->arch.vgc_flags & ~(VGCF_i387_valid|VGCF_in_kernel)); + c(flags = v->arch.vgc_flags & ~(VGCF_i387_valid | VGCF_in_kernel)); if ( v->fpu_initialised ) c(flags |= VGCF_i387_valid); if ( !(v->pause_flags & VPF_down) ) @@ -1570,16 +1551,14 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c) if ( is_pv_domain(d) ) { for ( i = 0; i < ARRAY_SIZE(c.cmp->trap_ctxt); ++i ) - XLAT_trap_info(c.cmp->trap_ctxt + i, - v->arch.pv.trap_ctxt + i); + XLAT_trap_info(c.cmp->trap_ctxt + i, v->arch.pv.trap_ctxt + i); } } for ( i = 0; i < ARRAY_SIZE(v->arch.dr); ++i ) c(debugreg[i] = v->arch.dr[i]); c(debugreg[6] = v->arch.dr6); - c(debugreg[7] = v->arch.dr7 | - (is_pv_domain(d) ? v->arch.pv.dr7_emul : 0)); + c(debugreg[7] = v->arch.dr7 | (is_pv_domain(d) ? v->arch.pv.dr7_emul : 0)); if ( is_hvm_domain(d) ) { @@ -1649,11 +1628,12 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c) if ( !compat ) { - c.nat->ctrlreg[3] = xen_pfn_to_cr3( - pagetable_get_pfn(v->arch.guest_table)); - c.nat->ctrlreg[1] = - pagetable_is_null(v->arch.guest_table_user) ? 0 - : xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table_user)); + c.nat->ctrlreg[3] = + xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table)); + c.nat->ctrlreg[1] = pagetable_is_null(v->arch.guest_table_user) + ? 0 + : xen_pfn_to_cr3(pagetable_get_pfn( + v->arch.guest_table_user)); } else { diff --git a/xen/arch/x86/e820.c b/xen/arch/x86/e820.c index 0c67ccd262..2f8543ca24 100644 --- a/xen/arch/x86/e820.c +++ b/xen/arch/x86/e820.c @@ -44,30 +44,31 @@ struct e820map __initdata e820_raw; */ int __init e820_all_mapped(u64 start, u64 end, unsigned type) { - int i; - - for (i = 0; i < e820.nr_map; i++) { - struct e820entry *ei = &e820.map[i]; - - if (type && ei->type != type) - continue; - /* is the region (part) in overlap with the current region ?*/ - if (ei->addr >= end || ei->addr + ei->size <= start) - continue; - - /* if the region is at the beginning of we move - * start to the end of the region since it's ok until there - */ - if (ei->addr <= start) - start = ei->addr + ei->size; - /* - * if start is now at or beyond end, we're done, full - * coverage - */ - if (start >= end) - return 1; - } - return 0; + int i; + + for ( i = 0; i < e820.nr_map; i++ ) + { + struct e820entry *ei = &e820.map[i]; + + if ( type && ei->type != type ) + continue; + /* is the region (part) in overlap with the current region ?*/ + if ( ei->addr >= end || ei->addr + ei->size <= start ) + continue; + + /* if the region is at the beginning of we move + * start to the end of the region since it's ok until there + */ + if ( ei->addr <= start ) + start = ei->addr + ei->size; + /* + * if start is now at or beyond end, we're done, full + * coverage + */ + if ( start >= end ) + return 1; + } + return 0; } static void __init add_memory_region(unsigned long long start, @@ -77,7 +78,8 @@ static void __init add_memory_region(unsigned long long start, x = e820.nr_map; - if (x == ARRAY_SIZE(e820.map)) { + if ( x == ARRAY_SIZE(e820.map) ) + { printk(KERN_ERR "Ooops! 
Too many entries in the memory map!\n"); return; } @@ -88,15 +90,17 @@ static void __init add_memory_region(unsigned long long start, e820.nr_map++; } -static void __init print_e820_memory_map(struct e820entry *map, unsigned int entries) +static void __init print_e820_memory_map(struct e820entry *map, + unsigned int entries) { unsigned int i; - for (i = 0; i < entries; i++) { - printk(" %016Lx - %016Lx ", - (unsigned long long)(map[i].addr), + for ( i = 0; i < entries; i++ ) + { + printk(" %016Lx - %016Lx ", (unsigned long long)(map[i].addr), (unsigned long long)(map[i].addr + map[i].size)); - switch (map[i].type) { + switch (map[i].type) + { case E820_RAM: printk("(usable)\n"); break; @@ -122,16 +126,17 @@ static void __init print_e820_memory_map(struct e820entry *map, unsigned int ent /* * Sanitize the BIOS e820 map. * - * Some e820 responses include overlapping entries. The following + * Some e820 responses include overlapping entries. The following * replaces the original e820 map with a new one, removing overlaps. * */ -struct change_member { +struct change_member +{ struct e820entry *pbios; /* pointer to original bios entry */ unsigned long long addr; /* address for this change point */ }; -static struct change_member change_point_list[2*E820MAX] __initdata; -static struct change_member *change_point[2*E820MAX] __initdata; +static struct change_member change_point_list[2 * E820MAX] __initdata; +static struct change_member *change_point[2 * E820MAX] __initdata; static struct e820entry *overlap_list[E820MAX] __initdata; static struct e820entry new_bios[E820MAX] __initdata; @@ -183,106 +188,113 @@ int __init sanitize_e820_map(struct e820entry *biosmap, unsigned int *pnr_map) */ /* if there's only one memory region, don't bother */ - if (*pnr_map < 2) + if ( *pnr_map < 2 ) return -1; old_nr = *pnr_map; /* bail out if we find any unreasonable addresses in bios map */ - for (i=0; iaddr = biosmap[i].addr; change_point[chgidx++]->pbios = &biosmap[i]; change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size; change_point[chgidx++]->pbios = &biosmap[i]; } } - chg_nr = chgidx; /* true number of change-points */ + chg_nr = chgidx; /* true number of change-points */ /* sort change-point list by memory addresses (low -> high) */ still_changing = 1; - while (still_changing) { + while ( still_changing ) + { still_changing = 0; - for (i=1; i < chg_nr; i++) { + for ( i = 1; i < chg_nr; i++ ) + { /* if > , swap */ /* or, if current= & last=, swap */ - if ((change_point[i]->addr < change_point[i-1]->addr) || - ((change_point[i]->addr == change_point[i-1]->addr) && - (change_point[i]->addr == change_point[i]->pbios->addr) && - (change_point[i-1]->addr != change_point[i-1]->pbios->addr)) - ) + if ( (change_point[i]->addr < change_point[i - 1]->addr) || + ((change_point[i]->addr == change_point[i - 1]->addr) && + (change_point[i]->addr == change_point[i]->pbios->addr) && + (change_point[i - 1]->addr != + change_point[i - 1]->pbios->addr)) ) { change_tmp = change_point[i]; - change_point[i] = change_point[i-1]; - change_point[i-1] = change_tmp; - still_changing=1; + change_point[i] = change_point[i - 1]; + change_point[i - 1] = change_tmp; + still_changing = 1; } } } /* create a new bios memory map, removing overlaps */ - overlap_entries=0; /* number of entries in the overlap table */ - new_bios_entry=0; /* index for creating new bios map entries */ - last_type = 0; /* start with undefined memory type */ - last_addr = 0; /* start with 0 as last starting address */ + overlap_entries = 0; /* number of 
entries in the overlap table */ + new_bios_entry = 0; /* index for creating new bios map entries */ + last_type = 0; /* start with undefined memory type */ + last_addr = 0; /* start with 0 as last starting address */ /* loop through change-points, determining affect on the new bios map */ - for (chgidx=0; chgidx < chg_nr; chgidx++) + for ( chgidx = 0; chgidx < chg_nr; chgidx++ ) { /* keep track of all overlapping bios entries */ - if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr) + if ( change_point[chgidx]->addr == change_point[chgidx]->pbios->addr ) { /* add map entry to overlap list (> 1 entry implies an overlap) */ - overlap_list[overlap_entries++]=change_point[chgidx]->pbios; + overlap_list[overlap_entries++] = change_point[chgidx]->pbios; } else { /* remove entry from list (order independent, so swap with last) */ - for (i=0; ipbios) - overlap_list[i] = overlap_list[overlap_entries-1]; + if ( overlap_list[i] == change_point[chgidx]->pbios ) + overlap_list[i] = overlap_list[overlap_entries - 1]; } overlap_entries--; } /* if there are overlapping entries, decide which "type" to use */ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */ current_type = 0; - for (i=0; itype > current_type) + for ( i = 0; i < overlap_entries; i++ ) + if ( overlap_list[i]->type > current_type ) current_type = overlap_list[i]->type; /* continue building up new bios map based on this information */ - if (current_type != last_type) { - if (last_type != 0) { + if ( current_type != last_type ) + { + if ( last_type != 0 ) + { new_bios[new_bios_entry].size = change_point[chgidx]->addr - last_addr; - /* move forward only if the new size was non-zero */ - if (new_bios[new_bios_entry].size != 0) - if (++new_bios_entry >= ARRAY_SIZE(new_bios)) - break; /* no more space left for new bios entries */ + /* move forward only if the new size was non-zero */ + if ( new_bios[new_bios_entry].size != 0 ) + if ( ++new_bios_entry >= ARRAY_SIZE(new_bios) ) + break; /* no more space left for new bios entries */ } - if (current_type != 0) { + if ( current_type != 0 ) + { new_bios[new_bios_entry].addr = change_point[chgidx]->addr; new_bios[new_bios_entry].type = current_type; - last_addr=change_point[chgidx]->addr; + last_addr = change_point[chgidx]->addr; } last_type = current_type; } } - new_nr = new_bios_entry; /* retain count for new bios entries */ + new_nr = new_bios_entry; /* retain count for new bios entries */ /* copy new bios mapping into original location */ - memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry)); + memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry)); *pnr_map = new_nr; return 0; @@ -304,10 +316,10 @@ int __init sanitize_e820_map(struct e820entry *biosmap, unsigned int *pnr_map) * thinkpad 560x, for example, does not cooperate with the memory * detection code.) */ -static int __init copy_e820_map(struct e820entry * biosmap, int nr_map) +static int __init copy_e820_map(struct e820entry *biosmap, int nr_map) { /* Only one memory region (or negative)? Ignore it */ - if (nr_map < 2) + if ( nr_map < 2 ) return -1; do { @@ -317,29 +329,30 @@ static int __init copy_e820_map(struct e820entry * biosmap, int nr_map) unsigned long type = biosmap->type; /* Overflow in 64 bits? Ignore the memory map. */ - if (start > end) + if ( start > end ) return -1; /* * Some BIOSes claim RAM in the 640k - 1M region. * Not right. Fix it up. 
*/ - if (type == E820_RAM) { - if (start < 0x100000ULL && end > 0xA0000ULL) { - if (start < 0xA0000ULL) - add_memory_region(start, 0xA0000ULL-start, type); - if (end <= 0x100000ULL) + if ( type == E820_RAM ) + { + if ( start < 0x100000ULL && end > 0xA0000ULL ) + { + if ( start < 0xA0000ULL ) + add_memory_region(start, 0xA0000ULL - start, type); + if ( end <= 0x100000ULL ) continue; start = 0x100000ULL; size = end - start; } } add_memory_region(start, size, type); - } while (biosmap++,--nr_map); + } while ( biosmap++, --nr_map ); return 0; } - /* * Find the highest page frame number we have available */ @@ -348,16 +361,17 @@ static unsigned long __init find_max_pfn(void) int i; unsigned long max_pfn = 0; - for (i = 0; i < e820.nr_map; i++) { + for ( i = 0; i < e820.nr_map; i++ ) + { unsigned long start, end; /* RAM? */ - if (e820.map[i].type != E820_RAM) + if ( e820.map[i].type != E820_RAM ) continue; start = PFN_UP(e820.map[i].addr); end = PFN_DOWN(e820.map[i].addr + e820.map[i].size); - if (start >= end) + if ( start >= end ) continue; - if (end > max_pfn) + if ( end > max_pfn ) max_pfn = end; } @@ -370,7 +384,7 @@ static void __init clip_to_limit(uint64_t limit, char *warnmsg) char _warnmsg[160]; uint64_t old_limit = 0; - for ( ; ; ) + for ( ;; ) { /* Find a RAM region needing clipping. */ for ( i = 0; i < e820.nr_map; i++ ) @@ -380,10 +394,10 @@ static void __init clip_to_limit(uint64_t limit, char *warnmsg) /* If none found, we are done. */ if ( i == e820.nr_map ) - break; + break; - old_limit = max_t( - uint64_t, old_limit, e820.map[i].addr + e820.map[i].size); + old_limit = + max_t(uint64_t, old_limit, e820.map[i].addr + e820.map[i].size); /* We try to convert clipped RAM areas to E820_UNUSABLE. */ if ( e820_change_range_type(&e820, max(e820.map[i].addr, limit), @@ -392,7 +406,7 @@ static void __init clip_to_limit(uint64_t limit, char *warnmsg) continue; /* - * If the type change fails (e.g., not space in table) then we clip or + * If the type change fails (e.g., not space in table) then we clip or * delete the region as appropriate. */ if ( e820.map[i].addr < limit ) @@ -401,7 +415,7 @@ static void __init clip_to_limit(uint64_t limit, char *warnmsg) } else { - memmove(&e820.map[i], &e820.map[i+1], + memmove(&e820.map[i], &e820.map[i + 1], (e820.nr_map - i - 1) * sizeof(struct e820entry)); e820.nr_map--; } @@ -411,7 +425,7 @@ static void __init clip_to_limit(uint64_t limit, char *warnmsg) { if ( warnmsg ) { - snprintf(_warnmsg, sizeof(_warnmsg), warnmsg, (long)(limit>>30)); + snprintf(_warnmsg, sizeof(_warnmsg), warnmsg, (long)(limit >> 30)); printk("WARNING: %s\n", _warnmsg); } printk("Truncating RAM from %lukB to %lukB\n", @@ -430,9 +444,7 @@ static uint64_t __init mtrr_top_of_ram(void) if ( e820_mtrr_clip == -1 ) { char vendor[13]; - cpuid(0x00000000, &eax, - (uint32_t *)&vendor[0], - (uint32_t *)&vendor[8], + cpuid(0x00000000, &eax, (uint32_t *)&vendor[0], (uint32_t *)&vendor[8], (uint32_t *)&vendor[4]); vendor[12] = '\0'; e820_mtrr_clip = !strcmp(vendor, "GenuineIntel"); @@ -447,7 +459,7 @@ static uint64_t __init mtrr_top_of_ram(void) /* Does the CPU support architectural MTRRs? */ cpuid(0x00000001, &eax, &ebx, &ecx, &edx); if ( !test_bit(X86_FEATURE_MTRR & 31, &edx) ) - return 0; + return 0; /* Find the physical address size for this CPU. 
*/ eax = cpuid_eax(0x80000000); @@ -463,7 +475,8 @@ static uint64_t __init mtrr_top_of_ram(void) rdmsrl(MSR_MTRRdefType, mtrr_def); if ( e820_verbose ) - printk(" MTRR cap: %"PRIx64" type: %"PRIx64"\n", mtrr_cap, mtrr_def); + printk(" MTRR cap: %" PRIx64 " type: %" PRIx64 "\n", mtrr_cap, + mtrr_def); /* MTRRs enabled, and default memory type is not writeback? */ if ( !test_bit(11, &mtrr_def) || ((uint8_t)mtrr_def == MTRR_TYPE_WRBACK) ) @@ -480,8 +493,8 @@ static uint64_t __init mtrr_top_of_ram(void) rdmsrl(MSR_IA32_MTRR_PHYSMASK(i), mask); if ( e820_verbose ) - printk(" MTRR[%d]: base %"PRIx64" mask %"PRIx64"\n", - i, base, mask); + printk(" MTRR[%d]: base %" PRIx64 " mask %" PRIx64 "\n", i, base, + mask); if ( !test_bit(11, &mask) || ((uint8_t)base != MTRR_TYPE_WRBACK) ) continue; @@ -495,7 +508,7 @@ static uint64_t __init mtrr_top_of_ram(void) static void __init reserve_dmi_region(void) { - for ( ; ; ) + for ( ;; ) { paddr_t base; u32 len; @@ -505,7 +518,8 @@ static void __init reserve_dmi_region(void) break; if ( ((base + len) > base) && reserve_e820_ram(&e820, base, base + len) ) - printk("WARNING: %s table located in E820 RAM %"PRIpaddr"-%"PRIpaddr". Fixed.\n", + printk("WARNING: %s table located in E820 RAM %" PRIpaddr + "-%" PRIpaddr ". Fixed.\n", what, base, base + len); } } @@ -528,20 +542,21 @@ static void __init machine_specific_memory_setup(struct e820map *raw) if ( e820.map[i].type == E820_RAM ) size += e820.map[i].size; if ( size > opt_availmem ) - clip_to_limit( - e820.map[i-1].addr + e820.map[i-1].size - (size-opt_availmem), - NULL); + clip_to_limit(e820.map[i - 1].addr + e820.map[i - 1].size - + (size - opt_availmem), + NULL); } - mpt_limit = ((RDWR_MPT_VIRT_END - RDWR_MPT_VIRT_START) - / sizeof(unsigned long)) << PAGE_SHIFT; - ro_mpt_limit = ((RO_MPT_VIRT_END - RO_MPT_VIRT_START) - / sizeof(unsigned long)) << PAGE_SHIFT; + mpt_limit = + ((RDWR_MPT_VIRT_END - RDWR_MPT_VIRT_START) / sizeof(unsigned long)) + << PAGE_SHIFT; + ro_mpt_limit = + ((RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(unsigned long)) + << PAGE_SHIFT; if ( mpt_limit > ro_mpt_limit ) mpt_limit = ro_mpt_limit; - clip_to_limit(mpt_limit, - "Only the first %lu GB of the physical " - "memory map can be accessed by Xen."); + clip_to_limit(mpt_limit, "Only the first %lu GB of the physical " + "memory map can be accessed by Xen."); reserve_dmi_region(); @@ -551,8 +566,8 @@ static void __init machine_specific_memory_setup(struct e820map *raw) } /* This function relies on the passed in e820->map[] being sorted. 
*/ -int __init e820_add_range( - struct e820map *e820, uint64_t s, uint64_t e, uint32_t type) +int __init e820_add_range(struct e820map *e820, uint64_t s, uint64_t e, + uint32_t type) { unsigned int i; @@ -584,7 +599,8 @@ int __init e820_add_range( if ( e820->nr_map >= ARRAY_SIZE(e820->map) ) { printk(XENLOG_WARNING "E820: overflow while adding region" - " %"PRIx64"-%"PRIx64"\n", s, e); + " %" PRIx64 "-%" PRIx64 "\n", + s, e); return 0; } @@ -599,9 +615,8 @@ int __init e820_add_range( return 1; } -int __init e820_change_range_type( - struct e820map *e820, uint64_t s, uint64_t e, - uint32_t orig_type, uint32_t new_type) +int __init e820_change_range_type(struct e820map *e820, uint64_t s, uint64_t e, + uint32_t orig_type, uint32_t new_type) { uint64_t rs = 0, re = 0; int i; @@ -627,23 +642,23 @@ int __init e820_change_range_type( if ( (e820->nr_map + 1) > ARRAY_SIZE(e820->map) ) goto overflow; - memmove(&e820->map[i+1], &e820->map[i], - (e820->nr_map-i) * sizeof(e820->map[0])); + memmove(&e820->map[i + 1], &e820->map[i], + (e820->nr_map - i) * sizeof(e820->map[0])); e820->nr_map++; if ( s == rs ) { e820->map[i].size = e - s; e820->map[i].type = new_type; - e820->map[i+1].addr = e; - e820->map[i+1].size = re - e; + e820->map[i + 1].addr = e; + e820->map[i + 1].size = re - e; } else { e820->map[i].size = s - rs; - e820->map[i+1].addr = s; - e820->map[i+1].size = e - s; - e820->map[i+1].type = new_type; + e820->map[i + 1].addr = s; + e820->map[i + 1].size = e - s; + e820->map[i + 1].type = new_type; } } else @@ -651,35 +666,36 @@ int __init e820_change_range_type( if ( (e820->nr_map + 2) > ARRAY_SIZE(e820->map) ) goto overflow; - memmove(&e820->map[i+2], &e820->map[i], - (e820->nr_map-i) * sizeof(e820->map[0])); + memmove(&e820->map[i + 2], &e820->map[i], + (e820->nr_map - i) * sizeof(e820->map[0])); e820->nr_map += 2; e820->map[i].size = s - rs; - e820->map[i+1].addr = s; - e820->map[i+1].size = e - s; - e820->map[i+1].type = new_type; - e820->map[i+2].addr = e; - e820->map[i+2].size = re - e; + e820->map[i + 1].addr = s; + e820->map[i + 1].size = e - s; + e820->map[i + 1].type = new_type; + e820->map[i + 2].addr = e; + e820->map[i + 2].size = re - e; } /* Finally, look for any opportunities to merge adjacent e820 entries. 
*/ for ( i = 0; i < (e820->nr_map - 1); i++ ) { - if ( (e820->map[i].type != e820->map[i+1].type) || - ((e820->map[i].addr + e820->map[i].size) != e820->map[i+1].addr) ) + if ( (e820->map[i].type != e820->map[i + 1].type) || + ((e820->map[i].addr + e820->map[i].size) != + e820->map[i + 1].addr) ) continue; - e820->map[i].size += e820->map[i+1].size; - memmove(&e820->map[i+1], &e820->map[i+2], - (e820->nr_map-i-2) * sizeof(e820->map[0])); + e820->map[i].size += e820->map[i + 1].size; + memmove(&e820->map[i + 1], &e820->map[i + 2], + (e820->nr_map - i - 2) * sizeof(e820->map[0])); e820->nr_map--; i--; } return 1; - overflow: - printk("Overflow in e820 while reserving region %"PRIx64"-%"PRIx64"\n", +overflow: + printk("Overflow in e820 while reserving region %" PRIx64 "-%" PRIx64 "\n", s, e); return 0; } diff --git a/xen/arch/x86/efi/mkreloc.c b/xen/arch/x86/efi/mkreloc.c index 1aca79672a..7a44e3e05f 100644 --- a/xen/arch/x86/efi/mkreloc.c +++ b/xen/arch/x86/efi/mkreloc.c @@ -9,7 +9,8 @@ #include #include -struct mz_hdr { +struct mz_hdr +{ uint16_t signature; #define MZ_SIGNATURE 0x5a4d uint16_t last_page_size; @@ -29,7 +30,8 @@ struct mz_hdr { uint32_t extended_header_base; }; -struct pe_hdr { +struct pe_hdr +{ uint32_t signature; #define PE_SIGNATURE 0x00004550 uint16_t cpu; @@ -39,9 +41,10 @@ struct pe_hdr { uint32_t symbol_count; uint16_t opt_hdr_size; uint16_t flags; - struct { + struct + { uint16_t magic; -#define PE_MAGIC_EXE32 0x010b +#define PE_MAGIC_EXE32 0x010b #define PE_MAGIC_EXE32PLUS 0x020b uint8_t linker_major, linker_minor; uint32_t code_size, data_size, bss_size; @@ -51,11 +54,12 @@ struct pe_hdr { #define PE_PAGE_SIZE 0x1000 -#define PE_BASE_RELOC_ABS 0 -#define PE_BASE_RELOC_HIGHLOW 3 -#define PE_BASE_RELOC_DIR64 10 +#define PE_BASE_RELOC_ABS 0 +#define PE_BASE_RELOC_HIGHLOW 3 +#define PE_BASE_RELOC_DIR64 10 -struct coff_section { +struct coff_section +{ char name[8]; uint32_t size; uint32_t rva; @@ -66,23 +70,20 @@ struct coff_section { uint16_t relocation_count; uint16_t line_number_count; uint32_t flags; -#define COFF_SECTION_BSS 0x00000080U +#define COFF_SECTION_BSS 0x00000080U #define COFF_SECTION_DISCARDABLE 0x02000000U -#define COFF_SECTION_WRITEABLE 0x80000000U +#define COFF_SECTION_WRITEABLE 0x80000000U }; static void usage(const char *cmd, int rc) { - fprintf(rc ? stderr : stdout, - "Usage: %s \n", - cmd); + fprintf(rc ? 
stderr : stdout, "Usage: %s \n", cmd); exit(rc); } static unsigned int load(const char *name, int *handle, struct coff_section **sections, - uint_fast64_t *image_base, - uint32_t *image_size, + uint_fast64_t *image_base, uint32_t *image_size, unsigned int *width) { int in = open(name, O_RDONLY); @@ -90,15 +91,13 @@ static unsigned int load(const char *name, int *handle, struct pe_hdr pe_hdr; uint32_t base; - if ( in < 0 || - read(in, &mz_hdr, sizeof(mz_hdr)) != sizeof(mz_hdr) ) + if ( in < 0 || read(in, &mz_hdr, sizeof(mz_hdr)) != sizeof(mz_hdr) ) { perror(name); exit(2); } if ( mz_hdr.signature != MZ_SIGNATURE || - mz_hdr.relocations < sizeof(mz_hdr) || - !mz_hdr.extended_header_base ) + mz_hdr.relocations < sizeof(mz_hdr) || !mz_hdr.extended_header_base ) { fprintf(stderr, "%s: Wrong DOS file format\n", name); exit(2); @@ -117,9 +116,9 @@ static unsigned int load(const char *name, int *handle, perror(name); exit(3); } - switch ( (pe_hdr.signature == PE_SIGNATURE && - pe_hdr.opt_hdr_size > sizeof(pe_hdr.opt_hdr)) * - pe_hdr.opt_hdr.magic ) + switch ((pe_hdr.signature == PE_SIGNATURE && + pe_hdr.opt_hdr_size > sizeof(pe_hdr.opt_hdr)) * + pe_hdr.opt_hdr.magic) { case PE_MAGIC_EXE32: *width = 32; @@ -142,7 +141,7 @@ static unsigned int load(const char *name, int *handle, } if ( lseek(in, mz_hdr.extended_header_base + offsetof(struct pe_hdr, opt_hdr) + - pe_hdr.opt_hdr_size, + pe_hdr.opt_hdr_size, SEEK_SET) < 0 || read(in, *sections, pe_hdr.section_count * sizeof(**sections)) != pe_hdr.section_count * sizeof(**sections) ) @@ -187,9 +186,9 @@ static void unmap_section(const void *ptr, const struct coff_section *sec) } static void diff_sections(const unsigned char *ptr1, const unsigned char *ptr2, - const struct coff_section *sec, - int_fast64_t diff, unsigned int width, - uint_fast64_t base, uint_fast64_t end) + const struct coff_section *sec, int_fast64_t diff, + unsigned int width, uint_fast64_t base, + uint_fast64_t end) { static uint_fast32_t cur_rva, reloc_size; unsigned int disp = 0; @@ -216,16 +215,15 @@ static void diff_sections(const unsigned char *ptr1, const unsigned char *ptr2, uint64_t u64; } val1, val2; int_fast64_t delta; - unsigned int reloc = (width == 4 ? PE_BASE_RELOC_HIGHLOW : - PE_BASE_RELOC_DIR64); + unsigned int reloc = + (width == 4 ? 
PE_BASE_RELOC_HIGHLOW : PE_BASE_RELOC_DIR64); if ( ptr1[i] == ptr2[i] ) continue; if ( i < disp || i + width - disp > sec->file_size ) { - fprintf(stderr, - "Bogus difference at %.8s:%08" PRIxFAST32 "\n", + fprintf(stderr, "Bogus difference at %.8s:%08" PRIxFAST32 "\n", sec->name, i); exit(3); } @@ -250,11 +248,11 @@ static void diff_sections(const unsigned char *ptr1, const unsigned char *ptr2, reloc_size += reloc_size & 2; if ( reloc_size ) printf("\t.equ rva_%08" PRIxFAST32 "_relocs," - " %#08" PRIxFAST32 "\n", + " %#08" PRIxFAST32 "\n", cur_rva, reloc_size); printf("\t.balign 4\n" "\t.long %#08" PRIxFAST32 "," - " rva_%08" PRIxFAST32 "_relocs\n", + " rva_%08" PRIxFAST32 "_relocs\n", rva, rva); cur_rva = rva; reloc_size = 8; @@ -262,18 +260,20 @@ static void diff_sections(const unsigned char *ptr1, const unsigned char *ptr2, else if ( rva != cur_rva ) { fprintf(stderr, - "Cannot handle decreasing RVA (at %.8s:%08" PRIxFAST32 ")\n", + "Cannot handle decreasing RVA (at %.8s:%08" PRIxFAST32 + ")\n", sec->name, i); exit(3); } if ( !(sec->flags & COFF_SECTION_WRITEABLE) ) fprintf(stderr, - "Warning: relocation to r/o section %.8s:%08" PRIxFAST32 "\n", + "Warning: relocation to r/o section %.8s:%08" PRIxFAST32 + "\n", sec->name, i); - printf("\t.word (%u << 12) | 0x%03" PRIxFAST32 "\n", - reloc, sec->rva + i - disp - rva); + printf("\t.word (%u << 12) | 0x%03" PRIxFAST32 "\n", reloc, + sec->rva + i - disp - rva); reloc_size += 2; i += width - disp - 1; } @@ -287,9 +287,7 @@ int main(int argc, char *argv[]) uint32_t size1, size2; struct coff_section *sec1, *sec2; - if ( argc == 1 || - !strcmp(argv[1], "-?") || - !strcmp(argv[1], "-h") || + if ( argc == 1 || !strcmp(argv[1], "-?") || !strcmp(argv[1], "-h") || !strcmp(argv[1], "--help") ) usage(*argv, argc == 1); @@ -329,8 +327,7 @@ int main(int argc, char *argv[]) const void *ptr1, *ptr2; if ( memcmp(sec1[i].name, sec2[i].name, sizeof(sec1[i].name)) || - sec1[i].rva != sec2[i].rva || - sec1[i].size != sec2[i].size || + sec1[i].rva != sec2[i].rva || sec1[i].size != sec2[i].size || sec1[i].file_size != sec2[i].file_size || sec1[i].flags != sec2[i].flags ) { @@ -339,7 +336,7 @@ int main(int argc, char *argv[]) } if ( !sec1[i].size || - (sec1[i].flags & (COFF_SECTION_DISCARDABLE|COFF_SECTION_BSS)) ) + (sec1[i].flags & (COFF_SECTION_DISCARDABLE | COFF_SECTION_BSS)) ) continue; /* @@ -366,8 +363,8 @@ int main(int argc, char *argv[]) ptr1 = map_section(sec1 + i, in1, argv[1]); ptr2 = map_section(sec2 + i, in2, argv[2]); - diff_sections(ptr1, ptr2, sec1 + i, base2 - base1, width1, - base1, base1 + size1); + diff_sections(ptr1, ptr2, sec1 + i, base2 - base1, width1, base1, + base1 + size1); unmap_section(ptr1, sec1 + i); unmap_section(ptr2, sec2 + i); diff --git a/xen/arch/x86/efi/stub.c b/xen/arch/x86/efi/stub.c index 0c481e3235..3bdda9a62e 100644 --- a/xen/arch/x86/efi/stub.c +++ b/xen/arch/x86/efi/stub.c @@ -33,12 +33,12 @@ void __init noreturn efi_multiboot2(EFI_HANDLE ImageHandle, * in assembly because here this convention may * not be directly supported by C compiler. 
*/ - asm volatile( - " call *%3 \n" - "0: hlt \n" - " jmp 0b \n" - : "+c" (StdErr), "=d" (StdErr) : "1" (err), "rm" (StdErr->OutputString) - : "rax", "r8", "r9", "r10", "r11", "memory"); + asm volatile(" call *%3 \n" + "0: hlt \n" + " jmp 0b \n" + : "+c"(StdErr), "=d"(StdErr) + : "1"(err), "rm"(StdErr->OutputString) + : "rax", "r8", "r9", "r10", "r11", "memory"); unreachable(); } @@ -48,9 +48,13 @@ bool efi_enabled(unsigned int feature) return false; } -void __init efi_init_memory(void) { } +void __init efi_init_memory(void) +{ +} -void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t l4e) { } +void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t l4e) +{ +} bool efi_rs_using_pgtables(void) { @@ -63,8 +67,12 @@ unsigned long efi_get_time(void) return 0; } -void efi_halt_system(void) { } -void efi_reset_system(bool warm) { } +void efi_halt_system(void) +{ +} +void efi_reset_system(bool warm) +{ +} int efi_get_info(uint32_t idx, union xenpf_efi_info *info) { diff --git a/xen/arch/x86/emul-i8254.c b/xen/arch/x86/emul-i8254.c index 73be4188ad..ed0e341989 100644 --- a/xen/arch/x86/emul-i8254.c +++ b/xen/arch/x86/emul-i8254.c @@ -1,10 +1,10 @@ /* * QEMU 8253/8254 interval timer emulation - * + * * Copyright (c) 2003-2004 Fabrice Bellard * Copyright (c) 2006 Intel Corperation * Copyright (c) 2007 Keir Fraser, XenSource Inc. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the @@ -39,36 +39,36 @@ #include #define domain_vpit(x) (&(x)->arch.vpit) -#define vcpu_vpit(x) (domain_vpit((x)->domain)) +#define vcpu_vpit(x) (domain_vpit((x)->domain)) #define vpit_domain(x) (container_of((x), struct domain, arch.vpit)) -#define vpit_vcpu(x) (pt_global_vcpu_target(vpit_domain(x))) +#define vpit_vcpu(x) (pt_global_vcpu_target(vpit_domain(x))) #define RW_STATE_LSB 1 #define RW_STATE_MSB 2 #define RW_STATE_WORD0 3 #define RW_STATE_WORD1 4 -static int handle_pit_io( - int dir, unsigned int port, unsigned int bytes, uint32_t *val); -static int handle_speaker_io( - int dir, unsigned int port, unsigned int bytes, uint32_t *val); +static int handle_pit_io(int dir, unsigned int port, unsigned int bytes, + uint32_t *val); +static int handle_speaker_io(int dir, unsigned int port, unsigned int bytes, + uint32_t *val); #define get_guest_time(v) \ - (is_hvm_vcpu(v) ? hvm_get_guest_time(v) : (u64)get_s_time()) + (is_hvm_vcpu(v) ? 
hvm_get_guest_time(v) : (u64)get_s_time()) static int pit_get_count(PITState *pit, int channel) { uint64_t d; - int counter; + int counter; struct hvm_hw_pit_channel *c = &pit->hw.channels[channel]; struct vcpu *v = vpit_vcpu(pit); ASSERT(spin_is_locked(&pit->lock)); - d = muldiv64(get_guest_time(v) - pit->count_load_time[channel], - PIT_FREQ, SYSTEM_TIME_HZ); + d = muldiv64(get_guest_time(v) - pit->count_load_time[channel], PIT_FREQ, + SYSTEM_TIME_HZ); - switch ( c->mode ) + switch (c->mode) { case 0: case 1: @@ -96,10 +96,10 @@ static int pit_get_out(PITState *pit, int channel) ASSERT(spin_is_locked(&pit->lock)); - d = muldiv64(get_guest_time(v) - pit->count_load_time[channel], - PIT_FREQ, SYSTEM_TIME_HZ); + d = muldiv64(get_guest_time(v) - pit->count_load_time[channel], PIT_FREQ, + SYSTEM_TIME_HZ); - switch ( s->mode ) + switch (s->mode) { default: case 0: @@ -130,7 +130,7 @@ static void pit_set_gate(PITState *pit, int channel, int val) ASSERT(spin_is_locked(&pit->lock)); - switch ( s->mode ) + switch (s->mode) { default: case 0: @@ -184,13 +184,13 @@ static void pit_load_count(PITState *pit, int channel, int val) if ( (v == NULL) || !is_hvm_vcpu(v) || (channel != 0) ) return; - switch ( s->mode ) + switch (s->mode) { case 2: case 3: /* Periodic timer. */ TRACE_2D(TRC_HVM_EMUL_PIT_START_TIMER, period, period); - create_periodic_time(v, &pit->pt0, period, period, 0, pit_time_fired, + create_periodic_time(v, &pit->pt0, period, period, 0, pit_time_fired, &pit->count_load_time[channel], false); break; case 1: @@ -229,10 +229,8 @@ static void pit_latch_status(PITState *pit, int channel) if ( !c->status_latched ) { /* TODO: Return NULL COUNT (bit 6). */ - c->status = ((pit_get_out(pit, channel) << 7) | - (c->rw_mode << 4) | - (c->mode << 1) | - c->bcd); + c->status = ((pit_get_out(pit, channel) << 7) | (c->rw_mode << 4) | + (c->mode << 1) | c->bcd); c->status_latched = 1; } } @@ -242,7 +240,7 @@ static void pit_ioport_write(struct PITState *pit, uint32_t addr, uint32_t val) int channel, access; struct hvm_hw_pit_channel *s; - val &= 0xff; + val &= 0xff; addr &= 3; spin_lock(&pit->lock); @@ -291,7 +289,7 @@ static void pit_ioport_write(struct PITState *pit, uint32_t addr, uint32_t val) { /* Write Count. */ s = &pit->hw.channels[addr]; - switch ( s->write_state ) + switch (s->write_state) { default: case RW_STATE_LSB: @@ -318,7 +316,7 @@ static uint32_t pit_ioport_read(struct PITState *pit, uint32_t addr) { int ret, count; struct hvm_hw_pit_channel *s; - + addr &= 3; s = &pit->hw.channels[addr]; @@ -331,7 +329,7 @@ static uint32_t pit_ioport_read(struct PITState *pit, uint32_t addr) } else if ( s->count_latched ) { - switch ( s->count_latched ) + switch (s->count_latched) { default: case RW_STATE_LSB: @@ -350,7 +348,7 @@ static uint32_t pit_ioport_read(struct PITState *pit, uint32_t addr) } else { - switch ( s->read_state ) + switch (s->read_state) { default: case RW_STATE_LSB: @@ -401,7 +399,7 @@ static int pit_save(struct vcpu *v, hvm_domain_context_t *h) return 0; spin_lock(&pit->lock); - + rc = hvm_save_entry(PIT, 0, h, &pit->hw); spin_unlock(&pit->lock); @@ -424,10 +422,10 @@ static int pit_load(struct domain *d, hvm_domain_context_t *h) spin_unlock(&pit->lock); return 1; } - + /* - * Recreate platform timers from hardware state. There will be some - * time jitter here, but the wall-clock will have jumped massively, so + * Recreate platform timers from hardware state. 
There will be some + * time jitter here, but the wall-clock will have jumped massively, so * we hope the guest can handle it. */ pit->pt0.last_plt_gtime = get_guest_time(d->vcpu[0]); @@ -503,9 +501,9 @@ void pit_deinit(struct domain *d) } } -/* the intercept action for PIT DM retval:0--not handled; 1--handled */ -static int handle_pit_io( - int dir, unsigned int port, unsigned int bytes, uint32_t *val) +/* the intercept action for PIT DM retval:0--not handled; 1--handled */ +static int handle_pit_io(int dir, unsigned int port, unsigned int bytes, + uint32_t *val) { struct PITState *vpit = vcpu_vpit(current); @@ -531,15 +529,14 @@ static int handle_pit_io( return X86EMUL_OKAY; } -static void speaker_ioport_write( - struct PITState *pit, uint32_t addr, uint32_t val) +static void speaker_ioport_write(struct PITState *pit, uint32_t addr, + uint32_t val) { pit->hw.speaker_data_on = (val >> 1) & 1; pit_set_gate(pit, 2, val & 1); } -static uint32_t speaker_ioport_read( - struct PITState *pit, uint32_t addr) +static uint32_t speaker_ioport_read(struct PITState *pit, uint32_t addr) { /* Refresh clock toggles at about 15us. We approximate as 2^14ns. */ unsigned int refresh_clock = ((unsigned int)NOW() >> 14) & 1; @@ -547,8 +544,8 @@ static uint32_t speaker_ioport_read( (pit_get_out(pit, 2) << 5) | (refresh_clock << 4)); } -static int handle_speaker_io( - int dir, unsigned int port, uint32_t bytes, uint32_t *val) +static int handle_speaker_io(int dir, unsigned int port, uint32_t bytes, + uint32_t *val) { struct PITState *vpit = vcpu_vpit(current); @@ -568,13 +565,11 @@ static int handle_speaker_io( int pv_pit_handler(int port, int data, int write) { - ioreq_t ioreq = { - .size = 1, - .type = IOREQ_TYPE_PIO, - .addr = port, - .dir = write ? IOREQ_WRITE : IOREQ_READ, - .data = data - }; + ioreq_t ioreq = {.size = 1, + .type = IOREQ_TYPE_PIO, + .addr = port, + .dir = write ? 
IOREQ_WRITE : IOREQ_READ, + .data = data}; if ( !has_vpit(current->domain) ) return ~0; diff --git a/xen/arch/x86/extable.c b/xen/arch/x86/extable.c index a6430a6d8d..9ed73baa1c 100644 --- a/xen/arch/x86/extable.c +++ b/xen/arch/x86/extable.c @@ -15,47 +15,48 @@ static inline unsigned long ex_addr(const struct exception_table_entry *x) { - return EX_FIELD(x, addr); + return EX_FIELD(x, addr); } static inline unsigned long ex_cont(const struct exception_table_entry *x) { - return EX_FIELD(x, cont); + return EX_FIELD(x, cont); } static int init_or_livepatch cmp_ex(const void *a, const void *b) { - const struct exception_table_entry *l = a, *r = b; - unsigned long lip = ex_addr(l); - unsigned long rip = ex_addr(r); - - /* avoid overflow */ - if (lip > rip) - return 1; - if (lip < rip) - return -1; - return 0; + const struct exception_table_entry *l = a, *r = b; + unsigned long lip = ex_addr(l); + unsigned long rip = ex_addr(r); + + /* avoid overflow */ + if ( lip > rip ) + return 1; + if ( lip < rip ) + return -1; + return 0; } #ifndef swap_ex static void init_or_livepatch swap_ex(void *a, void *b, int size) { - struct exception_table_entry *l = a, *r = b, tmp; - long delta = b - a; - - tmp = *l; - l->addr = r->addr + delta; - l->cont = r->cont + delta; - r->addr = tmp.addr - delta; - r->cont = tmp.cont - delta; + struct exception_table_entry *l = a, *r = b, tmp; + long delta = b - a; + + tmp = *l; + l->addr = r->addr + delta; + l->cont = r->cont + delta; + r->addr = tmp.addr - delta; + r->cont = tmp.cont - delta; } #endif -void init_or_livepatch sort_exception_table(struct exception_table_entry *start, - const struct exception_table_entry *stop) +void init_or_livepatch +sort_exception_table(struct exception_table_entry *start, + const struct exception_table_entry *stop) { - sort(start, stop - start, - sizeof(struct exception_table_entry), cmp_ex, swap_ex); + sort(start, stop - start, sizeof(struct exception_table_entry), cmp_ex, + swap_ex); } void __init sort_exception_tables(void) @@ -76,18 +77,17 @@ search_one_extable(const struct exception_table_entry *first, { mid = (last - first) / 2 + first; diff = ex_addr(mid) - value; - if (diff == 0) + if ( diff == 0 ) return ex_cont(mid); - else if (diff < 0) - first = mid+1; + else if ( diff < 0 ) + first = mid + 1; else - last = mid-1; + last = mid - 1; } return 0; } -unsigned long -search_exception_table(const struct cpu_user_regs *regs) +unsigned long search_exception_table(const struct cpu_user_regs *regs) { const struct virtual_region *region = find_text_region(regs->rip); unsigned long stub = this_cpu(stubs.addr); @@ -96,16 +96,16 @@ search_exception_table(const struct cpu_user_regs *regs) return search_one_extable(region->ex, region->ex_end - 1, regs->rip); if ( regs->rip >= stub + STUB_BUF_SIZE / 2 && - regs->rip < stub + STUB_BUF_SIZE && - regs->rsp > (unsigned long)regs && + regs->rip < stub + STUB_BUF_SIZE && regs->rsp > (unsigned long)regs && regs->rsp < (unsigned long)get_cpu_info() ) { unsigned long retptr = *(unsigned long *)regs->rsp; region = find_text_region(retptr); - retptr = region && region->ex - ? search_one_extable(region->ex, region->ex_end - 1, retptr) - : 0; + retptr = + region && region->ex + ? 
search_one_extable(region->ex, region->ex_end - 1, retptr) + : 0; if ( retptr ) { /* @@ -128,21 +128,22 @@ search_exception_table(const struct cpu_user_regs *regs) #ifndef NDEBUG static int __init stub_selftest(void) { - static const struct { + static const struct + { uint8_t opc[4]; uint64_t rax; union stub_exception_token res; } tests[] __initconst = { - { .opc = { 0x0f, 0xb9, 0xc3, 0xc3 }, /* ud1 */ - .res.fields.trapnr = TRAP_invalid_op }, - { .opc = { 0x90, 0x02, 0x00, 0xc3 }, /* nop; add (%rax),%al */ - .rax = 0x0123456789abcdef, - .res.fields.trapnr = TRAP_gp_fault }, - { .opc = { 0x02, 0x04, 0x04, 0xc3 }, /* add (%rsp,%rax),%al */ - .rax = 0xfedcba9876543210, - .res.fields.trapnr = TRAP_stack_error }, - { .opc = { 0xcc, 0xc3, 0xc3, 0xc3 }, /* int3 */ - .res.fields.trapnr = TRAP_int3 }, + {.opc = {0x0f, 0xb9, 0xc3, 0xc3}, /* ud1 */ + .res.fields.trapnr = TRAP_invalid_op}, + {.opc = {0x90, 0x02, 0x00, 0xc3}, /* nop; add (%rax),%al */ + .rax = 0x0123456789abcdef, + .res.fields.trapnr = TRAP_gp_fault}, + {.opc = {0x02, 0x04, 0x04, 0xc3}, /* add (%rsp,%rax),%al */ + .rax = 0xfedcba9876543210, + .res.fields.trapnr = TRAP_stack_error}, + {.opc = {0xcc, 0xc3, 0xc3, 0xc3}, /* int3 */ + .res.fields.trapnr = TRAP_int3}, }; unsigned long addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2; unsigned int i; @@ -152,24 +153,23 @@ static int __init stub_selftest(void) for ( i = 0; i < ARRAY_SIZE(tests); ++i ) { - uint8_t *ptr = map_domain_page(_mfn(this_cpu(stubs.mfn))) + - (addr & ~PAGE_MASK); - union stub_exception_token res = { .raw = ~0 }; + uint8_t *ptr = + map_domain_page(_mfn(this_cpu(stubs.mfn))) + (addr & ~PAGE_MASK); + union stub_exception_token res = {.raw = ~0}; memset(ptr, 0xcc, STUB_BUF_SIZE / 2); memcpy(ptr, tests[i].opc, ARRAY_SIZE(tests[i].opc)); unmap_domain_page(ptr); - asm volatile ( "INDIRECT_CALL %[stb]\n" - ".Lret%=:\n\t" - ".pushsection .fixup,\"ax\"\n" - ".Lfix%=:\n\t" - "pop %[exn]\n\t" - "jmp .Lret%=\n\t" - ".popsection\n\t" - _ASM_EXTABLE(.Lret%=, .Lfix%=) - : [exn] "+m" (res) - : [stb] "r" (addr), "a" (tests[i].rax)); + asm volatile("INDIRECT_CALL %[stb]\n" + ".Lret%=:\n\t" + ".pushsection .fixup,\"ax\"\n" + ".Lfix%=:\n\t" + "pop %[exn]\n\t" + "jmp .Lret%=\n\t" + ".popsection\n\t" _ASM_EXTABLE(.Lret %=, .Lfix %=) + : [exn] "+m"(res) + : [stb] "r"(addr), "a"(tests[i].rax)); if ( res.raw != tests[i].res.raw ) { @@ -184,19 +184,19 @@ static int __init stub_selftest(void) } if ( fail ) - warning_add("SELFTEST FAILURE: CORRECT BEHAVIOR CANNOT BE GUARANTEED\n"); + warning_add( + "SELFTEST FAILURE: CORRECT BEHAVIOR CANNOT BE GUARANTEED\n"); return 0; } __initcall(stub_selftest); #endif -unsigned long -search_pre_exception_table(struct cpu_user_regs *regs) +unsigned long search_pre_exception_table(struct cpu_user_regs *regs) { unsigned long addr = regs->rip; - unsigned long fixup = search_one_extable( - __start___pre_ex_table, __stop___pre_ex_table-1, addr); + unsigned long fixup = search_one_extable(__start___pre_ex_table, + __stop___pre_ex_table - 1, addr); if ( fixup ) { dprintk(XENLOG_INFO, "Pre-exception: %p -> %p\n", _p(addr), _p(fixup)); diff --git a/xen/arch/x86/flushtlb.c b/xen/arch/x86/flushtlb.c index 4004129c49..0929806bde 100644 --- a/xen/arch/x86/flushtlb.c +++ b/xen/arch/x86/flushtlb.c @@ -1,9 +1,9 @@ /****************************************************************************** * flushtlb.c - * + * * TLB flushes are timestamped using a global virtual 'clock' which ticks * on any TLB flush on any processor. 
- * + * * Copyright (c) 2003-2006, K A Fraser */ @@ -29,7 +29,7 @@ DEFINE_PER_CPU(u32, tlbflush_time); /* * pre_flush(): Increment the virtual TLB-flush clock. Returns new clock value. - * + * * This must happen *before* we flush the TLB. If we do it after, we race other * CPUs invalidating PTEs. For example, a page invalidated after the flush * might get the old timestamp, but this CPU can speculatively fetch the @@ -46,14 +46,13 @@ static u32 pre_flush(void) if ( unlikely(t1 == 0) ) goto skip_clocktick; t2 = (t + 1) & WRAP_MASK; - } - while ( unlikely((t = cmpxchg(&tlbflush_clock, t1, t2)) != t1) ); + } while ( unlikely((t = cmpxchg(&tlbflush_clock, t1, t2)) != t1) ); /* Clock wrapped: we will lead a global TLB shootdown. */ if ( unlikely(t2 == 0) ) raise_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ); - skip_clocktick: +skip_clocktick: hvm_flush_guest_tlbs(); return t2; @@ -61,14 +60,14 @@ static u32 pre_flush(void) /* * post_flush(): Update this CPU's timestamp with specified clock value. - * - * Note that this happens *after* flushing the TLB, as otherwise we can race a - * NEED_FLUSH() test on another CPU. (e.g., other CPU sees the updated CPU + * + * Note that this happens *after* flushing the TLB, as otherwise we can race a + * NEED_FLUSH() test on another CPU. (e.g., other CPU sees the updated CPU * stamp and so does not force a synchronous TLB flush, but the flush in this - * function hasn't yet occurred and so the TLB might be stale). The ordering - * would only actually matter if this function were interruptible, and - * something that abuses the stale mapping could exist in an interrupt - * handler. In fact neither of these is the case, so really we are being ultra + * function hasn't yet occurred and so the TLB might be stale). The ordering + * would only actually matter if this function were interruptible, and + * something that abuses the stale mapping could exist in an interrupt + * handler. In fact neither of these is the case, so really we are being ultra * paranoid. */ static void post_flush(u32 t) @@ -175,8 +174,7 @@ void switch_cr3_cr4(unsigned long cr3, unsigned long cr4) * * Note also that PGE is always clear in old_cr4. 
*/ - if ( old_pcid != cr3_pcid(cr3) && - !(cr4 & X86_CR4_PGE) && + if ( old_pcid != cr3_pcid(cr3) && !(cr4 & X86_CR4_PGE) && (old_cr4 & X86_CR4_PCIDE) <= (cr4 & X86_CR4_PCIDE) ) invpcid_flush_single_context(old_pcid); @@ -194,7 +192,7 @@ unsigned int flush_area_local(const void *va, unsigned int flags) { unsigned int order = (flags - 1) & FLUSH_ORDER_MASK; - if ( flags & (FLUSH_TLB|FLUSH_TLB_GLOBAL) ) + if ( flags & (FLUSH_TLB | FLUSH_TLB_GLOBAL) ) { if ( order == 0 ) { @@ -225,8 +223,10 @@ unsigned int flush_area_local(const void *va, unsigned int flags) } } else - asm volatile ( "invlpg %0" - : : "m" (*(const char *)(va)) : "memory" ); + asm volatile("invlpg %0" + : + : "m"(*(const char *)(va)) + : "memory"); } else do_tlb_flush(); @@ -240,18 +240,18 @@ unsigned int flush_area_local(const void *va, unsigned int flags) if ( order < (BITS_PER_LONG - PAGE_SHIFT) ) sz = 1UL << (order + PAGE_SHIFT); - if ( (!(flags & (FLUSH_TLB|FLUSH_TLB_GLOBAL)) || + if ( (!(flags & (FLUSH_TLB | FLUSH_TLB_GLOBAL)) || (flags & FLUSH_VA_VALID)) && c->x86_clflush_size && c->x86_cache_size && sz && ((sz >> 10) < c->x86_cache_size) ) { alternative("", "sfence", X86_FEATURE_CLFLUSHOPT); for ( i = 0; i < sz; i += c->x86_clflush_size ) - alternative_input(".byte " __stringify(NOP_DS_PREFIX) ";" - " clflush %0", - "data16 clflush %0", /* clflushopt */ - X86_FEATURE_CLFLUSHOPT, - "m" (((const char *)va)[i])); + alternative_input( + ".byte " __stringify(NOP_DS_PREFIX) ";" + " clflush %0", + "data16 clflush %0", /* clflushopt */ + X86_FEATURE_CLFLUSHOPT, "m"(((const char *)va)[i])); flags &= ~FLUSH_CACHE; } else diff --git a/xen/arch/x86/gdbstub.c b/xen/arch/x86/gdbstub.c index ff9f7f9a69..d1af2e4659 100644 --- a/xen/arch/x86/gdbstub.c +++ b/xen/arch/x86/gdbstub.c @@ -1,7 +1,7 @@ /* * x86-specific gdb stub routines * based on x86 cdb(xen/arch/x86/cdb.c), but Extensively modified. - * + * * Copyright (C) 2006 Isaku Yamahata * VA Linux Systems Japan. K.K. * @@ -9,61 +9,53 @@ * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; If not, see . */ #include -u16 -gdb_arch_signal_num(struct cpu_user_regs *regs, unsigned long cookie) +u16 gdb_arch_signal_num(struct cpu_user_regs *regs, unsigned long cookie) { - return 5; /* TRAP signal. see include/gdb/signals.h */ + return 5; /* TRAP signal. see include/gdb/signals.h */ } /* * Use __copy_*_user to make us page-fault safe, but not otherwise restrict * our access to the full virtual address space. 
*/ -unsigned int -gdb_arch_copy_from_user(void *dest, const void *src, unsigned len) +unsigned int gdb_arch_copy_from_user(void *dest, const void *src, unsigned len) { return __copy_from_user(dest, src, len); } -unsigned int -gdb_arch_copy_to_user(void *dest, const void *src, unsigned len) +unsigned int gdb_arch_copy_to_user(void *dest, const void *src, unsigned len) { return __copy_to_user(dest, src, len); } -void -gdb_arch_print_state(struct cpu_user_regs *regs) +void gdb_arch_print_state(struct cpu_user_regs *regs) { /* XXX */ } -void -gdb_arch_enter(struct cpu_user_regs *regs) +void gdb_arch_enter(struct cpu_user_regs *regs) { /* nothing */ } -void -gdb_arch_exit(struct cpu_user_regs *regs) +void gdb_arch_exit(struct cpu_user_regs *regs) { /* nothing */ } -void -gdb_arch_resume(struct cpu_user_regs *regs, - unsigned long addr, unsigned long type, - struct gdb_context *ctx) +void gdb_arch_resume(struct cpu_user_regs *regs, unsigned long addr, + unsigned long type, struct gdb_context *ctx) { if ( addr != -1UL ) regs->rip = addr; diff --git a/xen/arch/x86/genapic/bigsmp.c b/xen/arch/x86/genapic/bigsmp.c index 91a973ac16..316d5790b6 100644 --- a/xen/arch/x86/genapic/bigsmp.c +++ b/xen/arch/x86/genapic/bigsmp.c @@ -13,36 +13,31 @@ static __init int force_bigsmp(struct dmi_system_id *d) { - printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident); - def_to_bigsmp = true; - return 0; + printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident); + def_to_bigsmp = true; + return 0; } - static struct dmi_system_id __initdata bigsmp_dmi_table[] = { - { force_bigsmp, "UNISYS ES7000-ONE", { - DMI_MATCH(DMI_PRODUCT_NAME, "ES7000-ONE") - }}, - - { } -}; + {force_bigsmp, + "UNISYS ES7000-ONE", + {DMI_MATCH(DMI_PRODUCT_NAME, "ES7000-ONE")}}, + {}}; static __init int probe_bigsmp(void) -{ - /* - * We don't implement cluster mode, so force use of - * physical mode in both cases. - */ - if (acpi_gbl_FADT.flags & - (ACPI_FADT_APIC_CLUSTER | ACPI_FADT_APIC_PHYSICAL)) - def_to_bigsmp = true; - else if (!def_to_bigsmp) - dmi_check_system(bigsmp_dmi_table); - return def_to_bigsmp; -} +{ + /* + * We don't implement cluster mode, so force use of + * physical mode in both cases. + */ + if ( acpi_gbl_FADT.flags & + (ACPI_FADT_APIC_CLUSTER | ACPI_FADT_APIC_PHYSICAL) ) + def_to_bigsmp = true; + else if ( !def_to_bigsmp ) + dmi_check_system(bigsmp_dmi_table); + return def_to_bigsmp; +} const struct genapic __initconstrel apic_bigsmp = { - APIC_INIT("bigsmp", probe_bigsmp), - GENAPIC_PHYS -}; + APIC_INIT("bigsmp", probe_bigsmp), GENAPIC_PHYS}; diff --git a/xen/arch/x86/genapic/default.c b/xen/arch/x86/genapic/default.c index 53ebf20a3f..cd1d855457 100644 --- a/xen/arch/x86/genapic/default.c +++ b/xen/arch/x86/genapic/default.c @@ -1,4 +1,4 @@ -/* +/* * Default generic APIC driver. This handles upto 8 CPUs. */ #include @@ -16,11 +16,9 @@ /* should be called last. 
*/ static __init int probe_default(void) -{ - return 1; -} +{ + return 1; +} const struct genapic __initconstrel apic_default = { - APIC_INIT("default", probe_default), - GENAPIC_FLAT -}; + APIC_INIT("default", probe_default), GENAPIC_FLAT}; diff --git a/xen/arch/x86/genapic/delivery.c b/xen/arch/x86/genapic/delivery.c index a86b8c9422..9171428a66 100644 --- a/xen/arch/x86/genapic/delivery.c +++ b/xen/arch/x86/genapic/delivery.c @@ -11,27 +11,27 @@ void init_apic_ldr_flat(void) { - unsigned long val; + unsigned long val; - apic_write(APIC_DFR, APIC_DFR_FLAT); - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - val |= SET_xAPIC_LOGICAL_ID(1UL << smp_processor_id()); - apic_write(APIC_LDR, val); + apic_write(APIC_DFR, APIC_DFR_FLAT); + val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; + val |= SET_xAPIC_LOGICAL_ID(1UL << smp_processor_id()); + apic_write(APIC_LDR, val); } void __init clustered_apic_check_flat(void) { - printk("Enabling APIC mode: Flat. Using %d I/O APICs\n", nr_ioapics); + printk("Enabling APIC mode: Flat. Using %d I/O APICs\n", nr_ioapics); } const cpumask_t *vector_allocation_cpumask_flat(int cpu) { - return &cpu_online_map; -} + return &cpu_online_map; +} unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask) { - return cpumask_bits(cpumask)[0]&0xFF; + return cpumask_bits(cpumask)[0] & 0xFF; } /* @@ -40,25 +40,25 @@ unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask) void init_apic_ldr_phys(void) { - unsigned long val; - apic_write(APIC_DFR, APIC_DFR_FLAT); - /* A dummy logical ID should be fine. We only deliver in phys mode. */ - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - apic_write(APIC_LDR, val); + unsigned long val; + apic_write(APIC_DFR, APIC_DFR_FLAT); + /* A dummy logical ID should be fine. We only deliver in phys mode. */ + val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; + apic_write(APIC_LDR, val); } void __init clustered_apic_check_phys(void) { - printk("Enabling APIC mode: Phys. Using %d I/O APICs\n", nr_ioapics); + printk("Enabling APIC mode: Phys. Using %d I/O APICs\n", nr_ioapics); } const cpumask_t *vector_allocation_cpumask_phys(int cpu) { - return cpumask_of(cpu); + return cpumask_of(cpu); } unsigned int cpu_mask_to_apicid_phys(const cpumask_t *cpumask) { - /* As we are using single CPU as destination, pick only one CPU here */ - return cpu_physical_id(cpumask_any(cpumask)); + /* As we are using single CPU as destination, pick only one CPU here */ + return cpu_physical_id(cpumask_any(cpumask)); } diff --git a/xen/arch/x86/genapic/probe.c b/xen/arch/x86/genapic/probe.c index 6aa7eb7b1c..9551dd4e4e 100644 --- a/xen/arch/x86/genapic/probe.c +++ b/xen/arch/x86/genapic/probe.c @@ -1,8 +1,8 @@ -/* Copyright 2003 Andi Kleen, SuSE Labs. - * Subject to the GNU Public License, v.2 - * +/* Copyright 2003 Andi Kleen, SuSE Labs. + * Subject to the GNU Public License, v.2 + * * Generic x86 APIC driver probe layer. 
- */ + */ #include #include #include @@ -18,96 +18,106 @@ struct genapic __read_mostly genapic; const struct genapic *const __initconstrel apic_probe[] = { - &apic_bigsmp, - &apic_default, /* must be last */ - NULL, + &apic_bigsmp, + &apic_default, /* must be last */ + NULL, }; static bool_t __initdata cmdline_apic; void __init generic_bigsmp_probe(void) { - /* - * This routine is used to switch to bigsmp mode when - * - There is no apic= option specified by the user - * - generic_apic_probe() has choosen apic_default as the sub_arch - * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support - */ - - if (!cmdline_apic && genapic.name == apic_default.name) - if (apic_bigsmp.probe()) { - genapic = apic_bigsmp; - printk(KERN_INFO "Overriding APIC driver with %s\n", - genapic.name); - } + /* + * This routine is used to switch to bigsmp mode when + * - There is no apic= option specified by the user + * - generic_apic_probe() has choosen apic_default as the sub_arch + * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support + */ + + if ( !cmdline_apic && genapic.name == apic_default.name ) + if ( apic_bigsmp.probe() ) + { + genapic = apic_bigsmp; + printk(KERN_INFO "Overriding APIC driver with %s\n", genapic.name); + } } static int __init genapic_apic_force(const char *str) { - int i, rc = -EINVAL; + int i, rc = -EINVAL; - for (i = 0; apic_probe[i]; i++) - if (!strcmp(apic_probe[i]->name, str)) { - genapic = *apic_probe[i]; - rc = 0; - } + for ( i = 0; apic_probe[i]; i++ ) + if ( !strcmp(apic_probe[i]->name, str) ) + { + genapic = *apic_probe[i]; + rc = 0; + } - return rc; + return rc; } custom_param("apic", genapic_apic_force); -void __init generic_apic_probe(void) -{ - bool changed; - int i; - - record_boot_APIC_mode(); - - check_x2apic_preenabled(); - cmdline_apic = changed = !!genapic.name; - - for (i = 0; !changed && apic_probe[i]; i++) { - if (apic_probe[i]->probe()) { - changed = 1; - genapic = *apic_probe[i]; - } - } - if (!changed) - genapic = apic_default; - - printk(KERN_INFO "Using APIC driver %s\n", genapic.name); -} +void __init generic_apic_probe(void) +{ + bool changed; + int i; + + record_boot_APIC_mode(); + + check_x2apic_preenabled(); + cmdline_apic = changed = !!genapic.name; + + for ( i = 0; !changed && apic_probe[i]; i++ ) + { + if ( apic_probe[i]->probe() ) + { + changed = 1; + genapic = *apic_probe[i]; + } + } + if ( !changed ) + genapic = apic_default; + + printk(KERN_INFO "Using APIC driver %s\n", genapic.name); +} /* These functions can switch the APIC even after the initial ->probe() */ -int __init mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) -{ - int i; - for (i = 0; apic_probe[i]; ++i) { - if (apic_probe[i]->mps_oem_check(mpc,oem,productid)) { - if (!cmdline_apic) { - genapic = *apic_probe[i]; - printk(KERN_INFO "Switched to APIC driver `%s'.\n", - genapic.name); - } - return 1; - } - } - return 0; -} +int __init mps_oem_check(struct mp_config_table *mpc, char *oem, + char *productid) +{ + int i; + for ( i = 0; apic_probe[i]; ++i ) + { + if ( apic_probe[i]->mps_oem_check(mpc, oem, productid) ) + { + if ( !cmdline_apic ) + { + genapic = *apic_probe[i]; + printk(KERN_INFO "Switched to APIC driver `%s'.\n", + genapic.name); + } + return 1; + } + } + return 0; +} int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) { - int i; - for (i = 0; apic_probe[i]; ++i) { - if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { - if (!cmdline_apic) { - genapic = *apic_probe[i]; - printk(KERN_INFO "Switched to 
APIC driver `%s'.\n", - genapic.name); - } - return 1; - } - } - return 0; + int i; + for ( i = 0; apic_probe[i]; ++i ) + { + if ( apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id) ) + { + if ( !cmdline_apic ) + { + genapic = *apic_probe[i]; + printk(KERN_INFO "Switched to APIC driver `%s'.\n", + genapic.name); + } + return 1; + } + } + return 0; } diff --git a/xen/arch/x86/genapic/x2apic.c b/xen/arch/x86/genapic/x2apic.c index 334dae527e..63e402c996 100644 --- a/xen/arch/x86/genapic/x2apic.c +++ b/xen/arch/x86/genapic/x2apic.c @@ -55,9 +55,10 @@ static void init_apic_ldr_x2apic_cluster(void) } per_cpu(cluster_cpus, this_cpu) = cluster_cpus_spare; - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) { - if (this_cpu == cpu || x2apic_cluster(this_cpu) != x2apic_cluster(cpu)) + if ( this_cpu == cpu || + x2apic_cluster(this_cpu) != x2apic_cluster(cpu) ) continue; per_cpu(cluster_cpus, this_cpu) = per_cpu(cluster_cpus, cpu); break; @@ -83,7 +84,7 @@ static unsigned int cpu_mask_to_apicid_x2apic_cluster(const cpumask_t *cpumask) unsigned int dest = per_cpu(cpu_2_logical_apicid, cpu); const cpumask_t *cluster_cpus = per_cpu(cluster_cpus, cpu); - for_each_cpu ( cpu, cluster_cpus ) + for_each_cpu (cpu, cluster_cpus) if ( cpumask_test_cpu(cpu, cpumask) ) dest |= per_cpu(cpu_2_logical_apicid, cpu); @@ -105,7 +106,7 @@ static void send_IPI_mask_x2apic_phys(const cpumask_t *cpumask, int vector) * Ensure that any synchronisation data written in program order by this * CPU is seen by notified remote CPUs. The WRMSR contained within * apic_icr_write() can otherwise be executed early. - * + * * The reason smp_mb() is sufficient here is subtle: the register arguments * to WRMSR must depend on a memory read executed after the barrier. This * is guaranteed by cpu_physical_id(), which reads from a global array (and @@ -115,13 +116,13 @@ static void send_IPI_mask_x2apic_phys(const cpumask_t *cpumask, int vector) local_irq_save(flags); - for_each_cpu ( cpu, cpumask ) + for_each_cpu (cpu, cpumask) { if ( !cpu_online(cpu) || (cpu == smp_processor_id()) ) continue; msr_content = cpu_physical_id(cpu); - msr_content = (msr_content << 32) | APIC_DM_FIXED | - APIC_DEST_PHYSICAL | vector; + msr_content = + (msr_content << 32) | APIC_DM_FIXED | APIC_DEST_PHYSICAL | vector; apic_wrmsr(APIC_ICR, msr_content); } @@ -147,7 +148,7 @@ static void send_IPI_mask_x2apic_cluster(const cpumask_t *cpumask, int vector) uint64_t msr_content = 0; cluster_cpus = per_cpu(cluster_cpus, cpumask_first(ipimask)); - for_each_cpu ( cpu, cluster_cpus ) + for_each_cpu (cpu, cluster_cpus) { if ( !cpumask_test_cpu(cpu, ipimask) ) continue; @@ -155,8 +156,8 @@ static void send_IPI_mask_x2apic_cluster(const cpumask_t *cpumask, int vector) } BUG_ON(!msr_content); - msr_content = (msr_content << 32) | APIC_DM_FIXED | - APIC_DEST_LOGICAL | vector; + msr_content = + (msr_content << 32) | APIC_DM_FIXED | APIC_DEST_LOGICAL | vector; apic_wrmsr(APIC_ICR, msr_content); } @@ -172,8 +173,7 @@ static const struct genapic __initconstrel apic_x2apic_phys = { .vector_allocation_cpumask = vector_allocation_cpumask_phys, .cpu_mask_to_apicid = cpu_mask_to_apicid_phys, .send_IPI_mask = send_IPI_mask_x2apic_phys, - .send_IPI_self = send_IPI_self_x2apic -}; + .send_IPI_self = send_IPI_self_x2apic}; static const struct genapic __initconstrel apic_x2apic_cluster = { APIC_INIT("x2apic_cluster", NULL), @@ -184,16 +184,16 @@ static const struct genapic __initconstrel apic_x2apic_cluster = { .vector_allocation_cpumask = vector_allocation_cpumask_x2apic_cluster, 
.cpu_mask_to_apicid = cpu_mask_to_apicid_x2apic_cluster, .send_IPI_mask = send_IPI_mask_x2apic_cluster, - .send_IPI_self = send_IPI_self_x2apic -}; + .send_IPI_self = send_IPI_self_x2apic}; -static int update_clusterinfo( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int update_clusterinfo(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; int err = 0; - switch (action) { + switch (action) + { case CPU_UP_PREPARE: per_cpu(cpu_2_logical_apicid, cpu) = BAD_APICID; if ( !cluster_cpus_spare ) @@ -220,11 +220,11 @@ static int update_clusterinfo( return !err ? NOTIFY_DONE : notifier_from_errno(err); } -static struct notifier_block x2apic_cpu_nfb = { - .notifier_call = update_clusterinfo -}; +static struct notifier_block x2apic_cpu_nfb = {.notifier_call = + update_clusterinfo}; -static s8 __initdata x2apic_phys = -1; /* By default we use logical cluster mode. */ +static s8 __initdata x2apic_phys = + -1; /* By default we use logical cluster mode. */ boolean_param("x2apic_phys", x2apic_phys); const struct genapic *__init apic_x2apic_probe(void) diff --git a/xen/arch/x86/guest/pvh-boot.c b/xen/arch/x86/guest/pvh-boot.c index ca8e156f7d..91fc15280e 100644 --- a/xen/arch/x86/guest/pvh-boot.c +++ b/xen/arch/x86/guest/pvh-boot.c @@ -35,8 +35,7 @@ static multiboot_info_t __initdata pvh_mbi; static module_t __initdata pvh_mbi_mods[8]; static const char *__initdata pvh_loader = "PVH Directboot"; -static void __init convert_pvh_info(multiboot_info_t **mbi, - module_t **mod) +static void __init convert_pvh_info(multiboot_info_t **mbi, module_t **mod) { const struct hvm_start_info *pvh_info = __va(pvh_start_info_pa); const struct hvm_modlist_entry *entry; @@ -75,8 +74,8 @@ static void __init convert_pvh_info(multiboot_info_t **mbi, BUG_ON(entry[i].cmdline_paddr >> 32); pvh_mbi_mods[i].mod_start = entry[i].paddr; - pvh_mbi_mods[i].mod_end = entry[i].paddr + entry[i].size; - pvh_mbi_mods[i].string = entry[i].cmdline_paddr; + pvh_mbi_mods[i].mod_end = entry[i].paddr + entry[i].size; + pvh_mbi_mods[i].string = entry[i].cmdline_paddr; } rsdp_hint = pvh_info->rsdp_paddr; @@ -119,21 +118,21 @@ void __init pvh_print_info(void) printk("PVH start info: (pa %08x)\n", pvh_start_info_pa); printk(" version: %u\n", pvh_info->version); - printk(" flags: %#"PRIx32"\n", pvh_info->flags); + printk(" flags: %#" PRIx32 "\n", pvh_info->flags); printk(" nr_modules: %u\n", pvh_info->nr_modules); - printk(" modlist_pa: %016"PRIx64"\n", pvh_info->modlist_paddr); - printk(" cmdline_pa: %016"PRIx64"\n", pvh_info->cmdline_paddr); + printk(" modlist_pa: %016" PRIx64 "\n", pvh_info->modlist_paddr); + printk(" cmdline_pa: %016" PRIx64 "\n", pvh_info->cmdline_paddr); if ( pvh_info->cmdline_paddr ) printk(" cmdline: '%s'\n", (char *)__va(pvh_info->cmdline_paddr)); - printk(" rsdp_pa: %016"PRIx64"\n", pvh_info->rsdp_paddr); + printk(" rsdp_pa: %016" PRIx64 "\n", pvh_info->rsdp_paddr); entry = __va(pvh_info->modlist_paddr); for ( i = 0; i < pvh_info->nr_modules; i++ ) { - printk(" mod[%u].pa: %016"PRIx64"\n", i, entry[i].paddr); - printk(" mod[%u].size: %016"PRIu64"\n", i, entry[i].size); - printk(" mod[%u].cmdline_pa: %016"PRIx64"\n", - i, entry[i].cmdline_paddr); + printk(" mod[%u].pa: %016" PRIx64 "\n", i, entry[i].paddr); + printk(" mod[%u].size: %016" PRIu64 "\n", i, entry[i].size); + printk(" mod[%u].cmdline_pa: %016" PRIx64 "\n", i, + entry[i].cmdline_paddr); if ( entry[i].cmdline_paddr ) printk(" mod[%1u].cmdline: '%s'\n", i, (char 
*)__va(entry[i].cmdline_paddr)); diff --git a/xen/arch/x86/guest/xen.c b/xen/arch/x86/guest/xen.c index 7b7a5badab..8e0d29809c 100644 --- a/xen/arch/x86/guest/xen.c +++ b/xen/arch/x86/guest/xen.c @@ -51,15 +51,14 @@ static void __init find_xen_leaves(void) { uint32_t eax, ebx, ecx, edx, base; - for ( base = XEN_CPUID_FIRST_LEAF; - base < XEN_CPUID_FIRST_LEAF + 0x10000; base += 0x100 ) + for ( base = XEN_CPUID_FIRST_LEAF; base < XEN_CPUID_FIRST_LEAF + 0x10000; + base += 0x100 ) { cpuid(base, &eax, &ebx, &ecx, &edx); if ( (ebx == XEN_CPUID_SIGNATURE_EBX) && (ecx == XEN_CPUID_SIGNATURE_ECX) && - (edx == XEN_CPUID_SIGNATURE_EDX) && - ((eax - base) >= 2) ) + (edx == XEN_CPUID_SIGNATURE_EDX) && ((eax - base) >= 2) ) { xen_cpuid_base = base; break; @@ -175,8 +174,8 @@ static void __init init_memmap(void) * avoid the know MMIO hole below 4GiB. Note that this is subject to future * discussion and improvements. */ - if ( rangeset_add_range(mem, 0, max_t(unsigned long, max_page - 1, - PFN_DOWN(GB(4) - 1))) ) + if ( rangeset_add_range( + mem, 0, max_t(unsigned long, max_page - 1, PFN_DOWN(GB(4) - 1))) ) panic("unable to add RAM to in-use PFN rangeset\n"); for ( i = 0; i < e820.nr_map; i++ ) diff --git a/xen/arch/x86/hpet.c b/xen/arch/x86/hpet.c index 4b08488ef1..1b526ff332 100644 --- a/xen/arch/x86/hpet.c +++ b/xen/arch/x86/hpet.c @@ -18,29 +18,29 @@ #include #include -#define MAX_DELTA_NS MILLISECS(10*1000) +#define MAX_DELTA_NS MILLISECS(10 * 1000) #define MIN_DELTA_NS MICROSECS(20) -#define HPET_EVT_USED_BIT 0 -#define HPET_EVT_USED (1 << HPET_EVT_USED_BIT) +#define HPET_EVT_USED_BIT 0 +#define HPET_EVT_USED (1 << HPET_EVT_USED_BIT) #define HPET_EVT_DISABLE_BIT 1 -#define HPET_EVT_DISABLE (1 << HPET_EVT_DISABLE_BIT) -#define HPET_EVT_LEGACY_BIT 2 -#define HPET_EVT_LEGACY (1 << HPET_EVT_LEGACY_BIT) +#define HPET_EVT_DISABLE (1 << HPET_EVT_DISABLE_BIT) +#define HPET_EVT_LEGACY_BIT 2 +#define HPET_EVT_LEGACY (1 << HPET_EVT_LEGACY_BIT) struct hpet_event_channel { unsigned long mult; - int shift; - s_time_t next_event; + int shift; + s_time_t next_event; cpumask_var_t cpumask; - spinlock_t lock; - void (*event_handler)(struct hpet_event_channel *); + spinlock_t lock; + void (*event_handler)(struct hpet_event_channel *); - unsigned int idx; /* physical channel idx */ - unsigned int cpu; /* msi target */ - struct msi_desc msi;/* msi state */ - unsigned int flags; /* HPET_EVT_x */ + unsigned int idx; /* physical channel idx */ + unsigned int cpu; /* msi target */ + struct msi_desc msi; /* msi state */ + unsigned int flags; /* HPET_EVT_x */ } __cacheline_aligned; static struct hpet_event_channel *__read_mostly hpet_events; @@ -78,7 +78,7 @@ static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec, uint64_t tmp = ((uint64_t)ticks) << shift; do_div(tmp, nsec); - return (unsigned long) tmp; + return (unsigned long)tmp; } /* @@ -91,7 +91,7 @@ static inline unsigned long ns2ticks(unsigned long nsec, int shift, { uint64_t tmp = ((uint64_t)nsec * factor) >> shift; - return (unsigned long) tmp; + return (unsigned long)tmp; } static int hpet_next_event(unsigned long delta, int timer) @@ -110,9 +110,8 @@ static int hpet_next_event(unsigned long delta, int timer) return ((cmp + 2 - cnt) > delta) ? 
-ETIME : 0; } -static int reprogram_hpet_evt_channel( - struct hpet_event_channel *ch, - s_time_t expire, s_time_t now, int force) +static int reprogram_hpet_evt_channel(struct hpet_event_channel *ch, + s_time_t expire, s_time_t now, int force) { int64_t delta; int ret; @@ -163,7 +162,7 @@ static void evt_do_broadcast(cpumask_t *mask) cpuidle_wakeup_mwait(mask); if ( !cpumask_empty(mask) ) - cpumask_raise_softirq(mask, TIMER_SOFTIRQ); + cpumask_raise_softirq(mask, TIMER_SOFTIRQ); } static void handle_hpet_broadcast(struct hpet_event_channel *ch) @@ -185,7 +184,7 @@ again: now = NOW(); /* find all expired events */ - for_each_cpu(cpu, ch->cpumask) + for_each_cpu (cpu, ch->cpumask) { s_time_t deadline = ACCESS_ONCE(per_cpu(timer_deadline, cpu)); @@ -211,7 +210,7 @@ again: } static void hpet_interrupt_handler(int irq, void *data, - struct cpu_user_regs *regs) + struct cpu_user_regs *regs) { struct hpet_event_channel *ch = (struct hpet_event_channel *)data; @@ -219,7 +218,9 @@ static void hpet_interrupt_handler(int irq, void *data, if ( !ch->event_handler ) { - printk(XENLOG_WARNING "Spurious HPET timer interrupt on HPET timer %d\n", ch->idx); + printk(XENLOG_WARNING + "Spurious HPET timer interrupt on HPET timer %d\n", + ch->idx); return; } @@ -266,8 +267,8 @@ static int hpet_msi_write(struct hpet_event_channel *ch, struct msi_msg *msg) return 0; } -static void __maybe_unused -hpet_msi_read(struct hpet_event_channel *ch, struct msi_msg *msg) +static void __maybe_unused hpet_msi_read(struct hpet_event_channel *ch, + struct msi_msg *msg) { msg->data = hpet_read32(HPET_Tn_ROUTE(ch->idx)); msg->address_lo = hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4); @@ -312,13 +313,13 @@ static void hpet_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask) * IRQ Chip for MSI HPET Devices, */ static hw_irq_controller hpet_msi_type = { - .typename = "HPET-MSI", - .startup = hpet_msi_startup, - .shutdown = hpet_msi_shutdown, - .enable = hpet_msi_unmask, - .disable = hpet_msi_mask, - .ack = hpet_msi_ack, - .set_affinity = hpet_msi_set_affinity, + .typename = "HPET-MSI", + .startup = hpet_msi_startup, + .shutdown = hpet_msi_shutdown, + .enable = hpet_msi_unmask, + .disable = hpet_msi_mask, + .ack = hpet_msi_ack, + .set_affinity = hpet_msi_set_affinity, }; static int __hpet_setup_msi_irq(struct irq_desc *desc) @@ -483,8 +484,7 @@ static void set_channel_irq_affinity(struct hpet_event_channel *ch) ch->event_handler(ch); } -static void hpet_attach_channel(unsigned int cpu, - struct hpet_event_channel *ch) +static void hpet_attach_channel(unsigned int cpu, struct hpet_event_channel *ch) { ASSERT(!local_irq_is_enabled()); spin_lock(&ch->lock); @@ -501,8 +501,7 @@ static void hpet_attach_channel(unsigned int cpu, set_channel_irq_affinity(ch); } -static void hpet_detach_channel(unsigned int cpu, - struct hpet_event_channel *ch) +static void hpet_detach_channel(unsigned int cpu, struct hpet_event_channel *ch) { unsigned int next; @@ -538,7 +537,7 @@ static void handle_rtc_once(uint8_t index, uint8_t value) return; /* RTC Reg B, contain PIE/AIE/UIE */ - if ( value & (RTC_PIE | RTC_AIE | RTC_UIE ) ) + if ( value & (RTC_PIE | RTC_AIE | RTC_UIE) ) { cpuidle_disable_deep_cstate(); pv_rtc_handler = NULL; @@ -600,8 +599,8 @@ void __init hpet_broadcast_init(void) * The period is a femto seconds value. We need to calculate the scaled * math multiplication factor for nanosecond to hpet tick conversion. 
*/ - hpet_events[i].mult = div_sc((unsigned long)hpet_rate, - 1000000000ul, 32); + hpet_events[i].mult = + div_sc((unsigned long)hpet_rate, 1000000000ul, 32); hpet_events[i].shift = 32; hpet_events[i].next_event = STIME_MAX; spin_lock_init(&hpet_events[i].lock); @@ -745,8 +744,8 @@ void hpet_broadcast_exit(void) int hpet_broadcast_is_available(void) { - return ((hpet_events && (hpet_events->flags & HPET_EVT_LEGACY)) - || num_hpets_used > 0); + return ((hpet_events && (hpet_events->flags & HPET_EVT_LEGACY)) || + num_hpets_used > 0); } int hpet_legacy_irq_tick(void) @@ -754,8 +753,8 @@ int hpet_legacy_irq_tick(void) this_cpu(irq_count)--; if ( !hpet_events || - (hpet_events->flags & (HPET_EVT_DISABLE|HPET_EVT_LEGACY)) != - HPET_EVT_LEGACY ) + (hpet_events->flags & (HPET_EVT_DISABLE | HPET_EVT_LEGACY)) != + HPET_EVT_LEGACY ) return 0; hpet_events->event_handler(hpet_events); return 1; @@ -835,9 +834,10 @@ void hpet_resume(u32 *boot_cfg) cfg &= ~HPET_TN_ENABLE; if ( cfg & HPET_TN_RESERVED ) { - printk(XENLOG_WARNING - "HPET: reserved bits %#x set in channel %u config register\n", - cfg & HPET_TN_RESERVED, i); + printk( + XENLOG_WARNING + "HPET: reserved bits %#x set in channel %u config register\n", + cfg & HPET_TN_RESERVED, i); cfg &= ~HPET_TN_RESERVED; } hpet_write32(cfg, HPET_Tn_CFG(i)); diff --git a/xen/arch/x86/hvm/asid.c b/xen/arch/x86/hvm/asid.c index 9d3c671a5f..4ded30eed1 100644 --- a/xen/arch/x86/hvm/asid.c +++ b/xen/arch/x86/hvm/asid.c @@ -49,11 +49,12 @@ boolean_param("asid", opt_asid_enabled); */ /* Per-CPU ASID management. */ -struct hvm_asid_data { - uint64_t core_asid_generation; - uint32_t next_asid; - uint32_t max_asid; - bool_t disabled; +struct hvm_asid_data +{ + uint64_t core_asid_generation; + uint32_t next_asid; + uint32_t max_asid; + bool_t disabled; }; static DEFINE_PER_CPU(struct hvm_asid_data, hvm_asid_data); @@ -114,7 +115,7 @@ bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid) { struct hvm_asid_data *data = &this_cpu(hvm_asid_data); - /* On erratum #170 systems we must flush the TLB. + /* On erratum #170 systems we must flush the TLB. * Generation overruns are taken here, too. */ if ( data->disabled ) goto disabled; @@ -138,11 +139,11 @@ bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid) /* * When we assign ASID 1, flush all TLB entries as we are starting a new - * generation, and all old ASID allocations are now stale. + * generation, and all old ASID allocations are now stale. */ return (asid->asid == 1); - disabled: +disabled: asid->asid = 0; return 0; } diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c index d6d0e8be89..a02694535a 100644 --- a/xen/arch/x86/hvm/dm.c +++ b/xen/arch/x86/hvm/dm.c @@ -27,7 +27,8 @@ #include -struct dmop_args { +struct dmop_args +{ domid_t domid; unsigned int nr_bufs; /* Reserve enough buf elements for all current hypercalls. 
*/ @@ -45,17 +46,17 @@ static bool _raw_copy_from_guest_buf_offset(void *dst, if ( buf_idx >= args->nr_bufs ) return false; - buf_bytes = args->buf[buf_idx].size; + buf_bytes = args->buf[buf_idx].size; if ( (offset_bytes + dst_bytes) < offset_bytes || (offset_bytes + dst_bytes) > buf_bytes ) return false; - return !copy_from_guest_offset(dst, args->buf[buf_idx].h, - offset_bytes, dst_bytes); + return !copy_from_guest_offset(dst, args->buf[buf_idx].h, offset_bytes, + dst_bytes); } -#define COPY_FROM_GUEST_BUF_OFFSET(dst, bufs, buf_idx, offset_bytes) \ +#define COPY_FROM_GUEST_BUF_OFFSET(dst, bufs, buf_idx, offset_bytes) \ _raw_copy_from_guest_buf_offset(&(dst), bufs, buf_idx, offset_bytes, \ sizeof(dst)) @@ -74,19 +75,18 @@ static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn, if ( ((nr + 7) / 8) > buf->size ) return -EINVAL; - return shadow_mode_enabled(d) ? - shadow_track_dirty_vram(d, first_pfn, nr, buf->h) : - hap_track_dirty_vram(d, first_pfn, nr, buf->h); + return shadow_mode_enabled(d) + ? shadow_track_dirty_vram(d, first_pfn, nr, buf->h) + : hap_track_dirty_vram(d, first_pfn, nr, buf->h); } -static int set_pci_intx_level(struct domain *d, uint16_t domain, - uint8_t bus, uint8_t device, - uint8_t intx, uint8_t level) +static int set_pci_intx_level(struct domain *d, uint16_t domain, uint8_t bus, + uint8_t device, uint8_t intx, uint8_t level) { if ( domain != 0 || bus != 0 || device > 0x1f || intx > 3 ) return -EINVAL; - switch ( level ) + switch (level) { case 0: hvm_pci_intx_deassert(d, device, intx); @@ -101,13 +101,12 @@ static int set_pci_intx_level(struct domain *d, uint16_t domain, return 0; } -static int set_isa_irq_level(struct domain *d, uint8_t isa_irq, - uint8_t level) +static int set_isa_irq_level(struct domain *d, uint8_t isa_irq, uint8_t level) { if ( isa_irq > 15 ) return -EINVAL; - switch ( level ) + switch (level) { case 0: hvm_isa_irq_deassert(d, isa_irq); @@ -122,15 +121,14 @@ static int set_isa_irq_level(struct domain *d, uint8_t isa_irq, return 0; } -static int modified_memory(struct domain *d, - const struct dmop_args *bufs, +static int modified_memory(struct domain *d, const struct dmop_args *bufs, struct xen_dm_op_modified_memory *header) { #define EXTENTS_BUFFER 1 /* Process maximum of 256 pfns before checking for continuation. */ const unsigned int cont_check_interval = 0x100; - unsigned int *rem_extents = &header->nr_extents; + unsigned int *rem_extents = &header->nr_extents; unsigned int batch_rem_pfns = cont_check_interval; /* Used for continuation. 
*/ unsigned int *pfns_done = &header->opaque; @@ -139,8 +137,7 @@ static int modified_memory(struct domain *d, return 0; if ( (bufs->buf[EXTENTS_BUFFER].size / - sizeof(struct xen_dm_op_modified_memory_extent)) < - *rem_extents ) + sizeof(struct xen_dm_op_modified_memory_extent)) < *rem_extents ) return -EINVAL; while ( *rem_extents > 0 ) @@ -225,12 +222,10 @@ static bool allow_p2m_type_change(p2m_type_t old, p2m_type_t new) if ( old == p2m_ioreq_server ) return new == p2m_ram_rw; - return p2m_is_ram(old) || - (p2m_is_hole(old) && new == p2m_mmio_dm); + return p2m_is_ram(old) || (p2m_is_hole(old) && new == p2m_mmio_dm); } -static int set_mem_type(struct domain *d, - struct xen_dm_op_set_mem_type *data) +static int set_mem_type(struct domain *d, struct xen_dm_op_set_mem_type *data) { xen_pfn_t last_pfn = data->first_pfn + data->nr - 1; unsigned int iter = 0, mem_type; @@ -238,8 +233,8 @@ static int set_mem_type(struct domain *d, /* Interface types to internal p2m types */ static const p2m_type_t memtype[] = { - [HVMMEM_ram_rw] = p2m_ram_rw, - [HVMMEM_ram_ro] = p2m_ram_ro, + [HVMMEM_ram_rw] = p2m_ram_rw, + [HVMMEM_ram_ro] = p2m_ram_ro, [HVMMEM_mmio_dm] = p2m_mmio_dm, [HVMMEM_unused] = p2m_invalid, [HVMMEM_ioreq_server] = p2m_ioreq_server, @@ -320,9 +315,8 @@ static int inject_event(struct domain *d, if ( data->vcpuid >= d->max_vcpus || !(v = d->vcpu[data->vcpuid]) ) return -EINVAL; - if ( cmpxchg(&v->arch.hvm.inject_event.vector, - HVM_EVENT_VECTOR_UNSET, HVM_EVENT_VECTOR_UPDATING) != - HVM_EVENT_VECTOR_UNSET ) + if ( cmpxchg(&v->arch.hvm.inject_event.vector, HVM_EVENT_VECTOR_UNSET, + HVM_EVENT_VECTOR_UPDATING) != HVM_EVENT_VECTOR_UNSET ) return -EBUSY; v->arch.hvm.inject_event.type = data->type; @@ -344,24 +338,35 @@ static int dm_op(const struct dmop_args *op_args) size_t offset; static const uint8_t op_size[] = { - [XEN_DMOP_create_ioreq_server] = sizeof(struct xen_dm_op_create_ioreq_server), - [XEN_DMOP_get_ioreq_server_info] = sizeof(struct xen_dm_op_get_ioreq_server_info), - [XEN_DMOP_map_io_range_to_ioreq_server] = sizeof(struct xen_dm_op_ioreq_server_range), - [XEN_DMOP_unmap_io_range_from_ioreq_server] = sizeof(struct xen_dm_op_ioreq_server_range), - [XEN_DMOP_set_ioreq_server_state] = sizeof(struct xen_dm_op_set_ioreq_server_state), - [XEN_DMOP_destroy_ioreq_server] = sizeof(struct xen_dm_op_destroy_ioreq_server), - [XEN_DMOP_track_dirty_vram] = sizeof(struct xen_dm_op_track_dirty_vram), - [XEN_DMOP_set_pci_intx_level] = sizeof(struct xen_dm_op_set_pci_intx_level), - [XEN_DMOP_set_isa_irq_level] = sizeof(struct xen_dm_op_set_isa_irq_level), - [XEN_DMOP_set_pci_link_route] = sizeof(struct xen_dm_op_set_pci_link_route), - [XEN_DMOP_modified_memory] = sizeof(struct xen_dm_op_modified_memory), - [XEN_DMOP_set_mem_type] = sizeof(struct xen_dm_op_set_mem_type), - [XEN_DMOP_inject_event] = sizeof(struct xen_dm_op_inject_event), - [XEN_DMOP_inject_msi] = sizeof(struct xen_dm_op_inject_msi), - [XEN_DMOP_map_mem_type_to_ioreq_server] = sizeof(struct xen_dm_op_map_mem_type_to_ioreq_server), - [XEN_DMOP_remote_shutdown] = sizeof(struct xen_dm_op_remote_shutdown), - [XEN_DMOP_relocate_memory] = sizeof(struct xen_dm_op_relocate_memory), - [XEN_DMOP_pin_memory_cacheattr] = sizeof(struct xen_dm_op_pin_memory_cacheattr), + [XEN_DMOP_create_ioreq_server] = + sizeof(struct xen_dm_op_create_ioreq_server), + [XEN_DMOP_get_ioreq_server_info] = + sizeof(struct xen_dm_op_get_ioreq_server_info), + [XEN_DMOP_map_io_range_to_ioreq_server] = + sizeof(struct xen_dm_op_ioreq_server_range), + 
[XEN_DMOP_unmap_io_range_from_ioreq_server] = + sizeof(struct xen_dm_op_ioreq_server_range), + [XEN_DMOP_set_ioreq_server_state] = + sizeof(struct xen_dm_op_set_ioreq_server_state), + [XEN_DMOP_destroy_ioreq_server] = + sizeof(struct xen_dm_op_destroy_ioreq_server), + [XEN_DMOP_track_dirty_vram] = sizeof(struct xen_dm_op_track_dirty_vram), + [XEN_DMOP_set_pci_intx_level] = + sizeof(struct xen_dm_op_set_pci_intx_level), + [XEN_DMOP_set_isa_irq_level] = + sizeof(struct xen_dm_op_set_isa_irq_level), + [XEN_DMOP_set_pci_link_route] = + sizeof(struct xen_dm_op_set_pci_link_route), + [XEN_DMOP_modified_memory] = sizeof(struct xen_dm_op_modified_memory), + [XEN_DMOP_set_mem_type] = sizeof(struct xen_dm_op_set_mem_type), + [XEN_DMOP_inject_event] = sizeof(struct xen_dm_op_inject_event), + [XEN_DMOP_inject_msi] = sizeof(struct xen_dm_op_inject_msi), + [XEN_DMOP_map_mem_type_to_ioreq_server] = + sizeof(struct xen_dm_op_map_mem_type_to_ioreq_server), + [XEN_DMOP_remote_shutdown] = sizeof(struct xen_dm_op_remote_shutdown), + [XEN_DMOP_relocate_memory] = sizeof(struct xen_dm_op_relocate_memory), + [XEN_DMOP_pin_memory_cacheattr] = + sizeof(struct xen_dm_op_pin_memory_cacheattr), }; rc = rcu_lock_remote_domain_by_id(op_args->domid, &d); @@ -403,12 +408,11 @@ static int dm_op(const struct dmop_args *op_args) if ( op.pad ) goto out; - switch ( op.op ) + switch (op.op) { case XEN_DMOP_create_ioreq_server: { - struct xen_dm_op_create_ioreq_server *data = - &op.u.create_ioreq_server; + struct xen_dm_op_create_ioreq_server *data = &op.u.create_ioreq_server; const_op = false; @@ -416,8 +420,7 @@ static int dm_op(const struct dmop_args *op_args) if ( data->pad[0] || data->pad[1] || data->pad[2] ) break; - rc = hvm_create_ioreq_server(d, data->handle_bufioreq, - &data->id); + rc = hvm_create_ioreq_server(d, data->handle_bufioreq, &data->id); break; } @@ -433,12 +436,11 @@ static int dm_op(const struct dmop_args *op_args) if ( data->flags & ~valid_flags ) break; - rc = hvm_get_ioreq_server_info(d, data->id, - (data->flags & XEN_DMOP_no_gfns) ? - NULL : &data->ioreq_gfn, - (data->flags & XEN_DMOP_no_gfns) ? - NULL : &data->bufioreq_gfn, - &data->bufioreq_port); + rc = hvm_get_ioreq_server_info( + d, data->id, + (data->flags & XEN_DMOP_no_gfns) ? NULL : &data->ioreq_gfn, + (data->flags & XEN_DMOP_no_gfns) ? 
NULL : &data->bufioreq_gfn, + &data->bufioreq_port); break; } @@ -483,8 +485,8 @@ static int dm_op(const struct dmop_args *op_args) break; if ( first_gfn == 0 ) - rc = hvm_map_mem_type_to_ioreq_server(d, data->id, - data->type, data->flags); + rc = hvm_map_mem_type_to_ioreq_server(d, data->id, data->type, + data->flags); else rc = 0; @@ -548,8 +550,7 @@ static int dm_op(const struct dmop_args *op_args) case XEN_DMOP_track_dirty_vram: { - const struct xen_dm_op_track_dirty_vram *data = - &op.u.track_dirty_vram; + const struct xen_dm_op_track_dirty_vram *data = &op.u.track_dirty_vram; rc = -EINVAL; if ( data->pad ) @@ -567,9 +568,8 @@ static int dm_op(const struct dmop_args *op_args) const struct xen_dm_op_set_pci_intx_level *data = &op.u.set_pci_intx_level; - rc = set_pci_intx_level(d, data->domain, data->bus, - data->device, data->intx, - data->level); + rc = set_pci_intx_level(d, data->domain, data->bus, data->device, + data->intx, data->level); break; } @@ -593,8 +593,7 @@ static int dm_op(const struct dmop_args *op_args) case XEN_DMOP_modified_memory: { - struct xen_dm_op_modified_memory *data = - &op.u.modified_memory; + struct xen_dm_op_modified_memory *data = &op.u.modified_memory; rc = modified_memory(d, op_args, data); const_op = !rc; @@ -603,8 +602,7 @@ static int dm_op(const struct dmop_args *op_args) case XEN_DMOP_set_mem_type: { - struct xen_dm_op_set_mem_type *data = - &op.u.set_mem_type; + struct xen_dm_op_set_mem_type *data = &op.u.set_mem_type; const_op = false; @@ -618,8 +616,7 @@ static int dm_op(const struct dmop_args *op_args) case XEN_DMOP_inject_event: { - const struct xen_dm_op_inject_event *data = - &op.u.inject_event; + const struct xen_dm_op_inject_event *data = &op.u.inject_event; rc = -EINVAL; if ( data->pad0 || data->pad1 ) @@ -631,8 +628,7 @@ static int dm_op(const struct dmop_args *op_args) case XEN_DMOP_inject_msi: { - const struct xen_dm_op_inject_msi *data = - &op.u.inject_msi; + const struct xen_dm_op_inject_msi *data = &op.u.inject_msi; rc = -EINVAL; if ( data->pad ) @@ -644,8 +640,7 @@ static int dm_op(const struct dmop_args *op_args) case XEN_DMOP_remote_shutdown: { - const struct xen_dm_op_remote_shutdown *data = - &op.u.remote_shutdown; + const struct xen_dm_op_remote_shutdown *data = &op.u.remote_shutdown; domain_shutdown(d, data->reason); rc = 0; @@ -694,8 +689,8 @@ static int dm_op(const struct dmop_args *op_args) break; } - rc = hvm_set_mem_pinned_cacheattr(d, data->start, data->end, - data->type); + rc = + hvm_set_mem_pinned_cacheattr(d, data->start, data->end, data->type); break; } @@ -704,12 +699,12 @@ static int dm_op(const struct dmop_args *op_args) break; } - if ( (!rc || rc == -ERESTART) && - !const_op && copy_to_guest_offset(op_args->buf[0].h, offset, - (void *)&op.u, op_size[op.op]) ) + if ( (!rc || rc == -ERESTART) && !const_op && + copy_to_guest_offset(op_args->buf[0].h, offset, (void *)&op.u, + op_size[op.op]) ) rc = -EFAULT; - out: +out: rcu_unlock_domain(d); return rc; @@ -732,8 +727,7 @@ CHECK_dm_op_remote_shutdown; CHECK_dm_op_relocate_memory; CHECK_dm_op_pin_memory_cacheattr; -int compat_dm_op(domid_t domid, - unsigned int nr_bufs, +int compat_dm_op(domid_t domid, unsigned int nr_bufs, XEN_GUEST_HANDLE_PARAM(void) bufs) { struct dmop_args args; @@ -754,7 +748,7 @@ int compat_dm_op(domid_t domid, return -EFAULT; #define XLAT_dm_op_buf_HNDL_h(_d_, _s_) \ - guest_from_compat_handle((_d_)->h, (_s_)->h) + guest_from_compat_handle((_d_)->h, (_s_)->h) XLAT_dm_op_buf(&args.buf[i], &cmp); @@ -764,14 +758,13 @@ int compat_dm_op(domid_t 
domid, rc = dm_op(&args); if ( rc == -ERESTART ) - rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih", - domid, nr_bufs, bufs); + rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih", domid, + nr_bufs, bufs); return rc; } -long do_dm_op(domid_t domid, - unsigned int nr_bufs, +long do_dm_op(domid_t domid, unsigned int nr_bufs, XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs) { struct dmop_args args; @@ -789,8 +782,8 @@ long do_dm_op(domid_t domid, rc = dm_op(&args); if ( rc == -ERESTART ) - rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih", - domid, nr_bufs, bufs); + rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih", domid, + nr_bufs, bufs); return rc; } diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c index f98ee06806..58b1677c12 100644 --- a/xen/arch/x86/hvm/dom0_build.c +++ b/xen/arch/x86/hvm/dom0_build.c @@ -77,9 +77,9 @@ static int __init modify_identity_mmio(struct domain *d, unsigned long pfn, { int rc; - for ( ; ; ) + for ( ;; ) { - rc = map ? map_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn)) + rc = map ? map_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn)) : unmap_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn)); if ( rc == 0 ) break; @@ -103,14 +103,15 @@ static int __init pvh_populate_memory_range(struct domain *d, unsigned long start, unsigned long nr_pages) { - struct { + struct + { unsigned long align; unsigned int order; } static const __initconst orders[] = { /* NB: must be sorted by decreasing size. */ - { .align = PFN_DOWN(GB(1)), .order = PAGE_ORDER_1G }, - { .align = PFN_DOWN(MB(2)), .order = PAGE_ORDER_2M }, - { .align = PFN_DOWN(KB(4)), .order = PAGE_ORDER_4K }, + {.align = PFN_DOWN(GB(1)), .order = PAGE_ORDER_1G}, + {.align = PFN_DOWN(MB(2)), .order = PAGE_ORDER_2M}, + {.align = PFN_DOWN(KB(4)), .order = PAGE_ORDER_4K}, }; unsigned int max_order = MAX_ORDER, i = 0; struct page_info *page; @@ -128,11 +129,11 @@ static int __init pvh_populate_memory_range(struct domain *d, nr_pages >= (1UL << orders[j].order) ) break; - switch ( j ) + switch (j) { case ARRAY_SIZE(orders): - printk("Unable to find allocation order for [%#lx,%#lx)\n", - start, start + nr_pages); + printk("Unable to find allocation order for [%#lx,%#lx)\n", start, + start + nr_pages); return -EINVAL; case 0: @@ -145,8 +146,8 @@ static int __init pvh_populate_memory_range(struct domain *d, * Aim to allocate until the higher next order alignment or the * end of the region. */ - end = min(ROUNDUP(start + 1, orders[j - 1].align), - start + nr_pages); + end = + min(ROUNDUP(start + 1, orders[j - 1].align), start + nr_pages); break; } @@ -173,12 +174,11 @@ static int __init pvh_populate_memory_range(struct domain *d, continue; } - rc = guest_physmap_add_page(d, _gfn(start), page_to_mfn(page), - order); + rc = guest_physmap_add_page(d, _gfn(start), page_to_mfn(page), order); if ( rc != 0 ) { - printk("Failed to populate memory: [%#lx,%#lx): %d\n", - start, start + (1UL << order), rc); + printk("Failed to populate memory: [%#lx,%#lx): %d\n", start, + start + (1UL << order), rc); return rc; } start += 1UL << order; @@ -203,7 +203,7 @@ static int __init pvh_steal_ram(struct domain *d, unsigned long size, * Alignment 0 should be set to 1, so it doesn't wrap around in the * calculations below. */ - align = align ? : 1; + align = align ?: 1; while ( i-- ) { struct e820entry *entry = &d->arch.e820[i]; @@ -316,8 +316,8 @@ static int __init pvh_setup_vmx_realmode_helpers(struct domain *d) * when using Intel EPT. 
Create a 32-bit non-PAE page directory of * superpages. */ - ident_pt = map_domain_gfn(p2m_get_hostp2m(d), _gfn(PFN_DOWN(gaddr)), - &mfn, &p2mt, 0, &rc); + ident_pt = map_domain_gfn(p2m_get_hostp2m(d), _gfn(PFN_DOWN(gaddr)), &mfn, + &p2mt, 0, &rc); if ( ident_pt == NULL ) { printk("Unable to map identity page tables\n"); @@ -328,7 +328,8 @@ static int __init pvh_setup_vmx_realmode_helpers(struct domain *d) put_page(mfn_to_page(mfn)); d->arch.hvm.params[HVM_PARAM_IDENT_PT] = gaddr; if ( pvh_add_mem_range(d, gaddr, gaddr + PAGE_SIZE, E820_RESERVED) ) - printk("Unable to set identity page tables as reserved in the memory map\n"); + printk("Unable to set identity page tables as reserved in the memory " + "map\n"); return 0; } @@ -376,8 +377,7 @@ static __init void pvh_setup_e820(struct domain *d, unsigned long nr_pages) * order to prevent this code from getting out of sync. */ start = ROUNDUP(entry->addr, PAGE_SIZE << PAGE_ORDER_4K); - end = (entry->addr + entry->size) & - ~((PAGE_SIZE << PAGE_ORDER_4K) - 1); + end = (entry->addr + entry->size) & ~((PAGE_SIZE << PAGE_ORDER_4K) - 1); if ( start >= end ) continue; @@ -401,7 +401,7 @@ static __init void pvh_setup_e820(struct domain *d, unsigned long nr_pages) { cur_pages += pages; } - next: + next: d->arch.nr_e820++; entry_guest++; ASSERT(d->arch.nr_e820 <= e820.nr_map + 1); @@ -460,15 +460,13 @@ static int __init pvh_populate_p2m(struct domain *d) { uint64_t end = min_t(uint64_t, MB(1), d->arch.e820[i].addr + d->arch.e820[i].size); - enum hvm_translation_result res = - hvm_copy_to_guest_phys(mfn_to_maddr(_mfn(addr)), - mfn_to_virt(addr), - d->arch.e820[i].addr - end, - v); + enum hvm_translation_result res = hvm_copy_to_guest_phys( + mfn_to_maddr(_mfn(addr)), mfn_to_virt(addr), + d->arch.e820[i].addr - end, v); if ( res != HVMTRANS_okay ) - printk("Failed to copy [%#lx, %#lx): %d\n", - addr, addr + size, res); + printk("Failed to copy [%#lx, %#lx): %d\n", addr, addr + size, + res); } } @@ -502,8 +500,8 @@ static int __init pvh_load_kernel(struct domain *d, const module_t *image, struct elf_binary elf; struct elf_dom_parms parms; paddr_t last_addr; - struct hvm_start_info start_info = { 0 }; - struct hvm_modlist_entry mod = { 0 }; + struct hvm_start_info start_info = {0}; + struct hvm_modlist_entry mod = {0}; struct vcpu *v = d->vcpu[0]; int rc; @@ -668,7 +666,7 @@ static int __init pvh_setup_cpus(struct domain *d, paddr_t entry, } static int __init acpi_count_intr_ovr(struct acpi_subtable_header *header, - const unsigned long end) + const unsigned long end) { acpi_intr_overrides++; return 0; @@ -819,19 +817,18 @@ static int __init pvh_setup_acpi_madt(struct domain *d, paddr_t *addr) rc = 0; - out: +out: xfree(madt); return rc; } -static bool __init acpi_memory_banned(unsigned long address, - unsigned long size) +static bool __init acpi_memory_banned(unsigned long address, unsigned long size) { unsigned long mfn = PFN_DOWN(address); unsigned long nr_pages = PFN_UP((address & ~PAGE_MASK) + size), i; - for ( i = 0 ; i < nr_pages; i++ ) + for ( i = 0; i < nr_pages; i++ ) if ( !page_is_ram_type(mfn + i, RAM_TYPE_RESERVED) && !page_is_ram_type(mfn + i, RAM_TYPE_ACPI) ) return true; @@ -850,7 +847,7 @@ static bool __init pvh_acpi_table_allowed(const char *sig, }; unsigned int i; - for ( i = 0 ; i < ARRAY_SIZE(allowed_tables); i++ ) + for ( i = 0; i < ARRAY_SIZE(allowed_tables); i++ ) { if ( strncmp(sig, allowed_tables[i], ACPI_NAME_SIZE) ) continue; @@ -901,7 +898,7 @@ static int __init pvh_setup_acpi_xsdt(struct domain *d, paddr_t madt_addr, 
acpi_dmar_reinstate(); /* Count the number of tables that will be added to the XSDT. */ - for( i = 0; i < acpi_gbl_root_table_list.count; i++ ) + for ( i = 0; i < acpi_gbl_root_table_list.count; i++ ) { if ( pvh_acpi_xsdt_table_allowed(tables[i].signature.ascii, tables[i].address, tables[i].length) ) @@ -947,7 +944,7 @@ static int __init pvh_setup_acpi_xsdt(struct domain *d, paddr_t madt_addr, xsdt->table_offset_entry[0] = madt_addr; /* Copy the addresses of the rest of the allowed tables. */ - for( i = 0, j = 1; i < acpi_gbl_root_table_list.count; i++ ) + for ( i = 0, j = 1; i < acpi_gbl_root_table_list.count; i++ ) { if ( pvh_acpi_xsdt_table_allowed(tables[i].signature.ascii, tables[i].address, tables[i].length) ) @@ -983,7 +980,7 @@ static int __init pvh_setup_acpi_xsdt(struct domain *d, paddr_t madt_addr, rc = 0; - out: +out: xfree(xsdt); return rc; @@ -996,14 +993,13 @@ static int __init pvh_setup_acpi(struct domain *d, paddr_t start_info) unsigned int i; int rc; struct acpi_table_rsdp *native_rsdp, rsdp = { - .signature = ACPI_SIG_RSDP, - .revision = 2, - .length = sizeof(rsdp), - }; - + .signature = ACPI_SIG_RSDP, + .revision = 2, + .length = sizeof(rsdp), + }; /* Scan top-level tables and add their regions to the guest memory map. */ - for( i = 0; i < acpi_gbl_root_table_list.count; i++ ) + for ( i = 0; i < acpi_gbl_root_table_list.count; i++ ) { const char *sig = acpi_gbl_root_table_list.tables[i].signature.ascii; unsigned long addr = acpi_gbl_root_table_list.tables[i].address; @@ -1015,9 +1011,9 @@ static int __init pvh_setup_acpi(struct domain *d, paddr_t start_info) * re-using MADT memory. */ if ( strncmp(sig, ACPI_SIG_MADT, ACPI_NAME_SIZE) - ? pvh_acpi_table_allowed(sig, addr, size) - : !acpi_memory_banned(addr, size) ) - pvh_add_mem_range(d, addr, addr + size, E820_ACPI); + ? pvh_acpi_table_allowed(sig, addr, size) + : !acpi_memory_banned(addr, size) ) + pvh_add_mem_range(d, addr, addr + size, E820_ACPI); } /* Identity map ACPI e820 regions. */ @@ -1028,14 +1024,15 @@ static int __init pvh_setup_acpi(struct domain *d, paddr_t start_info) continue; pfn = PFN_DOWN(d->arch.e820[i].addr); - nr_pages = PFN_UP((d->arch.e820[i].addr & ~PAGE_MASK) + - d->arch.e820[i].size); + nr_pages = + PFN_UP((d->arch.e820[i].addr & ~PAGE_MASK) + d->arch.e820[i].size); rc = modify_identity_mmio(d, pfn, nr_pages, true); if ( rc ) { - printk("Failed to map ACPI region [%#lx, %#lx) into Dom0 memory map\n", - pfn, pfn + nr_pages); + printk( + "Failed to map ACPI region [%#lx, %#lx) into Dom0 memory map\n", + pfn, pfn + nr_pages); return rc; } } @@ -1062,10 +1059,10 @@ static int __init pvh_setup_acpi(struct domain *d, paddr_t start_info) * Calling acpi_tb_checksum here is a layering violation, but * introducing a wrapper for such simple usage seems overkill. */ - rsdp.checksum -= acpi_tb_checksum(ACPI_CAST_PTR(u8, &rsdp), - ACPI_RSDP_REV0_SIZE); - rsdp.extended_checksum -= acpi_tb_checksum(ACPI_CAST_PTR(u8, &rsdp), - sizeof(rsdp)); + rsdp.checksum -= + acpi_tb_checksum(ACPI_CAST_PTR(u8, &rsdp), ACPI_RSDP_REV0_SIZE); + rsdp.extended_checksum -= + acpi_tb_checksum(ACPI_CAST_PTR(u8, &rsdp), sizeof(rsdp)); /* * Place the new RSDP in guest memory space. @@ -1095,12 +1092,9 @@ static int __init pvh_setup_acpi(struct domain *d, paddr_t start_info) } /* Copy RSDP address to start_info. 
*/ - rc = hvm_copy_to_guest_phys(start_info + - offsetof(struct hvm_start_info, rsdp_paddr), - &rsdp_paddr, - sizeof(((struct hvm_start_info *) - 0)->rsdp_paddr), - d->vcpu[0]); + rc = hvm_copy_to_guest_phys( + start_info + offsetof(struct hvm_start_info, rsdp_paddr), &rsdp_paddr, + sizeof(((struct hvm_start_info *)0)->rsdp_paddr), d->vcpu[0]); if ( rc ) { printk("Unable to copy RSDP address to start info\n"); @@ -1129,8 +1123,7 @@ static void __hwdom_init pvh_setup_mmcfg(struct domain *d) } int __init dom0_construct_pvh(struct domain *d, const module_t *image, - unsigned long image_headroom, - module_t *initrd, + unsigned long image_headroom, module_t *initrd, char *cmdline) { paddr_t entry, start_info; diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c index 5d5a746a25..bf525be126 100644 --- a/xen/arch/x86/hvm/domain.c +++ b/xen/arch/x86/hvm/domain.c @@ -26,7 +26,6 @@ static int check_segment(struct segment_register *reg, enum x86_segment seg) { - if ( reg->pad != 0 ) { gprintk(XENLOG_ERR, "Segment attribute bits 12-15 are not zero\n"); @@ -70,7 +69,7 @@ static int check_segment(struct segment_register *reg, enum x86_segment seg) return -EINVAL; } - switch ( seg ) + switch (seg) { case x86_seg_cs: if ( !(reg->type & 0x8) ) @@ -120,7 +119,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx) if ( ctx->pad != 0 ) return -EINVAL; - switch ( ctx->mode ) + switch (ctx->mode) { default: return -EINVAL; @@ -136,13 +135,15 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx) ctx->cpu_regs.x86_32.pad2[2] != 0 ) return -EINVAL; -#define SEG(s, r) ({ \ - s = (struct segment_register) \ - { 0, { (r)->s ## _ar }, (r)->s ## _limit, (r)->s ## _base }; \ - /* Set accessed / busy bit for present segments. */ \ - if ( s.p ) \ - s.type |= (x86_seg_##s != x86_seg_tr ? 1 : 2); \ - check_segment(&s, x86_seg_ ## s); }) +#define SEG(s, r) \ + ({ \ + s = (struct segment_register){ \ + 0, {(r)->s##_ar}, (r)->s##_limit, (r)->s##_base}; \ + /* Set accessed / busy bit for present segments. */ \ + if ( s.p ) \ + s.type |= (x86_seg_##s != x86_seg_tr ? 
1 : 2); \ + check_segment(&s, x86_seg_##s); \ + }) rc = SEG(cs, regs); rc |= SEG(ds, regs); @@ -193,21 +194,21 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx) return -EINVAL; } - uregs->rax = regs->eax; - uregs->rcx = regs->ecx; - uregs->rdx = regs->edx; - uregs->rbx = regs->ebx; - uregs->rsp = regs->esp; - uregs->rbp = regs->ebp; - uregs->rsi = regs->esi; - uregs->rdi = regs->edi; - uregs->rip = regs->eip; + uregs->rax = regs->eax; + uregs->rcx = regs->ecx; + uregs->rdx = regs->edx; + uregs->rbx = regs->ebx; + uregs->rsp = regs->esp; + uregs->rbp = regs->ebp; + uregs->rsi = regs->esi; + uregs->rdi = regs->edi; + uregs->rip = regs->eip; uregs->rflags = regs->eflags; v->arch.hvm.guest_cr[0] = regs->cr0; v->arch.hvm.guest_cr[3] = regs->cr3; v->arch.hvm.guest_cr[4] = regs->cr4; - v->arch.hvm.guest_efer = regs->efer; + v->arch.hvm.guest_efer = regs->efer; } break; @@ -244,30 +245,29 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx) return -EINVAL; } - uregs->rax = regs->rax; - uregs->rcx = regs->rcx; - uregs->rdx = regs->rdx; - uregs->rbx = regs->rbx; - uregs->rsp = regs->rsp; - uregs->rbp = regs->rbp; - uregs->rsi = regs->rsi; - uregs->rdi = regs->rdi; - uregs->rip = regs->rip; + uregs->rax = regs->rax; + uregs->rcx = regs->rcx; + uregs->rdx = regs->rdx; + uregs->rbx = regs->rbx; + uregs->rsp = regs->rsp; + uregs->rbp = regs->rbp; + uregs->rsi = regs->rsi; + uregs->rdi = regs->rdi; + uregs->rip = regs->rip; uregs->rflags = regs->rflags; v->arch.hvm.guest_cr[0] = regs->cr0; v->arch.hvm.guest_cr[3] = regs->cr3; v->arch.hvm.guest_cr[4] = regs->cr4; - v->arch.hvm.guest_efer = regs->efer; + v->arch.hvm.guest_efer = regs->efer; -#define SEG(l, a) (struct segment_register){ 0, { a }, l, 0 } +#define SEG(l, a) (struct segment_register){0, {a}, l, 0} cs = SEG(~0u, 0xa9b); /* 64bit code segment. */ ds = ss = es = SEG(~0u, 0xc93); tr = SEG(0x67, 0x8b); /* 64bit TSS (busy). */ #undef SEG } break; - } if ( v->arch.hvm.guest_efer & EFER_LME ) @@ -284,7 +284,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx) if ( errstr ) { gprintk(XENLOG_ERR, "Bad EFER value (%#016lx): %s\n", - v->arch.hvm.guest_efer, errstr); + v->arch.hvm.guest_efer, errstr); return -EINVAL; } @@ -296,13 +296,11 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx) if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) ) { /* Shadow-mode CR3 change. Check PDBR and update refcounts. */ - struct page_info *page = get_page_from_gfn(v->domain, - v->arch.hvm.guest_cr[3] >> PAGE_SHIFT, - NULL, P2M_ALLOC); + struct page_info *page = get_page_from_gfn( + v->domain, v->arch.hvm.guest_cr[3] >> PAGE_SHIFT, NULL, P2M_ALLOC); if ( !page ) { - gprintk(XENLOG_ERR, "Invalid CR3: %#lx\n", - v->arch.hvm.guest_cr[3]); + gprintk(XENLOG_ERR, "Invalid CR3: %#lx\n", v->arch.hvm.guest_cr[3]); return -EINVAL; } @@ -316,10 +314,8 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx) hvm_set_segment_register(v, x86_seg_tr, &tr); /* Sync AP's TSC with BSP's. 
*/ - v->arch.hvm.cache_tsc_offset = - d->vcpu[0]->arch.hvm.cache_tsc_offset; - hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset, - d->arch.hvm.sync_tsc); + v->arch.hvm.cache_tsc_offset = d->vcpu[0]->arch.hvm.cache_tsc_offset; + hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset, d->arch.hvm.sync_tsc); paging_update_paging_modes(v); diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c index 2d02ef1521..ddf0cce880 100644 --- a/xen/arch/x86/hvm/emulate.c +++ b/xen/arch/x86/hvm/emulate.c @@ -1,10 +1,10 @@ /****************************************************************************** * hvm/emulate.c - * + * * HVM instruction emulation. Used for MMIO and VMX real mode. - * + * * Copyright (c) 2008, Citrix Systems, Inc. - * + * * Authors: * Keir Fraser */ @@ -54,22 +54,18 @@ static void hvmtrace_io_assist(const ioreq_t *p) size *= 2; } - trace_var(event, 0/*!cycles*/, size, buffer); + trace_var(event, 0 /*!cycles*/, size, buffer); } -static int null_read(const struct hvm_io_handler *io_handler, - uint64_t addr, - uint32_t size, - uint64_t *data) +static int null_read(const struct hvm_io_handler *io_handler, uint64_t addr, + uint32_t size, uint64_t *data) { *data = ~0ul; return X86EMUL_OKAY; } -static int null_write(const struct hvm_io_handler *handler, - uint64_t addr, - uint32_t size, - uint64_t data) +static int null_write(const struct hvm_io_handler *handler, uint64_t addr, + uint32_t size, uint64_t data) { return X86EMUL_OKAY; } @@ -80,8 +76,7 @@ static int set_context_data(void *buffer, unsigned int size) if ( curr->arch.vm_event ) { - unsigned int safe_size = - min(size, curr->arch.vm_event->emul.read.size); + unsigned int safe_size = min(size, curr->arch.vm_event->emul.read.size); memcpy(buffer, curr->arch.vm_event->emul.read.data, safe_size); memset(buffer + safe_size, 0, size - safe_size); @@ -91,19 +86,13 @@ static int set_context_data(void *buffer, unsigned int size) return X86EMUL_UNHANDLEABLE; } -static const struct hvm_io_ops null_ops = { - .read = null_read, - .write = null_write -}; +static const struct hvm_io_ops null_ops = {.read = null_read, + .write = null_write}; -static const struct hvm_io_handler null_handler = { - .ops = &null_ops -}; +static const struct hvm_io_handler null_handler = {.ops = &null_ops}; static int ioreq_server_read(const struct hvm_io_handler *io_handler, - uint64_t addr, - uint32_t size, - uint64_t *data) + uint64_t addr, uint32_t size, uint64_t *data) { if ( hvm_copy_from_guest_phys(data, addr, size) != HVMTRANS_okay ) return X86EMUL_UNHANDLEABLE; @@ -111,18 +100,15 @@ static int ioreq_server_read(const struct hvm_io_handler *io_handler, return X86EMUL_OKAY; } -static const struct hvm_io_ops ioreq_server_ops = { - .read = ioreq_server_read, - .write = null_write -}; +static const struct hvm_io_ops ioreq_server_ops = {.read = ioreq_server_read, + .write = null_write}; static const struct hvm_io_handler ioreq_server_handler = { - .ops = &ioreq_server_ops -}; + .ops = &ioreq_server_ops}; -static int hvmemul_do_io( - bool_t is_mmio, paddr_t addr, unsigned long *reps, unsigned int size, - uint8_t dir, bool_t df, bool_t data_is_addr, uintptr_t data) +static int hvmemul_do_io(bool_t is_mmio, paddr_t addr, unsigned long *reps, + unsigned int size, uint8_t dir, bool_t df, + bool_t data_is_addr, uintptr_t data) { struct vcpu *curr = current; struct domain *currd = curr->domain; @@ -151,7 +137,7 @@ static int hvmemul_do_io( return X86EMUL_UNHANDLEABLE; } - switch ( vio->io_req.state ) + switch (vio->io_req.state) { case STATE_IOREQ_NONE: break; 
@@ -161,11 +147,8 @@ static int hvmemul_do_io( /* Verify the emulation request has been correctly re-issued */ if ( (p.type != (is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO)) || - (p.addr != addr) || - (p.size != size) || - (p.count > *reps) || - (p.dir != dir) || - (p.df != df) || + (p.addr != addr) || (p.size != size) || (p.count > *reps) || + (p.dir != dir) || (p.df != df) || (p.data_is_ptr != data_is_addr) || (data_is_addr && (p.data != data)) ) domain_crash(currd); @@ -200,8 +183,7 @@ static int hvmemul_do_io( if ( tail < p.size ) /* single rep spans GFN */ p.count = 1; else - p.count = min(p.count, - (p.df ? (off + p.size) : tail) / p.size); + p.count = min(p.count, (p.df ? (off + p.size) : tail) / p.size); } ASSERT(p.count); @@ -216,7 +198,7 @@ static int hvmemul_do_io( ASSERT(p.count <= *reps); *reps = vio->io_req.count = p.count; - switch ( rc ) + switch (rc) { case X86EMUL_OKAY: vio->io_req.state = STATE_IOREQ_NONE; @@ -320,7 +302,7 @@ static int hvmemul_do_io( if ( rc != X86EMUL_OKAY ) return rc; - finish_access: +finish_access: if ( dir == IOREQ_READ ) { hvmtrace_io_assist(&p); @@ -332,16 +314,16 @@ static int hvmemul_do_io( return X86EMUL_OKAY; } -static int hvmemul_do_io_buffer( - bool_t is_mmio, paddr_t addr, unsigned long *reps, unsigned int size, - uint8_t dir, bool_t df, void *buffer) +static int hvmemul_do_io_buffer(bool_t is_mmio, paddr_t addr, + unsigned long *reps, unsigned int size, + uint8_t dir, bool_t df, void *buffer) { int rc; BUG_ON(buffer == NULL); - rc = hvmemul_do_io(is_mmio, addr, reps, size, dir, df, 0, - (uintptr_t)buffer); + rc = + hvmemul_do_io(is_mmio, addr, reps, size, dir, df, 0, (uintptr_t)buffer); ASSERT(rc != X86EMUL_UNIMPLEMENTED); @@ -356,8 +338,7 @@ static int hvmemul_acquire_page(unsigned long gmfn, struct page_info **page) struct domain *curr_d = current->domain; p2m_type_t p2mt; - switch ( check_get_page_from_gfn(curr_d, _gfn(gmfn), false, &p2mt, - page) ) + switch (check_get_page_from_gfn(curr_d, _gfn(gmfn), false, &p2mt, page)) { case 0: break; @@ -390,9 +371,9 @@ static inline void hvmemul_release_page(struct page_info *page) put_page(page); } -static int hvmemul_do_io_addr( - bool_t is_mmio, paddr_t addr, unsigned long *reps, - unsigned int size, uint8_t dir, bool_t df, paddr_t ram_gpa) +static int hvmemul_do_io_addr(bool_t is_mmio, paddr_t addr, unsigned long *reps, + unsigned int size, uint8_t dir, bool_t df, + paddr_t ram_gpa) { struct vcpu *v = current; unsigned long ram_gmfn = paddr_to_pfn(ram_gpa); @@ -409,11 +390,9 @@ static int hvmemul_do_io_addr( nr_pages++; /* Detemine how many reps will fit within this page */ - count = min_t(unsigned long, - *reps, - df ? - ((page_off + size - 1) & ~PAGE_MASK) / size : - (PAGE_SIZE - page_off) / size); + count = min_t(unsigned long, *reps, + df ? ((page_off + size - 1) & ~PAGE_MASK) / size + : (PAGE_SIZE - page_off) / size); if ( count == 0 ) { @@ -433,8 +412,7 @@ static int hvmemul_do_io_addr( count = 1; } - rc = hvmemul_do_io(is_mmio, addr, &count, size, dir, df, 1, - ram_gpa); + rc = hvmemul_do_io(is_mmio, addr, &count, size, dir, df, 1, ram_gpa); ASSERT(rc != X86EMUL_UNIMPLEMENTED); @@ -443,7 +421,7 @@ static int hvmemul_do_io_addr( *reps = count; - out: +out: while ( nr_pages ) hvmemul_release_page(ram_page[--nr_pages]); @@ -456,9 +434,7 @@ static int hvmemul_do_io_addr( * IOREQ_WRITE means a write from to . Each access has * width . 
*/ -int hvmemul_do_pio_buffer(uint16_t port, - unsigned int size, - uint8_t dir, +int hvmemul_do_pio_buffer(uint16_t port, unsigned int size, uint8_t dir, void *buffer) { unsigned long one_rep = 1; @@ -476,11 +452,8 @@ int hvmemul_do_pio_buffer(uint16_t port, * Each access will be done to/from successive RAM addresses, increasing * if is 0 or decreasing if is 1. */ -static int hvmemul_do_pio_addr(uint16_t port, - unsigned long *reps, - unsigned int size, - uint8_t dir, - bool_t df, +static int hvmemul_do_pio_addr(uint16_t port, unsigned long *reps, + unsigned int size, uint8_t dir, bool_t df, paddr_t ram_addr) { return hvmemul_do_io_addr(0, port, reps, size, dir, df, ram_addr); @@ -500,11 +473,8 @@ static int hvmemul_do_pio_addr(uint16_t port, * pointer; there is no implicit interation over a * block of memory starting at . */ -static int hvmemul_do_mmio_buffer(paddr_t mmio_gpa, - unsigned long *reps, - unsigned int size, - uint8_t dir, - bool_t df, +static int hvmemul_do_mmio_buffer(paddr_t mmio_gpa, unsigned long *reps, + unsigned int size, uint8_t dir, bool_t df, void *buffer) { return hvmemul_do_io_buffer(1, mmio_gpa, reps, size, dir, df, buffer); @@ -520,11 +490,8 @@ static int hvmemul_do_mmio_buffer(paddr_t mmio_gpa, * Each access will be done to/from successive RAM *and* MMIO addresses, * increasing if is 0 or decreasing if is 1. */ -static int hvmemul_do_mmio_addr(paddr_t mmio_gpa, - unsigned long *reps, - unsigned int size, - uint8_t dir, - bool_t df, +static int hvmemul_do_mmio_addr(paddr_t mmio_gpa, unsigned long *reps, + unsigned int size, uint8_t dir, bool_t df, paddr_t ram_gpa) { return hvmemul_do_io_addr(1, mmio_gpa, reps, size, dir, df, ram_gpa); @@ -538,14 +505,14 @@ static int hvmemul_do_mmio_addr(paddr_t mmio_gpa, * In debug builds, map() checks that each slot in hvmemul_ctxt->mfn[] is * clean before use, and poisions unused slots with INVALID_MFN. */ -static void *hvmemul_map_linear_addr( - unsigned long linear, unsigned int bytes, uint32_t pfec, - struct hvm_emulate_ctxt *hvmemul_ctxt) +static void *hvmemul_map_linear_addr(unsigned long linear, unsigned int bytes, + uint32_t pfec, + struct hvm_emulate_ctxt *hvmemul_ctxt) { struct vcpu *curr = current; void *err, *mapping; - unsigned int nr_frames = ((linear + bytes - !!bytes) >> PAGE_SHIFT) - - (linear >> PAGE_SHIFT) + 1; + unsigned int nr_frames = + ((linear + bytes - !!bytes) >> PAGE_SHIFT) - (linear >> PAGE_SHIFT) + 1; unsigned int i; /* @@ -563,8 +530,7 @@ static void *hvmemul_map_linear_addr( * The higher level emulation callers are responsible for ensuring that * mfns[] is large enough for the requested write size. */ - if ( bytes == 0 || - nr_frames > ARRAY_SIZE(hvmemul_ctxt->mfn) ) + if ( bytes == 0 || nr_frames > ARRAY_SIZE(hvmemul_ctxt->mfn) ) { ASSERT_UNREACHABLE(); goto unhandleable; @@ -576,7 +542,8 @@ static void *hvmemul_map_linear_addr( struct page_info *page; pagefault_info_t pfinfo; p2m_type_t p2mt; - unsigned long addr = i ? (linear + (i << PAGE_SHIFT)) & PAGE_MASK : linear; + unsigned long addr = + i ? (linear + (i << PAGE_SHIFT)) & PAGE_MASK : linear; if ( hvmemul_ctxt->ctxt.addr_size < 64 ) addr = (uint32_t)addr; @@ -584,10 +551,10 @@ static void *hvmemul_map_linear_addr( /* Error checking. Confirm that the current slot is clean. 
*/ ASSERT(mfn_x(*mfn) == 0); - res = hvm_translate_get_page(curr, addr, true, pfec, - &pfinfo, &page, NULL, &p2mt); + res = hvm_translate_get_page(curr, addr, true, pfec, &pfinfo, &page, + NULL, &p2mt); - switch ( res ) + switch (res) { case HVMTRANS_okay: break; @@ -635,8 +602,7 @@ static void *hvmemul_map_linear_addr( if ( nr_frames == 1 ) mapping = map_domain_page(hvmemul_ctxt->mfn[0]); /* Multiple frames? Need to vmap(). */ - else if ( (mapping = vmap(hvmemul_ctxt->mfn, - nr_frames)) == NULL ) + else if ( (mapping = vmap(hvmemul_ctxt->mfn, nr_frames)) == NULL ) goto unhandleable; #ifndef NDEBUG /* Poision unused mfn[]s with INVALID_MFN. */ @@ -648,10 +614,10 @@ static void *hvmemul_map_linear_addr( #endif return mapping + (linear & ~PAGE_MASK); - unhandleable: +unhandleable: err = ERR_PTR(~X86EMUL_UNHANDLEABLE); - out: +out: /* Drop all held references. */ while ( mfn-- > hvmemul_ctxt->mfn ) put_page(mfn_to_page(*mfn)); @@ -659,13 +625,13 @@ static void *hvmemul_map_linear_addr( return err; } -static void hvmemul_unmap_linear_addr( - void *mapping, unsigned long linear, unsigned int bytes, - struct hvm_emulate_ctxt *hvmemul_ctxt) +static void hvmemul_unmap_linear_addr(void *mapping, unsigned long linear, + unsigned int bytes, + struct hvm_emulate_ctxt *hvmemul_ctxt) { struct domain *currd = current->domain; - unsigned int nr_frames = ((linear + bytes - !!bytes) >> PAGE_SHIFT) - - (linear >> PAGE_SHIFT) + 1; + unsigned int nr_frames = + ((linear + bytes - !!bytes) >> PAGE_SHIFT) - (linear >> PAGE_SHIFT) + 1; unsigned int i; mfn_t *mfn = &hvmemul_ctxt->mfn[0]; @@ -700,13 +666,10 @@ static void hvmemul_unmap_linear_addr( * the valid computed range. It is always >0 when X86EMUL_OKAY is returned. * @pfec indicates the access checks to be performed during page-table walks. */ -static int hvmemul_linear_to_phys( - unsigned long addr, - paddr_t *paddr, - unsigned int bytes_per_rep, - unsigned long *reps, - uint32_t pfec, - struct hvm_emulate_ctxt *hvmemul_ctxt) +static int hvmemul_linear_to_phys(unsigned long addr, paddr_t *paddr, + unsigned int bytes_per_rep, + unsigned long *reps, uint32_t pfec, + struct hvm_emulate_ctxt *hvmemul_ctxt) { struct vcpu *curr = current; unsigned long pfn, npfn, done, todo, i, offset = addr & ~PAGE_MASK; @@ -733,13 +696,14 @@ static int hvmemul_linear_to_phys( /* Do page-straddling first iteration forwards via recursion. 
*/ paddr_t _paddr; unsigned long one_rep = 1; - int rc = hvmemul_linear_to_phys( - addr, &_paddr, bytes_per_rep, &one_rep, pfec, hvmemul_ctxt); + int rc = hvmemul_linear_to_phys(addr, &_paddr, bytes_per_rep, &one_rep, + pfec, hvmemul_ctxt); if ( rc != X86EMUL_OKAY ) return rc; pfn = _paddr >> PAGE_SHIFT; } - else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == gfn_x(INVALID_GFN) ) + else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == + gfn_x(INVALID_GFN) ) { if ( pfec & (PFEC_page_paged | PFEC_page_shared) ) return X86EMUL_RETRY; @@ -782,16 +746,13 @@ static int hvmemul_linear_to_phys( *paddr = ((paddr_t)pfn << PAGE_SHIFT) | offset; return X86EMUL_OKAY; } - -static int hvmemul_virtual_to_linear( - enum x86_segment seg, - unsigned long offset, - unsigned int bytes_per_rep, - unsigned long *reps, - enum hvm_access_type access_type, - struct hvm_emulate_ctxt *hvmemul_ctxt, - unsigned long *linear) +static int hvmemul_virtual_to_linear(enum x86_segment seg, unsigned long offset, + unsigned int bytes_per_rep, + unsigned long *reps, + enum hvm_access_type access_type, + struct hvm_emulate_ctxt *hvmemul_ctxt, + unsigned long *linear) { struct segment_register *reg; int okay; @@ -811,7 +772,7 @@ static int hvmemul_virtual_to_linear( */ if ( unlikely(current->domain->arch.mem_access_emulate_each_rep) && current->arch.vm_event->emulate_flags != 0 ) - max_reps = 1; + max_reps = 1; /* * Clip repetitions to avoid overflow when multiplying by @bytes_per_rep. @@ -860,16 +821,16 @@ static int hvmemul_virtual_to_linear( */ *reps = 0; if ( is_x86_user_segment(seg) ) - x86_emul_hw_exception((seg == x86_seg_ss) - ? TRAP_stack_error - : TRAP_gp_fault, 0, &hvmemul_ctxt->ctxt); + x86_emul_hw_exception((seg == x86_seg_ss) ? TRAP_stack_error + : TRAP_gp_fault, + 0, &hvmemul_ctxt->ctxt); return X86EMUL_EXCEPTION; } -static int hvmemul_phys_mmio_access( - struct hvm_mmio_cache *cache, paddr_t gpa, unsigned int size, uint8_t dir, - uint8_t *buffer, unsigned int offset) +static int hvmemul_phys_mmio_access(struct hvm_mmio_cache *cache, paddr_t gpa, + unsigned int size, uint8_t dir, + uint8_t *buffer, unsigned int offset) { unsigned long one_rep = 1; unsigned int chunk; @@ -896,8 +857,8 @@ static int hvmemul_phys_mmio_access( */ ASSERT(size != 0); chunk = 1u << (fls(size) - 1); - if ( chunk > sizeof (long) ) - chunk = sizeof (long); + if ( chunk > sizeof(long) ) + chunk = sizeof(long); for ( ;; ) { @@ -908,7 +869,8 @@ static int hvmemul_phys_mmio_access( if ( dir == IOREQ_READ ) memcpy(&buffer[offset], &cache->buffer[offset], chunk); - else if ( memcmp(&buffer[offset], &cache->buffer[offset], chunk) != 0 ) + else if ( memcmp(&buffer[offset], &cache->buffer[offset], chunk) != + 0 ) domain_crash(current->domain); } else @@ -951,30 +913,29 @@ static int hvmemul_phys_mmio_access( * subsequent cycles) by looking up the result or previous I/O in a * cache indexed by linear MMIO address. 
*/ -static struct hvm_mmio_cache *hvmemul_find_mmio_cache( - struct hvm_vcpu_io *vio, unsigned long gla, uint8_t dir) +static struct hvm_mmio_cache * +hvmemul_find_mmio_cache(struct hvm_vcpu_io *vio, unsigned long gla, uint8_t dir) { unsigned int i; struct hvm_mmio_cache *cache; - for ( i = 0; i < vio->mmio_cache_count; i ++ ) + for ( i = 0; i < vio->mmio_cache_count; i++ ) { cache = &vio->mmio_cache[i]; - if ( gla == cache->gla && - dir == cache->dir ) + if ( gla == cache->gla && dir == cache->dir ) return cache; } i = vio->mmio_cache_count++; - if( i == ARRAY_SIZE(vio->mmio_cache) ) + if ( i == ARRAY_SIZE(vio->mmio_cache) ) { domain_crash(current->domain); return NULL; } cache = &vio->mmio_cache[i]; - memset(cache, 0, sizeof (*cache)); + memset(cache, 0, sizeof(*cache)); cache->gla = gla; cache->dir = dir; @@ -990,14 +951,14 @@ static void latch_linear_to_phys(struct hvm_vcpu_io *vio, unsigned long gla, vio->mmio_gla = gla & PAGE_MASK; vio->mmio_gpfn = PFN_DOWN(gpa); - vio->mmio_access = (struct npfec){ .gla_valid = 1, - .read_access = 1, - .write_access = write }; + vio->mmio_access = + (struct npfec){.gla_valid = 1, .read_access = 1, .write_access = write}; } -static int hvmemul_linear_mmio_access( - unsigned long gla, unsigned int size, uint8_t dir, void *buffer, - uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt, bool_t known_gpfn) +static int hvmemul_linear_mmio_access(unsigned long gla, unsigned int size, + uint8_t dir, void *buffer, uint32_t pfec, + struct hvm_emulate_ctxt *hvmemul_ctxt, + bool_t known_gpfn) { struct hvm_vcpu_io *vio = ¤t->arch.hvm.hvm_io; unsigned long offset = gla & ~PAGE_MASK; @@ -1026,7 +987,8 @@ static int hvmemul_linear_mmio_access( for ( ;; ) { - rc = hvmemul_phys_mmio_access(cache, gpa, chunk, dir, buffer, buffer_offset); + rc = hvmemul_phys_mmio_access(cache, gpa, chunk, dir, buffer, + buffer_offset); if ( rc != X86EMUL_OKAY ) break; @@ -1047,22 +1009,22 @@ static int hvmemul_linear_mmio_access( return rc; } -static inline int hvmemul_linear_mmio_read( - unsigned long gla, unsigned int size, void *buffer, - uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt, - bool_t translate) +static inline int +hvmemul_linear_mmio_read(unsigned long gla, unsigned int size, void *buffer, + uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt, + bool_t translate) { - return hvmemul_linear_mmio_access(gla, size, IOREQ_READ, buffer, - pfec, hvmemul_ctxt, translate); + return hvmemul_linear_mmio_access(gla, size, IOREQ_READ, buffer, pfec, + hvmemul_ctxt, translate); } -static inline int hvmemul_linear_mmio_write( - unsigned long gla, unsigned int size, void *buffer, - uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt, - bool_t translate) +static inline int +hvmemul_linear_mmio_write(unsigned long gla, unsigned int size, void *buffer, + uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt, + bool_t translate) { - return hvmemul_linear_mmio_access(gla, size, IOREQ_WRITE, buffer, - pfec, hvmemul_ctxt, translate); + return hvmemul_linear_mmio_access(gla, size, IOREQ_WRITE, buffer, pfec, + hvmemul_ctxt, translate); } static bool known_gla(unsigned long addr, unsigned int bytes, uint32_t pfec) @@ -1080,7 +1042,7 @@ static bool known_gla(unsigned long addr, unsigned int bytes, uint32_t pfec) return false; } else if ( !vio->mmio_access.read_access ) - return false; + return false; return (vio->mmio_gla == (addr & PAGE_MASK) && (addr & ~PAGE_MASK) + bytes <= PAGE_SIZE); @@ -1092,7 +1054,7 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data, 
pagefault_info_t pfinfo; int rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo); - switch ( rc ) + switch (rc) { unsigned int offset, part1; @@ -1117,8 +1079,8 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data, part1 = PAGE_SIZE - offset; rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt); if ( rc == X86EMUL_OKAY ) - rc = linear_read(addr + part1, bytes - part1, p_data + part1, - pfec, hvmemul_ctxt); + rc = linear_read(addr + part1, bytes - part1, p_data + part1, pfec, + hvmemul_ctxt); return rc; case HVMTRANS_gfn_paged_out: @@ -1135,7 +1097,7 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data, pagefault_info_t pfinfo; int rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo); - switch ( rc ) + switch (rc) { unsigned int offset, part1; @@ -1157,8 +1119,8 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data, part1 = PAGE_SIZE - offset; rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt); if ( rc == X86EMUL_OKAY ) - rc = linear_write(addr + part1, bytes - part1, p_data + part1, - pfec, hvmemul_ctxt); + rc = linear_write(addr + part1, bytes - part1, p_data + part1, pfec, + hvmemul_ctxt); return rc; case HVMTRANS_gfn_paged_out: @@ -1169,13 +1131,10 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data, return X86EMUL_UNHANDLEABLE; } -static int __hvmemul_read( - enum x86_segment seg, - unsigned long offset, - void *p_data, - unsigned int bytes, - enum hvm_access_type access_type, - struct hvm_emulate_ctxt *hvmemul_ctxt) +static int __hvmemul_read(enum x86_segment seg, unsigned long offset, + void *p_data, unsigned int bytes, + enum hvm_access_type access_type, + struct hvm_emulate_ctxt *hvmemul_ctxt) { unsigned long addr, reps = 1; uint32_t pfec = PFEC_page_present; @@ -1188,20 +1147,17 @@ static int __hvmemul_read( if ( access_type == hvm_access_insn_fetch ) pfec |= PFEC_insn_fetch; - rc = hvmemul_virtual_to_linear( - seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr); + rc = hvmemul_virtual_to_linear(seg, offset, bytes, &reps, access_type, + hvmemul_ctxt, &addr); if ( rc != X86EMUL_OKAY || !bytes ) return rc; return linear_read(addr, bytes, p_data, pfec, hvmemul_ctxt); } -static int hvmemul_read( - enum x86_segment seg, - unsigned long offset, - void *p_data, - unsigned int bytes, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_read(enum x86_segment seg, unsigned long offset, + void *p_data, unsigned int bytes, + struct x86_emulate_ctxt *ctxt) { struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct hvm_emulate_ctxt, ctxt); @@ -1209,17 +1165,12 @@ static int hvmemul_read( if ( unlikely(hvmemul_ctxt->set_context) ) return set_context_data(p_data, bytes); - return __hvmemul_read( - seg, offset, p_data, bytes, hvm_access_read, - container_of(ctxt, struct hvm_emulate_ctxt, ctxt)); + return __hvmemul_read(seg, offset, p_data, bytes, hvm_access_read, + container_of(ctxt, struct hvm_emulate_ctxt, ctxt)); } -int hvmemul_insn_fetch( - enum x86_segment seg, - unsigned long offset, - void *p_data, - unsigned int bytes, - struct x86_emulate_ctxt *ctxt) +int hvmemul_insn_fetch(enum x86_segment seg, unsigned long offset, void *p_data, + unsigned int bytes, struct x86_emulate_ctxt *ctxt) { struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct hvm_emulate_ctxt, ctxt); @@ -1230,8 +1181,7 @@ int hvmemul_insn_fetch( * Fall back if requested bytes are not in the prefetch cache. 
* But always perform the (fake) read when bytes == 0. */ - if ( !bytes || - unlikely((insn_off + bytes) > hvmemul_ctxt->insn_buf_bytes) ) + if ( !bytes || unlikely((insn_off + bytes) > hvmemul_ctxt->insn_buf_bytes) ) { int rc = __hvmemul_read(seg, offset, p_data, bytes, hvm_access_insn_fetch, hvmemul_ctxt); @@ -1261,12 +1211,9 @@ int hvmemul_insn_fetch( return X86EMUL_OKAY; } -static int hvmemul_write( - enum x86_segment seg, - unsigned long offset, - void *p_data, - unsigned int bytes, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_write(enum x86_segment seg, unsigned long offset, + void *p_data, unsigned int bytes, + struct x86_emulate_ctxt *ctxt) { struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct hvm_emulate_ctxt, ctxt); @@ -1280,8 +1227,8 @@ static int hvmemul_write( else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 ) pfec |= PFEC_user_mode; - rc = hvmemul_virtual_to_linear( - seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr); + rc = hvmemul_virtual_to_linear(seg, offset, bytes, &reps, hvm_access_write, + hvmemul_ctxt, &addr); if ( rc != X86EMUL_OKAY || !bytes ) return rc; @@ -1289,7 +1236,7 @@ static int hvmemul_write( { mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt); if ( IS_ERR(mapping) ) - return ~PTR_ERR(mapping); + return ~PTR_ERR(mapping); } if ( !mapping ) @@ -1302,13 +1249,10 @@ static int hvmemul_write( return X86EMUL_OKAY; } -static int hvmemul_rmw( - enum x86_segment seg, - unsigned long offset, - unsigned int bytes, - uint32_t *eflags, - struct x86_emulate_state *state, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_rmw(enum x86_segment seg, unsigned long offset, + unsigned int bytes, uint32_t *eflags, + struct x86_emulate_state *state, + struct x86_emulate_ctxt *ctxt) { struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct hvm_emulate_ctxt, ctxt); @@ -1317,8 +1261,8 @@ static int hvmemul_rmw( int rc; void *mapping = NULL; - rc = hvmemul_virtual_to_linear( - seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr); + rc = hvmemul_virtual_to_linear(seg, offset, bytes, &reps, hvm_access_write, + hvmemul_ctxt, &addr); if ( rc != X86EMUL_OKAY || !bytes ) return rc; @@ -1355,114 +1299,85 @@ static int hvmemul_rmw( return rc; } -static int hvmemul_write_discard( - enum x86_segment seg, - unsigned long offset, - void *p_data, - unsigned int bytes, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_write_discard(enum x86_segment seg, unsigned long offset, + void *p_data, unsigned int bytes, + struct x86_emulate_ctxt *ctxt) { /* Discarding the write. 
*/ return X86EMUL_OKAY; } -static int hvmemul_rep_ins_discard( - uint16_t src_port, - enum x86_segment dst_seg, - unsigned long dst_offset, - unsigned int bytes_per_rep, - unsigned long *reps, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_rep_ins_discard(uint16_t src_port, enum x86_segment dst_seg, + unsigned long dst_offset, + unsigned int bytes_per_rep, + unsigned long *reps, + struct x86_emulate_ctxt *ctxt) { return X86EMUL_OKAY; } -static int hvmemul_rep_movs_discard( - enum x86_segment src_seg, - unsigned long src_offset, - enum x86_segment dst_seg, - unsigned long dst_offset, - unsigned int bytes_per_rep, - unsigned long *reps, - struct x86_emulate_ctxt *ctxt) +static int +hvmemul_rep_movs_discard(enum x86_segment src_seg, unsigned long src_offset, + enum x86_segment dst_seg, unsigned long dst_offset, + unsigned int bytes_per_rep, unsigned long *reps, + struct x86_emulate_ctxt *ctxt) { return X86EMUL_OKAY; } -static int hvmemul_rep_stos_discard( - void *p_data, - enum x86_segment seg, - unsigned long offset, - unsigned int bytes_per_rep, - unsigned long *reps, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_rep_stos_discard(void *p_data, enum x86_segment seg, + unsigned long offset, + unsigned int bytes_per_rep, + unsigned long *reps, + struct x86_emulate_ctxt *ctxt) { return X86EMUL_OKAY; } -static int hvmemul_rep_outs_discard( - enum x86_segment src_seg, - unsigned long src_offset, - uint16_t dst_port, - unsigned int bytes_per_rep, - unsigned long *reps, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_rep_outs_discard(enum x86_segment src_seg, + unsigned long src_offset, uint16_t dst_port, + unsigned int bytes_per_rep, + unsigned long *reps, + struct x86_emulate_ctxt *ctxt) { return X86EMUL_OKAY; } -static int hvmemul_cmpxchg_discard( - enum x86_segment seg, - unsigned long offset, - void *p_old, - void *p_new, - unsigned int bytes, - bool lock, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_cmpxchg_discard(enum x86_segment seg, unsigned long offset, + void *p_old, void *p_new, unsigned int bytes, + bool lock, struct x86_emulate_ctxt *ctxt) { return X86EMUL_OKAY; } -static int hvmemul_read_io_discard( - unsigned int port, - unsigned int bytes, - unsigned long *val, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_read_io_discard(unsigned int port, unsigned int bytes, + unsigned long *val, + struct x86_emulate_ctxt *ctxt) { return X86EMUL_OKAY; } -static int hvmemul_write_io_discard( - unsigned int port, - unsigned int bytes, - unsigned long val, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_write_io_discard(unsigned int port, unsigned int bytes, + unsigned long val, + struct x86_emulate_ctxt *ctxt) { return X86EMUL_OKAY; } -static int hvmemul_write_msr_discard( - unsigned int reg, - uint64_t val, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_write_msr_discard(unsigned int reg, uint64_t val, + struct x86_emulate_ctxt *ctxt) { return X86EMUL_OKAY; } -static int hvmemul_wbinvd_discard( - struct x86_emulate_ctxt *ctxt) +static int hvmemul_wbinvd_discard(struct x86_emulate_ctxt *ctxt) { return X86EMUL_OKAY; } -static int hvmemul_cmpxchg( - enum x86_segment seg, - unsigned long offset, - void *p_old, - void *p_new, - unsigned int bytes, - bool lock, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_cmpxchg(enum x86_segment seg, unsigned long offset, + void *p_old, void *p_new, unsigned int bytes, + bool lock, struct x86_emulate_ctxt *ctxt) { struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct hvm_emulate_ctxt, ctxt); @@ -1473,8 
+1388,8 @@ static int hvmemul_cmpxchg( int rc; void *mapping = NULL; - rc = hvmemul_virtual_to_linear( - seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr); + rc = hvmemul_virtual_to_linear(seg, offset, bytes, &reps, hvm_access_write, + hvmemul_ctxt, &addr); if ( rc != X86EMUL_OKAY ) return rc; @@ -1493,15 +1408,18 @@ static int hvmemul_cmpxchg( if ( !mapping ) { /* Fix this in case the guest is really relying on r-m-w atomicity. */ - return hvmemul_linear_mmio_write(addr, bytes, p_new, pfec, - hvmemul_ctxt, + return hvmemul_linear_mmio_write(addr, bytes, p_new, pfec, hvmemul_ctxt, vio->mmio_access.write_access && - vio->mmio_gla == (addr & PAGE_MASK)); + vio->mmio_gla == + (addr & PAGE_MASK)); } - switch ( bytes ) + switch (bytes) { - case 1: case 2: case 4: case 8: + case 1: + case 2: + case 4: + case 8: { unsigned long old = 0, new = 0, cur; @@ -1549,24 +1467,20 @@ static int hvmemul_cmpxchg( return rc; } -static int hvmemul_validate( - const struct x86_emulate_state *state, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_validate(const struct x86_emulate_state *state, + struct x86_emulate_ctxt *ctxt) { const struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct hvm_emulate_ctxt, ctxt); return !hvmemul_ctxt->validate || hvmemul_ctxt->validate(state, ctxt) - ? X86EMUL_OKAY : X86EMUL_UNHANDLEABLE; + ? X86EMUL_OKAY + : X86EMUL_UNHANDLEABLE; } -static int hvmemul_rep_ins( - uint16_t src_port, - enum x86_segment dst_seg, - unsigned long dst_offset, - unsigned int bytes_per_rep, - unsigned long *reps, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_rep_ins(uint16_t src_port, enum x86_segment dst_seg, + unsigned long dst_offset, unsigned int bytes_per_rep, + unsigned long *reps, struct x86_emulate_ctxt *ctxt) { struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct hvm_emulate_ctxt, ctxt); @@ -1576,21 +1490,20 @@ static int hvmemul_rep_ins( p2m_type_t p2mt; int rc; - rc = hvmemul_virtual_to_linear( - dst_seg, dst_offset, bytes_per_rep, reps, hvm_access_write, - hvmemul_ctxt, &addr); + rc = hvmemul_virtual_to_linear(dst_seg, dst_offset, bytes_per_rep, reps, + hvm_access_write, hvmemul_ctxt, &addr); if ( rc != X86EMUL_OKAY ) return rc; if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 ) pfec |= PFEC_user_mode; - rc = hvmemul_linear_to_phys( - addr, &gpa, bytes_per_rep, reps, pfec, hvmemul_ctxt); + rc = hvmemul_linear_to_phys(addr, &gpa, bytes_per_rep, reps, pfec, + hvmemul_ctxt); if ( rc != X86EMUL_OKAY ) return rc; - (void) get_gfn_query_unlocked(current->domain, gpa >> PAGE_SHIFT, &p2mt); + (void)get_gfn_query_unlocked(current->domain, gpa >> PAGE_SHIFT, &p2mt); if ( p2mt == p2m_mmio_direct || p2mt == p2m_mmio_dm ) return X86EMUL_UNHANDLEABLE; @@ -1598,10 +1511,9 @@ static int hvmemul_rep_ins( !!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa); } -static int hvmemul_rep_outs_set_context( - uint16_t dst_port, - unsigned int bytes_per_rep, - unsigned long *reps) +static int hvmemul_rep_outs_set_context(uint16_t dst_port, + unsigned int bytes_per_rep, + unsigned long *reps) { const struct arch_vm_event *ev = current->arch.vm_event; const uint8_t *ptr; @@ -1638,13 +1550,9 @@ static int hvmemul_rep_outs_set_context( return rc; } -static int hvmemul_rep_outs( - enum x86_segment src_seg, - unsigned long src_offset, - uint16_t dst_port, - unsigned int bytes_per_rep, - unsigned long *reps, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_rep_outs(enum x86_segment src_seg, unsigned long src_offset, + uint16_t dst_port, unsigned int bytes_per_rep, + unsigned 
long *reps, struct x86_emulate_ctxt *ctxt) { struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct hvm_emulate_ctxt, ctxt); @@ -1657,21 +1565,20 @@ static int hvmemul_rep_outs( if ( unlikely(hvmemul_ctxt->set_context) ) return hvmemul_rep_outs_set_context(dst_port, bytes_per_rep, reps); - rc = hvmemul_virtual_to_linear( - src_seg, src_offset, bytes_per_rep, reps, hvm_access_read, - hvmemul_ctxt, &addr); + rc = hvmemul_virtual_to_linear(src_seg, src_offset, bytes_per_rep, reps, + hvm_access_read, hvmemul_ctxt, &addr); if ( rc != X86EMUL_OKAY ) return rc; if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 ) pfec |= PFEC_user_mode; - rc = hvmemul_linear_to_phys( - addr, &gpa, bytes_per_rep, reps, pfec, hvmemul_ctxt); + rc = hvmemul_linear_to_phys(addr, &gpa, bytes_per_rep, reps, pfec, + hvmemul_ctxt); if ( rc != X86EMUL_OKAY ) return rc; - (void) get_gfn_query_unlocked(current->domain, gpa >> PAGE_SHIFT, &p2mt); + (void)get_gfn_query_unlocked(current->domain, gpa >> PAGE_SHIFT, &p2mt); if ( p2mt == p2m_mmio_direct || p2mt == p2m_mmio_dm ) return X86EMUL_UNHANDLEABLE; @@ -1679,14 +1586,10 @@ static int hvmemul_rep_outs( !!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa); } -static int hvmemul_rep_movs( - enum x86_segment src_seg, - unsigned long src_offset, - enum x86_segment dst_seg, - unsigned long dst_offset, - unsigned int bytes_per_rep, - unsigned long *reps, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_rep_movs(enum x86_segment src_seg, unsigned long src_offset, + enum x86_segment dst_seg, unsigned long dst_offset, + unsigned int bytes_per_rep, unsigned long *reps, + struct x86_emulate_ctxt *ctxt) { struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct hvm_emulate_ctxt, ctxt); @@ -1698,15 +1601,13 @@ static int hvmemul_rep_movs( int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF); char *buf; - rc = hvmemul_virtual_to_linear( - src_seg, src_offset, bytes_per_rep, reps, hvm_access_read, - hvmemul_ctxt, &saddr); + rc = hvmemul_virtual_to_linear(src_seg, src_offset, bytes_per_rep, reps, + hvm_access_read, hvmemul_ctxt, &saddr); if ( rc != X86EMUL_OKAY ) return rc; - rc = hvmemul_virtual_to_linear( - dst_seg, dst_offset, bytes_per_rep, reps, hvm_access_write, - hvmemul_ctxt, &daddr); + rc = hvmemul_virtual_to_linear(dst_seg, dst_offset, bytes_per_rep, reps, + hvm_access_write, hvmemul_ctxt, &daddr); if ( rc != X86EMUL_OKAY ) return rc; @@ -1750,8 +1651,8 @@ static int hvmemul_rep_movs( } /* Check for MMIO ops */ - (void) get_gfn_query_unlocked(current->domain, sgpa >> PAGE_SHIFT, &sp2mt); - (void) get_gfn_query_unlocked(current->domain, dgpa >> PAGE_SHIFT, &dp2mt); + (void)get_gfn_query_unlocked(current->domain, sgpa >> PAGE_SHIFT, &sp2mt); + (void)get_gfn_query_unlocked(current->domain, dgpa >> PAGE_SHIFT, &dp2mt); if ( sp2mt == p2m_mmio_direct || dp2mt == p2m_mmio_direct || (sp2mt == p2m_mmio_dm && dp2mt == p2m_mmio_dm) ) @@ -1760,15 +1661,15 @@ static int hvmemul_rep_movs( if ( sp2mt == p2m_mmio_dm ) { latch_linear_to_phys(vio, saddr, sgpa, 0); - return hvmemul_do_mmio_addr( - sgpa, reps, bytes_per_rep, IOREQ_READ, df, dgpa); + return hvmemul_do_mmio_addr(sgpa, reps, bytes_per_rep, IOREQ_READ, df, + dgpa); } if ( dp2mt == p2m_mmio_dm ) { latch_linear_to_phys(vio, daddr, dgpa, 1); - return hvmemul_do_mmio_addr( - dgpa, reps, bytes_per_rep, IOREQ_WRITE, df, sgpa); + return hvmemul_do_mmio_addr(dgpa, reps, bytes_per_rep, IOREQ_WRITE, df, + sgpa); } /* RAM-to-RAM copy: emulate as equivalent of memmove(dgpa, sgpa, bytes). 
*/ @@ -1799,7 +1700,7 @@ static int hvmemul_rep_movs( { rc = set_context_data(buf, bytes); - if ( rc != X86EMUL_OKAY) + if ( rc != X86EMUL_OKAY ) { xfree(buf); return rc; @@ -1826,8 +1727,9 @@ static int hvmemul_rep_movs( return X86EMUL_RETRY; if ( rc != HVMTRANS_okay ) { - gdprintk(XENLOG_WARNING, "Failed memory-to-memory REP MOVS: sgpa=%" - PRIpaddr" dgpa=%"PRIpaddr" reps=%lu bytes_per_rep=%u\n", + gdprintk(XENLOG_WARNING, + "Failed memory-to-memory REP MOVS: sgpa=%" PRIpaddr + " dgpa=%" PRIpaddr " reps=%lu bytes_per_rep=%u\n", sgpa, dgpa, *reps, bytes_per_rep); return X86EMUL_UNHANDLEABLE; } @@ -1835,13 +1737,9 @@ static int hvmemul_rep_movs( return X86EMUL_OKAY; } -static int hvmemul_rep_stos( - void *p_data, - enum x86_segment seg, - unsigned long offset, - unsigned int bytes_per_rep, - unsigned long *reps, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_rep_stos(void *p_data, enum x86_segment seg, + unsigned long offset, unsigned int bytes_per_rep, + unsigned long *reps, struct x86_emulate_ctxt *ctxt) { struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct hvm_emulate_ctxt, ctxt); @@ -1880,14 +1778,14 @@ static int hvmemul_rep_stos( /* Check for MMIO op */ (void)get_gfn_query_unlocked(current->domain, gpa >> PAGE_SHIFT, &p2mt); - switch ( p2mt ) + switch (p2mt) { unsigned long bytes; char *buf; default: /* Allocate temporary buffer. */ - for ( ; ; ) + for ( ;; ) { bytes = *reps * bytes_per_rep; buf = xmalloc_bytes(bytes); @@ -1899,22 +1797,21 @@ static int hvmemul_rep_stos( if ( !buf ) buf = p_data; else - switch ( bytes_per_rep ) + switch (bytes_per_rep) { unsigned long dummy; -#define CASE(bits, suffix) \ - case (bits) / 8: \ - asm ( "rep stos" #suffix \ - : "=m" (*buf), \ - "=D" (dummy), "=c" (dummy) \ - : "a" (*(const uint##bits##_t *)p_data), \ - "1" (buf), "2" (*reps) : "memory" ); \ - break - CASE(8, b); - CASE(16, w); - CASE(32, l); - CASE(64, q); +#define CASE(bits, suffix) \ + case (bits) / 8: \ + asm("rep stos" #suffix \ + : "=m"(*buf), "=D"(dummy), "=c"(dummy) \ + : "a"(*(const uint##bits##_t *)p_data), "1"(buf), "2"(*reps) \ + : "memory"); \ + break + CASE(8, b); + CASE(16, w); + CASE(32, l); + CASE(64, q); #undef CASE default: @@ -1932,7 +1829,7 @@ static int hvmemul_rep_stos( if ( buf != p_data ) xfree(buf); - switch ( rc ) + switch (rc) { case HVMTRANS_gfn_paged_out: case HVMTRANS_gfn_shared: @@ -1942,7 +1839,8 @@ static int hvmemul_rep_stos( } gdprintk(XENLOG_WARNING, - "Failed REP STOS: gpa=%"PRIpaddr" reps=%lu bytes_per_rep=%u\n", + "Failed REP STOS: gpa=%" PRIpaddr + " reps=%lu bytes_per_rep=%u\n", gpa, *reps, bytes_per_rep); /* fall through */ case p2m_mmio_direct: @@ -1955,27 +1853,25 @@ static int hvmemul_rep_stos( } } -static int hvmemul_read_segment( - enum x86_segment seg, - struct segment_register *reg, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_read_segment(enum x86_segment seg, + struct segment_register *reg, + struct x86_emulate_ctxt *ctxt) { struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct hvm_emulate_ctxt, ctxt); struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt); if ( IS_ERR(sreg) ) - return -PTR_ERR(sreg); + return -PTR_ERR(sreg); *reg = *sreg; return X86EMUL_OKAY; } -static int hvmemul_write_segment( - enum x86_segment seg, - const struct segment_register *reg, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_write_segment(enum x86_segment seg, + const struct segment_register *reg, + struct x86_emulate_ctxt *ctxt) { struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct 
hvm_emulate_ctxt, ctxt); @@ -1991,11 +1887,8 @@ static int hvmemul_write_segment( return X86EMUL_OKAY; } -static int hvmemul_read_io( - unsigned int port, - unsigned int bytes, - unsigned long *val, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_read_io(unsigned int port, unsigned int bytes, + unsigned long *val, struct x86_emulate_ctxt *ctxt) { struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct hvm_emulate_ctxt, ctxt); @@ -2008,21 +1901,16 @@ static int hvmemul_read_io( return hvmemul_do_pio_buffer(port, bytes, IOREQ_READ, val); } -static int hvmemul_write_io( - unsigned int port, - unsigned int bytes, - unsigned long val, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_write_io(unsigned int port, unsigned int bytes, + unsigned long val, struct x86_emulate_ctxt *ctxt) { return hvmemul_do_pio_buffer(port, bytes, IOREQ_WRITE, &val); } -static int hvmemul_read_cr( - unsigned int reg, - unsigned long *val, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_read_cr(unsigned int reg, unsigned long *val, + struct x86_emulate_ctxt *ctxt) { - switch ( reg ) + switch (reg) { case 0: case 2: @@ -2038,15 +1926,13 @@ static int hvmemul_read_cr( return X86EMUL_UNHANDLEABLE; } -static int hvmemul_write_cr( - unsigned int reg, - unsigned long val, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_write_cr(unsigned int reg, unsigned long val, + struct x86_emulate_ctxt *ctxt) { int rc; HVMTRACE_LONG_2D(CR_WRITE, reg, TRC_PAR_LONG(val)); - switch ( reg ) + switch (reg) { case 0: rc = hvm_set_cr0(val, true); @@ -2076,10 +1962,8 @@ static int hvmemul_write_cr( return rc; } -static int hvmemul_read_xcr( - unsigned int reg, - uint64_t *val, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_read_xcr(unsigned int reg, uint64_t *val, + struct x86_emulate_ctxt *ctxt) { int rc = x86emul_read_xcr(reg, val, ctxt); @@ -2089,20 +1973,16 @@ static int hvmemul_read_xcr( return rc; } -static int hvmemul_write_xcr( - unsigned int reg, - uint64_t val, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_write_xcr(unsigned int reg, uint64_t val, + struct x86_emulate_ctxt *ctxt) { HVMTRACE_LONG_2D(XCR_WRITE, reg, TRC_PAR_LONG(val)); return x86emul_write_xcr(reg, val, ctxt); } -static int hvmemul_read_msr( - unsigned int reg, - uint64_t *val, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_read_msr(unsigned int reg, uint64_t *val, + struct x86_emulate_ctxt *ctxt) { int rc = hvm_msr_read_intercept(reg, val); @@ -2112,10 +1992,8 @@ static int hvmemul_read_msr( return rc; } -static int hvmemul_write_msr( - unsigned int reg, - uint64_t val, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_write_msr(unsigned int reg, uint64_t val, + struct x86_emulate_ctxt *ctxt) { int rc = hvm_msr_write_intercept(reg, val, true); @@ -2125,23 +2003,21 @@ static int hvmemul_write_msr( return rc; } -static int hvmemul_wbinvd( - struct x86_emulate_ctxt *ctxt) +static int hvmemul_wbinvd(struct x86_emulate_ctxt *ctxt) { hvm_funcs.wbinvd_intercept(); return X86EMUL_OKAY; } -int hvmemul_cpuid(uint32_t leaf, uint32_t subleaf, - struct cpuid_leaf *res, struct x86_emulate_ctxt *ctxt) +int hvmemul_cpuid(uint32_t leaf, uint32_t subleaf, struct cpuid_leaf *res, + struct x86_emulate_ctxt *ctxt) { guest_cpuid(current, leaf, subleaf, res); return X86EMUL_OKAY; } -static int hvmemul_get_fpu( - enum x86_emulate_fpu_type type, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_get_fpu(enum x86_emulate_fpu_type type, + struct x86_emulate_ctxt *ctxt) { struct vcpu *curr = current; @@ -2170,9 +2046,9 @@ static int 
hvmemul_get_fpu( { uint16_t fcw; - asm ( "fnstcw %0" : "=m" (fcw) ); + asm("fnstcw %0" : "=m"(fcw)); if ( (fcw & 0x3f) == 0x3f ) - asm ( "fldcw %0" :: "m" (fpu_ctxt->fcw) ); + asm("fldcw %0" ::"m"(fpu_ctxt->fcw)); else ASSERT(fcw == fpu_ctxt->fcw); } @@ -2181,10 +2057,9 @@ static int hvmemul_get_fpu( return X86EMUL_OKAY; } -static void hvmemul_put_fpu( - struct x86_emulate_ctxt *ctxt, - enum x86_emulate_fpu_type backout, - const struct x86_emul_fpu_aux *aux) +static void hvmemul_put_fpu(struct x86_emulate_ctxt *ctxt, + enum x86_emulate_fpu_type backout, + const struct x86_emul_fpu_aux *aux) { struct vcpu *curr = current; @@ -2210,7 +2085,7 @@ static void hvmemul_put_fpu( !(fpu_ctxt->fsw & ~fpu_ctxt->fcw & 0x003f) ) dval = false; - switch ( mode ) + switch (mode) { case 8: fpu_ctxt->fip.addr = aux->ip; @@ -2219,18 +2094,20 @@ static void hvmemul_put_fpu( fpu_ctxt->x[FPU_WORD_SIZE_OFFSET] = 8; break; - case 4: case 2: + case 4: + case 2: fpu_ctxt->fip.offs = aux->ip; - fpu_ctxt->fip.sel = aux->cs; + fpu_ctxt->fip.sel = aux->cs; if ( dval ) { fpu_ctxt->fdp.offs = aux->dp; - fpu_ctxt->fdp.sel = aux->ds; + fpu_ctxt->fdp.sel = aux->ds; } fpu_ctxt->x[FPU_WORD_SIZE_OFFSET] = mode; break; - case 0: case 1: + case 0: + case 1: fpu_ctxt->fip.addr = aux->ip | (aux->cs << 4); if ( dval ) fpu_ctxt->fdp.addr = aux->dp | (aux->ds << 4); @@ -2268,18 +2145,16 @@ static void hvmemul_put_fpu( } } -static int hvmemul_invlpg( - enum x86_segment seg, - unsigned long offset, - struct x86_emulate_ctxt *ctxt) +static int hvmemul_invlpg(enum x86_segment seg, unsigned long offset, + struct x86_emulate_ctxt *ctxt) { struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct hvm_emulate_ctxt, ctxt); unsigned long addr, reps = 1; int rc; - rc = hvmemul_virtual_to_linear( - seg, offset, 1, &reps, hvm_access_none, hvmemul_ctxt, &addr); + rc = hvmemul_virtual_to_linear(seg, offset, 1, &reps, hvm_access_none, + hvmemul_ctxt, &addr); if ( rc == X86EMUL_EXCEPTION ) { @@ -2301,8 +2176,7 @@ static int hvmemul_invlpg( return rc; } -static int hvmemul_vmfunc( - struct x86_emulate_ctxt *ctxt) +static int hvmemul_vmfunc(struct x86_emulate_ctxt *ctxt) { int rc; @@ -2316,63 +2190,63 @@ static int hvmemul_vmfunc( } static const struct x86_emulate_ops hvm_emulate_ops = { - .read = hvmemul_read, - .insn_fetch = hvmemul_insn_fetch, - .write = hvmemul_write, - .rmw = hvmemul_rmw, - .cmpxchg = hvmemul_cmpxchg, - .validate = hvmemul_validate, - .rep_ins = hvmemul_rep_ins, - .rep_outs = hvmemul_rep_outs, - .rep_movs = hvmemul_rep_movs, - .rep_stos = hvmemul_rep_stos, - .read_segment = hvmemul_read_segment, + .read = hvmemul_read, + .insn_fetch = hvmemul_insn_fetch, + .write = hvmemul_write, + .rmw = hvmemul_rmw, + .cmpxchg = hvmemul_cmpxchg, + .validate = hvmemul_validate, + .rep_ins = hvmemul_rep_ins, + .rep_outs = hvmemul_rep_outs, + .rep_movs = hvmemul_rep_movs, + .rep_stos = hvmemul_rep_stos, + .read_segment = hvmemul_read_segment, .write_segment = hvmemul_write_segment, - .read_io = hvmemul_read_io, - .write_io = hvmemul_write_io, - .read_cr = hvmemul_read_cr, - .write_cr = hvmemul_write_cr, - .read_xcr = hvmemul_read_xcr, - .write_xcr = hvmemul_write_xcr, - .read_msr = hvmemul_read_msr, - .write_msr = hvmemul_write_msr, - .wbinvd = hvmemul_wbinvd, - .cpuid = hvmemul_cpuid, - .get_fpu = hvmemul_get_fpu, - .put_fpu = hvmemul_put_fpu, - .invlpg = hvmemul_invlpg, - .vmfunc = hvmemul_vmfunc, + .read_io = hvmemul_read_io, + .write_io = hvmemul_write_io, + .read_cr = hvmemul_read_cr, + .write_cr = hvmemul_write_cr, + .read_xcr = 
hvmemul_read_xcr, + .write_xcr = hvmemul_write_xcr, + .read_msr = hvmemul_read_msr, + .write_msr = hvmemul_write_msr, + .wbinvd = hvmemul_wbinvd, + .cpuid = hvmemul_cpuid, + .get_fpu = hvmemul_get_fpu, + .put_fpu = hvmemul_put_fpu, + .invlpg = hvmemul_invlpg, + .vmfunc = hvmemul_vmfunc, }; static const struct x86_emulate_ops hvm_emulate_ops_no_write = { - .read = hvmemul_read, - .insn_fetch = hvmemul_insn_fetch, - .write = hvmemul_write_discard, - .cmpxchg = hvmemul_cmpxchg_discard, - .rep_ins = hvmemul_rep_ins_discard, - .rep_outs = hvmemul_rep_outs_discard, - .rep_movs = hvmemul_rep_movs_discard, - .rep_stos = hvmemul_rep_stos_discard, - .read_segment = hvmemul_read_segment, + .read = hvmemul_read, + .insn_fetch = hvmemul_insn_fetch, + .write = hvmemul_write_discard, + .cmpxchg = hvmemul_cmpxchg_discard, + .rep_ins = hvmemul_rep_ins_discard, + .rep_outs = hvmemul_rep_outs_discard, + .rep_movs = hvmemul_rep_movs_discard, + .rep_stos = hvmemul_rep_stos_discard, + .read_segment = hvmemul_read_segment, .write_segment = hvmemul_write_segment, - .read_io = hvmemul_read_io_discard, - .write_io = hvmemul_write_io_discard, - .read_cr = hvmemul_read_cr, - .write_cr = hvmemul_write_cr, - .read_xcr = hvmemul_read_xcr, - .write_xcr = hvmemul_write_xcr, - .read_msr = hvmemul_read_msr, - .write_msr = hvmemul_write_msr_discard, - .wbinvd = hvmemul_wbinvd_discard, - .cpuid = hvmemul_cpuid, - .get_fpu = hvmemul_get_fpu, - .put_fpu = hvmemul_put_fpu, - .invlpg = hvmemul_invlpg, - .vmfunc = hvmemul_vmfunc, + .read_io = hvmemul_read_io_discard, + .write_io = hvmemul_write_io_discard, + .read_cr = hvmemul_read_cr, + .write_cr = hvmemul_write_cr, + .read_xcr = hvmemul_read_xcr, + .write_xcr = hvmemul_write_xcr, + .read_msr = hvmemul_read_msr, + .write_msr = hvmemul_write_msr_discard, + .wbinvd = hvmemul_wbinvd_discard, + .cpuid = hvmemul_cpuid, + .get_fpu = hvmemul_get_fpu, + .put_fpu = hvmemul_put_fpu, + .invlpg = hvmemul_invlpg, + .vmfunc = hvmemul_vmfunc, }; static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt, - const struct x86_emulate_ops *ops) + const struct x86_emulate_ops *ops) { const struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs; struct vcpu *curr = current; @@ -2437,8 +2311,7 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt, return rc; } -int hvm_emulate_one( - struct hvm_emulate_ctxt *hvmemul_ctxt) +int hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt) { return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops); } @@ -2446,18 +2319,18 @@ int hvm_emulate_one( int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla) { static const struct x86_emulate_ops hvm_intercept_ops_mmcfg = { - .read = x86emul_unhandleable_rw, + .read = x86emul_unhandleable_rw, .insn_fetch = hvmemul_insn_fetch, - .write = mmcfg_intercept_write, - .cpuid = hvmemul_cpuid, + .write = mmcfg_intercept_write, + .cpuid = hvmemul_cpuid, }; static const struct x86_emulate_ops hvm_ro_emulate_ops_mmio = { - .read = x86emul_unhandleable_rw, + .read = x86emul_unhandleable_rw, .insn_fetch = hvmemul_insn_fetch, - .write = mmio_ro_emulated_write, - .cpuid = hvmemul_cpuid, + .write = mmio_ro_emulated_write, + .cpuid = hvmemul_cpuid, }; - struct mmio_ro_emulate_ctxt mmio_ro_ctxt = { .cr2 = gla }; + struct mmio_ro_emulate_ctxt mmio_ro_ctxt = {.cr2 = gla}; struct hvm_emulate_ctxt ctxt; const struct x86_emulate_ops *ops; unsigned int seg, bdf; @@ -2472,11 +2345,10 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla) else ops = &hvm_ro_emulate_ops_mmio; - hvm_emulate_init_once(&ctxt, 
x86_insn_is_mem_write, - guest_cpu_user_regs()); + hvm_emulate_init_once(&ctxt, x86_insn_is_mem_write, guest_cpu_user_regs()); ctxt.ctxt.data = &mmio_ro_ctxt; rc = _hvm_emulate_one(&ctxt, ops); - switch ( rc ) + switch (rc) { case X86EMUL_UNHANDLEABLE: case X86EMUL_UNIMPLEMENTED: @@ -2493,19 +2365,20 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla) } void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr, - unsigned int errcode) + unsigned int errcode) { - struct hvm_emulate_ctxt ctx = {{ 0 }}; + struct hvm_emulate_ctxt ctx = {{0}}; int rc; hvm_emulate_init_once(&ctx, NULL, guest_cpu_user_regs()); - switch ( kind ) + switch (kind) { case EMUL_KIND_NOWRITE: rc = _hvm_emulate_one(&ctx, &hvm_emulate_ops_no_write); break; - case EMUL_KIND_SET_CONTEXT_INSN: { + case EMUL_KIND_SET_CONTEXT_INSN: + { struct vcpu *curr = current; struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io; @@ -2527,7 +2400,7 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr, rc = hvm_emulate_one(&ctx); } - switch ( rc ) + switch (rc) { case X86EMUL_RETRY: /* @@ -2553,10 +2426,9 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr, hvm_emulate_writeback(&ctx); } -void hvm_emulate_init_once( - struct hvm_emulate_ctxt *hvmemul_ctxt, - hvm_emulate_validate_t *validate, - struct cpu_user_regs *regs) +void hvm_emulate_init_once(struct hvm_emulate_ctxt *hvmemul_ctxt, + hvm_emulate_validate_t *validate, + struct cpu_user_regs *regs) { struct vcpu *curr = current; @@ -2572,10 +2444,9 @@ void hvm_emulate_init_once( hvmemul_ctxt->ctxt.force_writeback = true; } -void hvm_emulate_init_per_insn( - struct hvm_emulate_ctxt *hvmemul_ctxt, - const unsigned char *insn_buf, - unsigned int insn_bytes) +void hvm_emulate_init_per_insn(struct hvm_emulate_ctxt *hvmemul_ctxt, + const unsigned char *insn_buf, + unsigned int insn_bytes) { struct vcpu *curr = current; unsigned int pfec = PFEC_page_present; @@ -2583,8 +2454,7 @@ void hvm_emulate_init_per_insn( hvmemul_ctxt->ctxt.lma = hvm_long_mode_active(curr); - if ( hvmemul_ctxt->ctxt.lma && - hvmemul_ctxt->seg_reg[x86_seg_cs].l ) + if ( hvmemul_ctxt->ctxt.lma && hvmemul_ctxt->seg_reg[x86_seg_cs].l ) hvmemul_ctxt->ctxt.addr_size = hvmemul_ctxt->ctxt.sp_size = 64; else { @@ -2601,19 +2471,18 @@ void hvm_emulate_init_per_insn( if ( !insn_bytes ) { hvmemul_ctxt->insn_buf_bytes = - hvm_get_insn_bytes(curr, hvmemul_ctxt->insn_buf) ?: - (hvm_virtual_to_linear_addr(x86_seg_cs, - &hvmemul_ctxt->seg_reg[x86_seg_cs], - hvmemul_ctxt->insn_buf_eip, - sizeof(hvmemul_ctxt->insn_buf), - hvm_access_insn_fetch, - &hvmemul_ctxt->seg_reg[x86_seg_cs], - &addr) && - hvm_copy_from_guest_linear(hvmemul_ctxt->insn_buf, addr, - sizeof(hvmemul_ctxt->insn_buf), - pfec | PFEC_insn_fetch, - NULL) == HVMTRANS_okay) ? - sizeof(hvmemul_ctxt->insn_buf) : 0; + hvm_get_insn_bytes(curr, hvmemul_ctxt->insn_buf) + ?: (hvm_virtual_to_linear_addr( + x86_seg_cs, &hvmemul_ctxt->seg_reg[x86_seg_cs], + hvmemul_ctxt->insn_buf_eip, + sizeof(hvmemul_ctxt->insn_buf), hvm_access_insn_fetch, + &hvmemul_ctxt->seg_reg[x86_seg_cs], &addr) && + hvm_copy_from_guest_linear(hvmemul_ctxt->insn_buf, addr, + sizeof(hvmemul_ctxt->insn_buf), + pfec | PFEC_insn_fetch, + NULL) == HVMTRANS_okay) + ? 
sizeof(hvmemul_ctxt->insn_buf) + : 0; } else { @@ -2622,8 +2491,7 @@ void hvm_emulate_init_per_insn( } } -void hvm_emulate_writeback( - struct hvm_emulate_ctxt *hvmemul_ctxt) +void hvm_emulate_writeback(struct hvm_emulate_ctxt *hvmemul_ctxt) { enum x86_segment seg; @@ -2634,8 +2502,7 @@ void hvm_emulate_writeback( { hvm_set_segment_register(current, seg, &hvmemul_ctxt->seg_reg[seg]); seg = find_next_bit(&hvmemul_ctxt->seg_reg_dirty, - ARRAY_SIZE(hvmemul_ctxt->seg_reg), - seg+1); + ARRAY_SIZE(hvmemul_ctxt->seg_reg), seg + 1); } } @@ -2643,9 +2510,8 @@ void hvm_emulate_writeback( * Callers which pass a known in-range x86_segment can rely on the return * pointer being valid. Other callers must explicitly check for errors. */ -struct segment_register *hvmemul_get_seg_reg( - enum x86_segment seg, - struct hvm_emulate_ctxt *hvmemul_ctxt) +struct segment_register * +hvmemul_get_seg_reg(enum x86_segment seg, struct hvm_emulate_ctxt *hvmemul_ctxt) { unsigned int idx = seg; @@ -2659,14 +2525,20 @@ struct segment_register *hvmemul_get_seg_reg( static const char *guest_x86_mode_to_str(int mode) { - switch ( mode ) + switch (mode) { - case 0: return "Real"; - case 1: return "v86"; - case 2: return "16bit"; - case 4: return "32bit"; - case 8: return "64bit"; - default: return "Unknown"; + case 0: + return "Real"; + case 1: + return "v86"; + case 2: + return "16bit"; + case 4: + return "32bit"; + case 8: + return "64bit"; + default: + return "Unknown"; } } @@ -2678,10 +2550,9 @@ void hvm_dump_emulation_state(const char *loglvl, const char *prefix, const struct segment_register *cs = hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt); - printk("%s%s emulation failed (%d): %pv %s @ %04x:%08lx -> %*ph\n", - loglvl, prefix, rc, curr, mode_str, cs->sel, - hvmemul_ctxt->insn_buf_eip, hvmemul_ctxt->insn_buf_bytes, - hvmemul_ctxt->insn_buf); + printk("%s%s emulation failed (%d): %pv %s @ %04x:%08lx -> %*ph\n", loglvl, + prefix, rc, curr, mode_str, cs->sel, hvmemul_ctxt->insn_buf_eip, + hvmemul_ctxt->insn_buf_bytes, hvmemul_ctxt->insn_buf); } /* diff --git a/xen/arch/x86/hvm/grant_table.c b/xen/arch/x86/hvm/grant_table.c index ecd7d078ab..3bfa9133d5 100644 --- a/xen/arch/x86/hvm/grant_table.c +++ b/xen/arch/x86/hvm/grant_table.c @@ -25,8 +25,7 @@ #include -int create_grant_p2m_mapping(uint64_t addr, mfn_t frame, - unsigned int flags, +int create_grant_p2m_mapping(uint64_t addr, mfn_t frame, unsigned int flags, unsigned int cache_flags) { p2m_type_t p2mt; @@ -39,8 +38,7 @@ int create_grant_p2m_mapping(uint64_t addr, mfn_t frame, p2mt = p2m_grant_map_ro; else p2mt = p2m_grant_map_rw; - rc = guest_physmap_add_entry(current->domain, - _gfn(addr >> PAGE_SHIFT), + rc = guest_physmap_add_entry(current->domain, _gfn(addr >> PAGE_SHIFT), frame, PAGE_ORDER_4K, p2mt); if ( rc ) return GNTST_general_error; @@ -48,8 +46,8 @@ int create_grant_p2m_mapping(uint64_t addr, mfn_t frame, return GNTST_okay; } -int replace_grant_p2m_mapping(uint64_t addr, mfn_t frame, - uint64_t new_addr, unsigned int flags) +int replace_grant_p2m_mapping(uint64_t addr, mfn_t frame, uint64_t new_addr, + unsigned int flags) { unsigned long gfn = (unsigned long)(addr >> PAGE_SHIFT); p2m_type_t type; @@ -64,7 +62,8 @@ int replace_grant_p2m_mapping(uint64_t addr, mfn_t frame, { put_gfn(d, gfn); gdprintk(XENLOG_WARNING, - "old mapping invalid (type %d, mfn %" PRI_mfn ", frame %"PRI_mfn")\n", + "old mapping invalid (type %d, mfn %" PRI_mfn + ", frame %" PRI_mfn ")\n", type, mfn_x(old_mfn), mfn_x(frame)); return GNTST_general_error; } diff --git 
a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c index a916758106..478f67a8f2 100644 --- a/xen/arch/x86/hvm/hpet.c +++ b/xen/arch/x86/hvm/hpet.c @@ -28,14 +28,14 @@ #include #define domain_vhpet(x) (&(x)->arch.hvm.pl_time->vhpet) -#define vcpu_vhpet(x) (domain_vhpet((x)->domain)) +#define vcpu_vhpet(x) (domain_vhpet((x)->domain)) #define vhpet_domain(x) (container_of(x, struct pl_time, vhpet)->domain) -#define vhpet_vcpu(x) (pt_global_vcpu_target(vhpet_domain(x))) +#define vhpet_vcpu(x) (pt_global_vcpu_target(vhpet_domain(x))) -#define HPET_BASE_ADDRESS 0xfed00000ULL -#define HPET_MMAP_SIZE 1024 -#define S_TO_NS 1000000000ULL /* 1s = 10^9 ns */ -#define S_TO_FS 1000000000000000ULL /* 1s = 10^15 fs */ +#define HPET_BASE_ADDRESS 0xfed00000ULL +#define HPET_MMAP_SIZE 1024 +#define S_TO_NS 1000000000ULL /* 1s = 10^9 ns */ +#define S_TO_FS 1000000000000000ULL /* 1s = 10^15 fs */ /* Frequency_of_Xen_systeme_time / frequency_of_HPET = 16 */ #define STIME_PER_HPET_TICK 16 @@ -45,21 +45,23 @@ /* can be routed to IOAPIC.redirect_table[23..20] */ #define HPET_TN_INT_ROUTE_CAP_VAL MASK_INSR(0x00f00000, HPET_TN_INT_ROUTE_CAP) -#define HPET_TN(reg, addr) (((addr) - HPET_Tn_##reg(0)) / \ - (HPET_Tn_##reg(1) - HPET_Tn_##reg(0))) +#define HPET_TN(reg, addr) \ + (((addr)-HPET_Tn_##reg(0)) / (HPET_Tn_##reg(1) - HPET_Tn_##reg(0))) -#define hpet_tick_to_ns(h, tick) \ - ((s_time_t)((((tick) > (h)->hpet_to_ns_limit) ? \ - ~0ULL : (tick) * (h)->hpet_to_ns_scale) >> 10)) +#define hpet_tick_to_ns(h, tick) \ + ((s_time_t)((((tick) > (h)->hpet_to_ns_limit) \ + ? ~0ULL \ + : (tick) * (h)->hpet_to_ns_scale) >> \ + 10)) -#define timer_config(h, n) (h->hpet.timers[n].config) -#define timer_enabled(h, n) (timer_config(h, n) & HPET_TN_ENABLE) -#define timer_is_periodic(h, n) (timer_config(h, n) & HPET_TN_PERIODIC) -#define timer_is_32bit(h, n) (timer_config(h, n) & HPET_TN_32BIT) -#define hpet_enabled(h) (h->hpet.config & HPET_CFG_ENABLE) -#define timer_level(h, n) (timer_config(h, n) & HPET_TN_LEVEL) +#define timer_config(h, n) (h->hpet.timers[n].config) +#define timer_enabled(h, n) (timer_config(h, n) & HPET_TN_ENABLE) +#define timer_is_periodic(h, n) (timer_config(h, n) & HPET_TN_PERIODIC) +#define timer_is_32bit(h, n) (timer_config(h, n) & HPET_TN_32BIT) +#define hpet_enabled(h) (h->hpet.config & HPET_CFG_ENABLE) +#define timer_level(h, n) (timer_config(h, n) & HPET_TN_LEVEL) -#define timer_int_route(h, n) MASK_EXTR(timer_config(h, n), HPET_TN_ROUTE) +#define timer_int_route(h, n) MASK_EXTR(timer_config(h, n), HPET_TN_ROUTE) #define timer_int_route_cap(h, n) \ MASK_EXTR(timer_config(h, n), HPET_TN_INT_ROUTE_CAP) @@ -90,7 +92,7 @@ static uint64_t hpet_get_comparator(HPETState *h, unsigned int tn, { /* update comparator by number of periods elapsed since last update */ uint64_t period = h->hpet.period[tn]; - if (period) + if ( period ) { elapsed = hpet_read_maincounter(h, guest_time) - comparator; if ( (int64_t)elapsed >= 0 ) @@ -112,7 +114,7 @@ static inline uint64_t hpet_read64(HPETState *h, unsigned long addr, { addr &= ~7; - switch ( addr ) + switch (addr) { case HPET_ID: return h->hpet.capability; @@ -129,10 +131,10 @@ static inline uint64_t hpet_read64(HPETState *h, unsigned long addr, case HPET_Tn_CMP(0): case HPET_Tn_CMP(1): case HPET_Tn_CMP(2): - return hpet_get_comparator(h, - array_index_nospec(HPET_TN(CMP, addr), - ARRAY_SIZE(h->hpet.timers)), - guest_time); + return hpet_get_comparator( + h, + array_index_nospec(HPET_TN(CMP, addr), ARRAY_SIZE(h->hpet.timers)), + guest_time); case HPET_Tn_ROUTE(0): case 
HPET_Tn_ROUTE(1): case HPET_Tn_ROUTE(2): @@ -142,8 +144,8 @@ static inline uint64_t hpet_read64(HPETState *h, unsigned long addr, return 0; } -static inline int hpet_check_access_length( - unsigned long addr, unsigned long len) +static inline int hpet_check_access_length(unsigned long addr, + unsigned long len) { if ( (addr & (len - 1)) || (len > 8) ) { @@ -152,17 +154,18 @@ static inline int hpet_check_access_length( * in unexpected behaviour or master abort, but should not crash/hang. * Hence we read all-ones, drop writes, and log a warning. */ - gdprintk(XENLOG_WARNING, "HPET: access across register boundary: " - "%lx %lx\n", addr, len); + gdprintk(XENLOG_WARNING, + "HPET: access across register boundary: " + "%lx %lx\n", + addr, len); return -EINVAL; } return 0; } -static int hpet_read( - struct vcpu *v, unsigned long addr, unsigned int length, - unsigned long *pval) +static int hpet_read(struct vcpu *v, unsigned long addr, unsigned int length, + unsigned long *pval) { HPETState *h = vcpu_vhpet(v); unsigned long result; @@ -174,7 +177,7 @@ static int hpet_read( goto out; } - addr &= HPET_MMAP_SIZE-1; + addr &= HPET_MMAP_SIZE - 1; if ( hpet_check_access_length(addr, length) != 0 ) { @@ -200,13 +203,12 @@ static int hpet_read( if ( length != 8 ) result = (val >> ((addr & 7) * 8)) & ((1ULL << (length * 8)) - 1); - out: +out: *pval = result; return X86EMUL_OKAY; } -static void hpet_stop_timer(HPETState *h, unsigned int tn, - uint64_t guest_time) +static void hpet_stop_timer(HPETState *h, unsigned int tn, uint64_t guest_time) { ASSERT(tn < HPET_TIMER_NUM); ASSERT(rw_is_write_locked(&h->lock)); @@ -230,10 +232,9 @@ static void hpet_timer_fired(struct vcpu *v, void *data) /* the number of HPET tick that stands for * 1/(2^10) second, namely, 0.9765625 milliseconds */ -#define HPET_TINY_TIME_SPAN ((h->stime_freq >> 10) / STIME_PER_HPET_TICK) +#define HPET_TINY_TIME_SPAN ((h->stime_freq >> 10) / STIME_PER_HPET_TICK) -static void hpet_set_timer(HPETState *h, unsigned int tn, - uint64_t guest_time) +static void hpet_set_timer(HPETState *h, unsigned int tn, uint64_t guest_time) { uint64_t tn_cmp, cur_tick, diff; unsigned int irq; @@ -259,11 +260,11 @@ static void hpet_set_timer(HPETState *h, unsigned int tn, return; } - tn_cmp = hpet_get_comparator(h, tn, guest_time); + tn_cmp = hpet_get_comparator(h, tn, guest_time); cur_tick = hpet_read_maincounter(h, guest_time); if ( timer_is_32bit(h, tn) ) { - tn_cmp = (uint32_t)tn_cmp; + tn_cmp = (uint32_t)tn_cmp; cur_tick = (uint32_t)cur_tick; } @@ -277,7 +278,8 @@ static void hpet_set_timer(HPETState *h, unsigned int tn, */ if ( (int64_t)diff < 0 ) diff = (timer_is_32bit(h, tn) && (-diff > HPET_TINY_TIME_SPAN)) - ? (uint32_t)diff : 0; + ? (uint32_t)diff + : 0; destroy_periodic_time(&h->pt[tn]); if ( (tn <= 1) && (h->hpet.config & HPET_CFG_LEGACY) ) @@ -305,19 +307,17 @@ static void hpet_set_timer(HPETState *h, unsigned int tn, * status register) before another interrupt can be delivered. */ oneshot = !timer_is_periodic(h, tn) || timer_level(h, tn); - TRACE_2_LONG_4D(TRC_HVM_EMUL_HPET_START_TIMER, tn, irq, - TRC_PAR_LONG(hpet_tick_to_ns(h, diff)), - TRC_PAR_LONG(oneshot ? 0LL : - hpet_tick_to_ns(h, h->hpet.period[tn]))); - create_periodic_time(vhpet_vcpu(h), &h->pt[tn], - hpet_tick_to_ns(h, diff), + TRACE_2_LONG_4D( + TRC_HVM_EMUL_HPET_START_TIMER, tn, irq, + TRC_PAR_LONG(hpet_tick_to_ns(h, diff)), + TRC_PAR_LONG(oneshot ? 0LL : hpet_tick_to_ns(h, h->hpet.period[tn]))); + create_periodic_time(vhpet_vcpu(h), &h->pt[tn], hpet_tick_to_ns(h, diff), oneshot ? 
0 : hpet_tick_to_ns(h, h->hpet.period[tn]), irq, timer_level(h, tn) ? hpet_timer_fired : NULL, (void *)(unsigned long)tn, timer_level(h, tn)); } -static inline uint64_t hpet_fixup_reg( - uint64_t new, uint64_t old, uint64_t mask) +static inline uint64_t hpet_fixup_reg(uint64_t new, uint64_t old, uint64_t mask) { new &= mask; new |= old & ~mask; @@ -337,14 +337,12 @@ static void timer_sanitize_int_route(HPETState *h, unsigned int tn) * If the requested interrupt is not valid and the timer is * enabled pick the first irq. */ - timer_config(h, tn) |= - MASK_INSR(find_first_set_bit(timer_int_route_cap(h, tn)), - HPET_TN_ROUTE); + timer_config(h, tn) |= MASK_INSR( + find_first_set_bit(timer_int_route_cap(h, tn)), HPET_TN_ROUTE); } -static int hpet_write( - struct vcpu *v, unsigned long addr, - unsigned int length, unsigned long val) +static int hpet_write(struct vcpu *v, unsigned long addr, unsigned int length, + unsigned long val) { HPETState *h = vcpu_vhpet(v); uint64_t old_val, new_val; @@ -353,15 +351,15 @@ static int hpet_write( /* Acculumate a bit mask of timers whos state is changed by this write. */ unsigned long start_timers = 0; - unsigned long stop_timers = 0; -#define set_stop_timer(n) (__set_bit((n), &stop_timers)) -#define set_start_timer(n) (__set_bit((n), &start_timers)) -#define set_restart_timer(n) (set_stop_timer(n),set_start_timer(n)) + unsigned long stop_timers = 0; +#define set_stop_timer(n) (__set_bit((n), &stop_timers)) +#define set_start_timer(n) (__set_bit((n), &start_timers)) +#define set_restart_timer(n) (set_stop_timer(n), set_start_timer(n)) if ( !v->domain->arch.hvm.params[HVM_PARAM_HPET_ENABLED] ) goto out; - addr &= HPET_MMAP_SIZE-1; + addr &= HPET_MMAP_SIZE - 1; if ( hpet_check_access_length(addr, length) != 0 ) goto out; @@ -372,15 +370,15 @@ static int hpet_write( old_val = hpet_read64(h, addr, guest_time); new_val = val; if ( length != 8 ) - new_val = hpet_fixup_reg( - new_val << (addr & 7) * 8, old_val, - ((1ULL << (length*8)) - 1) << ((addr & 7) * 8)); + new_val = + hpet_fixup_reg(new_val << (addr & 7) * 8, old_val, + ((1ULL << (length * 8)) - 1) << ((addr & 7) * 8)); - switch ( addr & ~7 ) + switch (addr & ~7) { case HPET_CFG: - h->hpet.config = hpet_fixup_reg(new_val, old_val, - HPET_CFG_ENABLE | HPET_CFG_LEGACY); + h->hpet.config = + hpet_fixup_reg(new_val, old_val, HPET_CFG_ENABLE | HPET_CFG_LEGACY); if ( !(old_val & HPET_CFG_ENABLE) && (new_val & HPET_CFG_ENABLE) ) { @@ -389,9 +387,9 @@ static int hpet_write( for ( i = 0; i < HPET_TIMER_NUM; i++ ) { h->hpet.comparator64[i] = - h->hpet.timers[i].config & HPET_TN_32BIT ? - (uint32_t)h->hpet.timers[i].cmp : - h->hpet.timers[i].cmp; + h->hpet.timers[i].config & HPET_TN_32BIT + ? (uint32_t)h->hpet.timers[i].cmp + : h->hpet.timers[i].cmp; if ( timer_enabled(h, i) ) set_start_timer(i); } @@ -446,9 +444,8 @@ static int hpet_write( h->hpet.timers[tn].config = hpet_fixup_reg(new_val, old_val, - (HPET_TN_LEVEL | HPET_TN_ENABLE | - HPET_TN_PERIODIC | HPET_TN_SETVAL | - HPET_TN_32BIT | HPET_TN_ROUTE)); + (HPET_TN_LEVEL | HPET_TN_ENABLE | HPET_TN_PERIODIC | + HPET_TN_SETVAL | HPET_TN_32BIT | HPET_TN_ROUTE)); timer_sanitize_int_route(h, tn); @@ -467,7 +464,7 @@ static int hpet_write( * the right mode. */ set_restart_timer(tn); else if ( (new_val & HPET_TN_32BIT) && - !(old_val & HPET_TN_32BIT) ) + !(old_val & HPET_TN_32BIT) ) /* switching from 64 bit to 32 bit mode could cause timer * next fire time, or period, to change. 
*/ set_restart_timer(tn); @@ -536,14 +533,14 @@ static int hpet_write( } /* stop/start timers whos state was changed by this write. */ - while (stop_timers) + while ( stop_timers ) { i = find_first_set_bit(stop_timers); __clear_bit(i, &stop_timers); hpet_stop_timer(h, i, guest_time); } - while (start_timers) + while ( start_timers ) { i = find_first_set_bit(start_timers); __clear_bit(i, &start_timers); @@ -556,22 +553,18 @@ static int hpet_write( write_unlock(&h->lock); - out: +out: return X86EMUL_OKAY; } static int hpet_range(struct vcpu *v, unsigned long addr) { - return ( (addr >= HPET_BASE_ADDRESS) && - (addr < (HPET_BASE_ADDRESS + HPET_MMAP_SIZE)) ); + return ((addr >= HPET_BASE_ADDRESS) && + (addr < (HPET_BASE_ADDRESS + HPET_MMAP_SIZE))); } static const struct hvm_mmio_ops hpet_mmio_ops = { - .check = hpet_range, - .read = hpet_read, - .write = hpet_write -}; - + .check = hpet_range, .read = hpet_read, .write = hpet_write}; static int hpet_save(struct vcpu *v, hvm_domain_context_t *h) { @@ -585,8 +578,8 @@ static int hpet_save(struct vcpu *v, hvm_domain_context_t *h) v = pt_global_vcpu_target(d); write_lock(&hp->lock); - guest_time = (v->arch.hvm.guest_time ?: hvm_get_guest_time(v)) / - STIME_PER_HPET_TICK; + guest_time = + (v->arch.hvm.guest_time ?: hvm_get_guest_time(v)) / STIME_PER_HPET_TICK; /* Write the proper value into the main counter */ if ( hpet_enabled(hp) ) @@ -711,7 +704,8 @@ static void hpet_set(HPETState *h) h->stime_freq = S_TO_NS; - h->hpet_to_ns_scale = ((S_TO_NS * STIME_PER_HPET_TICK) << 10) / h->stime_freq; + h->hpet_to_ns_scale = + ((S_TO_NS * STIME_PER_HPET_TICK) << 10) / h->stime_freq; h->hpet_to_ns_limit = ~0ULL / h->hpet_to_ns_scale; h->hpet.capability = 0x80860001ULL | @@ -720,12 +714,13 @@ static void hpet_set(HPETState *h) /* This is the number of femptoseconds per HPET tick. */ /* Here we define HPET's frequency to be 1/16 of Xen system time */ - h->hpet.capability |= ((S_TO_FS*STIME_PER_HPET_TICK/h->stime_freq) << 32); + h->hpet.capability |= + ((S_TO_FS * STIME_PER_HPET_TICK / h->stime_freq) << 32); for ( i = 0; i < HPET_TIMER_NUM; i++ ) { - h->hpet.timers[i].config = - HPET_TN_INT_ROUTE_CAP_VAL | HPET_TN_64BIT_CAP | HPET_TN_PERIODIC_CAP; + h->hpet.timers[i].config = HPET_TN_INT_ROUTE_CAP_VAL | + HPET_TN_64BIT_CAP | HPET_TN_PERIODIC_CAP; h->hpet.timers[i].cmp = ~0ULL; h->hpet.comparator64[i] = ~0ULL; h->pt[i].source = PTSRC_isa; diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 8adbb61b57..9f62cacead 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -4,7 +4,7 @@ * Copyright (c) 2004, Intel Corporation. * Copyright (c) 2005, International Business Machines Corporation. * Copyright (c) 2008, Citrix Systems, Inc. - * + * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. @@ -91,8 +91,8 @@ struct hvm_function_table hvm_funcs __read_mostly; * the hardware domain which needs a more permissive one. 
*/ #define HVM_IOBITMAP_SIZE (3 * PAGE_SIZE) -unsigned long __section(".bss.page_aligned") __aligned(PAGE_SIZE) - hvm_io_bitmap[HVM_IOBITMAP_SIZE / BYTES_PER_LONG]; +unsigned long __section(".bss.page_aligned") + __aligned(PAGE_SIZE) hvm_io_bitmap[HVM_IOBITMAP_SIZE / BYTES_PER_LONG]; /* Xen command-line option to enable HAP */ static bool_t __initdata opt_hap_enabled = 1; @@ -113,13 +113,13 @@ static const char __initconst warning_hvm_fep[] = static bool_t __initdata opt_altp2m_enabled = 0; boolean_param("altp2m", opt_altp2m_enabled); -static int cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; int rc = 0; - switch ( action ) + switch (action) { case CPU_UP_PREPARE: rc = hvm_funcs.cpu_up_prepare(cpu); @@ -138,9 +138,7 @@ static int cpu_callback( return !rc ? NOTIFY_DONE : notifier_from_errno(rc); } -static struct notifier_block cpu_nfb = { - .notifier_call = cpu_callback -}; +static struct notifier_block cpu_nfb = {.notifier_call = cpu_callback}; static int __init hvm_enable(void) { @@ -213,7 +211,7 @@ presmp_initcall(hvm_enable); */ int hvm_event_needs_reinjection(uint8_t type, uint8_t vector) { - switch ( type ) + switch (type) { case X86_EVENTTYPE_EXT_INTR: case X86_EVENTTYPE_NMI: @@ -239,14 +237,10 @@ int hvm_event_needs_reinjection(uint8_t type, uint8_t vector) uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2) { const unsigned int contributory_exceptions = - (1 << TRAP_divide_error) | - (1 << TRAP_invalid_tss) | - (1 << TRAP_no_segment) | - (1 << TRAP_stack_error) | - (1 << TRAP_gp_fault); + (1 << TRAP_divide_error) | (1 << TRAP_invalid_tss) | + (1 << TRAP_no_segment) | (1 << TRAP_stack_error) | (1 << TRAP_gp_fault); const unsigned int page_faults = - (1 << TRAP_page_fault) | - (1 << TRAP_virtualisation); + (1 << TRAP_page_fault) | (1 << TRAP_virtualisation); /* Exception during double-fault delivery always causes a triple fault. 
*/ if ( vec1 == TRAP_double_fault ) @@ -272,7 +266,7 @@ void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable) { struct vcpu *v; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) hvm_funcs.set_rdtsc_exiting(v, enable); } @@ -288,7 +282,7 @@ int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat) uint8_t *value = (uint8_t *)&guest_pat; for ( i = 0; i < 8; i++ ) - switch ( value[i] ) + switch (value[i]) { case PAT_TYPE_UC_MINUS: case PAT_TYPE_UNCACHABLE: @@ -298,8 +292,8 @@ int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat) case PAT_TYPE_WRTHROUGH: break; default: - HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid guest PAT: %"PRIx64"\n", - guest_pat); + HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid guest PAT: %" PRIx64 "\n", + guest_pat); return 0; } @@ -311,8 +305,7 @@ int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat) bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val) { - if ( !hvm_funcs.set_guest_bndcfgs || - !is_canonical_address(val) || + if ( !hvm_funcs.set_guest_bndcfgs || !is_canonical_address(val) || (val & IA32_BNDCFGS_RESERVED) ) return false; @@ -369,14 +362,12 @@ u64 hvm_get_tsc_scaling_ratio(u32 gtsc_khz) return 0; /* ratio = (gtsc_khz << hvm_funcs.tsc_scaling.ratio_frac_bits) / cpu_khz */ - asm ( "shldq %[frac],%[gkhz],%[zero] ; " - "shlq %[frac],%[gkhz] ; " - "divq %[hkhz] " - : "=d" (dummy), "=a" (ratio) - : [frac] "c" (ratio_frac_bits), - [gkhz] "a" ((u64) gtsc_khz), - [zero] "d" (0ULL), - [hkhz] "rm" ((u64) cpu_khz) ); + asm("shldq %[frac],%[gkhz],%[zero] ; " + "shlq %[frac],%[gkhz] ; " + "divq %[hkhz] " + : "=d"(dummy), "=a"(ratio) + : [frac] "c"(ratio_frac_bits), [gkhz] "a"((u64)gtsc_khz), + [zero] "d"(0ULL), [hkhz] "rm"((u64)cpu_khz)); return ratio > max_ratio ? 0 : ratio; } @@ -390,10 +381,10 @@ u64 hvm_scale_tsc(const struct domain *d, u64 tsc) return tsc; /* tsc = (tsc * ratio) >> hvm_funcs.tsc_scaling.ratio_frac_bits */ - asm ( "mulq %[ratio]; shrdq %[frac],%%rdx,%[tsc]" - : [tsc] "+a" (tsc), "=&d" (dummy) - : [frac] "c" (hvm_funcs.tsc_scaling.ratio_frac_bits), - [ratio] "rm" (ratio) ); + asm("mulq %[ratio]; shrdq %[frac],%%rdx,%[tsc]" + : [tsc] "+a"(tsc), "=&d"(dummy) + : + [frac] "c"(hvm_funcs.tsc_scaling.ratio_frac_bits), [ratio] "rm"(ratio)); return tsc; } @@ -497,7 +488,7 @@ void hvm_migrate_pirqs(struct vcpu *v) struct domain *d = v->domain; if ( !iommu_enabled || !hvm_domain_irq(d)->dpci ) - return; + return; spin_lock(&d->event_lock); pt_pirq_iterate(d, migrate_pirq, v); @@ -546,8 +537,8 @@ void hvm_do_resume(struct vcpu *v) } } -static int hvm_print_line( - int dir, unsigned int port, unsigned int bytes, uint32_t *val) +static int hvm_print_line(int dir, unsigned int port, unsigned int bytes, + uint32_t *val) { struct domain *cd = current->domain; char c = *val; @@ -584,7 +575,7 @@ int hvm_domain_initialise(struct domain *d) if ( !hvm_enabled ) { gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest " - "on a non-VT/AMDV platform.\n"); + "on a non-VT/AMDV platform.\n"); return -EINVAL; } @@ -603,20 +594,20 @@ int hvm_domain_initialise(struct domain *d) hvm_init_cacheattr_region_list(d); - rc = paging_enable(d, PG_refcounts|PG_translate|PG_external); + rc = paging_enable(d, PG_refcounts | PG_translate | PG_external); if ( rc != 0 ) goto fail0; nr_gsis = is_hardware_domain(d) ? 
nr_irqs_gsi : NR_HVM_DOMU_IRQS; d->arch.hvm.pl_time = xzalloc(struct pl_time); d->arch.hvm.params = xzalloc_array(uint64_t, HVM_NR_PARAMS); - d->arch.hvm.io_handler = xzalloc_array(struct hvm_io_handler, - NR_IO_HANDLERS); + d->arch.hvm.io_handler = + xzalloc_array(struct hvm_io_handler, NR_IO_HANDLERS); d->arch.hvm.irq = xzalloc_bytes(hvm_irq_size(nr_gsis)); rc = -ENOMEM; - if ( !d->arch.hvm.pl_time || !d->arch.hvm.irq || - !d->arch.hvm.params || !d->arch.hvm.io_handler ) + if ( !d->arch.hvm.pl_time || !d->arch.hvm.irq || !d->arch.hvm.params || + !d->arch.hvm.io_handler ) goto fail1; /* Set the number of GSIs */ @@ -672,21 +663,21 @@ int hvm_domain_initialise(struct domain *d) return 0; - fail2: +fail2: rtc_deinit(d); stdvga_deinit(d); vioapic_deinit(d); - fail1: +fail1: if ( is_hardware_domain(d) ) xfree(d->arch.hvm.io_bitmap); xfree(d->arch.hvm.io_handler); xfree(d->arch.hvm.params); xfree(d->arch.hvm.pl_time); xfree(d->arch.hvm.irq); - fail0: +fail0: hvm_destroy_cacheattr_region_list(d); destroy_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0); - fail: +fail: return rc; } @@ -728,7 +719,7 @@ void hvm_domain_destroy(struct domain *d) XFREE(d->arch.hvm.pl_time); XFREE(d->arch.hvm.irq); - list_for_each_safe ( ioport_list, tmp, &d->arch.hvm.g2m_ioport_list ) + list_for_each_safe (ioport_list, tmp, &d->arch.hvm.g2m_ioport_list) { ioport = list_entry(ioport_list, struct g2m_ioport, list); list_del(&ioport->list); @@ -767,8 +758,8 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h) return 0; } -HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust, - hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU); +HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust, hvm_load_tsc_adjust, + 1, HVMSR_PER_VCPU); static int hvm_save_cpu_ctxt(struct vcpu *v, hvm_domain_context_t *h) { @@ -786,8 +777,8 @@ static int hvm_save_cpu_ctxt(struct vcpu *v, hvm_domain_context_t *h) .rsp = v->arch.user_regs.rsp, .rip = v->arch.user_regs.rip, .rflags = v->arch.user_regs.rflags, - .r8 = v->arch.user_regs.r8, - .r9 = v->arch.user_regs.r9, + .r8 = v->arch.user_regs.r8, + .r9 = v->arch.user_regs.r9, .r10 = v->arch.user_regs.r10, .r11 = v->arch.user_regs.r11, .r12 = v->arch.user_regs.r12, @@ -914,12 +905,10 @@ const char *hvm_efer_valid(const struct vcpu *v, uint64_t value, } /* These reserved bits in lower 32 remain 0 after any load of CR0 */ -#define HVM_CR0_GUEST_RESERVED_BITS \ - (~((unsigned long) \ - (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | \ - X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | \ - X86_CR0_WP | X86_CR0_AM | X86_CR0_NW | \ - X86_CR0_CD | X86_CR0_PG))) +#define HVM_CR0_GUEST_RESERVED_BITS \ + (~((unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS | \ + X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ + X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))) /* These bits in CR4 can be set by the guest. */ unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore) @@ -928,27 +917,22 @@ unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore) bool mce, vmxe; /* Logic broken out simply to aid readability below. */ - mce = p->basic.mce || p->basic.mca; + mce = p->basic.mce || p->basic.mca; vmxe = p->basic.vmx && (restore || nestedhvm_enabled(d)); - return ((p->basic.vme ? X86_CR4_VME | X86_CR4_PVI : 0) | - (p->basic.tsc ? X86_CR4_TSD : 0) | - (p->basic.de ? X86_CR4_DE : 0) | - (p->basic.pse ? X86_CR4_PSE : 0) | - (p->basic.pae ? X86_CR4_PAE : 0) | - (mce ? X86_CR4_MCE : 0) | - (p->basic.pge ? X86_CR4_PGE : 0) | - X86_CR4_PCE | - (p->basic.fxsr ? 
X86_CR4_OSFXSR : 0) | - (p->basic.sse ? X86_CR4_OSXMMEXCPT : 0) | - (p->feat.umip ? X86_CR4_UMIP : 0) | - (vmxe ? X86_CR4_VMXE : 0) | - (p->feat.fsgsbase ? X86_CR4_FSGSBASE : 0) | - (p->basic.pcid ? X86_CR4_PCIDE : 0) | - (p->basic.xsave ? X86_CR4_OSXSAVE : 0) | - (p->feat.smep ? X86_CR4_SMEP : 0) | - (p->feat.smap ? X86_CR4_SMAP : 0) | - (p->feat.pku ? X86_CR4_PKE : 0)); + return ( + (p->basic.vme ? X86_CR4_VME | X86_CR4_PVI : 0) | + (p->basic.tsc ? X86_CR4_TSD : 0) | (p->basic.de ? X86_CR4_DE : 0) | + (p->basic.pse ? X86_CR4_PSE : 0) | (p->basic.pae ? X86_CR4_PAE : 0) | + (mce ? X86_CR4_MCE : 0) | (p->basic.pge ? X86_CR4_PGE : 0) | + X86_CR4_PCE | (p->basic.fxsr ? X86_CR4_OSFXSR : 0) | + (p->basic.sse ? X86_CR4_OSXMMEXCPT : 0) | + (p->feat.umip ? X86_CR4_UMIP : 0) | (vmxe ? X86_CR4_VMXE : 0) | + (p->feat.fsgsbase ? X86_CR4_FSGSBASE : 0) | + (p->basic.pcid ? X86_CR4_PCIDE : 0) | + (p->basic.xsave ? X86_CR4_OSXSAVE : 0) | + (p->feat.smep ? X86_CR4_SMEP : 0) | (p->feat.smap ? X86_CR4_SMAP : 0) | + (p->feat.pku ? X86_CR4_PKE : 0)); } static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h) @@ -974,9 +958,8 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h) return -EINVAL; /* Sanity check some control registers. */ - if ( (ctxt.cr0 & HVM_CR0_GUEST_RESERVED_BITS) || - !(ctxt.cr0 & X86_CR0_ET) || - ((ctxt.cr0 & (X86_CR0_PE|X86_CR0_PG)) == X86_CR0_PG) ) + if ( (ctxt.cr0 & HVM_CR0_GUEST_RESERVED_BITS) || !(ctxt.cr0 & X86_CR0_ET) || + ((ctxt.cr0 & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PG) ) { printk(XENLOG_G_ERR "HVM%d restore: bad CR0 %#" PRIx64 "\n", d->domain_id, ctxt.cr0); @@ -993,8 +976,8 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h) errstr = hvm_efer_valid(v, ctxt.msr_efer, MASK_EXTR(ctxt.cr0, X86_CR0_PG)); if ( errstr ) { - printk(XENLOG_G_ERR "%pv: HVM restore: bad EFER %#" PRIx64 " - %s\n", - v, ctxt.msr_efer, errstr); + printk(XENLOG_G_ERR "%pv: HVM restore: bad EFER %#" PRIx64 " - %s\n", v, + ctxt.msr_efer, errstr); return -EINVAL; } @@ -1007,17 +990,17 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h) if ( ctxt.msr_tsc_aux != (uint32_t)ctxt.msr_tsc_aux ) { - printk(XENLOG_G_ERR "%pv: HVM restore: bad MSR_TSC_AUX %#"PRIx64"\n", + printk(XENLOG_G_ERR "%pv: HVM restore: bad MSR_TSC_AUX %#" PRIx64 "\n", v, ctxt.msr_tsc_aux); return -EINVAL; } - /* Older Xen versions used to save the segment arbytes directly + /* Older Xen versions used to save the segment arbytes directly * from the VMCS on Intel hosts. Detect this and rearrange them * into the struct segment_register format. 
*/ -#define UNFOLD_ARBYTES(_r) \ - if ( (_r & 0xf000) && !(_r & 0x0f00) ) \ - _r = ((_r & 0xff) | ((_r >> 4) & 0xf00)) +#define UNFOLD_ARBYTES(_r) \ + if ( (_r & 0xf000) && !(_r & 0x0f00) ) \ + _r = ((_r & 0xff) | ((_r >> 4) & 0xf00)) UNFOLD_ARBYTES(ctxt.cs_arbytes); UNFOLD_ARBYTES(ctxt.ds_arbytes); UNFOLD_ARBYTES(ctxt.es_arbytes); @@ -1113,8 +1096,8 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h) v->arch.user_regs.rsp = ctxt.rsp; v->arch.user_regs.rip = ctxt.rip; v->arch.user_regs.rflags = ctxt.rflags | X86_EFLAGS_MBS; - v->arch.user_regs.r8 = ctxt.r8; - v->arch.user_regs.r9 = ctxt.r9; + v->arch.user_regs.r8 = ctxt.r8; + v->arch.user_regs.r9 = ctxt.r9; v->arch.user_regs.r10 = ctxt.r10; v->arch.user_regs.r11 = ctxt.r11; v->arch.user_regs.r12 = ctxt.r12; @@ -1125,8 +1108,8 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h) v->arch.dr[1] = ctxt.dr1; v->arch.dr[2] = ctxt.dr2; v->arch.dr[3] = ctxt.dr3; - v->arch.dr6 = ctxt.dr6; - v->arch.dr7 = ctxt.dr7; + v->arch.dr6 = ctxt.dr6; + v->arch.dr7 = ctxt.dr7; v->arch.vgc_flags = VGCF_online; @@ -1141,9 +1124,8 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h) HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt, 1, HVMSR_PER_VCPU); -#define HVM_CPU_XSAVE_SIZE(xcr0) (offsetof(struct hvm_hw_cpu_xsave, \ - save_area) + \ - xstate_ctxt_size(xcr0)) +#define HVM_CPU_XSAVE_SIZE(xcr0) \ + (offsetof(struct hvm_hw_cpu_xsave, save_area) + xstate_ctxt_size(xcr0)) static int hvm_save_cpu_xsave_states(struct vcpu *v, hvm_domain_context_t *h) { @@ -1152,7 +1134,7 @@ static int hvm_save_cpu_xsave_states(struct vcpu *v, hvm_domain_context_t *h) int err; if ( !cpu_has_xsave || !xsave_enabled(v) ) - return 0; /* do nothing */ + return 0; /* do nothing */ err = _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, size); if ( err ) @@ -1207,31 +1189,33 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h) /* Customized checking for entry since our entry is of variable length */ desc = (struct hvm_save_descriptor *)&h->data[h->cur]; - if ( sizeof (*desc) > h->size - h->cur) + if ( sizeof(*desc) > h->size - h->cur ) { - printk(XENLOG_G_WARNING - "HVM%d.%d restore: not enough data left to read xsave descriptor\n", - d->domain_id, vcpuid); + printk( + XENLOG_G_WARNING + "HVM%d.%d restore: not enough data left to read xsave descriptor\n", + d->domain_id, vcpuid); return -ENODATA; } - if ( desc->length + sizeof (*desc) > h->size - h->cur) + if ( desc->length + sizeof(*desc) > h->size - h->cur ) { - printk(XENLOG_G_WARNING - "HVM%d.%d restore: not enough data left to read %u xsave bytes\n", - d->domain_id, vcpuid, desc->length); + printk( + XENLOG_G_WARNING + "HVM%d.%d restore: not enough data left to read %u xsave bytes\n", + d->domain_id, vcpuid, desc->length); return -ENODATA; } - if ( desc->length < offsetof(struct hvm_hw_cpu_xsave, save_area) + - XSTATE_AREA_MIN_SIZE ) + if ( desc->length < + offsetof(struct hvm_hw_cpu_xsave, save_area) + XSTATE_AREA_MIN_SIZE ) { printk(XENLOG_G_WARNING "HVM%d.%d restore mismatch: xsave length %u < %zu\n", d->domain_id, vcpuid, desc->length, - offsetof(struct hvm_hw_cpu_xsave, - save_area) + XSTATE_AREA_MIN_SIZE); + offsetof(struct hvm_hw_cpu_xsave, save_area) + + XSTATE_AREA_MIN_SIZE); return -EINVAL; } - h->cur += sizeof (*desc); + h->cur += sizeof(*desc); desc_start = h->cur; ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur]; @@ -1242,8 +1226,9 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t 
*h) if ( err ) { printk(XENLOG_G_WARNING - "HVM%d.%d restore: inconsistent xsave state (feat=%#"PRIx64 - " accum=%#"PRIx64" xcr0=%#"PRIx64" bv=%#"PRIx64" err=%d)\n", + "HVM%d.%d restore: inconsistent xsave state (feat=%#" PRIx64 + " accum=%#" PRIx64 " xcr0=%#" PRIx64 " bv=%#" PRIx64 + " err=%d)\n", d->domain_id, vcpuid, ctxt->xfeature_mask, ctxt->xcr0_accum, ctxt->xcr0, ctxt->save_area.xsave_hdr.xstate_bv, err); return err; @@ -1262,7 +1247,8 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h) if ( h->data[desc_start + i] ) { printk(XENLOG_G_WARNING - "HVM%d.%u restore mismatch: xsave length %#x > %#x (non-zero data at %#x)\n", + "HVM%d.%u restore mismatch: xsave length %#x > %#x " + "(non-zero data at %#x)\n", d->domain_id, vcpuid, desc->length, size, i); return -EOPNOTSUPP; } @@ -1318,7 +1304,7 @@ static int hvm_save_cpu_msrs(struct vcpu *v, hvm_domain_context_t *h) int err; err = _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id, - HVM_CPU_MSR_SIZE(msr_count_max)); + HVM_CPU_MSR_SIZE(msr_count_max)); if ( err ) return err; ctxt = (struct hvm_msr *)&h->data[h->cur]; @@ -1389,14 +1375,15 @@ static int hvm_load_cpu_msrs(struct domain *d, hvm_domain_context_t *h) /* Customized checking for entry since our entry is of variable length */ desc = (struct hvm_save_descriptor *)&h->data[h->cur]; - if ( sizeof (*desc) > h->size - h->cur) + if ( sizeof(*desc) > h->size - h->cur ) { - printk(XENLOG_G_WARNING - "HVM%d.%d restore: not enough data left to read MSR descriptor\n", - d->domain_id, vcpuid); + printk( + XENLOG_G_WARNING + "HVM%d.%d restore: not enough data left to read MSR descriptor\n", + d->domain_id, vcpuid); return -ENODATA; } - if ( desc->length + sizeof (*desc) > h->size - h->cur) + if ( desc->length + sizeof(*desc) > h->size - h->cur ) { printk(XENLOG_G_WARNING "HVM%d.%d restore: not enough data left to read %u MSR bytes\n", @@ -1434,7 +1421,7 @@ static int hvm_load_cpu_msrs(struct domain *d, hvm_domain_context_t *h) for ( i = 0; !err && i < ctxt->count; ++i ) { - switch ( ctxt->msr[i].index ) + switch (ctxt->msr[i].index) { int rc; @@ -1463,9 +1450,7 @@ static int hvm_load_cpu_msrs(struct domain *d, hvm_domain_context_t *h) */ static int __init hvm_register_CPU_save_and_restore(void) { - hvm_register_savevm(CPU_XSAVE_CODE, - "CPU_XSAVE", - hvm_save_cpu_xsave_states, + hvm_register_savevm(CPU_XSAVE_CODE, "CPU_XSAVE", hvm_save_cpu_xsave_states, hvm_load_cpu_xsave_states, HVM_CPU_XSAVE_SIZE(xfeature_mask) + sizeof(struct hvm_save_descriptor), @@ -1475,9 +1460,7 @@ static int __init hvm_register_CPU_save_and_restore(void) msr_count_max += hvm_funcs.init_msr(); if ( msr_count_max ) - hvm_register_savevm(CPU_MSR_CODE, - "CPU_MSR", - hvm_save_cpu_msrs, + hvm_register_savevm(CPU_MSR_CODE, "CPU_MSR", hvm_save_cpu_msrs, hvm_load_cpu_msrs, HVM_CPU_MSR_SIZE(msr_count_max) + sizeof(struct hvm_save_descriptor), @@ -1506,13 +1489,13 @@ int hvm_vcpu_initialise(struct vcpu *v) if ( rc != 0 ) /* teardown: vlapic_destroy */ goto fail2; - if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 ) /* teardown: hvm_funcs.vcpu_destroy */ + if ( (rc = hvm_funcs.vcpu_initialise(v)) != + 0 ) /* teardown: hvm_funcs.vcpu_destroy */ goto fail3; - softirq_tasklet_init( - &v->arch.hvm.assert_evtchn_irq_tasklet, - (void(*)(unsigned long))hvm_assert_evtchn_irq, - (unsigned long)v); + softirq_tasklet_init(&v->arch.hvm.assert_evtchn_irq_tasklet, + (void (*)(unsigned long))hvm_assert_evtchn_irq, + (unsigned long)v); v->arch.hvm.inject_event.vector = HVM_EVENT_VECTOR_UNSET; @@ -1522,8 +1505,8 @@ int 
hvm_vcpu_initialise(struct vcpu *v) vcpu_nestedhvm(v).nv_vvmcxaddr = INVALID_PADDR; - if ( nestedhvm_enabled(d) - && (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */ + if ( nestedhvm_enabled(d) && (rc = nestedhvm_vcpu_initialise(v)) < + 0 ) /* teardown: nestedhvm_vcpu_destroy */ goto fail5; rc = hvm_all_ioreq_servers_add_vcpu(d, v); @@ -1535,24 +1518,24 @@ int hvm_vcpu_initialise(struct vcpu *v) /* NB. All these really belong in hvm_domain_initialise(). */ pmtimer_init(v); hpet_init(d); - + /* Init guest TSC to start from zero. */ hvm_set_guest_tsc(v, 0); } return 0; - fail6: +fail6: nestedhvm_vcpu_destroy(v); - fail5: +fail5: free_compat_arg_xlat(v); - fail4: +fail4: hvm_funcs.vcpu_destroy(v); - fail3: +fail3: vlapic_destroy(v); - fail2: +fail2: hvm_vcpu_cacheattr_destroy(v); - fail1: +fail1: return rc; } @@ -1588,7 +1571,7 @@ void hvm_vcpu_down(struct vcpu *v) /* Any other VCPUs online? ... */ domain_lock(d); - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) if ( !(v->pause_flags & VPF_down) ) online_count++; domain_unlock(d); @@ -1627,8 +1610,7 @@ void hvm_triple_fault(void) struct domain *d = v->domain; u8 reason = d->arch.hvm.params[HVM_PARAM_TRIPLE_FAULT_REASON]; - gprintk(XENLOG_ERR, - "Triple fault - invoking HVM shutdown action %d\n", + gprintk(XENLOG_ERR, "Triple fault - invoking HVM shutdown action %d\n", reason); vcpu_show_execution_state(v); domain_shutdown(d, reason); @@ -1650,14 +1632,14 @@ void hvm_inject_event(const struct x86_event *event) if ( nestedhvm_enabled(curr->domain) && !nestedhvm_vmswitch_in_progress(curr) && nestedhvm_vcpu_in_guestmode(curr) && - nhvm_vmcx_guest_intercepts_event( - curr, event->vector, event->error_code) ) + nhvm_vmcx_guest_intercepts_event(curr, event->vector, + event->error_code) ) { enum nestedhvm_vmexits nsret; nsret = nhvm_vcpu_vmexit_event(curr, event); - switch ( nsret ) + switch (nsret) { case NESTEDHVM_VMEXIT_DONE: case NESTEDHVM_VMEXIT_ERROR: /* L1 guest will crash L2 guest */ @@ -1693,9 +1675,8 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, * If this succeeds, all is fine. * If this fails, inject a nested page fault into the guest. */ - if ( nestedhvm_enabled(currd) - && nestedhvm_vcpu_in_guestmode(curr) - && nestedhvm_paging_mode_hap(curr) ) + if ( nestedhvm_enabled(currd) && nestedhvm_vcpu_in_guestmode(curr) && + nestedhvm_paging_mode_hap(curr) ) { int rv; @@ -1707,11 +1688,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, * the same as for shadow paging. */ - rv = nestedhvm_hap_nested_page_fault(curr, &gpa, - npfec.read_access, - npfec.write_access, - npfec.insn_fetch); - switch (rv) { + rv = nestedhvm_hap_nested_page_fault(curr, &gpa, npfec.read_access, + npfec.write_access, + npfec.insn_fetch); + switch (rv) + { case NESTEDHVM_PAGEFAULT_DONE: case NESTEDHVM_PAGEFAULT_RETRY: return 1; @@ -1752,9 +1733,9 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, * locking order problems later and to handle unshare etc. */ hostp2m = p2m_get_hostp2m(currd); - mfn = get_gfn_type_access(hostp2m, gfn, &p2mt, &p2ma, - P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0), - NULL); + mfn = get_gfn_type_access( + hostp2m, gfn, &p2mt, &p2ma, + P2M_ALLOC | (npfec.write_access ? 
P2M_UNSHARE : 0), NULL); if ( ap2m_active ) { @@ -1782,7 +1763,8 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, case p2m_access_n: case p2m_access_n2rwx: default: - violation = npfec.read_access || npfec.write_access || npfec.insn_fetch; + violation = + npfec.read_access || npfec.write_access || npfec.insn_fetch; break; case p2m_access_r: violation = npfec.write_access || npfec.insn_fetch; @@ -1841,7 +1823,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, * If this GFN is emulated MMIO or marked as read-only, pass the fault * to the mmio handler. */ - if ( (p2mt == p2m_mmio_dm) || + if ( (p2mt == p2m_mmio_dm) || (npfec.write_access && (p2m_is_discard_write(p2mt) || (p2mt == p2m_ioreq_server))) ) { @@ -1859,12 +1841,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, if ( npfec.write_access && (p2mt == p2m_ram_shared) ) { ASSERT(p2m_is_hostp2m(p2m)); - sharing_enomem = - (mem_sharing_unshare_page(currd, gfn, 0) < 0); + sharing_enomem = (mem_sharing_unshare_page(currd, gfn, 0) < 0); rc = 1; goto out_put_gfn; } - + /* Spurious fault? PoD and log-dirty also take this path. */ if ( p2m_is_ram(p2mt) ) { @@ -1904,12 +1885,12 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, * Otherwise, this is an error condition. */ rc = fall_through; - out_put_gfn: +out_put_gfn: __put_gfn(p2m, gfn); if ( ap2m_active ) __put_gfn(hostp2m, gfn); - out: - /* All of these are delayed until we exit, since we might +out: + /* All of these are delayed until we exit, since we might * sleep on event ring wait queues, and we must not hold * locks in such circumstance */ if ( paged ) @@ -1920,7 +1901,8 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, if ( (rv = mem_sharing_notify_enomem(currd, gfn, true)) < 0 ) { - gdprintk(XENLOG_ERR, "Domain %hu attempt to unshare " + gdprintk(XENLOG_ERR, + "Domain %hu attempt to unshare " "gfn %lx, ENOMEM and no helper (rc %d)\n", currd->domain_id, gfn, rv); /* Crash the domain */ @@ -1961,8 +1943,8 @@ int hvm_set_efer(uint64_t value) errstr = hvm_efer_valid(v, value, -1); if ( errstr ) { - printk(XENLOG_G_WARNING - "%pv: Invalid EFER update: %#"PRIx64" -> %#"PRIx64" - %s\n", + printk(XENLOG_G_WARNING "%pv: Invalid EFER update: %#" PRIx64 + " -> %#" PRIx64 " - %s\n", v, v->arch.hvm.guest_efer, value, errstr); return X86EMUL_EXCEPTION; } @@ -2000,8 +1982,8 @@ int hvm_set_efer(uint64_t value) } if ( nestedhvm_enabled(v->domain) && cpu_has_svm && - ((value & EFER_SVME) == 0 ) && - ((value ^ v->arch.hvm.guest_efer) & EFER_SVME) ) + ((value & EFER_SVME) == 0) && + ((value ^ v->arch.hvm.guest_efer) & EFER_SVME) ) { /* Cleared EFER.SVME: Flush all nestedp2m tables */ p2m_flush_nestedp2m(v->domain); @@ -2021,7 +2003,7 @@ static bool_t domain_exit_uc_mode(struct vcpu *v) struct domain *d = v->domain; struct vcpu *vs; - for_each_vcpu ( d, vs ) + for_each_vcpu (d, vs) { if ( (vs == v) || !vs->is_initialised ) continue; @@ -2048,7 +2030,7 @@ int hvm_mov_to_cr(unsigned int cr, unsigned int gpr) HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(val)); HVM_DBG_LOG(DBG_LEVEL_1, "CR%u, value = %lx", cr, val); - switch ( cr ) + switch (cr) { case 0: rc = hvm_set_cr0(val, true); @@ -2077,7 +2059,7 @@ int hvm_mov_to_cr(unsigned int cr, unsigned int gpr) return rc; - exit_and_crash: +exit_and_crash: domain_crash(curr->domain); return X86EMUL_UNHANDLEABLE; } @@ -2087,7 +2069,7 @@ int hvm_mov_from_cr(unsigned int cr, unsigned int gpr) struct vcpu *curr = current; unsigned long val = 0, *reg = decode_gpr(guest_cpu_user_regs(), gpr); - 
switch ( cr ) + switch (cr) { case 0: case 2: @@ -2109,7 +2091,7 @@ int hvm_mov_from_cr(unsigned int cr, unsigned int gpr) return X86EMUL_OKAY; - exit_and_crash: +exit_and_crash: domain_crash(curr->domain); return X86EMUL_UNHANDLEABLE; } @@ -2167,8 +2149,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer) if ( (u32)value != value ) { HVM_DBG_LOG(DBG_LEVEL_1, - "Guest attempts to set upper 32 bits in CR0: %lx", - value); + "Guest attempts to set upper 32 bits in CR0: %lx", value); return X86EMUL_EXCEPTION; } @@ -2236,7 +2217,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer) if ( hvm_pcid_enabled(v) ) { HVM_DBG_LOG(DBG_LEVEL_1, "Guest attempts to clear CR0.PG " - "while CR4.PCIDE=1"); + "while CR4.PCIDE=1"); return X86EMUL_EXCEPTION; } @@ -2254,17 +2235,18 @@ int hvm_set_cr0(unsigned long value, bool may_defer) } } - if ( ((value ^ old_value) & X86_CR0_CD) && - iommu_enabled && hvm_funcs.handle_cd && + if ( ((value ^ old_value) & X86_CR0_CD) && iommu_enabled && + hvm_funcs.handle_cd && (!rangeset_is_empty(d->iomem_caps) || - !rangeset_is_empty(d->arch.ioport_caps) || - has_arch_pdevs(d)) ) + !rangeset_is_empty(d->arch.ioport_caps) || has_arch_pdevs(d)) ) hvm_funcs.handle_cd(v, value); hvm_update_cr(v, 0, value); - if ( (value ^ old_value) & X86_CR0_PG ) { - if ( !nestedhvm_vmswitch_in_progress(v) && nestedhvm_vcpu_in_guestmode(v) ) + if ( (value ^ old_value) & X86_CR0_PG ) + { + if ( !nestedhvm_vmswitch_in_progress(v) && + nestedhvm_vcpu_in_guestmode(v) ) paging_update_nestedmode(v); else paging_update_paging_modes(v); @@ -2306,8 +2288,8 @@ int hvm_set_cr3(unsigned long value, bool may_defer) { /* Shadow-mode CR3 change. Check PDBR and update refcounts. */ HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value); - page = get_page_from_gfn(v->domain, value >> PAGE_SHIFT, - NULL, P2M_ALLOC); + page = + get_page_from_gfn(v->domain, value >> PAGE_SHIFT, NULL, P2M_ALLOC); if ( !page ) goto bad_cr3; @@ -2321,7 +2303,7 @@ int hvm_set_cr3(unsigned long value, bool may_defer) paging_update_cr3(v, noflush); return X86EMUL_OKAY; - bad_cr3: +bad_cr3: gdprintk(XENLOG_ERR, "Invalid CR3\n"); domain_crash(v->domain); return X86EMUL_UNHANDLEABLE; @@ -2335,8 +2317,7 @@ int hvm_set_cr4(unsigned long value, bool may_defer) if ( value & ~hvm_cr4_guest_valid_bits(v->domain, false) ) { HVM_DBG_LOG(DBG_LEVEL_1, - "Guest attempts to set reserved bit in CR4: %lx", - value); + "Guest attempts to set reserved bit in CR4: %lx", value); return X86EMUL_EXCEPTION; } @@ -2345,7 +2326,7 @@ int hvm_set_cr4(unsigned long value, bool may_defer) if ( hvm_long_mode_active(v) ) { HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while " - "EFER.LMA is set"); + "EFER.LMA is set"); return X86EMUL_EXCEPTION; } } @@ -2353,10 +2334,10 @@ int hvm_set_cr4(unsigned long value, bool may_defer) old_cr = v->arch.hvm.guest_cr[4]; if ( (value & X86_CR4_PCIDE) && !(old_cr & X86_CR4_PCIDE) && - (!hvm_long_mode_active(v) || - (v->arch.hvm.guest_cr[3] & 0xfff)) ) + (!hvm_long_mode_active(v) || (v->arch.hvm.guest_cr[3] & 0xfff)) ) { - HVM_DBG_LOG(DBG_LEVEL_1, "Guest attempts to change CR4.PCIDE from " + HVM_DBG_LOG(DBG_LEVEL_1, + "Guest attempts to change CR4.PCIDE from " "0 to 1 while either EFER.LMA=0 or CR3[11:0]!=000H"); return X86EMUL_EXCEPTION; } @@ -2386,7 +2367,8 @@ int hvm_set_cr4(unsigned long value, bool may_defer) (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE | X86_CR4_SMEP)) || (!(value & X86_CR4_PCIDE) && (old_cr & X86_CR4_PCIDE)) ) { - if ( !nestedhvm_vmswitch_in_progress(v) && nestedhvm_vcpu_in_guestmode(v) ) + if ( 
!nestedhvm_vmswitch_in_progress(v) && + nestedhvm_vcpu_in_guestmode(v) ) paging_update_nestedmode(v); else paging_update_paging_modes(v); @@ -2416,14 +2398,12 @@ int hvm_set_cr4(unsigned long value, bool may_defer) return X86EMUL_OKAY; } -bool_t hvm_virtual_to_linear_addr( - enum x86_segment seg, - const struct segment_register *reg, - unsigned long offset, - unsigned int bytes, - enum hvm_access_type access_type, - const struct segment_register *active_cs, - unsigned long *linear_addr) +bool_t hvm_virtual_to_linear_addr(enum x86_segment seg, + const struct segment_register *reg, + unsigned long offset, unsigned int bytes, + enum hvm_access_type access_type, + const struct segment_register *active_cs, + unsigned long *linear_addr) { const struct vcpu *curr = current; unsigned long addr = offset, last_byte; @@ -2495,7 +2475,7 @@ bool_t hvm_virtual_to_linear_addr( /* Read/write restrictions only exist for user segments. */ if ( reg->s ) { - switch ( access_type ) + switch (access_type) { case hvm_access_read: if ( (reg->type & 0xa) == 0x8 ) @@ -2533,7 +2513,7 @@ bool_t hvm_virtual_to_linear_addr( /* All checks ok. */ okay = 1; - out: +out: /* * Always return the correct linear address, even if a permission check * failed. The permissions failure is not relevant to some callers. @@ -2542,12 +2522,13 @@ bool_t hvm_virtual_to_linear_addr( return okay; } -struct hvm_write_map { +struct hvm_write_map +{ struct list_head list; struct page_info *page; }; -/* On non-NULL return, we leave this function holding an additional +/* On non-NULL return, we leave this function holding an additional * ref on the underlying mfn, if any */ static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent, bool_t *writable) @@ -2626,7 +2607,7 @@ void hvm_unmap_guest_frame(void *p, bool_t permanent) unmap_domain_page_global(p); spin_lock(&d->arch.hvm.write_map.lock); - list_for_each_entry(track, &d->arch.hvm.write_map.list, list) + list_for_each_entry (track, &d->arch.hvm.write_map.list, list) if ( track->page == page ) { paging_mark_dirty(d, mfn); @@ -2645,7 +2626,7 @@ void hvm_mapped_guest_frames_mark_dirty(struct domain *d) struct hvm_write_map *track; spin_lock(&d->arch.hvm.write_map.lock); - list_for_each_entry(track, &d->arch.hvm.write_map.list, list) + list_for_each_entry (track, &d->arch.hvm.write_map.list, list) paging_mark_dirty(d, page_to_mfn(track->page)); spin_unlock(&d->arch.hvm.write_map.lock); } @@ -2659,7 +2640,7 @@ static void *hvm_map_entry(unsigned long va, bool_t *writable) if ( ((va & ~PAGE_MASK) + 8) > PAGE_SIZE ) { gdprintk(XENLOG_ERR, "Descriptor table entry " - "straddles page boundary\n"); + "straddles page boundary\n"); goto fail; } @@ -2679,7 +2660,7 @@ static void *hvm_map_entry(unsigned long va, bool_t *writable) return v + (va & ~PAGE_MASK); - fail: +fail: domain_crash(current->domain); return NULL; } @@ -2689,8 +2670,8 @@ static void hvm_unmap_entry(void *p) hvm_unmap_guest_frame(p, 0); } -static int task_switch_load_seg( - enum x86_segment seg, uint16_t sel, unsigned int cpl, unsigned int eflags) +static int task_switch_load_seg(enum x86_segment seg, uint16_t sel, + unsigned int cpl, unsigned int eflags) { struct segment_register desctab, segr; seg_desc_t *pdesc = NULL, desc; @@ -2724,8 +2705,8 @@ static int task_switch_load_seg( if ( (seg == x86_seg_ldtr) && (sel & 4) ) goto fault; - hvm_get_segment_register( - v, (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr, &desctab); + hvm_get_segment_register(v, (sel & 4) ? 
x86_seg_ldtr : x86_seg_gdtr, + &desctab); /* Segment not valid for use (cooked meaning of .p)? */ if ( !desctab.p ) @@ -2743,13 +2724,13 @@ static int task_switch_load_seg( desc = *pdesc; /* LDT descriptor is a system segment. All others are code/data. */ - if ( (desc.b & (1u<<12)) == ((seg == x86_seg_ldtr) << 12) ) + if ( (desc.b & (1u << 12)) == ((seg == x86_seg_ldtr) << 12) ) goto fault; dpl = (desc.b >> 13) & 3; rpl = sel & 3; - switch ( seg ) + switch (seg) { case x86_seg_cs: /* Code segment? */ @@ -2761,27 +2742,27 @@ static int task_switch_load_seg( break; case x86_seg_ss: /* Writable data segment? */ - if ( (desc.b & (_SEGMENT_CODE|_SEGMENT_WR)) != _SEGMENT_WR ) + if ( (desc.b & (_SEGMENT_CODE | _SEGMENT_WR)) != _SEGMENT_WR ) goto fault; if ( (dpl != cpl) || (dpl != rpl) ) goto fault; break; case x86_seg_ldtr: /* LDT system segment? */ - if ( (desc.b & _SEGMENT_TYPE) != (2u<<8) ) + if ( (desc.b & _SEGMENT_TYPE) != (2u << 8) ) goto fault; goto skip_accessed_flag; default: /* Readable code or data segment? */ - if ( (desc.b & (_SEGMENT_CODE|_SEGMENT_WR)) == _SEGMENT_CODE ) + if ( (desc.b & (_SEGMENT_CODE | _SEGMENT_WR)) == _SEGMENT_CODE ) goto fault; /* * Data or non-conforming code segment: * check DPL against RPL and CPL. */ - if ( ((desc.b & (_SEGMENT_EC|_SEGMENT_CODE)) != - (_SEGMENT_EC|_SEGMENT_CODE)) - && ((dpl < cpl) || (dpl < rpl)) ) + if ( ((desc.b & (_SEGMENT_EC | _SEGMENT_CODE)) != + (_SEGMENT_EC | _SEGMENT_CODE)) && + ((dpl < cpl) || (dpl < rpl)) ) goto fault; break; } @@ -2789,25 +2770,24 @@ static int task_switch_load_seg( /* Segment present in memory? */ if ( !(desc.b & _SEGMENT_P) ) { - fault_type = (seg != x86_seg_ss) ? TRAP_no_segment - : TRAP_stack_error; + fault_type = + (seg != x86_seg_ss) ? TRAP_no_segment : TRAP_stack_error; goto fault; } } while ( !(desc.b & 0x100) && /* Ensure Accessed flag is set */ - writable && /* except if we are to discard writes */ + writable && /* except if we are to discard writes */ (cmpxchg(&pdesc->b, desc.b, desc.b | 0x100) != desc.b) ); /* Force the Accessed flag in our local copy. */ desc.b |= 0x100; - skip_accessed_flag: +skip_accessed_flag: hvm_unmap_entry(pdesc); - segr.base = (((desc.b << 0) & 0xff000000u) | - ((desc.b << 16) & 0x00ff0000u) | - ((desc.a >> 16) & 0x0000ffffu)); - segr.attr = (((desc.b >> 8) & 0x00ffu) | - ((desc.b >> 12) & 0x0f00u)); + segr.base = + (((desc.b << 0) & 0xff000000u) | ((desc.b << 16) & 0x00ff0000u) | + ((desc.a >> 16) & 0x0000ffffu)); + segr.attr = (((desc.b >> 8) & 0x00ffu) | ((desc.b >> 12) & 0x0f00u)); segr.limit = (desc.b & 0x000f0000u) | (desc.a & 0x0000ffffu); if ( segr.g ) segr.limit = (segr.limit << 12) | 0xfffu; @@ -2816,23 +2796,25 @@ static int task_switch_load_seg( return 0; - fault: +fault: hvm_unmap_entry(pdesc); hvm_inject_hw_exception(fault_type, sel & 0xfffc); return 1; } -struct tss32 { - uint16_t back_link, :16; +struct tss32 +{ + uint16_t back_link, : 16; uint32_t esp0; - uint16_t ss0, :16; + uint16_t ss0, : 16; uint32_t esp1; - uint16_t ss1, :16; + uint16_t ss1, : 16; uint32_t esp2; - uint16_t ss2, :16; + uint16_t ss2, : 16; uint32_t cr3, eip, eflags, eax, ecx, edx, ebx, esp, ebp, esi, edi; - uint16_t es, :16, cs, :16, ss, :16, ds, :16, fs, :16, gs, :16, ldt, :16; + uint16_t es, : 16, cs, : 16, ss, : 16, ds, : 16, fs, : 16, gs, : 16, + ldt, : 16; uint16_t trace /* :1 */, iomap; }; @@ -2852,7 +2834,9 @@ void hvm_prepare_vm86_tss(struct vcpu *v, uint32_t base, uint32_t limit) * off-by-one mistake), which we deliberately don't fill with all ones. 
*/ uint16_t iomap = (limit >= sizeof(struct tss32) + (0x100 / 8) + (0x400 / 8) - ? sizeof(struct tss32) : 0) + (0x100 / 8); + ? sizeof(struct tss32) + : 0) + + (0x100 / 8); ASSERT(limit >= sizeof(struct tss32) - 1); /* @@ -2863,13 +2847,13 @@ void hvm_prepare_vm86_tss(struct vcpu *v, uint32_t base, uint32_t limit) * argument. */ hvm_copy_to_guest_phys(base, NULL, limit + 1, v); - hvm_copy_to_guest_phys(base + offsetof(struct tss32, iomap), - &iomap, sizeof(iomap), v); + hvm_copy_to_guest_phys(base + offsetof(struct tss32, iomap), &iomap, + sizeof(iomap), v); } -void hvm_task_switch( - uint16_t tss_sel, enum hvm_task_switch_reason taskswitch_reason, - int32_t errcode) +void hvm_task_switch(uint16_t tss_sel, + enum hvm_task_switch_reason taskswitch_reason, + int32_t errcode) { struct vcpu *v = current; struct cpu_user_regs *regs = guest_cpu_user_regs(); @@ -2886,14 +2870,14 @@ void hvm_task_switch( if ( ((tss_sel & 0xfff8) + 7) > gdt.limit ) { - hvm_inject_hw_exception((taskswitch_reason == TSW_iret) ? - TRAP_invalid_tss : TRAP_gp_fault, - tss_sel & 0xfff8); + hvm_inject_hw_exception( + (taskswitch_reason == TSW_iret) ? TRAP_invalid_tss : TRAP_gp_fault, + tss_sel & 0xfff8); goto out; } - optss_desc = hvm_map_entry(gdt.base + (prev_tr.sel & 0xfff8), - &otd_writable); + optss_desc = + hvm_map_entry(gdt.base + (prev_tr.sel & 0xfff8), &otd_writable); if ( optss_desc == NULL ) goto out; @@ -2903,11 +2887,10 @@ void hvm_task_switch( tss_desc = *nptss_desc; tr.sel = tss_sel; - tr.base = (((tss_desc.b << 0) & 0xff000000u) | + tr.base = (((tss_desc.b << 0) & 0xff000000u) | ((tss_desc.b << 16) & 0x00ff0000u) | ((tss_desc.a >> 16) & 0x0000ffffu)); - tr.attr = (((tss_desc.b >> 8) & 0x00ffu) | - ((tss_desc.b >> 12) & 0x0f00u)); + tr.attr = (((tss_desc.b >> 8) & 0x00ffu) | ((tss_desc.b >> 12) & 0x0f00u)); tr.limit = (tss_desc.b & 0x000f0000u) | (tss_desc.a & 0x0000ffffu); if ( tr.g ) tr.limit = (tr.limit << 12) | 0xfffu; @@ -2926,14 +2909,14 @@ void hvm_task_switch( goto out; } - if ( tr.limit < (sizeof(tss)-1) ) + if ( tr.limit < (sizeof(tss) - 1) ) { hvm_inject_hw_exception(TRAP_invalid_tss, tss_sel & 0xfff8); goto out; } - rc = hvm_copy_from_guest_linear( - &tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo); + rc = hvm_copy_from_guest_linear(&tss, prev_tr.base, sizeof(tss), + PFEC_page_present, &pfinfo); if ( rc == HVMTRANS_bad_linear_to_gfn ) hvm_inject_page_fault(pfinfo.ec, pfinfo.linear); if ( rc != HVMTRANS_okay ) @@ -2943,16 +2926,16 @@ void hvm_task_switch( if ( taskswitch_reason == TSW_iret ) eflags &= ~X86_EFLAGS_NT; - tss.eip = regs->eip; + tss.eip = regs->eip; tss.eflags = eflags; - tss.eax = regs->eax; - tss.ecx = regs->ecx; - tss.edx = regs->edx; - tss.ebx = regs->ebx; - tss.esp = regs->esp; - tss.ebp = regs->ebp; - tss.esi = regs->esi; - tss.edi = regs->edi; + tss.eax = regs->eax; + tss.ecx = regs->ecx; + tss.edx = regs->edx; + tss.ebx = regs->ebx; + tss.esp = regs->esp; + tss.ebp = regs->ebp; + tss.esi = regs->esi; + tss.edi = regs->edi; hvm_get_segment_register(v, x86_seg_es, &segr); tss.es = segr.sel; @@ -2969,18 +2952,17 @@ void hvm_task_switch( hvm_get_segment_register(v, x86_seg_ldtr, &segr); tss.ldt = segr.sel; - rc = hvm_copy_to_guest_linear(prev_tr.base + offsetof(typeof(tss), eip), - &tss.eip, - offsetof(typeof(tss), trace) - - offsetof(typeof(tss), eip), - PFEC_page_present, &pfinfo); + rc = hvm_copy_to_guest_linear( + prev_tr.base + offsetof(typeof(tss), eip), &tss.eip, + offsetof(typeof(tss), trace) - offsetof(typeof(tss), eip), + PFEC_page_present, &pfinfo); if ( rc 
== HVMTRANS_bad_linear_to_gfn ) hvm_inject_page_fault(pfinfo.ec, pfinfo.linear); if ( rc != HVMTRANS_okay ) goto out; - rc = hvm_copy_from_guest_linear( - &tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo); + rc = hvm_copy_from_guest_linear(&tss, tr.base, sizeof(tss), + PFEC_page_present, &pfinfo); if ( rc == HVMTRANS_bad_linear_to_gfn ) hvm_inject_page_fault(pfinfo.ec, pfinfo.linear); /* @@ -3001,16 +2983,16 @@ void hvm_task_switch( if ( rc != X86EMUL_OKAY ) goto out; - regs->rip = tss.eip; + regs->rip = tss.eip; regs->rflags = tss.eflags | X86_EFLAGS_MBS; - regs->rax = tss.eax; - regs->rcx = tss.ecx; - regs->rdx = tss.edx; - regs->rbx = tss.ebx; - regs->rsp = tss.esp; - regs->rbp = tss.ebp; - regs->rsi = tss.esi; - regs->rdi = tss.edi; + regs->rax = tss.eax; + regs->rcx = tss.ecx; + regs->rdx = tss.edx; + regs->rbx = tss.ebx; + regs->rsp = tss.esp; + regs->rbp = tss.ebp; + regs->rsi = tss.esi; + regs->rdi = tss.edi; exn_raised = 0; if ( task_switch_load_seg(x86_seg_es, tss.es, new_cpl, tss.eflags) || @@ -3026,9 +3008,9 @@ void hvm_task_switch( regs->eflags |= X86_EFLAGS_NT; tss.back_link = prev_tr.sel; - rc = hvm_copy_to_guest_linear(tr.base + offsetof(typeof(tss), back_link), - &tss.back_link, sizeof(tss.back_link), 0, - &pfinfo); + rc = hvm_copy_to_guest_linear( + tr.base + offsetof(typeof(tss), back_link), &tss.back_link, + sizeof(tss.back_link), 0, &pfinfo); if ( rc == HVMTRANS_bad_linear_to_gfn ) { hvm_inject_page_fault(pfinfo.ec, pfinfo.linear); @@ -3044,8 +3026,8 @@ void hvm_task_switch( v->arch.hvm.guest_cr[0] |= X86_CR0_TS; hvm_update_guest_cr(v, 0); - if ( (taskswitch_reason == TSW_iret || - taskswitch_reason == TSW_jmp) && otd_writable ) + if ( (taskswitch_reason == TSW_iret || taskswitch_reason == TSW_jmp) && + otd_writable ) clear_bit(41, optss_desc); /* clear B flag of old task */ if ( taskswitch_reason != TSW_iret && ntd_writable ) @@ -3065,8 +3047,7 @@ void hvm_task_switch( else sp = regs->sp -= opsz; if ( hvm_virtual_to_linear_addr(x86_seg_ss, &segr, sp, opsz, - hvm_access_write, - &cs, &linear_addr) ) + hvm_access_write, &cs, &linear_addr) ) { rc = hvm_copy_to_guest_linear(linear_addr, &errcode, opsz, 0, &pfinfo); @@ -3083,15 +3064,16 @@ void hvm_task_switch( if ( (tss.trace & 1) && !exn_raised ) hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC); - out: +out: hvm_unmap_entry(optss_desc); hvm_unmap_entry(nptss_desc); } -enum hvm_translation_result hvm_translate_get_page( - struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec, - pagefault_info_t *pfinfo, struct page_info **page_p, - gfn_t *gfn_p, p2m_type_t *p2mt_p) +enum hvm_translation_result +hvm_translate_get_page(struct vcpu *v, unsigned long addr, bool linear, + uint32_t pfec, pagefault_info_t *pfinfo, + struct page_info **page_p, gfn_t *gfn_p, + p2m_type_t *p2mt_p) { struct page_info *page; p2m_type_t p2mt; @@ -3129,9 +3111,8 @@ enum hvm_translation_result hvm_translate_get_page( * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses, * - newer Windows (like Server 2012) for HPET accesses. 
*/ - if ( v == current - && !nestedhvm_vcpu_in_guestmode(v) - && hvm_mmio_internal(gfn_to_gaddr(gfn)) ) + if ( v == current && !nestedhvm_vcpu_in_guestmode(v) && + hvm_mmio_internal(gfn_to_gaddr(gfn)) ) return HVMTRANS_bad_gfn_to_mfn; page = get_page_from_gfn(v->domain, gfn_x(gfn), &p2mt, P2M_UNSHARE); @@ -3165,13 +3146,14 @@ enum hvm_translation_result hvm_translate_get_page( return HVMTRANS_okay; } -#define HVMCOPY_from_guest (0u<<0) -#define HVMCOPY_to_guest (1u<<0) -#define HVMCOPY_phys (0u<<2) -#define HVMCOPY_linear (1u<<2) -static enum hvm_translation_result __hvm_copy( - void *buf, paddr_t addr, int size, struct vcpu *v, unsigned int flags, - uint32_t pfec, pagefault_info_t *pfinfo) +#define HVMCOPY_from_guest (0u << 0) +#define HVMCOPY_to_guest (1u << 0) +#define HVMCOPY_phys (0u << 2) +#define HVMCOPY_linear (1u << 2) +static enum hvm_translation_result __hvm_copy(void *buf, paddr_t addr, int size, + struct vcpu *v, + unsigned int flags, uint32_t pfec, + pagefault_info_t *pfinfo) { gfn_t gfn; struct page_info *page; @@ -3204,8 +3186,8 @@ static enum hvm_translation_result __hvm_copy( count = min_t(int, PAGE_SIZE - gpa, todo); - res = hvm_translate_get_page(v, addr, flags & HVMCOPY_linear, - pfec, pfinfo, &page, &gfn, &p2mt); + res = hvm_translate_get_page(v, addr, flags & HVMCOPY_linear, pfec, + pfinfo, &page, &gfn, &p2mt); if ( res != HVMTRANS_okay ) return res; @@ -3225,7 +3207,8 @@ static enum hvm_translation_result __hvm_copy( if ( xchg(&lastpage, gfn_x(gfn)) != gfn_x(gfn) ) dprintk(XENLOG_G_DEBUG, - "%pv attempted write to read-only gfn %#lx (mfn=%#"PRI_mfn")\n", + "%pv attempted write to read-only gfn %#lx " + "(mfn=%#" PRI_mfn ")\n", v, gfn_x(gfn), mfn_x(page_to_mfn(page))); } else @@ -3254,32 +3237,34 @@ static enum hvm_translation_result __hvm_copy( return HVMTRANS_okay; } -enum hvm_translation_result hvm_copy_to_guest_phys( - paddr_t paddr, void *buf, int size, struct vcpu *v) +enum hvm_translation_result hvm_copy_to_guest_phys(paddr_t paddr, void *buf, + int size, struct vcpu *v) { - return __hvm_copy(buf, paddr, size, v, - HVMCOPY_to_guest | HVMCOPY_phys, 0, NULL); + return __hvm_copy(buf, paddr, size, v, HVMCOPY_to_guest | HVMCOPY_phys, 0, + NULL); } -enum hvm_translation_result hvm_copy_from_guest_phys( - void *buf, paddr_t paddr, int size) +enum hvm_translation_result hvm_copy_from_guest_phys(void *buf, paddr_t paddr, + int size) { return __hvm_copy(buf, paddr, size, current, HVMCOPY_from_guest | HVMCOPY_phys, 0, NULL); } -enum hvm_translation_result hvm_copy_to_guest_linear( - unsigned long addr, void *buf, int size, uint32_t pfec, - pagefault_info_t *pfinfo) +enum hvm_translation_result hvm_copy_to_guest_linear(unsigned long addr, + void *buf, int size, + uint32_t pfec, + pagefault_info_t *pfinfo) { return __hvm_copy(buf, addr, size, current, HVMCOPY_to_guest | HVMCOPY_linear, PFEC_page_present | PFEC_write_access | pfec, pfinfo); } -enum hvm_translation_result hvm_copy_from_guest_linear( - void *buf, unsigned long addr, int size, uint32_t pfec, - pagefault_info_t *pfinfo) +enum hvm_translation_result hvm_copy_from_guest_linear(void *buf, + unsigned long addr, + int size, uint32_t pfec, + pagefault_info_t *pfinfo) { return __hvm_copy(buf, addr, size, current, HVMCOPY_from_guest | HVMCOPY_linear, @@ -3296,7 +3281,8 @@ unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len) return 0; } - rc = hvm_copy_to_guest_linear((unsigned long)to, (void *)from, len, 0, NULL); + rc = + hvm_copy_to_guest_linear((unsigned long)to, (void *)from, len, 0, NULL); 
return rc ? len : 0; /* fake a copy_to_user() return code */ } @@ -3345,14 +3331,14 @@ static uint64_t _hvm_rdtsc_intercept(void) struct domain *currd = curr->domain; if ( currd->arch.vtsc ) - switch ( hvm_guest_x86_mode(curr) ) + switch (hvm_guest_x86_mode(curr)) { case 8: case 4: case 2: if ( unlikely(hvm_get_cpl(curr)) ) { - case 1: + case 1: currd->arch.vtsc_usercount++; break; } @@ -3388,7 +3374,7 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content) ret = X86EMUL_OKAY; - switch ( msr ) + switch (msr) { unsigned int index; @@ -3424,10 +3410,10 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content) case MSR_MTRRdefType: if ( !d->arch.cpuid->basic.mtrr ) goto gp_fault; - *msr_content = v->arch.hvm.mtrr.def_type | - MASK_INSR(v->arch.hvm.mtrr.enabled, MTRRdefType_E) | - MASK_INSR(v->arch.hvm.mtrr.fixed_enabled, - MTRRdefType_FE); + *msr_content = + v->arch.hvm.mtrr.def_type | + MASK_INSR(v->arch.hvm.mtrr.enabled, MTRRdefType_E) | + MASK_INSR(v->arch.hvm.mtrr.fixed_enabled, MTRRdefType_FE); break; case MSR_MTRRfix64K_00000: if ( !d->arch.cpuid->basic.mtrr ) @@ -3439,26 +3425,24 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content) if ( !d->arch.cpuid->basic.mtrr ) goto gp_fault; index = msr - MSR_MTRRfix16K_80000; - *msr_content = fixed_range_base[array_index_nospec(index + 1, - ARRAY_SIZE(v->arch.hvm.mtrr.fixed_ranges))]; + *msr_content = fixed_range_base[array_index_nospec( + index + 1, ARRAY_SIZE(v->arch.hvm.mtrr.fixed_ranges))]; break; - case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000: + case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000: if ( !d->arch.cpuid->basic.mtrr ) goto gp_fault; index = msr - MSR_MTRRfix4K_C0000; - *msr_content = fixed_range_base[array_index_nospec(index + 3, - ARRAY_SIZE(v->arch.hvm.mtrr.fixed_ranges))]; + *msr_content = fixed_range_base[array_index_nospec( + index + 3, ARRAY_SIZE(v->arch.hvm.mtrr.fixed_ranges))]; break; - case MSR_IA32_MTRR_PHYSBASE(0)...MSR_IA32_MTRR_PHYSMASK(MTRR_VCNT_MAX - 1): + case MSR_IA32_MTRR_PHYSBASE(0)... MSR_IA32_MTRR_PHYSMASK(MTRR_VCNT_MAX - 1): if ( !d->arch.cpuid->basic.mtrr ) goto gp_fault; index = msr - MSR_IA32_MTRR_PHYSBASE(0); - if ( (index / 2) >= - MASK_EXTR(v->arch.hvm.mtrr.mtrr_cap, MTRRcap_VCNT) ) + if ( (index / 2) >= MASK_EXTR(v->arch.hvm.mtrr.mtrr_cap, MTRRcap_VCNT) ) goto gp_fault; - *msr_content = var_range_base[array_index_nospec(index, - 2 * MASK_EXTR(v->arch.hvm.mtrr.mtrr_cap, - MTRRcap_VCNT))]; + *msr_content = var_range_base[array_index_nospec( + index, 2 * MASK_EXTR(v->arch.hvm.mtrr.mtrr_cap, MTRRcap_VCNT))]; break; case MSR_IA32_XSS: @@ -3475,30 +3459,29 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content) case MSR_K8_ENABLE_C1E: case MSR_AMD64_NB_CFG: - /* - * These AMD-only registers may be accessed if this HVM guest - * has been migrated to an Intel host. This fixes a guest crash - * in this case. - */ - *msr_content = 0; - break; + /* + * These AMD-only registers may be accessed if this HVM guest + * has been migrated to an Intel host. This fixes a guest crash + * in this case. + */ + *msr_content = 0; + break; default: if ( (ret = vmce_rdmsr(msr, msr_content)) < 0 ) goto gp_fault; /* If ret == 0 then this is not an MCE MSR, see other MSRs. */ - ret = ((ret == 0) - ? hvm_funcs.msr_read_intercept(msr, msr_content) - : X86EMUL_OKAY); + ret = ((ret == 0) ? 
hvm_funcs.msr_read_intercept(msr, msr_content) + : X86EMUL_OKAY); break; } - out: - HVMTRACE_3D(MSR_READ, msr, - (uint32_t)*msr_content, (uint32_t)(*msr_content >> 32)); +out: + HVMTRACE_3D(MSR_READ, msr, (uint32_t)*msr_content, + (uint32_t)(*msr_content >> 32)); return ret; - gp_fault: +gp_fault: ret = X86EMUL_EXCEPTION; *msr_content = -1ull; goto out; @@ -3511,8 +3494,8 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content, struct domain *d = v->domain; int ret; - HVMTRACE_3D(MSR_WRITE, msr, - (uint32_t)msr_content, (uint32_t)(msr_content >> 32)); + HVMTRACE_3D(MSR_WRITE, msr, (uint32_t)msr_content, + (uint32_t)(msr_content >> 32)); if ( may_defer && unlikely(monitored_msr(v->domain, msr)) ) { @@ -3538,13 +3521,13 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content, ret = X86EMUL_OKAY; - switch ( msr ) + switch (msr) { unsigned int index; case MSR_EFER: if ( hvm_set_efer(msr_content) ) - return X86EMUL_EXCEPTION; + return X86EMUL_EXCEPTION; break; case MSR_IA32_TSC: @@ -3564,7 +3547,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content, case MSR_IA32_CR_PAT: if ( !hvm_set_guest_pat(v, msr_content) ) - goto gp_fault; + goto gp_fault; break; case MSR_MTRRcap: @@ -3573,9 +3556,8 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content, case MSR_MTRRdefType: if ( !d->arch.cpuid->basic.mtrr ) goto gp_fault; - if ( !mtrr_def_type_msr_set(v->domain, &v->arch.hvm.mtrr, - msr_content) ) - goto gp_fault; + if ( !mtrr_def_type_msr_set(v->domain, &v->arch.hvm.mtrr, msr_content) ) + goto gp_fault; break; case MSR_MTRRfix64K_00000: if ( !d->arch.cpuid->basic.mtrr ) @@ -3589,26 +3571,26 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content, if ( !d->arch.cpuid->basic.mtrr ) goto gp_fault; index = msr - MSR_MTRRfix16K_80000 + 1; - if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm.mtrr, - index, msr_content) ) + if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm.mtrr, index, + msr_content) ) goto gp_fault; break; - case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000: + case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000: if ( !d->arch.cpuid->basic.mtrr ) goto gp_fault; index = msr - MSR_MTRRfix4K_C0000 + 3; - if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm.mtrr, - index, msr_content) ) + if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm.mtrr, index, + msr_content) ) goto gp_fault; break; - case MSR_IA32_MTRR_PHYSBASE(0)...MSR_IA32_MTRR_PHYSMASK(MTRR_VCNT_MAX - 1): + case MSR_IA32_MTRR_PHYSBASE(0)... MSR_IA32_MTRR_PHYSMASK(MTRR_VCNT_MAX - 1): if ( !d->arch.cpuid->basic.mtrr ) goto gp_fault; index = msr - MSR_IA32_MTRR_PHYSBASE(0); if ( ((index / 2) >= MASK_EXTR(v->arch.hvm.mtrr.mtrr_cap, MTRRcap_VCNT)) || - !mtrr_var_range_msr_set(v->domain, &v->arch.hvm.mtrr, - msr, msr_content) ) + !mtrr_var_range_msr_set(v->domain, &v->arch.hvm.mtrr, msr, + msr_content) ) goto gp_fault; break; @@ -3633,9 +3615,8 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content, if ( (ret = vmce_wrmsr(msr, msr_content)) < 0 ) goto gp_fault; /* If ret == 0 then this is not an MCE MSR, see other MSRs. */ - ret = ((ret == 0) - ? hvm_funcs.msr_write_intercept(msr, msr_content) - : X86EMUL_OKAY); + ret = ((ret == 0) ? 
hvm_funcs.msr_write_intercept(msr, msr_content) + : X86EMUL_OKAY); break; } @@ -3651,7 +3632,7 @@ static bool is_sysdesc_access(const struct x86_emulate_state *state, unsigned int ext; int mode = x86_insn_modrm(state, NULL, &ext); - switch ( ctxt->opcode ) + switch (ctxt->opcode) { case X86EMUL_OPC(0x0f, 0x00): if ( !(ext & 4) ) /* SLDT / STR / LLDT / LTR */ @@ -3689,7 +3670,7 @@ int hvm_descriptor_access_intercept(uint64_t exit_info, static bool is_cross_vendor(const struct x86_emulate_state *state, const struct x86_emulate_ctxt *ctxt) { - switch ( ctxt->opcode ) + switch (ctxt->opcode) { case X86EMUL_OPC(0x0f, 0x05): /* syscall */ case X86EMUL_OPC(0x0f, 0x34): /* sysenter */ @@ -3712,16 +3693,16 @@ void hvm_ud_intercept(struct cpu_user_regs *regs) if ( opt_hvm_fep ) { const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs]; - uint32_t walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3) - ? PFEC_user_mode : 0) | PFEC_insn_fetch; + uint32_t walk = + ((ctxt.seg_reg[x86_seg_ss].dpl == 3) ? PFEC_user_mode : 0) | + PFEC_insn_fetch; unsigned long addr; char sig[5]; /* ud2; .ascii "xen" */ - if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip, - sizeof(sig), hvm_access_insn_fetch, - cs, &addr) && - (hvm_copy_from_guest_linear(sig, addr, sizeof(sig), - walk, NULL) == HVMTRANS_okay) && + if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip, sizeof(sig), + hvm_access_insn_fetch, cs, &addr) && + (hvm_copy_from_guest_linear(sig, addr, sizeof(sig), walk, NULL) == + HVMTRANS_okay) && (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) ) { regs->rip += sizeof(sig); @@ -3743,7 +3724,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs) return; } - switch ( hvm_emulate_one(&ctxt) ) + switch (hvm_emulate_one(&ctxt)) { case X86EMUL_UNHANDLEABLE: case X86EMUL_UNIMPLEMENTED: @@ -3764,7 +3745,8 @@ enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack) ASSERT(v == current); - if ( nestedhvm_enabled(v->domain) ) { + if ( nestedhvm_enabled(v->domain) ) + { enum hvm_intblk intr; intr = nhvm_interrupt_blocked(v); @@ -3778,12 +3760,12 @@ enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack) intr_shadow = hvm_funcs.get_interrupt_shadow(v); - if ( intr_shadow & (HVM_INTR_SHADOW_STI|HVM_INTR_SHADOW_MOV_SS) ) + if ( intr_shadow & (HVM_INTR_SHADOW_STI | HVM_INTR_SHADOW_MOV_SS) ) return hvm_intblk_shadow; if ( intack.source == hvm_intsrc_nmi ) - return ((intr_shadow & HVM_INTR_SHADOW_NMI) ? - hvm_intblk_nmi_iret : hvm_intblk_none); + return ((intr_shadow & HVM_INTR_SHADOW_NMI) ? hvm_intblk_nmi_iret + : hvm_intblk_none); if ( intack.source == hvm_intsrc_lapic ) { @@ -3822,8 +3804,7 @@ static void hvm_latch_shinfo_size(struct domain *d) /* Initialise a hypercall transfer page for a VMX domain using paravirtualised drivers. */ -void hvm_hypercall_page_initialise(struct domain *d, - void *hypercall_page) +void hvm_hypercall_page_initialise(struct domain *d, void *hypercall_page) { hvm_latch_shinfo_size(d); hvm_funcs.init_hypercall_page(d, hypercall_page); @@ -3901,8 +3882,7 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip) /* Sync AP's TSC with BSP's. 
*/ v->arch.hvm.cache_tsc_offset = v->domain->vcpu[0]->arch.hvm.cache_tsc_offset; - hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset, - d->arch.hvm.sync_tsc); + hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset, d->arch.hvm.sync_tsc); v->arch.hvm.msr_tsc_adjust = 0; @@ -3912,7 +3892,7 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip) v->is_initialised = 1; clear_bit(_VPF_down, &v->pause_flags); - out: +out: domain_unlock(d); } @@ -3931,7 +3911,7 @@ static void hvm_s3_suspend(struct domain *d) return; } - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { int rc; @@ -3958,7 +3938,7 @@ static void hvm_s3_resume(struct domain *d) { struct vcpu *v; - for_each_vcpu( d, v ) + for_each_vcpu (d, v) hvm_set_guest_tsc(v, 0); domain_unpause(d); } @@ -3977,12 +3957,12 @@ bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v), return false; /* Pause all other vcpus. */ - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) if ( v != current && flush_vcpu(ctxt, v) ) vcpu_pause_nosync(v); /* Now that all VCPUs are signalled to deschedule, we wait... */ - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) if ( v != current && flush_vcpu(ctxt, v) ) while ( !vcpu_runnable(v) && v->is_running ) cpu_relax(); @@ -3993,7 +3973,7 @@ bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v), cpumask_clear(mask); /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */ - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { unsigned int cpu; @@ -4011,7 +3991,7 @@ bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v), flush_tlb_mask(mask); /* Done. */ - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) if ( v != current && flush_vcpu(ctxt, v) ) vcpu_unpause(v); @@ -4057,8 +4037,7 @@ static int hvmop_set_evtchn_upcall_vector( return 0; } -static int hvm_allow_set_param(struct domain *d, - const struct xen_hvm_param *a) +static int hvm_allow_set_param(struct domain *d, const struct xen_hvm_param *a) { uint64_t value = d->arch.hvm.params[a->index]; int rc; @@ -4067,7 +4046,7 @@ static int hvm_allow_set_param(struct domain *d, if ( rc ) return rc; - switch ( a->index ) + switch (a->index) { /* The following parameters can be set by the guest. */ case HVM_PARAM_CALLBACK_IRQ: @@ -4100,7 +4079,7 @@ static int hvm_allow_set_param(struct domain *d, if ( rc ) return rc; - switch ( a->index ) + switch (a->index) { /* The following parameters should only be changed once. 
*/ case HVM_PARAM_VIRIDIAN: @@ -4120,8 +4099,7 @@ static int hvm_allow_set_param(struct domain *d, return rc; } -static int hvmop_set_param( - XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg) +static int hvmop_set_param(XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg) { struct domain *curr_d = current->domain; struct xen_hvm_param a; @@ -4147,7 +4125,7 @@ static int hvmop_set_param( if ( rc ) goto out; - switch ( a.index ) + switch (a.index) { case HVM_PARAM_CALLBACK_IRQ: hvm_set_callback_via(d, a.value); @@ -4158,8 +4136,7 @@ static int hvmop_set_param( rc = -EINVAL; break; case HVM_PARAM_VIRIDIAN: - if ( (a.value & ~HVMPV_feature_mask) || - !(a.value & HVMPV_base_freq) ) + if ( (a.value & ~HVMPV_feature_mask) || !(a.value & HVMPV_base_freq) ) rc = -EINVAL; break; case HVM_PARAM_IDENT_PT: @@ -4185,7 +4162,7 @@ static int hvmop_set_param( rc = 0; domain_pause(d); d->arch.hvm.params[a.index] = a.value; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) paging_update_cr3(v, false); domain_unpause(d); @@ -4232,17 +4209,15 @@ static int hvmop_set_param( */ if ( !paging_mode_hap(d) && a.value ) rc = -EINVAL; - if ( a.value && - d->arch.hvm.params[HVM_PARAM_ALTP2M] ) + if ( a.value && d->arch.hvm.params[HVM_PARAM_ALTP2M] ) rc = -EINVAL; /* Set up NHVM state for any vcpus that are already up. */ - if ( a.value && - !d->arch.hvm.params[HVM_PARAM_NESTEDHVM] ) - for_each_vcpu(d, v) + if ( a.value && !d->arch.hvm.params[HVM_PARAM_NESTEDHVM] ) + for_each_vcpu (d, v) if ( rc == 0 ) rc = nestedhvm_vcpu_initialise(v); if ( !a.value || rc ) - for_each_vcpu(d, v) + for_each_vcpu (d, v) nestedhvm_vcpu_destroy(v); break; case HVM_PARAM_ALTP2M: @@ -4251,8 +4226,7 @@ static int hvmop_set_param( break; if ( a.value > XEN_ALTP2M_limited ) rc = -EINVAL; - if ( a.value && - d->arch.hvm.params[HVM_PARAM_NESTEDHVM] ) + if ( a.value && d->arch.hvm.params[HVM_PARAM_NESTEDHVM] ) rc = -EINVAL; break; case HVM_PARAM_TRIPLE_FAULT_REASON: @@ -4266,8 +4240,7 @@ static int hvmop_set_param( { unsigned int i; - if ( a.value == 0 || - a.value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 ) + if ( a.value == 0 || a.value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 ) { rc = -EINVAL; break; @@ -4323,11 +4296,11 @@ static int hvmop_set_param( * 256 bits interrupt redirection bitmap + 64k bits I/O bitmap * plus one padding byte). */ - if ( (a.value >> 32) > sizeof(struct tss32) + - (0x100 / 8) + (0x10000 / 8) + 1 ) + if ( (a.value >> 32) > + sizeof(struct tss32) + (0x100 / 8) + (0x10000 / 8) + 1 ) a.value = (uint32_t)a.value | - ((sizeof(struct tss32) + (0x100 / 8) + - (0x10000 / 8) + 1) << 32); + ((sizeof(struct tss32) + (0x100 / 8) + (0x10000 / 8) + 1) + << 32); a.value |= VM86_TSS_UPDATED; break; @@ -4341,16 +4314,14 @@ static int hvmop_set_param( d->arch.hvm.params[a.index] = a.value; - HVM_DBG_LOG(DBG_LEVEL_HCALL, "set param %u = %"PRIx64, - a.index, a.value); + HVM_DBG_LOG(DBG_LEVEL_HCALL, "set param %u = %" PRIx64, a.index, a.value); - out: +out: rcu_unlock_domain(d); return rc; } -static int hvm_allow_get_param(struct domain *d, - const struct xen_hvm_param *a) +static int hvm_allow_get_param(struct domain *d, const struct xen_hvm_param *a) { int rc; @@ -4358,7 +4329,7 @@ static int hvm_allow_get_param(struct domain *d, if ( rc ) return rc; - switch ( a->index ) + switch (a->index) { /* The following parameters can be read by the guest. 
*/ case HVM_PARAM_CALLBACK_IRQ: @@ -4388,8 +4359,7 @@ static int hvm_allow_get_param(struct domain *d, return rc; } -static int hvmop_get_param( - XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg) +static int hvmop_get_param(XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg) { struct xen_hvm_param a; struct domain *d; @@ -4413,7 +4383,7 @@ static int hvmop_get_param( if ( rc ) goto out; - switch ( a.index ) + switch (a.index) { case HVM_PARAM_ACPI_S_STATE: a.value = d->arch.hvm.is_s3_suspended ? 3 : 0; @@ -4424,8 +4394,8 @@ static int hvmop_get_param( break; case HVM_PARAM_VM86_TSS_SIZED: - a.value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] & - ~VM86_TSS_UPDATED; + a.value = + d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] & ~VM86_TSS_UPDATED; break; case HVM_PARAM_X87_FIP_WIDTH: @@ -4438,10 +4408,9 @@ static int hvmop_get_param( rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0; - HVM_DBG_LOG(DBG_LEVEL_HCALL, "get param %u = %"PRIx64, - a.index, a.value); + HVM_DBG_LOG(DBG_LEVEL_HCALL, "get param %u = %" PRIx64, a.index, a.value); - out: +out: rcu_unlock_domain(d); return rc; } @@ -4477,8 +4446,7 @@ static int hvmop_get_param( * subop should be evaluated for safety, with unsafe subops * blacklisted in xsm_hvm_altp2mhvm_op(). */ -static int do_altp2m_op( - XEN_GUEST_HANDLE_PARAM(void) arg) +static int do_altp2m_op(XEN_GUEST_HANDLE_PARAM(void) arg) { struct xen_hvm_altp2m_op a; struct domain *d = NULL; @@ -4491,11 +4459,10 @@ static int do_altp2m_op( if ( copy_from_guest(&a, arg, 1) ) return -EFAULT; - if ( a.pad1 || a.pad2 || - (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) ) + if ( a.pad1 || a.pad2 || (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) ) return -EINVAL; - switch ( a.cmd ) + switch (a.cmd) { case HVMOP_altp2m_get_domain_state: case HVMOP_altp2m_set_domain_state: @@ -4528,8 +4495,7 @@ static int do_altp2m_op( } if ( (a.cmd != HVMOP_altp2m_get_domain_state) && - (a.cmd != HVMOP_altp2m_set_domain_state) && - !d->arch.altp2m_active ) + (a.cmd != HVMOP_altp2m_set_domain_state) && !d->arch.altp2m_active ) { rc = -EOPNOTSUPP; goto out; @@ -4546,7 +4512,7 @@ static int do_altp2m_op( if ( (rc = xsm_hvm_altp2mhvm_op(XSM_OTHER, d, mode, a.cmd)) ) goto out; - switch ( a.cmd ) + switch (a.cmd) { case HVMOP_altp2m_get_domain_state: a.u.domain_state.state = altp2m_active(d); @@ -4575,7 +4541,7 @@ static int do_altp2m_op( if ( d->arch.altp2m_active != ostate && (ostate || !(rc = p2m_init_altp2m_by_id(d, 0))) ) { - for_each_vcpu( d, v ) + for_each_vcpu (d, v) { if ( !ostate ) altp2m_vcpu_initialise(v); @@ -4685,8 +4651,7 @@ static int do_altp2m_op( rc = -EINVAL; else rc = p2m_set_mem_access(d, _gfn(a.u.mem_access.gfn), 1, 0, 0, - a.u.mem_access.access, - a.u.mem_access.view); + a.u.mem_access.access, a.u.mem_access.view); break; case HVMOP_altp2m_set_mem_access_multi: @@ -4707,14 +4672,14 @@ static int do_altp2m_op( rc = p2m_set_mem_access_multi(d, a.u.set_mem_access_multi.pfn_list, a.u.set_mem_access_multi.access_list, a.u.set_mem_access_multi.nr, - a.u.set_mem_access_multi.opaque, - 0x3F, + a.u.set_mem_access_multi.opaque, 0x3F, a.u.set_mem_access_multi.view); if ( rc > 0 ) { a.u.set_mem_access_multi.opaque = rc; - if ( __copy_field_to_guest(guest_handle_cast(arg, xen_hvm_altp2m_op_t), - &a, u.set_mem_access_multi.opaque) ) + if ( __copy_field_to_guest( + guest_handle_cast(arg, xen_hvm_altp2m_op_t), &a, + u.set_mem_access_multi.opaque) ) rc = -EFAULT; else rc = hypercall_create_continuation(__HYPERVISOR_hvm_op, "lh", @@ -4744,14 +4709,14 @@ static int do_altp2m_op( rc = -EINVAL; else rc = 
p2m_change_altp2m_gfn(d, a.u.change_gfn.view, - _gfn(a.u.change_gfn.old_gfn), - _gfn(a.u.change_gfn.new_gfn)); + _gfn(a.u.change_gfn.old_gfn), + _gfn(a.u.change_gfn.new_gfn)); break; default: ASSERT_UNREACHABLE(); } - out: +out: rcu_unlock_domain(d); return rc; @@ -4765,33 +4730,31 @@ DEFINE_XEN_GUEST_HANDLE(compat_hvm_altp2m_op_t); * correctly the translation of all fields from compat_(*) to xen_(*). */ #ifndef CHECK_hvm_altp2m_op -#define CHECK_hvm_altp2m_op \ - CHECK_SIZE_(struct, hvm_altp2m_op); \ +#define CHECK_hvm_altp2m_op \ + CHECK_SIZE_(struct, hvm_altp2m_op); \ CHECK_FIELD_(struct, hvm_altp2m_op, version); \ - CHECK_FIELD_(struct, hvm_altp2m_op, cmd); \ - CHECK_FIELD_(struct, hvm_altp2m_op, domain); \ - CHECK_FIELD_(struct, hvm_altp2m_op, pad1); \ + CHECK_FIELD_(struct, hvm_altp2m_op, cmd); \ + CHECK_FIELD_(struct, hvm_altp2m_op, domain); \ + CHECK_FIELD_(struct, hvm_altp2m_op, pad1); \ CHECK_FIELD_(struct, hvm_altp2m_op, pad2) #endif /* CHECK_hvm_altp2m_op */ #ifndef CHECK_hvm_altp2m_set_mem_access_multi -#define CHECK_hvm_altp2m_set_mem_access_multi \ +#define CHECK_hvm_altp2m_set_mem_access_multi \ CHECK_FIELD_(struct, hvm_altp2m_set_mem_access_multi, view); \ - CHECK_FIELD_(struct, hvm_altp2m_set_mem_access_multi, pad); \ - CHECK_FIELD_(struct, hvm_altp2m_set_mem_access_multi, nr); \ + CHECK_FIELD_(struct, hvm_altp2m_set_mem_access_multi, pad); \ + CHECK_FIELD_(struct, hvm_altp2m_set_mem_access_multi, nr); \ CHECK_FIELD_(struct, hvm_altp2m_set_mem_access_multi, opaque) #endif /* CHECK_hvm_altp2m_set_mem_access_multi */ CHECK_hvm_altp2m_op; CHECK_hvm_altp2m_set_mem_access_multi; -static int compat_altp2m_op( - XEN_GUEST_HANDLE_PARAM(void) arg) +static int compat_altp2m_op(XEN_GUEST_HANDLE_PARAM(void) arg) { int rc = 0; struct compat_hvm_altp2m_op a; - union - { + union { XEN_GUEST_HANDLE_PARAM(void) hnd; struct xen_hvm_altp2m_op *altp2m_op; } nat; @@ -4802,21 +4765,22 @@ static int compat_altp2m_op( if ( copy_from_guest(&a, arg, 1) ) return -EFAULT; - if ( a.pad1 || a.pad2 || - (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) ) + if ( a.pad1 || a.pad2 || (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) ) return -EINVAL; set_xen_guest_handle(nat.hnd, COMPAT_ARG_XLAT_VIRT_BASE); - switch ( a.cmd ) + switch (a.cmd) { case HVMOP_altp2m_set_mem_access_multi: -#define XLAT_hvm_altp2m_set_mem_access_multi_HNDL_pfn_list(_d_, _s_); \ - guest_from_compat_handle((_d_)->pfn_list, (_s_)->pfn_list) -#define XLAT_hvm_altp2m_set_mem_access_multi_HNDL_access_list(_d_, _s_); \ - guest_from_compat_handle((_d_)->access_list, (_s_)->access_list) - XLAT_hvm_altp2m_set_mem_access_multi(&nat.altp2m_op->u.set_mem_access_multi, - &a.u.set_mem_access_multi); +#define XLAT_hvm_altp2m_set_mem_access_multi_HNDL_pfn_list(_d_, _s_) \ + ; \ + guest_from_compat_handle((_d_)->pfn_list, (_s_)->pfn_list) +#define XLAT_hvm_altp2m_set_mem_access_multi_HNDL_access_list(_d_, _s_) \ + ; \ + guest_from_compat_handle((_d_)->access_list, (_s_)->access_list) + XLAT_hvm_altp2m_set_mem_access_multi( + &nat.altp2m_op->u.set_mem_access_multi, &a.u.set_mem_access_multi); #undef XLAT_hvm_altp2m_set_mem_access_multi_HNDL_pfn_list #undef XLAT_hvm_altp2m_set_mem_access_multi_HNDL_access_list break; @@ -4826,15 +4790,15 @@ static int compat_altp2m_op( } /* Manually fill the common part of the xen_hvm_altp2m_op structure. 
*/ - nat.altp2m_op->version = a.version; - nat.altp2m_op->cmd = a.cmd; - nat.altp2m_op->domain = a.domain; - nat.altp2m_op->pad1 = a.pad1; - nat.altp2m_op->pad2 = a.pad2; + nat.altp2m_op->version = a.version; + nat.altp2m_op->cmd = a.cmd; + nat.altp2m_op->domain = a.domain; + nat.altp2m_op->pad1 = a.pad1; + nat.altp2m_op->pad2 = a.pad2; rc = do_altp2m_op(nat.hnd); - switch ( a.cmd ) + switch (a.cmd) { case HVMOP_altp2m_set_mem_access_multi: /* @@ -4847,9 +4811,9 @@ static int compat_altp2m_op( ASSERT(rc == __HYPERVISOR_hvm_op); a.u.set_mem_access_multi.opaque = nat.altp2m_op->u.set_mem_access_multi.opaque; - if ( __copy_field_to_guest(guest_handle_cast(arg, - compat_hvm_altp2m_op_t), - &a, u.set_mem_access_multi.opaque) ) + if ( __copy_field_to_guest( + guest_handle_cast(arg, compat_hvm_altp2m_op_t), &a, + u.set_mem_access_multi.opaque) ) rc = -EFAULT; } break; @@ -4861,8 +4825,8 @@ static int compat_altp2m_op( return rc; } -static int hvmop_get_mem_type( - XEN_GUEST_HANDLE_PARAM(xen_hvm_get_mem_type_t) arg) +static int hvmop_get_mem_type(XEN_GUEST_HANDLE_PARAM(xen_hvm_get_mem_type_t) + arg) { struct xen_hvm_get_mem_type a; struct domain *d; @@ -4891,26 +4855,26 @@ static int hvmop_get_mem_type( */ get_gfn_query_unlocked(d, a.pfn, &t); if ( p2m_is_mmio(t) ) - a.mem_type = HVMMEM_mmio_dm; + a.mem_type = HVMMEM_mmio_dm; else if ( t == p2m_ioreq_server ) a.mem_type = HVMMEM_ioreq_server; else if ( p2m_is_readonly(t) ) - a.mem_type = HVMMEM_ram_ro; + a.mem_type = HVMMEM_ram_ro; else if ( p2m_is_ram(t) ) - a.mem_type = HVMMEM_ram_rw; + a.mem_type = HVMMEM_ram_rw; else if ( p2m_is_pod(t) ) - a.mem_type = HVMMEM_ram_rw; + a.mem_type = HVMMEM_ram_rw; else if ( p2m_is_grant(t) ) - a.mem_type = HVMMEM_ram_rw; + a.mem_type = HVMMEM_ram_rw; else - a.mem_type = HVMMEM_mmio_dm; + a.mem_type = HVMMEM_mmio_dm; rc = -EFAULT; if ( __copy_to_guest(arg, &a, 1) ) goto out; rc = 0; - out: +out: rcu_unlock_domain(d); return rc; @@ -4927,21 +4891,19 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) * the acquisition of a lock). 
*/ - switch ( op ) + switch (op) { case HVMOP_set_evtchn_upcall_vector: rc = hvmop_set_evtchn_upcall_vector( guest_handle_cast(arg, xen_hvm_evtchn_upcall_vector_t)); break; - + case HVMOP_set_param: - rc = hvmop_set_param( - guest_handle_cast(arg, xen_hvm_param_t)); + rc = hvmop_set_param(guest_handle_cast(arg, xen_hvm_param_t)); break; case HVMOP_get_param: - rc = hvmop_get_param( - guest_handle_cast(arg, xen_hvm_param_t)); + rc = hvmop_get_param(guest_handle_cast(arg, xen_hvm_param_t)); break; case HVMOP_flush_tlbs: @@ -4949,8 +4911,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) break; case HVMOP_get_mem_type: - rc = hvmop_get_mem_type( - guest_handle_cast(arg, xen_hvm_get_mem_type_t)); + rc = hvmop_get_mem_type(guest_handle_cast(arg, xen_hvm_get_mem_type_t)); break; case HVMOP_pagetable_dying: @@ -4977,7 +4938,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case HVMOP_get_time: { + case HVMOP_get_time: + { xen_hvm_get_time_t gxt; gxt.now = NOW(); @@ -4986,19 +4948,20 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case HVMOP_xentrace: { + case HVMOP_xentrace: + { xen_hvm_xentrace_t tr; - if ( copy_from_guest(&tr, arg, 1 ) ) return -EFAULT; - if ( tr.extra_bytes > sizeof(tr.extra) - || (tr.event & ~((1u<<TRC_SUBCLS_SHIFT)-1)) ) + if ( tr.extra_bytes > sizeof(tr.extra) || + (tr.event & ~((1u << TRC_SUBCLS_SHIFT) - 1)) ) return -EINVAL; /* Cycles will be taken at the vmexit and vmenter */ - trace_var(tr.event | TRC_GUEST, 0 /*!cycles*/, - tr.extra_bytes, tr.extra); + trace_var(tr.event | TRC_GUEST, 0 /*!cycles*/, tr.extra_bytes, + tr.extra); break; } @@ -5022,8 +4985,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) } if ( rc == -ERESTART ) - rc = hypercall_create_continuation(__HYPERVISOR_hvm_op, "lh", - op, arg); + rc = hypercall_create_continuation(__HYPERVISOR_hvm_op, "lh", op, arg); return rc; } @@ -5032,22 +4994,21 @@ int hvm_debug_op(struct vcpu *v, int32_t op) { int rc; - switch ( op ) + switch (op) { - case XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON: - case XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF: - rc = -EOPNOTSUPP; - if ( !cpu_has_monitor_trap_flag ) - break; - rc = 0; - vcpu_pause(v); - v->arch.hvm.single_step = - (op == XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON); - vcpu_unpause(v); /* guest will latch new state */ - break; - default: - rc = -ENOSYS; + case XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON: + case XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF: + rc = -EOPNOTSUPP; + if ( !cpu_has_monitor_trap_flag ) break; + rc = 0; + vcpu_pause(v); + v->arch.hvm.single_step = (op == XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON); + vcpu_unpause(v); /* guest will latch new state */ + break; + default: + rc = -ENOSYS; + break; } return rc; @@ -5079,7 +5040,7 @@ void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg, { hvm_funcs.get_segment_register(v, seg, reg); - switch ( seg ) + switch (seg) { case x86_seg_ss: /* SVM may retain %ss.DB when %ss is loaded with a NULL selector. */ @@ -5150,23 +5111,23 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg, if ( reg->p ) reg->g = !!(reg->limit >> 20); - switch ( seg ) + switch (seg) { case x86_seg_cs: - ASSERT(reg->p); /* Usable. */ - ASSERT(reg->s); /* User segment. */ - ASSERT(reg->type & 0x1); /* Accessed. */ - ASSERT((reg->base >> 32) == 0); /* Upper bits clear. */ + ASSERT(reg->p); /* Usable. */ + ASSERT(reg->s); /* User segment. */ + ASSERT(reg->type & 0x1); /* Accessed. */ + ASSERT((reg->base >> 32) == 0); /* Upper bits clear.
*/ break; case x86_seg_ss: if ( reg->p ) { - ASSERT(reg->s); /* User segment. */ - ASSERT(!(reg->type & 0x8)); /* Data segment. */ - ASSERT(reg->type & 0x2); /* Writeable. */ - ASSERT(reg->type & 0x1); /* Accessed. */ - ASSERT((reg->base >> 32) == 0); /* Upper bits clear. */ + ASSERT(reg->s); /* User segment. */ + ASSERT(!(reg->type & 0x8)); /* Data segment. */ + ASSERT(reg->type & 0x2); /* Writeable. */ + ASSERT(reg->type & 0x1); /* Accessed. */ + ASSERT((reg->base >> 32) == 0); /* Upper bits clear. */ } break; @@ -5176,24 +5137,24 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg, case x86_seg_gs: if ( reg->p ) { - ASSERT(reg->s); /* User segment. */ + ASSERT(reg->s); /* User segment. */ if ( reg->type & 0x8 ) - ASSERT(reg->type & 0x2); /* Readable. */ + ASSERT(reg->type & 0x2); /* Readable. */ - ASSERT(reg->type & 0x1); /* Accessed. */ + ASSERT(reg->type & 0x1); /* Accessed. */ if ( seg == x86_seg_fs || seg == x86_seg_gs ) ASSERT(is_canonical_address(reg->base)); else - ASSERT((reg->base >> 32) == 0); /* Upper bits clear. */ + ASSERT((reg->base >> 32) == 0); /* Upper bits clear. */ } break; case x86_seg_tr: - ASSERT(reg->p); /* Usable. */ - ASSERT(!reg->s); /* System segment. */ - ASSERT(!(reg->sel & 0x4)); /* !TI. */ + ASSERT(reg->p); /* Usable. */ + ASSERT(!reg->s); /* System segment. */ + ASSERT(!(reg->sel & 0x4)); /* !TI. */ if ( reg->type == SYS_DESC_tss_busy ) ASSERT(is_canonical_address(reg->base)); else if ( reg->type == SYS_DESC_tss16_busy ) @@ -5205,8 +5166,8 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg, case x86_seg_ldtr: if ( reg->p ) { - ASSERT(!reg->s); /* System segment. */ - ASSERT(!(reg->sel & 0x4)); /* !TI. */ + ASSERT(!reg->s); /* System segment. */ + ASSERT(!(reg->sel & 0x4)); /* !TI. */ ASSERT(reg->type == SYS_DESC_ldt); ASSERT(is_canonical_address(reg->base)); } @@ -5215,7 +5176,7 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg, case x86_seg_gdtr: case x86_seg_idtr: ASSERT(is_canonical_address(reg->base)); - ASSERT((reg->limit >> 16) == 0); /* Upper bits clear. */ + ASSERT((reg->limit >> 16) == 0); /* Upper bits clear. 
*/ break; default: @@ -5235,4 +5196,3 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg, * indent-tabs-mode: nil * End: */ - diff --git a/xen/arch/x86/hvm/hypercall.c b/xen/arch/x86/hvm/hypercall.c index 5bb1750595..95ca8fd496 100644 --- a/xen/arch/x86/hvm/hypercall.c +++ b/xen/arch/x86/hvm/hypercall.c @@ -29,7 +29,7 @@ static long hvm_memory_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) const struct vcpu *curr = current; long rc; - switch ( cmd & MEMOP_CMD_MASK ) + switch (cmd & MEMOP_CMD_MASK) { case XENMEM_machine_memory_map: case XENMEM_machphys_mapping: @@ -47,10 +47,11 @@ static long hvm_memory_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) return rc; } -static long hvm_grant_table_op( - unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) uop, unsigned int count) +static long hvm_grant_table_op(unsigned int cmd, + XEN_GUEST_HANDLE_PARAM(void) uop, + unsigned int count) { - switch ( cmd ) + switch (cmd) { case GNTTABOP_query_size: case GNTTABOP_setup_table: @@ -76,7 +77,7 @@ static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) { const struct vcpu *curr = current; - switch ( cmd ) + switch (cmd) { default: if ( !is_hardware_domain(curr->domain) ) @@ -103,19 +104,17 @@ static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) return compat_physdev_op(cmd, arg); } -#define HYPERCALL(x) \ - [ __HYPERVISOR_ ## x ] = { (hypercall_fn_t *) do_ ## x, \ - (hypercall_fn_t *) do_ ## x } +#define HYPERCALL(x) \ + [__HYPERVISOR_##x] = {(hypercall_fn_t *)do_##x, (hypercall_fn_t *)do_##x} -#define HVM_CALL(x) \ - [ __HYPERVISOR_ ## x ] = { (hypercall_fn_t *) hvm_ ## x, \ - (hypercall_fn_t *) hvm_ ## x } +#define HVM_CALL(x) \ + [__HYPERVISOR_##x] = {(hypercall_fn_t *)hvm_##x, (hypercall_fn_t *)hvm_##x} -#define COMPAT_CALL(x) \ - [ __HYPERVISOR_ ## x ] = { (hypercall_fn_t *) do_ ## x, \ - (hypercall_fn_t *) compat_ ## x } +#define COMPAT_CALL(x) \ + [__HYPERVISOR_## \ + x] = {(hypercall_fn_t *)do_##x, (hypercall_fn_t *)compat_##x} -#define do_arch_1 paging_domctl_continuation +#define do_arch_1 paging_domctl_continuation static const hypercall_table_t hvm_hypercall_table[] = { HVM_CALL(memory_op), @@ -143,8 +142,7 @@ static const hypercall_table_t hvm_hypercall_table[] = { #endif HYPERCALL(xenpmu_op), COMPAT_CALL(dm_op), - HYPERCALL(arch_1) -}; + HYPERCALL(arch_1)}; #undef do_arch_1 @@ -159,7 +157,7 @@ int hvm_hypercall(struct cpu_user_regs *regs) int mode = hvm_guest_x86_mode(curr); unsigned long eax = regs->eax; - switch ( mode ) + switch (mode) { case 8: eax = regs->rax; @@ -167,13 +165,14 @@ int hvm_hypercall(struct cpu_user_regs *regs) case 4: case 2: if ( currd->arch.monitor.guest_request_userspace_enabled && - eax == __HYPERVISOR_hvm_op && - (mode == 8 ? regs->rdi : regs->ebx) == HVMOP_guest_request_vm_event ) + eax == __HYPERVISOR_hvm_op && + (mode == 8 ? regs->rdi : regs->ebx) == + HVMOP_guest_request_vm_event ) break; if ( unlikely(hvm_get_cpl(curr)) ) { - default: + default: regs->rax = -EPERM; return HVM_HCALL_completed; } @@ -217,32 +216,43 @@ int hvm_hypercall(struct cpu_user_regs *regs) #ifndef NDEBUG /* Deliberately corrupt parameter regs not used by this hypercall. 
*/ - switch ( hypercall_args_table[eax].native ) + switch (hypercall_args_table[eax].native) { - case 0: rdi = 0xdeadbeefdeadf00dUL; - case 1: rsi = 0xdeadbeefdeadf00dUL; - case 2: rdx = 0xdeadbeefdeadf00dUL; - case 3: r10 = 0xdeadbeefdeadf00dUL; - case 4: r8 = 0xdeadbeefdeadf00dUL; - case 5: r9 = 0xdeadbeefdeadf00dUL; + case 0: + rdi = 0xdeadbeefdeadf00dUL; + case 1: + rsi = 0xdeadbeefdeadf00dUL; + case 2: + rdx = 0xdeadbeefdeadf00dUL; + case 3: + r10 = 0xdeadbeefdeadf00dUL; + case 4: + r8 = 0xdeadbeefdeadf00dUL; + case 5: + r9 = 0xdeadbeefdeadf00dUL; } #endif - regs->rax = hvm_hypercall_table[eax].native(rdi, rsi, rdx, r10, r8, - r9); + regs->rax = hvm_hypercall_table[eax].native(rdi, rsi, rdx, r10, r8, r9); #ifndef NDEBUG if ( !curr->hcall_preempted ) { /* Deliberately corrupt parameter regs used by this hypercall. */ - switch ( hypercall_args_table[eax].native ) + switch (hypercall_args_table[eax].native) { - case 6: regs->r9 = 0xdeadbeefdeadf00dUL; - case 5: regs->r8 = 0xdeadbeefdeadf00dUL; - case 4: regs->r10 = 0xdeadbeefdeadf00dUL; - case 3: regs->rdx = 0xdeadbeefdeadf00dUL; - case 2: regs->rsi = 0xdeadbeefdeadf00dUL; - case 1: regs->rdi = 0xdeadbeefdeadf00dUL; + case 6: + regs->r9 = 0xdeadbeefdeadf00dUL; + case 5: + regs->r8 = 0xdeadbeefdeadf00dUL; + case 4: + regs->r10 = 0xdeadbeefdeadf00dUL; + case 3: + regs->rdx = 0xdeadbeefdeadf00dUL; + case 2: + regs->rsi = 0xdeadbeefdeadf00dUL; + case 1: + regs->rdi = 0xdeadbeefdeadf00dUL; } } #endif @@ -261,34 +271,46 @@ int hvm_hypercall(struct cpu_user_regs *regs) #ifndef NDEBUG /* Deliberately corrupt parameter regs not used by this hypercall. */ - switch ( hypercall_args_table[eax].compat ) + switch (hypercall_args_table[eax].compat) { - case 0: ebx = 0xdeadf00d; - case 1: ecx = 0xdeadf00d; - case 2: edx = 0xdeadf00d; - case 3: esi = 0xdeadf00d; - case 4: edi = 0xdeadf00d; - case 5: ebp = 0xdeadf00d; + case 0: + ebx = 0xdeadf00d; + case 1: + ecx = 0xdeadf00d; + case 2: + edx = 0xdeadf00d; + case 3: + esi = 0xdeadf00d; + case 4: + edi = 0xdeadf00d; + case 5: + ebp = 0xdeadf00d; } #endif curr->hcall_compat = true; - regs->rax = hvm_hypercall_table[eax].compat(ebx, ecx, edx, esi, edi, - ebp); + regs->rax = + hvm_hypercall_table[eax].compat(ebx, ecx, edx, esi, edi, ebp); curr->hcall_compat = false; #ifndef NDEBUG if ( !curr->hcall_preempted ) { /* Deliberately corrupt parameter regs used by this hypercall. */ - switch ( hypercall_args_table[eax].compat ) + switch (hypercall_args_table[eax].compat) { - case 6: regs->rbp = 0xdeadf00d; - case 5: regs->rdi = 0xdeadf00d; - case 4: regs->rsi = 0xdeadf00d; - case 3: regs->rdx = 0xdeadf00d; - case 2: regs->rcx = 0xdeadf00d; - case 1: regs->rbx = 0xdeadf00d; + case 6: + regs->rbp = 0xdeadf00d; + case 5: + regs->rdi = 0xdeadf00d; + case 4: + regs->rsi = 0xdeadf00d; + case 3: + regs->rdx = 0xdeadf00d; + case 2: + regs->rcx = 0xdeadf00d; + case 1: + regs->rbx = 0xdeadf00d; } } #endif diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c index aac22c595d..c0faa439b0 100644 --- a/xen/arch/x86/hvm/intercept.c +++ b/xen/arch/x86/hvm/intercept.c @@ -42,23 +42,22 @@ static bool_t hvm_mmio_accept(const struct hvm_io_handler *handler, /* Make sure the handler will accept the whole access. 
*/ last = hvm_mmio_last_byte(p); - if ( last != first && - !handler->mmio.ops->check(current, last) ) + if ( last != first && !handler->mmio.ops->check(current, last) ) domain_crash(current->domain); return 1; } -static int hvm_mmio_read(const struct hvm_io_handler *handler, - uint64_t addr, uint32_t size, uint64_t *data) +static int hvm_mmio_read(const struct hvm_io_handler *handler, uint64_t addr, + uint32_t size, uint64_t *data) { BUG_ON(handler->type != IOREQ_TYPE_COPY); return handler->mmio.ops->read(current, addr, size, data); } -static int hvm_mmio_write(const struct hvm_io_handler *handler, - uint64_t addr, uint32_t size, uint64_t data) +static int hvm_mmio_write(const struct hvm_io_handler *handler, uint64_t addr, + uint32_t size, uint64_t data) { BUG_ON(handler->type != IOREQ_TYPE_COPY); @@ -66,10 +65,7 @@ static int hvm_mmio_write(const struct hvm_io_handler *handler, } static const struct hvm_io_ops mmio_ops = { - .accept = hvm_mmio_accept, - .read = hvm_mmio_read, - .write = hvm_mmio_write -}; + .accept = hvm_mmio_accept, .read = hvm_mmio_read, .write = hvm_mmio_write}; static bool_t hvm_portio_accept(const struct hvm_io_handler *handler, const ioreq_t *p) @@ -82,8 +78,8 @@ static bool_t hvm_portio_accept(const struct hvm_io_handler *handler, return (p->addr >= start) && ((p->addr + p->size) <= end); } -static int hvm_portio_read(const struct hvm_io_handler *handler, - uint64_t addr, uint32_t size, uint64_t *data) +static int hvm_portio_read(const struct hvm_io_handler *handler, uint64_t addr, + uint32_t size, uint64_t *data) { uint32_t val = ~0u; int rc; @@ -96,8 +92,8 @@ static int hvm_portio_read(const struct hvm_io_handler *handler, return rc; } -static int hvm_portio_write(const struct hvm_io_handler *handler, - uint64_t addr, uint32_t size, uint64_t data) +static int hvm_portio_write(const struct hvm_io_handler *handler, uint64_t addr, + uint32_t size, uint64_t data) { uint32_t val = data; @@ -106,14 +102,11 @@ static int hvm_portio_write(const struct hvm_io_handler *handler, return handler->portio.action(IOREQ_WRITE, addr, size, &val); } -static const struct hvm_io_ops portio_ops = { - .accept = hvm_portio_accept, - .read = hvm_portio_read, - .write = hvm_portio_write -}; +static const struct hvm_io_ops portio_ops = {.accept = hvm_portio_accept, + .read = hvm_portio_read, + .write = hvm_portio_write}; -int hvm_process_io_intercept(const struct hvm_io_handler *handler, - ioreq_t *p) +int hvm_process_io_intercept(const struct hvm_io_handler *handler, ioreq_t *p) { const struct hvm_io_ops *ops = handler->ops; int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size; @@ -124,9 +117,7 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler, { for ( i = 0; i < p->count; i++ ) { - addr = (p->type == IOREQ_TYPE_COPY) ? - p->addr + step * i : - p->addr; + addr = (p->type == IOREQ_TYPE_COPY) ? 
p->addr + step * i : p->addr; data = 0; rc = ops->read(handler, addr, p->size, &data); if ( rc != X86EMUL_OKAY ) @@ -134,8 +125,8 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler, if ( p->data_is_ptr ) { - switch ( hvm_copy_to_guest_phys(p->data + step * i, - &data, p->size, current) ) + switch (hvm_copy_to_guest_phys(p->data + step * i, &data, + p->size, current)) { case HVMTRANS_okay: break; @@ -163,8 +154,8 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler, if ( p->data_is_ptr ) { data = 0; - switch ( hvm_copy_from_guest_phys(&data, p->data + step * i, - p->size) ) + switch (hvm_copy_from_guest_phys(&data, p->data + step * i, + p->size)) { case HVMTRANS_okay: break; @@ -184,9 +175,7 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler, else data = p->data; - addr = (p->type == IOREQ_TYPE_COPY) ? - p->addr + step * i : - p->addr; + addr = (p->type == IOREQ_TYPE_COPY) ? p->addr + step * i : p->addr; rc = ops->write(handler, addr, p->size, data); if ( rc != X86EMUL_OKAY ) break; @@ -216,13 +205,11 @@ static const struct hvm_io_handler *hvm_find_io_handler(const ioreq_t *p) struct domain *curr_d = current->domain; unsigned int i; - BUG_ON((p->type != IOREQ_TYPE_PIO) && - (p->type != IOREQ_TYPE_COPY)); + BUG_ON((p->type != IOREQ_TYPE_PIO) && (p->type != IOREQ_TYPE_COPY)); for ( i = 0; i < curr_d->arch.hvm.io_handler_count; i++ ) { - const struct hvm_io_handler *handler = - &curr_d->arch.hvm.io_handler[i]; + const struct hvm_io_handler *handler = &curr_d->arch.hvm.io_handler[i]; const struct hvm_io_ops *ops = handler->ops; if ( handler->type != p->type ) @@ -270,8 +257,7 @@ struct hvm_io_handler *hvm_next_io_handler(struct domain *d) return &d->arch.hvm.io_handler[i]; } -void register_mmio_handler(struct domain *d, - const struct hvm_mmio_ops *ops) +void register_mmio_handler(struct domain *d, const struct hvm_mmio_ops *ops) { struct hvm_io_handler *handler = hvm_next_io_handler(d); @@ -305,8 +291,7 @@ void relocate_portio_handler(struct domain *d, unsigned int old_port, for ( i = 0; i < d->arch.hvm.io_handler_count; i++ ) { - struct hvm_io_handler *handler = - &d->arch.hvm.io_handler[i]; + struct hvm_io_handler *handler = &d->arch.hvm.io_handler[i]; if ( handler->type != IOREQ_TYPE_PIO ) continue; diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c index a5b0a23f06..1987abd7b3 100644 --- a/xen/arch/x86/hvm/io.c +++ b/xen/arch/x86/hvm/io.c @@ -94,7 +94,7 @@ bool hvm_emulate_one_insn(hvm_emulate_validate_t *validate, const char *descr) else vio->mmio_access = (struct npfec){}; - switch ( rc ) + switch (rc) { case X86EMUL_UNHANDLEABLE: hvm_dump_emulation_state(XENLOG_G_WARNING, descr, &ctxt, rc); @@ -120,9 +120,9 @@ bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn, { struct hvm_vcpu_io *vio = ¤t->arch.hvm.hvm_io; - vio->mmio_access = access.gla_valid && - access.kind == npfec_kind_with_gla - ? access : (struct npfec){}; + vio->mmio_access = access.gla_valid && access.kind == npfec_kind_with_gla + ? 
access + : (struct npfec){}; vio->mmio_gla = gla & PAGE_MASK; vio->mmio_gpfn = gpfn; return handle_mmio(); @@ -145,7 +145,7 @@ bool handle_pio(uint16_t port, unsigned int size, int dir) if ( hvm_ioreq_needs_completion(&vio->io_req) ) vio->io_completion = HVMIO_pio_completion; - switch ( rc ) + switch (rc) { case X86EMUL_OKAY: if ( dir == IOREQ_READ ) @@ -184,7 +184,7 @@ static bool_t g2m_portio_accept(const struct hvm_io_handler *handler, struct g2m_ioport *g2m_ioport; unsigned int start, end; - list_for_each_entry( g2m_ioport, &hvm->g2m_ioport_list, list ) + list_for_each_entry (g2m_ioport, &hvm->g2m_ioport_list, list) { start = g2m_ioport->gport; end = start + g2m_ioport->np; @@ -198,14 +198,14 @@ static bool_t g2m_portio_accept(const struct hvm_io_handler *handler, return 0; } -static int g2m_portio_read(const struct hvm_io_handler *handler, - uint64_t addr, uint32_t size, uint64_t *data) +static int g2m_portio_read(const struct hvm_io_handler *handler, uint64_t addr, + uint32_t size, uint64_t *data) { struct hvm_vcpu_io *vio = ¤t->arch.hvm.hvm_io; const struct g2m_ioport *g2m_ioport = vio->g2m_ioport; unsigned int mport = (addr - g2m_ioport->gport) + g2m_ioport->mport; - switch ( size ) + switch (size) { case 1: *data = inb(mport); @@ -223,14 +223,14 @@ static int g2m_portio_read(const struct hvm_io_handler *handler, return X86EMUL_OKAY; } -static int g2m_portio_write(const struct hvm_io_handler *handler, - uint64_t addr, uint32_t size, uint64_t data) +static int g2m_portio_write(const struct hvm_io_handler *handler, uint64_t addr, + uint32_t size, uint64_t data) { struct hvm_vcpu_io *vio = ¤t->arch.hvm.hvm_io; const struct g2m_ioport *g2m_ioport = vio->g2m_ioport; unsigned int mport = (addr - g2m_ioport->gport) + g2m_ioport->mport; - switch ( size ) + switch (size) { case 1: outb(data, mport); @@ -248,11 +248,9 @@ static int g2m_portio_write(const struct hvm_io_handler *handler, return X86EMUL_OKAY; } -static const struct hvm_io_ops g2m_portio_ops = { - .accept = g2m_portio_accept, - .read = g2m_portio_read, - .write = g2m_portio_write -}; +static const struct hvm_io_ops g2m_portio_ops = {.accept = g2m_portio_accept, + .read = g2m_portio_read, + .write = g2m_portio_write}; void register_g2m_portio_handler(struct domain *d) { @@ -300,8 +298,8 @@ static bool vpci_portio_accept(const struct hvm_io_handler *handler, return (p->addr == 0xcf8 && p->size == 4) || (p->addr & ~3) == 0xcfc; } -static int vpci_portio_read(const struct hvm_io_handler *handler, - uint64_t addr, uint32_t size, uint64_t *data) +static int vpci_portio_read(const struct hvm_io_handler *handler, uint64_t addr, + uint32_t size, uint64_t *data) { const struct domain *d = current->domain; unsigned int reg; @@ -383,7 +381,8 @@ void register_vpci_portio_handler(struct domain *d) handler->ops = &vpci_portio_ops; } -struct hvm_mmcfg { +struct hvm_mmcfg +{ struct list_head next; paddr_t addr; unsigned int size; @@ -397,7 +396,7 @@ static const struct hvm_mmcfg *vpci_mmcfg_find(const struct domain *d, { const struct hvm_mmcfg *mmcfg; - list_for_each_entry ( mmcfg, &d->arch.hvm.mmcfg_regions, next ) + list_for_each_entry (mmcfg, &d->arch.hvm.mmcfg_regions, next) if ( addr >= mmcfg->addr && addr < mmcfg->addr + mmcfg->size ) return mmcfg; @@ -432,8 +431,8 @@ static int vpci_mmcfg_accept(struct vcpu *v, unsigned long addr) return found; } -static int vpci_mmcfg_read(struct vcpu *v, unsigned long addr, - unsigned int len, unsigned long *data) +static int vpci_mmcfg_read(struct vcpu *v, unsigned long addr, unsigned int len, + 
unsigned long *data) { struct domain *d = v->domain; const struct hvm_mmcfg *mmcfg; @@ -533,7 +532,7 @@ int register_vpci_mmcfg_handler(struct domain *d, paddr_t addr, new->size = (end_bus - start_bus + 1) << 20; write_lock(&d->arch.hvm.mmcfg_lock); - list_for_each_entry ( mmcfg, &d->arch.hvm.mmcfg_regions, next ) + list_for_each_entry (mmcfg, &d->arch.hvm.mmcfg_regions, next) if ( new->addr < mmcfg->addr + mmcfg->size && mmcfg->addr < new->addr + new->size ) { @@ -541,8 +540,7 @@ int register_vpci_mmcfg_handler(struct domain *d, paddr_t addr, if ( new->addr == mmcfg->addr && new->start_bus == mmcfg->start_bus && - new->segment == mmcfg->segment && - new->size == mmcfg->size ) + new->segment == mmcfg->segment && new->size == mmcfg->size ) ret = 0; write_unlock(&d->arch.hvm.mmcfg_lock); xfree(new); @@ -565,8 +563,8 @@ void destroy_vpci_mmcfg(struct domain *d) write_lock(&d->arch.hvm.mmcfg_lock); while ( !list_empty(mmcfg_regions) ) { - struct hvm_mmcfg *mmcfg = list_first_entry(mmcfg_regions, - struct hvm_mmcfg, next); + struct hvm_mmcfg *mmcfg = + list_first_entry(mmcfg_regions, struct hvm_mmcfg, next); list_del(&mmcfg->next); xfree(mmcfg); diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c index 71f23227e6..febf28a1e6 100644 --- a/xen/arch/x86/hvm/ioreq.c +++ b/xen/arch/x86/hvm/ioreq.c @@ -43,8 +43,7 @@ static void set_ioreq_server(struct domain *d, unsigned int id, d->arch.hvm.ioreq_server.server[id] = s; } -#define GET_IOREQ_SERVER(d, id) \ - (d)->arch.hvm.ioreq_server.server[id] +#define GET_IOREQ_SERVER(d, id) (d)->arch.hvm.ioreq_server.server[id] static struct hvm_ioreq_server *get_ioreq_server(const struct domain *d, unsigned int id) @@ -63,10 +62,10 @@ static struct hvm_ioreq_server *get_ioreq_server(const struct domain *d, * This is a semantic that previously existed when ioreq servers * were held in a linked list. 
*/ -#define FOR_EACH_IOREQ_SERVER(d, id, s) \ +#define FOR_EACH_IOREQ_SERVER(d, id, s) \ for ( (id) = MAX_NR_IOREQ_SERVERS; (id) != 0; ) \ - if ( !(s = GET_IOREQ_SERVER(d, --(id))) ) \ - continue; \ + if ( !(s = GET_IOREQ_SERVER(d, --(id))) ) \ + continue; \ else static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v) @@ -89,9 +88,7 @@ bool hvm_io_pending(struct vcpu *v) { struct hvm_ioreq_vcpu *sv; - list_for_each_entry ( sv, - &s->ioreq_vcpu_list, - list_entry ) + list_for_each_entry (sv, &s->ioreq_vcpu_list, list_entry) { if ( sv->vcpu == v && sv->pending ) return true; @@ -151,18 +148,19 @@ static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p) return false; /* bail */ } - switch ( prev_state = state ) + switch (prev_state = state) { case STATE_IORESP_READY: /* IORESP_READY -> NONE */ p->state = STATE_IOREQ_NONE; hvm_io_assist(sv, p->data); break; - case STATE_IOREQ_READY: /* IOREQ_{READY,INPROCESS} -> IORESP_READY */ + case STATE_IOREQ_READY: /* IOREQ_{READY,INPROCESS} -> IORESP_READY */ case STATE_IOREQ_INPROCESS: - wait_on_xen_event_channel(sv->ioreq_evtchn, - ({ state = p->state; - smp_rmb(); - state != prev_state; })); + wait_on_xen_event_channel(sv->ioreq_evtchn, ({ + state = p->state; + smp_rmb(); + state != prev_state; + })); goto recheck; default: gdprintk(XENLOG_ERR, "Weird HVM iorequest state %u\n", state); @@ -193,9 +191,7 @@ bool handle_hvm_io_completion(struct vcpu *v) { struct hvm_ioreq_vcpu *sv; - list_for_each_entry ( sv, - &s->ioreq_vcpu_list, - list_entry ) + list_for_each_entry (sv, &s->ioreq_vcpu_list, list_entry) { if ( sv->vcpu == v && sv->pending ) { @@ -210,7 +206,7 @@ bool handle_hvm_io_completion(struct vcpu *v) io_completion = vio->io_completion; vio->io_completion = HVMIO_no_completion; - switch ( io_completion ) + switch (io_completion) { case HVMIO_no_completion: break; @@ -219,8 +215,7 @@ bool handle_hvm_io_completion(struct vcpu *v) return handle_mmio(); case HVMIO_pio_completion: - return handle_pio(vio->io_req.addr, vio->io_req.size, - vio->io_req.dir); + return handle_pio(vio->io_req.addr, vio->io_req.size, vio->io_req.dir); case HVMIO_realmode_completion: { @@ -274,8 +269,7 @@ static gfn_t hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s) return hvm_alloc_legacy_ioreq_gfn(s); } -static bool hvm_free_legacy_ioreq_gfn(struct hvm_ioreq_server *s, - gfn_t gfn) +static bool hvm_free_legacy_ioreq_gfn(struct hvm_ioreq_server *s, gfn_t gfn) { struct domain *d = s->target; unsigned int i; @@ -283,7 +277,7 @@ static bool hvm_free_legacy_ioreq_gfn(struct hvm_ioreq_server *s, for ( i = HVM_PARAM_IOREQ_PFN; i <= HVM_PARAM_BUFIOREQ_PFN; i++ ) { if ( gfn_eq(gfn, _gfn(d->arch.hvm.params[i])) ) - break; + break; } if ( i > HVM_PARAM_BUFIOREQ_PFN ) return false; @@ -347,8 +341,7 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf) if ( gfn_eq(iorp->gfn, INVALID_GFN) ) return -ENOMEM; - rc = prepare_ring_for_helper(d, gfn_x(iorp->gfn), &iorp->page, - &iorp->va); + rc = prepare_ring_for_helper(d, gfn_x(iorp->gfn), &iorp->page, &iorp->va); if ( rc ) hvm_unmap_ioreq_gfn(s, buf); @@ -397,7 +390,7 @@ static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, bool buf) clear_page(iorp->va); return 0; - fail: +fail: if ( test_and_clear_bit(_PGC_allocated, &page->count_info) ) put_page(page); put_page_and_type(page); @@ -459,8 +452,7 @@ static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf) if ( gfn_eq(iorp->gfn, INVALID_GFN) ) return; - if ( guest_physmap_remove_page(d, iorp->gfn, - page_to_mfn(iorp->page), 0) ) + if ( 
guest_physmap_remove_page(d, iorp->gfn, page_to_mfn(iorp->page), 0) ) domain_crash(d); clear_page(iorp->va); } @@ -476,8 +468,7 @@ static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s, bool buf) clear_page(iorp->va); - rc = guest_physmap_add_page(d, iorp->gfn, - page_to_mfn(iorp->page), 0); + rc = guest_physmap_add_page(d, iorp->gfn, page_to_mfn(iorp->page), 0); if ( rc == 0 ) paging_mark_pfn_dirty(d, _pfn(gfn_x(iorp->gfn))); @@ -497,11 +488,9 @@ static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s, } } -#define HANDLE_BUFIOREQ(s) \ - ((s)->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF) +#define HANDLE_BUFIOREQ(s) ((s)->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF) -static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s, - struct vcpu *v) +static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s, struct vcpu *v) { struct hvm_ioreq_vcpu *sv; int rc; @@ -541,14 +530,14 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s, spin_unlock(&s->lock); return 0; - fail3: +fail3: free_xen_event_channel(v->domain, sv->ioreq_evtchn); - fail2: +fail2: spin_unlock(&s->lock); xfree(sv); - fail1: +fail1: return rc; } @@ -559,9 +548,7 @@ static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s, spin_lock(&s->lock); - list_for_each_entry ( sv, - &s->ioreq_vcpu_list, - list_entry ) + list_for_each_entry (sv, &s->ioreq_vcpu_list, list_entry) { if ( sv->vcpu != v ) continue; @@ -586,10 +573,7 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s) spin_lock(&s->lock); - list_for_each_entry_safe ( sv, - next, - &s->ioreq_vcpu_list, - list_entry ) + list_for_each_entry_safe(sv, next, &s->ioreq_vcpu_list, list_entry) { struct vcpu *v = sv->vcpu; @@ -667,15 +651,15 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s, char *name; rc = asprintf(&name, "ioreq_server %d %s", id, - (i == XEN_DMOP_IO_RANGE_PORT) ? "port" : - (i == XEN_DMOP_IO_RANGE_MEMORY) ? "memory" : - (i == XEN_DMOP_IO_RANGE_PCI) ? "pci" : - ""); + (i == XEN_DMOP_IO_RANGE_PORT) + ? "port" + : (i == XEN_DMOP_IO_RANGE_MEMORY) + ? "memory" + : (i == XEN_DMOP_IO_RANGE_PCI) ? 
"pci" : ""); if ( rc ) goto fail; - s->range[i] = rangeset_new(s->target, name, - RANGESETF_prettyprint_hex); + s->range[i] = rangeset_new(s->target, name, RANGESETF_prettyprint_hex); xfree(name); @@ -688,7 +672,7 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s, return 0; - fail: +fail: hvm_ioreq_server_free_rangesets(s); return rc; @@ -708,12 +692,10 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s) s->enabled = true; - list_for_each_entry ( sv, - &s->ioreq_vcpu_list, - list_entry ) + list_for_each_entry (sv, &s->ioreq_vcpu_list, list_entry) hvm_update_ioreq_evtchn(s, sv); - done: +done: spin_unlock(&s->lock); } @@ -729,13 +711,12 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s) s->enabled = false; - done: +done: spin_unlock(&s->lock); } -static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, - struct domain *d, int bufioreq_handling, - ioservid_t id) +static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d, + int bufioreq_handling, ioservid_t id) { struct domain *currd = current->domain; struct vcpu *v; @@ -759,7 +740,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, s->bufioreq_handling = bufioreq_handling; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { rc = hvm_ioreq_server_add_vcpu(s, v); if ( rc ) @@ -768,7 +749,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, return 0; - fail_add: +fail_add: hvm_ioreq_server_remove_all_vcpus(s); hvm_ioreq_server_unmap_pages(s); @@ -848,7 +829,7 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling, return 0; - fail: +fail: spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock); domain_unpause(d); @@ -892,7 +873,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id) rc = 0; - out: +out: spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock); return rc; @@ -939,7 +920,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id, rc = 0; - out: +out: spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock); return rc; @@ -969,7 +950,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id, if ( rc ) goto out; - switch ( idx ) + switch (idx) { case XENMEM_resource_ioreq_server_frame_bufioreq: rc = -ENOENT; @@ -990,7 +971,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id, break; } - out: +out: spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock); return rc; @@ -1019,7 +1000,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id, if ( s->emulator != current->domain ) goto out; - switch ( type ) + switch (type) { case XEN_DMOP_IO_RANGE_PORT: case XEN_DMOP_IO_RANGE_MEMORY: @@ -1042,7 +1023,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id, rc = rangeset_add_range(r, start, end); - out: +out: spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock); return rc; @@ -1071,7 +1052,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id, if ( s->emulator != current->domain ) goto out; - switch ( type ) + switch (type) { case XEN_DMOP_IO_RANGE_PORT: case XEN_DMOP_IO_RANGE_MEMORY: @@ -1094,7 +1075,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id, rc = rangeset_remove_range(r, start, end); - out: +out: spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock); return rc; @@ -1134,7 +1115,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id, rc = p2m_set_ioreq_server(d, flags, s); - out: +out: spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock); if ( rc == 0 && flags == 0 ) @@ 
-1148,8 +1129,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id, return rc; } -int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id, - bool enabled) +int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id, bool enabled) { struct hvm_ioreq_server *s; int rc; @@ -1177,7 +1157,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id, rc = 0; - out: +out: spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock); return rc; } @@ -1201,7 +1181,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v) return 0; - fail: +fail: while ( id-- != 0 ) { s = GET_IOREQ_SERVER(d, id); @@ -1225,7 +1205,7 @@ void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v) spin_lock_recursive(&d->arch.hvm.ioreq_server.lock); FOR_EACH_IOREQ_SERVER(d, id, s) - hvm_ioreq_server_remove_vcpu(s, v); + hvm_ioreq_server_remove_vcpu(s, v); spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock); } @@ -1256,8 +1236,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d) spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock); } -struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d, - ioreq_t *p) +struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d, ioreq_t *p) { struct hvm_ioreq_server *s; uint32_t cf8; @@ -1270,8 +1249,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d, cf8 = d->arch.hvm.pci_cf8; - if ( p->type == IOREQ_TYPE_PIO && - (p->addr & ~3) == 0xcfc && + if ( p->type == IOREQ_TYPE_PIO && (p->addr & ~3) == 0xcfc && CF8_ENABLED(cf8) ) { uint32_t x86_fam; @@ -1284,10 +1262,9 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d, type = XEN_DMOP_IO_RANGE_PCI; addr = ((uint64_t)sbdf.sbdf << 32) | reg; /* AMD extended configuration space access? */ - if ( CF8_ADDR_HI(cf8) && - d->arch.cpuid->x86_vendor == X86_VENDOR_AMD && - (x86_fam = get_cpu_family( - d->arch.cpuid->basic.raw_fms, NULL, NULL)) > 0x10 && + if ( CF8_ADDR_HI(cf8) && d->arch.cpuid->x86_vendor == X86_VENDOR_AMD && + (x86_fam = get_cpu_family(d->arch.cpuid->basic.raw_fms, NULL, + NULL)) > 0x10 && x86_fam <= 0x17 ) { uint64_t msr_val; @@ -1299,8 +1276,8 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d, } else { - type = (p->type == IOREQ_TYPE_PIO) ? - XEN_DMOP_IO_RANGE_PORT : XEN_DMOP_IO_RANGE_MEMORY; + type = (p->type == IOREQ_TYPE_PIO) ? XEN_DMOP_IO_RANGE_PORT + : XEN_DMOP_IO_RANGE_MEMORY; addr = p->addr; } @@ -1313,7 +1290,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d, r = s->range[type]; - switch ( type ) + switch (type) { unsigned long start, end; @@ -1354,10 +1331,8 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p) struct domain *d = current->domain; struct hvm_ioreq_page *iorp; buffered_iopage_t *pg; - buf_ioreq_t bp = { .data = p->data, - .addr = p->addr, - .type = p->type, - .dir = p->dir }; + buf_ioreq_t bp = { + .data = p->data, .addr = p->addr, .type = p->type, .dir = p->dir}; /* Timeoffset sends 64b data, but no address. Use two consecutive slots. 
*/ int qw = 0; @@ -1381,7 +1356,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p) if ( (p->addr > 0xffffful) || p->data_is_ptr || (p->count != 1) ) return 0; - switch ( p->size ) + switch (p->size) { case 1: bp.size = 0; @@ -1416,7 +1391,8 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p) if ( qw ) { bp.data = p->data >> 32; - pg->buf_ioreq[(pg->ptrs.write_pointer+1) % IOREQ_BUFFER_SLOT_NUM] = bp; + pg->buf_ioreq[(pg->ptrs.write_pointer + 1) % IOREQ_BUFFER_SLOT_NUM] = + bp; } /* Make the ioreq_t visible /before/ write_pointer. */ @@ -1431,8 +1407,8 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p) union bufioreq_pointers old = pg->ptrs, new; unsigned int n = old.read_pointer / IOREQ_BUFFER_SLOT_NUM; - new.read_pointer = old.read_pointer - n * IOREQ_BUFFER_SLOT_NUM; - new.write_pointer = old.write_pointer - n * IOREQ_BUFFER_SLOT_NUM; + new.read_pointer = old.read_pointer - n *IOREQ_BUFFER_SLOT_NUM; + new.write_pointer = old.write_pointer - n *IOREQ_BUFFER_SLOT_NUM; cmpxchg(&pg->ptrs.full, old.full, new.full); } @@ -1442,8 +1418,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p) return X86EMUL_OKAY; } -int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p, - bool buffered) +int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p, bool buffered) { struct vcpu *curr = current; struct domain *d = curr->domain; @@ -1457,9 +1432,7 @@ int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p, if ( unlikely(!vcpu_start_shutdown_deferral(curr)) ) return X86EMUL_RETRY; - list_for_each_entry ( sv, - &s->ioreq_vcpu_list, - list_entry ) + list_for_each_entry (sv, &s->ioreq_vcpu_list, list_entry) { if ( sv->vcpu == curr ) { @@ -1520,8 +1493,8 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered) return failed; } -static int hvm_access_cf8( - int dir, unsigned int port, unsigned int bytes, uint32_t *val) +static int hvm_access_cf8(int dir, unsigned int port, unsigned int bytes, + uint32_t *val) { struct domain *d = current->domain; diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c index e03a87ad50..36226758f3 100644 --- a/xen/arch/x86/hvm/irq.c +++ b/xen/arch/x86/hvm/irq.c @@ -1,8 +1,8 @@ /****************************************************************************** * irq.c - * + * * Interrupt distribution and delivery logic. - * + * * Copyright (c) 2006, K A Fraser, XenSource Inc. 
* * This program is free software; you can redistribute it and/or modify it @@ -35,8 +35,7 @@ bool hvm_domain_use_pirq(const struct domain *d, const struct pirq *pirq) /* Must be called with hvm_domain->irq_lock hold */ static void assert_gsi(struct domain *d, unsigned ioapic_gsi) { - struct pirq *pirq = - pirq_info(d, domain_emuirq_to_pirq(d, ioapic_gsi)); + struct pirq *pirq = pirq_info(d, domain_emuirq_to_pirq(d, ioapic_gsi)); if ( hvm_domain_use_pirq(d, pirq) ) { @@ -90,22 +89,21 @@ static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq) /* Must be called with hvm_domain->irq_lock hold */ static void deassert_irq(struct domain *d, unsigned isa_irq) { - struct pirq *pirq = - pirq_info(d, domain_emuirq_to_pirq(d, isa_irq)); + struct pirq *pirq = pirq_info(d, domain_emuirq_to_pirq(d, isa_irq)); if ( !hvm_domain_use_pirq(d, pirq) ) vpic_irq_negative_edge(d, isa_irq); } -static void __hvm_pci_intx_assert( - struct domain *d, unsigned int device, unsigned int intx) +static void __hvm_pci_intx_assert(struct domain *d, unsigned int device, + unsigned int intx) { struct hvm_irq *hvm_irq = hvm_domain_irq(d); unsigned int gsi, link, isa_irq; ASSERT((device <= 31) && (intx <= 3)); - if ( __test_and_set_bit(device*4 + intx, &hvm_irq->pci_intx.i) ) + if ( __test_and_set_bit(device * 4 + intx, &hvm_irq->pci_intx.i) ) return; gsi = hvm_pci_intx_gsi(device, intx); @@ -117,30 +115,30 @@ static void __hvm_pci_intx_assert( if ( hvm_irq->gsi_assert_count[gsi]++ == 0 ) assert_gsi(d, gsi); - link = hvm_pci_intx_link(device, intx); + link = hvm_pci_intx_link(device, intx); isa_irq = hvm_irq->pci_link.route[link]; if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq && (hvm_irq->gsi_assert_count[isa_irq]++ == 0) ) assert_irq(d, isa_irq, isa_irq); } -void hvm_pci_intx_assert( - struct domain *d, unsigned int device, unsigned int intx) +void hvm_pci_intx_assert(struct domain *d, unsigned int device, + unsigned int intx) { spin_lock(&d->arch.hvm.irq_lock); __hvm_pci_intx_assert(d, device, intx); spin_unlock(&d->arch.hvm.irq_lock); } -static void __hvm_pci_intx_deassert( - struct domain *d, unsigned int device, unsigned int intx) +static void __hvm_pci_intx_deassert(struct domain *d, unsigned int device, + unsigned int intx) { struct hvm_irq *hvm_irq = hvm_domain_irq(d); unsigned int gsi, link, isa_irq; ASSERT((device <= 31) && (intx <= 3)); - if ( !__test_and_clear_bit(device*4 + intx, &hvm_irq->pci_intx.i) ) + if ( !__test_and_clear_bit(device * 4 + intx, &hvm_irq->pci_intx.i) ) return; gsi = hvm_pci_intx_gsi(device, intx); @@ -151,15 +149,15 @@ static void __hvm_pci_intx_deassert( } --hvm_irq->gsi_assert_count[gsi]; - link = hvm_pci_intx_link(device, intx); + link = hvm_pci_intx_link(device, intx); isa_irq = hvm_irq->pci_link.route[link]; if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq && (--hvm_irq->gsi_assert_count[isa_irq] == 0) ) deassert_irq(d, isa_irq); } -void hvm_pci_intx_deassert( - struct domain *d, unsigned int device, unsigned int intx) +void hvm_pci_intx_deassert(struct domain *d, unsigned int device, + unsigned int intx) { spin_lock(&d->arch.hvm.irq_lock); __hvm_pci_intx_deassert(d, device, intx); @@ -232,8 +230,7 @@ int hvm_isa_irq_assert(struct domain *d, unsigned int isa_irq, return vector; } -void hvm_isa_irq_deassert( - struct domain *d, unsigned int isa_irq) +void hvm_isa_irq_deassert(struct domain *d, unsigned int isa_irq) { struct hvm_irq *hvm_irq = hvm_domain_irq(d); unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); @@ -266,7 +263,7 @@ static 
void hvm_set_callback_irq_level(struct vcpu *v) hvm_irq->callback_via_asserted = asserted; /* Callback status has changed. Update the callback via. */ - switch ( hvm_irq->callback_via_type ) + switch (hvm_irq->callback_via_type) { case HVMIRQ_callback_gsi: gsi = hvm_irq->callback_via.gsi; @@ -283,7 +280,7 @@ static void hvm_set_callback_irq_level(struct vcpu *v) } break; case HVMIRQ_callback_pci_intx: - pdev = hvm_irq->callback_via.pci.dev; + pdev = hvm_irq->callback_via.pci.dev; pintx = hvm_irq->callback_via.pci.intx; if ( asserted ) __hvm_pci_intx_assert(d, pdev, pintx); @@ -293,7 +290,7 @@ static void hvm_set_callback_irq_level(struct vcpu *v) break; } - out: +out: spin_unlock(&d->arch.hvm.irq_lock); } @@ -351,8 +348,7 @@ int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq) for ( i = 0; i < NR_LINK; i++ ) if ( hvm_irq->dpci->link_cnt[i] && hvm_irq->pci_link.route[i] ) - set_bit(hvm_irq->pci_link.route[i], - &hvm_irq->dpci->isairq_map); + set_bit(hvm_irq->pci_link.route[i], &hvm_irq->dpci->isairq_map); } if ( hvm_irq->pci_link_assert_count[link] == 0 ) @@ -367,24 +363,24 @@ int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq) vpic_irq_positive_edge(d, isa_irq); } - out: +out: spin_unlock(&d->arch.hvm.irq_lock); - dprintk(XENLOG_G_INFO, "Dom%u PCI link %u changed %u -> %u\n", - d->domain_id, link, old_isa_irq, isa_irq); + dprintk(XENLOG_G_INFO, "Dom%u PCI link %u changed %u -> %u\n", d->domain_id, + link, old_isa_irq, isa_irq); return 0; } int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data) { - uint32_t tmp = (uint32_t) addr; - uint8_t dest = (tmp & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT; - uint8_t dest_mode = !!(tmp & MSI_ADDR_DESTMODE_MASK); - uint8_t delivery_mode = (data & MSI_DATA_DELIVERY_MODE_MASK) - >> MSI_DATA_DELIVERY_MODE_SHIFT; - uint8_t trig_mode = (data & MSI_DATA_TRIGGER_MASK) - >> MSI_DATA_TRIGGER_SHIFT; + uint32_t tmp = (uint32_t)addr; + uint8_t dest = (tmp & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT; + uint8_t dest_mode = !!(tmp & MSI_ADDR_DESTMODE_MASK); + uint8_t delivery_mode = + (data & MSI_DATA_DELIVERY_MODE_MASK) >> MSI_DATA_DELIVERY_MODE_SHIFT; + uint8_t trig_mode = + (data & MSI_DATA_TRIGGER_MASK) >> MSI_DATA_TRIGGER_SHIFT; uint8_t vector = data & MSI_DATA_VECTOR_MASK; if ( !vector ) @@ -423,7 +419,7 @@ int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data) void hvm_set_callback_via(struct domain *d, uint64_t via) { struct hvm_irq *hvm_irq = hvm_domain_irq(d); - unsigned int gsi=0, pdev=0, pintx=0; + unsigned int gsi = 0, pdev = 0, pintx = 0; uint8_t via_type; struct vcpu *v; @@ -441,7 +437,7 @@ void hvm_set_callback_via(struct domain *d, uint64_t via) /* Tear down old callback via. */ if ( hvm_irq->callback_via_asserted ) { - switch ( hvm_irq->callback_via_type ) + switch (hvm_irq->callback_via_type) { case HVMIRQ_callback_gsi: gsi = hvm_irq->callback_via.gsi; @@ -449,7 +445,7 @@ void hvm_set_callback_via(struct domain *d, uint64_t via) vpic_irq_negative_edge(d, gsi); break; case HVMIRQ_callback_pci_intx: - pdev = hvm_irq->callback_via.pci.dev; + pdev = hvm_irq->callback_via.pci.dev; pintx = hvm_irq->callback_via.pci.intx; __hvm_pci_intx_deassert(d, pdev, pintx); break; @@ -459,7 +455,7 @@ void hvm_set_callback_via(struct domain *d, uint64_t via) } /* Set up new callback via. 
*/ - switch ( hvm_irq->callback_via_type = via_type ) + switch (hvm_irq->callback_via_type = via_type) { case HVMIRQ_callback_gsi: gsi = hvm_irq->callback_via.gsi = (uint8_t)via; @@ -474,10 +470,10 @@ void hvm_set_callback_via(struct domain *d, uint64_t via) } break; case HVMIRQ_callback_pci_intx: - pdev = hvm_irq->callback_via.pci.dev = (uint8_t)(via >> 11) & 31; + pdev = hvm_irq->callback_via.pci.dev = (uint8_t)(via >> 11) & 31; pintx = hvm_irq->callback_via.pci.intx = (uint8_t)via & 3; if ( hvm_irq->callback_via_asserted ) - __hvm_pci_intx_assert(d, pdev, pintx); + __hvm_pci_intx_assert(d, pdev, pintx); break; case HVMIRQ_callback_vector: hvm_irq->callback_via.vector = (uint8_t)via; @@ -488,13 +484,13 @@ void hvm_set_callback_via(struct domain *d, uint64_t via) spin_unlock(&d->arch.hvm.irq_lock); - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) if ( is_vcpu_online(v) ) hvm_assert_evtchn_irq(v); #ifndef NDEBUG printk(XENLOG_G_INFO "Dom%u callback via changed to ", d->domain_id); - switch ( via_type ) + switch (via_type) { case HVMIRQ_callback_gsi: printk("GSI %u\n", gsi); @@ -523,8 +519,8 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v) if ( unlikely(v->mce_pending) ) return hvm_intack_mce; - if ( (plat->irq->callback_via_type == HVMIRQ_callback_vector) - && vcpu_info(v, evtchn_upcall_pending) ) + if ( (plat->irq->callback_via_type == HVMIRQ_callback_vector) && + vcpu_info(v, evtchn_upcall_pending) ) return hvm_intack_vector(plat->irq->callback_via.vector); if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output ) @@ -537,12 +533,12 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v) return hvm_intack_none; } -struct hvm_intack hvm_vcpu_ack_pending_irq( - struct vcpu *v, struct hvm_intack intack) +struct hvm_intack hvm_vcpu_ack_pending_irq(struct vcpu *v, + struct hvm_intack intack) { int vector; - switch ( intack.source ) + switch (intack.source) { case hvm_intsrc_nmi: if ( !test_and_clear_bool(v->nmi_pending) ) @@ -585,40 +581,36 @@ int hvm_local_events_need_delivery(struct vcpu *v) static void irq_dump(struct domain *d) { struct hvm_irq *hvm_irq = hvm_domain_irq(d); - int i; + int i; printk("Domain %d:\n", d->domain_id); - printk("PCI 0x%16.16"PRIx64"%16.16"PRIx64 - " ISA 0x%8.8"PRIx32" ROUTE %u %u %u %u\n", - hvm_irq->pci_intx.pad[0], hvm_irq->pci_intx.pad[1], - (uint32_t) hvm_irq->isa_irq.pad[0], - hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1], - hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]); + printk("PCI 0x%16.16" PRIx64 "%16.16" PRIx64 " ISA 0x%8.8" PRIx32 + " ROUTE %u %u %u %u\n", + hvm_irq->pci_intx.pad[0], hvm_irq->pci_intx.pad[1], + (uint32_t)hvm_irq->isa_irq.pad[0], hvm_irq->pci_link.route[0], + hvm_irq->pci_link.route[1], hvm_irq->pci_link.route[2], + hvm_irq->pci_link.route[3]); for ( i = 0; i < hvm_irq->nr_gsis && i + 8 <= hvm_irq->nr_gsis; i += 8 ) - printk("GSI [%x - %x] %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8 - " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n", - i, i+7, - hvm_irq->gsi_assert_count[i+0], - hvm_irq->gsi_assert_count[i+1], - hvm_irq->gsi_assert_count[i+2], - hvm_irq->gsi_assert_count[i+3], - hvm_irq->gsi_assert_count[i+4], - hvm_irq->gsi_assert_count[i+5], - hvm_irq->gsi_assert_count[i+6], - hvm_irq->gsi_assert_count[i+7]); + printk( + "GSI [%x - %x] %2.2" PRIu8 " %2.2" PRIu8 " %2.2" PRIu8 " %2.2" PRIu8 + " %2.2" PRIu8 " %2.2" PRIu8 " %2.2" PRIu8 " %2.2" PRIu8 "\n", + i, i + 7, hvm_irq->gsi_assert_count[i + 0], + hvm_irq->gsi_assert_count[i + 1], hvm_irq->gsi_assert_count[i + 2], + 
hvm_irq->gsi_assert_count[i + 3], hvm_irq->gsi_assert_count[i + 4], + hvm_irq->gsi_assert_count[i + 5], hvm_irq->gsi_assert_count[i + 6], + hvm_irq->gsi_assert_count[i + 7]); if ( i != hvm_irq->nr_gsis ) { printk("GSI [%x - %x]", i, hvm_irq->nr_gsis - 1); - for ( ; i < hvm_irq->nr_gsis; i++) - printk(" %2"PRIu8, hvm_irq->gsi_assert_count[i]); + for ( ; i < hvm_irq->nr_gsis; i++ ) + printk(" %2" PRIu8, hvm_irq->gsi_assert_count[i]); printk("\n"); } - printk("Link %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n", - hvm_irq->pci_link_assert_count[0], - hvm_irq->pci_link_assert_count[1], + printk("Link %2.2" PRIu8 " %2.2" PRIu8 " %2.2" PRIu8 " %2.2" PRIu8 "\n", + hvm_irq->pci_link_assert_count[0], hvm_irq->pci_link_assert_count[1], hvm_irq->pci_link_assert_count[2], hvm_irq->pci_link_assert_count[3]); - printk("Callback via %i:%#"PRIx32",%s asserted\n", - hvm_irq->callback_via_type, hvm_irq->callback_via.gsi, + printk("Callback via %i:%#" PRIx32 ",%s asserted\n", + hvm_irq->callback_via_type, hvm_irq->callback_via.gsi, hvm_irq->callback_via_asserted ? "" : " not"); } @@ -630,7 +622,7 @@ static void dump_irq_info(unsigned char key) rcu_read_lock(&domlist_read_lock); - for_each_domain ( d ) + for_each_domain (d) if ( is_hvm_domain(d) ) irq_dump(d); @@ -653,7 +645,7 @@ static int irq_save_pci(struct vcpu *v, hvm_domain_context_t *h) spin_lock(&d->arch.hvm.irq_lock); - pdev = hvm_irq->callback_via.pci.dev; + pdev = hvm_irq->callback_via.pci.dev; pintx = hvm_irq->callback_via.pci.intx; asserted = (hvm_irq->callback_via_asserted && (hvm_irq->callback_via_type == HVMIRQ_callback_pci_intx)); @@ -670,7 +662,7 @@ static int irq_save_pci(struct vcpu *v, hvm_domain_context_t *h) rc = hvm_save_entry(PCI_IRQ, 0, h, &hvm_irq->pci_intx); if ( asserted ) - __hvm_pci_intx_assert(d, pdev, pintx); + __hvm_pci_intx_assert(d, pdev, pintx); spin_unlock(&d->arch.hvm.irq_lock); @@ -683,7 +675,7 @@ static int irq_save_isa(struct vcpu *v, hvm_domain_context_t *h) struct hvm_irq *hvm_irq = hvm_domain_irq(d); /* Save ISA IRQ lines */ - return ( hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq) ); + return (hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq)); } static int irq_save_link(struct vcpu *v, hvm_domain_context_t *h) @@ -692,7 +684,7 @@ static int irq_save_link(struct vcpu *v, hvm_domain_context_t *h) struct hvm_irq *hvm_irq = hvm_domain_irq(d); /* Save PCI-ISA link state */ - return ( hvm_save_entry(PCI_LINK, 0, h, &hvm_irq->pci_link) ); + return (hvm_save_entry(PCI_LINK, 0, h, &hvm_irq->pci_link)); } static int irq_load_pci(struct domain *d, hvm_domain_context_t *h) @@ -707,7 +699,7 @@ static int irq_load_pci(struct domain *d, hvm_domain_context_t *h) /* Clear the PCI link assert counts */ for ( link = 0; link < 4; link++ ) hvm_irq->pci_link_assert_count[link] = 0; - + /* Clear the GSI link assert counts */ for ( gsi = 0; gsi < hvm_irq->nr_gsis; gsi++ ) hvm_irq->gsi_assert_count[gsi] = 0; @@ -715,7 +707,7 @@ static int irq_load_pci(struct domain *d, hvm_domain_context_t *h) /* Recalculate the counts from the IRQ line state */ for ( dev = 0; dev < 32; dev++ ) for ( intx = 0; intx < 4; intx++ ) - if ( test_bit(dev*4 + intx, &hvm_irq->pci_intx.i) ) + if ( test_bit(dev * 4 + intx, &hvm_irq->pci_intx.i) ) { /* Direct GSI assert */ gsi = hvm_pci_intx_gsi(dev, intx); @@ -746,7 +738,6 @@ static int irq_load_isa(struct domain *d, hvm_domain_context_t *h) return 0; } - static int irq_load_link(struct domain *d, hvm_domain_context_t *h) { struct hvm_irq *hvm_irq = hvm_domain_irq(d); @@ -760,9 +751,9 @@ static int 
irq_load_link(struct domain *d, hvm_domain_context_t *h) for ( link = 0; link < 4; link++ ) if ( hvm_irq->pci_link.route[link] > 15 ) { - gdprintk(XENLOG_ERR, - "HVM restore: PCI-ISA link %u out of range (%u)\n", - link, hvm_irq->pci_link.route[link]); + gdprintk(XENLOG_ERR, + "HVM restore: PCI-ISA link %u out of range (%u)\n", link, + hvm_irq->pci_link.route[link]); return -EINVAL; } @@ -781,9 +772,9 @@ static int irq_load_link(struct domain *d, hvm_domain_context_t *h) return 0; } -HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci, - 1, HVMSR_PER_DOM); -HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa, - 1, HVMSR_PER_DOM); -HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link, - 1, HVMSR_PER_DOM); +HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci, 1, + HVMSR_PER_DOM); +HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa, 1, + HVMSR_PER_DOM); +HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link, 1, + HVMSR_PER_DOM); diff --git a/xen/arch/x86/hvm/monitor.c b/xen/arch/x86/hvm/monitor.c index 2a41ccc930..2caed98719 100644 --- a/xen/arch/x86/hvm/monitor.c +++ b/xen/arch/x86/hvm/monitor.c @@ -46,12 +46,10 @@ bool hvm_monitor_cr(unsigned int index, unsigned long value, unsigned long old) { bool sync = ad->monitor.write_ctrlreg_sync & ctrlreg_bitmask; - vm_event_request_t req = { - .reason = VM_EVENT_REASON_WRITE_CTRLREG, - .u.write_ctrlreg.index = index, - .u.write_ctrlreg.new_value = value, - .u.write_ctrlreg.old_value = old - }; + vm_event_request_t req = {.reason = VM_EVENT_REASON_WRITE_CTRLREG, + .u.write_ctrlreg.index = index, + .u.write_ctrlreg.new_value = value, + .u.write_ctrlreg.old_value = old}; if ( monitor_traps(curr, sync, &req) >= 0 ) return 1; @@ -70,11 +68,11 @@ bool hvm_monitor_emul_unimplemented(void) */ vm_event_request_t req = { .reason = VM_EVENT_REASON_EMUL_UNIMPLEMENTED, - .vcpu_id = curr->vcpu_id, + .vcpu_id = curr->vcpu_id, }; return curr->domain->arch.monitor.emul_unimplemented_enabled && - monitor_traps(curr, true, &req) == 1; + monitor_traps(curr, true, &req) == 1; } void hvm_monitor_msr(unsigned int msr, uint64_t new_value, uint64_t old_value) @@ -83,14 +81,12 @@ void hvm_monitor_msr(unsigned int msr, uint64_t new_value, uint64_t old_value) if ( monitored_msr(curr->domain, msr) && (!monitored_msr_onchangeonly(curr->domain, msr) || - new_value != old_value) ) + new_value != old_value) ) { - vm_event_request_t req = { - .reason = VM_EVENT_REASON_MOV_TO_MSR, - .u.mov_to_msr.msr = msr, - .u.mov_to_msr.new_value = new_value, - .u.mov_to_msr.old_value = old_value - }; + vm_event_request_t req = {.reason = VM_EVENT_REASON_MOV_TO_MSR, + .u.mov_to_msr.msr = msr, + .u.mov_to_msr.new_value = new_value, + .u.mov_to_msr.old_value = old_value}; monitor_traps(curr, 1, &req); } @@ -136,17 +132,17 @@ static inline unsigned long gfn_of_rip(unsigned long rip) int hvm_monitor_debug(unsigned long rip, enum hvm_monitor_debug_type type, unsigned long trap_type, unsigned long insn_length) { - /* - * rc < 0 error in monitor/vm_event, crash - * !rc continue normally - * rc > 0 paused waiting for response, work here is done - */ + /* + * rc < 0 error in monitor/vm_event, crash + * !rc continue normally + * rc > 0 paused waiting for response, work here is done + */ struct vcpu *curr = current; struct arch_domain *ad = &curr->domain->arch; vm_event_request_t req = {}; bool sync; - switch ( type ) + switch (type) { case HVM_MONITOR_SOFTWARE_BREAKPOINT: if ( !ad->monitor.software_breakpoint_enabled ) diff --git 
a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c index b8fa340d5a..d56c0e0a2a 100644 --- a/xen/arch/x86/hvm/mtrr.c +++ b/xen/arch/x86/hvm/mtrr.c @@ -27,14 +27,18 @@ #include /* Get page attribute fields (PAn) from PAT MSR. */ -#define pat_cr_2_paf(pat_cr,n) ((((uint64_t)pat_cr) >> ((n)<<3)) & 0xff) +#define pat_cr_2_paf(pat_cr, n) ((((uint64_t)pat_cr) >> ((n) << 3)) & 0xff) /* PAT entry to PTE flags (PAT, PCD, PWT bits). */ -static const uint8_t pat_entry_2_pte_flags[8] = { - 0, _PAGE_PWT, - _PAGE_PCD, _PAGE_PCD | _PAGE_PWT, - _PAGE_PAT, _PAGE_PAT | _PAGE_PWT, - _PAGE_PAT | _PAGE_PCD, _PAGE_PAT | _PAGE_PCD | _PAGE_PWT }; +static const uint8_t pat_entry_2_pte_flags[8] = {0, + _PAGE_PWT, + _PAGE_PCD, + _PAGE_PCD | _PAGE_PWT, + _PAGE_PAT, + _PAGE_PAT | _PAGE_PWT, + _PAGE_PAT | _PAGE_PCD, + _PAGE_PAT | _PAGE_PCD | + _PAGE_PWT}; /* Effective mm type lookup table, according to MTRR and PAT. */ static const uint8_t mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] = { @@ -45,14 +49,14 @@ static const uint8_t mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] = { #define WP MTRR_TYPE_WRPROT #define WT MTRR_TYPE_WRTHROUGH -/* PAT(UC, WC, RS, RS, WT, WP, WB, UC-) */ -/* MTRR(UC) */ {UC, WC, RS, RS, UC, UC, UC, UC}, -/* MTRR(WC) */ {UC, WC, RS, RS, UC, UC, WC, WC}, -/* MTRR(RS) */ {RS, RS, RS, RS, RS, RS, RS, RS}, -/* MTRR(RS) */ {RS, RS, RS, RS, RS, RS, RS, RS}, -/* MTRR(WT) */ {UC, WC, RS, RS, WT, WP, WT, UC}, -/* MTRR(WP) */ {UC, WC, RS, RS, WT, WP, WP, WC}, -/* MTRR(WB) */ {UC, WC, RS, RS, WT, WP, WB, UC} + /* PAT(UC, WC, RS, RS, WT, WP, WB, UC-) */ + /* MTRR(UC) */ {UC, WC, RS, RS, UC, UC, UC, UC}, + /* MTRR(WC) */ {UC, WC, RS, RS, UC, UC, WC, WC}, + /* MTRR(RS) */ {RS, RS, RS, RS, RS, RS, RS, RS}, + /* MTRR(RS) */ {RS, RS, RS, RS, RS, RS, RS, RS}, + /* MTRR(WT) */ {UC, WC, RS, RS, WT, WP, WT, UC}, + /* MTRR(WP) */ {UC, WC, RS, RS, WT, WP, WP, WC}, + /* MTRR(WB) */ {UC, WC, RS, RS, WT, WP, WB, UC} #undef UC #undef WC @@ -66,14 +70,13 @@ static const uint8_t mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] = { * Reverse lookup table, to find a pat type according to MTRR and effective * memory type. This table is dynamically generated. */ -static uint8_t __read_mostly mtrr_epat_tbl[MTRR_NUM_TYPES][MEMORY_NUM_TYPES] = - { [0 ... MTRR_NUM_TYPES-1] = - { [0 ... MEMORY_NUM_TYPES-1] = INVALID_MEM_TYPE } - }; +static uint8_t __read_mostly mtrr_epat_tbl[MTRR_NUM_TYPES][MEMORY_NUM_TYPES] = { + [0 ... MTRR_NUM_TYPES - 1] = {[0 ... MEMORY_NUM_TYPES - 1] = + INVALID_MEM_TYPE}}; /* Lookup table for PAT entry of a given PAT value in host PAT. */ -static uint8_t __read_mostly pat_entry_tbl[PAT_TYPE_NUMS] = - { [0 ... PAT_TYPE_NUMS-1] = INVALID_MEM_TYPE }; +static uint8_t __read_mostly pat_entry_tbl[PAT_TYPE_NUMS] = { + [0 ... PAT_TYPE_NUMS - 1] = INVALID_MEM_TYPE}; static int __init hvm_mtrr_pat_init(void) { @@ -124,9 +127,9 @@ int hvm_vcpu_cacheattr_init(struct vcpu *v) { struct mtrr_state *m = &v->arch.hvm.mtrr; unsigned int num_var_ranges = - is_hardware_domain(v->domain) ? MASK_EXTR(mtrr_state.mtrr_cap, - MTRRcap_VCNT) - : MTRR_VCNT; + is_hardware_domain(v->domain) + ? 
MASK_EXTR(mtrr_state.mtrr_cap, MTRRcap_VCNT) + : MTRR_VCNT; if ( num_var_ranges > MTRR_VCNT_MAX ) { @@ -144,15 +147,14 @@ int hvm_vcpu_cacheattr_init(struct vcpu *v) m->mtrr_cap = (1u << 10) | (1u << 8) | num_var_ranges; - v->arch.hvm.pat_cr = - ((uint64_t)PAT_TYPE_WRBACK) | /* PAT0: WB */ - ((uint64_t)PAT_TYPE_WRTHROUGH << 8) | /* PAT1: WT */ - ((uint64_t)PAT_TYPE_UC_MINUS << 16) | /* PAT2: UC- */ - ((uint64_t)PAT_TYPE_UNCACHABLE << 24) | /* PAT3: UC */ - ((uint64_t)PAT_TYPE_WRBACK << 32) | /* PAT4: WB */ - ((uint64_t)PAT_TYPE_WRTHROUGH << 40) | /* PAT5: WT */ - ((uint64_t)PAT_TYPE_UC_MINUS << 48) | /* PAT6: UC- */ - ((uint64_t)PAT_TYPE_UNCACHABLE << 56); /* PAT7: UC */ + v->arch.hvm.pat_cr = ((uint64_t)PAT_TYPE_WRBACK) | /* PAT0: WB */ + ((uint64_t)PAT_TYPE_WRTHROUGH << 8) | /* PAT1: WT */ + ((uint64_t)PAT_TYPE_UC_MINUS << 16) | /* PAT2: UC- */ + ((uint64_t)PAT_TYPE_UNCACHABLE << 24) | /* PAT3: UC */ + ((uint64_t)PAT_TYPE_WRBACK << 32) | /* PAT4: WB */ + ((uint64_t)PAT_TYPE_WRTHROUGH << 40) | /* PAT5: WT */ + ((uint64_t)PAT_TYPE_UC_MINUS << 48) | /* PAT6: UC- */ + ((uint64_t)PAT_TYPE_UNCACHABLE << 56); /* PAT7: UC */ if ( is_hardware_domain(v->domain) ) { @@ -162,8 +164,8 @@ int hvm_vcpu_cacheattr_init(struct vcpu *v) if ( mtrr_state.have_fixed ) for ( i = 0; i < NUM_FIXED_MSR; i++ ) - mtrr_fix_range_msr_set(d, m, i, - ((uint64_t *)mtrr_state.fixed_ranges)[i]); + mtrr_fix_range_msr_set( + d, m, i, ((uint64_t *)mtrr_state.fixed_ranges)[i]); for ( i = 0; i < num_var_ranges; i++ ) { @@ -173,11 +175,11 @@ int hvm_vcpu_cacheattr_init(struct vcpu *v) mtrr_state.var_ranges[i].mask); } - mtrr_def_type_msr_set(d, m, - mtrr_state.def_type | - MASK_INSR(mtrr_state.fixed_enabled, - MTRRdefType_FE) | - MASK_INSR(mtrr_state.enabled, MTRRdefType_E)); + mtrr_def_type_msr_set( + d, m, + mtrr_state.def_type | + MASK_INSR(mtrr_state.fixed_enabled, MTRRdefType_FE) | + MASK_INSR(mtrr_state.enabled, MTRRdefType_E)); } return 0; @@ -194,100 +196,100 @@ void hvm_vcpu_cacheattr_destroy(struct vcpu *v) * May return a negative value when order > 0, indicating to the caller * that the respective mapping needs splitting. */ -static int get_mtrr_type(const struct mtrr_state *m, - paddr_t pa, unsigned int order) +static int get_mtrr_type(const struct mtrr_state *m, paddr_t pa, + unsigned int order) { - uint8_t overlap_mtrr = 0; - uint8_t overlap_mtrr_pos = 0; - uint64_t mask = -(uint64_t)PAGE_SIZE << order; - unsigned int seg, num_var_ranges = MASK_EXTR(m->mtrr_cap, MTRRcap_VCNT); - - if ( unlikely(!m->enabled) ) - return MTRR_TYPE_UNCACHABLE; - - pa &= mask; - if ( (pa < 0x100000) && m->fixed_enabled ) - { - /* Fixed range MTRR takes effect. */ - uint32_t addr = (uint32_t)pa, index; - - if ( addr < 0x80000 ) - { - /* 0x00000 ... 0x7FFFF in 64k steps */ - if ( order > 4 ) - return -1; - seg = (addr >> 16); - return m->fixed_ranges[seg]; - } - else if ( addr < 0xc0000 ) - { - /* 0x80000 ... 0xBFFFF in 16k steps */ - if ( order > 2 ) - return -1; - seg = (addr - 0x80000) >> 14; - index = (seg >> 3) + 1; - seg &= 7; /* select 0-7 segments */ - return m->fixed_ranges[index*8 + seg]; - } - else - { - /* 0xC0000 ... 0xFFFFF in 4k steps */ - if ( order ) - return -1; - seg = (addr - 0xc0000) >> 12; - index = (seg >> 3) + 3; - seg &= 7; /* select 0-7 segments */ - return m->fixed_ranges[index*8 + seg]; - } - } - - /* Match with variable MTRRs. 
*/ - for ( seg = 0; seg < num_var_ranges; seg++ ) - { - uint64_t phys_base = m->var_ranges[seg].base; - uint64_t phys_mask = m->var_ranges[seg].mask; - - if ( phys_mask & MTRR_PHYSMASK_VALID ) - { - phys_mask &= mask; - if ( (pa & phys_mask) == (phys_base & phys_mask) ) - { - if ( unlikely(m->overlapped) || order ) - { + uint8_t overlap_mtrr = 0; + uint8_t overlap_mtrr_pos = 0; + uint64_t mask = -(uint64_t)PAGE_SIZE << order; + unsigned int seg, num_var_ranges = MASK_EXTR(m->mtrr_cap, MTRRcap_VCNT); + + if ( unlikely(!m->enabled) ) + return MTRR_TYPE_UNCACHABLE; + + pa &= mask; + if ( (pa < 0x100000) && m->fixed_enabled ) + { + /* Fixed range MTRR takes effect. */ + uint32_t addr = (uint32_t)pa, index; + + if ( addr < 0x80000 ) + { + /* 0x00000 ... 0x7FFFF in 64k steps */ + if ( order > 4 ) + return -1; + seg = (addr >> 16); + return m->fixed_ranges[seg]; + } + else if ( addr < 0xc0000 ) + { + /* 0x80000 ... 0xBFFFF in 16k steps */ + if ( order > 2 ) + return -1; + seg = (addr - 0x80000) >> 14; + index = (seg >> 3) + 1; + seg &= 7; /* select 0-7 segments */ + return m->fixed_ranges[index * 8 + seg]; + } + else + { + /* 0xC0000 ... 0xFFFFF in 4k steps */ + if ( order ) + return -1; + seg = (addr - 0xc0000) >> 12; + index = (seg >> 3) + 3; + seg &= 7; /* select 0-7 segments */ + return m->fixed_ranges[index * 8 + seg]; + } + } + + /* Match with variable MTRRs. */ + for ( seg = 0; seg < num_var_ranges; seg++ ) + { + uint64_t phys_base = m->var_ranges[seg].base; + uint64_t phys_mask = m->var_ranges[seg].mask; + + if ( phys_mask & MTRR_PHYSMASK_VALID ) + { + phys_mask &= mask; + if ( (pa & phys_mask) == (phys_base & phys_mask) ) + { + if ( unlikely(m->overlapped) || order ) + { overlap_mtrr |= 1 << (phys_base & MTRR_PHYSBASE_TYPE_MASK); overlap_mtrr_pos = phys_base & MTRR_PHYSBASE_TYPE_MASK; - } - else - { - /* If no overlap, return the found one */ - return (phys_base & MTRR_PHYSBASE_TYPE_MASK); - } - } - } - } - - /* Not found? */ - if ( unlikely(overlap_mtrr == 0) ) - return m->def_type; - - /* One match, or multiple identical ones? */ - if ( likely(overlap_mtrr == (1 << overlap_mtrr_pos)) ) - return overlap_mtrr_pos; - - if ( order ) - return -1; - - /* Two or more matches, one being UC? */ - if ( overlap_mtrr & (1 << MTRR_TYPE_UNCACHABLE) ) - return MTRR_TYPE_UNCACHABLE; - - /* Two or more matches, all of them WT and WB? */ - if ( overlap_mtrr == - ((1 << MTRR_TYPE_WRTHROUGH) | (1 << MTRR_TYPE_WRBACK)) ) - return MTRR_TYPE_WRTHROUGH; - - /* Behaviour is undefined, but return the last overlapped type. */ - return overlap_mtrr_pos; + } + else + { + /* If no overlap, return the found one */ + return (phys_base & MTRR_PHYSBASE_TYPE_MASK); + } + } + } + } + + /* Not found? */ + if ( unlikely(overlap_mtrr == 0) ) + return m->def_type; + + /* One match, or multiple identical ones? */ + if ( likely(overlap_mtrr == (1 << overlap_mtrr_pos)) ) + return overlap_mtrr_pos; + + if ( order ) + return -1; + + /* Two or more matches, one being UC? */ + if ( overlap_mtrr & (1 << MTRR_TYPE_UNCACHABLE) ) + return MTRR_TYPE_UNCACHABLE; + + /* Two or more matches, all of them WT and WB? */ + if ( overlap_mtrr == + ((1 << MTRR_TYPE_WRTHROUGH) | (1 << MTRR_TYPE_WRBACK)) ) + return MTRR_TYPE_WRTHROUGH; + + /* Behaviour is undefined, but return the last overlapped type. 
*/ + return overlap_mtrr_pos; } /* @@ -300,7 +302,7 @@ static uint8_t page_pat_type(uint64_t pat_cr, uint32_t pte_flags) int32_t pat_entry; /* PCD/PWT -> bit 1/0 of PAT entry */ - pat_entry = ( pte_flags >> 3 ) & 0x3; + pat_entry = (pte_flags >> 3) & 0x3; /* PAT bits as bit 2 of PAT entry */ if ( pte_flags & _PAGE_PAT ) pat_entry |= 4; @@ -311,17 +313,15 @@ static uint8_t page_pat_type(uint64_t pat_cr, uint32_t pte_flags) /* * Effective memory type for leaf page. */ -static uint8_t effective_mm_type(struct mtrr_state *m, - uint64_t pat, - paddr_t gpa, - uint32_t pte_flags, +static uint8_t effective_mm_type(struct mtrr_state *m, uint64_t pat, + paddr_t gpa, uint32_t pte_flags, uint8_t gmtrr_mtype) { uint8_t mtrr_mtype, pat_value, effective; - + /* if get_pat_flags() gives a dedicated MTRR type, * just use it - */ + */ if ( gmtrr_mtype == NO_HARDCODE_MEM_TYPE ) mtrr_mtype = get_mtrr_type(m, gpa, 0); else @@ -334,11 +334,8 @@ static uint8_t effective_mm_type(struct mtrr_state *m, return effective; } -uint32_t get_pat_flags(struct vcpu *v, - uint32_t gl1e_flags, - paddr_t gpaddr, - paddr_t spaddr, - uint8_t gmtrr_mtype) +uint32_t get_pat_flags(struct vcpu *v, uint32_t gl1e_flags, paddr_t gpaddr, + paddr_t spaddr, uint8_t gmtrr_mtype) { uint8_t guest_eff_mm_type; uint8_t shadow_mtrr_type; @@ -349,8 +346,8 @@ uint32_t get_pat_flags(struct vcpu *v, /* 1. Get the effective memory type of guest physical address, * with the pair of guest MTRR and PAT */ - guest_eff_mm_type = effective_mm_type(g, pat, gpaddr, - gl1e_flags, gmtrr_mtype); + guest_eff_mm_type = + effective_mm_type(g, pat, gpaddr, gl1e_flags, gmtrr_mtype); /* 2. Get the memory type of host physical address, with MTRR */ shadow_mtrr_type = get_mtrr_type(&mtrr_state, spaddr, 0); @@ -367,13 +364,13 @@ uint32_t get_pat_flags(struct vcpu *v, struct domain *d = v->domain; p2m_type_t p2mt; get_gfn_query_unlocked(d, paddr_to_pfn(gpaddr), &p2mt); - if (p2m_is_ram(p2mt)) + if ( p2m_is_ram(p2mt) ) gdprintk(XENLOG_WARNING, - "Conflict occurs for a given guest l1e flags:%x " - "at %"PRIx64" (the effective mm type:%d), " - "because the host mtrr type is:%d\n", - gl1e_flags, (uint64_t)gpaddr, guest_eff_mm_type, - shadow_mtrr_type); + "Conflict occurs for a given guest l1e flags:%x " + "at %" PRIx64 " (the effective mm type:%d), " + "because the host mtrr type is:%d\n", + gl1e_flags, (uint64_t)gpaddr, guest_eff_mm_type, + shadow_mtrr_type); pat_entry_value = PAT_TYPE_UNCACHABLE; } /* 4. 
Get the pte flags */ @@ -382,7 +379,7 @@ uint32_t get_pat_flags(struct vcpu *v, static inline bool_t valid_mtrr_type(uint8_t type) { - switch ( type ) + switch (type) { case MTRR_TYPE_UNCACHABLE: case MTRR_TYPE_WRBACK: @@ -403,15 +400,15 @@ bool_t mtrr_def_type_msr_set(struct domain *d, struct mtrr_state *m, if ( unlikely(!valid_mtrr_type(def_type)) ) { - HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid MTRR def type:%x\n", def_type); - return 0; + HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid MTRR def type:%x\n", def_type); + return 0; } if ( unlikely(msr_content && (msr_content & ~0xcffUL)) ) { - HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid msr content:%"PRIx64"\n", - msr_content); - return 0; + HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid msr content:%" PRIx64 "\n", + msr_content); + return 0; } if ( m->enabled != enabled || m->fixed_enabled != fixed_enabled || @@ -433,7 +430,7 @@ bool_t mtrr_fix_range_msr_set(struct domain *d, struct mtrr_state *m, if ( fixed_range_base[row] != msr_content ) { - uint8_t *range = (uint8_t*)&msr_content; + uint8_t *range = (uint8_t *)&msr_content; unsigned int i; for ( i = 0; i < 8; i++ ) @@ -449,12 +446,12 @@ bool_t mtrr_fix_range_msr_set(struct domain *d, struct mtrr_state *m, return 1; } -bool_t mtrr_var_range_msr_set( - struct domain *d, struct mtrr_state *m, uint32_t msr, uint64_t msr_content) +bool_t mtrr_var_range_msr_set(struct domain *d, struct mtrr_state *m, + uint32_t msr, uint64_t msr_content) { uint32_t index, phys_addr; uint64_t msr_mask; - uint64_t *var_range_base = (uint64_t*)m->var_ranges; + uint64_t *var_range_base = (uint64_t *)m->var_ranges; index = msr - MSR_IA32_MTRR_PHYSBASE(0); if ( (index / 2) >= MASK_EXTR(m->mtrr_cap, MTRRcap_VCNT) ) @@ -477,7 +474,7 @@ bool_t mtrr_var_range_msr_set( msr_mask |= (index & 1) ? 0x7ffUL : 0xf00UL; if ( unlikely(msr_content & msr_mask) ) { - HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid msr content:%"PRIx64"\n", + HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid msr content:%" PRIx64 "\n", msr_content); return 0; } @@ -512,9 +509,8 @@ bool mtrr_pat_not_equal(const struct vcpu *vd, const struct vcpu *vs) if ( md->fixed_enabled != ms->fixed_enabled ) return true; - if ( md->fixed_enabled && - memcmp(md->fixed_ranges, ms->fixed_ranges, - sizeof(md->fixed_ranges)) ) + if ( md->fixed_enabled && memcmp(md->fixed_ranges, ms->fixed_ranges, + sizeof(md->fixed_ranges)) ) return true; /* Test variable ranges. 
*/ @@ -528,7 +524,8 @@ bool mtrr_pat_not_equal(const struct vcpu *vd, const struct vcpu *vs) return vd->arch.hvm.pat_cr != vs->arch.hvm.pat_cr; } -struct hvm_mem_pinned_cacheattr_range { +struct hvm_mem_pinned_cacheattr_range +{ struct list_head list; uint64_t start, end; uint32_t type; @@ -549,9 +546,8 @@ void hvm_destroy_cacheattr_region_list(struct domain *d) while ( !list_empty(head) ) { - range = list_entry(head->next, - struct hvm_mem_pinned_cacheattr_range, - list); + range = + list_entry(head->next, struct hvm_mem_pinned_cacheattr_range, list); list_del(&range->list); xfree(range); } @@ -567,9 +563,7 @@ int hvm_get_mem_pinned_cacheattr(struct domain *d, gfn_t gfn, ASSERT(is_hvm_domain(d)); rcu_read_lock(&pinned_cacheattr_rcu_lock); - list_for_each_entry_rcu ( range, - &d->arch.hvm.pinned_cacheattr_ranges, - list ) + list_for_each_entry_rcu(range, &d->arch.hvm.pinned_cacheattr_ranges, list) { if ( ((gfn_x(gfn) & mask) >= range->start) && ((gfn_x(gfn) | ~mask) <= range->end) ) @@ -606,38 +600,37 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start, if ( gfn_end < gfn_start || (gfn_start | gfn_end) >> paddr_bits ) return -EINVAL; - switch ( type ) + switch (type) { case XEN_DOMCTL_DELETE_MEM_CACHEATTR: /* Remove the requested range. */ rcu_read_lock(&pinned_cacheattr_rcu_lock); - list_for_each_entry_rcu ( range, - &d->arch.hvm.pinned_cacheattr_ranges, - list ) - if ( range->start == gfn_start && range->end == gfn_end ) + list_for_each_entry_rcu(range, &d->arch.hvm.pinned_cacheattr_ranges, + list) if ( range->start == gfn_start && + range->end == gfn_end ) + { + rcu_read_unlock(&pinned_cacheattr_rcu_lock); + list_del_rcu(&range->list); + type = range->type; + call_rcu(&range->rcu, free_pinned_cacheattr_entry); + p2m_memory_type_changed(d); + switch (type) { - rcu_read_unlock(&pinned_cacheattr_rcu_lock); - list_del_rcu(&range->list); - type = range->type; - call_rcu(&range->rcu, free_pinned_cacheattr_entry); - p2m_memory_type_changed(d); - switch ( type ) - { - case PAT_TYPE_UC_MINUS: - /* - * For EPT we can also avoid the flush in this case; - * see epte_get_entry_emt(). - */ - if ( hap_enabled(d) && cpu_has_vmx ) + case PAT_TYPE_UC_MINUS: + /* + * For EPT we can also avoid the flush in this case; + * see epte_get_entry_emt(). 
+ */ + if ( hap_enabled(d) && cpu_has_vmx ) case PAT_TYPE_UNCACHABLE: - break; - /* fall through */ - default: - flush_all(FLUSH_CACHE); break; - } - return 0; + /* fall through */ + default: + flush_all(FLUSH_CACHE); + break; } + return 0; + } rcu_read_unlock(&pinned_cacheattr_rcu_lock); return -ENOENT; @@ -654,9 +647,7 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start, } rcu_read_lock(&pinned_cacheattr_rcu_lock); - list_for_each_entry_rcu ( range, - &d->arch.hvm.pinned_cacheattr_ranges, - list ) + list_for_each_entry_rcu(range, &d->arch.hvm.pinned_cacheattr_ranges, list) { if ( range->start == gfn_start && range->end == gfn_end ) { @@ -694,11 +685,11 @@ static int hvm_save_mtrr_msr(struct vcpu *v, hvm_domain_context_t *h) { const struct mtrr_state *mtrr_state = &v->arch.hvm.mtrr; struct hvm_hw_mtrr hw_mtrr = { - .msr_mtrr_def_type = mtrr_state->def_type | - MASK_INSR(mtrr_state->fixed_enabled, - MTRRdefType_FE) | - MASK_INSR(mtrr_state->enabled, MTRRdefType_E), - .msr_mtrr_cap = mtrr_state->mtrr_cap, + .msr_mtrr_def_type = + mtrr_state->def_type | + MASK_INSR(mtrr_state->fixed_enabled, MTRRdefType_FE) | + MASK_INSR(mtrr_state->enabled, MTRRdefType_E), + .msr_mtrr_cap = mtrr_state->mtrr_cap, }; unsigned int i; @@ -706,8 +697,8 @@ static int hvm_save_mtrr_msr(struct vcpu *v, hvm_domain_context_t *h) (ARRAY_SIZE(hw_mtrr.msr_mtrr_var) / 2) ) { dprintk(XENLOG_G_ERR, - "HVM save: %pv: too many (%lu) variable range MTRRs\n", - v, MASK_EXTR(hw_mtrr.msr_mtrr_cap, MTRRcap_VCNT)); + "HVM save: %pv: too many (%lu) variable range MTRRs\n", v, + MASK_EXTR(hw_mtrr.msr_mtrr_cap, MTRRcap_VCNT)); return -EINVAL; } @@ -749,8 +740,8 @@ static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h) if ( MASK_EXTR(hw_mtrr.msr_mtrr_cap, MTRRcap_VCNT) > MTRR_VCNT ) { dprintk(XENLOG_G_ERR, - "HVM restore: %pv: too many (%lu) variable range MTRRs\n", - v, MASK_EXTR(hw_mtrr.msr_mtrr_cap, MTRRcap_VCNT)); + "HVM restore: %pv: too many (%lu) variable range MTRRs\n", v, + MASK_EXTR(hw_mtrr.msr_mtrr_cap, MTRRcap_VCNT)); return -EINVAL; } @@ -765,11 +756,9 @@ static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h) for ( i = 0; i < MASK_EXTR(hw_mtrr.msr_mtrr_cap, MTRRcap_VCNT); i++ ) { - mtrr_var_range_msr_set(d, mtrr_state, - MSR_IA32_MTRR_PHYSBASE(i), + mtrr_var_range_msr_set(d, mtrr_state, MSR_IA32_MTRR_PHYSBASE(i), hw_mtrr.msr_mtrr_var[i * 2]); - mtrr_var_range_msr_set(d, mtrr_state, - MSR_IA32_MTRR_PHYSMASK(i), + mtrr_var_range_msr_set(d, mtrr_state, MSR_IA32_MTRR_PHYSMASK(i), hw_mtrr.msr_mtrr_var[i * 2 + 1]); } @@ -805,8 +794,9 @@ int epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn, if ( rangeset_overlaps_range(mmio_ro_ranges, mfn_x(mfn), mfn_x(mfn) | ((1UL << order) - 1)) ) { - if ( !order || rangeset_contains_range(mmio_ro_ranges, mfn_x(mfn), - mfn_x(mfn) | ((1UL << order) - 1)) ) + if ( !order || + rangeset_contains_range(mmio_ro_ranges, mfn_x(mfn), + mfn_x(mfn) | ((1UL << order) - 1)) ) { *ipat = 1; return MTRR_TYPE_UNCACHABLE; @@ -847,10 +837,10 @@ int epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn, if ( gmtrr_mtype == -EADDRNOTAVAIL ) return -1; - gmtrr_mtype = is_hvm_domain(d) && v ? - get_mtrr_type(&v->arch.hvm.mtrr, - gfn << PAGE_SHIFT, order) : - MTRR_TYPE_WRBACK; + gmtrr_mtype = + is_hvm_domain(d) && v + ? 
get_mtrr_type(&v->arch.hvm.mtrr, gfn << PAGE_SHIFT, order) + : MTRR_TYPE_WRBACK; hmtrr_mtype = get_mtrr_type(&mtrr_state, mfn_x(mfn) << PAGE_SHIFT, order); if ( gmtrr_mtype < 0 || hmtrr_mtype < 0 ) return -1; diff --git a/xen/arch/x86/hvm/nestedhvm.c b/xen/arch/x86/hvm/nestedhvm.c index bd1101987d..2adc7d6f03 100644 --- a/xen/arch/x86/hvm/nestedhvm.c +++ b/xen/arch/x86/hvm/nestedhvm.c @@ -19,7 +19,7 @@ #include #include #include -#include /* for struct p2m_domain */ +#include /* for struct p2m_domain */ #include #include /* for local_event_delivery_(en|dis)able */ #include /* for paging_mode_hap() */ @@ -27,14 +27,12 @@ static unsigned long *shadow_io_bitmap[3]; /* Nested VCPU */ -bool_t -nestedhvm_vcpu_in_guestmode(struct vcpu *v) +bool_t nestedhvm_vcpu_in_guestmode(struct vcpu *v) { return vcpu_nestedhvm(v).nv_guestmode; } -void -nestedhvm_vcpu_reset(struct vcpu *v) +void nestedhvm_vcpu_reset(struct vcpu *v) { struct nestedvcpu *nv = &vcpu_nestedhvm(v); @@ -61,8 +59,7 @@ nestedhvm_vcpu_reset(struct vcpu *v) nestedhvm_vcpu_exit_guestmode(v); } -int -nestedhvm_vcpu_initialise(struct vcpu *v) +int nestedhvm_vcpu_initialise(struct vcpu *v) { int rc = -EOPNOTSUPP; @@ -71,27 +68,26 @@ nestedhvm_vcpu_initialise(struct vcpu *v) if ( !hvm_funcs.nhvm_vcpu_initialise || ((rc = hvm_funcs.nhvm_vcpu_initialise(v)) != 0) ) - return rc; + return rc; nestedhvm_vcpu_reset(v); return 0; } -void -nestedhvm_vcpu_destroy(struct vcpu *v) +void nestedhvm_vcpu_destroy(struct vcpu *v) { if ( hvm_funcs.nhvm_vcpu_destroy ) hvm_funcs.nhvm_vcpu_destroy(v); } -static void -nestedhvm_flushtlb_ipi(void *info) +static void nestedhvm_flushtlb_ipi(void *info) { struct vcpu *v = current; struct domain *d = info; ASSERT(d != NULL); - if (v->domain != d) { + if ( v->domain != d ) + { /* This cpu doesn't belong to the domain */ return; } @@ -105,11 +101,10 @@ nestedhvm_flushtlb_ipi(void *info) vcpu_nestedhvm(v).stale_np2m = true; } -void -nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m) +void nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m) { - on_selected_cpus(p2m->dirty_cpumask, nestedhvm_flushtlb_ipi, - p2m->domain, 1); + on_selected_cpus(p2m->dirty_cpumask, nestedhvm_flushtlb_ipi, p2m->domain, + 1); cpumask_clear(p2m->dirty_cpumask); } @@ -127,8 +122,7 @@ nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m) * iomap[2] set set */ -static int __init -nestedhvm_setup(void) +static int __init nestedhvm_setup(void) { /* Same format and size as hvm_io_bitmap (Intel needs only 2 pages). */ unsigned nr = cpu_has_vmx ? 2 : 3; @@ -169,20 +163,22 @@ nestedhvm_setup(void) } __initcall(nestedhvm_setup); -unsigned long * -nestedhvm_vcpu_iomap_get(bool_t port_80, bool_t port_ed) +unsigned long *nestedhvm_vcpu_iomap_get(bool_t port_80, bool_t port_ed) { int i; - if (!hvm_port80_allowed) + if ( !hvm_port80_allowed ) port_80 = 1; - if (port_80 == 0) { - if (port_ed == 0) + if ( port_80 == 0 ) + { + if ( port_ed == 0 ) return hvm_io_bitmap; i = 0; - } else { - if (port_ed == 0) + } + else + { + if ( port_ed == 0 ) i = 1; else i = 2; diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c index 402bc8e6a2..ea372005a3 100644 --- a/xen/arch/x86/hvm/pmtimer.c +++ b/xen/arch/x86/hvm/pmtimer.c @@ -1,5 +1,5 @@ /* - * hvm/pmtimer.c: emulation of the ACPI PM timer + * hvm/pmtimer.c: emulation of the ACPI PM timer * * Copyright (c) 2007, XenSource inc. * Copyright (c) 2006, Intel Corporation. 
@@ -25,33 +25,33 @@ /* Slightly more readable port I/O addresses for the registers we intercept */ #define PM1a_STS_ADDR_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0) -#define PM1a_EN_ADDR_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0 + 2) -#define TMR_VAL_ADDR_V0 (ACPI_PM_TMR_BLK_ADDRESS_V0) +#define PM1a_EN_ADDR_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0 + 2) +#define TMR_VAL_ADDR_V0 (ACPI_PM_TMR_BLK_ADDRESS_V0) #define PM1a_STS_ADDR_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1) -#define PM1a_EN_ADDR_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1 + 2) -#define TMR_VAL_ADDR_V1 (ACPI_PM_TMR_BLK_ADDRESS_V1) +#define PM1a_EN_ADDR_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1 + 2) +#define TMR_VAL_ADDR_V1 (ACPI_PM_TMR_BLK_ADDRESS_V1) /* The interesting bits of the PM1a_STS register */ -#define TMR_STS (1 << 0) -#define GBL_STS (1 << 5) +#define TMR_STS (1 << 0) +#define GBL_STS (1 << 5) #define PWRBTN_STS (1 << 8) #define SLPBTN_STS (1 << 9) /* The same in PM1a_EN */ -#define TMR_EN (1 << 0) -#define GBL_EN (1 << 5) -#define PWRBTN_EN (1 << 8) -#define SLPBTN_EN (1 << 9) +#define TMR_EN (1 << 0) +#define GBL_EN (1 << 5) +#define PWRBTN_EN (1 << 8) +#define SLPBTN_EN (1 << 9) /* Mask of bits in PM1a_STS that can generate an SCI. */ -#define SCI_MASK (TMR_STS|PWRBTN_STS|SLPBTN_STS|GBL_STS) +#define SCI_MASK (TMR_STS | PWRBTN_STS | SLPBTN_STS | GBL_STS) /* SCI IRQ number (must match SCI_INT number in ACPI FADT in hvmloader) */ #define SCI_IRQ 9 /* We provide a 32-bit counter (must match the TMR_VAL_EXT bit in the FADT) */ -#define TMR_VAL_MASK (0xffffffff) -#define TMR_VAL_MSB (0x80000000) +#define TMR_VAL_MASK (0xffffffff) +#define TMR_VAL_MSB (0x80000000) /* Dispatch SCIs based on the PM1a_STS and PM1a_EN registers */ static void pmt_update_sci(PMTState *s) @@ -99,7 +99,7 @@ static void pmt_update_time(PMTState *s) uint64_t curr_gtime, tmp; struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm.acpi; uint32_t tmr_val = acpi->tmr_val, msb = tmr_val & TMR_VAL_MSB; - + ASSERT(spin_is_locked(&s->lock)); /* Update the timer */ @@ -136,7 +136,8 @@ static void pmt_timer_callback(void *opaque) pmt_update_time(s); /* How close are we to the next MSB flip? */ - pmt_cycles_until_flip = TMR_VAL_MSB - + pmt_cycles_until_flip = + TMR_VAL_MSB - (s->vcpu->domain->arch.hvm.acpi.tmr_val & (TMR_VAL_MSB - 1)); /* Overall time between MSB flips */ @@ -152,8 +153,8 @@ static void pmt_timer_callback(void *opaque) } /* Handle port I/O to the PM1a_STS and PM1a_EN registers */ -static int handle_evt_io( - int dir, unsigned int port, unsigned int bytes, uint32_t *val) +static int handle_evt_io(int dir, unsigned int port, unsigned int bytes, + uint32_t *val) { struct vcpu *v = current; struct hvm_hw_acpi *acpi = &v->domain->arch.hvm.acpi; @@ -162,21 +163,19 @@ static int handle_evt_io( int i; addr = port - - ((v->domain->arch.hvm.params[ - HVM_PARAM_ACPI_IOPORTS_LOCATION] == 0) ? - PM1a_STS_ADDR_V0 : PM1a_STS_ADDR_V1); + ((v->domain->arch.hvm.params[HVM_PARAM_ACPI_IOPORTS_LOCATION] == 0) + ? 
PM1a_STS_ADDR_V0 + : PM1a_STS_ADDR_V1); spin_lock(&s->lock); if ( dir == IOREQ_WRITE ) { /* Handle this I/O one byte at a time */ - for ( i = bytes, data = *val; - i > 0; - i--, addr++, data >>= 8 ) + for ( i = bytes, data = *val; i > 0; i--, addr++, data >>= 8 ) { byte = data & 0xff; - switch ( addr ) + switch (addr) { /* PM1a_STS register bits are write-to-clear */ case 0 /* PM1a_STS_ADDR */: @@ -192,8 +191,8 @@ static int handle_evt_io( acpi->pm1a_en = (acpi->pm1a_en & 0xff) | (byte << 8); break; default: - gdprintk(XENLOG_WARNING, - "Bad ACPI PM register write: %x bytes (%x) at %x\n", + gdprintk(XENLOG_WARNING, + "Bad ACPI PM register write: %x bytes (%x) at %x\n", bytes, *val, port); } } @@ -204,8 +203,10 @@ static int handle_evt_io( { data = acpi->pm1a_sts | ((uint32_t)acpi->pm1a_en << 16); data >>= 8 * addr; - if ( bytes == 1 ) data &= 0xff; - else if ( bytes == 2 ) data &= 0xffff; + if ( bytes == 1 ) + data &= 0xff; + else if ( bytes == 2 ) + data &= 0xffff; *val = data; } @@ -214,10 +215,9 @@ static int handle_evt_io( return X86EMUL_OKAY; } - /* Handle port I/O to the TMR_VAL register */ -static int handle_pmt_io( - int dir, unsigned int port, unsigned int bytes, uint32_t *val) +static int handle_pmt_io(int dir, unsigned int port, unsigned int bytes, + uint32_t *val) { struct vcpu *v = current; struct hvm_hw_acpi *acpi = &v->domain->arch.hvm.acpi; @@ -267,12 +267,14 @@ static int acpi_save(struct vcpu *v, hvm_domain_context_t *h) * goes forwards. */ x = (((s->vcpu->arch.hvm.guest_time ?: hvm_get_guest_time(s->vcpu)) - - s->last_gtime) * s->scale) >> 32; - if ( x < 1UL<<31 ) + s->last_gtime) * + s->scale) >> + 32; + if ( x < 1UL << 31 ) acpi->tmr_val += x; if ( (acpi->tmr_val & TMR_VAL_MSB) != msb ) acpi->pm1a_sts |= TMR_STS; - /* No point in setting the SCI here because we'll already have saved the + /* No point in setting the SCI here because we'll already have saved the * IRQ and *PIC state; we'll fix it up when we restore the domain */ rc = hvm_save_entry(PMTIMER, 0, h, acpi); @@ -302,16 +304,15 @@ static int acpi_load(struct domain *d, hvm_domain_context_t *h) s->last_gtime = hvm_get_guest_time(s->vcpu); s->not_accounted = 0; - /* Set the SCI state from the registers */ + /* Set the SCI state from the registers */ pmt_update_sci(s); spin_unlock(&s->lock); - + return 0; } -HVM_REGISTER_SAVE_RESTORE(PMTIMER, acpi_save, acpi_load, - 1, HVMSR_PER_DOM); +HVM_REGISTER_SAVE_RESTORE(PMTIMER, acpi_save, acpi_load, 1, HVMSR_PER_DOM); int pmtimer_change_ioport(struct domain *d, unsigned int version) { @@ -368,7 +369,6 @@ void pmtimer_init(struct vcpu *v) pmt_timer_callback(s); } - void pmtimer_deinit(struct domain *d) { PMTState *s = &d->arch.hvm.pl_time->vpmt; diff --git a/xen/arch/x86/hvm/quirks.c b/xen/arch/x86/hvm/quirks.c index a298ccdbb9..d04c07eaca 100644 --- a/xen/arch/x86/hvm/quirks.c +++ b/xen/arch/x86/hvm/quirks.c @@ -1,6 +1,6 @@ /****************************************************************************** * x86/hvm/quirks.c - * + * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. @@ -41,50 +41,28 @@ static int __init check_port80(void) * Quirk table for systems that misbehave (lock up, etc.) 
if port * 0x80 is used: */ - static struct dmi_system_id __initdata hvm_no_port80_dmi_table[] = - { - { - .callback = dmi_hvm_deny_port80, - .ident = "Compaq Presario V6000", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"), - DMI_MATCH(DMI_BOARD_NAME, "30B7") - } - }, - { - .callback = dmi_hvm_deny_port80, - .ident = "HP Pavilion dv9000z", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"), - DMI_MATCH(DMI_BOARD_NAME, "30B9") - } - }, - { - .callback = dmi_hvm_deny_port80, - .ident = "HP Pavilion dv6000", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"), - DMI_MATCH(DMI_BOARD_NAME, "30B8") - } - }, - { - .callback = dmi_hvm_deny_port80, - .ident = "HP Pavilion tx1000", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"), - DMI_MATCH(DMI_BOARD_NAME, "30BF") - } - }, - { - .callback = dmi_hvm_deny_port80, - .ident = "Presario F700", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"), - DMI_MATCH(DMI_BOARD_NAME, "30D3") - } - }, - { } - }; + static struct dmi_system_id __initdata hvm_no_port80_dmi_table[] = { + {.callback = dmi_hvm_deny_port80, + .ident = "Compaq Presario V6000", + .matches = {DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"), + DMI_MATCH(DMI_BOARD_NAME, "30B7")}}, + {.callback = dmi_hvm_deny_port80, + .ident = "HP Pavilion dv9000z", + .matches = {DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"), + DMI_MATCH(DMI_BOARD_NAME, "30B9")}}, + {.callback = dmi_hvm_deny_port80, + .ident = "HP Pavilion dv6000", + .matches = {DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"), + DMI_MATCH(DMI_BOARD_NAME, "30B8")}}, + {.callback = dmi_hvm_deny_port80, + .ident = "HP Pavilion tx1000", + .matches = {DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"), + DMI_MATCH(DMI_BOARD_NAME, "30BF")}}, + {.callback = dmi_hvm_deny_port80, + .ident = "Presario F700", + .matches = {DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"), + DMI_MATCH(DMI_BOARD_NAME, "30D3")}}, + {}}; dmi_check_system(hvm_no_port80_dmi_table); diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c index 42339682e8..31bfd693f4 100644 --- a/xen/arch/x86/hvm/rtc.c +++ b/xen/arch/x86/hvm/rtc.c @@ -1,8 +1,8 @@ /* * QEMU MC146818 RTC emulation - * + * * Copyright (c) 2003-2004 Fabrice Bellard - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the @@ -29,25 +29,26 @@ #include #include -#define USEC_PER_SEC 1000000UL -#define NS_PER_USEC 1000UL -#define NS_PER_SEC 1000000000ULL +#define USEC_PER_SEC 1000000UL +#define NS_PER_USEC 1000UL +#define NS_PER_SEC 1000000000ULL -#define SEC_PER_MIN 60 -#define SEC_PER_HOUR 3600 -#define MIN_PER_HOUR 60 -#define HOUR_PER_DAY 24 +#define SEC_PER_MIN 60 +#define SEC_PER_HOUR 3600 +#define MIN_PER_HOUR 60 +#define HOUR_PER_DAY 24 #define domain_vrtc(x) (&(x)->arch.hvm.pl_time->vrtc) -#define vcpu_vrtc(x) (domain_vrtc((x)->domain)) +#define vcpu_vrtc(x) (domain_vrtc((x)->domain)) #define vrtc_domain(x) (container_of(x, struct pl_time, vrtc)->domain) -#define vrtc_vcpu(x) (pt_global_vcpu_target(vrtc_domain(x))) -#define epoch_year 1900 -#define get_year(x) (x + epoch_year) +#define vrtc_vcpu(x) (pt_global_vcpu_target(vrtc_domain(x))) +#define epoch_year 1900 +#define get_year(x) (x + epoch_year) -enum rtc_mode { - rtc_mode_no_ack, - rtc_mode_strict +enum rtc_mode +{ + rtc_mode_no_ack, + rtc_mode_strict }; /* This must be in sync with how hvmloader sets the ACPI WAET flags. 
*/ @@ -67,8 +68,7 @@ static void rtc_update_irq(RTCState *s) return; /* IRQ is raised if any source is both raised & enabled */ - if ( !(s->hw.cmos_data[RTC_REG_B] & - s->hw.cmos_data[RTC_REG_C] & + if ( !(s->hw.cmos_data[RTC_REG_B] & s->hw.cmos_data[RTC_REG_C] & (RTC_PF | RTC_AF | RTC_UF)) ) return; @@ -86,9 +86,8 @@ static void rtc_pf_callback(struct vcpu *v, void *opaque) spin_lock(&s->lock); - if ( !rtc_mode_is(s, no_ack) - && (s->hw.cmos_data[RTC_REG_C] & RTC_IRQF) - && ++(s->pt_dead_ticks) >= 10 ) + if ( !rtc_mode_is(s, no_ack) && (s->hw.cmos_data[RTC_REG_C] & RTC_IRQF) && + ++(s->pt_dead_ticks) >= 10 ) { /* VM is ignoring its RTC; no point in running the timer */ TRACE_0D(TRC_HVM_EMUL_RTC_STOP_TIMER); @@ -96,7 +95,7 @@ static void rtc_pf_callback(struct vcpu *v, void *opaque) s->period = 0; } - s->hw.cmos_data[RTC_REG_C] |= RTC_PF|RTC_IRQF; + s->hw.cmos_data[RTC_REG_C] |= RTC_PF | RTC_IRQF; spin_unlock(&s->lock); } @@ -112,8 +111,8 @@ static void check_for_pf_ticks(RTCState *s) return; now = NOW(); - if ( (now - s->start_time) / s->period - != (s->check_ticks_since - s->start_time) / s->period ) + if ( (now - s->start_time) / s->period != + (s->check_ticks_since - s->start_time) / s->period ) s->hw.cmos_data[RTC_REG_C] |= RTC_PF; s->check_ticks_since = now; @@ -131,7 +130,7 @@ static void rtc_timer_update(RTCState *s) s->pt_dead_ticks = 0; period_code = s->hw.cmos_data[RTC_REG_A] & RTC_RATE_SELECT; - switch ( s->hw.cmos_data[RTC_REG_A] & RTC_DIV_CTL ) + switch (s->hw.cmos_data[RTC_REG_A] & RTC_DIV_CTL) { case RTC_REF_CLCK_32KHZ: if ( (period_code != 0) && (period_code <= 2) ) @@ -155,8 +154,8 @@ static void rtc_timer_update(RTCState *s) if ( s->hw.cmos_data[RTC_REG_B] & RTC_PIE ) { TRACE_2D(TRC_HVM_EMUL_RTC_START_TIMER, delta, period); - create_periodic_time(v, &s->pt, delta, period, - RTC_IRQ, rtc_pf_callback, s, false); + create_periodic_time(v, &s->pt, delta, period, RTC_IRQ, + rtc_pf_callback, s, false); } else s->check_ticks_since = now; @@ -183,12 +182,12 @@ static void check_update_timer(RTCState *s) ASSERT(spin_is_locked(&s->lock)); - if (!(s->hw.cmos_data[RTC_REG_C] & RTC_UF) && - !(s->hw.cmos_data[RTC_REG_B] & RTC_SET)) + if ( !(s->hw.cmos_data[RTC_REG_C] & RTC_UF) && + !(s->hw.cmos_data[RTC_REG_B] & RTC_SET) ) { s->use_timer = 1; guest_usec = get_localtime_us(d) % USEC_PER_SEC; - if (guest_usec >= (USEC_PER_SEC - 244)) + if ( guest_usec >= (USEC_PER_SEC - 244) ) { /* RTC is in update cycle */ s->hw.cmos_data[RTC_REG_A] |= RTC_UIP; @@ -221,7 +220,7 @@ static void rtc_update_timer(void *opaque) RTCState *s = opaque; spin_lock(&s->lock); - if (!(s->hw.cmos_data[RTC_REG_B] & RTC_SET)) + if ( !(s->hw.cmos_data[RTC_REG_B] & RTC_SET) ) { s->hw.cmos_data[RTC_REG_A] |= RTC_UIP; set_timer(&s->update_timer2, s->next_update_time + 244000UL); @@ -234,7 +233,7 @@ static void rtc_update_timer2(void *opaque) RTCState *s = opaque; spin_lock(&s->lock); - if (!(s->hw.cmos_data[RTC_REG_B] & RTC_SET)) + if ( !(s->hw.cmos_data[RTC_REG_B] & RTC_SET) ) { s->hw.cmos_data[RTC_REG_C] |= RTC_UF; s->hw.cmos_data[RTC_REG_A] &= ~RTC_UIP; @@ -257,8 +256,8 @@ static void alarm_timer_update(RTCState *s) stop_timer(&s->alarm_timer); - if (!(s->hw.cmos_data[RTC_REG_C] & RTC_AF) && - !(s->hw.cmos_data[RTC_REG_B] & RTC_SET)) + if ( !(s->hw.cmos_data[RTC_REG_C] & RTC_AF) && + !(s->hw.cmos_data[RTC_REG_B] & RTC_SET) ) { s->current_tm = gmtime(get_localtime(d)); rtc_copy_date(s); @@ -274,33 +273,33 @@ static void alarm_timer_update(RTCState *s) next_update_time = USEC_PER_SEC - (get_localtime_us(d) % 
USEC_PER_SEC); next_update_time = next_update_time * NS_PER_USEC + NOW(); - if ((s->hw.cmos_data[RTC_HOURS_ALARM] & 0xc0) == 0xc0) + if ( (s->hw.cmos_data[RTC_HOURS_ALARM] & 0xc0) == 0xc0 ) { - if ((s->hw.cmos_data[RTC_MINUTES_ALARM] & 0xc0) == 0xc0) + if ( (s->hw.cmos_data[RTC_MINUTES_ALARM] & 0xc0) == 0xc0 ) { - if ((s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0) + if ( (s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0 ) next_alarm_sec = 1; - else if (cur_sec < alarm_sec) + else if ( cur_sec < alarm_sec ) next_alarm_sec = alarm_sec - cur_sec; else next_alarm_sec = alarm_sec + SEC_PER_MIN - cur_sec; } else { - if (cur_min < alarm_min) + if ( cur_min < alarm_min ) { min = alarm_min - cur_min; next_alarm_sec = min * SEC_PER_MIN - cur_sec; - if ((s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0) + if ( (s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0 ) next_alarm_sec += 0; else next_alarm_sec += alarm_sec; } - else if (cur_min == alarm_min) + else if ( cur_min == alarm_min ) { - if ((s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0) + if ( (s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0 ) next_alarm_sec = 1; - else if (cur_sec < alarm_sec) + else if ( cur_sec < alarm_sec ) next_alarm_sec = alarm_sec - cur_sec; else { @@ -313,7 +312,7 @@ static void alarm_timer_update(RTCState *s) { min = alarm_min + MIN_PER_HOUR - cur_min; next_alarm_sec = min * SEC_PER_MIN - cur_sec; - if ((s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0) + if ( (s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0 ) next_alarm_sec += 0; else next_alarm_sec += alarm_sec; @@ -322,14 +321,14 @@ static void alarm_timer_update(RTCState *s) } else { - if (cur_hour < alarm_hour) + if ( cur_hour < alarm_hour ) { hour = alarm_hour - cur_hour; - next_alarm_sec = hour * SEC_PER_HOUR - - cur_min * SEC_PER_MIN - cur_sec; - if ((s->hw.cmos_data[RTC_MINUTES_ALARM] & 0xc0) == 0xc0) + next_alarm_sec = + hour * SEC_PER_HOUR - cur_min * SEC_PER_MIN - cur_sec; + if ( (s->hw.cmos_data[RTC_MINUTES_ALARM] & 0xc0) == 0xc0 ) { - if ((s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0) + if ( (s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0 ) next_alarm_sec += 0; else next_alarm_sec += alarm_sec; @@ -337,53 +336,53 @@ static void alarm_timer_update(RTCState *s) else { next_alarm_sec += alarm_min * SEC_PER_MIN; - if ((s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0) + if ( (s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0 ) next_alarm_sec += 0; else next_alarm_sec += alarm_sec; } } - else if (cur_hour == alarm_hour) + else if ( cur_hour == alarm_hour ) { - if ((s->hw.cmos_data[RTC_MINUTES_ALARM] & 0xc0) == 0xc0) + if ( (s->hw.cmos_data[RTC_MINUTES_ALARM] & 0xc0) == 0xc0 ) { - if ((s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0) + if ( (s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0 ) next_alarm_sec = 1; - else if (cur_sec < alarm_sec) + else if ( cur_sec < alarm_sec ) next_alarm_sec = alarm_sec - cur_sec; else next_alarm_sec = alarm_sec + SEC_PER_MIN - cur_sec; } - else if (cur_min < alarm_min) + else if ( cur_min < alarm_min ) { min = alarm_min - cur_min; next_alarm_sec = min * SEC_PER_MIN - cur_sec; - if ((s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0) + if ( (s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0 ) next_alarm_sec += 0; else next_alarm_sec += alarm_sec; } - else if (cur_min == alarm_min) + else if ( cur_min == alarm_min ) { - if ((s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0) + if ( (s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0 ) next_alarm_sec = 1; - else if (cur_sec < alarm_sec) + 
else if ( cur_sec < alarm_sec ) next_alarm_sec = alarm_sec - cur_sec; else { hour = alarm_hour + HOUR_PER_DAY - cur_hour; next_alarm_sec = hour * SEC_PER_HOUR - - cur_min * SEC_PER_MIN - cur_sec; + cur_min * SEC_PER_MIN - cur_sec; next_alarm_sec += alarm_min * SEC_PER_MIN + alarm_sec; } } else { hour = alarm_hour + HOUR_PER_DAY - cur_hour; - next_alarm_sec = hour * SEC_PER_HOUR - - cur_min * SEC_PER_MIN - cur_sec; + next_alarm_sec = + hour * SEC_PER_HOUR - cur_min * SEC_PER_MIN - cur_sec; next_alarm_sec += alarm_min * SEC_PER_MIN; - if ((s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0) + if ( (s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0 ) next_alarm_sec += 0; else next_alarm_sec += alarm_sec; @@ -392,11 +391,11 @@ static void alarm_timer_update(RTCState *s) else { hour = alarm_hour + HOUR_PER_DAY - cur_hour; - next_alarm_sec = hour * SEC_PER_HOUR - - cur_min * SEC_PER_MIN - cur_sec; - if ((s->hw.cmos_data[RTC_MINUTES_ALARM] & 0xc0) == 0xc0) + next_alarm_sec = + hour * SEC_PER_HOUR - cur_min * SEC_PER_MIN - cur_sec; + if ( (s->hw.cmos_data[RTC_MINUTES_ALARM] & 0xc0) == 0xc0 ) { - if ((s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0) + if ( (s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0 ) next_alarm_sec += 0; else next_alarm_sec += alarm_sec; @@ -404,7 +403,7 @@ static void alarm_timer_update(RTCState *s) else { next_alarm_sec += alarm_min * SEC_PER_MIN; - if ((s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0) + if ( (s->hw.cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0 ) next_alarm_sec += 0; else next_alarm_sec += alarm_sec; @@ -425,7 +424,7 @@ static void rtc_alarm_cb(void *opaque) RTCState *s = opaque; spin_lock(&s->lock); - if (!(s->hw.cmos_data[RTC_REG_B] & RTC_SET)) + if ( !(s->hw.cmos_data[RTC_REG_B] & RTC_SET) ) { s->hw.cmos_data[RTC_REG_C] |= RTC_AF; rtc_update_irq(s); @@ -457,7 +456,7 @@ static int rtc_ioport_write(void *opaque, uint32_t addr, uint32_t data) } orig = s->hw.cmos_data[s->hw.cmos_index]; - switch ( s->hw.cmos_index ) + switch (s->hw.cmos_index) { case RTC_SECONDS_ALARM: case RTC_MINUTES_ALARM: @@ -497,7 +496,7 @@ static int rtc_ioport_write(void *opaque, uint32_t addr, uint32_t data) /* set mode: reset UIP mode */ s->hw.cmos_data[RTC_REG_A] &= ~RTC_UIP; /* adjust cmos before stopping */ - if (!(orig & RTC_SET)) + if ( !(orig & RTC_SET) ) { s->current_tm = gmtime(get_localtime(d)); rtc_copy_date(s); @@ -561,10 +560,10 @@ static inline int convert_hour(RTCState *s, int raw) { int hour = from_bcd(s, raw & 0x7f); - if (!(s->hw.cmos_data[RTC_REG_B] & RTC_24H)) + if ( !(s->hw.cmos_data[RTC_REG_B] & RTC_24H) ) { hour %= 12; - if (raw & 0x80) + if ( raw & 0x80 ) hour += 12; } return hour; @@ -575,12 +574,12 @@ static void rtc_set_time(RTCState *s) struct tm *tm = &s->current_tm; struct domain *d = vrtc_domain(s); unsigned long before, after; /* XXX s_time_t */ - + ASSERT(spin_is_locked(&s->lock)); before = mktime(get_year(tm->tm_year), tm->tm_mon + 1, tm->tm_mday, - tm->tm_hour, tm->tm_min, tm->tm_sec); - + tm->tm_hour, tm->tm_min, tm->tm_sec); + tm->tm_sec = from_bcd(s, s->hw.cmos_data[RTC_SECONDS]); tm->tm_min = from_bcd(s, s->hw.cmos_data[RTC_MINUTES]); tm->tm_hour = convert_hour(s, s->hw.cmos_data[RTC_HOURS]); @@ -592,7 +591,7 @@ static void rtc_set_time(RTCState *s) after = mktime(get_year(tm->tm_year), tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); - /* We use the guest's setting of the RTC to define the local-time + /* We use the guest's setting of the RTC to define the local-time * offset for this domain. 
*/ d->time_offset_seconds += (after - before); update_domain_wallclock_time(d); @@ -632,12 +631,12 @@ static int update_in_progress(RTCState *s) uint64_t guest_usec; struct domain *d = vrtc_domain(s); - if (s->hw.cmos_data[RTC_REG_B] & RTC_SET) + if ( s->hw.cmos_data[RTC_REG_B] & RTC_SET ) return 0; guest_usec = get_localtime_us(d); /* UIP bit will be set at last 244us of every second. */ - if ((guest_usec % USEC_PER_SEC) >= (USEC_PER_SEC - 244)) + if ( (guest_usec % USEC_PER_SEC) >= (USEC_PER_SEC - 244) ) return 1; return 0; @@ -653,7 +652,7 @@ static uint32_t rtc_ioport_read(RTCState *s, uint32_t addr) spin_lock(&s->lock); - switch ( s->hw.cmos_index ) + switch (s->hw.cmos_index) { case RTC_SECONDS: case RTC_MINUTES: @@ -663,7 +662,7 @@ static uint32_t rtc_ioport_read(RTCState *s, uint32_t addr) case RTC_MONTH: case RTC_YEAR: /* if not in set mode, adjust cmos before reading*/ - if (!(s->hw.cmos_data[RTC_REG_B] & RTC_SET)) + if ( !(s->hw.cmos_data[RTC_REG_B] & RTC_SET) ) { s->current_tm = gmtime(get_localtime(d)); rtc_copy_date(s); @@ -672,7 +671,7 @@ static uint32_t rtc_ioport_read(RTCState *s, uint32_t addr) break; case RTC_REG_A: ret = s->hw.cmos_data[s->hw.cmos_index]; - if ((s->use_timer == 0) && update_in_progress(s)) + if ( (s->use_timer == 0) && update_in_progress(s) ) ret |= RTC_UIP; break; case RTC_REG_C: @@ -695,8 +694,8 @@ static uint32_t rtc_ioport_read(RTCState *s, uint32_t addr) return ret; } -static int handle_rtc_io( - int dir, unsigned int port, unsigned int bytes, uint32_t *val) +static int handle_rtc_io(int dir, unsigned int port, unsigned int bytes, + uint32_t *val) { struct RTCState *vrtc = vcpu_vrtc(current); @@ -706,7 +705,7 @@ static int handle_rtc_io( *val = ~0; return X86EMUL_OKAY; } - + if ( dir == IOREQ_WRITE ) { if ( rtc_ioport_write(vrtc, port, (uint8_t)*val) ) @@ -769,7 +768,7 @@ static int rtc_load(struct domain *d, hvm_domain_context_t *h) return -EINVAL; } - /* Reset the wall-clock time. In normal running, this runs with host + /* Reset the wall-clock time. In normal running, this runs with host * time, so let's keep doing that. */ s->current_tm = gmtime(get_localtime(d)); rtc_copy_date(s); diff --git a/xen/arch/x86/hvm/save.c b/xen/arch/x86/hvm/save.c index 0fc59d3487..e9377f05dc 100644 --- a/xen/arch/x86/hvm/save.c +++ b/xen/arch/x86/hvm/save.c @@ -48,7 +48,7 @@ int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr) if ( hdr->magic != HVM_FILE_MAGIC ) { - printk(XENLOG_G_ERR "HVM%d restore: bad magic number %#"PRIx32"\n", + printk(XENLOG_G_ERR "HVM%d restore: bad magic number %#" PRIx32 "\n", d->domain_id, hdr->magic); return -1; } @@ -64,7 +64,8 @@ int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr) /* CPUs ought to match but with feature-masking they might not */ if ( (hdr->cpuid & ~0x0fUL) != (eax & ~0x0fUL) ) printk(XENLOG_G_INFO "HVM%d restore: VM saved on one CPU " - "(%#"PRIx32") and restored on another (%#"PRIx32").\n", + "(%#" PRIx32 ") and restored on another (%#" PRIx32 + ").\n", d->domain_id, hdr->cpuid, eax); /* Restore guest's preferred TSC frequency. 
*/ @@ -83,7 +84,8 @@ int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr) } /* List of handlers for various HVM save and restore types */ -static struct { +static struct +{ hvm_save_handler save; hvm_load_handler load; const char *name; @@ -92,11 +94,10 @@ static struct { } hvm_sr_handlers[HVM_SAVE_CODE_MAX + 1]; /* Init-time function to add entries to that list */ -void __init hvm_register_savevm(uint16_t typecode, - const char *name, +void __init hvm_register_savevm(uint16_t typecode, const char *name, hvm_save_handler save_state, - hvm_load_handler load_state, - size_t size, int kind) + hvm_load_handler load_state, size_t size, + int kind) { ASSERT(typecode <= HVM_SAVE_CODE_MAX); ASSERT(hvm_sr_handlers[typecode].save == NULL); @@ -115,12 +116,12 @@ size_t hvm_save_size(struct domain *d) int i; /* Basic overhead for header and footer */ - sz = (2 * sizeof (struct hvm_save_descriptor)) + HVM_SAVE_LENGTH(HEADER); + sz = (2 * sizeof(struct hvm_save_descriptor)) + HVM_SAVE_LENGTH(HEADER); /* Plus space for each thing we will be saving */ for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ ) if ( hvm_sr_handlers[i].kind == HVMSR_PER_VCPU ) - for_each_vcpu(d, v) + for_each_vcpu (d, v) sz += hvm_sr_handlers[i].size; else sz += hvm_sr_handlers[i].size; @@ -136,12 +137,11 @@ int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance, XEN_GUEST_HANDLE_64(uint8) handle, uint64_t *bufsz) { int rv; - hvm_domain_context_t ctxt = { }; + hvm_domain_context_t ctxt = {}; const struct hvm_save_descriptor *desc; struct vcpu *v; - if ( d->is_dying || - typecode > HVM_SAVE_CODE_MAX || + if ( d->is_dying || typecode > HVM_SAVE_CODE_MAX || hvm_sr_handlers[typecode].size < sizeof(*desc) || !hvm_sr_handlers[typecode].save ) return -EINVAL; @@ -163,10 +163,12 @@ int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance, domain_pause(d); if ( (rv = hvm_sr_handlers[typecode].save(v, &ctxt)) != 0 ) - printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n", + printk(XENLOG_G_ERR "HVM%d save: failed to save type %" PRIu16 + " (%d)\n", d->domain_id, typecode, rv); - else if ( (rv = hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU ? - -ENODATA : -ENOENT), ctxt.cur >= sizeof(*desc) ) + else if ( (rv = hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU ? 
-ENODATA + : -ENOENT), + ctxt.cur >= sizeof(*desc) ) { uint32_t off; @@ -175,8 +177,7 @@ int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance, desc = (void *)(ctxt.data + off); /* Move past header */ off += sizeof(*desc); - if ( ctxt.cur < desc->length || - off > ctxt.cur - desc->length ) + if ( ctxt.cur < desc->length || off > ctxt.cur - desc->length ) break; if ( instance == desc->instance ) { @@ -244,14 +245,14 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h) { struct vcpu *v; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { - printk(XENLOG_G_INFO "HVM %pv save: %s\n", - v, hvm_sr_handlers[i].name); + printk(XENLOG_G_INFO "HVM %pv save: %s\n", v, + hvm_sr_handlers[i].name); if ( handler(v, h) != 0 ) { printk(XENLOG_G_ERR - "HVM %pv save: failed to save type %"PRIu16"\n", + "HVM %pv save: failed to save type %" PRIu16 "\n", v, i); return -ENODATA; } @@ -259,12 +260,12 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h) } else { - printk(XENLOG_G_INFO "HVM d%d save: %s\n", - d->domain_id, hvm_sr_handlers[i].name); + printk(XENLOG_G_INFO "HVM d%d save: %s\n", d->domain_id, + hvm_sr_handlers[i].name); if ( handler(d->vcpu[0], h) != 0 ) { - printk(XENLOG_G_ERR - "HVM d%d save: failed to save type %"PRIu16"\n", + printk(XENLOG_G_ERR "HVM d%d save: failed to save type %" PRIu16 + "\n", d->domain_id, i); return -ENODATA; } @@ -303,11 +304,11 @@ int hvm_load(struct domain *d, hvm_domain_context_t *h) return -1; /* Down all the vcpus: we only re-enable the ones that had state saved. */ - for_each_vcpu(d, v) + for_each_vcpu (d, v) if ( !test_and_set_bit(_VPF_down, &v->pause_flags) ) vcpu_sleep_nosync(v); - for ( ; ; ) + for ( ;; ) { if ( h->size - h->cur < sizeof(struct hvm_save_descriptor) ) { @@ -333,7 +334,7 @@ int hvm_load(struct domain *d, hvm_domain_context_t *h) } /* Load the entry */ - printk(XENLOG_G_INFO "HVM%d restore: %s %"PRIu16"\n", d->domain_id, + printk(XENLOG_G_INFO "HVM%d restore: %s %" PRIu16 "\n", d->domain_id, hvm_sr_handlers[desc->typecode].name, desc->instance); if ( handler(d, h) != 0 ) { @@ -349,13 +350,14 @@ int hvm_load(struct domain *d, hvm_domain_context_t *h) int _hvm_init_entry(struct hvm_domain_context *h, uint16_t tc, uint16_t inst, uint32_t len) { - struct hvm_save_descriptor *d - = (struct hvm_save_descriptor *)&h->data[h->cur]; + struct hvm_save_descriptor *d = + (struct hvm_save_descriptor *)&h->data[h->cur]; - if ( h->size - h->cur < len + sizeof (*d) ) + if ( h->size - h->cur < len + sizeof(*d) ) { printk(XENLOG_G_WARNING "HVM save: no room for" - " %"PRIu32" + %zu bytes for typecode %"PRIu16"\n", + " %" PRIu32 " + %zu bytes for typecode %" PRIu16 + "\n", len, sizeof(*d), tc); return -1; } @@ -368,8 +370,7 @@ int _hvm_init_entry(struct hvm_domain_context *h, uint16_t tc, uint16_t inst, return 0; } -void _hvm_write_entry(struct hvm_domain_context *h, void *src, - uint32_t src_len) +void _hvm_write_entry(struct hvm_domain_context *h, void *src, uint32_t src_len) { memcpy(&h->data[h->cur], src, src_len); h->cur += src_len; @@ -378,14 +379,15 @@ void _hvm_write_entry(struct hvm_domain_context *h, void *src, int _hvm_check_entry(struct hvm_domain_context *h, uint16_t type, uint32_t len, bool strict_length) { - struct hvm_save_descriptor *d - = (struct hvm_save_descriptor *)&h->data[h->cur]; + struct hvm_save_descriptor *d = + (struct hvm_save_descriptor *)&h->data[h->cur]; - if ( sizeof(*d) > h->size - h->cur) + if ( sizeof(*d) > h->size - h->cur ) { printk(XENLOG_G_WARNING "HVM restore: not enough data left to read %zu 
bytes " - "for type %u header\n", sizeof(*d), type); + "for type %u header\n", + sizeof(*d), type); return -1; } @@ -409,8 +411,8 @@ int _hvm_check_entry(struct hvm_domain_context *h, uint16_t type, uint32_t len, void _hvm_read_entry(struct hvm_domain_context *h, void *dest, uint32_t dest_len) { - struct hvm_save_descriptor *d - = (struct hvm_save_descriptor *)&h->data[h->cur - sizeof(*d)]; + struct hvm_save_descriptor *d = + (struct hvm_save_descriptor *)&h->data[h->cur - sizeof(*d)]; BUG_ON(d->length > dest_len); diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c index bd398dbb1b..220edda94d 100644 --- a/xen/arch/x86/hvm/stdvga.c +++ b/xen/arch/x86/hvm/stdvga.c @@ -5,7 +5,7 @@ * (c) 2007. This file and the modifications can be redistributed and/or * modified under the terms and conditions of the GNU General Public * License, version 2.1 and not any later version of the GPL, as published - * by the Free Software Foundation. + * by the Free Software Foundation. * * This improves the performance of Standard VGA, * the mode used during Windows boot and by the Linux @@ -40,34 +40,16 @@ #define PAT(x) (x) static const uint32_t mask16[16] = { - PAT(0x00000000), - PAT(0x000000ff), - PAT(0x0000ff00), - PAT(0x0000ffff), - PAT(0x00ff0000), - PAT(0x00ff00ff), - PAT(0x00ffff00), - PAT(0x00ffffff), - PAT(0xff000000), - PAT(0xff0000ff), - PAT(0xff00ff00), - PAT(0xff00ffff), - PAT(0xffff0000), - PAT(0xffff00ff), - PAT(0xffffff00), - PAT(0xffffffff), + PAT(0x00000000), PAT(0x000000ff), PAT(0x0000ff00), PAT(0x0000ffff), + PAT(0x00ff0000), PAT(0x00ff00ff), PAT(0x00ffff00), PAT(0x00ffffff), + PAT(0xff000000), PAT(0xff0000ff), PAT(0xff00ff00), PAT(0xff00ffff), + PAT(0xffff0000), PAT(0xffff00ff), PAT(0xffffff00), PAT(0xffffffff), }; /* force some bits to zero */ static const uint8_t sr_mask[8] = { - (uint8_t)~0xfc, - (uint8_t)~0xc2, - (uint8_t)~0xf0, - (uint8_t)~0xc0, - (uint8_t)~0xf1, - (uint8_t)~0xff, - (uint8_t)~0xff, - (uint8_t)~0x00, + (uint8_t)~0xfc, (uint8_t)~0xc2, (uint8_t)~0xf0, (uint8_t)~0xc0, + (uint8_t)~0xf1, (uint8_t)~0xff, (uint8_t)~0xff, (uint8_t)~0x00, }; static const uint8_t gr_mask[9] = { @@ -137,23 +119,23 @@ static int stdvga_outb(uint64_t addr, uint8_t val) struct hvm_hw_stdvga *s = ¤t->domain->arch.hvm.stdvga; int rc = 1, prev_stdvga = s->stdvga; - switch ( addr ) + switch (addr) { - case 0x3c4: /* sequencer address register */ + case 0x3c4: /* sequencer address register */ s->sr_index = val; break; - case 0x3c5: /* sequencer data register */ + case 0x3c5: /* sequencer data register */ rc = (s->sr_index < sizeof(s->sr)); if ( rc ) - s->sr[s->sr_index] = val & sr_mask[s->sr_index] ; + s->sr[s->sr_index] = val & sr_mask[s->sr_index]; break; - case 0x3ce: /* graphics address register */ + case 0x3ce: /* graphics address register */ s->gr_index = val; break; - case 0x3cf: /* graphics data register */ + case 0x3cf: /* graphics data register */ rc = (s->gr_index < sizeof(s->gr)); if ( rc ) s->gr[s->gr_index] = val & gr_mask[s->gr_index]; @@ -183,7 +165,7 @@ static int stdvga_outb(uint64_t addr, uint8_t val) static void stdvga_out(uint32_t port, uint32_t bytes, uint32_t val) { - switch ( bytes ) + switch (bytes) { case 1: stdvga_outb(port, val); @@ -199,8 +181,8 @@ static void stdvga_out(uint32_t port, uint32_t bytes, uint32_t val) } } -static int stdvga_intercept_pio( - int dir, unsigned int port, unsigned int bytes, uint32_t *val) +static int stdvga_intercept_pio(int dir, unsigned int port, unsigned int bytes, + uint32_t *val) { struct hvm_hw_stdvga *s = 
¤t->domain->arch.hvm.stdvga; @@ -214,13 +196,13 @@ static int stdvga_intercept_pio( return X86EMUL_UNHANDLEABLE; /* propagate to external ioemu */ } -static unsigned int stdvga_mem_offset( - struct hvm_hw_stdvga *s, unsigned int mmio_addr) +static unsigned int stdvga_mem_offset(struct hvm_hw_stdvga *s, + unsigned int mmio_addr) { unsigned int memory_map_mode = (s->gr[6] >> 2) & 3; unsigned int offset = mmio_addr & 0x1ffff; - switch ( memory_map_mode ) + switch (memory_map_mode) { case 0: break; @@ -244,11 +226,11 @@ static unsigned int stdvga_mem_offset( return offset; - fail: +fail: return ~0u; } -#define GET_PLANE(data, p) (((data) >> ((p) * 8)) & 0xff) +#define GET_PLANE(data, p) (((data) >> ((p)*8)) & 0xff) static uint8_t stdvga_mem_readb(uint64_t addr) { @@ -302,12 +284,12 @@ static uint8_t stdvga_mem_readb(uint64_t addr) return ret; } -static int stdvga_mem_read(const struct hvm_io_handler *handler, - uint64_t addr, uint32_t size, uint64_t *p_data) +static int stdvga_mem_read(const struct hvm_io_handler *handler, uint64_t addr, + uint32_t size, uint64_t *p_data) { uint64_t data = ~0ul; - switch ( size ) + switch (size) { case 1: data = stdvga_mem_readb(addr); @@ -326,7 +308,7 @@ static int stdvga_mem_read(const struct hvm_io_handler *handler, break; case 8: - data = (uint64_t)(stdvga_mem_readb(addr)); + data = (uint64_t)(stdvga_mem_readb(addr)); data |= (uint64_t)(stdvga_mem_readb(addr + 1)) << 8; data |= (uint64_t)(stdvga_mem_readb(addr + 2)) << 16; data |= (uint64_t)(stdvga_mem_readb(addr + 3)) << 24; @@ -384,7 +366,7 @@ static void stdvga_mem_writeb(uint64_t addr, uint32_t val) else { write_mode = s->gr[5] & 3; - switch ( write_mode ) + switch (write_mode) { default: case 0: @@ -418,7 +400,7 @@ static void stdvga_mem_writeb(uint64_t addr, uint32_t val) /* apply logical operation */ func_select = s->gr[3] >> 3; - switch ( func_select ) + switch (func_select) { case 0: default: @@ -453,9 +435,8 @@ static void stdvga_mem_writeb(uint64_t addr, uint32_t val) } } -static int stdvga_mem_write(const struct hvm_io_handler *handler, - uint64_t addr, uint32_t size, - uint64_t data) +static int stdvga_mem_write(const struct hvm_io_handler *handler, uint64_t addr, + uint32_t size, uint64_t data) { struct hvm_hw_stdvga *s = ¤t->domain->arch.hvm.stdvga; ioreq_t p = { @@ -472,33 +453,33 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler, goto done; /* Intercept mmio write */ - switch ( size ) + switch (size) { case 1: - stdvga_mem_writeb(addr, (data >> 0) & 0xff); + stdvga_mem_writeb(addr, (data >> 0) & 0xff); break; case 2: - stdvga_mem_writeb(addr+0, (data >> 0) & 0xff); - stdvga_mem_writeb(addr+1, (data >> 8) & 0xff); + stdvga_mem_writeb(addr + 0, (data >> 0) & 0xff); + stdvga_mem_writeb(addr + 1, (data >> 8) & 0xff); break; case 4: - stdvga_mem_writeb(addr+0, (data >> 0) & 0xff); - stdvga_mem_writeb(addr+1, (data >> 8) & 0xff); - stdvga_mem_writeb(addr+2, (data >> 16) & 0xff); - stdvga_mem_writeb(addr+3, (data >> 24) & 0xff); + stdvga_mem_writeb(addr + 0, (data >> 0) & 0xff); + stdvga_mem_writeb(addr + 1, (data >> 8) & 0xff); + stdvga_mem_writeb(addr + 2, (data >> 16) & 0xff); + stdvga_mem_writeb(addr + 3, (data >> 24) & 0xff); break; case 8: - stdvga_mem_writeb(addr+0, (data >> 0) & 0xff); - stdvga_mem_writeb(addr+1, (data >> 8) & 0xff); - stdvga_mem_writeb(addr+2, (data >> 16) & 0xff); - stdvga_mem_writeb(addr+3, (data >> 24) & 0xff); - stdvga_mem_writeb(addr+4, (data >> 32) & 0xff); - stdvga_mem_writeb(addr+5, (data >> 40) & 0xff); - stdvga_mem_writeb(addr+6, (data >> 48) 
& 0xff); - stdvga_mem_writeb(addr+7, (data >> 56) & 0xff); + stdvga_mem_writeb(addr + 0, (data >> 0) & 0xff); + stdvga_mem_writeb(addr + 1, (data >> 8) & 0xff); + stdvga_mem_writeb(addr + 2, (data >> 16) & 0xff); + stdvga_mem_writeb(addr + 3, (data >> 24) & 0xff); + stdvga_mem_writeb(addr + 4, (data >> 32) & 0xff); + stdvga_mem_writeb(addr + 5, (data >> 40) & 0xff); + stdvga_mem_writeb(addr + 6, (data >> 48) & 0xff); + stdvga_mem_writeb(addr + 7, (data >> 56) & 0xff); break; default: @@ -506,7 +487,7 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler, break; } - done: +done: srv = hvm_select_ioreq_server(current->domain, &p); if ( !srv ) return X86EMUL_UNHANDLEABLE; @@ -553,7 +534,7 @@ static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler, /* s->lock intentionally held */ return 1; - reject: +reject: spin_unlock(&s->lock); return 0; } @@ -565,12 +546,11 @@ static void stdvga_mem_complete(const struct hvm_io_handler *handler) spin_unlock(&s->lock); } -static const struct hvm_io_ops stdvga_mem_ops = { - .accept = stdvga_mem_accept, - .read = stdvga_mem_read, - .write = stdvga_mem_write, - .complete = stdvga_mem_complete -}; +static const struct hvm_io_ops stdvga_mem_ops = {.accept = stdvga_mem_accept, + .read = stdvga_mem_read, + .write = stdvga_mem_write, + .complete = + stdvga_mem_complete}; void stdvga_init(struct domain *d) { @@ -583,7 +563,7 @@ void stdvga_init(struct domain *d) memset(s, 0, sizeof(*s)); spin_lock_init(&s->lock); - + for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ ) { pg = alloc_domheap_page(d, MEMF_no_owner); diff --git a/xen/arch/x86/hvm/svm/asid.c b/xen/arch/x86/hvm/svm/asid.c index e554e25213..1fd995d05c 100644 --- a/xen/arch/x86/hvm/svm/asid.c +++ b/xen/arch/x86/hvm/svm/asid.c @@ -41,9 +41,9 @@ void svm_asid_handle_vmrun(void) { struct vcpu *curr = current; struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb; - struct hvm_vcpu_asid *p_asid = - nestedhvm_vcpu_in_guestmode(curr) - ? &vcpu_nestedhvm(curr).nv_n2asid : &curr->arch.hvm.n1asid; + struct hvm_vcpu_asid *p_asid = nestedhvm_vcpu_in_guestmode(curr) + ? &vcpu_nestedhvm(curr).nv_n2asid + : &curr->arch.hvm.n1asid; bool_t need_flush = hvm_asid_handle_vmenter(p_asid); /* ASID 0 indicates that ASIDs are disabled. */ @@ -54,7 +54,7 @@ void svm_asid_handle_vmrun(void) return; } - if (vmcb_get_guest_asid(vmcb) != p_asid->asid) + if ( vmcb_get_guest_asid(vmcb) != p_asid->asid ) vmcb_set_guest_asid(vmcb, p_asid->asid); vmcb->tlb_control = need_flush; } diff --git a/xen/arch/x86/hvm/svm/emulate.c b/xen/arch/x86/hvm/svm/emulate.c index 3e52592847..0a4d2f63b8 100644 --- a/xen/arch/x86/hvm/svm/emulate.c +++ b/xen/arch/x86/hvm/svm/emulate.c @@ -34,7 +34,7 @@ static unsigned long svm_nextrip_insn_length(struct vcpu *v) return 0; #ifndef NDEBUG - switch ( vmcb->exitcode ) + switch (vmcb->exitcode) { case VMEXIT_CR0_READ ... VMEXIT_DR15_WRITE: /* faults due to instruction intercepts */ @@ -92,7 +92,7 @@ unsigned int svm_get_insn_len(struct vcpu *v, unsigned int instr_enc) x86_emulate_free_state(state); /* Extract components from instr_enc. 
*/ - instr_modrm = instr_enc & 0xff; + instr_modrm = instr_enc & 0xff; instr_opcode = instr_enc >> 8; if ( instr_opcode == ctxt.ctxt.opcode ) @@ -100,17 +100,17 @@ unsigned int svm_get_insn_len(struct vcpu *v, unsigned int instr_enc) if ( !instr_modrm ) return emul_len; - if ( modrm_mod == MASK_EXTR(instr_modrm, 0300) && + if ( modrm_mod == MASK_EXTR(instr_modrm, 0300) && (modrm_reg & 7) == MASK_EXTR(instr_modrm, 0070) && - (modrm_rm & 7) == MASK_EXTR(instr_modrm, 0007) ) + (modrm_rm & 7) == MASK_EXTR(instr_modrm, 0007) ) return emul_len; } - printk(XENLOG_G_WARNING - "Insn mismatch: Expected opcode %#x, modrm %#x, got nrip_len %lu, emul_len %lu\n", + printk(XENLOG_G_WARNING "Insn mismatch: Expected opcode %#x, modrm %#x, " + "got nrip_len %lu, emul_len %lu\n", instr_opcode, instr_modrm, nrip_len, emul_len); - hvm_dump_emulation_state(XENLOG_G_WARNING, "SVM Insn len", - &ctxt, X86EMUL_UNHANDLEABLE); + hvm_dump_emulation_state(XENLOG_G_WARNING, "SVM Insn len", &ctxt, + X86EMUL_UNHANDLEABLE); hvm_inject_hw_exception(TRAP_gp_fault, 0); return 0; diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c index ff755165cd..0e967c348e 100644 --- a/xen/arch/x86/hvm/svm/intr.c +++ b/xen/arch/x86/hvm/svm/intr.c @@ -1,6 +1,6 @@ /* * intr.c: Interrupt handling for SVM. - * Copyright (c) 2005, AMD Inc. + * Copyright (c) 2005, AMD Inc. * Copyright (c) 2004, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it @@ -57,8 +57,8 @@ static void svm_inject_nmi(struct vcpu *v) * SVM does not virtualise the NMI mask, so we emulate it by intercepting * the next IRET and blocking NMI injection until the intercept triggers. */ - vmcb_set_general1_intercepts( - vmcb, general1_intercepts | GENERAL1_INTERCEPT_IRET); + vmcb_set_general1_intercepts(vmcb, + general1_intercepts | GENERAL1_INTERCEPT_IRET); } static void svm_inject_extint(struct vcpu *v, int vector) @@ -83,9 +83,11 @@ static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack) ASSERT(intack.source != hvm_intsrc_none); - if ( nestedhvm_enabled(v->domain) ) { + if ( nestedhvm_enabled(v->domain) ) + { struct nestedvcpu *nv = &vcpu_nestedhvm(v); - if ( nv->nv_vmentry_pending ) { + if ( nv->nv_vmentry_pending ) + { struct vmcb_struct *gvmcb = nv->nv_vvmcx; /* check if l1 guest injects interrupt into l2 guest via vintr. @@ -99,7 +101,7 @@ static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack) } HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source, - vmcb->eventinj.fields.v?vmcb->eventinj.fields.vector:-1); + vmcb->eventinj.fields.v ? 
vmcb->eventinj.fields.vector : -1); /* * Create a dummy virtual interrupt to intercept as soon as the @@ -122,16 +124,16 @@ static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack) return; intr = vmcb_get_vintr(vmcb); - intr.fields.irq = 1; - intr.fields.vector = 0; - intr.fields.prio = intack.vector >> 4; + intr.fields.irq = 1; + intr.fields.vector = 0; + intr.fields.prio = intack.vector >> 4; intr.fields.ign_tpr = (intack.source != hvm_intsrc_lapic); vmcb_set_vintr(vmcb, intr); - vmcb_set_general1_intercepts( - vmcb, general1_intercepts | GENERAL1_INTERCEPT_VINTR); + vmcb_set_general1_intercepts(vmcb, general1_intercepts | + GENERAL1_INTERCEPT_VINTR); } -void svm_intr_assist(void) +void svm_intr_assist(void) { struct vcpu *v = current; struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb; @@ -151,7 +153,8 @@ void svm_intr_assist(void) return; intblk = hvm_interrupt_blocked(v, intack); - if ( intblk == hvm_intblk_svm_gif ) { + if ( intblk == hvm_intblk_svm_gif ) + { ASSERT(nestedhvm_enabled(v->domain)); return; } @@ -167,10 +170,11 @@ void svm_intr_assist(void) * the l1 guest occurred. */ rc = nestedsvm_vcpu_interrupt(v, intack); - switch (rc) { + switch (rc) + { case NSVM_INTR_NOTINTERCEPTED: /* Inject interrupt into 2nd level guest directly. */ - break; + break; case NSVM_INTR_NOTHANDLED: case NSVM_INTR_FORCEVMEXIT: return; @@ -179,7 +183,7 @@ void svm_intr_assist(void) return; default: panic("%s: nestedsvm_vcpu_interrupt can't handle value %#x\n", - __func__, rc); + __func__, rc); } } @@ -212,7 +216,7 @@ void svm_intr_assist(void) } else { - HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0); + HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/0); svm_inject_extint(v, intack.vector); pt_intr_post(v, intack); } diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c index 35c1a04542..f10c3e68f2 100644 --- a/xen/arch/x86/hvm/svm/nestedsvm.c +++ b/xen/arch/x86/hvm/svm/nestedsvm.c @@ -24,31 +24,27 @@ #include #include #include /* paging_mode_hap */ -#include /* for local_event_delivery_(en|dis)able */ -#include /* p2m_get_pagetable, p2m_get_nestedp2m */ +#include /* for local_event_delivery_(en|dis)able */ +#include /* p2m_get_pagetable, p2m_get_nestedp2m */ +#define NSVM_ERROR_VVMCB 1 +#define NSVM_ERROR_VMENTRY 2 -#define NSVM_ERROR_VVMCB 1 -#define NSVM_ERROR_VMENTRY 2 - -static void -nestedsvm_vcpu_clgi(struct vcpu *v) +static void nestedsvm_vcpu_clgi(struct vcpu *v) { /* clear gif flag */ vcpu_nestedsvm(v).ns_gif = 0; local_event_delivery_disable(); /* mask events for PV drivers */ } -static void -nestedsvm_vcpu_stgi(struct vcpu *v) +static void nestedsvm_vcpu_stgi(struct vcpu *v) { /* enable gif flag */ vcpu_nestedsvm(v).ns_gif = 1; local_event_delivery_enable(); /* unmask events for PV drivers */ } -static int -nestedsvm_vmcb_isvalid(struct vcpu *v, uint64_t vmcxaddr) +static int nestedsvm_vmcb_isvalid(struct vcpu *v, uint64_t vmcxaddr) { /* Address must be 4k aligned */ if ( (vmcxaddr & ~PAGE_MASK) != 0 ) @@ -67,7 +63,8 @@ int nestedsvm_vmcb_map(struct vcpu *v, uint64_t vmcbaddr) { struct nestedvcpu *nv = &vcpu_nestedhvm(v); - if (nv->nv_vvmcx != NULL && nv->nv_vvmcxaddr != vmcbaddr) { + if ( nv->nv_vvmcx != NULL && nv->nv_vvmcxaddr != vmcbaddr ) + { ASSERT(vvmcx_valid(v)); hvm_unmap_guest_frame(nv->nv_vvmcx, 1); nv->nv_vvmcx = NULL; @@ -77,8 +74,8 @@ int nestedsvm_vmcb_map(struct vcpu *v, uint64_t vmcbaddr) if ( !nv->nv_vvmcx ) { bool_t writable; - void *vvmcx = hvm_map_guest_frame_rw(paddr_to_pfn(vmcbaddr), 1, - &writable); + void *vvmcx = + 
hvm_map_guest_frame_rw(paddr_to_pfn(vmcbaddr), 1, &writable); if ( !vvmcx ) return 0; @@ -103,18 +100,18 @@ int nsvm_vcpu_initialise(struct vcpu *v) msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0); svm->ns_cached_msrpm = msrpm; - if (msrpm == NULL) + if ( msrpm == NULL ) goto err; memset(msrpm, 0x0, MSRPM_SIZE); msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0); svm->ns_merged_msrpm = msrpm; - if (msrpm == NULL) + if ( msrpm == NULL ) goto err; memset(msrpm, 0x0, MSRPM_SIZE); nv->nv_n2vmcx = alloc_vmcb(); - if (nv->nv_n2vmcx == NULL) + if ( nv->nv_n2vmcx == NULL ) goto err; nv->nv_n2vmcx_pa = virt_to_maddr(nv->nv_n2vmcx); @@ -136,27 +133,30 @@ void nsvm_vcpu_destroy(struct vcpu *v) * in order to avoid double free of l2 vmcb and the possible memory leak * of l1 vmcb page. */ - if (nv->nv_n1vmcx) + if ( nv->nv_n1vmcx ) v->arch.hvm.svm.vmcb = nv->nv_n1vmcx; - if (svm->ns_cached_msrpm) { + if ( svm->ns_cached_msrpm ) + { free_xenheap_pages(svm->ns_cached_msrpm, get_order_from_bytes(MSRPM_SIZE)); svm->ns_cached_msrpm = NULL; } - if (svm->ns_merged_msrpm) { + if ( svm->ns_merged_msrpm ) + { free_xenheap_pages(svm->ns_merged_msrpm, get_order_from_bytes(MSRPM_SIZE)); svm->ns_merged_msrpm = NULL; } hvm_unmap_guest_frame(nv->nv_vvmcx, 1); nv->nv_vvmcx = NULL; - if (nv->nv_n2vmcx) { + if ( nv->nv_n2vmcx ) + { free_vmcb(nv->nv_n2vmcx); nv->nv_n2vmcx = NULL; nv->nv_n2vmcx_pa = INVALID_PADDR; } - if (svm->ns_iomap) + if ( svm->ns_iomap ) svm->ns_iomap = NULL; } @@ -184,27 +184,30 @@ int nsvm_vcpu_reset(struct vcpu *v) svm->ns_vmexit.exitinfo1 = 0; svm->ns_vmexit.exitinfo2 = 0; - if (svm->ns_iomap) + if ( svm->ns_iomap ) svm->ns_iomap = NULL; nestedsvm_vcpu_stgi(v); return 0; } -static uint64_t nestedsvm_fpu_vmentry(uint64_t n1cr0, - struct vmcb_struct *vvmcb, - struct vmcb_struct *n1vmcb, struct vmcb_struct *n2vmcb) +static uint64_t nestedsvm_fpu_vmentry(uint64_t n1cr0, struct vmcb_struct *vvmcb, + struct vmcb_struct *n1vmcb, + struct vmcb_struct *n2vmcb) { uint64_t vcr0; vcr0 = vvmcb->_cr0; - if ( !(n1cr0 & X86_CR0_TS) && (n1vmcb->_cr0 & X86_CR0_TS) ) { + if ( !(n1cr0 & X86_CR0_TS) && (n1vmcb->_cr0 & X86_CR0_TS) ) + { /* svm_fpu_leave() run while l1 guest was running. * Sync FPU state with l2 guest. */ vcr0 |= X86_CR0_TS; n2vmcb->_exception_intercepts |= (1U << TRAP_no_device); - } else if ( !(vcr0 & X86_CR0_TS) && (n2vmcb->_cr0 & X86_CR0_TS) ) { + } + else if ( !(vcr0 & X86_CR0_TS) && (n2vmcb->_cr0 & X86_CR0_TS) ) + { /* svm_fpu_enter() run while l1 guest was running. * Sync FPU state with l2 guest. */ vcr0 &= ~X86_CR0_TS; @@ -215,14 +218,18 @@ static uint64_t nestedsvm_fpu_vmentry(uint64_t n1cr0, } static void nestedsvm_fpu_vmexit(struct vmcb_struct *n1vmcb, - struct vmcb_struct *n2vmcb, uint64_t n1cr0, uint64_t guest_cr0) + struct vmcb_struct *n2vmcb, uint64_t n1cr0, + uint64_t guest_cr0) { - if ( !(guest_cr0 & X86_CR0_TS) && (n2vmcb->_cr0 & X86_CR0_TS) ) { + if ( !(guest_cr0 & X86_CR0_TS) && (n2vmcb->_cr0 & X86_CR0_TS) ) + { /* svm_fpu_leave() run while l2 guest was running. * Sync FPU state with l1 guest. */ n1vmcb->_cr0 |= X86_CR0_TS; n1vmcb->_exception_intercepts |= (1U << TRAP_no_device); - } else if ( !(n1cr0 & X86_CR0_TS) && (n1vmcb->_cr0 & X86_CR0_TS) ) { + } + else if ( !(n1cr0 & X86_CR0_TS) && (n1vmcb->_cr0 & X86_CR0_TS) ) + { /* svm_fpu_enter() run while l2 guest was running. * Sync FPU state with l1 guest. 
*/ n1vmcb->_cr0 &= ~X86_CR0_TS; @@ -280,7 +287,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs) rc = hvm_set_efer(n1vmcb->_efer); if ( rc == X86EMUL_EXCEPTION ) hvm_inject_hw_exception(TRAP_gp_fault, 0); - if (rc != X86EMUL_OKAY) + if ( rc != X86EMUL_OKAY ) gdprintk(XENLOG_ERR, "hvm_set_efer failed, rc: %u\n", rc); /* CR4 */ @@ -288,18 +295,17 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs) rc = hvm_set_cr4(n1vmcb->_cr4, true); if ( rc == X86EMUL_EXCEPTION ) hvm_inject_hw_exception(TRAP_gp_fault, 0); - if (rc != X86EMUL_OKAY) + if ( rc != X86EMUL_OKAY ) gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc); /* CR0 */ - nestedsvm_fpu_vmexit(n1vmcb, n2vmcb, - svm->ns_cr0, v->arch.hvm.guest_cr[0]); + nestedsvm_fpu_vmexit(n1vmcb, n2vmcb, svm->ns_cr0, v->arch.hvm.guest_cr[0]); v->arch.hvm.guest_cr[0] = n1vmcb->_cr0 | X86_CR0_PE; n1vmcb->rflags &= ~X86_EFLAGS_VM; rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE, true); if ( rc == X86EMUL_EXCEPTION ) hvm_inject_hw_exception(TRAP_gp_fault, 0); - if (rc != X86EMUL_OKAY) + if ( rc != X86EMUL_OKAY ) gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc); svm->ns_cr0 = v->arch.hvm.guest_cr[0]; @@ -309,17 +315,22 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs) /* CR3 */ /* Nested paging mode */ - if (nestedhvm_paging_mode_hap(v)) { + if ( nestedhvm_paging_mode_hap(v) ) + { /* host nested paging + guest nested paging. */ /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */ - } else if (paging_mode_hap(v->domain)) { + } + else if ( paging_mode_hap(v->domain) ) + { /* host nested paging + guest shadow paging. */ /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */ - } else { + } + else + { /* host shadow paging + guest shadow paging. */ /* Reset MMU context -- XXX (hostrestore) not yet working*/ - if (!pagetable_is_null(v->arch.guest_table)) + if ( !pagetable_is_null(v->arch.guest_table) ) put_page(pagetable_get_page(v->arch.guest_table)); v->arch.guest_table = pagetable_null(); /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */ @@ -327,7 +338,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs) rc = hvm_set_cr3(n1vmcb->_cr3, true); if ( rc == X86EMUL_EXCEPTION ) hvm_inject_hw_exception(TRAP_gp_fault, 0); - if (rc != X86EMUL_OKAY) + if ( rc != X86EMUL_OKAY ) gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc); regs->rax = n1vmcb->rax; @@ -391,19 +402,18 @@ static int nsvm_vmrun_permissionmap(struct vcpu *v, bool_t viopm) nv->nv_ioportED = ioport_ed; /* v->arch.hvm.svm.msrpm has type unsigned long, thus BYTES_PER_LONG. 
*/ - for (i = 0; i < MSRPM_SIZE / BYTES_PER_LONG; i++) + for ( i = 0; i < MSRPM_SIZE / BYTES_PER_LONG; i++ ) svm->ns_merged_msrpm[i] = arch_svm->msrpm[i] | ns_msrpm_ptr[i]; - host_vmcb->_iopm_base_pa = - (uint64_t)virt_to_maddr(svm->ns_iomap); - host_vmcb->_msrpm_base_pa = - (uint64_t)virt_to_maddr(svm->ns_merged_msrpm); + host_vmcb->_iopm_base_pa = (uint64_t)virt_to_maddr(svm->ns_iomap); + host_vmcb->_msrpm_base_pa = (uint64_t)virt_to_maddr(svm->ns_merged_msrpm); return 0; } static void nestedsvm_vmcb_set_nestedp2m(struct vcpu *v, - struct vmcb_struct *vvmcb, struct vmcb_struct *n2vmcb) + struct vmcb_struct *vvmcb, + struct vmcb_struct *n2vmcb) { struct p2m_domain *p2m; @@ -438,14 +448,15 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) vcleanbits_valid = 1; if ( svm->ns_ovvmcb_pa == INVALID_PADDR ) vcleanbits_valid = 0; - if (svm->ns_ovvmcb_pa != nv->nv_vvmcxaddr) + if ( svm->ns_ovvmcb_pa != nv->nv_vvmcxaddr ) vcleanbits_valid = 0; -#define vcleanbit_set(_name) \ +#define vcleanbit_set(_name) \ (vcleanbits_valid && ns_vmcb->cleanbits.fields._name) /* Enable l2 guest intercepts */ - if (!vcleanbit_set(intercepts)) { + if ( !vcleanbit_set(intercepts) ) + { svm->ns_cr_intercepts = ns_vmcb->_cr_intercepts; svm->ns_dr_intercepts = ns_vmcb->_dr_intercepts; svm->ns_exception_intercepts = ns_vmcb->_exception_intercepts; @@ -458,7 +469,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) * below. Those cleanbits would be tracked in an integer field * in struct nestedsvm. * But this effort is not worth doing because: - * - Only the intercepts bit of the n1vmcb can effectively be used here + * - Only the intercepts bit of the n1vmcb can effectively be used here * - The CPU runs more instructions for the tracking than can be * safed here. * The overhead comes from (ordered from highest to lowest): @@ -470,10 +481,8 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) * tracked intercepts bit of the n1vmcb is practically *always* cleared. 
*/ - n2vmcb->_cr_intercepts = - n1vmcb->_cr_intercepts | ns_vmcb->_cr_intercepts; - n2vmcb->_dr_intercepts = - n1vmcb->_dr_intercepts | ns_vmcb->_dr_intercepts; + n2vmcb->_cr_intercepts = n1vmcb->_cr_intercepts | ns_vmcb->_cr_intercepts; + n2vmcb->_dr_intercepts = n1vmcb->_dr_intercepts | ns_vmcb->_dr_intercepts; n2vmcb->_exception_intercepts = n1vmcb->_exception_intercepts | ns_vmcb->_exception_intercepts; n2vmcb->_general1_intercepts = @@ -482,7 +491,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) n1vmcb->_general2_intercepts | ns_vmcb->_general2_intercepts; /* Nested Pause Filter */ - if (ns_vmcb->_general1_intercepts & GENERAL1_INTERCEPT_PAUSE) + if ( ns_vmcb->_general1_intercepts & GENERAL1_INTERCEPT_PAUSE ) n2vmcb->_pause_filter_count = min(n1vmcb->_pause_filter_count, ns_vmcb->_pause_filter_count); else @@ -493,7 +502,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) /* Nested IO permission bitmaps */ rc = nsvm_vmrun_permissionmap(v, vcleanbit_set(iopm)); - if (rc) + if ( rc ) return rc; /* ASID - Emulation handled in hvm_asid_handle_vmenter() */ @@ -502,7 +511,8 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) n2vmcb->tlb_control = ns_vmcb->tlb_control; /* Virtual Interrupts */ - if (!vcleanbit_set(tpr)) { + if ( !vcleanbit_set(tpr) ) + { n2vmcb->_vintr = ns_vmcb->_vintr; n2vmcb->_vintr.fields.intr_masking = 1; } @@ -520,11 +530,11 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) n2vmcb->eventinj = ns_vmcb->eventinj; /* LBR and other virtualization */ - if (!vcleanbit_set(lbr)) { + if ( !vcleanbit_set(lbr) ) + { svm->ns_virt_ext = ns_vmcb->virt_ext; } - n2vmcb->virt_ext.bytes = - n1vmcb->virt_ext.bytes | ns_vmcb->virt_ext.bytes; + n2vmcb->virt_ext.bytes = n1vmcb->virt_ext.bytes | ns_vmcb->virt_ext.bytes; /* NextRIP - only evaluated on #VMEXIT. 
*/ @@ -533,7 +543,8 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) */ /* Segments */ - if (!vcleanbit_set(seg)) { + if ( !vcleanbit_set(seg) ) + { n2vmcb->es = ns_vmcb->es; n2vmcb->cs = ns_vmcb->cs; n2vmcb->ss = ns_vmcb->ss; @@ -541,7 +552,8 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) /* CPL */ n2vmcb->_cpl = ns_vmcb->_cpl; } - if (!vcleanbit_set(dt)) { + if ( !vcleanbit_set(dt) ) + { n2vmcb->gdtr = ns_vmcb->gdtr; n2vmcb->idtr = ns_vmcb->idtr; } @@ -551,7 +563,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) rc = hvm_set_efer(ns_vmcb->_efer); if ( rc == X86EMUL_EXCEPTION ) hvm_inject_hw_exception(TRAP_gp_fault, 0); - if (rc != X86EMUL_OKAY) + if ( rc != X86EMUL_OKAY ) gdprintk(XENLOG_ERR, "hvm_set_efer failed, rc: %u\n", rc); /* CR4 */ @@ -559,7 +571,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) rc = hvm_set_cr4(ns_vmcb->_cr4, true); if ( rc == X86EMUL_EXCEPTION ) hvm_inject_hw_exception(TRAP_gp_fault, 0); - if (rc != X86EMUL_OKAY) + if ( rc != X86EMUL_OKAY ) gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc); /* CR0 */ @@ -569,7 +581,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) rc = hvm_set_cr0(cr0, true); if ( rc == X86EMUL_EXCEPTION ) hvm_inject_hw_exception(TRAP_gp_fault, 0); - if (rc != X86EMUL_OKAY) + if ( rc != X86EMUL_OKAY ) gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc); /* CR2 */ @@ -577,7 +589,8 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) hvm_update_guest_cr(v, 2); /* Nested paging mode */ - if (nestedhvm_paging_mode_hap(v)) { + if ( nestedhvm_paging_mode_hap(v) ) + { /* host nested paging + guest nested paging. */ n2vmcb->_np_enable = 1; @@ -587,9 +600,11 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) rc = hvm_set_cr3(ns_vmcb->_cr3, true); if ( rc == X86EMUL_EXCEPTION ) hvm_inject_hw_exception(TRAP_gp_fault, 0); - if (rc != X86EMUL_OKAY) + if ( rc != X86EMUL_OKAY ) gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc); - } else if (paging_mode_hap(v->domain)) { + } + else if ( paging_mode_hap(v->domain) ) + { /* host nested paging + guest shadow paging. */ n2vmcb->_np_enable = 1; /* Keep h_cr3 as it is. */ @@ -601,9 +616,11 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) rc = hvm_set_cr3(ns_vmcb->_cr3, true); if ( rc == X86EMUL_EXCEPTION ) hvm_inject_hw_exception(TRAP_gp_fault, 0); - if (rc != X86EMUL_OKAY) + if ( rc != X86EMUL_OKAY ) gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc); - } else { + } + else + { /* host shadow paging + guest shadow paging. 
*/ n2vmcb->_np_enable = 0; n2vmcb->_h_cr3 = 0x0; @@ -614,7 +631,8 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) } /* DRn */ - if (!vcleanbit_set(dr)) { + if ( !vcleanbit_set(dr) ) + { n2vmcb->_dr7 = ns_vmcb->_dr7; n2vmcb->_dr6 = ns_vmcb->_dr6; } @@ -637,11 +655,13 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) */ /* PAT */ - if (!vcleanbit_set(np)) { + if ( !vcleanbit_set(np) ) + { n2vmcb->_g_pat = ns_vmcb->_g_pat; } - if (!vcleanbit_set(lbr)) { + if ( !vcleanbit_set(lbr) ) + { /* Debug Control MSR */ n2vmcb->_debugctlmsr = ns_vmcb->_debugctlmsr; @@ -656,13 +676,15 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) n2vmcb->cleanbits.bytes = 0; rc = svm_vmcb_isvalid(__func__, ns_vmcb, v, true); - if (rc) { + if ( rc ) + { gdprintk(XENLOG_ERR, "virtual vmcb invalid\n"); return NSVM_ERROR_VVMCB; } rc = svm_vmcb_isvalid(__func__, n2vmcb, v, true); - if (rc) { + if ( rc ) + { gdprintk(XENLOG_ERR, "n2vmcb invalid\n"); return NSVM_ERROR_VMENTRY; } @@ -677,9 +699,8 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) return 0; } -static int -nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs, - unsigned int inst_len) +static int nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs, + unsigned int inst_len) { int ret; struct nestedvcpu *nv = &vcpu_nestedhvm(v); @@ -716,7 +737,8 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs, /* Save l1 guest state (= host state) */ ret = nsvm_vcpu_hostsave(v, inst_len); - if (ret) { + if ( ret ) + { gdprintk(XENLOG_ERR, "hostsave failed, ret = %i\n", ret); return ret; } @@ -726,7 +748,8 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs, v->arch.hvm.svm.vmcb_pa = nv->nv_n2vmcx_pa; ret = nsvm_vmcb_prepare4vmrun(v, regs); - if (ret) { + if ( ret ) + { gdprintk(XENLOG_ERR, "prepare4vmrun failed, ret = %i\n", ret); return ret; } @@ -735,8 +758,7 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs, return 0; } -int -nsvm_vcpu_vmrun(struct vcpu *v, struct cpu_user_regs *regs) +int nsvm_vcpu_vmrun(struct vcpu *v, struct cpu_user_regs *regs) { int ret; unsigned int inst_len; @@ -761,7 +783,8 @@ nsvm_vcpu_vmrun(struct vcpu *v, struct cpu_user_regs *regs) * and l1 guest keeps alive. */ nestedhvm_vcpu_enter_guestmode(v); - switch (ret) { + switch (ret) + { case 0: break; case NSVM_ERROR_VVMCB: @@ -770,25 +793,24 @@ nsvm_vcpu_vmrun(struct vcpu *v, struct cpu_user_regs *regs) return -1; case NSVM_ERROR_VMENTRY: default: - gdprintk(XENLOG_ERR, - "nsvm_vcpu_vmentry failed, injecting #UD\n"); + gdprintk(XENLOG_ERR, "nsvm_vcpu_vmentry failed, injecting #UD\n"); hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC); - /* Must happen after hvm_inject_hw_exception or it doesn't work right. */ + /* Must happen after hvm_inject_hw_exception or it doesn't work right. + */ nv->nv_vmswitch_in_progress = 0; return 1; } /* If l1 guest uses shadow paging, update the paging mode. 
*/ - if (!nestedhvm_paging_mode_hap(v)) + if ( !nestedhvm_paging_mode_hap(v) ) paging_update_paging_modes(v); nv->nv_vmswitch_in_progress = 0; return 0; } -static int -nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs, - uint64_t exitcode) +static int nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs, + uint64_t exitcode) { struct nestedvcpu *nv = &vcpu_nestedhvm(v); struct nestedsvm *svm = &vcpu_nestedsvm(v); @@ -802,14 +824,15 @@ nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs, ns_vmcb = nv->nv_vvmcx; - if (nv->nv_vmexit_pending) { - - switch (exitcode) { + if ( nv->nv_vmexit_pending ) + { + switch (exitcode) + { case VMEXIT_INTR: - if ( unlikely(ns_vmcb->eventinj.fields.v) - && nv->nv_vmentry_pending - && hvm_event_needs_reinjection(ns_vmcb->eventinj.fields.type, - ns_vmcb->eventinj.fields.vector) ) + if ( unlikely(ns_vmcb->eventinj.fields.v) && + nv->nv_vmentry_pending && + hvm_event_needs_reinjection(ns_vmcb->eventinj.fields.type, + ns_vmcb->eventinj.fields.vector) ) { ns_vmcb->exitintinfo.bytes = ns_vmcb->eventinj.bytes; } @@ -841,8 +864,7 @@ nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs, return 0; } -int -nsvm_vcpu_vmexit_event(struct vcpu *v, const struct x86_event *trap) +int nsvm_vcpu_vmexit_event(struct vcpu *v, const struct x86_event *trap) { ASSERT(vcpu_nestedhvm(v).nv_vvmcx != NULL); @@ -856,36 +878,34 @@ uint64_t nsvm_vcpu_hostcr3(struct vcpu *v) return vcpu_nestedsvm(v).ns_vmcb_hostcr3; } -static int -nsvm_vmcb_guest_intercepts_msr(unsigned long *msr_bitmap, - uint32_t msr, bool_t write) +static int nsvm_vmcb_guest_intercepts_msr(unsigned long *msr_bitmap, + uint32_t msr, bool_t write) { bool_t enabled; unsigned long *msr_bit; msr_bit = svm_msrbit(msr_bitmap, msr); - if (msr_bit == NULL) + if ( msr_bit == NULL ) /* MSR not in the permission map: Let the guest handle it. */ return NESTEDHVM_VMEXIT_INJECT; msr &= 0x1fff; - if (write) + if ( write ) /* write access */ enabled = test_bit(msr * 2 + 1, msr_bit); else /* read access */ enabled = test_bit(msr * 2, msr_bit); - if (!enabled) + if ( !enabled ) return NESTEDHVM_VMEXIT_HOST; return NESTEDHVM_VMEXIT_INJECT; } -static int -nsvm_vmcb_guest_intercepts_ioio(paddr_t iopm_pa, uint64_t exitinfo1) +static int nsvm_vmcb_guest_intercepts_ioio(paddr_t iopm_pa, uint64_t exitinfo1) { unsigned long gfn = iopm_pa >> PAGE_SHIFT; unsigned long *io_bitmap; @@ -898,11 +918,11 @@ nsvm_vmcb_guest_intercepts_ioio(paddr_t iopm_pa, uint64_t exitinfo1) port = ioinfo.fields.port; size = ioinfo.fields.sz32 ? 4 : ioinfo.fields.sz16 ? 2 : 1; - switch ( port ) + switch (port) { case 0 ... 8 * PAGE_SIZE - 1: /* first 4KB page */ break; - case 8 * PAGE_SIZE ... 2 * 8 * PAGE_SIZE - 1: /* second 4KB page */ + case 8 * PAGE_SIZE... 
2 * 8 * PAGE_SIZE - 1: /* second 4KB page */ port -= 8 * PAGE_SIZE; ++gfn; break; @@ -911,7 +931,7 @@ nsvm_vmcb_guest_intercepts_ioio(paddr_t iopm_pa, uint64_t exitinfo1) break; } - for ( io_bitmap = hvm_map_guest_frame_ro(gfn, 0); ; ) + for ( io_bitmap = hvm_map_guest_frame_ro(gfn, 0);; ) { enabled = io_bitmap && test_bit(port, io_bitmap); if ( !enabled || !--size ) @@ -931,9 +951,9 @@ nsvm_vmcb_guest_intercepts_ioio(paddr_t iopm_pa, uint64_t exitinfo1) return NESTEDHVM_VMEXIT_INJECT; } -static bool_t -nsvm_vmcb_guest_intercepts_exitcode(struct vcpu *v, - struct cpu_user_regs *regs, uint64_t exitcode) +static bool_t nsvm_vmcb_guest_intercepts_exitcode(struct vcpu *v, + struct cpu_user_regs *regs, + uint64_t exitcode) { uint64_t exit_bits; struct nestedvcpu *nv = &vcpu_nestedhvm(v); @@ -941,41 +961,42 @@ nsvm_vmcb_guest_intercepts_exitcode(struct vcpu *v, struct vmcb_struct *ns_vmcb = nv->nv_vvmcx; enum nestedhvm_vmexits vmexits; - switch (exitcode) { + switch (exitcode) + { case VMEXIT_CR0_READ ... VMEXIT_CR15_READ: case VMEXIT_CR0_WRITE ... VMEXIT_CR15_WRITE: exit_bits = 1ULL << (exitcode - VMEXIT_CR0_READ); - if (svm->ns_cr_intercepts & exit_bits) + if ( svm->ns_cr_intercepts & exit_bits ) break; return 0; case VMEXIT_DR0_READ ... VMEXIT_DR7_READ: case VMEXIT_DR0_WRITE ... VMEXIT_DR7_WRITE: exit_bits = 1ULL << (exitcode - VMEXIT_DR0_READ); - if (svm->ns_dr_intercepts & exit_bits) + if ( svm->ns_dr_intercepts & exit_bits ) break; return 0; case VMEXIT_EXCEPTION_DE ... VMEXIT_EXCEPTION_XF: exit_bits = 1ULL << (exitcode - VMEXIT_EXCEPTION_DE); - if (svm->ns_exception_intercepts & exit_bits) + if ( svm->ns_exception_intercepts & exit_bits ) break; return 0; case VMEXIT_INTR ... VMEXIT_SHUTDOWN: exit_bits = 1ULL << (exitcode - VMEXIT_INTR); - if (svm->ns_general1_intercepts & exit_bits) + if ( svm->ns_general1_intercepts & exit_bits ) break; return 0; case VMEXIT_VMRUN ... 
VMEXIT_XSETBV: exit_bits = 1ULL << (exitcode - VMEXIT_VMRUN); - if (svm->ns_general2_intercepts & exit_bits) + if ( svm->ns_general2_intercepts & exit_bits ) break; return 0; case VMEXIT_NPF: - if (nestedhvm_paging_mode_hap(v)) + if ( nestedhvm_paging_mode_hap(v) ) break; return 0; case VMEXIT_INVALID: @@ -983,21 +1004,22 @@ nsvm_vmcb_guest_intercepts_exitcode(struct vcpu *v, break; default: - gdprintk(XENLOG_ERR, "Illegal exitcode %#"PRIx64"\n", exitcode); + gdprintk(XENLOG_ERR, "Illegal exitcode %#" PRIx64 "\n", exitcode); BUG(); break; } /* Special cases: Do more detailed checks */ - switch (exitcode) { + switch (exitcode) + { case VMEXIT_MSR: ASSERT(regs != NULL); if ( !nestedsvm_vmcb_map(v, nv->nv_vvmcxaddr) ) break; ns_vmcb = nv->nv_vvmcx; - vmexits = nsvm_vmcb_guest_intercepts_msr(svm->ns_cached_msrpm, - regs->ecx, ns_vmcb->exitinfo1 != 0); - if (vmexits == NESTEDHVM_VMEXIT_HOST) + vmexits = nsvm_vmcb_guest_intercepts_msr( + svm->ns_cached_msrpm, regs->ecx, ns_vmcb->exitinfo1 != 0); + if ( vmexits == NESTEDHVM_VMEXIT_HOST ) return 0; break; case VMEXIT_IOIO: @@ -1005,8 +1027,8 @@ nsvm_vmcb_guest_intercepts_exitcode(struct vcpu *v, break; ns_vmcb = nv->nv_vvmcx; vmexits = nsvm_vmcb_guest_intercepts_ioio(ns_vmcb->_iopm_base_pa, - ns_vmcb->exitinfo1); - if (vmexits == NESTEDHVM_VMEXIT_HOST) + ns_vmcb->exitinfo1); + if ( vmexits == NESTEDHVM_VMEXIT_HOST ) return 0; break; } @@ -1014,16 +1036,14 @@ nsvm_vmcb_guest_intercepts_exitcode(struct vcpu *v, return 1; } -bool_t -nsvm_vmcb_guest_intercepts_event( - struct vcpu *v, unsigned int vector, int errcode) +bool_t nsvm_vmcb_guest_intercepts_event(struct vcpu *v, unsigned int vector, + int errcode) { - return nsvm_vmcb_guest_intercepts_exitcode(v, - guest_cpu_user_regs(), VMEXIT_EXCEPTION_DE + vector); + return nsvm_vmcb_guest_intercepts_exitcode(v, guest_cpu_user_regs(), + VMEXIT_EXCEPTION_DE + vector); } -static int -nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs) +static int nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs) { struct nestedvcpu *nv = &vcpu_nestedhvm(v); struct nestedsvm *svm = &vcpu_nestedsvm(v); @@ -1047,7 +1067,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs) */ /* TSC offset */ - /* Keep it. It's maintainted by the l1 guest. */ + /* Keep it. It's maintainted by the l1 guest. */ /* ASID */ /* ns_vmcb->_guest_asid = n2vmcb->_guest_asid; */ @@ -1057,7 +1077,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs) /* Virtual Interrupts */ ns_vmcb->_vintr = n2vmcb->_vintr; - if (!(svm->ns_hostflags.fields.vintrmask)) + if ( !(svm->ns_hostflags.fields.vintrmask) ) ns_vmcb->_vintr.fields.intr_masking = 0; /* Shadow mode */ @@ -1087,14 +1107,17 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs) ns_vmcb->eventinj.bytes = 0; /* Nested paging mode */ - if (nestedhvm_paging_mode_hap(v)) { + if ( nestedhvm_paging_mode_hap(v) ) + { /* host nested paging + guest nested paging. */ ns_vmcb->_np_enable = n2vmcb->_np_enable; ns_vmcb->_cr3 = n2vmcb->_cr3; /* The vmcb->h_cr3 is the shadowed h_cr3. The original * unshadowed guest h_cr3 is kept in ns_vmcb->h_cr3, * hence we keep the ns_vmcb->h_cr3 value. */ - } else if (paging_mode_hap(v->domain)) { + } + else if ( paging_mode_hap(v->domain) ) + { /* host nested paging + guest shadow paging. */ ns_vmcb->_np_enable = 0; /* Throw h_cr3 away. 
Guest is not allowed to set it or @@ -1103,7 +1126,9 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs) /* Stop intercepting #PF (already done above * by restoring cached intercepts). */ ns_vmcb->_cr3 = n2vmcb->_cr3; - } else { + } + else + { /* host shadow paging + guest shadow paging. */ ns_vmcb->_np_enable = 0; ns_vmcb->_h_cr3 = 0x0; @@ -1187,8 +1212,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs) return 0; } -bool_t -nsvm_vmcb_hap_enabled(struct vcpu *v) +bool_t nsvm_vmcb_hap_enabled(struct vcpu *v) { return vcpu_nestedsvm(v).ns_hap_enabled; } @@ -1197,10 +1221,9 @@ nsvm_vmcb_hap_enabled(struct vcpu *v) * walk is successful, the translated value is returned in * L1_gpa. The result value tells what to do next. */ -int -nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, - unsigned int *page_order, uint8_t *p2m_acc, - bool_t access_r, bool_t access_w, bool_t access_x) +int nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, + unsigned int *page_order, uint8_t *p2m_acc, + bool_t access_r, bool_t access_w, bool_t access_x) { uint32_t pfec; unsigned long nested_cr3, gfn; @@ -1233,7 +1256,8 @@ enum hvm_intblk nsvm_intr_blocked(struct vcpu *v) if ( !nestedsvm_gif_isset(v) ) return hvm_intblk_svm_gif; - if ( nestedhvm_vcpu_in_guestmode(v) ) { + if ( nestedhvm_vcpu_in_guestmode(v) ) + { struct vmcb_struct *n2vmcb = nv->nv_n2vmcx; if ( svm->ns_hostflags.fields.vintrmask ) @@ -1249,7 +1273,8 @@ enum hvm_intblk nsvm_intr_blocked(struct vcpu *v) if ( v->arch.hvm.hvm_io.io_req.state != STATE_IOREQ_NONE ) return hvm_intblk_shadow; - if ( !nv->nv_vmexit_pending && n2vmcb->exitintinfo.bytes != 0 ) { + if ( !nv->nv_vmexit_pending && n2vmcb->exitintinfo.bytes != 0 ) + { /* Give the l2 guest a chance to finish the delivery of * the last injected interrupt or exception before we * emulate a VMEXIT (e.g. VMEXIT(INTR) ). @@ -1258,7 +1283,8 @@ enum hvm_intblk nsvm_intr_blocked(struct vcpu *v) } } - if ( nv->nv_vmexit_pending ) { + if ( nv->nv_vmexit_pending ) + { /* hvm_inject_hw_exception() must have run before. * exceptions have higher priority than interrupts. */ @@ -1276,7 +1302,8 @@ int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content) *msr_content = 0; - switch (msr) { + switch (msr) + { case MSR_K8_VM_CR: break; case MSR_K8_VM_HSAVE_PA: @@ -1298,24 +1325,28 @@ int nsvm_wrmsr(struct vcpu *v, unsigned int msr, uint64_t msr_content) int ret = 1; struct nestedsvm *svm = &vcpu_nestedsvm(v); - switch (msr) { + switch (msr) + { case MSR_K8_VM_CR: /* ignore write. handle all bits as read-only. 
*/ break; case MSR_K8_VM_HSAVE_PA: - if (!nestedsvm_vmcb_isvalid(v, msr_content)) { + if ( !nestedsvm_vmcb_isvalid(v, msr_content) ) + { gdprintk(XENLOG_ERR, - "MSR_K8_VM_HSAVE_PA value invalid %#"PRIx64"\n", msr_content); + "MSR_K8_VM_HSAVE_PA value invalid %#" PRIx64 "\n", + msr_content); ret = -1; /* inject #GP */ break; } svm->ns_msr_hsavepa = msr_content; break; case MSR_AMD64_TSC_RATIO: - if ((msr_content & ~TSC_RATIO_RSVD_BITS) != msr_content) { + if ( (msr_content & ~TSC_RATIO_RSVD_BITS) != msr_content ) + { gdprintk(XENLOG_ERR, - "reserved bits set in MSR_AMD64_TSC_RATIO %#"PRIx64"\n", - msr_content); + "reserved bits set in MSR_AMD64_TSC_RATIO %#" PRIx64 "\n", + msr_content); ret = -1; /* inject #GP */ break; } @@ -1330,9 +1361,8 @@ int nsvm_wrmsr(struct vcpu *v, unsigned int msr, uint64_t msr_content) } /* VMEXIT emulation */ -void -nestedsvm_vmexit_defer(struct vcpu *v, - uint64_t exitcode, uint64_t exitinfo1, uint64_t exitinfo2) +void nestedsvm_vmexit_defer(struct vcpu *v, uint64_t exitcode, + uint64_t exitinfo1, uint64_t exitinfo2) { struct nestedsvm *svm = &vcpu_nestedsvm(v); struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb; @@ -1348,18 +1378,19 @@ nestedsvm_vmexit_defer(struct vcpu *v, vcpu_nestedhvm(v).nv_vmexit_pending = 1; } -enum nestedhvm_vmexits -nestedsvm_check_intercepts(struct vcpu *v, struct cpu_user_regs *regs, - uint64_t exitcode) +enum nestedhvm_vmexits nestedsvm_check_intercepts(struct vcpu *v, + struct cpu_user_regs *regs, + uint64_t exitcode) { bool_t is_intercepted; ASSERT(vcpu_nestedhvm(v).nv_vmexit_pending == 0); is_intercepted = nsvm_vmcb_guest_intercepts_exitcode(v, regs, exitcode); - switch (exitcode) { + switch (exitcode) + { case VMEXIT_INVALID: - if (is_intercepted) + if ( is_intercepted ) return NESTEDHVM_VMEXIT_INJECT; return NESTEDHVM_VMEXIT_HOST; @@ -1373,14 +1404,16 @@ nestedsvm_check_intercepts(struct vcpu *v, struct cpu_user_regs *regs, return NESTEDHVM_VMEXIT_HOST; case VMEXIT_NPF: - if (nestedhvm_paging_mode_hap(v)) { - if (!is_intercepted) + if ( nestedhvm_paging_mode_hap(v) ) + { + if ( !is_intercepted ) return NESTEDHVM_VMEXIT_FATALERROR; /* host nested paging + guest nested paging */ return NESTEDHVM_VMEXIT_HOST; } - if (paging_mode_hap(v->domain)) { - if (is_intercepted) + if ( paging_mode_hap(v->domain) ) + { + if ( is_intercepted ) return NESTEDHVM_VMEXIT_FATALERROR; /* host nested paging + guest shadow paging */ return NESTEDHVM_VMEXIT_HOST; @@ -1390,15 +1423,17 @@ nestedsvm_check_intercepts(struct vcpu *v, struct cpu_user_regs *regs, BUG(); return NESTEDHVM_VMEXIT_FATALERROR; case VMEXIT_EXCEPTION_PF: - if (nestedhvm_paging_mode_hap(v)) { + if ( nestedhvm_paging_mode_hap(v) ) + { /* host nested paging + guest nested paging */ - if (!is_intercepted) + if ( !is_intercepted ) /* l1 guest intercepts #PF unnecessarily */ return NESTEDHVM_VMEXIT_HOST; /* l2 guest intercepts #PF unnecessarily */ return NESTEDHVM_VMEXIT_INJECT; } - if (!paging_mode_hap(v->domain)) { + if ( !paging_mode_hap(v->domain) ) + { /* host shadow paging + guest shadow paging */ return NESTEDHVM_VMEXIT_HOST; } @@ -1408,18 +1443,18 @@ nestedsvm_check_intercepts(struct vcpu *v, struct cpu_user_regs *regs, /* Always let the guest handle VMMCALL/VMCALL */ return NESTEDHVM_VMEXIT_INJECT; default: - gprintk(XENLOG_ERR, "Unexpected nested vmexit: reason %#"PRIx64"\n", + gprintk(XENLOG_ERR, "Unexpected nested vmexit: reason %#" PRIx64 "\n", exitcode); break; } - if (is_intercepted) + if ( is_intercepted ) return NESTEDHVM_VMEXIT_INJECT; return NESTEDHVM_VMEXIT_HOST; } -enum 
nestedhvm_vmexits -nestedsvm_vmexit_n2n1(struct vcpu *v, struct cpu_user_regs *regs) +enum nestedhvm_vmexits nestedsvm_vmexit_n2n1(struct vcpu *v, + struct cpu_user_regs *regs) { int rc; enum nestedhvm_vmexits ret = NESTEDHVM_VMEXIT_DONE; @@ -1428,11 +1463,11 @@ nestedsvm_vmexit_n2n1(struct vcpu *v, struct cpu_user_regs *regs) ASSERT(nestedhvm_vcpu_in_guestmode(v)); rc = nsvm_vmcb_prepare4vmexit(v, regs); - if (rc) + if ( rc ) ret = NESTEDHVM_VMEXIT_ERROR; rc = nsvm_vcpu_hostrestore(v, regs); - if (rc) + if ( rc ) ret = NESTEDHVM_VMEXIT_FATALERROR; nestedhvm_vcpu_exit_guestmode(v); @@ -1442,9 +1477,9 @@ nestedsvm_vmexit_n2n1(struct vcpu *v, struct cpu_user_regs *regs) /* The exitcode is in native SVM/VMX format. The forced exitcode * is in generic format. */ -static enum nestedhvm_vmexits -nestedsvm_vcpu_vmexit(struct vcpu *v, struct cpu_user_regs *regs, - uint64_t exitcode) +static enum nestedhvm_vmexits nestedsvm_vcpu_vmexit(struct vcpu *v, + struct cpu_user_regs *regs, + uint64_t exitcode) { int rc; struct nestedvcpu *nv = &vcpu_nestedhvm(v); @@ -1456,11 +1491,13 @@ nestedsvm_vcpu_vmexit(struct vcpu *v, struct cpu_user_regs *regs, /* On special intercepts the host has to handle * the vcpu is still in guest mode here. */ - if (nestedhvm_vcpu_in_guestmode(v)) { + if ( nestedhvm_vcpu_in_guestmode(v) ) + { enum nestedhvm_vmexits ret; ret = nestedsvm_vmexit_n2n1(v, regs); - switch (ret) { + switch (ret) + { case NESTEDHVM_VMEXIT_FATALERROR: gdprintk(XENLOG_ERR, "VMEXIT: fatal error\n"); return ret; @@ -1486,12 +1523,12 @@ nestedsvm_vcpu_vmexit(struct vcpu *v, struct cpu_user_regs *regs, rc = nsvm_vcpu_vmexit_inject(v, regs, exitcode); /* If l1 guest uses shadow paging, update the paging mode. */ - if (!nestedhvm_paging_mode_hap(v)) + if ( !nestedhvm_paging_mode_hap(v) ) paging_update_paging_modes(v); nv->nv_vmswitch_in_progress = 0; - if (rc) + if ( rc ) return NESTEDHVM_VMEXIT_FATALERROR; return NESTEDHVM_VMEXIT_DONE; @@ -1504,7 +1541,7 @@ void nsvm_vcpu_switch(struct cpu_user_regs *regs) struct nestedvcpu *nv; struct nestedsvm *svm; - if (!nestedhvm_enabled(v->domain)) + if ( !nestedhvm_enabled(v->domain) ) return; nv = &vcpu_nestedhvm(v); @@ -1515,39 +1552,39 @@ void nsvm_vcpu_switch(struct cpu_user_regs *regs) ASSERT(nv->nv_n1vmcx_pa != INVALID_PADDR); ASSERT(nv->nv_n2vmcx_pa != INVALID_PADDR); - if (nv->nv_vmexit_pending) { - vmexit: + if ( nv->nv_vmexit_pending ) + { + vmexit: nestedsvm_vcpu_vmexit(v, regs, svm->ns_vmexit.exitcode); nv->nv_vmexit_pending = 0; nv->nv_vmentry_pending = 0; return; } - if (nv->nv_vmentry_pending) { + if ( nv->nv_vmentry_pending ) + { int ret; ASSERT(!nv->nv_vmexit_pending); ret = nsvm_vcpu_vmrun(v, regs); - if (ret) + if ( ret ) goto vmexit; ASSERT(nestedhvm_vcpu_in_guestmode(v)); nv->nv_vmentry_pending = 0; } - if (nestedhvm_vcpu_in_guestmode(v) - && nestedhvm_paging_mode_hap(v)) + if ( nestedhvm_vcpu_in_guestmode(v) && nestedhvm_paging_mode_hap(v) ) { /* In case left the l2 guest due to a physical interrupt (e.g. IPI) * that is not for the l1 guest then we continue running the l2 guest * but check if the nestedp2m is still valid. 
*/ - if (nv->nv_p2m == NULL) + if ( nv->nv_p2m == NULL ) nestedsvm_vmcb_set_nestedp2m(v, nv->nv_vvmcx, nv->nv_n2vmcx); } } /* Interrupts, Virtual GIF */ -int -nestedsvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack) +int nestedsvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack) { int ret; enum hvm_intblk intr; @@ -1559,7 +1596,8 @@ nestedsvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack) if ( intr != hvm_intblk_none ) return NSVM_INTR_MASKED; - switch (intack.source) { + switch (intack.source) + { case hvm_intsrc_pic: case hvm_intsrc_lapic: case hvm_intsrc_vector: @@ -1580,9 +1618,10 @@ nestedsvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack) BUG(); } - ret = nsvm_vmcb_guest_intercepts_exitcode(v, - guest_cpu_user_regs(), exitcode); - if (ret) { + ret = + nsvm_vmcb_guest_intercepts_exitcode(v, guest_cpu_user_regs(), exitcode); + if ( ret ) + { nestedsvm_vmexit_defer(v, exitcode, intack.source, exitinfo2); return NSVM_INTR_FORCEVMEXIT; } @@ -1590,8 +1629,7 @@ nestedsvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack) return NSVM_INTR_NOTINTERCEPTED; } -bool_t -nestedsvm_gif_isset(struct vcpu *v) +bool_t nestedsvm_gif_isset(struct vcpu *v) { struct nestedsvm *svm = &vcpu_nestedsvm(v); struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb; @@ -1671,26 +1709,24 @@ void svm_nested_features_on_efer_update(struct vcpu *v) if ( v->arch.hvm.guest_efer & EFER_SVME ) { if ( !vmcb->virt_ext.fields.vloadsave_enable && - paging_mode_hap(v->domain) && - cpu_has_svm_vloadsave ) + paging_mode_hap(v->domain) && cpu_has_svm_vloadsave ) { vmcb->virt_ext.fields.vloadsave_enable = 1; - general2_intercepts = vmcb_get_general2_intercepts(vmcb); - general2_intercepts &= ~(GENERAL2_INTERCEPT_VMLOAD | - GENERAL2_INTERCEPT_VMSAVE); + general2_intercepts = vmcb_get_general2_intercepts(vmcb); + general2_intercepts &= + ~(GENERAL2_INTERCEPT_VMLOAD | GENERAL2_INTERCEPT_VMSAVE); vmcb_set_general2_intercepts(vmcb, general2_intercepts); } - if ( !vmcb->_vintr.fields.vgif_enable && - cpu_has_svm_vgif ) + if ( !vmcb->_vintr.fields.vgif_enable && cpu_has_svm_vgif ) { vintr = vmcb_get_vintr(vmcb); vintr.fields.vgif = svm->ns_gif; vintr.fields.vgif_enable = 1; vmcb_set_vintr(vmcb, vintr); - general2_intercepts = vmcb_get_general2_intercepts(vmcb); - general2_intercepts &= ~(GENERAL2_INTERCEPT_STGI | - GENERAL2_INTERCEPT_CLGI); + general2_intercepts = vmcb_get_general2_intercepts(vmcb); + general2_intercepts &= + ~(GENERAL2_INTERCEPT_STGI | GENERAL2_INTERCEPT_CLGI); vmcb_set_general2_intercepts(vmcb, general2_intercepts); } } @@ -1699,9 +1735,9 @@ void svm_nested_features_on_efer_update(struct vcpu *v) if ( vmcb->virt_ext.fields.vloadsave_enable ) { vmcb->virt_ext.fields.vloadsave_enable = 0; - general2_intercepts = vmcb_get_general2_intercepts(vmcb); - general2_intercepts |= (GENERAL2_INTERCEPT_VMLOAD | - GENERAL2_INTERCEPT_VMSAVE); + general2_intercepts = vmcb_get_general2_intercepts(vmcb); + general2_intercepts |= + (GENERAL2_INTERCEPT_VMLOAD | GENERAL2_INTERCEPT_VMSAVE); vmcb_set_general2_intercepts(vmcb, general2_intercepts); } @@ -1711,9 +1747,9 @@ void svm_nested_features_on_efer_update(struct vcpu *v) svm->ns_gif = vintr.fields.vgif; vintr.fields.vgif_enable = 0; vmcb_set_vintr(vmcb, vintr); - general2_intercepts = vmcb_get_general2_intercepts(vmcb); - general2_intercepts |= (GENERAL2_INTERCEPT_STGI | - GENERAL2_INTERCEPT_CLGI); + general2_intercepts = vmcb_get_general2_intercepts(vmcb); + general2_intercepts |= + (GENERAL2_INTERCEPT_STGI | 
GENERAL2_INTERCEPT_CLGI); vmcb_set_general2_intercepts(vmcb, general2_intercepts); } } diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index 23d72e81e3..d6adb282fc 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -127,8 +127,7 @@ static void svm_cpu_down(void) write_efer(read_efer() & ~EFER_SVME); } -unsigned long * -svm_msrbit(unsigned long *msr_bitmap, uint32_t msr) +unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr) { unsigned long *msr_bit = NULL; @@ -155,9 +154,9 @@ void svm_intercept_msr(struct vcpu *v, uint32_t msr, int flags) msr &= 0x1fff; if ( flags & MSR_INTERCEPT_READ ) - __set_bit(msr * 2, msr_bit); + __set_bit(msr * 2, msr_bit); else if ( !monitored_msr(d, msr) ) - __clear_bit(msr * 2, msr_bit); + __clear_bit(msr * 2, msr_bit); if ( flags & MSR_INTERCEPT_WRITE ) __set_bit(msr * 2 + 1, msr_bit); @@ -169,7 +168,7 @@ static void svm_enable_msr_interception(struct domain *d, uint32_t msr) { struct vcpu *v; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) svm_intercept_msr(v, msr, MSR_INTERCEPT_WRITE); } @@ -177,7 +176,7 @@ static void svm_set_icebp_interception(struct domain *d, bool enable) { const struct vcpu *v; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb; uint32_t intercepts = vmcb_get_general2_intercepts(vmcb); @@ -220,8 +219,8 @@ static void svm_save_dr(struct vcpu *v) v->arch.dr[1] = read_debugreg(1); v->arch.dr[2] = read_debugreg(2); v->arch.dr[3] = read_debugreg(3); - v->arch.dr6 = vmcb_get_dr6(vmcb); - v->arch.dr7 = vmcb_get_dr7(vmcb); + v->arch.dr6 = vmcb_get_dr6(vmcb); + v->arch.dr7 = vmcb_get_dr7(vmcb); } static void __restore_debug_registers(struct vmcb_struct *vmcb, struct vcpu *v) @@ -299,16 +298,16 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c) if ( (c->pending_type == 1) || (c->pending_type > 4) || (c->pending_reserved != 0) ) { - dprintk(XENLOG_ERR, "%pv: Invalid pending event %#"PRIx32"\n", - v, c->pending_event); + dprintk(XENLOG_ERR, "%pv: Invalid pending event %#" PRIx32 "\n", v, + c->pending_event); return -EINVAL; } if ( c->pending_error_valid && c->error_code != (uint16_t)c->error_code ) { - dprintk(XENLOG_ERR, "%pv: Invalid error code %#"PRIx32"\n", - v, c->error_code); + dprintk(XENLOG_ERR, "%pv: Invalid error code %#" PRIx32 "\n", v, + c->error_code); return -EINVAL; } } @@ -317,11 +316,11 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c) { if ( c->cr0 & X86_CR0_PG ) { - page = get_page_from_gfn(v->domain, c->cr3 >> PAGE_SHIFT, - NULL, P2M_ALLOC); + page = get_page_from_gfn(v->domain, c->cr3 >> PAGE_SHIFT, NULL, + P2M_ALLOC); if ( !page ) { - gdprintk(XENLOG_ERR, "Invalid CR3 value=%#"PRIx64"\n", + gdprintk(XENLOG_ERR, "Invalid CR3 value=%#" PRIx64 "\n", c->cr3); return -EINVAL; } @@ -344,7 +343,7 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c) vmcb->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs = c->sysenter_cs; vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp = c->sysenter_esp; vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip = c->sysenter_eip; - + if ( paging_mode_hap(v->domain) ) { vmcb_set_np_enable(vmcb, 1); @@ -355,7 +354,7 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c) if ( c->pending_valid && hvm_event_needs_reinjection(c->pending_type, c->pending_vector) ) { - gdprintk(XENLOG_INFO, "Re-injecting %#"PRIx32", %#"PRIx32"\n", + gdprintk(XENLOG_INFO, "Re-injecting %#" PRIx32 ", %#" PRIx32 "\n", c->pending_event, c->error_code); 
vmcb->eventinj.bytes = c->pending_event; vmcb->eventinj.fields.errorcode = c->error_code; @@ -369,28 +368,26 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c) return 0; } - static void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data) { struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb; - data->shadow_gs = vmcb->kerngsbase; - data->msr_lstar = vmcb->lstar; - data->msr_star = vmcb->star; - data->msr_cstar = vmcb->cstar; + data->shadow_gs = vmcb->kerngsbase; + data->msr_lstar = vmcb->lstar; + data->msr_star = vmcb->star; + data->msr_cstar = vmcb->cstar; data->msr_syscall_mask = vmcb->sfmask; } - static void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data) { struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb; vmcb->kerngsbase = data->shadow_gs; - vmcb->lstar = data->msr_lstar; - vmcb->star = data->msr_star; - vmcb->cstar = data->msr_cstar; - vmcb->sfmask = data->msr_syscall_mask; + vmcb->lstar = data->msr_lstar; + vmcb->star = data->msr_star; + vmcb->cstar = data->msr_cstar; + vmcb->sfmask = data->msr_syscall_mask; v->arch.hvm.guest_efer = data->msr_efer; svm_update_guest_efer(v); } @@ -404,7 +401,8 @@ static void svm_save_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt) static int svm_load_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt) { svm_load_cpu_state(v, ctxt); - if (svm_vmcb_restore(v, ctxt)) { + if ( svm_vmcb_restore(v, ctxt) ) + { gdprintk(XENLOG_ERR, "svm_vmcb restore failed!\n"); domain_crash(v->domain); return -EINVAL; @@ -418,9 +416,9 @@ static void svm_fpu_enter(struct vcpu *v) struct vmcb_struct *n1vmcb = vcpu_nestedhvm(v).nv_n1vmcx; vcpu_restore_fpu_lazy(v); - vmcb_set_exception_intercepts( - n1vmcb, - vmcb_get_exception_intercepts(n1vmcb) & ~(1U << TRAP_no_device)); + vmcb_set_exception_intercepts(n1vmcb, + vmcb_get_exception_intercepts(n1vmcb) & + ~(1U << TRAP_no_device)); } static void svm_fpu_leave(struct vcpu *v) @@ -431,16 +429,16 @@ static void svm_fpu_leave(struct vcpu *v) ASSERT(read_cr0() & X86_CR0_TS); /* - * If the guest does not have TS enabled then we must cause and handle an - * exception on first use of the FPU. If the guest *does* have TS enabled - * then this is not necessary: no FPU activity can occur until the guest + * If the guest does not have TS enabled then we must cause and handle an + * exception on first use of the FPU. If the guest *does* have TS enabled + * then this is not necessary: no FPU activity can occur until the guest * clears CR0.TS, and we will initialise the FPU when that happens. 
*/ if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_TS) ) { - vmcb_set_exception_intercepts( - n1vmcb, - vmcb_get_exception_intercepts(n1vmcb) | (1U << TRAP_no_device)); + vmcb_set_exception_intercepts(n1vmcb, + vmcb_get_exception_intercepts(n1vmcb) | + (1U << TRAP_no_device)); vmcb_set_cr0(n1vmcb, vmcb_get_cr0(n1vmcb) | X86_CR0_TS); } } @@ -465,7 +463,7 @@ static void svm_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow) u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb); vmcb->interrupt_shadow = - !!(intr_shadow & (HVM_INTR_SHADOW_MOV_SS|HVM_INTR_SHADOW_STI)); + !!(intr_shadow & (HVM_INTR_SHADOW_MOV_SS | HVM_INTR_SHADOW_STI)); general1_intercepts &= ~GENERAL1_INTERCEPT_IRET; if ( intr_shadow & HVM_INTR_SHADOW_NMI ) @@ -491,9 +489,10 @@ void svm_update_guest_cr(struct vcpu *v, unsigned int cr, unsigned int flags) struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb; uint64_t value; - switch ( cr ) + switch (cr) + { + case 0: { - case 0: { unsigned long hw_cr0_mask = 0; if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_TS) ) @@ -514,7 +513,8 @@ void svm_update_guest_cr(struct vcpu *v, unsigned int cr, unsigned int flags) /* Trap CR3 updates if CR3 memory events are enabled. */ if ( v->domain->arch.monitor.write_ctrlreg_enabled & monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3) ) - vmcb_set_cr_intercepts(vmcb, intercepts | CR_INTERCEPT_CR3_WRITE); + vmcb_set_cr_intercepts(vmcb, + intercepts | CR_INTERCEPT_CR3_WRITE); } value = v->arch.hvm.guest_cr[0] | hw_cr0_mask; @@ -536,9 +536,9 @@ void svm_update_guest_cr(struct vcpu *v, unsigned int cr, unsigned int flags) else if ( nestedhvm_vmswitch_in_progress(v) ) ; /* CR3 switches during VMRUN/VMEXIT do not flush the TLB. */ else if ( !(flags & HVM_UPDATE_GUEST_CR3_NOFLUSH) ) - hvm_asid_flush_vcpu_asid( - nestedhvm_vcpu_in_guestmode(v) - ? &vcpu_nestedhvm(v).nv_n2asid : &v->arch.hvm.n1asid); + hvm_asid_flush_vcpu_asid(nestedhvm_vcpu_in_guestmode(v) + ? &vcpu_nestedhvm(v).nv_n2asid + : &v->arch.hvm.n1asid); break; case 4: value = HVM_CR4_HOST_MASK; @@ -573,8 +573,7 @@ void svm_update_guest_cr(struct vcpu *v, unsigned int cr, unsigned int flags) static void svm_update_guest_efer(struct vcpu *v) { struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb; - unsigned long guest_efer = v->arch.hvm.guest_efer, - xen_efer = read_efer(); + unsigned long guest_efer = v->arch.hvm.guest_efer, xen_efer = read_efer(); if ( paging_mode_shadow(v->domain) ) { @@ -660,7 +659,7 @@ static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg, ASSERT((v == current) || !vcpu_runnable(v)); - switch ( seg ) + switch (seg) { case x86_seg_fs ... 
x86_seg_gs: svm_sync_vmcb(v, vmcb_in_sync); @@ -705,7 +704,7 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg, ASSERT((v == current) || !vcpu_runnable(v)); - switch ( seg ) + switch (seg) { case x86_seg_cs: case x86_seg_ds: @@ -733,7 +732,7 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg, return; } - switch ( seg ) + switch (seg) { case x86_seg_ss: vmcb_set_cpl(vmcb, reg->dpl); @@ -819,7 +818,7 @@ static uint64_t scale_tsc(uint64_t host_tsc, uint64_t ratio) */ mult = ratio >> 32; frac = ratio & ((1ULL << 32) - 1); - scaled_host_tsc = host_tsc * mult; + scaled_host_tsc = host_tsc * mult; scaled_host_tsc += (host_tsc >> 32) * frac; scaled_host_tsc += ((host_tsc & ((1ULL << 32) - 1)) * frac) >> 32; @@ -827,7 +826,7 @@ static uint64_t scale_tsc(uint64_t host_tsc, uint64_t ratio) } static uint64_t svm_get_tsc_offset(uint64_t host_tsc, uint64_t guest_tsc, - uint64_t ratio) + uint64_t ratio) { return guest_tsc - scale_tsc(host_tsc, ratio); } @@ -839,7 +838,8 @@ static void svm_set_tsc_offset(struct vcpu *v, u64 offset, u64 at_tsc) uint64_t n2_tsc_offset = 0; struct domain *d = v->domain; - if ( !nestedhvm_enabled(d) ) { + if ( !nestedhvm_enabled(d) ) + { vmcb_set_tsc_offset(vmcb, offset); return; } @@ -847,17 +847,18 @@ static void svm_set_tsc_offset(struct vcpu *v, u64 offset, u64 at_tsc) n1vmcb = vcpu_nestedhvm(v).nv_n1vmcx; n2vmcb = vcpu_nestedhvm(v).nv_n2vmcx; - if ( nestedhvm_vcpu_in_guestmode(v) ) { + if ( nestedhvm_vcpu_in_guestmode(v) ) + { struct nestedsvm *svm = &vcpu_nestedsvm(v); - n2_tsc_offset = vmcb_get_tsc_offset(n2vmcb) - - vmcb_get_tsc_offset(n1vmcb); - if ( svm->ns_tscratio != DEFAULT_TSC_RATIO ) { + n2_tsc_offset = + vmcb_get_tsc_offset(n2vmcb) - vmcb_get_tsc_offset(n1vmcb); + if ( svm->ns_tscratio != DEFAULT_TSC_RATIO ) + { uint64_t guest_tsc = hvm_get_guest_tsc_fixed(v, at_tsc); - n2_tsc_offset = svm_get_tsc_offset(guest_tsc, - guest_tsc + n2_tsc_offset, - svm->ns_tscratio); + n2_tsc_offset = svm_get_tsc_offset( + guest_tsc, guest_tsc + n2_tsc_offset, svm->ns_tscratio); } vmcb_set_tsc_offset(n1vmcb, offset); } @@ -888,10 +889,10 @@ static void svm_set_descriptor_access_exiting(struct vcpu *v, bool enable) { struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb; u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb); - u32 mask = GENERAL1_INTERCEPT_IDTR_READ | GENERAL1_INTERCEPT_GDTR_READ - | GENERAL1_INTERCEPT_LDTR_READ | GENERAL1_INTERCEPT_TR_READ - | GENERAL1_INTERCEPT_IDTR_WRITE | GENERAL1_INTERCEPT_GDTR_WRITE - | GENERAL1_INTERCEPT_LDTR_WRITE | GENERAL1_INTERCEPT_TR_WRITE; + u32 mask = GENERAL1_INTERCEPT_IDTR_READ | GENERAL1_INTERCEPT_GDTR_READ | + GENERAL1_INTERCEPT_LDTR_READ | GENERAL1_INTERCEPT_TR_READ | + GENERAL1_INTERCEPT_IDTR_WRITE | GENERAL1_INTERCEPT_GDTR_WRITE | + GENERAL1_INTERCEPT_LDTR_WRITE | GENERAL1_INTERCEPT_TR_WRITE; if ( enable ) general1_intercepts |= mask; @@ -927,12 +928,12 @@ static void svm_init_hypercall_page(struct domain *d, void *hypercall_page) continue; p = (char *)(hypercall_page + (i * 32)); - *(u8 *)(p + 0) = 0xb8; /* mov imm32, %eax */ + *(u8 *)(p + 0) = 0xb8; /* mov imm32, %eax */ *(u32 *)(p + 1) = i; - *(u8 *)(p + 5) = 0x0f; /* vmmcall */ - *(u8 *)(p + 6) = 0x01; - *(u8 *)(p + 7) = 0xd9; - *(u8 *)(p + 8) = 0xc3; /* ret */ + *(u8 *)(p + 5) = 0x0f; /* vmmcall */ + *(u8 *)(p + 6) = 0x01; + *(u8 *)(p + 7) = 0xd9; + *(u8 *)(p + 8) = 0xc3; /* ret */ } /* Don't support HYPERVISOR_iret at the moment */ @@ -944,10 +945,8 @@ static void svm_lwp_interrupt(struct cpu_user_regs *regs) struct 
vcpu *curr = current; ack_APIC_irq(); - vlapic_set_irq( - vcpu_vlapic(curr), - (curr->arch.hvm.svm.guest_lwp_cfg >> 40) & 0xff, - 0); + vlapic_set_irq(vcpu_vlapic(curr), + (curr->arch.hvm.svm.guest_lwp_cfg >> 40) & 0xff, 0); } static inline void svm_lwp_save(struct vcpu *v) @@ -963,8 +962,8 @@ static inline void svm_lwp_save(struct vcpu *v) static inline void svm_lwp_load(struct vcpu *v) { /* Only LWP_CFG is reloaded. LWP_CBADDR will be reloaded via xrstor. */ - if ( v->arch.hvm.svm.guest_lwp_cfg ) - wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm.svm.cpu_lwp_cfg); + if ( v->arch.hvm.svm.guest_lwp_cfg ) + wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm.svm.cpu_lwp_cfg); } /* Update LWP_CFG MSR (0xc0000105). Return -1 if error; otherwise returns 0. */ @@ -976,7 +975,7 @@ static int svm_update_lwp_cfg(struct vcpu *v, uint64_t msr_content) if ( xsave_enabled(v) && cpu_has_lwp ) { msr_low = (uint32_t)msr_content; - + /* generate #GP if guest tries to turn on unsupported features. */ if ( msr_low & ~v->domain->arch.cpuid->extd.raw[0x1c].d ) return -1; @@ -987,15 +986,16 @@ static int svm_update_lwp_cfg(struct vcpu *v, uint64_t msr_content) if ( (msr_content & 0x80000000) && ((msr_content >> 40) & 0xff) ) { alloc_direct_apic_vector(&lwp_intr_vector, svm_lwp_interrupt); - v->arch.hvm.svm.cpu_lwp_cfg = (msr_content & 0xffff00ffffffffffULL) - | ((uint64_t)lwp_intr_vector << 40); + v->arch.hvm.svm.cpu_lwp_cfg = + (msr_content & 0xffff00ffffffffffULL) | + ((uint64_t)lwp_intr_vector << 40); } else { /* otherwise disable it */ v->arch.hvm.svm.cpu_lwp_cfg = msr_content & 0xffff00ff7fffffffULL; } - + wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm.svm.cpu_lwp_cfg); /* track nonalzy state if LWP_CFG is non-zero. */ @@ -1014,7 +1014,7 @@ static inline void svm_tsc_ratio_save(struct vcpu *v) static inline void svm_tsc_ratio_load(struct vcpu *v) { - if ( cpu_has_tsc_ratio && !v->domain->arch.vtsc ) + if ( cpu_has_tsc_ratio && !v->domain->arch.vtsc ) wrmsrl(MSR_AMD64_TSC_RATIO, hvm_tsc_scaling_ratio(v->domain)); } @@ -1054,7 +1054,7 @@ static void svm_ctxt_switch_to(struct vcpu *v) * DOM0 selectors are pointing to invalid GDT locations, and cause AMD * processors to shutdown. */ - asm volatile ("mov %0, %%ds; mov %0, %%es; mov %0, %%ss;" :: "r" (0)); + asm volatile("mov %0, %%ds; mov %0, %%es; mov %0, %%ss;" ::"r"(0)); /* * Cannot use ISTs for NMI/#MC/#DF while we are running with the guest TR. @@ -1086,7 +1086,7 @@ static void noreturn svm_do_resume(struct vcpu *v) vcpu_guestmode = 1; if ( !vcpu_guestmode && - unlikely(v->arch.hvm.debug_state_latch != debug_state) ) + unlikely(v->arch.hvm.debug_state_latch != debug_state) ) { uint32_t intercepts = vmcb_get_exception_intercepts(vmcb); @@ -1111,8 +1111,7 @@ static void noreturn svm_do_resume(struct vcpu *v) /* Reflect the vlapic's TPR in the hardware vtpr */ intr = vmcb_get_vintr(vmcb); - intr.fields.tpr = - (vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xFF) >> 4; + intr.fields.tpr = (vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xFF) >> 4; vmcb_set_vintr(vmcb, intr); } @@ -1131,7 +1130,7 @@ void svm_vmenter_helper(const struct cpu_user_regs *regs) if ( unlikely(tb_init_done) ) HVMTRACE_ND(VMENTRY, nestedhvm_vcpu_in_guestmode(curr) ? 
TRC_HVM_NESTEDFLAG : 0, - 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0); + 1 /*cycles*/, 0, 0, 0, 0, 0, 0, 0); svm_sync_vmcb(curr, vmcb_needs_vmsave); @@ -1195,7 +1194,7 @@ void svm_host_osvw_init() rdmsr_safe(MSR_AMD_OSVW_STATUS, status) ) len = status = 0; - if (len < osvw_length) + if ( len < osvw_length ) osvw_length = len; osvw_status |= status; @@ -1227,7 +1226,7 @@ static int svm_domain_initialise(struct domain *d) { static const struct arch_csw csw = { .from = svm_ctxt_switch_from, - .to = svm_ctxt_switch_to, + .to = svm_ctxt_switch_to, .tail = svm_do_resume, }; @@ -1253,8 +1252,7 @@ static int svm_vcpu_initialise(struct vcpu *v) if ( (rc = svm_create_vmcb(v)) != 0 ) { - dprintk(XENLOG_WARNING, - "Failed to create VMCB for vcpu %d: err=%d.\n", + dprintk(XENLOG_WARNING, "Failed to create VMCB for vcpu %d: err=%d.\n", v->vcpu_id, rc); return rc; } @@ -1287,19 +1285,22 @@ static void svm_emul_swint_injection(struct x86_event *event) struct segment_register cs, idtr; unsigned int idte_size, idte_offset; unsigned long idte_linear_addr; - struct { uint32_t a, b, c, d; } idte = {}; + struct + { + uint32_t a, b, c, d; + } idte = {}; bool lm = vmcb_get_efer(vmcb) & EFER_LMA; int rc; if ( !(vmcb_get_cr0(vmcb) & X86_CR0_PE) ) goto raise_exception; /* TODO: support real-mode injection? */ - idte_size = lm ? 16 : 8; + idte_size = lm ? 16 : 8; idte_offset = trap * idte_size; /* ICEBP sets the External Event bit despite being an instruction. */ ec = (trap << 3) | X86_XEC_IDT | - (type == X86_EVENTTYPE_PRI_SW_EXCEPTION ? X86_XEC_EXT : 0); + (type == X86_EVENTTYPE_PRI_SW_EXCEPTION ? X86_XEC_EXT : 0); /* * TODO: This does not cover the v8086 mode with CR4.VME case @@ -1318,8 +1319,8 @@ static void svm_emul_swint_injection(struct x86_event *event) hvm_get_segment_register(curr, x86_seg_cs, &cs); hvm_get_segment_register(curr, x86_seg_idtr, &idtr); if ( !hvm_virtual_to_linear_addr(x86_seg_idtr, &idtr, idte_offset, - idte_size, hvm_access_read, - &cs, &idte_linear_addr) ) + idte_size, hvm_access_read, &cs, + &idte_linear_addr) ) goto raise_exception; rc = hvm_copy_from_guest_linear(&idte, idte_linear_addr, idte_size, @@ -1337,7 +1338,7 @@ static void svm_emul_swint_injection(struct x86_event *event) } /* This must be an interrupt, trap, or task gate. */ - switch ( (idte.b >> 8) & 0x1f ) + switch ((idte.b >> 8) & 0x1f) { case SYS_DESC_irq_gate: case SYS_DESC_trap_gate: @@ -1376,7 +1377,7 @@ static void svm_emul_swint_injection(struct x86_event *event) */ return; - raise_exception: +raise_exception: event->vector = fault; event->type = X86_EVENTTYPE_HW_EXCEPTION; event->insn_len = 0; @@ -1403,7 +1404,7 @@ static void svm_inject_event(const struct x86_event *event) (!cpu_has_svm_nrips && (event->type >= X86_EVENTTYPE_SW_INTERRUPT)) ) svm_emul_swint_injection(&_event); - switch ( _event.vector | -(_event.type == X86_EVENTTYPE_SW_INTERRUPT) ) + switch (_event.vector | -(_event.type == X86_EVENTTYPE_SW_INTERRUPT)) { case TRAP_debug: if ( regs->eflags & X86_EFLAGS_TF ) @@ -1431,8 +1432,8 @@ static void svm_inject_event(const struct x86_event *event) if ( unlikely(eventinj.fields.v) && (eventinj.fields.type == X86_EVENTTYPE_HW_EXCEPTION) ) { - _event.vector = hvm_combine_hw_exceptions( - eventinj.fields.vector, _event.vector); + _event.vector = + hvm_combine_hw_exceptions(eventinj.fields.vector, _event.vector); if ( _event.vector == TRAP_double_fault ) _event.error_code = 0; } @@ -1452,7 +1453,7 @@ static void svm_inject_event(const struct x86_event *event) * semantics won't fault on injection. 
Position %rip/NextRIP suitably, * and restrict the event type to what hardware will tolerate. */ - switch ( _event.type ) + switch (_event.type) { case X86_EVENTTYPE_SW_INTERRUPT: /* int $n */ if ( cpu_has_svm_nrips ) @@ -1583,7 +1584,7 @@ static int svm_cpu_up_prepare(unsigned int cpu) return 0; - err: +err: svm_cpu_dead(cpu); return -ENOMEM; } @@ -1597,11 +1598,13 @@ static void svm_init_erratum_383(const struct cpuinfo_x86 *c) return; /* use safe methods to be compatible with nested virtualization */ - if (rdmsr_safe(MSR_AMD64_DC_CFG, msr_content) == 0 && - wrmsr_safe(MSR_AMD64_DC_CFG, msr_content | (1ULL << 47)) == 0) + if ( rdmsr_safe(MSR_AMD64_DC_CFG, msr_content) == 0 && + wrmsr_safe(MSR_AMD64_DC_CFG, msr_content | (1ULL << 47)) == 0 ) { amd_erratum383_found = 1; - } else { + } + else + { printk("Failed to enable erratum 383\n"); } } @@ -1672,7 +1675,7 @@ static int _svm_cpu_up(bool bsp) int rc; unsigned int cpu = smp_processor_id(); const struct cpuinfo_x86 *c = &cpu_data[cpu]; - + /* Check whether SVM feature is disabled in BIOS */ rdmsrl(MSR_K8_VM_CR, msr_content); if ( msr_content & K8_VMCR_SVME_DISABLE ) @@ -1708,7 +1711,7 @@ static int svm_cpu_up(void) return _svm_cpu_up(false); } -const struct hvm_function_table * __init start_svm(void) +const struct hvm_function_table *__init start_svm(void) { bool_t printed = 0; @@ -1722,8 +1725,9 @@ const struct hvm_function_table * __init start_svm(void) setup_vmcb_dump(); - svm_feature_flags = (current_cpu_data.extended_cpuid_level >= 0x8000000A ? - cpuid_edx(0x8000000A) : 0); + svm_feature_flags = (current_cpu_data.extended_cpuid_level >= 0x8000000A + ? cpuid_edx(0x8000000A) + : 0); printk("SVM: Supported advanced features:\n"); @@ -1734,7 +1738,12 @@ const struct hvm_function_table * __init start_svm(void) if ( cpu_has_tsc_ratio ) svm_function_table.tsc_scaling.ratio_frac_bits = 32; -#define P(p,s) if ( p ) { printk(" - %s\n", s); printed = 1; } +#define P(p, s) \ + if ( p ) \ + { \ + printk(" - %s\n", s); \ + printed = 1; \ + } P(cpu_has_svm_npt, "Nested Page Tables (NPT)"); P(cpu_has_svm_lbrv, "Last Branch Record (LBR) Virtualisation"); P(cpu_has_svm_nrips, "Next-RIP Saved on #VMEXIT"); @@ -1751,14 +1760,14 @@ const struct hvm_function_table * __init start_svm(void) printk(" - none\n"); svm_function_table.hap_supported = !!cpu_has_svm_npt; - svm_function_table.hap_capabilities = HVM_HAP_SUPERPAGE_2MB | - (cpu_has_page1gb ? HVM_HAP_SUPERPAGE_1GB : 0); + svm_function_table.hap_capabilities = + HVM_HAP_SUPERPAGE_2MB | (cpu_has_page1gb ? HVM_HAP_SUPERPAGE_1GB : 0); return &svm_function_table; } -static void svm_do_nested_pgfault(struct vcpu *v, - struct cpu_user_regs *regs, uint64_t pfec, paddr_t gpa) +static void svm_do_nested_pgfault(struct vcpu *v, struct cpu_user_regs *regs, + uint64_t pfec, paddr_t gpa) { int ret; unsigned long gfn = gpa >> PAGE_SHIFT; @@ -1789,7 +1798,8 @@ static void svm_do_nested_pgfault(struct vcpu *v, if ( tb_init_done ) { - struct { + struct + { uint64_t gpa; uint64_t mfn; uint32_t qualification; @@ -1801,11 +1811,12 @@ static void svm_do_nested_pgfault(struct vcpu *v, _d.qualification = 0; mfn = __get_gfn_type_access(p2m, gfn, &_d.p2mt, &p2ma, 0, NULL, 0); _d.mfn = mfn_x(mfn); - + __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d); } - switch (ret) { + switch (ret) + { case 0: break; case 1: @@ -1822,8 +1833,8 @@ static void svm_do_nested_pgfault(struct vcpu *v, /* Everything else is an error. 
*/ mfn = __get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL, 0); gdprintk(XENLOG_ERR, - "SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n", - gpa, mfn_x(mfn), p2mt); + "SVM violation gpa %#" PRIpaddr ", mfn %#lx, type %i\n", gpa, + mfn_x(mfn), p2mt); domain_crash(v->domain); } @@ -1837,19 +1848,20 @@ static void svm_fpu_dirty_intercept(void) if ( vmcb != n1vmcb ) { - /* Check if l1 guest must make FPU ready for the l2 guest */ - if ( v->arch.hvm.guest_cr[0] & X86_CR0_TS ) - hvm_inject_hw_exception(TRAP_no_device, X86_EVENT_NO_EC); - else - vmcb_set_cr0(n1vmcb, vmcb_get_cr0(n1vmcb) & ~X86_CR0_TS); - return; + /* Check if l1 guest must make FPU ready for the l2 guest */ + if ( v->arch.hvm.guest_cr[0] & X86_CR0_TS ) + hvm_inject_hw_exception(TRAP_no_device, X86_EVENT_NO_EC); + else + vmcb_set_cr0(n1vmcb, vmcb_get_cr0(n1vmcb) & ~X86_CR0_TS); + return; } if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_TS) ) vmcb_set_cr0(vmcb, vmcb_get_cr0(vmcb) & ~X86_CR0_TS); } -static int svm_vmexit_do_cpuid(struct cpu_user_regs *regs, unsigned int inst_len) +static int svm_vmexit_do_cpuid(struct cpu_user_regs *regs, + unsigned int inst_len) { struct vcpu *curr = current; struct cpuid_leaf res; @@ -1871,8 +1883,8 @@ static int svm_vmexit_do_cpuid(struct cpu_user_regs *regs, unsigned int inst_len return hvm_monitor_cpuid(inst_len, regs->eax, regs->ecx); } -static void svm_vmexit_do_cr_access( - struct vmcb_struct *vmcb, struct cpu_user_regs *regs) +static void svm_vmexit_do_cr_access(struct vmcb_struct *vmcb, + struct cpu_user_regs *regs) { int gp, cr, dir, rc; @@ -1902,7 +1914,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content) const struct domain *d = v->domain; struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb; - switch ( msr ) + switch (msr) { /* * Sync not needed while the cross-vendor logic is in unilateral effect. 
@@ -1921,7 +1933,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content) break; } - switch ( msr ) + switch (msr) { case MSR_IA32_SYSENTER_CS: *msr_content = v->arch.hvm.svm.guest_sysenter_cs; @@ -2059,11 +2071,11 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content) goto gpf; } - HVM_DBG_LOG(DBG_LEVEL_MSR, "returns: ecx=%x, msr_value=%"PRIx64, - msr, *msr_content); + HVM_DBG_LOG(DBG_LEVEL_MSR, "returns: ecx=%x, msr_value=%" PRIx64, msr, + *msr_content); return X86EMUL_OKAY; - gpf: +gpf: return X86EMUL_EXCEPTION; } @@ -2074,7 +2086,7 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content) struct domain *d = v->domain; struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb; - switch ( msr ) + switch (msr) { case MSR_IA32_SYSENTER_CS: case MSR_IA32_SYSENTER_ESP: @@ -2090,7 +2102,7 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content) break; } - switch ( msr ) + switch (msr) { case MSR_IA32_SYSENTER_ESP: case MSR_IA32_SYSENTER_EIP: @@ -2102,14 +2114,16 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content) if ( !is_canonical_address(msr_content) ) goto gpf; - switch ( msr ) + switch (msr) { case MSR_IA32_SYSENTER_ESP: - vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp = msr_content; + vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp = + msr_content; break; case MSR_IA32_SYSENTER_EIP: - vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip = msr_content; + vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip = + msr_content; break; case MSR_LSTAR: @@ -2235,7 +2249,7 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content) return result; - gpf: +gpf: return X86EMUL_EXCEPTION; } @@ -2243,8 +2257,8 @@ static void svm_do_msr_access(struct cpu_user_regs *regs) { struct vcpu *curr = current; bool rdmsr = curr->arch.hvm.svm.vmcb->exitinfo1 == 0; - int rc, inst_len = svm_get_insn_len(curr, rdmsr ? INSTR_RDMSR - : INSTR_WRMSR); + int rc, + inst_len = svm_get_insn_len(curr, rdmsr ? 
INSTR_RDMSR : INSTR_WRMSR); if ( inst_len == 0 ) return; @@ -2318,9 +2332,8 @@ static void svm_vmexit_do_pause(struct cpu_user_regs *regs) do_sched_op(SCHEDOP_yield, guest_handle_from_ptr(NULL, void)); } -static void -svm_vmexit_do_vmrun(struct cpu_user_regs *regs, - struct vcpu *v, uint64_t vmcbaddr) +static void svm_vmexit_do_vmrun(struct cpu_user_regs *regs, struct vcpu *v, + uint64_t vmcbaddr) { if ( !nsvm_efer_svm_enabled(v) ) { @@ -2339,8 +2352,7 @@ svm_vmexit_do_vmrun(struct cpu_user_regs *regs, return; } -static struct page_info * -nsvm_get_nvmcb_page(struct vcpu *v, uint64_t vmcbaddr) +static struct page_info *nsvm_get_nvmcb_page(struct vcpu *v, uint64_t vmcbaddr) { p2m_type_t p2mt; struct page_info *page; @@ -2350,25 +2362,23 @@ nsvm_get_nvmcb_page(struct vcpu *v, uint64_t vmcbaddr) return NULL; /* Need to translate L1-GPA to MPA */ - page = get_page_from_gfn(v->domain, - nv->nv_vvmcxaddr >> PAGE_SHIFT, - &p2mt, P2M_ALLOC | P2M_UNSHARE); + page = get_page_from_gfn(v->domain, nv->nv_vvmcxaddr >> PAGE_SHIFT, &p2mt, + P2M_ALLOC | P2M_UNSHARE); if ( !page ) return NULL; if ( !p2m_is_ram(p2mt) || p2m_is_readonly(p2mt) ) { put_page(page); - return NULL; + return NULL; } - return page; + return page; } -static void -svm_vmexit_do_vmload(struct vmcb_struct *vmcb, - struct cpu_user_regs *regs, - struct vcpu *v, uint64_t vmcbaddr) +static void svm_vmexit_do_vmload(struct vmcb_struct *vmcb, + struct cpu_user_regs *regs, struct vcpu *v, + uint64_t vmcbaddr) { unsigned int inst_len; struct page_info *page; @@ -2376,7 +2386,7 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb, if ( (inst_len = svm_get_insn_len(v, INSTR_VMLOAD)) == 0 ) return; - if ( !nsvm_efer_svm_enabled(v) ) + if ( !nsvm_efer_svm_enabled(v) ) { hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC); return; @@ -2385,8 +2395,7 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb, page = nsvm_get_nvmcb_page(v, vmcbaddr); if ( !page ) { - gdprintk(XENLOG_ERR, - "VMLOAD: mapping failed, injecting #GP\n"); + gdprintk(XENLOG_ERR, "VMLOAD: mapping failed, injecting #GP\n"); hvm_inject_hw_exception(TRAP_gp_fault, 0); return; } @@ -2400,10 +2409,9 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb, __update_guest_eip(regs, inst_len); } -static void -svm_vmexit_do_vmsave(struct vmcb_struct *vmcb, - struct cpu_user_regs *regs, - struct vcpu *v, uint64_t vmcbaddr) +static void svm_vmexit_do_vmsave(struct vmcb_struct *vmcb, + struct cpu_user_regs *regs, struct vcpu *v, + uint64_t vmcbaddr) { unsigned int inst_len; struct page_info *page; @@ -2411,7 +2419,7 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb, if ( (inst_len = svm_get_insn_len(v, INSTR_VMSAVE)) == 0 ) return; - if ( !nsvm_efer_svm_enabled(v) ) + if ( !nsvm_efer_svm_enabled(v) ) { hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC); return; @@ -2420,8 +2428,7 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb, page = nsvm_get_nvmcb_page(v, vmcbaddr); if ( !page ) { - gdprintk(XENLOG_ERR, - "VMSAVE: mapping vmcb failed, injecting #GP\n"); + gdprintk(XENLOG_ERR, "VMSAVE: mapping vmcb failed, injecting #GP\n"); hvm_inject_hw_exception(TRAP_gp_fault, 0); return; } @@ -2446,11 +2453,11 @@ static int svm_is_erratum_383(struct cpu_user_regs *regs) if ( msr_content != 0xb600000000010015ULL ) return 0; - + /* Clear MCi_STATUS registers */ - for (i = 0; i < nr_mce_banks; i++) + for ( i = 0; i < nr_mce_banks; i++ ) wrmsrl(MSR_IA32_MCx_STATUS(i), 0ULL); - + rdmsrl(MSR_IA32_MCG_STATUS, msr_content); wrmsrl(MSR_IA32_MCG_STATUS, msr_content & ~(1ULL << 2)); @@ -2460,8 +2467,7 @@ static int 
svm_is_erratum_383(struct cpu_user_regs *regs) return 1; } -static void svm_vmexit_mce_intercept( - struct vcpu *v, struct cpu_user_regs *regs) +static void svm_vmexit_mce_intercept(struct vcpu *v, struct cpu_user_regs *regs) { if ( svm_is_erratum_383(regs) ) { @@ -2479,8 +2485,8 @@ static void svm_wbinvd_intercept(void) static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs, bool invld) { - unsigned int inst_len = svm_get_insn_len(current, invld ? INSTR_INVD - : INSTR_WBINVD); + unsigned int inst_len = + svm_get_insn_len(current, invld ? INSTR_INVD : INSTR_WBINVD); if ( inst_len == 0 ) return; @@ -2490,13 +2496,11 @@ static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs, __update_guest_eip(regs, inst_len); } -static void svm_invlpga_intercept( - struct vcpu *v, unsigned long linear, uint32_t asid) +static void svm_invlpga_intercept(struct vcpu *v, unsigned long linear, + uint32_t asid) { - svm_invlpga(linear, - (asid == 0) - ? v->arch.hvm.n1asid.asid - : vcpu_nestedhvm(v).nv_n2asid.asid); + svm_invlpga(linear, (asid == 0) ? v->arch.hvm.n1asid.asid + : vcpu_nestedhvm(v).nv_n2asid.asid); } static void svm_invlpg_intercept(unsigned long linear) @@ -2511,8 +2515,7 @@ static bool is_invlpg(const struct x86_emulate_state *state, unsigned int ext; return ctxt->opcode == X86EMUL_OPC(0x0f, 0x01) && - x86_insn_modrm(state, NULL, &ext) != 3 && - (ext & 7) == 7; + x86_insn_modrm(state, NULL, &ext) != 3 && (ext & 7) == 7; } static void svm_invlpg(struct vcpu *v, unsigned long linear) @@ -2535,45 +2538,45 @@ static bool svm_get_pending_event(struct vcpu *v, struct x86_event *info) } static struct hvm_function_table __initdata svm_function_table = { - .name = "SVM", - .cpu_up_prepare = svm_cpu_up_prepare, - .cpu_dead = svm_cpu_dead, - .cpu_up = svm_cpu_up, - .cpu_down = svm_cpu_down, - .domain_initialise = svm_domain_initialise, - .domain_destroy = svm_domain_destroy, - .vcpu_initialise = svm_vcpu_initialise, - .vcpu_destroy = svm_vcpu_destroy, - .save_cpu_ctxt = svm_save_vmcb_ctxt, - .load_cpu_ctxt = svm_load_vmcb_ctxt, + .name = "SVM", + .cpu_up_prepare = svm_cpu_up_prepare, + .cpu_dead = svm_cpu_dead, + .cpu_up = svm_cpu_up, + .cpu_down = svm_cpu_down, + .domain_initialise = svm_domain_initialise, + .domain_destroy = svm_domain_destroy, + .vcpu_initialise = svm_vcpu_initialise, + .vcpu_destroy = svm_vcpu_destroy, + .save_cpu_ctxt = svm_save_vmcb_ctxt, + .load_cpu_ctxt = svm_load_vmcb_ctxt, .get_interrupt_shadow = svm_get_interrupt_shadow, .set_interrupt_shadow = svm_set_interrupt_shadow, - .guest_x86_mode = svm_guest_x86_mode, - .get_cpl = svm_get_cpl, + .guest_x86_mode = svm_guest_x86_mode, + .get_cpl = svm_get_cpl, .get_segment_register = svm_get_segment_register, .set_segment_register = svm_set_segment_register, - .get_shadow_gs_base = svm_get_shadow_gs_base, - .update_guest_cr = svm_update_guest_cr, - .update_guest_efer = svm_update_guest_efer, + .get_shadow_gs_base = svm_get_shadow_gs_base, + .update_guest_cr = svm_update_guest_cr, + .update_guest_efer = svm_update_guest_efer, .cpuid_policy_changed = svm_cpuid_policy_changed, - .fpu_leave = svm_fpu_leave, - .set_guest_pat = svm_set_guest_pat, - .get_guest_pat = svm_get_guest_pat, - .set_tsc_offset = svm_set_tsc_offset, - .inject_event = svm_inject_event, - .init_hypercall_page = svm_init_hypercall_page, - .event_pending = svm_event_pending, - .get_pending_event = svm_get_pending_event, - .invlpg = svm_invlpg, - .wbinvd_intercept = svm_wbinvd_intercept, - .fpu_dirty_intercept = svm_fpu_dirty_intercept, - 
.msr_read_intercept = svm_msr_read_intercept, - .msr_write_intercept = svm_msr_write_intercept, + .fpu_leave = svm_fpu_leave, + .set_guest_pat = svm_set_guest_pat, + .get_guest_pat = svm_get_guest_pat, + .set_tsc_offset = svm_set_tsc_offset, + .inject_event = svm_inject_event, + .init_hypercall_page = svm_init_hypercall_page, + .event_pending = svm_event_pending, + .get_pending_event = svm_get_pending_event, + .invlpg = svm_invlpg, + .wbinvd_intercept = svm_wbinvd_intercept, + .fpu_dirty_intercept = svm_fpu_dirty_intercept, + .msr_read_intercept = svm_msr_read_intercept, + .msr_write_intercept = svm_msr_write_intercept, .enable_msr_interception = svm_enable_msr_interception, .set_icebp_interception = svm_set_icebp_interception, - .set_rdtsc_exiting = svm_set_rdtsc_exiting, + .set_rdtsc_exiting = svm_set_rdtsc_exiting, .set_descriptor_access_exiting = svm_set_descriptor_access_exiting, - .get_insn_bytes = svm_get_insn_bytes, + .get_insn_bytes = svm_get_insn_bytes, .nhvm_vcpu_initialise = nsvm_vcpu_initialise, .nhvm_vcpu_destroy = nsvm_vcpu_destroy, @@ -2585,9 +2588,10 @@ static struct hvm_function_table __initdata svm_function_table = { .nhvm_intr_blocked = nsvm_intr_blocked, .nhvm_hap_walk_L1_p2m = nsvm_hap_walk_L1_p2m, - .tsc_scaling = { - .max_ratio = ~TSC_RATIO_RSVD_BITS, - }, + .tsc_scaling = + { + .max_ratio = ~TSC_RATIO_RSVD_BITS, + }, }; void svm_vmexit_handler(struct cpu_user_regs *regs) @@ -2625,22 +2629,22 @@ void svm_vmexit_handler(struct cpu_user_regs *regs) { intr = vmcb_get_vintr(vmcb); vlapic_set_reg(vlapic, APIC_TASKPRI, - ((intr.fields.tpr & 0x0F) << 4) | - (vlapic_get_reg(vlapic, APIC_TASKPRI) & 0x0F)); + ((intr.fields.tpr & 0x0F) << 4) | + (vlapic_get_reg(vlapic, APIC_TASKPRI) & 0x0F)); } exit_reason = vmcb->exitcode; if ( hvm_long_mode_active(v) ) HVMTRACE_ND(VMEXIT64, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0, - 1/*cycles*/, 3, exit_reason, - regs->eip, regs->rip >> 32, 0, 0, 0); + 1 /*cycles*/, 3, exit_reason, regs->eip, regs->rip >> 32, 0, + 0, 0); else HVMTRACE_ND(VMEXIT, vcpu_guestmode ? 
TRC_HVM_NESTEDFLAG : 0, - 1/*cycles*/, 2, exit_reason, - regs->eip, 0, 0, 0, 0); + 1 /*cycles*/, 2, exit_reason, regs->eip, 0, 0, 0, 0); - if ( vcpu_guestmode ) { + if ( vcpu_guestmode ) + { enum nestedhvm_vmexits nsret; struct nestedvcpu *nv = &vcpu_nestedhvm(v); struct vmcb_struct *ns_vmcb = nv->nv_vvmcx; @@ -2655,7 +2659,8 @@ void svm_vmexit_handler(struct cpu_user_regs *regs) exitinfo1 = ns_vmcb->exitinfo1; ns_vmcb->exitinfo1 = vmcb->exitinfo1; nsret = nestedsvm_check_intercepts(v, regs, exit_reason); - switch (nsret) { + switch (nsret) + { case NESTEDHVM_VMEXIT_CONTINUE: BUG(); break; @@ -2671,7 +2676,8 @@ void svm_vmexit_handler(struct cpu_user_regs *regs) nv->nv_vmswitch_in_progress = 1; nsret = nestedsvm_vmexit_n2n1(v, regs); nv->nv_vmswitch_in_progress = 0; - switch (nsret) { + switch (nsret) + { case NESTEDHVM_VMEXIT_DONE: /* defer VMEXIT injection */ nestedsvm_vmexit_defer(v, exit_reason, exitinfo1, exitinfo2); @@ -2687,17 +2693,17 @@ void svm_vmexit_handler(struct cpu_user_regs *regs) } /* fallthrough */ case NESTEDHVM_VMEXIT_ERROR: - gdprintk(XENLOG_ERR, - "nestedsvm_check_intercepts() returned NESTEDHVM_VMEXIT_ERROR\n"); + gdprintk(XENLOG_ERR, "nestedsvm_check_intercepts() returned " + "NESTEDHVM_VMEXIT_ERROR\n"); goto out; case NESTEDHVM_VMEXIT_FATALERROR: gdprintk(XENLOG_ERR, - "unexpected nestedsvm_check_intercepts() error\n"); + "unexpected nestedsvm_check_intercepts() error\n"); domain_crash(v->domain); goto out; default: gdprintk(XENLOG_INFO, "nestedsvm_check_intercepts() returned %i\n", - nsret); + nsret); domain_crash(v->domain); goto out; } @@ -2724,7 +2730,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs) eventinj.fields.vector) ) vmcb->eventinj = eventinj; - switch ( exit_reason ) + switch (exit_reason) { case VMEXIT_INTR: /* Asynchronous event, handled when we STGI'd after the VMEXIT. */ @@ -2759,14 +2765,13 @@ void svm_vmexit_handler(struct cpu_user_regs *regs) inst_len = svm_get_insn_len(v, INSTR_ICEBP); } - rc = hvm_monitor_debug(regs->rip, - HVM_MONITOR_DEBUG_EXCEPTION, + rc = hvm_monitor_debug(regs->rip, HVM_MONITOR_DEBUG_EXCEPTION, trap_type, inst_len); if ( rc < 0 ) goto unexpected_exit_type; if ( !rc ) - hvm_inject_exception(TRAP_debug, - trap_type, inst_len, X86_EVENT_NO_EC); + hvm_inject_exception(TRAP_debug, trap_type, inst_len, + X86_EVENT_NO_EC); } else domain_pause_for_debugger(); @@ -2776,44 +2781,43 @@ void svm_vmexit_handler(struct cpu_user_regs *regs) inst_len = svm_get_insn_len(v, INSTR_INT3); if ( inst_len == 0 ) - break; + break; if ( v->domain->debugger_attached ) { - /* AMD Vol2, 15.11: INT3, INTO, BOUND intercepts do not update RIP. */ + /* AMD Vol2, 15.11: INT3, INTO, BOUND intercepts do not update RIP. 
+ */ __update_guest_eip(regs, inst_len); current->arch.gdbsx_vcpu_event = TRAP_int3; domain_pause_for_debugger(); } else { - int rc; - - rc = hvm_monitor_debug(regs->rip, - HVM_MONITOR_SOFTWARE_BREAKPOINT, - X86_EVENTTYPE_SW_EXCEPTION, - inst_len); - if ( rc < 0 ) - goto unexpected_exit_type; - if ( !rc ) - hvm_inject_exception(TRAP_int3, - X86_EVENTTYPE_SW_EXCEPTION, - inst_len, X86_EVENT_NO_EC); + int rc; + + rc = hvm_monitor_debug(regs->rip, HVM_MONITOR_SOFTWARE_BREAKPOINT, + X86_EVENTTYPE_SW_EXCEPTION, inst_len); + if ( rc < 0 ) + goto unexpected_exit_type; + if ( !rc ) + hvm_inject_exception(TRAP_int3, X86_EVENTTYPE_SW_EXCEPTION, + inst_len, X86_EVENT_NO_EC); } break; case VMEXIT_EXCEPTION_NM: svm_fpu_dirty_intercept(); - break; + break; - case VMEXIT_EXCEPTION_PF: { + case VMEXIT_EXCEPTION_PF: + { unsigned long va; va = vmcb->exitinfo2; regs->error_code = vmcb->exitinfo1; HVM_DBG_LOG(DBG_LEVEL_VMMU, "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx", - regs->rax, regs->rbx, regs->rcx, - regs->rdx, regs->rsi, regs->rdi); + regs->rax, regs->rbx, regs->rcx, regs->rdx, regs->rsi, + regs->rdi); if ( cpu_has_svm_decode ) v->arch.hvm.svm.cached_insn_len = vmcb->guest_ins_len & 0xf; @@ -2850,7 +2854,8 @@ void svm_vmexit_handler(struct cpu_user_regs *regs) svm_vmexit_mce_intercept(v, regs); break; - case VMEXIT_VINTR: { + case VMEXIT_VINTR: + { u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb); intr = vmcb_get_vintr(vmcb); @@ -2867,7 +2872,8 @@ void svm_vmexit_handler(struct cpu_user_regs *regs) svm_vmexit_do_invalidate_cache(regs, exit_reason == VMEXIT_INVD); break; - case VMEXIT_TASK_SWITCH: { + case VMEXIT_TASK_SWITCH: + { enum hvm_task_switch_reason reason; int32_t errcode = -1; if ( (vmcb->exitinfo2 >> 36) & 1 ) @@ -2913,7 +2919,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs) break; case VMEXIT_IOIO: - if ( (vmcb->exitinfo1 & (1u<<2)) == 0 ) + if ( (vmcb->exitinfo1 & (1u << 2)) == 0 ) { uint16_t port = (vmcb->exitinfo1 >> 16) & 0xFFFF; int bytes = ((vmcb->exitinfo1 >> 4) & 0x07); @@ -3020,7 +3026,8 @@ void svm_vmexit_handler(struct cpu_user_regs *regs) if ( cpu_has_svm_decode ) v->arch.hvm.svm.cached_insn_len = vmcb->guest_ins_len & 0xf; rc = vmcb->exitinfo1 & PFEC_page_present - ? p2m_pt_handle_deferred_changes(vmcb->exitinfo2) : 0; + ? 
p2m_pt_handle_deferred_changes(vmcb->exitinfo2) + : 0; if ( rc >= 0 ) svm_do_nested_pgfault(v, regs, vmcb->exitinfo1, vmcb->exitinfo2); else @@ -3033,7 +3040,8 @@ void svm_vmexit_handler(struct cpu_user_regs *regs) v->arch.hvm.svm.cached_insn_len = 0; break; - case VMEXIT_IRET: { + case VMEXIT_IRET: + { u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb); /* @@ -3058,44 +3066,48 @@ void svm_vmexit_handler(struct cpu_user_regs *regs) case VMEXIT_IDTR_READ: case VMEXIT_IDTR_WRITE: hvm_descriptor_access_intercept(vmcb->exitintinfo.bytes, 0, - VM_EVENT_DESC_IDTR, exit_reason == VMEXIT_IDTR_WRITE); + VM_EVENT_DESC_IDTR, + exit_reason == VMEXIT_IDTR_WRITE); break; case VMEXIT_GDTR_READ: case VMEXIT_GDTR_WRITE: hvm_descriptor_access_intercept(vmcb->exitintinfo.bytes, 0, - VM_EVENT_DESC_GDTR, exit_reason == VMEXIT_GDTR_WRITE); + VM_EVENT_DESC_GDTR, + exit_reason == VMEXIT_GDTR_WRITE); break; case VMEXIT_LDTR_READ: case VMEXIT_LDTR_WRITE: hvm_descriptor_access_intercept(vmcb->exitintinfo.bytes, 0, - VM_EVENT_DESC_LDTR, exit_reason == VMEXIT_LDTR_WRITE); + VM_EVENT_DESC_LDTR, + exit_reason == VMEXIT_LDTR_WRITE); break; case VMEXIT_TR_READ: case VMEXIT_TR_WRITE: hvm_descriptor_access_intercept(vmcb->exitintinfo.bytes, 0, - VM_EVENT_DESC_TR, exit_reason == VMEXIT_TR_WRITE); + VM_EVENT_DESC_TR, + exit_reason == VMEXIT_TR_WRITE); break; default: unexpected_exit_type: - gprintk(XENLOG_ERR, "Unexpected vmexit: reason %#"PRIx64", " - "exitinfo1 %#"PRIx64", exitinfo2 %#"PRIx64"\n", + gprintk(XENLOG_ERR, + "Unexpected vmexit: reason %#" PRIx64 ", " + "exitinfo1 %#" PRIx64 ", exitinfo2 %#" PRIx64 "\n", exit_reason, vmcb->exitinfo1, vmcb->exitinfo2); svm_crash_or_fault(v); break; } - out: +out: if ( vcpu_guestmode || vlapic_hw_disabled(vlapic) ) return; /* The exit may have updated the TPR: reflect this in the hardware vtpr */ intr = vmcb_get_vintr(vmcb); - intr.fields.tpr = - (vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xFF) >> 4; + intr.fields.tpr = (vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xFF) >> 4; vmcb_set_vintr(vmcb, intr); } diff --git a/xen/arch/x86/hvm/svm/svmdebug.c b/xen/arch/x86/hvm/svm/svmdebug.c index d35e40596b..50c9e07b89 100644 --- a/xen/arch/x86/hvm/svm/svmdebug.c +++ b/xen/arch/x86/hvm/svm/svmdebug.c @@ -23,14 +23,14 @@ static void svm_dump_sel(const char *name, const struct segment_register *s) { - printk("%s: %04x %04x %08x %016"PRIx64"\n", - name, s->sel, s->attr, s->limit, s->base); + printk("%s: %04x %04x %08x %016" PRIx64 "\n", name, s->sel, s->attr, + s->limit, s->base); } void svm_vmcb_dump(const char *from, const struct vmcb_struct *vmcb) { printk("Dumping guest's current state at %s...\n", from); - printk("Size of VMCB = %zu, paddr = %"PRIpaddr", vaddr = %p\n", + printk("Size of VMCB = %zu, paddr = %" PRIpaddr ", vaddr = %p\n", sizeof(struct vmcb_struct), virt_to_maddr(vmcb), vmcb); printk("cr_intercepts = %#x dr_intercepts = %#x " @@ -38,43 +38,47 @@ void svm_vmcb_dump(const char *from, const struct vmcb_struct *vmcb) vmcb_get_cr_intercepts(vmcb), vmcb_get_dr_intercepts(vmcb), vmcb_get_exception_intercepts(vmcb)); printk("general1_intercepts = %#x general2_intercepts = %#x\n", - vmcb_get_general1_intercepts(vmcb), vmcb_get_general2_intercepts(vmcb)); - printk("iopm_base_pa = %#"PRIx64" msrpm_base_pa = %#"PRIx64" tsc_offset = %#"PRIx64"\n", + vmcb_get_general1_intercepts(vmcb), + vmcb_get_general2_intercepts(vmcb)); + printk("iopm_base_pa = %#" PRIx64 " msrpm_base_pa = %#" PRIx64 + " tsc_offset = %#" PRIx64 "\n", vmcb_get_iopm_base_pa(vmcb), vmcb_get_msrpm_base_pa(vmcb), 
vmcb_get_tsc_offset(vmcb)); - printk("tlb_control = %#x vintr = %#"PRIx64" interrupt_shadow = %#"PRIx64"\n", + printk("tlb_control = %#x vintr = %#" PRIx64 " interrupt_shadow = %#" PRIx64 + "\n", vmcb->tlb_control, vmcb_get_vintr(vmcb).bytes, vmcb->interrupt_shadow); - printk("eventinj %016"PRIx64", valid? %d, ec? %d, type %u, vector %#x\n", + printk("eventinj %016" PRIx64 ", valid? %d, ec? %d, type %u, vector %#x\n", vmcb->eventinj.bytes, vmcb->eventinj.fields.v, vmcb->eventinj.fields.ev, vmcb->eventinj.fields.type, vmcb->eventinj.fields.vector); - printk("exitcode = %#"PRIx64" exitintinfo = %#"PRIx64"\n", + printk("exitcode = %#" PRIx64 " exitintinfo = %#" PRIx64 "\n", vmcb->exitcode, vmcb->exitintinfo.bytes); - printk("exitinfo1 = %#"PRIx64" exitinfo2 = %#"PRIx64"\n", + printk("exitinfo1 = %#" PRIx64 " exitinfo2 = %#" PRIx64 "\n", vmcb->exitinfo1, vmcb->exitinfo2); - printk("np_enable = %#"PRIx64" guest_asid = %#x\n", + printk("np_enable = %#" PRIx64 " guest_asid = %#x\n", vmcb_get_np_enable(vmcb), vmcb_get_guest_asid(vmcb)); - printk("virtual vmload/vmsave = %d, virt_ext = %#"PRIx64"\n", + printk("virtual vmload/vmsave = %d, virt_ext = %#" PRIx64 "\n", vmcb->virt_ext.fields.vloadsave_enable, vmcb->virt_ext.bytes); - printk("cpl = %d efer = %#"PRIx64" star = %#"PRIx64" lstar = %#"PRIx64"\n", + printk("cpl = %d efer = %#" PRIx64 " star = %#" PRIx64 " lstar = %#" PRIx64 + "\n", vmcb_get_cpl(vmcb), vmcb_get_efer(vmcb), vmcb->star, vmcb->lstar); - printk("CR0 = 0x%016"PRIx64" CR2 = 0x%016"PRIx64"\n", + printk("CR0 = 0x%016" PRIx64 " CR2 = 0x%016" PRIx64 "\n", vmcb_get_cr0(vmcb), vmcb_get_cr2(vmcb)); - printk("CR3 = 0x%016"PRIx64" CR4 = 0x%016"PRIx64"\n", + printk("CR3 = 0x%016" PRIx64 " CR4 = 0x%016" PRIx64 "\n", vmcb_get_cr3(vmcb), vmcb_get_cr4(vmcb)); - printk("RSP = 0x%016"PRIx64" RIP = 0x%016"PRIx64"\n", - vmcb->rsp, vmcb->rip); - printk("RAX = 0x%016"PRIx64" RFLAGS=0x%016"PRIx64"\n", - vmcb->rax, vmcb->rflags); - printk("DR6 = 0x%016"PRIx64", DR7 = 0x%016"PRIx64"\n", + printk("RSP = 0x%016" PRIx64 " RIP = 0x%016" PRIx64 "\n", vmcb->rsp, + vmcb->rip); + printk("RAX = 0x%016" PRIx64 " RFLAGS=0x%016" PRIx64 "\n", vmcb->rax, + vmcb->rflags); + printk("DR6 = 0x%016" PRIx64 ", DR7 = 0x%016" PRIx64 "\n", vmcb_get_dr6(vmcb), vmcb_get_dr7(vmcb)); - printk("CSTAR = 0x%016"PRIx64" SFMask = 0x%016"PRIx64"\n", - vmcb->cstar, vmcb->sfmask); - printk("KernGSBase = 0x%016"PRIx64" PAT = 0x%016"PRIx64"\n", + printk("CSTAR = 0x%016" PRIx64 " SFMask = 0x%016" PRIx64 "\n", vmcb->cstar, + vmcb->sfmask); + printk("KernGSBase = 0x%016" PRIx64 " PAT = 0x%016" PRIx64 "\n", vmcb->kerngsbase, vmcb_get_g_pat(vmcb)); - printk("H_CR3 = 0x%016"PRIx64" CleanBits = %#x\n", - vmcb_get_h_cr3(vmcb), vmcb->cleanbits.bytes); + printk("H_CR3 = 0x%016" PRIx64 " CleanBits = %#x\n", vmcb_get_h_cr3(vmcb), + vmcb->cleanbits.bytes); /* print out all the selectors */ printk(" sel attr limit base\n"); @@ -99,45 +103,48 @@ bool svm_vmcb_isvalid(const char *from, const struct vmcb_struct *vmcb, unsigned long cr4 = vmcb_get_cr4(vmcb); uint64_t efer = vmcb_get_efer(vmcb); -#define PRINTF(fmt, args...) do { \ - if ( !verbose ) return true; \ - ret = true; \ - printk(XENLOG_GUEST "%pv[%s]: " fmt, v, from, ## args); \ -} while (0) +#define PRINTF(fmt, args...) 
\ + do { \ + if ( !verbose ) \ + return true; \ + ret = true; \ + printk(XENLOG_GUEST "%pv[%s]: " fmt, v, from, ##args); \ + } while ( 0 ) if ( !(efer & EFER_SVME) ) - PRINTF("EFER: SVME bit not set (%#"PRIx64")\n", efer); + PRINTF("EFER: SVME bit not set (%#" PRIx64 ")\n", efer); if ( !(cr0 & X86_CR0_CD) && (cr0 & X86_CR0_NW) ) - PRINTF("CR0: CD bit is zero and NW bit set (%#"PRIx64")\n", cr0); + PRINTF("CR0: CD bit is zero and NW bit set (%#" PRIx64 ")\n", cr0); if ( cr0 >> 32 ) - PRINTF("CR0: bits [63:32] are not zero (%#"PRIx64")\n", cr0); + PRINTF("CR0: bits [63:32] are not zero (%#" PRIx64 ")\n", cr0); if ( (cr0 & X86_CR0_PG) && ((cr3 & 7) || ((!(cr4 & X86_CR4_PAE) || (efer & EFER_LMA)) && (cr3 & 0xfe0)) || ((efer & EFER_LMA) && (cr3 >> v->domain->arch.cpuid->extd.maxphysaddr))) ) - PRINTF("CR3: MBZ bits are set (%#"PRIx64")\n", cr3); + PRINTF("CR3: MBZ bits are set (%#" PRIx64 ")\n", cr3); if ( cr4 & ~hvm_cr4_guest_valid_bits(v->domain, false) ) - PRINTF("CR4: invalid bits are set (%#"PRIx64", valid: %#"PRIx64")\n", + PRINTF("CR4: invalid bits are set (%#" PRIx64 ", valid: %#" PRIx64 + ")\n", cr4, hvm_cr4_guest_valid_bits(v->domain, false)); if ( vmcb_get_dr6(vmcb) >> 32 ) - PRINTF("DR6: bits [63:32] are not zero (%#"PRIx64")\n", + PRINTF("DR6: bits [63:32] are not zero (%#" PRIx64 ")\n", vmcb_get_dr6(vmcb)); if ( vmcb_get_dr7(vmcb) >> 32 ) - PRINTF("DR7: bits [63:32] are not zero (%#"PRIx64")\n", + PRINTF("DR7: bits [63:32] are not zero (%#" PRIx64 ")\n", vmcb_get_dr7(vmcb)); if ( efer & ~EFER_KNOWN_MASK ) - PRINTF("EFER: unknown bits are not zero (%#"PRIx64")\n", efer); + PRINTF("EFER: unknown bits are not zero (%#" PRIx64 ")\n", efer); if ( hvm_efer_valid(v, efer, -1) ) - PRINTF("EFER: %s (%"PRIx64")\n", hvm_efer_valid(v, efer, -1), efer); + PRINTF("EFER: %s (%" PRIx64 ")\n", hvm_efer_valid(v, efer, -1), efer); if ( (efer & EFER_LME) && (cr0 & X86_CR0_PG) ) { @@ -152,11 +159,12 @@ bool svm_vmcb_isvalid(const char *from, const struct vmcb_struct *vmcb, PRINTF("EFER_LME, CR0.PG, CR4.PAE, CS.L and CS.D are all non-zero\n"); if ( !(vmcb_get_general2_intercepts(vmcb) & GENERAL2_INTERCEPT_VMRUN) ) - PRINTF("GENERAL2_INTERCEPT: VMRUN intercept bit is clear (%#"PRIx32")\n", + PRINTF("GENERAL2_INTERCEPT: VMRUN intercept bit is clear (%#" PRIx32 + ")\n", vmcb_get_general2_intercepts(vmcb)); if ( vmcb->eventinj.fields.resvd1 ) - PRINTF("eventinj: MBZ bits are set (%#"PRIx64")\n", + PRINTF("eventinj: MBZ bits are set (%#" PRIx64 ")\n", vmcb->eventinj.bytes); #undef PRINTF diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c index 9d1c5bf6af..a8a00f67c4 100644 --- a/xen/arch/x86/hvm/svm/vmcb.c +++ b/xen/arch/x86/hvm/svm/vmcb.c @@ -30,7 +30,7 @@ #include #include -struct vmcb_struct *alloc_vmcb(void) +struct vmcb_struct *alloc_vmcb(void) { struct vmcb_struct *vmcb; @@ -59,30 +59,28 @@ static int construct_vmcb(struct vcpu *v) /* Build-time check of the size of VMCB AMD structure. 
*/ BUILD_BUG_ON(sizeof(*vmcb) != PAGE_SIZE); - vmcb->_general1_intercepts = - GENERAL1_INTERCEPT_INTR | GENERAL1_INTERCEPT_NMI | - GENERAL1_INTERCEPT_SMI | GENERAL1_INTERCEPT_INIT | - GENERAL1_INTERCEPT_CPUID | GENERAL1_INTERCEPT_INVD | - GENERAL1_INTERCEPT_HLT | GENERAL1_INTERCEPT_INVLPG | - GENERAL1_INTERCEPT_INVLPGA | GENERAL1_INTERCEPT_IOIO_PROT | - GENERAL1_INTERCEPT_MSR_PROT | GENERAL1_INTERCEPT_SHUTDOWN_EVT| + vmcb->_general1_intercepts = + GENERAL1_INTERCEPT_INTR | GENERAL1_INTERCEPT_NMI | + GENERAL1_INTERCEPT_SMI | GENERAL1_INTERCEPT_INIT | + GENERAL1_INTERCEPT_CPUID | GENERAL1_INTERCEPT_INVD | + GENERAL1_INTERCEPT_HLT | GENERAL1_INTERCEPT_INVLPG | + GENERAL1_INTERCEPT_INVLPGA | GENERAL1_INTERCEPT_IOIO_PROT | + GENERAL1_INTERCEPT_MSR_PROT | GENERAL1_INTERCEPT_SHUTDOWN_EVT | GENERAL1_INTERCEPT_TASK_SWITCH; - vmcb->_general2_intercepts = - GENERAL2_INTERCEPT_VMRUN | GENERAL2_INTERCEPT_VMMCALL | - GENERAL2_INTERCEPT_VMLOAD | GENERAL2_INTERCEPT_VMSAVE | - GENERAL2_INTERCEPT_STGI | GENERAL2_INTERCEPT_CLGI | - GENERAL2_INTERCEPT_SKINIT | GENERAL2_INTERCEPT_MWAIT | - GENERAL2_INTERCEPT_WBINVD | GENERAL2_INTERCEPT_MONITOR | + vmcb->_general2_intercepts = + GENERAL2_INTERCEPT_VMRUN | GENERAL2_INTERCEPT_VMMCALL | + GENERAL2_INTERCEPT_VMLOAD | GENERAL2_INTERCEPT_VMSAVE | + GENERAL2_INTERCEPT_STGI | GENERAL2_INTERCEPT_CLGI | + GENERAL2_INTERCEPT_SKINIT | GENERAL2_INTERCEPT_MWAIT | + GENERAL2_INTERCEPT_WBINVD | GENERAL2_INTERCEPT_MONITOR | GENERAL2_INTERCEPT_XSETBV; /* Intercept all debug-register writes. */ vmcb->_dr_intercepts = ~0u; /* Intercept all control-register accesses except for CR2 and CR8. */ - vmcb->_cr_intercepts = ~(CR_INTERCEPT_CR2_READ | - CR_INTERCEPT_CR2_WRITE | - CR_INTERCEPT_CR8_READ | - CR_INTERCEPT_CR8_WRITE); + vmcb->_cr_intercepts = ~(CR_INTERCEPT_CR2_READ | CR_INTERCEPT_CR2_WRITE | + CR_INTERCEPT_CR8_READ | CR_INTERCEPT_CR8_WRITE); svm->vmcb_sync_state = vmcb_needs_vmload; @@ -110,7 +108,7 @@ static int construct_vmcb(struct vcpu *v) /* Virtualise EFLAGS.IF and LAPIC TPR (CR8). */ vmcb->_vintr.fields.intr_masking = 1; - + /* Initialise event injection to no-op. */ vmcb->eventinj.bytes = 0; @@ -180,19 +178,18 @@ static int construct_vmcb(struct vcpu *v) paging_update_paging_modes(v); vmcb->_exception_intercepts = - HVM_TRAP_MASK | - (v->arch.fully_eager_fpu ? 0 : (1U << TRAP_no_device)); + HVM_TRAP_MASK | (v->arch.fully_eager_fpu ? 0 : (1U << TRAP_no_device)); if ( paging_mode_hap(v->domain) ) { - vmcb->_np_enable = 1; /* enable nested paging */ + vmcb->_np_enable = 1; /* enable nested paging */ vmcb->_g_pat = MSR_IA32_CR_PAT_RESET; /* guest PAT */ - vmcb->_h_cr3 = pagetable_get_paddr( - p2m_get_pagetable(p2m_get_hostp2m(v->domain))); + vmcb->_h_cr3 = + pagetable_get_paddr(p2m_get_pagetable(p2m_get_hostp2m(v->domain))); /* No point in intercepting CR3 reads/writes. 
*/ vmcb->_cr_intercepts &= - ~(CR_INTERCEPT_CR3_READ|CR_INTERCEPT_CR3_WRITE); + ~(CR_INTERCEPT_CR3_READ | CR_INTERCEPT_CR3_WRITE); /* * No point in intercepting INVLPG if we don't have shadow pagetables @@ -228,8 +225,7 @@ int svm_create_vmcb(struct vcpu *v) struct svm_vcpu *svm = &v->arch.hvm.svm; int rc; - if ( (nv->nv_n1vmcx == NULL) && - (nv->nv_n1vmcx = alloc_vmcb()) == NULL ) + if ( (nv->nv_n1vmcx == NULL) && (nv->nv_n1vmcx = alloc_vmcb()) == NULL ) { printk("Failed to create a new VMCB\n"); return -ENOMEM; @@ -259,8 +255,7 @@ void svm_destroy_vmcb(struct vcpu *v) if ( svm->msrpm != NULL ) { - free_xenheap_pages( - svm->msrpm, get_order_from_bytes(MSRPM_SIZE)); + free_xenheap_pages(svm->msrpm, get_order_from_bytes(MSRPM_SIZE)); svm->msrpm = NULL; } @@ -273,17 +268,17 @@ static void vmcb_dump(unsigned char ch) { struct domain *d; struct vcpu *v; - + printk("*********** VMCB Areas **************\n"); rcu_read_lock(&domlist_read_lock); - for_each_domain ( d ) + for_each_domain (d) { if ( !is_hvm_domain(d) ) continue; printk("\n>>> Domain %d <<<\n", d->domain_id); - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { printk("\tVCPU %d\n", v->vcpu_id); svm_vmcb_dump("key_handler", v->arch.hvm.svm.vmcb); @@ -305,15 +300,15 @@ static void __init __maybe_unused build_assertions(void) struct segment_register sreg; /* Check struct segment_register against the VMCB segment layout. */ - BUILD_BUG_ON(sizeof(sreg) != 16); - BUILD_BUG_ON(sizeof(sreg.sel) != 2); - BUILD_BUG_ON(sizeof(sreg.attr) != 2); + BUILD_BUG_ON(sizeof(sreg) != 16); + BUILD_BUG_ON(sizeof(sreg.sel) != 2); + BUILD_BUG_ON(sizeof(sreg.attr) != 2); BUILD_BUG_ON(sizeof(sreg.limit) != 4); - BUILD_BUG_ON(sizeof(sreg.base) != 8); - BUILD_BUG_ON(offsetof(struct segment_register, sel) != 0); - BUILD_BUG_ON(offsetof(struct segment_register, attr) != 2); + BUILD_BUG_ON(sizeof(sreg.base) != 8); + BUILD_BUG_ON(offsetof(struct segment_register, sel) != 0); + BUILD_BUG_ON(offsetof(struct segment_register, attr) != 2); BUILD_BUG_ON(offsetof(struct segment_register, limit) != 4); - BUILD_BUG_ON(offsetof(struct segment_register, base) != 8); + BUILD_BUG_ON(offsetof(struct segment_register, base) != 8); } /* diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c index 9c25f72b4d..d9ddc83e77 100644 --- a/xen/arch/x86/hvm/vioapic.c +++ b/xen/arch/x86/hvm/vioapic.c @@ -62,8 +62,8 @@ static struct hvm_vioapic *addr_vioapic(const struct domain *d, return NULL; } -static struct hvm_vioapic *gsi_vioapic(const struct domain *d, - unsigned int gsi, unsigned int *pin) +static struct hvm_vioapic *gsi_vioapic(const struct domain *d, unsigned int gsi, + unsigned int *pin) { unsigned int i; @@ -92,13 +92,13 @@ static uint32_t vioapic_read_indirect(const struct hvm_vioapic *vioapic) { uint32_t result = 0; - switch ( vioapic->ioregsel ) + switch (vioapic->ioregsel) { case VIOAPIC_REG_VERSION: - result = ((union IO_APIC_reg_01){ - .bits = { .version = VIOAPIC_VERSION_ID, - .entries = vioapic->nr_pins - 1 } - }).raw; + result = + ((union IO_APIC_reg_01){.bits = {.version = VIOAPIC_VERSION_ID, + .entries = vioapic->nr_pins - 1}}) + .raw; break; case VIOAPIC_REG_APIC_ID: @@ -107,9 +107,8 @@ static uint32_t vioapic_read_indirect(const struct hvm_vioapic *vioapic) * union IO_APIC_reg_00's ID field is 8 bits wide for some reason. 
*/ case VIOAPIC_REG_ARB_ID: - result = ((union IO_APIC_reg_02){ - .bits = { .arbitration = vioapic->id } - }).raw; + result = + ((union IO_APIC_reg_02){.bits = {.arbitration = vioapic->id}}).raw; break; default: @@ -124,10 +123,11 @@ static uint32_t vioapic_read_indirect(const struct hvm_vioapic *vioapic) break; } - redir_content = vioapic->redirtbl[array_index_nospec(redir_index, - vioapic->nr_pins)].bits; - result = (vioapic->ioregsel & 1) ? (redir_content >> 32) - : redir_content; + redir_content = + vioapic->redirtbl[array_index_nospec(redir_index, vioapic->nr_pins)] + .bits; + result = + (vioapic->ioregsel & 1) ? (redir_content >> 32) : redir_content; break; } } @@ -135,9 +135,8 @@ static uint32_t vioapic_read_indirect(const struct hvm_vioapic *vioapic) return result; } -static int vioapic_read( - struct vcpu *v, unsigned long addr, - unsigned int length, unsigned long *pval) +static int vioapic_read(struct vcpu *v, unsigned long addr, unsigned int length, + unsigned long *pval) { const struct hvm_vioapic *vioapic; uint32_t result; @@ -147,7 +146,7 @@ static int vioapic_read( vioapic = addr_vioapic(v->domain, addr); ASSERT(vioapic); - switch ( addr & 0xff ) + switch (addr & 0xff) { case VIOAPIC_REG_SELECT: result = vioapic->ioregsel; @@ -184,16 +183,16 @@ static int vioapic_hwdom_map_gsi(unsigned int gsi, unsigned int trig, return 0; if ( ret ) { - gprintk(XENLOG_WARNING, "vioapic: error registering GSI %u: %d\n", - gsi, ret); + gprintk(XENLOG_WARNING, "vioapic: error registering GSI %u: %d\n", gsi, + ret); return ret; } ret = allocate_and_map_gsi_pirq(currd, pirq, &pirq); if ( ret ) { - gprintk(XENLOG_WARNING, "vioapic: error mapping GSI %u: %d\n", - gsi, ret); + gprintk(XENLOG_WARNING, "vioapic: error mapping GSI %u: %d\n", gsi, + ret); return ret; } @@ -201,8 +200,8 @@ static int vioapic_hwdom_map_gsi(unsigned int gsi, unsigned int trig, ret = pt_irq_create_bind(currd, &pt_irq_bind); if ( ret ) { - gprintk(XENLOG_WARNING, "vioapic: error binding GSI %u: %d\n", - gsi, ret); + gprintk(XENLOG_WARNING, "vioapic: error binding GSI %u: %d\n", gsi, + ret); spin_lock(&currd->event_lock); unmap_domain_pirq(currd, pirq); spin_unlock(&currd->event_lock); @@ -212,9 +211,8 @@ static int vioapic_hwdom_map_gsi(unsigned int gsi, unsigned int trig, return ret; } -static void vioapic_write_redirent( - struct hvm_vioapic *vioapic, unsigned int idx, - int top_word, uint32_t val) +static void vioapic_write_redirent(struct hvm_vioapic *vioapic, + unsigned int idx, int top_word, uint32_t val) { struct domain *d = vioapic_domain(vioapic); struct hvm_irq *hvm_irq = hvm_domain_irq(d); @@ -233,7 +231,7 @@ static void vioapic_write_redirent( spin_lock(&d->arch.hvm.irq_lock); pent = &vioapic->redirtbl[idx]; - ent = *pent; + ent = *pent; if ( top_word ) { @@ -258,8 +256,7 @@ static void vioapic_write_redirent( } else if ( ent.fields.trig_mode == VIOAPIC_EDGE_TRIG ) pent->fields.remote_irr = 0; - else if ( !ent.fields.mask && - !ent.fields.remote_irr && + else if ( !ent.fields.mask && !ent.fields.remote_irr && hvm_irq->gsi_assert_count[idx] ) { pent->fields.remote_irr = 1; @@ -290,10 +287,9 @@ static void vioapic_write_redirent( pt_may_unmask_irq(d, NULL); } -static void vioapic_write_indirect( - struct hvm_vioapic *vioapic, uint32_t val) +static void vioapic_write_indirect(struct hvm_vioapic *vioapic, uint32_t val) { - switch ( vioapic->ioregsel ) + switch (vioapic->ioregsel) { case VIOAPIC_REG_VERSION: /* Writes are ignored. 
*/ @@ -305,7 +301,7 @@ static void vioapic_write_indirect( * 4 bit ID field (compared to 8 for AMD), using union IO_APIC_reg_02 * for the ID register (union IO_APIC_reg_00's ID field is 8 bits). */ - vioapic->id = ((union IO_APIC_reg_02){ .raw = val }).bits.arbitration; + vioapic->id = ((union IO_APIC_reg_02){.raw = val}).bits.arbitration; break; case VIOAPIC_REG_ARB_ID: @@ -315,33 +311,34 @@ static void vioapic_write_indirect( { uint32_t redir_index = (vioapic->ioregsel - VIOAPIC_REG_RTE0) >> 1; - HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "rte[%02x].%s = %08x", - redir_index, vioapic->ioregsel & 1 ? "hi" : "lo", val); + HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "rte[%02x].%s = %08x", redir_index, + vioapic->ioregsel & 1 ? "hi" : "lo", val); if ( redir_index >= vioapic->nr_pins ) { - gdprintk(XENLOG_WARNING, "vioapic_write_indirect " - "error register %x\n", vioapic->ioregsel); + gdprintk(XENLOG_WARNING, + "vioapic_write_indirect " + "error register %x\n", + vioapic->ioregsel); break; } - vioapic_write_redirent( - vioapic, redir_index, vioapic->ioregsel&1, val); + vioapic_write_redirent(vioapic, redir_index, vioapic->ioregsel & 1, + val); break; } } } -static int vioapic_write( - struct vcpu *v, unsigned long addr, - unsigned int length, unsigned long val) +static int vioapic_write(struct vcpu *v, unsigned long addr, + unsigned int length, unsigned long val) { struct hvm_vioapic *vioapic; vioapic = addr_vioapic(v->domain, addr); ASSERT(vioapic); - switch ( addr & 0xff ) + switch (addr & 0xff) { case VIOAPIC_REG_SELECT: vioapic->ioregsel = val; @@ -370,23 +367,16 @@ static int vioapic_range(struct vcpu *v, unsigned long addr) } static const struct hvm_mmio_ops vioapic_mmio_ops = { - .check = vioapic_range, - .read = vioapic_read, - .write = vioapic_write -}; - -static void ioapic_inj_irq( - struct hvm_vioapic *vioapic, - struct vlapic *target, - uint8_t vector, - uint8_t trig_mode, - uint8_t delivery_mode) + .check = vioapic_range, .read = vioapic_read, .write = vioapic_write}; + +static void ioapic_inj_irq(struct hvm_vioapic *vioapic, struct vlapic *target, + uint8_t vector, uint8_t trig_mode, + uint8_t delivery_mode) { - HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %d trig %d deliv %d", - vector, trig_mode, delivery_mode); + HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %d trig %d deliv %d", vector, trig_mode, + delivery_mode); - ASSERT((delivery_mode == dest_Fixed) || - (delivery_mode == dest_LowestPrio)); + ASSERT((delivery_mode == dest_Fixed) || (delivery_mode == dest_LowestPrio)); vlapic_set_irq(target, vector, trig_mode); } @@ -415,7 +405,7 @@ static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin) "vector=%x trig_mode=%x", dest, dest_mode, delivery_mode, vector, trig_mode); - switch ( delivery_mode ) + switch (delivery_mode) { case dest_LowestPrio: { @@ -435,7 +425,8 @@ static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin) } else { - HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: " + HVM_DBG_LOG(DBG_LEVEL_IOAPIC, + "null round robin: " "vector=%x delivery_mode=%x", vector, dest_LowestPrio); } @@ -449,26 +440,25 @@ static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin) if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() ) { if ( (v = d->vcpu ? 
d->vcpu[0] : NULL) != NULL ) - ioapic_inj_irq(vioapic, vcpu_vlapic(v), vector, - trig_mode, delivery_mode); + ioapic_inj_irq(vioapic, vcpu_vlapic(v), vector, trig_mode, + delivery_mode); } else #endif { - for_each_vcpu ( d, v ) - if ( vlapic_match_dest(vcpu_vlapic(v), NULL, - 0, dest, dest_mode) ) - ioapic_inj_irq(vioapic, vcpu_vlapic(v), vector, - trig_mode, delivery_mode); + for_each_vcpu (d, v) + if ( vlapic_match_dest(vcpu_vlapic(v), NULL, 0, dest, + dest_mode) ) + ioapic_inj_irq(vioapic, vcpu_vlapic(v), vector, trig_mode, + delivery_mode); } break; } case dest_NMI: { - for_each_vcpu ( d, v ) - if ( vlapic_match_dest(vcpu_vlapic(v), NULL, - 0, dest, dest_mode) && + for_each_vcpu (d, v) + if ( vlapic_match_dest(vcpu_vlapic(v), NULL, 0, dest, dest_mode) && !test_and_set_bool(v->nmi_pending) ) vcpu_kick(v); break; @@ -665,7 +655,7 @@ static void vioapic_free(const struct domain *d, unsigned int nr_vioapics) { unsigned int i; - for ( i = 0; i < nr_vioapics; i++) + for ( i = 0; i < nr_vioapics; i++ ) xfree(domain_vioapic(d, i)); xfree(d->arch.hvm.vioapic); } @@ -684,7 +674,7 @@ int vioapic_init(struct domain *d) if ( (d->arch.hvm.vioapic == NULL) && ((d->arch.hvm.vioapic = - xzalloc_array(struct hvm_vioapic *, nr_vioapics)) == NULL) ) + xzalloc_array(struct hvm_vioapic *, nr_vioapics)) == NULL) ) return -ENOMEM; for ( i = 0; i < nr_vioapics; i++ ) @@ -703,7 +693,7 @@ int vioapic_init(struct domain *d) } if ( (domain_vioapic(d, i) = - xmalloc_bytes(hvm_vioapic_size(nr_pins))) == NULL ) + xmalloc_bytes(hvm_vioapic_size(nr_pins))) == NULL ) { vioapic_free(d, nr_vioapics); return -ENOMEM; diff --git a/xen/arch/x86/hvm/viridian/synic.c b/xen/arch/x86/hvm/viridian/synic.c index a6ebbbc9f5..1d906fe79a 100644 --- a/xen/arch/x86/hvm/viridian/synic.c +++ b/xen/arch/x86/hvm/viridian/synic.c @@ -18,12 +18,11 @@ typedef struct _HV_VIRTUAL_APIC_ASSIST { - uint32_t no_eoi:1; - uint32_t reserved_zero:31; + uint32_t no_eoi : 1; + uint32_t reserved_zero : 31; } HV_VIRTUAL_APIC_ASSIST; -typedef union _HV_VP_ASSIST_PAGE -{ +typedef union _HV_VP_ASSIST_PAGE { HV_VIRTUAL_APIC_ASSIST ApicAssist; uint8_t ReservedZBytePadding[PAGE_SIZE]; } HV_VP_ASSIST_PAGE; @@ -54,8 +53,7 @@ bool viridian_apic_assist_completed(struct vcpu *v) if ( !ptr ) return false; - if ( v->arch.hvm.viridian.apic_assist_pending && - !ptr->ApicAssist.no_eoi ) + if ( v->arch.hvm.viridian.apic_assist_pending && !ptr->ApicAssist.no_eoi ) { /* An EOI has been avoided */ v->arch.hvm.viridian.apic_assist_pending = false; @@ -78,7 +76,7 @@ void viridian_apic_assist_clear(struct vcpu *v) int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val) { - switch ( idx ) + switch (idx) { case HV_X64_MSR_EOI: vlapic_EOI_set(vcpu_vlapic(v)); @@ -104,7 +102,7 @@ int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val) break; default: - gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x (%016"PRIx64")\n", + gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x (%016" PRIx64 ")\n", __func__, idx, val); return X86EMUL_EXCEPTION; } @@ -114,7 +112,7 @@ int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val) int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val) { - switch ( idx ) + switch (idx) { case HV_X64_MSR_EOI: return X86EMUL_EXCEPTION; @@ -150,8 +148,8 @@ void viridian_synic_save_vcpu_ctxt(const struct vcpu *v, ctxt->vp_assist_msr = v->arch.hvm.viridian.vp_assist.msr.raw; } -void viridian_synic_load_vcpu_ctxt( - struct vcpu *v, const struct hvm_viridian_vcpu_context *ctxt) +void 
viridian_synic_load_vcpu_ctxt(struct vcpu *v, + const struct hvm_viridian_vcpu_context *ctxt) { v->arch.hvm.viridian.vp_assist.msr.raw = ctxt->vp_assist_msr; if ( v->arch.hvm.viridian.vp_assist.msr.fields.enabled ) diff --git a/xen/arch/x86/hvm/viridian/time.c b/xen/arch/x86/hvm/viridian/time.c index 840a82b457..1cd1cdcc3b 100644 --- a/xen/arch/x86/hvm/viridian/time.c +++ b/xen/arch/x86/hvm/viridian/time.c @@ -21,7 +21,7 @@ typedef struct _HV_REFERENCE_TSC_PAGE uint32_t TscSequence; uint32_t Reserved1; uint64_t TscScale; - int64_t TscOffset; + int64_t TscOffset; uint64_t Reserved2[509]; } HV_REFERENCE_TSC_PAGE, *PHV_REFERENCE_TSC_PAGE; @@ -46,7 +46,7 @@ static void update_reference_tsc(struct domain *d, bool initialize) { if ( page ) put_page(page); - gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", + gdprintk(XENLOG_WARNING, "Bad GMFN %#" PRI_gfn " (MFN %#" PRI_mfn ")\n", gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN)); return; } @@ -101,7 +101,7 @@ static void update_reference_tsc(struct domain *d, bool initialize) p->TscSequence == 0 ) /* Avoid both 'invalid' values */ p->TscSequence = 1; - out: +out: unmap_domain_page(p); put_page_and_type(page); @@ -135,8 +135,7 @@ void viridian_time_ref_count_thaw(struct domain *d) trc = &d->arch.hvm.viridian.time_ref_count; - if ( !d->is_shutting_down && - !test_and_set_bit(_TRC_running, &trc->flags) ) + if ( !d->is_shutting_down && !test_and_set_bit(_TRC_running, &trc->flags) ) trc->off = (int64_t)trc->val - raw_trc_val(d); } @@ -144,7 +143,7 @@ int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val) { struct domain *d = v->domain; - switch ( idx ) + switch (idx) { case HV_X64_MSR_REFERENCE_TSC: if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) ) @@ -157,7 +156,7 @@ int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val) break; default: - gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x (%016"PRIx64")\n", + gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x (%016" PRIx64 ")\n", __func__, idx, val); return X86EMUL_EXCEPTION; } @@ -169,7 +168,7 @@ int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val) { struct domain *d = v->domain; - switch ( idx ) + switch (idx) { case HV_X64_MSR_TSC_FREQUENCY: if ( viridian_feature_mask(d) & HVMPV_no_freq ) @@ -216,8 +215,8 @@ int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val) return X86EMUL_OKAY; } -void viridian_time_save_domain_ctxt( - const struct domain *d, struct hvm_viridian_domain_context *ctxt) +void viridian_time_save_domain_ctxt(const struct domain *d, + struct hvm_viridian_domain_context *ctxt) { ctxt->time_ref_count = d->arch.hvm.viridian.time_ref_count.val; ctxt->reference_tsc = d->arch.hvm.viridian.reference_tsc.raw; diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c index 425af56856..4997699294 100644 --- a/xen/arch/x86/hvm/viridian/viridian.c +++ b/xen/arch/x86/hvm/viridian/viridian.c @@ -20,70 +20,70 @@ #include "private.h" /* Viridian Hypercall Status Codes. */ -#define HV_STATUS_SUCCESS 0x0000 -#define HV_STATUS_INVALID_HYPERCALL_CODE 0x0002 -#define HV_STATUS_INVALID_PARAMETER 0x0005 +#define HV_STATUS_SUCCESS 0x0000 +#define HV_STATUS_INVALID_HYPERCALL_CODE 0x0002 +#define HV_STATUS_INVALID_PARAMETER 0x0005 /* Viridian Hypercall Codes. 
*/ #define HvFlushVirtualAddressSpace 0x0002 -#define HvFlushVirtualAddressList 0x0003 -#define HvNotifyLongSpinWait 0x0008 -#define HvGetPartitionId 0x0046 +#define HvFlushVirtualAddressList 0x0003 +#define HvNotifyLongSpinWait 0x0008 +#define HvGetPartitionId 0x0046 #define HvExtCallQueryCapabilities 0x8001 /* Viridian Hypercall Flags. */ #define HV_FLUSH_ALL_PROCESSORS 1 /* Viridian Partition Privilege Flags */ -typedef struct { +typedef struct +{ /* Access to virtual MSRs */ - uint64_t AccessVpRunTimeReg:1; - uint64_t AccessPartitionReferenceCounter:1; - uint64_t AccessSynicRegs:1; - uint64_t AccessSyntheticTimerRegs:1; - uint64_t AccessIntrCtrlRegs:1; - uint64_t AccessHypercallMsrs:1; - uint64_t AccessVpIndex:1; - uint64_t AccessResetReg:1; - uint64_t AccessStatsReg:1; - uint64_t AccessPartitionReferenceTsc:1; - uint64_t AccessGuestIdleReg:1; - uint64_t AccessFrequencyRegs:1; - uint64_t AccessDebugRegs:1; - uint64_t Reserved1:19; + uint64_t AccessVpRunTimeReg : 1; + uint64_t AccessPartitionReferenceCounter : 1; + uint64_t AccessSynicRegs : 1; + uint64_t AccessSyntheticTimerRegs : 1; + uint64_t AccessIntrCtrlRegs : 1; + uint64_t AccessHypercallMsrs : 1; + uint64_t AccessVpIndex : 1; + uint64_t AccessResetReg : 1; + uint64_t AccessStatsReg : 1; + uint64_t AccessPartitionReferenceTsc : 1; + uint64_t AccessGuestIdleReg : 1; + uint64_t AccessFrequencyRegs : 1; + uint64_t AccessDebugRegs : 1; + uint64_t Reserved1 : 19; /* Access to hypercalls */ - uint64_t CreatePartitions:1; - uint64_t AccessPartitionId:1; - uint64_t AccessMemoryPool:1; - uint64_t AdjustMessageBuffers:1; - uint64_t PostMessages:1; - uint64_t SignalEvents:1; - uint64_t CreatePort:1; - uint64_t ConnectPort:1; - uint64_t AccessStats:1; - uint64_t Reserved2:2; - uint64_t Debugging:1; - uint64_t CpuManagement:1; - uint64_t Reserved3:1; - uint64_t Reserved4:1; - uint64_t Reserved5:1; - uint64_t AccessVSM:1; - uint64_t AccessVpRegisters:1; - uint64_t Reserved6:1; - uint64_t Reserved7:1; - uint64_t EnableExtendedHypercalls:1; - uint64_t StartVirtualProcessor:1; - uint64_t Reserved8:10; + uint64_t CreatePartitions : 1; + uint64_t AccessPartitionId : 1; + uint64_t AccessMemoryPool : 1; + uint64_t AdjustMessageBuffers : 1; + uint64_t PostMessages : 1; + uint64_t SignalEvents : 1; + uint64_t CreatePort : 1; + uint64_t ConnectPort : 1; + uint64_t AccessStats : 1; + uint64_t Reserved2 : 2; + uint64_t Debugging : 1; + uint64_t CpuManagement : 1; + uint64_t Reserved3 : 1; + uint64_t Reserved4 : 1; + uint64_t Reserved5 : 1; + uint64_t AccessVSM : 1; + uint64_t AccessVpRegisters : 1; + uint64_t Reserved6 : 1; + uint64_t Reserved7 : 1; + uint64_t EnableExtendedHypercalls : 1; + uint64_t StartVirtualProcessor : 1; + uint64_t Reserved8 : 10; } HV_PARTITION_PRIVILEGE_MASK; -typedef union _HV_CRASH_CTL_REG_CONTENTS -{ +typedef union _HV_CRASH_CTL_REG_CONTENTS { uint64_t AsUINT64; struct { - uint64_t Reserved:63; - uint64_t CrashNotify:1; + uint64_t Reserved : 63; + uint64_t CrashNotify : 1; } u; } HV_CRASH_CTL_REG_CONTENTS; @@ -92,13 +92,13 @@ typedef union _HV_CRASH_CTL_REG_CONTENTS /* Viridian CPUID leaf 4: Implementation Recommendations. 
*/ #define CPUID4A_HCALL_REMOTE_TLB_FLUSH (1 << 2) -#define CPUID4A_MSR_BASED_APIC (1 << 3) -#define CPUID4A_RELAX_TIMER_INT (1 << 5) +#define CPUID4A_MSR_BASED_APIC (1 << 3) +#define CPUID4A_RELAX_TIMER_INT (1 << 5) /* Viridian CPUID leaf 6: Implementation HW features detected and in use */ -#define CPUID6A_APIC_OVERLAY (1 << 0) -#define CPUID6A_MSR_BITMAPS (1 << 1) -#define CPUID6A_NESTED_PAGING (1 << 3) +#define CPUID6A_APIC_OVERLAY (1 << 0) +#define CPUID6A_MSR_BITMAPS (1 << 1) +#define CPUID6A_NESTED_PAGING (1 << 3) /* * Version and build number reported by CPUID leaf 2 @@ -115,8 +115,7 @@ static uint32_t __read_mostly viridian_build = 0x1772; * to acquire a spinlock. */ static uint32_t __read_mostly viridian_spinlock_retry_count = 2047; -integer_param("viridian-spinlock-retry-count", - viridian_spinlock_retry_count); +integer_param("viridian-spinlock-retry-count", viridian_spinlock_retry_count); void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf, uint32_t subleaf, struct cpuid_leaf *res) @@ -128,7 +127,7 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf, leaf -= 0x40000000; - switch ( leaf ) + switch (leaf) { case 0: res->a = 0x40000006; /* Maximum leaf */ @@ -168,7 +167,10 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf, }; union { HV_PARTITION_PRIVILEGE_MASK mask; - struct { uint32_t lo, hi; }; + struct + { + uint32_t lo, hi; + }; } u; if ( !(viridian_feature_mask(d) & HVMPV_no_freq) ) @@ -226,12 +228,11 @@ static void dump_guest_os_id(const struct domain *d) goi = &d->arch.hvm.viridian.guest_os_id; - printk(XENLOG_G_INFO - "d%d: VIRIDIAN GUEST_OS_ID: vendor: %x os: %x major: %x minor: %x sp: %x build: %x\n", - d->domain_id, - goi->fields.vendor, goi->fields.os, - goi->fields.major, goi->fields.minor, - goi->fields.service_pack, goi->fields.build_number); + printk(XENLOG_G_INFO "d%d: VIRIDIAN GUEST_OS_ID: vendor: %x os: %x major: " + "%x minor: %x sp: %x build: %x\n", + d->domain_id, goi->fields.vendor, goi->fields.os, goi->fields.major, + goi->fields.minor, goi->fields.service_pack, + goi->fields.build_number); } static void dump_hypercall(const struct domain *d) @@ -241,8 +242,7 @@ static void dump_hypercall(const struct domain *d) hg = &d->arch.hvm.viridian.hypercall_gpa; printk(XENLOG_G_INFO "d%d: VIRIDIAN HYPERCALL: enabled: %x pfn: %lx\n", - d->domain_id, - hg->fields.enabled, (unsigned long)hg->fields.pfn); + d->domain_id, hg->fields.enabled, (unsigned long)hg->fields.pfn); } static void enable_hypercall_page(struct domain *d) @@ -255,7 +255,7 @@ static void enable_hypercall_page(struct domain *d) { if ( page ) put_page(page); - gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", + gdprintk(XENLOG_WARNING, "Bad GMFN %#" PRI_gfn " (MFN %#" PRI_mfn ")\n", gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN)); return; } @@ -266,12 +266,12 @@ static void enable_hypercall_page(struct domain *d) * We set the bit 31 in %eax (reserved field in the Viridian hypercall * calling convention) to differentiate Xen and Viridian hypercalls. */ - *(u8 *)(p + 0) = 0x0d; /* orl $0x80000000, %eax */ + *(u8 *)(p + 0) = 0x0d; /* orl $0x80000000, %eax */ *(u32 *)(p + 1) = 0x80000000; - *(u8 *)(p + 5) = 0x0f; /* vmcall/vmmcall */ - *(u8 *)(p + 6) = 0x01; - *(u8 *)(p + 7) = (cpu_has_vmx ? 0xc1 : 0xd9); - *(u8 *)(p + 8) = 0xc3; /* ret */ + *(u8 *)(p + 5) = 0x0f; /* vmcall/vmmcall */ + *(u8 *)(p + 6) = 0x01; + *(u8 *)(p + 7) = (cpu_has_vmx ? 
0xc1 : 0xd9); + *(u8 *)(p + 8) = 0xc3; /* ret */ memset(p + 9, 0xcc, PAGE_SIZE - 9); /* int3, int3, ... */ unmap_domain_page(p); @@ -285,7 +285,7 @@ int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val) ASSERT(is_viridian_domain(d)); - switch ( idx ) + switch (idx) { case HV_X64_MSR_GUEST_OS_ID: d->arch.hvm.viridian.guest_os_id.raw = val; @@ -346,9 +346,8 @@ int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val) } default: - gdprintk(XENLOG_INFO, - "Write %016"PRIx64" to unimplemented MSR %#x\n", val, - idx); + gdprintk(XENLOG_INFO, "Write %016" PRIx64 " to unimplemented MSR %#x\n", + val, idx); return X86EMUL_EXCEPTION; } @@ -361,7 +360,7 @@ int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val) ASSERT(is_viridian_domain(d)); - switch ( idx ) + switch (idx) { case HV_X64_MSR_GUEST_OS_ID: *val = d->arch.hvm.viridian.guest_os_id.raw; @@ -426,7 +425,7 @@ void viridian_domain_deinit(struct domain *d) { struct vcpu *v; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) viridian_vcpu_deinit(v); } @@ -451,30 +450,32 @@ int viridian_hypercall(struct cpu_user_regs *regs) union hypercall_input { uint64_t raw; - struct { + struct + { uint16_t call_code; - uint16_t fast:1; - uint16_t rsvd1:15; - uint16_t rep_count:12; - uint16_t rsvd2:4; - uint16_t rep_start:12; - uint16_t rsvd3:4; + uint16_t fast : 1; + uint16_t rsvd1 : 15; + uint16_t rep_count : 12; + uint16_t rsvd2 : 4; + uint16_t rep_start : 12; + uint16_t rsvd3 : 4; }; } input; union hypercall_output { uint64_t raw; - struct { + struct + { uint16_t result; uint16_t rsvd1; - uint32_t rep_complete:12; - uint32_t rsvd2:20; + uint32_t rep_complete : 12; + uint32_t rsvd2 : 20; }; - } output = { 0 }; + } output = {0}; ASSERT(is_viridian_domain(currd)); - switch ( mode ) + switch (mode) { case 8: input.raw = regs->rcx; @@ -490,7 +491,7 @@ int viridian_hypercall(struct cpu_user_regs *regs) goto out; } - switch ( input.call_code ) + switch (input.call_code) { case HvNotifyLongSpinWait: /* @@ -503,7 +504,8 @@ int viridian_hypercall(struct cpu_user_regs *regs) case HvFlushVirtualAddressSpace: case HvFlushVirtualAddressList: { - struct { + struct + { uint64_t address_space; uint64_t flags; uint64_t vcpu_mask; @@ -516,8 +518,7 @@ int viridian_hypercall(struct cpu_user_regs *regs) /* Get input parameters. */ if ( hvm_copy_from_guest_phys(&input_params, input_params_gpa, - sizeof(input_params)) != - HVMTRANS_okay ) + sizeof(input_params)) != HVMTRANS_okay ) break; /* @@ -558,7 +559,8 @@ int viridian_hypercall(struct cpu_user_regs *regs) out: output.result = status; - switch (mode) { + switch (mode) + { case 8: regs->rax = output.raw; break; @@ -577,8 +579,8 @@ void viridian_dump_guest_page(const struct vcpu *v, const char *name, if ( !vp->msr.fields.enabled ) return; - printk(XENLOG_G_INFO "%pv: VIRIDIAN %s: pfn: %lx\n", - v, name, (unsigned long)vp->msr.fields.pfn); + printk(XENLOG_G_INFO "%pv: VIRIDIAN %s: pfn: %lx\n", v, name, + (unsigned long)vp->msr.fields.pfn); } void viridian_map_guest_page(struct vcpu *v, struct viridian_page *vp) @@ -610,8 +612,8 @@ void viridian_map_guest_page(struct vcpu *v, struct viridian_page *vp) clear_page(vp->ptr); return; - fail: - gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", +fail: + gdprintk(XENLOG_WARNING, "Bad GMFN %#" PRI_gfn " (MFN %#" PRI_mfn ")\n", gmfn, mfn_x(page ? 
page_to_mfn(page) : INVALID_MFN)); } @@ -630,13 +632,12 @@ void viridian_unmap_guest_page(struct viridian_page *vp) put_page_and_type(page); } -static int viridian_save_domain_ctxt(struct vcpu *v, - hvm_domain_context_t *h) +static int viridian_save_domain_ctxt(struct vcpu *v, hvm_domain_context_t *h) { const struct domain *d = v->domain; struct hvm_viridian_domain_context ctxt = { - .hypercall_gpa = d->arch.hvm.viridian.hypercall_gpa.raw, - .guest_os_id = d->arch.hvm.viridian.guest_os_id.raw, + .hypercall_gpa = d->arch.hvm.viridian.hypercall_gpa.raw, + .guest_os_id = d->arch.hvm.viridian.guest_os_id.raw, }; if ( !is_viridian_domain(d) ) @@ -647,16 +648,15 @@ static int viridian_save_domain_ctxt(struct vcpu *v, return (hvm_save_entry(VIRIDIAN_DOMAIN, 0, h, &ctxt) != 0); } -static int viridian_load_domain_ctxt(struct domain *d, - hvm_domain_context_t *h) +static int viridian_load_domain_ctxt(struct domain *d, hvm_domain_context_t *h) { struct hvm_viridian_domain_context ctxt; if ( hvm_load_entry_zeroextend(VIRIDIAN_DOMAIN, h, &ctxt) != 0 ) return -EINVAL; - d->arch.hvm.viridian.hypercall_gpa.raw = ctxt.hypercall_gpa; - d->arch.hvm.viridian.guest_os_id.raw = ctxt.guest_os_id; + d->arch.hvm.viridian.hypercall_gpa.raw = ctxt.hypercall_gpa; + d->arch.hvm.viridian.guest_os_id.raw = ctxt.guest_os_id; viridian_time_load_domain_ctxt(d, &ctxt); @@ -678,8 +678,7 @@ static int viridian_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h) return hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt); } -static int viridian_load_vcpu_ctxt(struct domain *d, - hvm_domain_context_t *h) +static int viridian_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h) { unsigned int vcpuid = hvm_load_instance(h); struct vcpu *v; @@ -746,8 +745,8 @@ static int __init parse_viridian_version(const char *arg) viridian_minor = n[1]; viridian_build = n[2]; - printk("viridian-version = %#x,%#x,%#x\n", - viridian_major, viridian_minor, viridian_build); + printk("viridian-version = %#x,%#x,%#x\n", viridian_major, viridian_minor, + viridian_build); return 0; } custom_param("viridian-version", parse_viridian_version); diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c index a1a43cd792..cda32ba0ed 100644 --- a/xen/arch/x86/hvm/vlapic.c +++ b/xen/arch/x86/hvm/vlapic.c @@ -41,47 +41,44 @@ #include #include -#define VLAPIC_VERSION 0x00050014 -#define VLAPIC_LVT_NUM 6 - -#define LVT_MASK \ - (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK) - -#define LINT_MASK \ - (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY |\ - APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER) - -static const unsigned int vlapic_lvt_mask[VLAPIC_LVT_NUM] = -{ - /* LVTT */ - LVT_MASK | APIC_TIMER_MODE_MASK, - /* LVTTHMR */ - LVT_MASK | APIC_MODE_MASK, - /* LVTPC */ - LVT_MASK | APIC_MODE_MASK, - /* LVT0-1 */ - LINT_MASK, LINT_MASK, - /* LVTERR */ - LVT_MASK -}; - -#define vlapic_lvt_vector(vlapic, lvt_type) \ +#define VLAPIC_VERSION 0x00050014 +#define VLAPIC_LVT_NUM 6 + +#define LVT_MASK (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK) + +#define LINT_MASK \ + (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | \ + APIC_LVT_LEVEL_TRIGGER) + +static const unsigned int vlapic_lvt_mask[VLAPIC_LVT_NUM] = { + /* LVTT */ + LVT_MASK | APIC_TIMER_MODE_MASK, + /* LVTTHMR */ + LVT_MASK | APIC_MODE_MASK, + /* LVTPC */ + LVT_MASK | APIC_MODE_MASK, + /* LVT0-1 */ + LINT_MASK, LINT_MASK, + /* LVTERR */ + LVT_MASK}; + +#define vlapic_lvt_vector(vlapic, lvt_type) \ (vlapic_get_reg(vlapic, lvt_type) & APIC_VECTOR_MASK) 
-#define vlapic_lvt_dm(vlapic, lvt_type) \ +#define vlapic_lvt_dm(vlapic, lvt_type) \ (vlapic_get_reg(vlapic, lvt_type) & APIC_MODE_MASK) -#define vlapic_lvtt_period(vlapic) \ - ((vlapic_get_reg(vlapic, APIC_LVTT) & APIC_TIMER_MODE_MASK) \ - == APIC_TIMER_MODE_PERIODIC) +#define vlapic_lvtt_period(vlapic) \ + ((vlapic_get_reg(vlapic, APIC_LVTT) & APIC_TIMER_MODE_MASK) == \ + APIC_TIMER_MODE_PERIODIC) -#define vlapic_lvtt_oneshot(vlapic) \ - ((vlapic_get_reg(vlapic, APIC_LVTT) & APIC_TIMER_MODE_MASK) \ - == APIC_TIMER_MODE_ONESHOT) +#define vlapic_lvtt_oneshot(vlapic) \ + ((vlapic_get_reg(vlapic, APIC_LVTT) & APIC_TIMER_MODE_MASK) == \ + APIC_TIMER_MODE_ONESHOT) -#define vlapic_lvtt_tdt(vlapic) \ - ((vlapic_get_reg(vlapic, APIC_LVTT) & APIC_TIMER_MODE_MASK) \ - == APIC_TIMER_MODE_TSC_DEADLINE) +#define vlapic_lvtt_tdt(vlapic) \ + ((vlapic_get_reg(vlapic, APIC_LVTT) & APIC_TIMER_MODE_MASK) == \ + APIC_TIMER_MODE_TSC_DEADLINE) static void vlapic_do_init(struct vlapic *vlapic); @@ -91,10 +88,10 @@ static int vlapic_find_highest_vector(const void *bitmap) unsigned int word_offset = NR_VECTORS / 32; /* Work backwards through the bitmap (first 32-bit word in every four). */ - while ( (word_offset != 0) && (word[(--word_offset)*4] == 0) ) + while ( (word_offset != 0) && (word[(--word_offset) * 4] == 0) ) continue; - return (fls(word[word_offset*4]) - 1) + (word_offset * 32); + return (fls(word[word_offset * 4]) - 1) + (word_offset * 32); } /* @@ -183,8 +180,8 @@ static uint32_t vlapic_get_ppr(const struct vlapic *vlapic) uint32_t tpr, isrv, ppr; int isr; - tpr = vlapic_get_reg(vlapic, APIC_TASKPRI); - isr = vlapic_find_highest_isr(vlapic); + tpr = vlapic_get_reg(vlapic, APIC_TASKPRI); + isr = vlapic_find_highest_isr(vlapic); isrv = (isr != -1) ? isr : 0; if ( (tpr & 0xf0) >= (isrv & 0xf0) ) @@ -193,18 +190,18 @@ static uint32_t vlapic_get_ppr(const struct vlapic *vlapic) ppr = isrv & 0xf0; HVM_DBG_LOG(DBG_LEVEL_VLAPIC_INTERRUPT, - "vlapic %p, ppr %#x, isr %#x, isrv %#x", - vlapic, ppr, isr, isrv); + "vlapic %p, ppr %#x, isr %#x, isrv %#x", vlapic, ppr, isr, + isrv); return ppr; } uint32_t vlapic_set_ppr(struct vlapic *vlapic) { - uint32_t ppr = vlapic_get_ppr(vlapic); + uint32_t ppr = vlapic_get_ppr(vlapic); - vlapic_set_reg(vlapic, APIC_PROCPRI, ppr); - return ppr; + vlapic_set_reg(vlapic, APIC_PROCPRI, ppr); + return ppr; } static bool_t vlapic_match_logical_addr(const struct vlapic *vlapic, @@ -220,7 +217,7 @@ static bool_t vlapic_match_logical_addr(const struct vlapic *vlapic, logical_id = GET_xAPIC_LOGICAL_ID(logical_id); mda = (uint8_t)mda; - switch ( vlapic_get_reg(vlapic, APIC_DFR) ) + switch (vlapic_get_reg(vlapic, APIC_DFR)) { case APIC_DFR_FLAT: if ( logical_id & mda ) @@ -232,23 +229,23 @@ static bool_t vlapic_match_logical_addr(const struct vlapic *vlapic, break; default: printk(XENLOG_G_WARNING "%pv: bad LAPIC DFR value %08x\n", - const_vlapic_vcpu(vlapic), - vlapic_get_reg(vlapic, APIC_DFR)); + const_vlapic_vcpu(vlapic), vlapic_get_reg(vlapic, APIC_DFR)); break; } return result; } -bool_t vlapic_match_dest( - const struct vlapic *target, const struct vlapic *source, - int short_hand, uint32_t dest, bool_t dest_mode) +bool_t vlapic_match_dest(const struct vlapic *target, + const struct vlapic *source, int short_hand, + uint32_t dest, bool_t dest_mode) { - HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "target %p, source %p, dest %#x, " + HVM_DBG_LOG(DBG_LEVEL_VLAPIC, + "target %p, source %p, dest %#x, " "dest_mode %#x, short_hand %#x", target, source, dest, dest_mode, short_hand); - switch ( 
short_hand ) + switch (short_hand) { case APIC_DEST_NOSHORT: if ( dest_mode ) @@ -277,9 +274,10 @@ static void vlapic_init_sipi_one(struct vcpu *target, uint32_t icr) { vcpu_pause(target); - switch ( icr & APIC_MODE_MASK ) + switch (icr & APIC_MODE_MASK) + { + case APIC_DM_INIT: { - case APIC_DM_INIT: { bool_t fpu_initialised; int rc; @@ -302,7 +300,8 @@ static void vlapic_init_sipi_one(struct vcpu *target, uint32_t icr) break; } - case APIC_DM_STARTUP: { + case APIC_DM_STARTUP: + { uint16_t reset_cs = (icr & 0xffu) << 8; hvm_vcpu_reset_state(target, reset_cs, 0); break; @@ -327,10 +326,10 @@ static void vlapic_init_sipi_action(unsigned long _vcpu) if ( icr == 0 ) return; - for_each_vcpu ( origin->domain, v ) + for_each_vcpu (origin->domain, v) { - if ( vlapic_match_dest(vcpu_vlapic(v), vcpu_vlapic(origin), - short_hand, dest, dest_mode) ) + if ( vlapic_match_dest(vcpu_vlapic(v), vcpu_vlapic(origin), short_hand, + dest, dest_mode) ) vlapic_init_sipi_one(v, icr); } @@ -344,7 +343,7 @@ static void vlapic_accept_irq(struct vcpu *v, uint32_t icr_low) struct vlapic *vlapic = vcpu_vlapic(v); uint8_t vector = (uint8_t)icr_low; - switch ( icr_low & APIC_MODE_MASK ) + switch (icr_low & APIC_MODE_MASK) { case APIC_DM_FIXED: case APIC_DM_LOWEST: @@ -385,9 +384,9 @@ static void vlapic_accept_irq(struct vcpu *v, uint32_t icr_low) } } -struct vlapic *vlapic_lowest_prio( - struct domain *d, const struct vlapic *source, - int short_hand, uint32_t dest, bool_t dest_mode) +struct vlapic *vlapic_lowest_prio(struct domain *d, const struct vlapic *source, + int short_hand, uint32_t dest, + bool_t dest_mode) { int old = hvm_domain_irq(d)->round_robin_prev_vcpu; uint32_t ppr, target_ppr = UINT_MAX; @@ -398,7 +397,7 @@ struct vlapic *vlapic_lowest_prio( return NULL; do { - v = v->next_in_list ? : d->vcpu[0]; + v = v->next_in_list ?: d->vcpu[0]; vlapic = vcpu_vlapic(v); if ( vlapic_match_dest(vlapic, source, short_hand, dest, dest_mode) && vlapic_enabled(vlapic) && @@ -410,8 +409,7 @@ struct vlapic *vlapic_lowest_prio( } while ( v->vcpu_id != old ); if ( target != NULL ) - hvm_domain_irq(d)->round_robin_prev_vcpu = - vlapic_vcpu(target)->vcpu_id; + hvm_domain_irq(d)->round_robin_prev_vcpu = vlapic_vcpu(target)->vcpu_id; return target; } @@ -429,7 +427,7 @@ void vlapic_EOI_set(struct vlapic *vlapic) bool missed_eoi = viridian_apic_assist_completed(v); int vector; - again: +again: vector = vlapic_find_highest_isr(vlapic); /* Some EOI writes may not have a matching to an in-service interrupt. */ @@ -482,15 +480,13 @@ static bool_t is_multicast_dest(struct vlapic *vlapic, unsigned int short_hand, return dest_mode ? 
hweight16(dest) > 1 : dest == 0xffffffff; if ( dest_mode ) - return hweight8(dest & - GET_xAPIC_DEST_FIELD(vlapic_get_reg(vlapic, - APIC_DFR))) > 1; + return hweight8(dest & GET_xAPIC_DEST_FIELD( + vlapic_get_reg(vlapic, APIC_DFR))) > 1; return dest == 0xff; } -void vlapic_ipi( - struct vlapic *vlapic, uint32_t icr_low, uint32_t icr_high) +void vlapic_ipi(struct vlapic *vlapic, uint32_t icr_low, uint32_t icr_high) { unsigned int dest; unsigned int short_hand = icr_low & APIC_SHORT_MASK; @@ -500,7 +496,7 @@ void vlapic_ipi( dest = _VLAPIC_ID(vlapic, icr_high); - switch ( icr_low & APIC_MODE_MASK ) + switch (icr_low & APIC_MODE_MASK) { case APIC_DM_INIT: case APIC_DM_STARTUP: @@ -515,7 +511,8 @@ void vlapic_ipi( tasklet_schedule(&vlapic->init_sipi.tasklet); break; - case APIC_DM_LOWEST: { + case APIC_DM_LOWEST: + { struct vlapic *target = vlapic_lowest_prio( vlapic_domain(vlapic), vlapic, short_hand, dest, dest_mode); @@ -533,16 +530,17 @@ void vlapic_ipi( break; } /* fall through */ - default: { + default: + { struct vcpu *v; bool_t batch = is_multicast_dest(vlapic, short_hand, dest, dest_mode); if ( batch ) cpu_raise_softirq_batch_begin(); - for_each_vcpu ( vlapic_domain(vlapic), v ) + for_each_vcpu (vlapic_domain(vlapic), v) { - if ( vlapic_match_dest(vcpu_vlapic(v), vlapic, - short_hand, dest, dest_mode) ) + if ( vlapic_match_dest(vcpu_vlapic(v), vlapic, short_hand, dest, + dest_mode) ) vlapic_accept_irq(v, icr_low); } if ( batch ) @@ -558,8 +556,8 @@ static uint32_t vlapic_get_tmcct(const struct vlapic *vlapic) uint32_t tmcct = 0, tmict = vlapic_get_reg(vlapic, APIC_TMICT); uint64_t counter_passed; - counter_passed = ((hvm_get_guest_time(v) - vlapic->timer_last_update) - / (APIC_BUS_CYCLE_NS * vlapic->hw.timer_divisor)); + counter_passed = ((hvm_get_guest_time(v) - vlapic->timer_last_update) / + (APIC_BUS_CYCLE_NS * vlapic->hw.timer_divisor)); /* If timer_last_update is 0, then TMCCT should return 0 as well. 
*/ if ( tmict && vlapic->timer_last_update ) @@ -572,7 +570,7 @@ static uint32_t vlapic_get_tmcct(const struct vlapic *vlapic) HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "timer initial count %d, timer current count %d, " - "offset %"PRId64, + "offset %" PRId64, tmict, tmcct, counter_passed); return tmcct; @@ -588,14 +586,14 @@ static void vlapic_set_tdcr(struct vlapic *vlapic, unsigned int val) val = ((val & 3) | ((val & 8) >> 1)) + 1; vlapic->hw.timer_divisor = 1 << (val & 7); - HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, - "timer_divisor: %d", vlapic->hw.timer_divisor); + HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "timer_divisor: %d", + vlapic->hw.timer_divisor); } static uint32_t vlapic_read_aligned(const struct vlapic *vlapic, unsigned int offset) { - switch ( offset ) + switch (offset) { case APIC_PROCPRI: return vlapic_get_ppr(vlapic); @@ -634,15 +632,23 @@ static int vlapic_mmio_read(struct vcpu *v, unsigned long address, { uint32_t reg = vlapic_read_aligned(vlapic, offset & ~0xf); - switch ( len ) + switch (len) { - case 1: result = (uint8_t) (reg >> (alignment * 8)); break; - case 2: result = (uint16_t)(reg >> (alignment * 8)); break; - case 4: result = reg; break; + case 1: + result = (uint8_t)(reg >> (alignment * 8)); + break; + case 2: + result = (uint16_t)(reg >> (alignment * 8)); + break; + case 4: + result = reg; + break; } - HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "offset %#x with length %#x, " - "and the result is %#x", offset, len, result); + HVM_DBG_LOG(DBG_LEVEL_VLAPIC, + "offset %#x with length %#x, " + "and the result is %#x", + offset, len, result); } *pval = result; @@ -652,14 +658,13 @@ static int vlapic_mmio_read(struct vcpu *v, unsigned long address, int guest_rdmsr_x2apic(const struct vcpu *v, uint32_t msr, uint64_t *val) { static const unsigned long readable[] = { -#define REG(x) (1UL << (APIC_ ## x >> 4)) - REG(ID) | REG(LVR) | REG(TASKPRI) | REG(PROCPRI) | - REG(LDR) | REG(SPIV) | REG(ESR) | REG(ICR) | - REG(CMCI) | REG(LVTT) | REG(LVTTHMR) | REG(LVTPC) | - REG(LVT0) | REG(LVT1) | REG(LVTERR) | REG(TMICT) | +#define REG(x) (1UL << (APIC_##x >> 4)) + REG(ID) | REG(LVR) | REG(TASKPRI) | REG(PROCPRI) | REG(LDR) | + REG(SPIV) | REG(ESR) | REG(ICR) | REG(CMCI) | REG(LVTT) | REG(LVTTHMR) | + REG(LVTPC) | REG(LVT0) | REG(LVT1) | REG(LVTERR) | REG(TMICT) | REG(TMCCT) | REG(TDCR) | #undef REG -#define REGBLOCK(x) (((1UL << (NR_VECTORS / 32)) - 1) << (APIC_ ## x >> 4)) +#define REGBLOCK(x) (((1UL << (NR_VECTORS / 32)) - 1) << (APIC_##x >> 4)) REGBLOCK(ISR) | REGBLOCK(TMR) | REGBLOCK(IRR) #undef REGBLOCK }; @@ -674,8 +679,8 @@ int guest_rdmsr_x2apic(const struct vcpu *v, uint32_t msr, uint64_t *val) */ ASSERT(v == current); - if ( !vlapic_x2apic_mode(vlapic) || - (reg >= sizeof(readable) * 8) || !test_bit(reg, readable) ) + if ( !vlapic_x2apic_mode(vlapic) || (reg >= sizeof(readable) * 8) || + !test_bit(reg, readable) ) return X86EMUL_EXCEPTION; if ( offset == APIC_ICR ) @@ -715,16 +720,16 @@ static void vlapic_update_timer(struct vlapic *vlapic, uint32_t lvtt, is_periodic = (lvtt & APIC_TIMER_MODE_MASK) == APIC_TIMER_MODE_PERIODIC; is_oneshot = (lvtt & APIC_TIMER_MODE_MASK) == APIC_TIMER_MODE_ONESHOT; - period = (uint64_t)vlapic_get_reg(vlapic, APIC_TMICT) - * APIC_BUS_CYCLE_NS * old_divisor; + period = (uint64_t)vlapic_get_reg(vlapic, APIC_TMICT) * APIC_BUS_CYCLE_NS * + old_divisor; /* Calculate the next time the timer should trigger an interrupt. 
*/ if ( tmict_updated ) delta = period; else if ( period && vlapic->timer_last_update ) { - uint64_t time_passed = hvm_get_guest_time(current) - - vlapic->timer_last_update; + uint64_t time_passed = + hvm_get_guest_time(current) - vlapic->timer_last_update; /* This depends of the previous mode, if a new mode is being set */ if ( vlapic_lvtt_period(vlapic) ) @@ -737,14 +742,13 @@ static void vlapic_update_timer(struct vlapic *vlapic, uint32_t lvtt, { if ( vlapic->hw.timer_divisor != old_divisor ) { - period = (uint64_t)vlapic_get_reg(vlapic, APIC_TMICT) - * APIC_BUS_CYCLE_NS * vlapic->hw.timer_divisor; + period = (uint64_t)vlapic_get_reg(vlapic, APIC_TMICT) * + APIC_BUS_CYCLE_NS * vlapic->hw.timer_divisor; delta = delta * vlapic->hw.timer_divisor / old_divisor; } TRACE_2_LONG_3D(TRC_HVM_EMUL_LAPIC_START_TIMER, TRC_PAR_LONG(delta), - TRC_PAR_LONG(is_periodic ? period : 0), - vlapic->pt.irq); + TRC_PAR_LONG(is_periodic ? period : 0), vlapic->pt.irq); create_periodic_time(current, &vlapic->pt, delta, is_periodic ? period : 0, vlapic->pt.irq, @@ -757,9 +761,8 @@ static void vlapic_update_timer(struct vlapic *vlapic, uint32_t lvtt, HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "bus cycle is %uns, " - "initial count %u, period %"PRIu64"ns", - APIC_BUS_CYCLE_NS, - vlapic_get_reg(vlapic, APIC_TMICT), + "initial count %u, period %" PRIu64 "ns", + APIC_BUS_CYCLE_NS, vlapic_get_reg(vlapic, APIC_TMICT), period); } else @@ -781,7 +784,7 @@ void vlapic_reg_write(struct vcpu *v, unsigned int reg, uint32_t val) memset(&vlapic->loaded, 0, sizeof(vlapic->loaded)); - switch ( reg ) + switch (reg) { case APIC_ID: vlapic_set_reg(vlapic, APIC_ID, val); @@ -837,9 +840,9 @@ void vlapic_reg_write(struct vcpu *v, unsigned int reg, uint32_t val) vlapic_set_reg(vlapic, APIC_ICR2, val & 0xff000000); break; - case APIC_LVTT: /* LVT Timer Reg */ + case APIC_LVTT: /* LVT Timer Reg */ if ( vlapic_lvtt_tdt(vlapic) != - ((val & APIC_TIMER_MODE_MASK) == APIC_TIMER_MODE_TSC_DEADLINE)) + ((val & APIC_TIMER_MODE_MASK) == APIC_TIMER_MODE_TSC_DEADLINE) ) { vlapic_set_reg(vlapic, APIC_TMICT, 0); vlapic->hw.tdt_msr = 0; @@ -849,11 +852,11 @@ void vlapic_reg_write(struct vcpu *v, unsigned int reg, uint32_t val) vlapic_update_timer(vlapic, val, false, vlapic->hw.timer_divisor); /* fallthrough */ - case APIC_LVTTHMR: /* LVT Thermal Monitor */ - case APIC_LVTPC: /* LVT Performance Counter */ - case APIC_LVT0: /* LVT LINT0 Reg */ - case APIC_LVT1: /* LVT Lint1 Reg */ - case APIC_LVTERR: /* LVT Error Reg */ + case APIC_LVTTHMR: /* LVT Thermal Monitor */ + case APIC_LVTPC: /* LVT Performance Counter */ + case APIC_LVT0: /* LVT LINT0 Reg */ + case APIC_LVT1: /* LVT Lint1 Reg */ + case APIC_LVTERR: /* LVT Error Reg */ if ( vlapic_sw_disabled(vlapic) ) val |= APIC_LVT_MASKED; val &= vlapic_lvt_mask[(reg - APIC_LVTT) >> 4]; @@ -905,8 +908,8 @@ static int vlapic_mmio_write(struct vcpu *v, unsigned long address, if ( offset != APIC_EOI ) HVM_DBG_LOG(DBG_LEVEL_VLAPIC, - "offset %#x with length %#x, and value is %#lx", - offset, len, val); + "offset %#x with length %#x, and value is %#lx", offset, + len, val); /* * APIC registers are 32-bit values, aligned on 128-bit boundaries, and @@ -923,16 +926,16 @@ static int vlapic_mmio_write(struct vcpu *v, unsigned long address, alignment *= 8; - switch ( len ) + switch (len) { case 1: val = ((reg & ~(0xffU << alignment)) | - ((val & 0xff) << alignment)); + ((val & 0xff) << alignment)); break; case 2: val = ((reg & ~(0xffffU << alignment)) | - ((val & 0xffff) << alignment)); + ((val & 0xffff) << alignment)); break; } } 
@@ -973,7 +976,7 @@ int guest_wrmsr_x2apic(struct vcpu *v, uint32_t msr, uint64_t msr_content) if ( !vlapic_x2apic_mode(vlapic) ) return X86EMUL_EXCEPTION; - switch ( offset ) + switch (offset) { case APIC_TASKPRI: if ( msr_content & ~APIC_TPRI_MASK ) @@ -981,9 +984,10 @@ int guest_wrmsr_x2apic(struct vcpu *v, uint32_t msr, uint64_t msr_content) break; case APIC_SPIV: - if ( msr_content & ~(APIC_VECTOR_MASK | APIC_SPIV_APIC_ENABLED | - (VLAPIC_VERSION & APIC_LVR_DIRECTED_EOI - ? APIC_SPIV_DIRECTED_EOI : 0)) ) + if ( msr_content & + ~(APIC_VECTOR_MASK | APIC_SPIV_APIC_ENABLED | + (VLAPIC_VERSION & APIC_LVR_DIRECTED_EOI ? APIC_SPIV_DIRECTED_EOI + : 0)) ) return X86EMUL_EXCEPTION; break; @@ -1019,9 +1023,9 @@ int guest_wrmsr_x2apic(struct vcpu *v, uint32_t msr, uint64_t msr_content) break; case APIC_ICR: - if ( (uint32_t)msr_content & ~(APIC_VECTOR_MASK | APIC_MODE_MASK | - APIC_DEST_MASK | APIC_INT_ASSERT | - APIC_INT_LEVELTRIG | APIC_SHORT_MASK) ) + if ( (uint32_t)msr_content & + ~(APIC_VECTOR_MASK | APIC_MODE_MASK | APIC_DEST_MASK | + APIC_INT_ASSERT | APIC_INT_LEVELTRIG | APIC_SHORT_MASK) ) return X86EMUL_EXCEPTION; vlapic_set_reg(vlapic, APIC_ICR2, msr_content >> 32); break; @@ -1037,7 +1041,7 @@ int guest_wrmsr_x2apic(struct vcpu *v, uint32_t msr, uint64_t msr_content) case APIC_ESR: if ( msr_content ) { - default: + default: return X86EMUL_EXCEPTION; } } @@ -1050,10 +1054,9 @@ int guest_wrmsr_x2apic(struct vcpu *v, uint32_t msr, uint64_t msr_content) static int vlapic_range(struct vcpu *v, unsigned long addr) { struct vlapic *vlapic = vcpu_vlapic(v); - unsigned long offset = addr - vlapic_base_address(vlapic); + unsigned long offset = addr - vlapic_base_address(vlapic); - return !vlapic_hw_disabled(vlapic) && - !vlapic_x2apic_mode(vlapic) && + return !vlapic_hw_disabled(vlapic) && !vlapic_x2apic_mode(vlapic) && (offset < PAGE_SIZE); } @@ -1124,7 +1127,7 @@ int guest_wrmsr_apic_base(struct vcpu *v, uint64_t value) ((value & APIC_BASE_ADDR_MASK) != APIC_DEFAULT_PHYS_BASE) ) { printk(XENLOG_G_INFO - "%pv tried to move the APIC MMIO window: val 0x%08"PRIx64"\n", + "%pv tried to move the APIC MMIO window: val 0x%08" PRIx64 "\n", v, value); return X86EMUL_EXCEPTION; } @@ -1158,13 +1161,13 @@ int guest_wrmsr_apic_base(struct vcpu *v, uint64_t value) vmx_vlapic_msr_changed(vlapic_vcpu(vlapic)); - HVM_DBG_LOG(DBG_LEVEL_VLAPIC, - "apic base msr is 0x%016"PRIx64, vlapic->hw.apic_base_msr); + HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "apic base msr is 0x%016" PRIx64, + vlapic->hw.apic_base_msr); return X86EMUL_OKAY; } -uint64_t vlapic_tdt_msr_get(struct vlapic *vlapic) +uint64_t vlapic_tdt_msr_get(struct vlapic *vlapic) { if ( !vlapic_lvtt_tdt(vlapic) ) return 0; @@ -1185,7 +1188,7 @@ void vlapic_tdt_msr_set(struct vlapic *vlapic, uint64_t value) HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "ignore tsc deadline msr write"); return; } - + /* new_value = 0, >0 && <= now, > now */ guest_tsc = hvm_get_guest_tsc(v); if ( value > guest_tsc ) @@ -1193,15 +1196,15 @@ void vlapic_tdt_msr_set(struct vlapic *vlapic, uint64_t value) uint64_t delta = gtsc_to_gtime(v->domain, value - guest_tsc); delta = max_t(s64, delta, 0); - HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "delta[0x%016"PRIx64"]", delta); + HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "delta[0x%016" PRIx64 "]", delta); vlapic->hw.tdt_msr = value; /* .... 
reprogram tdt timer */ TRACE_2_LONG_3D(TRC_HVM_EMUL_LAPIC_START_TIMER, TRC_PAR_LONG(delta), TRC_PAR_LONG(0LL), vlapic->pt.irq); - create_periodic_time(v, &vlapic->pt, delta, 0, - vlapic->pt.irq, vlapic_tdt_pt_cb, - &vlapic->timer_last_update, false); + create_periodic_time(v, &vlapic->pt, delta, 0, vlapic->pt.irq, + vlapic_tdt_pt_cb, &vlapic->timer_last_update, + false); vlapic->timer_last_update = vlapic->pt.last_plt_gtime; } else @@ -1213,9 +1216,9 @@ void vlapic_tdt_msr_set(struct vlapic *vlapic, uint64_t value) { TRACE_2_LONG_3D(TRC_HVM_EMUL_LAPIC_START_TIMER, TRC_PAR_LONG(0LL), TRC_PAR_LONG(0LL), vlapic->pt.irq); - create_periodic_time(v, &vlapic->pt, 0, 0, - vlapic->pt.irq, vlapic_tdt_pt_cb, - &vlapic->timer_last_update, false); + create_periodic_time(v, &vlapic->pt, 0, 0, vlapic->pt.irq, + vlapic_tdt_pt_cb, &vlapic->timer_last_update, + false); vlapic->timer_last_update = vlapic->pt.last_plt_gtime; } else @@ -1225,12 +1228,12 @@ void vlapic_tdt_msr_set(struct vlapic *vlapic, uint64_t value) destroy_periodic_time(&vlapic->pt); } - HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "value[0x%016"PRIx64"]", value); + HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "value[0x%016" PRIx64 "]", value); } HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, - "tdt_msr[0x%016"PRIx64"]," - " gtsc[0x%016"PRIx64"]", + "tdt_msr[0x%016" PRIx64 "]," + " gtsc[0x%016" PRIx64 "]", vlapic->hw.tdt_msr, guest_tsc); } @@ -1249,15 +1252,15 @@ static int __vlapic_accept_pic_intr(struct vcpu *v) redir0 = domain_vioapic(d, 0)->redirtbl[0]; /* We deliver 8259 interrupts to the appropriate CPU as follows. */ - return ((/* IOAPIC pin0 is unmasked and routing to this LAPIC? */ - ((redir0.fields.delivery_mode == dest_ExtINT) && - !redir0.fields.mask && - redir0.fields.dest_id == VLAPIC_ID(vlapic) && - !vlapic_disabled(vlapic)) || - /* LAPIC has LVT0 unmasked for ExtInts? */ - ((lvt0 & (APIC_MODE_MASK|APIC_LVT_MASKED)) == APIC_DM_EXTINT) || - /* LAPIC is fully disabled? */ - vlapic_hw_disabled(vlapic))); + return ( + (/* IOAPIC pin0 is unmasked and routing to this LAPIC? */ + ((redir0.fields.delivery_mode == dest_ExtINT) && !redir0.fields.mask && + redir0.fields.dest_id == VLAPIC_ID(vlapic) && + !vlapic_disabled(vlapic)) || + /* LAPIC has LVT0 unmasked for ExtInts? */ + ((lvt0 & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_EXTINT) || + /* LAPIC is fully disabled? */ + vlapic_hw_disabled(vlapic))); } int vlapic_accept_pic_intr(struct vcpu *v) @@ -1280,13 +1283,13 @@ void vlapic_adjust_i8259_target(struct domain *d) if ( !has_vpic(d) ) return; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) if ( __vlapic_accept_pic_intr(v) ) goto found; v = d->vcpu ? d->vcpu[0] : NULL; - found: +found: if ( d->arch.hvm.i8259_target == v ) return; d->arch.hvm.i8259_target = v; @@ -1324,8 +1327,7 @@ int vlapic_has_pending_irq(struct vcpu *v) * subsequent interrupt of lower priority occurs then APIC assist * needs to be cleared. */ - if ( isr >= 0 && - (irr & 0xf0) <= (isr & 0xf0) ) + if ( isr >= 0 && (irr & 0xf0) <= (isr & 0xf0) ) { viridian_apic_assist_clear(v); return -1; @@ -1339,8 +1341,7 @@ int vlapic_ack_pending_irq(struct vcpu *v, int vector, bool_t force_ack) struct vlapic *vlapic = vcpu_vlapic(v); int isr; - if ( !force_ack && - hvm_funcs.virtual_intr_delivery_enabled ) + if ( !force_ack && hvm_funcs.virtual_intr_delivery_enabled ) return 1; /* If there's no chance of using APIC assist then bail now. 
*/ @@ -1359,7 +1360,7 @@ int vlapic_ack_pending_irq(struct vcpu *v, int vector, bool_t force_ack) viridian_apic_assist_set(v); } - done: +done: vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]); vlapic_clear_irr(vector, vlapic); return 1; @@ -1387,8 +1388,8 @@ static void vlapic_do_init(struct vlapic *vlapic) vlapic_set_reg(vlapic, APIC_ISR + 0x10 * i, 0); vlapic_set_reg(vlapic, APIC_TMR + 0x10 * i, 0); } - vlapic_set_reg(vlapic, APIC_ICR, 0); - vlapic_set_reg(vlapic, APIC_ICR2, 0); + vlapic_set_reg(vlapic, APIC_ICR, 0); + vlapic_set_reg(vlapic, APIC_ICR2, 0); /* * LDR is read-only in x2APIC mode. Preserve its value when handling * INIT signal in x2APIC mode. @@ -1396,8 +1397,8 @@ static void vlapic_do_init(struct vlapic *vlapic) if ( !vlapic_x2apic_mode(vlapic) ) vlapic_set_reg(vlapic, APIC_LDR, 0); vlapic_set_reg(vlapic, APIC_TASKPRI, 0); - vlapic_set_reg(vlapic, APIC_TMICT, 0); - vlapic_set_reg(vlapic, APIC_TMCCT, 0); + vlapic_set_reg(vlapic, APIC_TMICT, 0); + vlapic_set_reg(vlapic, APIC_TMCCT, 0); vlapic_set_tdcr(vlapic, 0); vlapic_set_reg(vlapic, APIC_DFR, 0xffffffffU); @@ -1446,13 +1447,13 @@ static void lapic_rearm(struct vlapic *s) if ( (tmict = vlapic_get_reg(s, APIC_TMICT)) == 0 ) return; - period = ((uint64_t)APIC_BUS_CYCLE_NS * - (uint32_t)tmict * s->hw.timer_divisor); + period = + ((uint64_t)APIC_BUS_CYCLE_NS * (uint32_t)tmict * s->hw.timer_divisor); TRACE_2_LONG_3D(TRC_HVM_EMUL_LAPIC_START_TIMER, TRC_PAR_LONG(period), - TRC_PAR_LONG(vlapic_lvtt_period(s) ? period : 0LL), s->pt.irq); + TRC_PAR_LONG(vlapic_lvtt_period(s) ? period : 0LL), + s->pt.irq); create_periodic_time(vlapic_vcpu(s), &s->pt, period, - vlapic_lvtt_period(s) ? period : 0, - s->pt.irq, + vlapic_lvtt_period(s) ? period : 0, s->pt.irq, vlapic_lvtt_period(s) ? vlapic_pt_cb : NULL, &s->timer_last_update, false); s->timer_last_update = s->pt.last_plt_gtime; @@ -1523,8 +1524,8 @@ static int lapic_load_hidden(struct domain *d, hvm_domain_context_t *h) return -EINVAL; } s = vcpu_vlapic(v); - - if ( hvm_load_entry_zeroextend(LAPIC, h, &s->hw) != 0 ) + + if ( hvm_load_entry_zeroextend(LAPIC, h, &s->hw) != 0 ) return -EINVAL; s->loaded.hw = 1; @@ -1557,8 +1558,8 @@ static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h) return -EINVAL; } s = vcpu_vlapic(v); - - if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 ) + + if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 ) return -EINVAL; s->loaded.id = vlapic_get_reg(s, APIC_ID); @@ -1575,10 +1576,10 @@ static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h) return 0; } -HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, - lapic_load_hidden, 1, HVMSR_PER_VCPU); -HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, - lapic_load_regs, 1, HVMSR_PER_VCPU); +HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden, 1, + HVMSR_PER_VCPU); +HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs, 1, + HVMSR_PER_VCPU); int vlapic_init(struct vcpu *v) { @@ -1594,7 +1595,7 @@ int vlapic_init(struct vcpu *v) vlapic->pt.source = PTSRC_lapic; - if (vlapic->regs_page == NULL) + if ( vlapic->regs_page == NULL ) { vlapic->regs_page = alloc_domheap_page(v->domain, MEMF_no_owner); if ( vlapic->regs_page == NULL ) @@ -1604,7 +1605,7 @@ int vlapic_init(struct vcpu *v) return -ENOMEM; } } - if (vlapic->regs == NULL) + if ( vlapic->regs == NULL ) { vlapic->regs = __map_domain_page_global(vlapic->regs_page); if ( vlapic->regs == NULL ) @@ -1620,8 +1621,7 @@ int vlapic_init(struct vcpu *v) spin_lock_init(&vlapic->esr_lock); - 
tasklet_init(&vlapic->init_sipi.tasklet, - vlapic_init_sipi_action, + tasklet_init(&vlapic->init_sipi.tasklet, vlapic_init_sipi_action, (unsigned long)v); if ( v->vcpu_id == 0 ) diff --git a/xen/arch/x86/hvm/vm_event.c b/xen/arch/x86/hvm/vm_event.c index 121de23071..b88767d018 100644 --- a/xen/arch/x86/hvm/vm_event.c +++ b/xen/arch/x86/hvm/vm_event.c @@ -86,8 +86,7 @@ void hvm_vm_event_do_resume(struct vcpu *v) VM_EVENT_FLAG_SET_EMUL_INSN_DATA ) kind = EMUL_KIND_SET_CONTEXT_INSN; - hvm_emulate_one_vm_event(kind, TRAP_invalid_op, - X86_EVENT_NO_EC); + hvm_emulate_one_vm_event(kind, TRAP_invalid_op, X86_EVENT_NO_EC); v->arch.vm_event->emulate_flags = 0; } diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c index aeb5a70104..1962daea39 100644 --- a/xen/arch/x86/hvm/vmsi.c +++ b/xen/arch/x86/hvm/vmsi.c @@ -22,7 +22,7 @@ * * Support for virtual MSI logic * Will be merged it with virtual IOAPIC logic, since most is the same -*/ + */ #include #include @@ -42,16 +42,13 @@ #include #include -static void vmsi_inj_irq( - struct vlapic *target, - uint8_t vector, - uint8_t trig_mode, - uint8_t delivery_mode) +static void vmsi_inj_irq(struct vlapic *target, uint8_t vector, + uint8_t trig_mode, uint8_t delivery_mode) { HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "vmsi_inj_irq: vec %02x trig %d dm %d\n", vector, trig_mode, delivery_mode); - switch ( delivery_mode ) + switch (delivery_mode) { case dest_Fixed: case dest_LowestPrio: @@ -62,15 +59,13 @@ static void vmsi_inj_irq( } } -int vmsi_deliver( - struct domain *d, int vector, - uint8_t dest, uint8_t dest_mode, - uint8_t delivery_mode, uint8_t trig_mode) +int vmsi_deliver(struct domain *d, int vector, uint8_t dest, uint8_t dest_mode, + uint8_t delivery_mode, uint8_t trig_mode) { struct vlapic *target; struct vcpu *v; - switch ( delivery_mode ) + switch (delivery_mode) { case dest_LowestPrio: target = vlapic_lowest_prio(d, NULL, 0, dest, dest_mode); @@ -84,11 +79,9 @@ int vmsi_deliver( return -ESRCH; case dest_Fixed: - for_each_vcpu ( d, v ) - if ( vlapic_match_dest(vcpu_vlapic(v), NULL, - 0, dest, dest_mode) ) - vmsi_inj_irq(vcpu_vlapic(v), vector, - trig_mode, delivery_mode); + for_each_vcpu (d, v) + if ( vlapic_match_dest(vcpu_vlapic(v), NULL, 0, dest, dest_mode) ) + vmsi_inj_irq(vcpu_vlapic(v), vector, trig_mode, delivery_mode); break; default: @@ -125,13 +118,13 @@ int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode) { int dest_vcpu_id = -1, w = 0; struct vcpu *v; - + if ( d->max_vcpus == 1 ) return 0; - - for_each_vcpu ( d, v ) + + for_each_vcpu (d, v) { - if ( vlapic_match_dest(vcpu_vlapic(v), NULL, 0, dest, dest_mode) ) + if ( vlapic_match_dest(vcpu_vlapic(v), NULL, 0, dest, dest_mode) ) { w++; dest_vcpu_id = v->vcpu_id; @@ -147,20 +140,21 @@ int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode) struct msixtbl_entry { struct list_head list; - atomic_t refcnt; /* how many bind_pt_irq called for the device */ + atomic_t refcnt; /* how many bind_pt_irq called for the device */ /* TODO: resolve the potential race by destruction of pdev */ struct pci_dev *pdev; - unsigned long gtable; /* gpa of msix table */ + unsigned long gtable; /* gpa of msix table */ DECLARE_BITMAP(table_flags, MAX_MSIX_TABLE_ENTRIES); #define MAX_MSIX_ACC_ENTRIES 3 unsigned int table_len; - struct { - uint32_t msi_ad[3]; /* Shadow of address low, high and data */ + struct + { + uint32_t msi_ad[3]; /* Shadow of address low, high and data */ } gentries[MAX_MSIX_ACC_ENTRIES]; DECLARE_BITMAP(acc_valid, 3 * MAX_MSIX_ACC_ENTRIES); 
#define acc_bit(what, ent, slot, idx) \ - what##_bit((slot) * 3 + (idx), (ent)->acc_valid) + what##_bit((slot)*3 + (idx), (ent)->acc_valid) struct rcu_head rcu; }; @@ -176,22 +170,21 @@ static bool msixtbl_initialised(const struct domain *d) return d->arch.hvm.msixtbl_list.next; } -static struct msixtbl_entry *msixtbl_find_entry( - struct vcpu *v, unsigned long addr) +static struct msixtbl_entry *msixtbl_find_entry(struct vcpu *v, + unsigned long addr) { struct msixtbl_entry *entry; struct domain *d = v->domain; - list_for_each_entry( entry, &d->arch.hvm.msixtbl_list, list ) - if ( addr >= entry->gtable && - addr < entry->gtable + entry->table_len ) + list_for_each_entry (entry, &d->arch.hvm.msixtbl_list, list) + if ( addr >= entry->gtable && addr < entry->gtable + entry->table_len ) return entry; return NULL; } -static struct msi_desc *msixtbl_addr_to_desc( - const struct msixtbl_entry *entry, unsigned long addr) +static struct msi_desc *msixtbl_addr_to_desc(const struct msixtbl_entry *entry, + unsigned long addr) { unsigned int nr_entry; struct msi_desc *desc; @@ -201,7 +194,7 @@ static struct msi_desc *msixtbl_addr_to_desc( nr_entry = (addr - entry->gtable) / PCI_MSIX_ENTRY_SIZE; - list_for_each_entry( desc, &entry->pdev->msi_list, list ) + list_for_each_entry (desc, &entry->pdev->msi_list, list) if ( desc->msi_attrib.type == PCI_CAP_ID_MSIX && desc->msi_attrib.entry_nr == nr_entry ) return desc; @@ -209,8 +202,8 @@ static struct msi_desc *msixtbl_addr_to_desc( return NULL; } -static int msixtbl_read(const struct hvm_io_handler *handler, - uint64_t address, uint32_t len, uint64_t *pval) +static int msixtbl_read(const struct hvm_io_handler *handler, uint64_t address, + uint32_t len, uint64_t *pval) { unsigned long offset; struct msixtbl_entry *entry; @@ -256,9 +249,10 @@ static int msixtbl_read(const struct hvm_io_handler *handler, PCI_MSIX_VECTOR_BITMASK); else *pval |= (u64)MASK_INSR(msi_desc->msi_attrib.guest_masked, - PCI_MSIX_VECTOR_BITMASK) << 32; + PCI_MSIX_VECTOR_BITMASK) + << 32; } - + r = X86EMUL_OKAY; out: rcu_read_unlock(&msixtbl_rcu_lock); @@ -290,7 +284,7 @@ static int msixtbl_write(struct vcpu *v, unsigned long address, if ( offset != PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET ) { index = offset / sizeof(uint32_t); - if ( nr_entry < MAX_MSIX_ACC_ENTRIES ) + if ( nr_entry < MAX_MSIX_ACC_ENTRIES ) { entry->gentries[nr_entry].msi_ad[index] = val; acc_bit(set, entry, nr_entry, index); @@ -318,7 +312,7 @@ static int msixtbl_write(struct vcpu *v, unsigned long address, msi_desc = msixtbl_addr_to_desc(entry, address); if ( !msi_desc || msi_desc->irq < 0 ) goto out; - + desc = irq_to_desc(msi_desc->irq); if ( !desc ) goto out; @@ -329,7 +323,7 @@ static int msixtbl_write(struct vcpu *v, unsigned long address, goto unlock; ASSERT(msi_desc == desc->msi_desc); - + guest_mask_msi_irq(desc, !!(val & PCI_MSIX_VECTOR_BITMASK)); unlock: @@ -417,10 +411,8 @@ static const struct hvm_io_ops msixtbl_mmio_ops = { .write = _msixtbl_write, }; -static void add_msixtbl_entry(struct domain *d, - struct pci_dev *pdev, - uint64_t gtable, - struct msixtbl_entry *entry) +static void add_msixtbl_entry(struct domain *d, struct pci_dev *pdev, + uint64_t gtable, struct msixtbl_entry *entry) { INIT_LIST_HEAD(&entry->list); INIT_RCU_HEAD(&entry->rcu); @@ -428,7 +420,7 @@ static void add_msixtbl_entry(struct domain *d, entry->table_len = pdev->msix->nr_entries * PCI_MSIX_ENTRY_SIZE; entry->pdev = pdev; - entry->gtable = (unsigned long) gtable; + entry->gtable = (unsigned long)gtable; list_add_rcu(&entry->list, 
&d->arch.hvm.msixtbl_list); } @@ -437,7 +429,7 @@ static void free_msixtbl_entry(struct rcu_head *rcu) { struct msixtbl_entry *entry; - entry = container_of (rcu, struct msixtbl_entry, rcu); + entry = container_of(rcu, struct msixtbl_entry, rcu); xfree(entry); } @@ -463,7 +455,7 @@ int msixtbl_pt_register(struct domain *d, struct pirq *pirq, uint64_t gtable) return -ENODEV; /* - * xmalloc() with irq_disabled causes the failure of check_lock() + * xmalloc() with irq_disabled causes the failure of check_lock() * for xenpool->lock. So we allocate an entry beforehand. */ new_entry = xzalloc(struct msixtbl_entry); @@ -483,7 +475,7 @@ int msixtbl_pt_register(struct domain *d, struct pirq *pirq, uint64_t gtable) pdev = msi_desc->dev; - list_for_each_entry( entry, &d->arch.hvm.msixtbl_list, list ) + list_for_each_entry (entry, &d->arch.hvm.msixtbl_list, list) if ( pdev == entry->pdev ) goto found; @@ -503,14 +495,14 @@ out: { struct vcpu *v; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { if ( (v->pause_flags & VPF_blocked_in_xen) && !v->arch.hvm.hvm_io.msix_snoop_gpa && v->arch.hvm.hvm_io.msix_snoop_address == - (gtable + msi_desc->msi_attrib.entry_nr * - PCI_MSIX_ENTRY_SIZE + - PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET) ) + (gtable + + msi_desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET) ) v->arch.hvm.hvm_io.msix_unmask_address = v->arch.hvm.hvm_io.msix_snoop_address; } @@ -542,7 +534,7 @@ void msixtbl_pt_unregister(struct domain *d, struct pirq *pirq) pdev = msi_desc->dev; - list_for_each_entry( entry, &d->arch.hvm.msixtbl_list, list ) + list_for_each_entry (entry, &d->arch.hvm.msixtbl_list, list) if ( pdev == entry->pdev ) goto found; @@ -583,8 +575,7 @@ void msixtbl_pt_cleanup(struct domain *d) spin_lock(&d->event_lock); - list_for_each_entry_safe( entry, temp, - &d->arch.hvm.msixtbl_list, list ) + list_for_each_entry_safe(entry, temp, &d->arch.hvm.msixtbl_list, list) del_msixtbl_entry(entry); spin_unlock(&d->event_lock); @@ -597,20 +588,18 @@ void msix_write_completion(struct vcpu *v) v->arch.hvm.hvm_io.msix_snoop_address = 0; - if ( !ctrl_address && snoop_addr && - v->arch.hvm.hvm_io.msix_snoop_gpa ) + if ( !ctrl_address && snoop_addr && v->arch.hvm.hvm_io.msix_snoop_gpa ) { const struct msi_desc *desc; uint32_t data; rcu_read_lock(&msixtbl_rcu_lock); - desc = msixtbl_addr_to_desc(msixtbl_find_entry(v, snoop_addr), - snoop_addr); + desc = + msixtbl_addr_to_desc(msixtbl_find_entry(v, snoop_addr), snoop_addr); rcu_read_unlock(&msixtbl_rcu_lock); if ( desc && - hvm_copy_from_guest_phys(&data, - v->arch.hvm.hvm_io.msix_snoop_gpa, + hvm_copy_from_guest_phys(&data, v->arch.hvm.hvm_io.msix_snoop_gpa, sizeof(data)) == HVMTRANS_okay && !(data & PCI_MSIX_VECTOR_BITMASK) ) ctrl_address = snoop_addr; @@ -678,8 +667,8 @@ static int vpci_msi_update(const struct pci_dev *pdev, uint32_t data, struct xen_domctl_bind_pt_irq bind = { .machine_irq = pirq + i, .irq_type = PT_IRQ_TYPE_MSI, - .u.msi.gvec = (vector & ~vector_mask) | - ((vector + i) & vector_mask), + .u.msi.gvec = + (vector & ~vector_mask) | ((vector + i) & vector_mask), .u.msi.gflags = msi_gflags(data, address, (mask >> i) & 1), }; int rc = pt_irq_create_bind(pdev->domain, &bind); @@ -737,10 +726,9 @@ static int vpci_msi_enable(const struct pci_dev *pdev, uint32_t data, int rc, pirq = INVALID_PIRQ; /* Get a PIRQ. */ - rc = allocate_and_map_msi_pirq(pdev->domain, -1, &pirq, - table_base ? 
MAP_PIRQ_TYPE_MSI - : MAP_PIRQ_TYPE_MULTI_MSI, - &msi_info); + rc = allocate_and_map_msi_pirq( + pdev->domain, -1, &pirq, + table_base ? MAP_PIRQ_TYPE_MSI : MAP_PIRQ_TYPE_MULTI_MSI, &msi_info); if ( rc ) { gdprintk(XENLOG_ERR, "%04x:%02x:%02x.%u: failed to map PIRQ: %d\n", @@ -826,8 +814,7 @@ void vpci_msi_arch_print(const struct vpci_msi *msi) msi->data & MSI_DATA_LEVEL_ASSERT ? "" : "de", msi->address & MSI_ADDR_DESTMODE_LOGIC ? "log" : "phys", msi->address & MSI_ADDR_REDIRECTION_LOWPRI ? "lowest" : "fixed", - MASK_EXTR(msi->address, MSI_ADDR_DEST_ID_MASK), - msi->arch.pirq); + MASK_EXTR(msi->address, MSI_ADDR_DEST_ID_MASK), msi->arch.pirq); } void vpci_msix_arch_mask_entry(struct vpci_msix_entry *entry, @@ -844,8 +831,8 @@ int vpci_msix_arch_enable_entry(struct vpci_msix_entry *entry, ASSERT(entry->arch.pirq == INVALID_PIRQ); rc = vpci_msi_enable(pdev, entry->data, entry->addr, - vmsix_entry_nr(pdev->vpci->msix, entry), - table_base, entry->masked); + vmsix_entry_nr(pdev->vpci->msix, entry), table_base, + entry->masked); if ( rc >= 0 ) { entry->arch.pirq = rc; @@ -880,15 +867,16 @@ int vpci_msix_arch_print(const struct vpci_msix *msix) { const struct vpci_msix_entry *entry = &msix->entries[i]; - printk("%6u vec=%02x%7s%6s%3sassert%5s%7s dest_id=%lu mask=%u pirq: %d\n", - i, MASK_EXTR(entry->data, MSI_DATA_VECTOR_MASK), - entry->data & MSI_DATA_DELIVERY_LOWPRI ? "lowest" : "fixed", - entry->data & MSI_DATA_TRIGGER_LEVEL ? "level" : "edge", - entry->data & MSI_DATA_LEVEL_ASSERT ? "" : "de", - entry->addr & MSI_ADDR_DESTMODE_LOGIC ? "log" : "phys", - entry->addr & MSI_ADDR_REDIRECTION_LOWPRI ? "lowest" : "fixed", - MASK_EXTR(entry->addr, MSI_ADDR_DEST_ID_MASK), - entry->masked, entry->arch.pirq); + printk( + "%6u vec=%02x%7s%6s%3sassert%5s%7s dest_id=%lu mask=%u pirq: %d\n", + i, MASK_EXTR(entry->data, MSI_DATA_VECTOR_MASK), + entry->data & MSI_DATA_DELIVERY_LOWPRI ? "lowest" : "fixed", + entry->data & MSI_DATA_TRIGGER_LEVEL ? "level" : "edge", + entry->data & MSI_DATA_LEVEL_ASSERT ? "" : "de", + entry->addr & MSI_ADDR_DESTMODE_LOGIC ? "log" : "phys", + entry->addr & MSI_ADDR_REDIRECTION_LOWPRI ? "lowest" : "fixed", + MASK_EXTR(entry->addr, MSI_ADDR_DEST_ID_MASK), entry->masked, + entry->arch.pirq); if ( i && !(i % 64) ) { struct pci_dev *pdev = msix->pdev; diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c index 0d097cf1f2..27b04e2b2b 100644 --- a/xen/arch/x86/hvm/vmx/intr.c +++ b/xen/arch/x86/hvm/vmx/intr.c @@ -42,12 +42,12 @@ /* * A few notes on virtual NMI and INTR delivery, and interactions with * interruptibility states: - * + * * We can only inject an ExtInt if EFLAGS.IF = 1 and no blocking by * STI nor MOV SS. Otherwise the VM entry fails. The 'virtual interrupt * pending' control causes a VM exit when all these checks succeed. It will * exit immediately after VM entry if the checks succeed at that point. - * + * * We can only inject an NMI if no blocking by MOV SS (also, depending on * implementation, if no blocking by STI). If pin-based 'virtual NMIs' * control is specified then the NMI-blocking interruptibility flag is @@ -55,14 +55,14 @@ * conjunction with 'virtual NMIs') causes a VM exit when all these checks * succeed. It will exit immediately after VM entry if the checks succeed * at that point. - * + * * Because a processor may or may not check blocking-by-STI when injecting * a virtual NMI, it will be necessary to convert that to block-by-MOV-SS * before specifying the 'virtual NMI pending' control. 
Otherwise we could * enter an infinite loop where we check blocking-by-STI in software and * thus delay delivery of a virtual NMI, but the processor causes immediate * VM exit because it does not check blocking-by-STI. - * + * * Injecting a virtual NMI sets the NMI-blocking interruptibility flag only * if the 'virtual NMIs' control is set. Injecting *any* kind of event clears * the STI- and MOV-SS-blocking interruptibility-state flags. @@ -154,8 +154,7 @@ enum hvm_intblk nvmx_intr_blocked(struct vcpu *v) if ( nestedhvm_vcpu_in_guestmode(v) ) { - if ( nvcpu->nv_vmexit_pending || - nvcpu->nv_vmswitch_in_progress ) + if ( nvcpu->nv_vmexit_pending || nvcpu->nv_vmswitch_in_progress ) r = hvm_intblk_rflags_ie; else { @@ -194,7 +193,7 @@ static int nvmx_intr_intercept(struct vcpu *v, struct hvm_intack intack) return 0; if ( intack.source == hvm_intsrc_pic || - intack.source == hvm_intsrc_lapic ) + intack.source == hvm_intsrc_lapic ) { vmx_inject_extint(intack.vector, intack.source); @@ -280,7 +279,8 @@ void vmx_intr_assist(void) goto out; } - } else if ( intblk == hvm_intblk_tpr ) + } + else if ( intblk == hvm_intblk_tpr ) { ASSERT(vlapic_enabled(vcpu_vlapic(v))); ASSERT(intack.source == hvm_intsrc_lapic); @@ -320,11 +320,11 @@ void vmx_intr_assist(void) unsigned long status; unsigned int i, n; - /* - * intack.vector is the highest priority vector. So we set eoi_exit_bitmap - * for intack.vector - give a chance to post periodic time interrupts when - * periodic time interrupts become the highest one - */ + /* + * intack.vector is the highest priority vector. So we set + * eoi_exit_bitmap for intack.vector - give a chance to post periodic + * time interrupts when periodic time interrupts become the highest one + */ if ( pt_vector != -1 ) { #ifndef NDEBUG @@ -344,16 +344,16 @@ void vmx_intr_assist(void) const uint32_t *word; unsigned int i; - printk(XENLOG_ERR "%pv: intack: %u:%02x pt: %02x\n", - current, intack.source, intack.vector, pt_vector); + printk(XENLOG_ERR "%pv: intack: %u:%02x pt: %02x\n", current, + intack.source, intack.vector, pt_vector); vlapic = vcpu_vlapic(v); if ( vlapic && vlapic->regs ) { word = (const void *)&vlapic->regs->data[APIC_IRR]; printk(XENLOG_ERR "vIRR:"); - for ( i = NR_VECTORS / 32; i-- ; ) - printk(" %08x", word[i*4]); + for ( i = NR_VECTORS / 32; i--; ) + printk(" %08x", word[i * 4]); printk("\n"); } @@ -362,7 +362,7 @@ void vmx_intr_assist(void) { word = (const void *)&pi_desc->pir; printk(XENLOG_ERR " PIR:"); - for ( i = NR_VECTORS / 32; i-- ; ) + for ( i = NR_VECTORS / 32; i--; ) printk(" %08x", word[i]); printk("\n"); } @@ -375,13 +375,12 @@ void vmx_intr_assist(void) /* we need update the RVI field */ __vmread(GUEST_INTR_STATUS, &status); status &= ~VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK; - status |= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK & - intack.vector; + status |= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK & intack.vector; __vmwrite(GUEST_INTR_STATUS, status); n = ARRAY_SIZE(v->arch.hvm.vmx.eoi_exit_bitmap); - while ( (i = find_first_bit(&v->arch.hvm.vmx.eoi_exitmap_changed, - n)) < n ) + while ( (i = find_first_bit(&v->arch.hvm.vmx.eoi_exitmap_changed, n)) < + n ) { clear_bit(i, &v->arch.hvm.vmx.eoi_exitmap_changed); __vmwrite(EOI_EXIT_BITMAP(i), v->arch.hvm.vmx.eoi_exit_bitmap[i]); @@ -391,7 +390,7 @@ void vmx_intr_assist(void) } else { - HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0); + HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/0); vmx_inject_extint(intack.vector, intack.source); pt_intr_post(v, intack); } @@ -399,17 +398,15 @@ void 
vmx_intr_assist(void) /* Is there another IRQ to queue up behind this one? */ intack = hvm_vcpu_has_pending_irq(v); if ( !cpu_has_vmx_virtual_intr_delivery || - intack.source == hvm_intsrc_pic || - intack.source == hvm_intsrc_vector ) + intack.source == hvm_intsrc_pic || intack.source == hvm_intsrc_vector ) { if ( unlikely(intack.source != hvm_intsrc_none) ) vmx_enable_intr_window(v, intack); } - out: +out: if ( !nestedhvm_vcpu_in_guestmode(v) && - !cpu_has_vmx_virtual_intr_delivery && - cpu_has_vmx_tpr_shadow ) + !cpu_has_vmx_virtual_intr_delivery && cpu_has_vmx_tpr_shadow ) __vmwrite(TPR_THRESHOLD, tpr_threshold); } diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c index bb0b4439df..9d7ac585c3 100644 --- a/xen/arch/x86/hvm/vmx/realmode.c +++ b/xen/arch/x86/hvm/vmx/realmode.c @@ -1,10 +1,10 @@ /****************************************************************************** * arch/x86/hvm/vmx/realmode.c - * + * * Real-mode emulation for VMX. - * + * * Copyright (c) 2007-2008 Citrix Systems, Inc. - * + * * Authors: * Keir Fraser */ @@ -21,10 +21,9 @@ #include #include -static void realmode_deliver_exception( - unsigned int vector, - unsigned int insn_len, - struct hvm_emulate_ctxt *hvmemul_ctxt) +static void realmode_deliver_exception(unsigned int vector, + unsigned int insn_len, + struct hvm_emulate_ctxt *hvmemul_ctxt) { struct segment_register *idtr, *csr; struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs; @@ -33,14 +32,14 @@ static void realmode_deliver_exception( unsigned int last_byte; idtr = hvmemul_get_seg_reg(x86_seg_idtr, hvmemul_ctxt); - csr = hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt); + csr = hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt); __set_bit(x86_seg_cs, &hvmemul_ctxt->seg_reg_dirty); - again: +again: last_byte = (vector * 4) + 3; if ( idtr->limit < last_byte || hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4) != - HVMTRANS_okay ) + HVMTRANS_okay ) { /* Software interrupt? */ if ( insn_len != 0 ) @@ -51,7 +50,7 @@ static void realmode_deliver_exception( } /* Exception or hardware interrupt. */ - switch ( vector ) + switch (vector) { case TRAP_double_fault: hvm_triple_fault(); @@ -78,17 +77,17 @@ static void realmode_deliver_exception( pstk += hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->base; (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame), current); - csr->sel = cs_eip >> 16; + csr->sel = cs_eip >> 16; csr->base = (uint32_t)csr->sel << 4; regs->ip = (uint16_t)cs_eip; regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF); /* Exception delivery clears STI and MOV-SS blocking. 
*/ if ( hvmemul_ctxt->intr_shadow & - (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) ) + (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) ) { hvmemul_ctxt->intr_shadow &= - ~(VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS); + ~(VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS); __vmwrite(GUEST_INTERRUPTIBILITY_INFO, hvmemul_ctxt->intr_shadow); } } @@ -137,16 +136,15 @@ void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt) } else { - realmode_deliver_exception( - hvmemul_ctxt->ctxt.event.vector, - hvmemul_ctxt->ctxt.event.insn_len, - hvmemul_ctxt); + realmode_deliver_exception(hvmemul_ctxt->ctxt.event.vector, + hvmemul_ctxt->ctxt.event.insn_len, + hvmemul_ctxt); } } return; - fail: +fail: hvm_dump_emulation_state(XENLOG_G_ERR, "Real-mode", hvmemul_ctxt, rc); domain_crash(curr->domain); } @@ -200,8 +198,8 @@ void vmx_realmode(struct cpu_user_regs *regs) (curr->arch.hvm.vmx.vm86_segment_mask != 0); else curr->arch.hvm.vmx.vmx_emulate = - ((hvmemul_ctxt.seg_reg[x86_seg_cs].sel & 3) - || (hvmemul_ctxt.seg_reg[x86_seg_ss].sel & 3)); + ((hvmemul_ctxt.seg_reg[x86_seg_cs].sel & 3) || + (hvmemul_ctxt.seg_reg[x86_seg_ss].sel & 3)); } /* Need to emulate next time if we've started an IO operation */ @@ -223,9 +221,9 @@ void vmx_realmode(struct cpu_user_regs *regs) sreg->dpl = sreg->sel & 3; sreg = hvmemul_get_seg_reg(x86_seg_gs, &hvmemul_ctxt); sreg->dpl = sreg->sel & 3; - hvmemul_ctxt.seg_reg_dirty |= - (1ul << x86_seg_ds) | (1ul << x86_seg_es) | - (1ul << x86_seg_fs) | (1ul << x86_seg_gs); + hvmemul_ctxt.seg_reg_dirty |= (1ul << x86_seg_ds) | + (1ul << x86_seg_es) | + (1ul << x86_seg_fs) | (1ul << x86_seg_gs); } hvm_emulate_writeback(&hvmemul_ctxt); diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c index 74f2a08cfd..354586af9f 100644 --- a/xen/arch/x86/hvm/vmx/vmcs.c +++ b/xen/arch/x86/hvm/vmx/vmcs.c @@ -116,7 +116,12 @@ static void __init vmx_display_features(void) printk("VMX: Supported advanced features:\n"); -#define P(p,s) if ( p ) { printk(" - %s\n", s); printed = 1; } +#define P(p, s) \ + if ( p ) \ + { \ + printk(" - %s\n", s); \ + printed = 1; \ + } P(cpu_has_vmx_virtualize_apic_accesses, "APIC MMIO access virtualisation"); P(cpu_has_vmx_tpr_shadow, "APIC TPR shadow"); P(cpu_has_vmx_ept, "Extended Page Tables (EPT)"); @@ -138,8 +143,8 @@ static void __init vmx_display_features(void) printk(" - none\n"); } -static u32 adjust_vmx_controls( - const char *name, u32 ctl_min, u32 ctl_opt, u32 msr, bool_t *mismatch) +static u32 adjust_vmx_controls(const char *name, u32 ctl_min, u32 ctl_opt, + u32 msr, bool_t *mismatch) { u32 vmx_msr_low, vmx_msr_high, ctl = ctl_min | ctl_opt; @@ -181,34 +186,24 @@ static int vmx_init_vmcs_config(void) rdmsr(MSR_IA32_VMX_BASIC, vmx_basic_msr_low, vmx_basic_msr_high); - min = (PIN_BASED_EXT_INTR_MASK | - PIN_BASED_NMI_EXITING); - opt = (PIN_BASED_VIRTUAL_NMIS | - PIN_BASED_POSTED_INTERRUPT); - _vmx_pin_based_exec_control = adjust_vmx_controls( - "Pin-Based Exec Control", min, opt, - MSR_IA32_VMX_PINBASED_CTLS, &mismatch); - - min = (CPU_BASED_HLT_EXITING | - CPU_BASED_VIRTUAL_INTR_PENDING | - CPU_BASED_CR8_LOAD_EXITING | - CPU_BASED_CR8_STORE_EXITING | - CPU_BASED_INVLPG_EXITING | - CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING | - CPU_BASED_MONITOR_EXITING | - CPU_BASED_MWAIT_EXITING | - CPU_BASED_MOV_DR_EXITING | - CPU_BASED_ACTIVATE_IO_BITMAP | - CPU_BASED_USE_TSC_OFFSETING | + min = (PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING); + opt = (PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTERRUPT); + _vmx_pin_based_exec_control = 
+ adjust_vmx_controls("Pin-Based Exec Control", min, opt, + MSR_IA32_VMX_PINBASED_CTLS, &mismatch); + + min = (CPU_BASED_HLT_EXITING | CPU_BASED_VIRTUAL_INTR_PENDING | + CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | + CPU_BASED_INVLPG_EXITING | CPU_BASED_CR3_LOAD_EXITING | + CPU_BASED_CR3_STORE_EXITING | CPU_BASED_MONITOR_EXITING | + CPU_BASED_MWAIT_EXITING | CPU_BASED_MOV_DR_EXITING | + CPU_BASED_ACTIVATE_IO_BITMAP | CPU_BASED_USE_TSC_OFFSETING | CPU_BASED_RDTSC_EXITING); - opt = (CPU_BASED_ACTIVATE_MSR_BITMAP | - CPU_BASED_TPR_SHADOW | - CPU_BASED_MONITOR_TRAP_FLAG | - CPU_BASED_ACTIVATE_SECONDARY_CONTROLS); - _vmx_cpu_based_exec_control = adjust_vmx_controls( - "CPU-Based Exec Control", min, opt, - MSR_IA32_VMX_PROCBASED_CTLS, &mismatch); + opt = (CPU_BASED_ACTIVATE_MSR_BITMAP | CPU_BASED_TPR_SHADOW | + CPU_BASED_MONITOR_TRAP_FLAG | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS); + _vmx_cpu_based_exec_control = + adjust_vmx_controls("CPU-Based Exec Control", min, opt, + MSR_IA32_VMX_PROCBASED_CTLS, &mismatch); _vmx_cpu_based_exec_control &= ~CPU_BASED_RDTSC_EXITING; if ( _vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW ) _vmx_cpu_based_exec_control &= @@ -217,17 +212,15 @@ static int vmx_init_vmcs_config(void) if ( _vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS ) { min = 0; - opt = (SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | - SECONDARY_EXEC_WBINVD_EXITING | - SECONDARY_EXEC_ENABLE_EPT | - SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING | - SECONDARY_EXEC_ENABLE_RDTSCP | - SECONDARY_EXEC_PAUSE_LOOP_EXITING | - SECONDARY_EXEC_ENABLE_INVPCID | - SECONDARY_EXEC_ENABLE_VM_FUNCTIONS | - SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS | - SECONDARY_EXEC_XSAVES | - SECONDARY_EXEC_TSC_SCALING); + opt = + (SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_WBINVD_EXITING | SECONDARY_EXEC_ENABLE_EPT | + SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING | + SECONDARY_EXEC_ENABLE_RDTSCP | SECONDARY_EXEC_PAUSE_LOOP_EXITING | + SECONDARY_EXEC_ENABLE_INVPCID | + SECONDARY_EXEC_ENABLE_VM_FUNCTIONS | + SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS | SECONDARY_EXEC_XSAVES | + SECONDARY_EXEC_TSC_SCALING); rdmsrl(MSR_IA32_VMX_MISC, _vmx_misc_cap); if ( _vmx_misc_cap & VMX_MISC_VMWRITE_ALL ) opt |= SECONDARY_EXEC_ENABLE_VMCS_SHADOWING; @@ -248,14 +241,14 @@ static int vmx_init_vmcs_config(void) SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; - _vmx_secondary_exec_control = adjust_vmx_controls( - "Secondary Exec Control", min, opt, - MSR_IA32_VMX_PROCBASED_CTLS2, &mismatch); + _vmx_secondary_exec_control = + adjust_vmx_controls("Secondary Exec Control", min, opt, + MSR_IA32_VMX_PROCBASED_CTLS2, &mismatch); } /* The IA32_VMX_EPT_VPID_CAP MSR exists only when EPT or VPID available */ - if ( _vmx_secondary_exec_control & (SECONDARY_EXEC_ENABLE_EPT | - SECONDARY_EXEC_ENABLE_VPID) ) + if ( _vmx_secondary_exec_control & + (SECONDARY_EXEC_ENABLE_EPT | SECONDARY_EXEC_ENABLE_VPID) ) { rdmsrl(MSR_IA32_VMX_EPT_VPID_CAP, _vmx_ept_vpid_cap); @@ -306,12 +299,11 @@ static int vmx_init_vmcs_config(void) if ( vmx_basic_msr_high & (VMX_BASIC_DEFAULT1_ZERO >> 32) ) msr = MSR_IA32_VMX_TRUE_PROCBASED_CTLS; rdmsr(msr, must_be_one, must_be_zero); - if ( must_be_one & (CPU_BASED_INVLPG_EXITING | - CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING) ) - _vmx_secondary_exec_control &= - ~(SECONDARY_EXEC_ENABLE_EPT | - SECONDARY_EXEC_UNRESTRICTED_GUEST); + if ( must_be_one & + (CPU_BASED_INVLPG_EXITING | CPU_BASED_CR3_LOAD_EXITING | + CPU_BASED_CR3_STORE_EXITING) ) + _vmx_secondary_exec_control &= 
~(SECONDARY_EXEC_ENABLE_EPT | + SECONDARY_EXEC_UNRESTRICTED_GUEST); } /* PML cannot be supported if EPT is not used */ @@ -323,11 +315,11 @@ static int vmx_init_vmcs_config(void) opt_ept_pml = false; if ( (_vmx_secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING) && - ple_gap == 0 ) + ple_gap == 0 ) { if ( !vmx_pin_based_exec_control ) printk(XENLOG_INFO "Disable Pause-Loop Exiting.\n"); - _vmx_secondary_exec_control &= ~ SECONDARY_EXEC_PAUSE_LOOP_EXITING; + _vmx_secondary_exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; } min = VM_EXIT_ACK_INTR_ON_EXIT; @@ -383,18 +375,17 @@ static int vmx_init_vmcs_config(void) if ( !vmx_pin_based_exec_control ) { /* First time through. */ - vmcs_revision_id = vmx_basic_msr_low & VMX_BASIC_REVISION_MASK; + vmcs_revision_id = vmx_basic_msr_low & VMX_BASIC_REVISION_MASK; vmx_pin_based_exec_control = _vmx_pin_based_exec_control; vmx_cpu_based_exec_control = _vmx_cpu_based_exec_control; vmx_secondary_exec_control = _vmx_secondary_exec_control; - vmx_ept_vpid_cap = _vmx_ept_vpid_cap; - vmx_vmexit_control = _vmx_vmexit_control; - vmx_vmentry_control = _vmx_vmentry_control; - vmx_basic_msr = ((u64)vmx_basic_msr_high << 32) | - vmx_basic_msr_low; - vmx_vmfunc = _vmx_vmfunc; - vmx_virt_exception = !!(_vmx_secondary_exec_control & - SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS); + vmx_ept_vpid_cap = _vmx_ept_vpid_cap; + vmx_vmexit_control = _vmx_vmexit_control; + vmx_vmentry_control = _vmx_vmentry_control; + vmx_basic_msr = ((u64)vmx_basic_msr_high << 32) | vmx_basic_msr_low; + vmx_vmfunc = _vmx_vmfunc; + vmx_virt_exception = !!(_vmx_secondary_exec_control & + SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS); vmx_display_features(); /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ @@ -410,30 +401,24 @@ static int vmx_init_vmcs_config(void) else { /* Globals are already initialised: re-check them. 
*/ - mismatch |= cap_check( - "VMCS revision ID", - vmcs_revision_id, vmx_basic_msr_low & VMX_BASIC_REVISION_MASK); - mismatch |= cap_check( - "Pin-Based Exec Control", - vmx_pin_based_exec_control, _vmx_pin_based_exec_control); - mismatch |= cap_check( - "CPU-Based Exec Control", - vmx_cpu_based_exec_control, _vmx_cpu_based_exec_control); - mismatch |= cap_check( - "Secondary Exec Control", - vmx_secondary_exec_control, _vmx_secondary_exec_control); - mismatch |= cap_check( - "VMExit Control", - vmx_vmexit_control, _vmx_vmexit_control); - mismatch |= cap_check( - "VMEntry Control", - vmx_vmentry_control, _vmx_vmentry_control); - mismatch |= cap_check( - "EPT and VPID Capability", - vmx_ept_vpid_cap, _vmx_ept_vpid_cap); - mismatch |= cap_check( - "VMFUNC Capability", - vmx_vmfunc, _vmx_vmfunc); + mismatch |= cap_check("VMCS revision ID", vmcs_revision_id, + vmx_basic_msr_low & VMX_BASIC_REVISION_MASK); + mismatch |= + cap_check("Pin-Based Exec Control", vmx_pin_based_exec_control, + _vmx_pin_based_exec_control); + mismatch |= + cap_check("CPU-Based Exec Control", vmx_cpu_based_exec_control, + _vmx_cpu_based_exec_control); + mismatch |= + cap_check("Secondary Exec Control", vmx_secondary_exec_control, + _vmx_secondary_exec_control); + mismatch |= cap_check("VMExit Control", vmx_vmexit_control, + _vmx_vmexit_control); + mismatch |= cap_check("VMEntry Control", vmx_vmentry_control, + _vmx_vmentry_control); + mismatch |= cap_check("EPT and VPID Capability", vmx_ept_vpid_cap, + _vmx_ept_vpid_cap); + mismatch |= cap_check("VMFUNC Capability", vmx_vmfunc, _vmx_vmfunc); if ( cpu_has_vmx_ins_outs_instr_info != !!(vmx_basic_msr_high & (VMX_BASIC_INS_OUT_INFO >> 32)) ) { @@ -445,8 +430,7 @@ static int vmx_init_vmcs_config(void) if ( (vmx_basic_msr_high & (VMX_BASIC_VMCS_SIZE_MASK >> 32)) != ((vmx_basic_msr & VMX_BASIC_VMCS_SIZE_MASK) >> 32) ) { - printk("VMX: CPU%d unexpected VMCS size %Lu\n", - smp_processor_id(), + printk("VMX: CPU%d unexpected VMCS size %Lu\n", smp_processor_id(), vmx_basic_msr_high & (VMX_BASIC_VMCS_SIZE_MASK >> 32)); mismatch = 1; } @@ -518,7 +502,7 @@ static void __vmx_clear_vmcs(void *info) __vmpclear(vmx->vmcs_shadow_maddr); vmx->active_cpu = -1; - vmx->launched = 0; + vmx->launched = 0; list_del(&vmx->active_list); @@ -606,17 +590,18 @@ int _vmx_cpu_up(bool bsp) BUG_ON(!(read_cr4() & X86_CR4_VMXE)); - /* - * Ensure the current processor operating mode meets - * the requred CRO fixed bits in VMX operation. + /* + * Ensure the current processor operating mode meets + * the requred CRO fixed bits in VMX operation. */ cr0 = read_cr0(); rdmsrl(MSR_IA32_VMX_CR0_FIXED0, vmx_cr0_fixed0); rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx_cr0_fixed1); if ( (~cr0 & vmx_cr0_fixed0) || (cr0 & ~vmx_cr0_fixed1) ) { - printk("CPU%d: some settings of host CR0 are " - "not allowed in VMX operation.\n", cpu); + printk("CPU%d: some settings of host CR0 are " + "not allowed in VMX operation.\n", + cpu); return -EINVAL; } @@ -626,8 +611,8 @@ int _vmx_cpu_up(bool bsp) if ( bios_locked ) { if ( !(eax & (tboot_in_measured_env() - ? IA32_FEATURE_CONTROL_ENABLE_VMXON_INSIDE_SMX - : IA32_FEATURE_CONTROL_ENABLE_VMXON_OUTSIDE_SMX)) ) + ? 
IA32_FEATURE_CONTROL_ENABLE_VMXON_INSIDE_SMX + : IA32_FEATURE_CONTROL_ENABLE_VMXON_OUTSIDE_SMX)) ) { printk("CPU%d: VMX disabled by BIOS.\n", cpu); return -EINVAL; @@ -635,7 +620,7 @@ int _vmx_cpu_up(bool bsp) } else { - eax = IA32_FEATURE_CONTROL_LOCK; + eax = IA32_FEATURE_CONTROL_LOCK; eax |= IA32_FEATURE_CONTROL_ENABLE_VMXON_OUTSIDE_SMX; if ( test_bit(X86_FEATURE_SMX, &boot_cpu_data.x86_capability) ) eax |= IA32_FEATURE_CONTROL_ENABLE_VMXON_INSIDE_SMX; @@ -650,7 +635,7 @@ int _vmx_cpu_up(bool bsp) if ( bsp && (rc = vmx_cpu_up_prepare(cpu)) != 0 ) return rc; - switch ( __vmxon(this_cpu(vmxon_region)) ) + switch (__vmxon(this_cpu(vmxon_region))) { case -2: /* #UD or #GP */ if ( bios_locked && @@ -659,7 +644,8 @@ int _vmx_cpu_up(bool bsp) !(eax & IA32_FEATURE_CONTROL_ENABLE_VMXON_INSIDE_SMX)) ) { printk("CPU%d: VMXON failed: perhaps because of TXT settings " - "in your BIOS configuration?\n", cpu); + "in your BIOS configuration?\n", + cpu); printk(" --> Disable TXT in your BIOS unless using a secure " "bootloader.\n"); return -EINVAL; @@ -704,8 +690,8 @@ void vmx_cpu_down(void) local_irq_save(flags); while ( !list_empty(active_vmcs_list) ) - __vmx_clear_vmcs(list_entry(active_vmcs_list->next, - struct vcpu, arch.hvm.vmx.active_list)); + __vmx_clear_vmcs(list_entry(active_vmcs_list->next, struct vcpu, + arch.hvm.vmx.active_list)); BUG_ON(!(read_cr4() & X86_CR4_VMXE)); this_cpu(vmxon) = 0; @@ -714,7 +700,8 @@ void vmx_cpu_down(void) local_irq_restore(flags); } -struct foreign_vmcs { +struct foreign_vmcs +{ struct vcpu *v; unsigned int count; }; @@ -869,8 +856,8 @@ void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr, ASSERT(!"MSR out of range for interception\n"); } -bool vmx_msr_is_intercepted(struct vmx_msr_bitmap *msr_bitmap, - unsigned int msr, bool is_write) +bool vmx_msr_is_intercepted(struct vmx_msr_bitmap *msr_bitmap, unsigned int msr, + bool is_write) { if ( msr <= 0x1fff ) return test_bit(msr, is_write ? 
msr_bitmap->write_low @@ -883,7 +870,6 @@ bool vmx_msr_is_intercepted(struct vmx_msr_bitmap *msr_bitmap, return true; } - /* * Switch VMCS between layer 1 & 2 guest */ @@ -1010,27 +996,23 @@ static int construct_vmcs(struct vcpu *v) */ v->arch.hvm.vmx.secondary_exec_control &= ~(SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING | - SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | - SECONDARY_EXEC_ENABLE_VPID | - SECONDARY_EXEC_ENABLE_VMCS_SHADOWING | - SECONDARY_EXEC_ENABLE_PML | + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_ENABLE_VPID | + SECONDARY_EXEC_ENABLE_VMCS_SHADOWING | SECONDARY_EXEC_ENABLE_PML | SECONDARY_EXEC_ENABLE_VM_FUNCTIONS | SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS); if ( paging_mode_hap(d) ) { - v->arch.hvm.vmx.exec_control &= ~(CPU_BASED_INVLPG_EXITING | - CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING); + v->arch.hvm.vmx.exec_control &= + ~(CPU_BASED_INVLPG_EXITING | CPU_BASED_CR3_LOAD_EXITING | + CPU_BASED_CR3_STORE_EXITING); } else { v->arch.hvm.vmx.secondary_exec_control &= - ~(SECONDARY_EXEC_ENABLE_EPT | - SECONDARY_EXEC_UNRESTRICTED_GUEST | + ~(SECONDARY_EXEC_ENABLE_EPT | SECONDARY_EXEC_UNRESTRICTED_GUEST | SECONDARY_EXEC_ENABLE_INVPCID); - vmexit_ctl &= ~(VM_EXIT_SAVE_GUEST_PAT | - VM_EXIT_LOAD_HOST_PAT); + vmexit_ctl &= ~(VM_EXIT_SAVE_GUEST_PAT | VM_EXIT_LOAD_HOST_PAT); vmentry_ctl &= ~VM_ENTRY_LOAD_GUEST_PAT; } @@ -1041,9 +1023,9 @@ static int construct_vmcs(struct vcpu *v) { /* Disable virtual apics, TPR */ v->arch.hvm.vmx.secondary_exec_control &= - ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES - | SECONDARY_EXEC_APIC_REGISTER_VIRT - | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); + ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_APIC_REGISTER_VIRT | + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); v->arch.hvm.vmx.exec_control &= ~CPU_BASED_TPR_SHADOW; /* In turn, disable posted interrupts. */ @@ -1213,9 +1195,9 @@ static int construct_vmcs(struct vcpu *v) __vmwrite(GUEST_DR7, 0); __vmwrite(VMCS_LINK_POINTER, ~0UL); - v->arch.hvm.vmx.exception_bitmap = HVM_TRAP_MASK - | (paging_mode_hap(d) ? 0 : (1U << TRAP_page_fault)) - | (v->arch.fully_eager_fpu ? 0 : (1U << TRAP_no_device)); + v->arch.hvm.vmx.exception_bitmap = + HVM_TRAP_MASK | (paging_mode_hap(d) ? 0 : (1U << TRAP_page_fault)) | + (v->arch.fully_eager_fpu ? 0 : (1U << TRAP_no_device)); vmx_update_exception_bitmap(v); v->arch.hvm.guest_cr[0] = X86_CR0_PE | X86_CR0_ET; @@ -1259,7 +1241,7 @@ static int construct_vmcs(struct vcpu *v) rc = vmx_add_msr(v, MSR_FLUSH_CMD, FLUSH_CMD_L1D, VMX_MSR_GUEST_LOADONLY); - out: +out: vmx_vmcs_exit(v); return rc; @@ -1272,8 +1254,9 @@ static int construct_vmcs(struct vcpu *v) * The return pointer is guaranteed to be bounded by start and end. However, * it may point at end, and may be invalid for the caller to dereference. 
*/ -static struct vmx_msr_entry *locate_msr_entry( - struct vmx_msr_entry *start, struct vmx_msr_entry *end, uint32_t msr) +static struct vmx_msr_entry *locate_msr_entry(struct vmx_msr_entry *start, + struct vmx_msr_entry *end, + uint32_t msr) { while ( start < end ) { @@ -1300,22 +1283,22 @@ struct vmx_msr_entry *vmx_find_msr(const struct vcpu *v, uint32_t msr, ASSERT(v == current || !vcpu_runnable(v)); - switch ( type ) + switch (type) { case VMX_MSR_HOST: - start = vmx->host_msr_area; - subend = vmx->host_msr_count; - total = subend; + start = vmx->host_msr_area; + subend = vmx->host_msr_count; + total = subend; break; case VMX_MSR_GUEST: - start = vmx->msr_area; + start = vmx->msr_area; break; case VMX_MSR_GUEST_LOADONLY: - start = vmx->msr_area; + start = vmx->msr_area; substart = subend; - subend = total; + subend = total; break; default: @@ -1341,27 +1324,27 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val, ASSERT(v == current || !vcpu_runnable(v)); - switch ( type ) + switch (type) { case VMX_MSR_HOST: - ptr = &vmx->host_msr_area; + ptr = &vmx->host_msr_area; substart = 0; - subend = vmx->host_msr_count; - total = subend; + subend = vmx->host_msr_count; + total = subend; break; case VMX_MSR_GUEST: - ptr = &vmx->msr_area; + ptr = &vmx->msr_area; substart = 0; - subend = vmx->msr_save_count; - total = vmx->msr_load_count; + subend = vmx->msr_save_count; + total = vmx->msr_load_count; break; case VMX_MSR_GUEST_LOADONLY: - ptr = &vmx->msr_area; + ptr = &vmx->msr_area; substart = vmx->msr_save_count; - subend = vmx->msr_load_count; - total = subend; + subend = vmx->msr_load_count; + total = subend; break; default: @@ -1384,7 +1367,7 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val, addr = virt_to_maddr(*ptr); - switch ( type ) + switch (type) { case VMX_MSR_HOST: __vmwrite(VM_EXIT_MSR_LOAD_ADDR, addr); @@ -1399,8 +1382,8 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val, } start = *ptr; - end = start + total; - ent = locate_msr_entry(start + substart, start + subend, msr); + end = start + total; + ent = locate_msr_entry(start + substart, start + subend, msr); if ( (ent < end) && (ent->index == msr) ) goto found; @@ -1417,7 +1400,7 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val, ent->index = msr; ent->mbz = 0; - switch ( type ) + switch (type) { case VMX_MSR_HOST: __vmwrite(VM_EXIT_MSR_LOAD_COUNT, ++vmx->host_msr_count); @@ -1433,11 +1416,11 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val, } /* Set the msr's value. 
*/ - found: +found: ent->data = val; rc = 0; - out: +out: vmx_vmcs_exit(v); return rc; @@ -1452,22 +1435,22 @@ int vmx_del_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type) ASSERT(v == current || !vcpu_runnable(v)); - switch ( type ) + switch (type) { case VMX_MSR_HOST: - start = vmx->host_msr_area; - subend = vmx->host_msr_count; - total = subend; + start = vmx->host_msr_area; + subend = vmx->host_msr_count; + total = subend; break; case VMX_MSR_GUEST: - start = vmx->msr_area; + start = vmx->msr_area; break; case VMX_MSR_GUEST_LOADONLY: - start = vmx->msr_area; + start = vmx->msr_area; substart = subend; - subend = total; + subend = total; break; default: @@ -1487,7 +1470,7 @@ int vmx_del_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type) vmx_vmcs_enter(v); - switch ( type ) + switch (type) { case VMX_MSR_HOST: __vmwrite(VM_EXIT_MSR_LOAD_COUNT, vmx->host_msr_count--); @@ -1510,15 +1493,13 @@ int vmx_del_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type) void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector) { if ( !test_and_set_bit(vector, v->arch.hvm.vmx.eoi_exit_bitmap) ) - set_bit(vector / BITS_PER_LONG, - &v->arch.hvm.vmx.eoi_exitmap_changed); + set_bit(vector / BITS_PER_LONG, &v->arch.hvm.vmx.eoi_exitmap_changed); } void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector) { if ( test_and_clear_bit(vector, v->arch.hvm.vmx.eoi_exit_bitmap) ) - set_bit(vector / BITS_PER_LONG, - &v->arch.hvm.vmx.eoi_exitmap_changed); + set_bit(vector / BITS_PER_LONG, &v->arch.hvm.vmx.eoi_exitmap_changed); } bool_t vmx_vcpu_pml_enabled(const struct vcpu *v) @@ -1622,7 +1603,7 @@ void vmx_vcpu_flush_pml_buffer(struct vcpu *v) /* Reset PML index */ __vmwrite(GUEST_PML_INDEX, NR_PML_ENTRIES - 1); - out: +out: vmx_vmcs_exit(v); } @@ -1649,7 +1630,7 @@ int vmx_domain_enable_pml(struct domain *d) if ( vmx_domain_pml_enabled(d) ) return 0; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) if ( (rc = vmx_vcpu_enable_pml(v)) != 0 ) goto error; @@ -1657,8 +1638,8 @@ int vmx_domain_enable_pml(struct domain *d) return 0; - error: - for_each_vcpu ( d, v ) +error: + for_each_vcpu (d, v) if ( vmx_vcpu_pml_enabled(v) ) vmx_vcpu_disable_pml(v); return rc; @@ -1679,7 +1660,7 @@ void vmx_domain_disable_pml(struct domain *d) if ( !vmx_domain_pml_enabled(d) ) return; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) vmx_vcpu_disable_pml(v); d->arch.hvm.vmx.status &= ~VMX_DOMAIN_PML_ENABLED; @@ -1698,7 +1679,7 @@ void vmx_domain_flush_pml_buffers(struct domain *d) if ( !vmx_domain_pml_enabled(d) ) return; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) vmx_vcpu_flush_pml_buffer(v); } @@ -1720,7 +1701,7 @@ void vmx_domain_update_eptp(struct domain *d) ASSERT(atomic_read(&d->pause_count)); - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) vmx_vcpu_update_eptp(v, p2m->ept.eptp); ept_sync_domain(p2m); @@ -1737,7 +1718,7 @@ int vmx_create_vmcs(struct vcpu *v) INIT_LIST_HEAD(&vmx->active_list); __vmpclear(vmx->vmcs_pa); vmx->active_cpu = -1; - vmx->launched = 0; + vmx->launched = 0; if ( (rc = construct_vmcs(v)) != 0 ) { @@ -1796,8 +1777,8 @@ void vmx_do_resume(struct vcpu *v) * 2: execute wbinvd on all dirty pCPUs when guest wbinvd exits. * If VT-d engine can force snooping, we don't need to do these. 
*/ - if ( has_arch_pdevs(v->domain) && !iommu_snoop - && !cpu_has_wbinvd_exiting ) + if ( has_arch_pdevs(v->domain) && !iommu_snoop && + !cpu_has_wbinvd_exiting ) { int cpu = v->arch.hvm.vmx.active_cpu; if ( cpu != -1 ) @@ -1810,7 +1791,7 @@ void vmx_do_resume(struct vcpu *v) hvm_migrate_pirqs(v); vmx_set_host_env(v); /* - * Both n1 VMCS and n2 VMCS need to update the host environment after + * Both n1 VMCS and n2 VMCS need to update the host environment after * VCPU migration. The environment of current VMCS is updated in place, * but the action of another VMCS is deferred till it is switched in. */ @@ -1819,9 +1800,9 @@ void vmx_do_resume(struct vcpu *v) hvm_asid_flush_vcpu(v); } - debug_state = v->domain->debugger_attached - || v->domain->arch.monitor.software_breakpoint_enabled - || v->domain->arch.monitor.singlestep_enabled; + debug_state = v->domain->debugger_attached || + v->domain->arch.monitor.software_breakpoint_enabled || + v->domain->arch.monitor.singlestep_enabled; if ( unlikely(v->arch.hvm.debug_state_latch != debug_state) ) { @@ -1846,15 +1827,17 @@ static inline unsigned long vmr(unsigned long field) return vmread_safe(field, &val) ? 0 : val; } -#define vmr16(fld) ({ \ - BUILD_BUG_ON((fld) & 0x6001); \ - (uint16_t)vmr(fld); \ -}) +#define vmr16(fld) \ + ({ \ + BUILD_BUG_ON((fld)&0x6001); \ + (uint16_t) vmr(fld); \ + }) -#define vmr32(fld) ({ \ - BUILD_BUG_ON(((fld) & 0x6001) != 0x4000); \ - (uint32_t)vmr(fld); \ -}) +#define vmr32(fld) \ + ({ \ + BUILD_BUG_ON(((fld)&0x6001) != 0x4000); \ + (uint32_t) vmr(fld); \ + }) static void vmx_dump_sel(char *name, uint32_t selector) { @@ -1864,7 +1847,7 @@ static void vmx_dump_sel(char *name, uint32_t selector) attr = vmr(selector + (GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR)); limit = vmr(selector + (GUEST_ES_LIMIT - GUEST_ES_SELECTOR)); base = vmr(selector + (GUEST_ES_BASE - GUEST_ES_SELECTOR)); - printk("%s: %04x %05x %08x %016"PRIx64"\n", name, sel, attr, limit, base); + printk("%s: %04x %05x %08x %016" PRIx64 "\n", name, sel, attr, limit, base); } static void vmx_dump_sel2(char *name, uint32_t lim) @@ -1873,7 +1856,7 @@ static void vmx_dump_sel2(char *name, uint32_t lim) uint64_t base; limit = vmr(lim); base = vmr(lim + (GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); - printk("%s: %08x %016"PRIx64"\n", name, limit, base); + printk("%s: %08x %016" PRIx64 "\n", name, limit, base); } void vmcs_dump_vcpu(struct vcpu *v) @@ -1907,26 +1890,22 @@ void vmcs_dump_vcpu(struct vcpu *v) printk("*** Guest State ***\n"); printk("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", vmr(GUEST_CR0), vmr(CR0_READ_SHADOW), vmr(CR0_GUEST_HOST_MASK)); - printk("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", - cr4, vmr(CR4_READ_SHADOW), vmr(CR4_GUEST_HOST_MASK)); + printk("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", cr4, + vmr(CR4_READ_SHADOW), vmr(CR4_GUEST_HOST_MASK)); printk("CR3 = 0x%016lx\n", vmr(GUEST_CR3)); - if ( (v->arch.hvm.vmx.secondary_exec_control & - SECONDARY_EXEC_ENABLE_EPT) && + if ( (v->arch.hvm.vmx.secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) && (cr4 & X86_CR4_PAE) && !(vmentry_ctl & VM_ENTRY_IA32E_MODE) ) { - printk("PDPTE0 = 0x%016lx PDPTE1 = 0x%016lx\n", - vmr(GUEST_PDPTE(0)), vmr(GUEST_PDPTE(1))); - printk("PDPTE2 = 0x%016lx PDPTE3 = 0x%016lx\n", - vmr(GUEST_PDPTE(2)), vmr(GUEST_PDPTE(3))); + printk("PDPTE0 = 0x%016lx PDPTE1 = 0x%016lx\n", vmr(GUEST_PDPTE(0)), + vmr(GUEST_PDPTE(1))); + printk("PDPTE2 = 0x%016lx PDPTE3 = 0x%016lx\n", vmr(GUEST_PDPTE(2)), + vmr(GUEST_PDPTE(3))); } printk("RSP = 0x%016lx 
(0x%016lx) RIP = 0x%016lx (0x%016lx)\n", - vmr(GUEST_RSP), regs->rsp, - vmr(GUEST_RIP), regs->rip); - printk("RFLAGS=0x%08lx (0x%08lx) DR7 = 0x%016lx\n", - vmr(GUEST_RFLAGS), regs->rflags, - vmr(GUEST_DR7)); - printk("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", - vmr(GUEST_SYSENTER_ESP), + vmr(GUEST_RSP), regs->rsp, vmr(GUEST_RIP), regs->rip); + printk("RFLAGS=0x%08lx (0x%08lx) DR7 = 0x%016lx\n", vmr(GUEST_RFLAGS), + regs->rflags, vmr(GUEST_DR7)); + printk("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", vmr(GUEST_SYSENTER_ESP), vmr32(GUEST_SYSENTER_CS), vmr(GUEST_SYSENTER_EIP)); printk(" sel attr limit base\n"); vmx_dump_sel(" CS", GUEST_CS_SELECTOR); @@ -1945,7 +1924,8 @@ void vmcs_dump_vcpu(struct vcpu *v) vmr32(GUEST_PREEMPTION_TIMER), vmr32(GUEST_SMBASE)); printk("DebugCtl = 0x%016lx DebugExceptions = 0x%016lx\n", vmr(GUEST_IA32_DEBUGCTL), vmr(GUEST_PENDING_DBG_EXCEPTIONS)); - if ( vmentry_ctl & (VM_ENTRY_LOAD_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_BNDCFGS) ) + if ( vmentry_ctl & + (VM_ENTRY_LOAD_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_BNDCFGS) ) printk("PerfGlobCtl = 0x%016lx BndCfgS = 0x%016lx\n", vmr(GUEST_PERF_GLOBAL_CTRL), vmr(GUEST_BNDCFGS)); printk("Interruptibility = %08x ActivityState = %08x\n", @@ -1955,71 +1935,63 @@ void vmcs_dump_vcpu(struct vcpu *v) printk("InterruptStatus = %04x\n", vmr16(GUEST_INTR_STATUS)); printk("*** Host State ***\n"); - printk("RIP = 0x%016lx (%ps) RSP = 0x%016lx\n", - vmr(HOST_RIP), (void *)vmr(HOST_RIP), vmr(HOST_RSP)); + printk("RIP = 0x%016lx (%ps) RSP = 0x%016lx\n", vmr(HOST_RIP), + (void *)vmr(HOST_RIP), vmr(HOST_RSP)); printk("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n", vmr16(HOST_CS_SELECTOR), vmr16(HOST_SS_SELECTOR), vmr16(HOST_DS_SELECTOR), vmr16(HOST_ES_SELECTOR), vmr16(HOST_FS_SELECTOR), vmr16(HOST_GS_SELECTOR), vmr16(HOST_TR_SELECTOR)); - printk("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n", - vmr(HOST_FS_BASE), vmr(HOST_GS_BASE), vmr(HOST_TR_BASE)); - printk("GDTBase=%016lx IDTBase=%016lx\n", - vmr(HOST_GDTR_BASE), vmr(HOST_IDTR_BASE)); - printk("CR0=%016lx CR3=%016lx CR4=%016lx\n", - vmr(HOST_CR0), vmr(HOST_CR3), vmr(HOST_CR4)); - printk("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", - vmr(HOST_SYSENTER_ESP), + printk("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n", vmr(HOST_FS_BASE), + vmr(HOST_GS_BASE), vmr(HOST_TR_BASE)); + printk("GDTBase=%016lx IDTBase=%016lx\n", vmr(HOST_GDTR_BASE), + vmr(HOST_IDTR_BASE)); + printk("CR0=%016lx CR3=%016lx CR4=%016lx\n", vmr(HOST_CR0), vmr(HOST_CR3), + vmr(HOST_CR4)); + printk("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", vmr(HOST_SYSENTER_ESP), vmr32(HOST_SYSENTER_CS), vmr(HOST_SYSENTER_EIP)); if ( vmexit_ctl & (VM_EXIT_LOAD_HOST_PAT | VM_EXIT_LOAD_HOST_EFER) ) - printk("EFER = 0x%016lx PAT = 0x%016lx\n", vmr(HOST_EFER), vmr(HOST_PAT)); + printk("EFER = 0x%016lx PAT = 0x%016lx\n", vmr(HOST_EFER), + vmr(HOST_PAT)); if ( vmexit_ctl & VM_EXIT_LOAD_PERF_GLOBAL_CTRL ) - printk("PerfGlobCtl = 0x%016lx\n", - vmr(HOST_PERF_GLOBAL_CTRL)); + printk("PerfGlobCtl = 0x%016lx\n", vmr(HOST_PERF_GLOBAL_CTRL)); printk("*** Control State ***\n"); printk("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n", - vmr32(PIN_BASED_VM_EXEC_CONTROL), - vmr32(CPU_BASED_VM_EXEC_CONTROL), + vmr32(PIN_BASED_VM_EXEC_CONTROL), vmr32(CPU_BASED_VM_EXEC_CONTROL), vmr32(SECONDARY_VM_EXEC_CONTROL)); printk("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl); printk("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n", - vmr32(EXCEPTION_BITMAP), - vmr32(PAGE_FAULT_ERROR_CODE_MASK), + vmr32(EXCEPTION_BITMAP), 
vmr32(PAGE_FAULT_ERROR_CODE_MASK), vmr32(PAGE_FAULT_ERROR_CODE_MATCH)); printk("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n", - vmr32(VM_ENTRY_INTR_INFO), - vmr32(VM_ENTRY_EXCEPTION_ERROR_CODE), + vmr32(VM_ENTRY_INTR_INFO), vmr32(VM_ENTRY_EXCEPTION_ERROR_CODE), vmr32(VM_ENTRY_INSTRUCTION_LEN)); printk("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n", - vmr32(VM_EXIT_INTR_INFO), - vmr32(VM_EXIT_INTR_ERROR_CODE), + vmr32(VM_EXIT_INTR_INFO), vmr32(VM_EXIT_INTR_ERROR_CODE), vmr32(VM_EXIT_INSTRUCTION_LEN)); - printk(" reason=%08x qualification=%016lx\n", - vmr32(VM_EXIT_REASON), vmr(EXIT_QUALIFICATION)); - printk("IDTVectoring: info=%08x errcode=%08x\n", - vmr32(IDT_VECTORING_INFO), vmr32(IDT_VECTORING_ERROR_CODE)); + printk(" reason=%08x qualification=%016lx\n", vmr32(VM_EXIT_REASON), + vmr(EXIT_QUALIFICATION)); + printk("IDTVectoring: info=%08x errcode=%08x\n", vmr32(IDT_VECTORING_INFO), + vmr32(IDT_VECTORING_ERROR_CODE)); printk("TSC Offset = 0x%016lx TSC Multiplier = 0x%016lx\n", vmr(TSC_OFFSET), vmr(TSC_MULTIPLIER)); if ( (v->arch.hvm.vmx.exec_control & CPU_BASED_TPR_SHADOW) || (vmx_pin_based_exec_control & PIN_BASED_POSTED_INTERRUPT) ) printk("TPR Threshold = 0x%02x PostedIntrVec = 0x%02x\n", vmr32(TPR_THRESHOLD), vmr16(POSTED_INTR_NOTIFICATION_VECTOR)); - if ( (v->arch.hvm.vmx.secondary_exec_control & - SECONDARY_EXEC_ENABLE_EPT) ) + if ( (v->arch.hvm.vmx.secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) ) printk("EPT pointer = 0x%016lx EPTP index = 0x%04x\n", vmr(EPT_POINTER), vmr16(EPTP_INDEX)); n = vmr32(CR3_TARGET_COUNT); for ( i = 0; i + 1 < n; i += 2 ) - printk("CR3 target%u=%016lx target%u=%016lx\n", - i, vmr(CR3_TARGET_VALUE(i)), - i + 1, vmr(CR3_TARGET_VALUE(i + 1))); + printk("CR3 target%u=%016lx target%u=%016lx\n", i, + vmr(CR3_TARGET_VALUE(i)), i + 1, vmr(CR3_TARGET_VALUE(i + 1))); if ( i < n ) printk("CR3 target%u=%016lx\n", i, vmr(CR3_TARGET_VALUE(i))); if ( v->arch.hvm.vmx.secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING ) - printk("PLE Gap=%08x Window=%08x\n", - vmr32(PLE_GAP), vmr32(PLE_WINDOW)); + printk("PLE Gap=%08x Window=%08x\n", vmr32(PLE_GAP), vmr32(PLE_WINDOW)); if ( v->arch.hvm.vmx.secondary_exec_control & (SECONDARY_EXEC_ENABLE_VPID | SECONDARY_EXEC_ENABLE_VM_FUNCTIONS) ) printk("Virtual processor ID = 0x%04x VMfunc controls = %016lx\n", @@ -2032,17 +2004,17 @@ static void vmcs_dump(unsigned char ch) { struct domain *d; struct vcpu *v; - + printk("*********** VMCS Areas **************\n"); rcu_read_lock(&domlist_read_lock); - for_each_domain ( d ) + for_each_domain (d) { if ( !is_hvm_domain(d) ) continue; printk("\n>>> Domain %d <<<\n", d->domain_id); - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { printk("\tVCPU %d\n", v->vcpu_id); vmcs_dump_vcpu(v); @@ -2064,14 +2036,14 @@ static void __init __maybe_unused build_assertions(void) struct vmx_msr_bitmap bitmap; /* Check vmx_msr_bitmap layoug against hardware expectations. 
*/ - BUILD_BUG_ON(sizeof(bitmap) != PAGE_SIZE); - BUILD_BUG_ON(sizeof(bitmap.read_low) != 1024); - BUILD_BUG_ON(sizeof(bitmap.read_high) != 1024); - BUILD_BUG_ON(sizeof(bitmap.write_low) != 1024); + BUILD_BUG_ON(sizeof(bitmap) != PAGE_SIZE); + BUILD_BUG_ON(sizeof(bitmap.read_low) != 1024); + BUILD_BUG_ON(sizeof(bitmap.read_high) != 1024); + BUILD_BUG_ON(sizeof(bitmap.write_low) != 1024); BUILD_BUG_ON(sizeof(bitmap.write_high) != 1024); - BUILD_BUG_ON(offsetof(struct vmx_msr_bitmap, read_low) != 0); - BUILD_BUG_ON(offsetof(struct vmx_msr_bitmap, read_high) != 1024); - BUILD_BUG_ON(offsetof(struct vmx_msr_bitmap, write_low) != 2048); + BUILD_BUG_ON(offsetof(struct vmx_msr_bitmap, read_low) != 0); + BUILD_BUG_ON(offsetof(struct vmx_msr_bitmap, read_high) != 1024); + BUILD_BUG_ON(offsetof(struct vmx_msr_bitmap, write_low) != 2048); BUILD_BUG_ON(offsetof(struct vmx_msr_bitmap, write_high) != 3072); } diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index 725dd88c13..1fb993d884 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -65,7 +65,7 @@ boolean_param("force-ept", opt_force_ept); static void vmx_ctxt_switch_from(struct vcpu *v); static void vmx_ctxt_switch_to(struct vcpu *v); -static int vmx_alloc_vlapic_mapping(struct domain *d); +static int vmx_alloc_vlapic_mapping(struct domain *d); static void vmx_free_vlapic_mapping(struct domain *d); static void vmx_install_vlapic_mapping(struct vcpu *v); static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr, @@ -79,11 +79,12 @@ static void vmx_invlpg(struct vcpu *v, unsigned long linear); /* Values for domain's ->arch.hvm_domain.pi_ops.flags. */ #define PI_CSW_FROM (1u << 0) -#define PI_CSW_TO (1u << 1) +#define PI_CSW_TO (1u << 1) -struct vmx_pi_blocking_vcpu { - struct list_head list; - spinlock_t lock; +struct vmx_pi_blocking_vcpu +{ + struct list_head list; + spinlock_t lock; }; /* @@ -107,12 +108,12 @@ static void vmx_vcpu_block(struct vcpu *v) unsigned int dest; spinlock_t *old_lock; spinlock_t *pi_blocking_list_lock = - &per_cpu(vmx_pi_blocking, v->processor).lock; + &per_cpu(vmx_pi_blocking, v->processor).lock; struct pi_desc *pi_desc = &v->arch.hvm.vmx.pi_desc; spin_lock_irqsave(pi_blocking_list_lock, flags); - old_lock = cmpxchg(&v->arch.hvm.vmx.pi_blocking.lock, NULL, - pi_blocking_list_lock); + old_lock = + cmpxchg(&v->arch.hvm.vmx.pi_blocking.lock, NULL, pi_blocking_list_lock); /* * 'v->arch.hvm.vmx.pi_blocking.lock' should be NULL before @@ -242,10 +243,10 @@ void vmx_pi_desc_fixup(unsigned int cpu) else { /* - * We need to find an online cpu as the NDST of the PI descriptor, it - * doesn't matter whether it is within the cpupool of the domain or - * not. As long as it is online, the vCPU will be woken up once the - * notification event arrives. + * We need to find an online cpu as the NDST of the PI descriptor, + * it doesn't matter whether it is within the cpupool of the domain + * or not. As long as it is online, the vCPU will be woken up once + * the notification event arrives. */ new_cpu = cpumask_any(&cpu_online_map); new_lock = &per_cpu(vmx_pi_blocking, new_cpu).lock; @@ -256,7 +257,8 @@ void vmx_pi_desc_fixup(unsigned int cpu) dest = cpu_physical_id(new_cpu); write_atomic(&vmx->pi_desc.ndst, - x2apic_enabled ? dest : MASK_INSR(dest, PI_xAPIC_NDST_MASK)); + x2apic_enabled ? 
dest + : MASK_INSR(dest, PI_xAPIC_NDST_MASK)); list_move(&vmx->pi_blocking.list, &per_cpu(vmx_pi_blocking, new_cpu).list); @@ -335,7 +337,7 @@ void vmx_pi_hooks_assign(struct domain *d) */ d->arch.hvm.pi_ops.flags = PI_CSW_FROM | PI_CSW_TO; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { unsigned int dest = cpu_physical_id(v->processor); struct pi_desc *pi_desc = &v->arch.hvm.vmx.pi_desc; @@ -345,7 +347,8 @@ void vmx_pi_hooks_assign(struct domain *d) * has already got called. */ (void)cmpxchg(&pi_desc->ndst, APIC_INVALID_DEST, - x2apic_enabled ? dest : MASK_INSR(dest, PI_xAPIC_NDST_MASK)); + x2apic_enabled ? dest + : MASK_INSR(dest, PI_xAPIC_NDST_MASK)); } d->arch.hvm.pi_ops.vcpu_block = vmx_vcpu_block; @@ -379,15 +382,15 @@ void vmx_pi_hooks_deassign(struct domain *d) * clear the 'SN' bit when the vCPU becomes running next time. After * that, No matter which status(runnable, running or block) the vCPU is in, * the 'SN' bit will keep clear for the 'switch_from' hook function that set - * the 'SN' bit has been removed. At that time, the 'switch_to' hook function - * is also useless. Considering the function doesn't do harm to the whole - * system, leave it here until we find a clean solution to deassign the - * 'switch_to' hook function. + * the 'SN' bit has been removed. At that time, the 'switch_to' hook + * function is also useless. Considering the function doesn't do harm to the + * whole system, leave it here until we find a clean solution to deassign + * the 'switch_to' hook function. */ d->arch.hvm.pi_ops.vcpu_block = NULL; d->arch.hvm.pi_ops.flags = PI_CSW_TO; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) vmx_pi_unblock_vcpu(v); domain_unpause(d); @@ -397,7 +400,7 @@ static int vmx_domain_initialise(struct domain *d) { static const struct arch_csw csw = { .from = vmx_ctxt_switch_from, - .to = vmx_ctxt_switch_to, + .to = vmx_ctxt_switch_to, .tail = vmx_do_resume, }; int rc; @@ -433,8 +436,7 @@ static int vmx_vcpu_initialise(struct vcpu *v) if ( (rc = vmx_create_vmcs(v)) != 0 ) { - dprintk(XENLOG_WARNING, - "Failed to create VMCS for vcpu %d: err=%d.\n", + dprintk(XENLOG_WARNING, "Failed to create VMCS for vcpu %d: err=%d.\n", v->vcpu_id, rc); return rc; } @@ -490,8 +492,8 @@ static void vmx_vcpu_destroy(struct vcpu *v) static void vmx_restore_host_msrs(void) { /* Relies on the SYSCALL trampoline being at the start of the stubs. 
*/ - wrmsrl(MSR_STAR, XEN_MSR_STAR); - wrmsrl(MSR_LSTAR, this_cpu(stubs.addr)); + wrmsrl(MSR_STAR, XEN_MSR_STAR); + wrmsrl(MSR_LSTAR, this_cpu(stubs.addr)); wrmsrl(MSR_SYSCALL_MASK, XEN_SYSCALL_MASK); } @@ -507,9 +509,9 @@ static void vmx_save_guest_msrs(struct vcpu *v) static void vmx_restore_guest_msrs(struct vcpu *v) { wrgsshadow(v->arch.hvm.vmx.shadow_gs); - wrmsrl(MSR_STAR, v->arch.hvm.vmx.star); - wrmsrl(MSR_LSTAR, v->arch.hvm.vmx.lstar); - wrmsrl(MSR_SYSCALL_MASK, v->arch.hvm.vmx.sfmask); + wrmsrl(MSR_STAR, v->arch.hvm.vmx.star); + wrmsrl(MSR_LSTAR, v->arch.hvm.vmx.lstar); + wrmsrl(MSR_SYSCALL_MASK, v->arch.hvm.vmx.sfmask); if ( cpu_has_msr_tsc_aux ) wrmsr_tsc_aux(v->arch.msrs->tsc_aux); @@ -526,8 +528,8 @@ void vmx_update_cpu_exec_control(struct vcpu *v) void vmx_update_secondary_exec_control(struct vcpu *v) { if ( nestedhvm_vcpu_in_guestmode(v) ) - nvmx_update_secondary_exec_control(v, - v->arch.hvm.vmx.secondary_exec_control); + nvmx_update_secondary_exec_control( + v, v->arch.hvm.vmx.secondary_exec_control); else __vmwrite(SECONDARY_VM_EXEC_CONTROL, v->arch.hvm.vmx.secondary_exec_control); @@ -536,7 +538,8 @@ void vmx_update_secondary_exec_control(struct vcpu *v) void vmx_update_exception_bitmap(struct vcpu *v) { u32 bitmap = unlikely(v->arch.hvm.vmx.vmx_realmode) - ? 0xffffffffu : v->arch.hvm.vmx.exception_bitmap; + ? 0xffffffffu + : v->arch.hvm.vmx.exception_bitmap; if ( nestedhvm_vcpu_in_guestmode(v) ) nvmx_update_exception_bitmap(v, bitmap); @@ -569,9 +572,9 @@ static void vmx_cpuid_policy_changed(struct vcpu *v) /* MSR_PRED_CMD is safe to pass through if the guest knows about it. */ if ( cp->feat.ibrsb || cp->extd.ibpb ) - vmx_clear_msr_intercept(v, MSR_PRED_CMD, VMX_MSR_RW); + vmx_clear_msr_intercept(v, MSR_PRED_CMD, VMX_MSR_RW); else - vmx_set_msr_intercept(v, MSR_PRED_CMD, VMX_MSR_RW); + vmx_set_msr_intercept(v, MSR_PRED_CMD, VMX_MSR_RW); /* MSR_FLUSH_CMD is safe to pass through if the guest knows about it. */ if ( cp->feat.l1d_flush ) @@ -609,7 +612,7 @@ static void vmx_save_dr(struct vcpu *v) v->arch.dr[1] = read_debugreg(1); v->arch.dr[2] = read_debugreg(2); v->arch.dr[3] = read_debugreg(3); - v->arch.dr6 = read_debugreg(6); + v->arch.dr6 = read_debugreg(6); /* DR7 must be saved as it is used by vmx_restore_dr(). 
*/ __vmread(GUEST_DR7, &v->arch.dr7); } @@ -665,8 +668,8 @@ static void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c) vmx_vmcs_exit(v); } -static int vmx_restore_cr0_cr3( - struct vcpu *v, unsigned long cr0, unsigned long cr3) +static int vmx_restore_cr0_cr3(struct vcpu *v, unsigned long cr0, + unsigned long cr3) { struct page_info *page = NULL; @@ -674,8 +677,8 @@ static int vmx_restore_cr0_cr3( { if ( cr0 & X86_CR0_PG ) { - page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, - NULL, P2M_ALLOC); + page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, NULL, + P2M_ALLOC); if ( !page ) { gdprintk(XENLOG_ERR, "Invalid CR3 value=%#lx\n", cr3); @@ -705,16 +708,16 @@ static int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c) if ( (c->pending_type == 1) || (c->pending_type > 6) || (c->pending_reserved != 0) ) { - dprintk(XENLOG_ERR, "%pv: Invalid pending event %#"PRIx32"\n", - v, c->pending_event); + dprintk(XENLOG_ERR, "%pv: Invalid pending event %#" PRIx32 "\n", v, + c->pending_event); return -EINVAL; } if ( c->pending_error_valid && c->error_code != (uint16_t)c->error_code ) { - dprintk(XENLOG_ERR, "%pv: Invalid error code %#"PRIx32"\n", - v, c->error_code); + dprintk(XENLOG_ERR, "%pv: Invalid error code %#" PRIx32 "\n", v, + c->error_code); return -EINVAL; } } @@ -741,7 +744,7 @@ static int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c) if ( c->pending_valid && hvm_event_needs_reinjection(c->pending_type, c->pending_vector) ) { - gdprintk(XENLOG_INFO, "Re-injecting %#"PRIx32", %#"PRIx32"\n", + gdprintk(XENLOG_INFO, "Re-injecting %#" PRIx32 ", %#" PRIx32 "\n", c->pending_event, c->error_code); __vmwrite(VM_ENTRY_INTR_INFO, c->pending_event); __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, c->error_code); @@ -760,23 +763,22 @@ static int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c) static void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data) { - data->shadow_gs = v->arch.hvm.vmx.shadow_gs; - data->msr_lstar = v->arch.hvm.vmx.lstar; - data->msr_star = v->arch.hvm.vmx.star; - data->msr_cstar = v->arch.hvm.vmx.cstar; + data->shadow_gs = v->arch.hvm.vmx.shadow_gs; + data->msr_lstar = v->arch.hvm.vmx.lstar; + data->msr_star = v->arch.hvm.vmx.star; + data->msr_cstar = v->arch.hvm.vmx.cstar; data->msr_syscall_mask = v->arch.hvm.vmx.sfmask; } static void vmx_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data) { v->arch.hvm.vmx.shadow_gs = data->shadow_gs; - v->arch.hvm.vmx.star = data->msr_star; - v->arch.hvm.vmx.lstar = data->msr_lstar; - v->arch.hvm.vmx.cstar = data->msr_cstar; - v->arch.hvm.vmx.sfmask = data->msr_syscall_mask; + v->arch.hvm.vmx.star = data->msr_star; + v->arch.hvm.vmx.lstar = data->msr_lstar; + v->arch.hvm.vmx.cstar = data->msr_cstar; + v->arch.hvm.vmx.sfmask = data->msr_syscall_mask; } - static void vmx_save_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt) { vmx_save_cpu_state(v, ctxt); @@ -833,7 +835,7 @@ static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt) for ( i = 0; i < ctxt->count; ++i ) { - switch ( ctxt->msr[i].index ) + switch (ctxt->msr[i].index) { case MSR_IA32_BNDCFGS: if ( cpu_has_mpx && cpu_has_vmx_mpx && @@ -938,7 +940,6 @@ static void vmx_ctxt_switch_to(struct vcpu *v) vmx_pi_switch_to(v); } - unsigned int vmx_get_cpl(void) { unsigned long attr; @@ -969,8 +970,8 @@ static unsigned int _vmx_get_cpl(struct vcpu *v) * in the VMCS to avoid lots of shuffling on vmenter and vmexit, and translate * in these accessors. 
*/ -#define rm_cs_attr 0x9b -#define rm_ds_attr 0x93 +#define rm_cs_attr 0x9b +#define rm_ds_attr 0x93 #define vm86_ds_attr 0xf3 #define vm86_tr_attr 0x8b @@ -992,7 +993,8 @@ static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg, if ( !warned ) { warned = 1; - printk(XENLOG_WARNING "Segment register inaccessible for %pv\n" + printk(XENLOG_WARNING + "Segment register inaccessible for %pv\n" "(If you see this outside of debugging activity," " please report to xen-devel@lists.xenproject.org)\n", v); @@ -1001,32 +1003,32 @@ static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg, return; } - switch ( seg ) + switch (seg) { case x86_seg_es ... x86_seg_gs: __vmread(GUEST_SEG_SELECTOR(seg), &sel); - __vmread(GUEST_SEG_LIMIT(seg), &limit); - __vmread(GUEST_SEG_BASE(seg), ®->base); + __vmread(GUEST_SEG_LIMIT(seg), &limit); + __vmread(GUEST_SEG_BASE(seg), ®->base); __vmread(GUEST_SEG_AR_BYTES(seg), &attr); break; case x86_seg_tr: __vmread(GUEST_TR_SELECTOR, &sel); - __vmread(GUEST_TR_LIMIT, &limit); - __vmread(GUEST_TR_BASE, ®->base); + __vmread(GUEST_TR_LIMIT, &limit); + __vmread(GUEST_TR_BASE, ®->base); __vmread(GUEST_TR_AR_BYTES, &attr); break; case x86_seg_gdtr: __vmread(GUEST_GDTR_LIMIT, &limit); - __vmread(GUEST_GDTR_BASE, ®->base); + __vmread(GUEST_GDTR_BASE, ®->base); break; case x86_seg_idtr: __vmread(GUEST_IDTR_LIMIT, &limit); - __vmread(GUEST_IDTR_BASE, ®->base); + __vmread(GUEST_IDTR_BASE, ®->base); break; case x86_seg_ldtr: __vmread(GUEST_LDTR_SELECTOR, &sel); - __vmread(GUEST_LDTR_LIMIT, &limit); - __vmread(GUEST_LDTR_BASE, ®->base); + __vmread(GUEST_LDTR_LIMIT, &limit); + __vmread(GUEST_LDTR_BASE, ®->base); __vmread(GUEST_LDTR_AR_BYTES, &attr); break; default: @@ -1047,25 +1049,25 @@ static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg, (!(attr & (1u << 16)) << 7) | (attr & 0x7f) | ((attr >> 4) & 0xf00); /* Adjust for virtual 8086 mode */ - if ( v->arch.hvm.vmx.vmx_realmode && seg <= x86_seg_tr - && !(v->arch.hvm.vmx.vm86_segment_mask & (1u << seg)) ) + if ( v->arch.hvm.vmx.vmx_realmode && seg <= x86_seg_tr && + !(v->arch.hvm.vmx.vm86_segment_mask & (1u << seg)) ) { struct segment_register *sreg = &v->arch.hvm.vmx.vm86_saved_seg[seg]; - if ( seg == x86_seg_tr ) + if ( seg == x86_seg_tr ) *reg = *sreg; else if ( reg->base != sreg->base || seg == x86_seg_ss ) { /* If the guest's reloaded the segment, remember the new version. - * We can't tell if the guest reloaded the segment with another + * We can't tell if the guest reloaded the segment with another * one that has the same base. By default we assume it hasn't, * since we don't want to lose big-real-mode segment attributes, * but for SS we assume it has: the Ubuntu graphical bootloader - * does this and gets badly confused if we leave the old SS in + * does this and gets badly confused if we leave the old SS in * place. */ reg->attr = (seg == x86_seg_cs ? 
rm_cs_attr : rm_ds_attr); *sreg = *reg; } - else + else { /* Always give realmode guests a selector that matches the base * but keep the attr and limit from before */ @@ -1091,8 +1093,8 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg, { /* Remember the proper contents */ v->arch.hvm.vmx.vm86_saved_seg[seg] = *reg; - - if ( seg == x86_seg_tr ) + + if ( seg == x86_seg_tr ) { const struct domain *d = v->domain; uint64_t val = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED]; @@ -1106,8 +1108,8 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg, if ( val & VM86_TSS_UPDATED ) { hvm_prepare_vm86_tss(v, base, limit); - cmpxchg(&d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED], - val, val & ~VM86_TSS_UPDATED); + cmpxchg(&d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED], val, + val & ~VM86_TSS_UPDATED); } v->arch.hvm.vmx.vm86_segment_mask &= ~(1u << seg); } @@ -1127,7 +1129,7 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg, limit = 0xffff; v->arch.hvm.vmx.vm86_segment_mask &= ~(1u << seg); } - else + else v->arch.hvm.vmx.vm86_segment_mask |= (1u << seg); } } @@ -1140,12 +1142,12 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg, vmx_vmcs_enter(v); - switch ( seg ) + switch (seg) { case x86_seg_es ... x86_seg_gs: __vmwrite(GUEST_SEG_SELECTOR(seg), sel); - __vmwrite(GUEST_SEG_LIMIT(seg), limit); - __vmwrite(GUEST_SEG_BASE(seg), base); + __vmwrite(GUEST_SEG_LIMIT(seg), limit); + __vmwrite(GUEST_SEG_BASE(seg), base); __vmwrite(GUEST_SEG_AR_BYTES(seg), attr); break; case x86_seg_tr: @@ -1247,15 +1249,14 @@ static void vmx_handle_cd(struct vcpu *v, unsigned long value) * For EPT, set guest IA32_PAT fields as UC so that guest * memory type are all UC. */ - u64 uc_pat = - ((uint64_t)PAT_TYPE_UNCACHABLE) | /* PAT0 */ - ((uint64_t)PAT_TYPE_UNCACHABLE << 8) | /* PAT1 */ - ((uint64_t)PAT_TYPE_UNCACHABLE << 16) | /* PAT2 */ - ((uint64_t)PAT_TYPE_UNCACHABLE << 24) | /* PAT3 */ - ((uint64_t)PAT_TYPE_UNCACHABLE << 32) | /* PAT4 */ - ((uint64_t)PAT_TYPE_UNCACHABLE << 40) | /* PAT5 */ - ((uint64_t)PAT_TYPE_UNCACHABLE << 48) | /* PAT6 */ - ((uint64_t)PAT_TYPE_UNCACHABLE << 56); /* PAT7 */ + u64 uc_pat = ((uint64_t)PAT_TYPE_UNCACHABLE) | /* PAT0 */ + ((uint64_t)PAT_TYPE_UNCACHABLE << 8) | /* PAT1 */ + ((uint64_t)PAT_TYPE_UNCACHABLE << 16) | /* PAT2 */ + ((uint64_t)PAT_TYPE_UNCACHABLE << 24) | /* PAT3 */ + ((uint64_t)PAT_TYPE_UNCACHABLE << 32) | /* PAT4 */ + ((uint64_t)PAT_TYPE_UNCACHABLE << 40) | /* PAT5 */ + ((uint64_t)PAT_TYPE_UNCACHABLE << 48) | /* PAT6 */ + ((uint64_t)PAT_TYPE_UNCACHABLE << 56); /* PAT7 */ vmx_get_guest_pat(v, pat); vmx_set_guest_pat(v, uc_pat); @@ -1332,12 +1333,12 @@ static void vmx_init_hypercall_page(struct domain *d, void *hypercall_page) continue; p = (char *)(hypercall_page + (i * 32)); - *(u8 *)(p + 0) = 0xb8; /* mov imm32, %eax */ + *(u8 *)(p + 0) = 0xb8; /* mov imm32, %eax */ *(u32 *)(p + 1) = i; - *(u8 *)(p + 5) = 0x0f; /* vmcall */ - *(u8 *)(p + 6) = 0x01; - *(u8 *)(p + 7) = 0xc1; - *(u8 *)(p + 8) = 0xc3; /* ret */ + *(u8 *)(p + 5) = 0x0f; /* vmcall */ + *(u8 *)(p + 6) = 0x01; + *(u8 *)(p + 7) = 0xc1; + *(u8 *)(p + 8) = 0xc3; /* ret */ } /* Don't support HYPERVISOR_iret at the moment */ @@ -1376,12 +1377,11 @@ static void vmx_load_pdptrs(struct vcpu *v) page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt, P2M_UNSHARE); if ( !page ) { - /* Ideally you don't want to crash but rather go into a wait + /* Ideally you don't want to crash but rather go into a wait * queue, but this is 
the wrong place. We're holding at least * the paging lock */ - gdprintk(XENLOG_ERR, - "Bad cr3 on load pdptrs gfn %lx type %d\n", - cr3 >> PAGE_SHIFT, (int) p2mt); + gdprintk(XENLOG_ERR, "Bad cr3 on load pdptrs gfn %lx type %d\n", + cr3 >> PAGE_SHIFT, (int)p2mt); goto crash; } @@ -1408,7 +1408,7 @@ static void vmx_load_pdptrs(struct vcpu *v) put_page(page); return; - crash: +crash: domain_crash(v->domain); } @@ -1436,7 +1436,7 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr, { vmx_vmcs_enter(v); - switch ( cr ) + switch (cr) { case 0: { @@ -1453,8 +1453,8 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr, { /* Manage GUEST_CR3 when CR0.PE=0. */ uint32_t old_ctls = v->arch.hvm.vmx.exec_control; - uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING); + uint32_t cr3_ctls = + (CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); v->arch.hvm.vmx.exec_control &= ~cr3_ctls; if ( !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) ) @@ -1510,7 +1510,7 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr, else { for ( s = 0; s < ARRAY_SIZE(reg); s++ ) - if ( !(v->arch.hvm.vmx.vm86_segment_mask & (1<<s)) ) + if ( !(v->arch.hvm.vmx.vm86_segment_mask & (1 << s)) ) hvm_set_segment_register( v, s, &v->arch.hvm.vmx.vm86_saved_seg[s]); } @@ -1518,8 +1518,7 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr, vmx_update_exception_bitmap(v); } - v->arch.hvm.hw_cr[0] = - v->arch.hvm.guest_cr[0] | hw_cr0_mask; + v->arch.hvm.hw_cr[0] = v->arch.hvm.guest_cr[0] | hw_cr0_mask; __vmwrite(GUEST_CR0, v->arch.hvm.hw_cr[0]); } /* Fallthrough: Changing CR0 can change some bits in real CR4. */ @@ -1587,21 +1586,22 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr, (HVM_CR4_HOST_MASK | X86_CR4_PKE | ~hvm_cr4_guest_valid_bits(v->domain, false)); - v->arch.hvm.vmx.cr4_host_mask |= v->arch.hvm.vmx.vmx_realmode ? - X86_CR4_VME : 0; - v->arch.hvm.vmx.cr4_host_mask |= !hvm_paging_enabled(v) ? - (X86_CR4_PSE | X86_CR4_SMEP | - X86_CR4_SMAP) - : 0; + v->arch.hvm.vmx.cr4_host_mask |= + v->arch.hvm.vmx.vmx_realmode ? X86_CR4_VME : 0; + v->arch.hvm.vmx.cr4_host_mask |= + !hvm_paging_enabled(v) + ? (X86_CR4_PSE | X86_CR4_SMEP | X86_CR4_SMAP) + : 0; if ( v->domain->arch.monitor.write_ctrlreg_enabled & monitor_ctrlreg_bitmask(VM_EVENT_X86_CR4) ) v->arch.hvm.vmx.cr4_host_mask |= - ~v->domain->arch.monitor.write_ctrlreg_mask[VM_EVENT_X86_CR4]; + ~v->domain->arch.monitor + .write_ctrlreg_mask[VM_EVENT_X86_CR4]; if ( nestedhvm_vcpu_in_guestmode(v) ) /* Add the nested host mask to get the more restrictive one.
*/ - v->arch.hvm.vmx.cr4_host_mask |= get_vvmcs(v, - CR4_GUEST_HOST_MASK); + v->arch.hvm.vmx.cr4_host_mask |= + get_vvmcs(v, CR4_GUEST_HOST_MASK); __vmwrite(CR4_GUEST_HOST_MASK, v->arch.hvm.vmx.cr4_host_mask); } @@ -1637,7 +1637,7 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr, static void vmx_update_guest_efer(struct vcpu *v) { unsigned long entry_ctls, guest_efer = v->arch.hvm.guest_efer, - xen_efer = read_efer(); + xen_efer = read_efer(); if ( paging_mode_shadow(v->domain) ) { @@ -1717,12 +1717,13 @@ static void vmx_update_guest_efer(struct vcpu *v) vmx_set_msr_intercept(v, MSR_EFER, VMX_MSR_R); } -void nvmx_enqueue_n2_exceptions(struct vcpu *v, - unsigned long intr_fields, int error_code, uint8_t source) +void nvmx_enqueue_n2_exceptions(struct vcpu *v, unsigned long intr_fields, + int error_code, uint8_t source) { struct nestedvmx *nvmx = &vcpu_2_nvmx(v); - if ( !(nvmx->intr.intr_info & INTR_INFO_VALID_MASK) ) { + if ( !(nvmx->intr.intr_info & INTR_INFO_VALID_MASK) ) + { /* enqueue the exception till the VMCS switch back to L1 */ nvmx->intr.intr_info = intr_fields; nvmx->intr.error_code = error_code; @@ -1731,9 +1732,11 @@ void nvmx_enqueue_n2_exceptions(struct vcpu *v, return; } else - gdprintk(XENLOG_ERR, "Double Fault on Nested Guest: exception %lx %x" - "on %lx %x\n", intr_fields, error_code, - nvmx->intr.intr_info, nvmx->intr.error_code); + gdprintk(XENLOG_ERR, + "Double Fault on Nested Guest: exception %lx %x" + "on %lx %x\n", + intr_fields, error_code, nvmx->intr.intr_info, + nvmx->intr.error_code); } static int nvmx_vmexit_event(struct vcpu *v, const struct x86_event *event) @@ -1768,7 +1771,7 @@ static void __vmx_inject_exception(int trap, int type, int error_code) __vmwrite(VM_ENTRY_INTR_INFO, intr_fields); - /* Can't inject exceptions in virtual 8086 mode because they would + /* Can't inject exceptions in virtual 8086 mode because they would * use the protected-mode IDT. Emulate at the next vmenter instead. 
*/ if ( curr->arch.hvm.vmx.vmx_realmode ) curr->arch.hvm.vmx.vmx_emulate = 1; @@ -1777,41 +1780,46 @@ static void __vmx_inject_exception(int trap, int type, int error_code) void vmx_inject_extint(int trap, uint8_t source) { struct vcpu *v = current; - u32 pin_based_cntrl; + u32 pin_based_cntrl; - if ( nestedhvm_vcpu_in_guestmode(v) ) { + if ( nestedhvm_vcpu_in_guestmode(v) ) + { pin_based_cntrl = get_vvmcs(v, PIN_BASED_VM_EXEC_CONTROL); - if ( pin_based_cntrl & PIN_BASED_EXT_INTR_MASK ) { - nvmx_enqueue_n2_exceptions (v, - INTR_INFO_VALID_MASK | - MASK_INSR(X86_EVENTTYPE_EXT_INTR, INTR_INFO_INTR_TYPE_MASK) | - MASK_INSR(trap, INTR_INFO_VECTOR_MASK), - X86_EVENT_NO_EC, source); + if ( pin_based_cntrl & PIN_BASED_EXT_INTR_MASK ) + { + nvmx_enqueue_n2_exceptions( + v, + INTR_INFO_VALID_MASK | + MASK_INSR(X86_EVENTTYPE_EXT_INTR, + INTR_INFO_INTR_TYPE_MASK) | + MASK_INSR(trap, INTR_INFO_VECTOR_MASK), + X86_EVENT_NO_EC, source); return; } } - __vmx_inject_exception(trap, X86_EVENTTYPE_EXT_INTR, - X86_EVENT_NO_EC); + __vmx_inject_exception(trap, X86_EVENTTYPE_EXT_INTR, X86_EVENT_NO_EC); } void vmx_inject_nmi(void) { struct vcpu *v = current; - u32 pin_based_cntrl; + u32 pin_based_cntrl; - if ( nestedhvm_vcpu_in_guestmode(v) ) { + if ( nestedhvm_vcpu_in_guestmode(v) ) + { pin_based_cntrl = get_vvmcs(v, PIN_BASED_VM_EXEC_CONTROL); - if ( pin_based_cntrl & PIN_BASED_NMI_EXITING ) { - nvmx_enqueue_n2_exceptions (v, - INTR_INFO_VALID_MASK | - MASK_INSR(X86_EVENTTYPE_NMI, INTR_INFO_INTR_TYPE_MASK) | - MASK_INSR(TRAP_nmi, INTR_INFO_VECTOR_MASK), - X86_EVENT_NO_EC, hvm_intsrc_nmi); + if ( pin_based_cntrl & PIN_BASED_NMI_EXITING ) + { + nvmx_enqueue_n2_exceptions( + v, + INTR_INFO_VALID_MASK | + MASK_INSR(X86_EVENTTYPE_NMI, INTR_INFO_INTR_TYPE_MASK) | + MASK_INSR(TRAP_nmi, INTR_INFO_VECTOR_MASK), + X86_EVENT_NO_EC, hvm_intsrc_nmi); return; } } - __vmx_inject_exception(2, X86_EVENTTYPE_NMI, - X86_EVENT_NO_EC); + __vmx_inject_exception(2, X86_EVENTTYPE_NMI, X86_EVENT_NO_EC); } /* @@ -1828,7 +1836,7 @@ static void vmx_inject_event(const struct x86_event *event) struct vcpu *curr = current; struct x86_event _event = *event; - switch ( _event.vector | -(_event.type == X86_EVENTTYPE_SW_INTERRUPT) ) + switch (_event.vector | -(_event.type == X86_EVENTTYPE_SW_INTERRUPT)) { case TRAP_debug: if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF ) @@ -1873,8 +1881,8 @@ static void vmx_inject_event(const struct x86_event *event) (MASK_EXTR(intr_info, INTR_INFO_INTR_TYPE_MASK) == X86_EVENTTYPE_HW_EXCEPTION) ) { - _event.vector = hvm_combine_hw_exceptions( - (uint8_t)intr_info, _event.vector); + _event.vector = + hvm_combine_hw_exceptions((uint8_t)intr_info, _event.vector); if ( _event.vector == TRAP_double_fault ) _event.error_code = 0; } @@ -1885,10 +1893,11 @@ static void vmx_inject_event(const struct x86_event *event) if ( nestedhvm_vcpu_in_guestmode(curr) && nvmx_intercepts_exception(curr, _event.vector, _event.error_code) ) { - nvmx_enqueue_n2_exceptions (curr, + nvmx_enqueue_n2_exceptions( + curr, INTR_INFO_VALID_MASK | - MASK_INSR(_event.type, INTR_INFO_INTR_TYPE_MASK) | - MASK_INSR(_event.vector, INTR_INFO_VECTOR_MASK), + MASK_INSR(_event.type, INTR_INFO_INTR_TYPE_MASK) | + MASK_INSR(_event.vector, INTR_INFO_VECTOR_MASK), _event.error_code, hvm_intsrc_none); return; } @@ -1921,7 +1930,7 @@ static void vmx_set_info_guest(struct vcpu *v) __vmwrite(GUEST_DR7, v->arch.dr7); - /* + /* * If the interruptibility-state field indicates blocking by STI, * setting the TF flag in the EFLAGS may cause VM entry to fail * and 
crash the guest. See SDM 3B 22.3.1.5. @@ -2093,8 +2102,8 @@ static void vmx_deliver_posted_intr(struct vcpu *v, u8 vector) old.control = v->arch.hvm.vmx.pi_desc.control & ~((1 << POSTED_INTR_ON) | (1 << POSTED_INTR_SN)); - new.control = v->arch.hvm.vmx.pi_desc.control | - (1 << POSTED_INTR_ON); + new.control = + v->arch.hvm.vmx.pi_desc.control | (1 << POSTED_INTR_ON); prev.control = cmpxchg(&v->arch.hvm.vmx.pi_desc.control, old.control, new.control); @@ -2119,7 +2128,7 @@ static void vmx_sync_pir_to_irr(struct vcpu *v) for ( group = 0; group < ARRAY_SIZE(pending_intr); group++ ) pending_intr[group] = pi_get_pir(&v->arch.hvm.vmx.pi_desc, group); - for_each_set_bit(i, pending_intr, NR_VECTORS) + for_each_set_bit (i, pending_intr, NR_VECTORS) vlapic_set_vector(i, &vlapic->regs->data[APIC_IRR]); } @@ -2141,7 +2150,7 @@ static void vmx_enable_msr_interception(struct domain *d, uint32_t msr) { struct vcpu *v; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) vmx_set_msr_intercept(v, msr, VMX_MSR_W); } @@ -2224,8 +2233,7 @@ static int vmx_vcpu_emulate_vmfunc(const struct cpu_user_regs *regs) int rc = X86EMUL_EXCEPTION; struct vcpu *curr = current; - if ( !cpu_has_vmx_vmfunc && altp2m_active(curr->domain) && - regs->eax == 0 && + if ( !cpu_has_vmx_vmfunc && altp2m_active(curr->domain) && regs->eax == 0 && p2m_switch_vcpu_altp2m_by_id(curr, regs->ecx) ) rc = X86EMUL_OKAY; @@ -2257,10 +2265,9 @@ static bool_t vmx_vcpu_emulate_ve(struct vcpu *v) __vmread(GUEST_PHYSICAL_ADDRESS, &veinfo->gpa); vmx_vmcs_exit(v); - hvm_inject_hw_exception(TRAP_virtualisation, - X86_EVENT_NO_EC); + hvm_inject_hw_exception(TRAP_virtualisation, X86_EVENT_NO_EC); - out: +out: unmap_domain_page(veinfo); if ( rc ) @@ -2289,62 +2296,62 @@ static bool vmx_get_pending_event(struct vcpu *v, struct x86_event *info) } static struct hvm_function_table __initdata vmx_function_table = { - .name = "VMX", - .cpu_up_prepare = vmx_cpu_up_prepare, - .cpu_dead = vmx_cpu_dead, - .domain_initialise = vmx_domain_initialise, - .domain_destroy = vmx_domain_destroy, - .vcpu_initialise = vmx_vcpu_initialise, - .vcpu_destroy = vmx_vcpu_destroy, - .save_cpu_ctxt = vmx_save_vmcs_ctxt, - .load_cpu_ctxt = vmx_load_vmcs_ctxt, - .init_msr = vmx_init_msr, - .save_msr = vmx_save_msr, - .load_msr = vmx_load_msr, + .name = "VMX", + .cpu_up_prepare = vmx_cpu_up_prepare, + .cpu_dead = vmx_cpu_dead, + .domain_initialise = vmx_domain_initialise, + .domain_destroy = vmx_domain_destroy, + .vcpu_initialise = vmx_vcpu_initialise, + .vcpu_destroy = vmx_vcpu_destroy, + .save_cpu_ctxt = vmx_save_vmcs_ctxt, + .load_cpu_ctxt = vmx_load_vmcs_ctxt, + .init_msr = vmx_init_msr, + .save_msr = vmx_save_msr, + .load_msr = vmx_load_msr, .get_interrupt_shadow = vmx_get_interrupt_shadow, .set_interrupt_shadow = vmx_set_interrupt_shadow, - .guest_x86_mode = vmx_guest_x86_mode, - .get_cpl = _vmx_get_cpl, + .guest_x86_mode = vmx_guest_x86_mode, + .get_cpl = _vmx_get_cpl, .get_segment_register = vmx_get_segment_register, .set_segment_register = vmx_set_segment_register, - .get_shadow_gs_base = vmx_get_shadow_gs_base, - .update_host_cr3 = vmx_update_host_cr3, - .update_guest_cr = vmx_update_guest_cr, - .update_guest_efer = vmx_update_guest_efer, + .get_shadow_gs_base = vmx_get_shadow_gs_base, + .update_host_cr3 = vmx_update_host_cr3, + .update_guest_cr = vmx_update_guest_cr, + .update_guest_efer = vmx_update_guest_efer, .cpuid_policy_changed = vmx_cpuid_policy_changed, - .fpu_leave = vmx_fpu_leave, - .set_guest_pat = vmx_set_guest_pat, - .get_guest_pat = vmx_get_guest_pat, - 
.set_tsc_offset = vmx_set_tsc_offset, - .inject_event = vmx_inject_event, - .init_hypercall_page = vmx_init_hypercall_page, - .event_pending = vmx_event_pending, - .get_pending_event = vmx_get_pending_event, - .invlpg = vmx_invlpg, - .cpu_up = vmx_cpu_up, - .cpu_down = vmx_cpu_down, - .wbinvd_intercept = vmx_wbinvd_intercept, - .fpu_dirty_intercept = vmx_fpu_dirty_intercept, - .msr_read_intercept = vmx_msr_read_intercept, - .msr_write_intercept = vmx_msr_write_intercept, - .handle_cd = vmx_handle_cd, - .set_info_guest = vmx_set_info_guest, - .set_rdtsc_exiting = vmx_set_rdtsc_exiting, + .fpu_leave = vmx_fpu_leave, + .set_guest_pat = vmx_set_guest_pat, + .get_guest_pat = vmx_get_guest_pat, + .set_tsc_offset = vmx_set_tsc_offset, + .inject_event = vmx_inject_event, + .init_hypercall_page = vmx_init_hypercall_page, + .event_pending = vmx_event_pending, + .get_pending_event = vmx_get_pending_event, + .invlpg = vmx_invlpg, + .cpu_up = vmx_cpu_up, + .cpu_down = vmx_cpu_down, + .wbinvd_intercept = vmx_wbinvd_intercept, + .fpu_dirty_intercept = vmx_fpu_dirty_intercept, + .msr_read_intercept = vmx_msr_read_intercept, + .msr_write_intercept = vmx_msr_write_intercept, + .handle_cd = vmx_handle_cd, + .set_info_guest = vmx_set_info_guest, + .set_rdtsc_exiting = vmx_set_rdtsc_exiting, .nhvm_vcpu_initialise = nvmx_vcpu_initialise, - .nhvm_vcpu_destroy = nvmx_vcpu_destroy, - .nhvm_vcpu_reset = nvmx_vcpu_reset, - .nhvm_vcpu_p2m_base = nvmx_vcpu_eptp_base, + .nhvm_vcpu_destroy = nvmx_vcpu_destroy, + .nhvm_vcpu_reset = nvmx_vcpu_reset, + .nhvm_vcpu_p2m_base = nvmx_vcpu_eptp_base, .nhvm_vmcx_hap_enabled = nvmx_ept_enabled, .nhvm_vmcx_guest_intercepts_event = nvmx_intercepts_exception, .nhvm_vcpu_vmexit_event = nvmx_vmexit_event, - .nhvm_intr_blocked = nvmx_intr_blocked, + .nhvm_intr_blocked = nvmx_intr_blocked, .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources, .update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap, - .process_isr = vmx_process_isr, - .deliver_posted_intr = vmx_deliver_posted_intr, - .sync_pir_to_irr = vmx_sync_pir_to_irr, - .test_pir = vmx_test_pir, - .handle_eoi = vmx_handle_eoi, + .process_isr = vmx_process_isr, + .deliver_posted_intr = vmx_deliver_posted_intr, + .sync_pir_to_irr = vmx_sync_pir_to_irr, + .test_pir = vmx_test_pir, + .handle_eoi = vmx_handle_eoi, .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m, .enable_msr_interception = vmx_enable_msr_interception, .is_singlestep_supported = vmx_is_singlestep_supported, @@ -2352,9 +2359,10 @@ static struct hvm_function_table __initdata vmx_function_table = { .altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve, .altp2m_vcpu_emulate_ve = vmx_vcpu_emulate_ve, .altp2m_vcpu_emulate_vmfunc = vmx_vcpu_emulate_vmfunc, - .tsc_scaling = { - .max_ratio = VMX_TSC_MULTIPLIER_MAX, - }, + .tsc_scaling = + { + .max_ratio = VMX_TSC_MULTIPLIER_MAX, + }, }; /* Handle VT-d posted-interrupt when VCPU is blocked. 
*/ @@ -2363,7 +2371,7 @@ static void pi_wakeup_interrupt(struct cpu_user_regs *regs) struct vmx_vcpu *vmx, *tmp; spinlock_t *lock = &per_cpu(vmx_pi_blocking, smp_processor_id()).lock; struct list_head *blocked_vcpus = - &per_cpu(vmx_pi_blocking, smp_processor_id()).list; + &per_cpu(vmx_pi_blocking, smp_processor_id()).list; ack_APIC_irq(); this_cpu(irq_count)++; @@ -2439,7 +2447,7 @@ static void pi_notification_interrupt(struct cpu_user_regs *regs) static void __init lbr_tsx_fixup_check(void); static void __init bdw_erratum_bdf14_fixup_check(void); -const struct hvm_function_table * __init start_vmx(void) +const struct hvm_function_table *__init start_vmx(void) { set_in_cr4(X86_CR4_VMXE); @@ -2483,7 +2491,8 @@ const struct hvm_function_table * __init start_vmx(void) if ( cpu_has_vmx_posted_intr_processing ) { - alloc_direct_apic_vector(&posted_intr_vector, pi_notification_interrupt); + alloc_direct_apic_vector(&posted_intr_vector, + pi_notification_interrupt); if ( iommu_intpost ) alloc_direct_apic_vector(&pi_wakeup_vector, pi_wakeup_interrupt); } @@ -2569,7 +2578,7 @@ static int vmx_do_cpuid(struct cpu_user_regs *regs) if ( hvm_check_cpuid_faulting(current) ) { hvm_inject_hw_exception(TRAP_gp_fault, 0); - return 1; /* Don't advance the guest IP! */ + return 1; /* Don't advance the guest IP! */ } guest_cpuid(curr, leaf, subleaf, &res); @@ -2600,7 +2609,7 @@ static void vmx_dr_access(unsigned long exit_qualification, static void vmx_invlpg_intercept(unsigned long linear) { - HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(linear)); + HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/0, TRC_PAR_LONG(linear)); paging_invlpg(current, linear); } @@ -2627,7 +2636,7 @@ static int vmx_cr_access(cr_access_qual_t qual) { struct vcpu *curr = current; - switch ( qual.access_type ) + switch (qual.access_type) { case VMX_CR_ACCESS_TYPE_MOV_TO_CR: return hvm_mov_to_cr(qual.cr, qual.gpr); @@ -2658,9 +2667,9 @@ static int vmx_cr_access(cr_access_qual_t qual) int rc; /* LMSW can (1) set PE; (2) set or clear MP, EM, and TS. 
*/ - value = (value & ~(X86_CR0_MP|X86_CR0_EM|X86_CR0_TS)) | + value = (value & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) | (qual.lmsw_data & - (X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS)); + (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)); HVMTRACE_LONG_1D(LMSW, value); if ( (rc = hvm_set_cr0(value, true)) == X86EMUL_EXCEPTION ) @@ -2678,66 +2687,60 @@ static int vmx_cr_access(cr_access_qual_t qual) } /* This defines the layout of struct lbr_info[] */ -#define LBR_LASTINT_FROM_IDX 0 -#define LBR_LASTINT_TO_IDX 1 -#define LBR_LASTBRANCH_TOS_IDX 2 +#define LBR_LASTINT_FROM_IDX 0 +#define LBR_LASTINT_TO_IDX 1 +#define LBR_LASTBRANCH_TOS_IDX 2 #define LBR_LASTBRANCH_FROM_IDX 3 -#define LBR_LASTBRANCH_TO_IDX 4 -#define LBR_LASTBRANCH_INFO 5 +#define LBR_LASTBRANCH_TO_IDX 4 +#define LBR_LASTBRANCH_INFO 5 -static const struct lbr_info { +static const struct lbr_info +{ u32 base, count; -} p4_lbr[] = { - { MSR_P4_LER_FROM_LIP, 1 }, - { MSR_P4_LER_TO_LIP, 1 }, - { MSR_P4_LASTBRANCH_TOS, 1 }, - { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO }, - { MSR_P4_LASTBRANCH_0_TO_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO }, - { 0, 0 } -}, c2_lbr[] = { - { MSR_IA32_LASTINTFROMIP, 1 }, - { MSR_IA32_LASTINTTOIP, 1 }, - { MSR_C2_LASTBRANCH_TOS, 1 }, - { MSR_C2_LASTBRANCH_0_FROM_IP, NUM_MSR_C2_LASTBRANCH_FROM_TO }, - { MSR_C2_LASTBRANCH_0_TO_IP, NUM_MSR_C2_LASTBRANCH_FROM_TO }, - { 0, 0 } -}, nh_lbr[] = { - { MSR_IA32_LASTINTFROMIP, 1 }, - { MSR_IA32_LASTINTTOIP, 1 }, - { MSR_C2_LASTBRANCH_TOS, 1 }, - { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO }, - { MSR_P4_LASTBRANCH_0_TO_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO }, - { 0, 0 } -}, sk_lbr[] = { - { MSR_IA32_LASTINTFROMIP, 1 }, - { MSR_IA32_LASTINTTOIP, 1 }, - { MSR_SKL_LASTBRANCH_TOS, 1 }, - { MSR_SKL_LASTBRANCH_0_FROM_IP, NUM_MSR_SKL_LASTBRANCH }, - { MSR_SKL_LASTBRANCH_0_TO_IP, NUM_MSR_SKL_LASTBRANCH }, - { MSR_SKL_LASTBRANCH_0_INFO, NUM_MSR_SKL_LASTBRANCH }, - { 0, 0 } -}, at_lbr[] = { - { MSR_IA32_LASTINTFROMIP, 1 }, - { MSR_IA32_LASTINTTOIP, 1 }, - { MSR_C2_LASTBRANCH_TOS, 1 }, - { MSR_C2_LASTBRANCH_0_FROM_IP, NUM_MSR_ATOM_LASTBRANCH_FROM_TO }, - { MSR_C2_LASTBRANCH_0_TO_IP, NUM_MSR_ATOM_LASTBRANCH_FROM_TO }, - { 0, 0 } -}, gm_lbr[] = { - { MSR_IA32_LASTINTFROMIP, 1 }, - { MSR_IA32_LASTINTTOIP, 1 }, - { MSR_GM_LASTBRANCH_TOS, 1 }, - { MSR_GM_LASTBRANCH_0_FROM_IP, NUM_MSR_GM_LASTBRANCH_FROM_TO }, - { MSR_GM_LASTBRANCH_0_TO_IP, NUM_MSR_GM_LASTBRANCH_FROM_TO }, - { 0, 0 } -}; +} p4_lbr[] = {{MSR_P4_LER_FROM_LIP, 1}, + {MSR_P4_LER_TO_LIP, 1}, + {MSR_P4_LASTBRANCH_TOS, 1}, + {MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO}, + {MSR_P4_LASTBRANCH_0_TO_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO}, + {0, 0}}, + c2_lbr[] = {{MSR_IA32_LASTINTFROMIP, 1}, + {MSR_IA32_LASTINTTOIP, 1}, + {MSR_C2_LASTBRANCH_TOS, 1}, + {MSR_C2_LASTBRANCH_0_FROM_IP, NUM_MSR_C2_LASTBRANCH_FROM_TO}, + {MSR_C2_LASTBRANCH_0_TO_IP, NUM_MSR_C2_LASTBRANCH_FROM_TO}, + {0, 0}}, + nh_lbr[] = {{MSR_IA32_LASTINTFROMIP, 1}, + {MSR_IA32_LASTINTTOIP, 1}, + {MSR_C2_LASTBRANCH_TOS, 1}, + {MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO}, + {MSR_P4_LASTBRANCH_0_TO_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO}, + {0, 0}}, + sk_lbr[] = {{MSR_IA32_LASTINTFROMIP, 1}, + {MSR_IA32_LASTINTTOIP, 1}, + {MSR_SKL_LASTBRANCH_TOS, 1}, + {MSR_SKL_LASTBRANCH_0_FROM_IP, NUM_MSR_SKL_LASTBRANCH}, + {MSR_SKL_LASTBRANCH_0_TO_IP, NUM_MSR_SKL_LASTBRANCH}, + {MSR_SKL_LASTBRANCH_0_INFO, NUM_MSR_SKL_LASTBRANCH}, + {0, 0}}, + at_lbr[] = {{MSR_IA32_LASTINTFROMIP, 1}, + {MSR_IA32_LASTINTTOIP, 1}, + 
{MSR_C2_LASTBRANCH_TOS, 1}, + {MSR_C2_LASTBRANCH_0_FROM_IP, NUM_MSR_ATOM_LASTBRANCH_FROM_TO}, + {MSR_C2_LASTBRANCH_0_TO_IP, NUM_MSR_ATOM_LASTBRANCH_FROM_TO}, + {0, 0}}, + gm_lbr[] = {{MSR_IA32_LASTINTFROMIP, 1}, + {MSR_IA32_LASTINTTOIP, 1}, + {MSR_GM_LASTBRANCH_TOS, 1}, + {MSR_GM_LASTBRANCH_0_FROM_IP, NUM_MSR_GM_LASTBRANCH_FROM_TO}, + {MSR_GM_LASTBRANCH_0_TO_IP, NUM_MSR_GM_LASTBRANCH_FROM_TO}, + {0, 0}}; static const struct lbr_info *last_branch_msr_get(void) { - switch ( boot_cpu_data.x86 ) + switch (boot_cpu_data.x86) { case 6: - switch ( boot_cpu_data.x86_model ) + switch (boot_cpu_data.x86_model) { /* Core2 Duo */ case 0x0f: @@ -2747,20 +2750,34 @@ static const struct lbr_info *last_branch_msr_get(void) case 0x1d: return c2_lbr; /* Nehalem */ - case 0x1a: case 0x1e: case 0x1f: case 0x2e: + case 0x1a: + case 0x1e: + case 0x1f: + case 0x2e: /* Westmere */ - case 0x25: case 0x2c: case 0x2f: + case 0x25: + case 0x2c: + case 0x2f: /* Sandy Bridge */ - case 0x2a: case 0x2d: + case 0x2a: + case 0x2d: /* Ivy Bridge */ - case 0x3a: case 0x3e: + case 0x3a: + case 0x3e: /* Haswell */ - case 0x3c: case 0x3f: case 0x45: case 0x46: + case 0x3c: + case 0x3f: + case 0x45: + case 0x46: /* Broadwell */ - case 0x3d: case 0x47: case 0x4f: case 0x56: + case 0x3d: + case 0x47: + case 0x4f: + case 0x56: return nh_lbr; /* Skylake */ - case 0x4e: case 0x5e: + case 0x4e: + case 0x5e: /* Xeon Scalable */ case 0x55: /* Cannon Lake */ @@ -2768,12 +2785,21 @@ static const struct lbr_info *last_branch_msr_get(void) /* Goldmont Plus */ case 0x7a: /* Kaby Lake */ - case 0x8e: case 0x9e: + case 0x8e: + case 0x9e: return sk_lbr; /* Atom */ - case 0x1c: case 0x26: case 0x27: case 0x35: case 0x36: + case 0x1c: + case 0x26: + case 0x27: + case 0x35: + case 0x36: /* Silvermont */ - case 0x37: case 0x4a: case 0x4d: case 0x5a: case 0x5d: + case 0x37: + case 0x4a: + case 0x4d: + case 0x5a: + case 0x5d: /* Xeon Phi Knights Landing */ case 0x57: /* Xeon Phi Knights Mill */ @@ -2782,16 +2808,19 @@ static const struct lbr_info *last_branch_msr_get(void) case 0x4c: return at_lbr; /* Goldmont */ - case 0x5c: case 0x5f: + case 0x5c: + case 0x5f: return gm_lbr; } break; case 15: - switch ( boot_cpu_data.x86_model ) + switch (boot_cpu_data.x86_model) { /* Pentium4/Xeon with em64t */ - case 3: case 4: case 6: + case 3: + case 4: + case 6: return p4_lbr; } break; @@ -2802,22 +2831,22 @@ static const struct lbr_info *last_branch_msr_get(void) enum { - LBR_FORMAT_32 = 0x0, /* 32-bit record format */ - LBR_FORMAT_LIP = 0x1, /* 64-bit LIP record format */ - LBR_FORMAT_EIP = 0x2, /* 64-bit EIP record format */ - LBR_FORMAT_EIP_FLAGS = 0x3, /* 64-bit EIP, Flags */ - LBR_FORMAT_EIP_FLAGS_TSX = 0x4, /* 64-bit EIP, Flags, TSX */ + LBR_FORMAT_32 = 0x0, /* 32-bit record format */ + LBR_FORMAT_LIP = 0x1, /* 64-bit LIP record format */ + LBR_FORMAT_EIP = 0x2, /* 64-bit EIP record format */ + LBR_FORMAT_EIP_FLAGS = 0x3, /* 64-bit EIP, Flags */ + LBR_FORMAT_EIP_FLAGS_TSX = 0x4, /* 64-bit EIP, Flags, TSX */ LBR_FORMAT_EIP_FLAGS_TSX_INFO = 0x5, /* 64-bit EIP, Flags, TSX, LBR_INFO */ - LBR_FORMAT_EIP_FLAGS_CYCLES = 0x6, /* 64-bit EIP, Flags, Cycles */ + LBR_FORMAT_EIP_FLAGS_CYCLES = 0x6, /* 64-bit EIP, Flags, Cycles */ LBR_FORMAT_LIP_FLAGS_TSX_INFO = 0x7, /* 64-bit LIP, Flags, TSX, LBR_INFO */ }; -#define LBR_FROM_SIGNEXT_2MSB ((1ULL << 59) | (1ULL << 60)) +#define LBR_FROM_SIGNEXT_2MSB ((1ULL << 59) | (1ULL << 60)) -#define LBR_MSRS_INSERTED (1u << 0) -#define LBR_FIXUP_TSX (1u << 1) -#define LBR_FIXUP_BDF14 (1u << 2) -#define LBR_FIXUP_MASK 
(LBR_FIXUP_TSX | LBR_FIXUP_BDF14) +#define LBR_MSRS_INSERTED (1u << 0) +#define LBR_FIXUP_TSX (1u << 1) +#define LBR_FIXUP_BDF14 (1u << 2) +#define LBR_FIXUP_MASK (LBR_FIXUP_TSX | LBR_FIXUP_BDF14) static bool __read_mostly lbr_tsx_fixup_needed; static bool __read_mostly bdw_erratum_bdf14_fixup_needed; @@ -2884,7 +2913,7 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content) HVM_DBG_LOG(DBG_LEVEL_MSR, "ecx=%#x", msr); - switch ( msr ) + switch (msr) { case MSR_IA32_SYSENTER_CS: __vmread(GUEST_SYSENTER_CS, msr_content); @@ -2934,7 +2963,7 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content) if ( nestedhvm_enabled(curr->domain) ) *msr_content |= IA32_FEATURE_CONTROL_ENABLE_VMXON_OUTSIDE_SMX; break; - case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_VMFUNC: + case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: if ( !nvmx_msr_read_intercept(msr, msr_content) ) goto gp_fault; break; @@ -2942,13 +2971,13 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content) rdmsrl(MSR_IA32_MISC_ENABLE, *msr_content); /* Debug Trace Store is not supported. */ *msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL | - MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL; + MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL; /* Perhaps vpmu will change some bits. */ /* FALLTHROUGH */ - case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7): - case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3): - case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2: - case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL: + case MSR_P6_PERFCTR(0)... MSR_P6_PERFCTR(7): + case MSR_P6_EVNTSEL(0)... MSR_P6_EVNTSEL(3): + case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR2: + case MSR_CORE_PERF_FIXED_CTR_CTRL ... MSR_CORE_PERF_GLOBAL_OVF_CTRL: case MSR_IA32_PEBS_ENABLE: case MSR_IA32_DS_AREA: if ( vpmu_do_rdmsr(msr, msr_content) ) @@ -2975,8 +3004,8 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content) } done: - HVM_DBG_LOG(DBG_LEVEL_MSR, "returns: ecx=%#x, msr_value=%#"PRIx64, - msr, *msr_content); + HVM_DBG_LOG(DBG_LEVEL_MSR, "returns: ecx=%#x, msr_value=%#" PRIx64, msr, + *msr_content); return X86EMUL_OKAY; gp_fault: @@ -3037,12 +3066,11 @@ void vmx_vlapic_msr_changed(struct vcpu *v) struct vlapic *vlapic = vcpu_vlapic(v); unsigned int msr; - virtualize_x2apic_mode = ( (cpu_has_vmx_apic_reg_virt || - cpu_has_vmx_virtual_intr_delivery) && - cpu_has_vmx_virtualize_x2apic_mode ); + virtualize_x2apic_mode = + ((cpu_has_vmx_apic_reg_virt || cpu_has_vmx_virtual_intr_delivery) && + cpu_has_vmx_virtualize_x2apic_mode); - if ( !cpu_has_vmx_virtualize_apic_accesses && - !virtualize_x2apic_mode ) + if ( !cpu_has_vmx_virtualize_apic_accesses && !virtualize_x2apic_mode ) return; vmx_vmcs_enter(v); @@ -3058,8 +3086,8 @@ void vmx_vlapic_msr_changed(struct vcpu *v) SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; if ( cpu_has_vmx_apic_reg_virt ) { - for ( msr = MSR_X2APIC_FIRST; - msr <= MSR_X2APIC_FIRST + 0xff; msr++ ) + for ( msr = MSR_X2APIC_FIRST; msr <= MSR_X2APIC_FIRST + 0xff; + msr++ ) vmx_clear_msr_intercept(v, msr, VMX_MSR_R); vmx_set_msr_intercept(v, MSR_X2APIC_PPR, VMX_MSR_R); @@ -3079,8 +3107,7 @@ void vmx_vlapic_msr_changed(struct vcpu *v) } if ( !(v->arch.hvm.vmx.secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE) ) - for ( msr = MSR_X2APIC_FIRST; - msr <= MSR_X2APIC_FIRST + 0xff; msr++ ) + for ( msr = MSR_X2APIC_FIRST; msr <= MSR_X2APIC_FIRST + 0xff; msr++ ) vmx_set_msr_intercept(v, msr, VMX_MSR_RW); vmx_update_secondary_exec_control(v); @@ -3092,9 +3119,10 @@ static int 
vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content) struct vcpu *v = current; const struct cpuid_policy *cp = v->domain->arch.cpuid; - HVM_DBG_LOG(DBG_LEVEL_MSR, "ecx=%#x, msr_value=%#"PRIx64, msr, msr_content); + HVM_DBG_LOG(DBG_LEVEL_MSR, "ecx=%#x, msr_value=%#" PRIx64, msr, + msr_content); - switch ( msr ) + switch (msr) { uint64_t rsvd; @@ -3209,8 +3237,8 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content) if ( unlikely(rc) ) { - gprintk(XENLOG_ERR, - "Guest load/save list error %d\n", rc); + gprintk(XENLOG_ERR, "Guest load/save list error %d\n", + rc); domain_crash(v->domain); return X86EMUL_OKAY; } @@ -3234,13 +3262,13 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content) /* None of these MSRs are writeable. */ goto gp_fault; - case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7): - case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(7): - case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2: - case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL: + case MSR_P6_PERFCTR(0)... MSR_P6_PERFCTR(7): + case MSR_P6_EVNTSEL(0)... MSR_P6_EVNTSEL(7): + case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR2: + case MSR_CORE_PERF_FIXED_CTR_CTRL ... MSR_CORE_PERF_GLOBAL_OVF_CTRL: case MSR_IA32_PEBS_ENABLE: case MSR_IA32_DS_AREA: - if ( vpmu_do_wrmsr(msr, msr_content, 0) ) + if ( vpmu_do_wrmsr(msr, msr_content, 0) ) goto gp_fault; break; @@ -3318,7 +3346,8 @@ static void ept_handle_violation(ept_qual_t q, paddr_t gpa) if ( tb_init_done ) { - struct { + struct + { uint64_t gpa; uint64_t mfn; u32 qualification; @@ -3336,7 +3365,7 @@ static void ept_handle_violation(ept_qual_t q, paddr_t gpa) { __vmread(GUEST_LINEAR_ADDRESS, &gla); npfec.gla_valid = 1; - if( q.gla_fault ) + if ( q.gla_fault ) npfec.kind = npfec_kind_with_gla; else npfec.kind = npfec_kind_in_gpt; @@ -3345,13 +3374,13 @@ static void ept_handle_violation(ept_qual_t q, paddr_t gpa) gla = ~0ull; ret = hvm_hap_nested_page_fault(gpa, gla, npfec); - switch ( ret ) + switch (ret) { - case 0: // Unhandled L1 EPT violation + case 0: // Unhandled L1 EPT violation break; - case 1: // This violation is handled completly + case 1: // This violation is handled completly return; - case -1: // This vioaltion should be injected to L1 VMM + case -1: // This vioaltion should be injected to L1 VMM vcpu_nestedhvm(current).nv_vmexit_pending = 1; return; } @@ -3359,15 +3388,11 @@ static void ept_handle_violation(ept_qual_t q, paddr_t gpa) /* Everything else is an error. */ mfn = get_gfn_query_unlocked(d, gfn, &p2mt); gprintk(XENLOG_ERR, - "EPT violation %#lx (%c%c%c/%c%c%c) gpa %#"PRIpaddr" mfn %#lx type %i\n", - q.raw, - q.read ? 'r' : '-', - q.write ? 'w' : '-', - q.fetch ? 'x' : '-', - q.eff_read ? 'r' : '-', - q.eff_write ? 'w' : '-', - q.eff_exec ? 'x' : '-', - gpa, mfn_x(mfn), p2mt); + "EPT violation %#lx (%c%c%c/%c%c%c) gpa %#" PRIpaddr + " mfn %#lx type %i\n", + q.raw, q.read ? 'r' : '-', q.write ? 'w' : '-', q.fetch ? 'x' : '-', + q.eff_read ? 'r' : '-', q.eff_write ? 'w' : '-', + q.eff_exec ? 
'x' : '-', gpa, mfn_x(mfn), p2mt); ept_walk_table(d, gfn); @@ -3386,7 +3411,7 @@ static void vmx_failed_vmentry(unsigned int exit_reason, printk("%pv vmentry failure (reason %#x): ", curr, exit_reason); __vmread(EXIT_QUALIFICATION, &exit_qualification); - switch ( failed_vmentry_reason ) + switch (failed_vmentry_reason) { case EXIT_REASON_INVALID_GUEST_STATE: printk("Invalid guest state (%lu)\n", exit_qualification); @@ -3405,8 +3430,8 @@ static void vmx_failed_vmentry(unsigned int exit_reason, { msr = &curr->arch.hvm.vmx.msr_area[idx]; - printk(" msr %08x val %016"PRIx64" (mbz %#x)\n", - msr->index, msr->data, msr->mbz); + printk(" msr %08x val %016" PRIx64 " (mbz %#x)\n", msr->index, + msr->data, msr->mbz); } break; } @@ -3497,13 +3522,12 @@ static void vmx_propagate_intr(unsigned long intr) static void vmx_idtv_reinject(unsigned long idtv_info) { - /* Event delivery caused this intercept? Queue for redelivery. */ if ( unlikely(idtv_info & INTR_INFO_VALID_MASK) ) { - if ( hvm_event_needs_reinjection(MASK_EXTR(idtv_info, - INTR_INFO_INTR_TYPE_MASK), - idtv_info & INTR_INFO_VECTOR_MASK) ) + if ( hvm_event_needs_reinjection( + MASK_EXTR(idtv_info, INTR_INFO_INTR_TYPE_MASK), + idtv_info & INTR_INFO_VECTOR_MASK) ) { /* See SDM 3B 25.7.1.1 and .2 for info about masking resvd bits. */ __vmwrite(VM_ENTRY_INTR_INFO, @@ -3590,8 +3614,8 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) struct vcpu *v = current; struct domain *currd = v->domain; - __vmread(GUEST_RIP, &regs->rip); - __vmread(GUEST_RSP, &regs->rsp); + __vmread(GUEST_RIP, &regs->rip); + __vmread(GUEST_RSP, &regs->rsp); __vmread(GUEST_RFLAGS, &regs->rflags); hvm_invalidate_regs_fields(regs); @@ -3604,8 +3628,8 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) */ __vmread(GUEST_CR4, &v->arch.hvm.hw_cr[4]); v->arch.hvm.guest_cr[4] &= v->arch.hvm.vmx.cr4_host_mask; - v->arch.hvm.guest_cr[4] |= (v->arch.hvm.hw_cr[4] & - ~v->arch.hvm.vmx.cr4_host_mask); + v->arch.hvm.guest_cr[4] |= + (v->arch.hvm.hw_cr[4] & ~v->arch.hvm.vmx.cr4_host_mask); __vmread(GUEST_CR3, &v->arch.hvm.hw_cr[3]); if ( vmx_unrestricted_guest(v) || hvm_paging_enabled(v) ) @@ -3615,16 +3639,16 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) __vmread(VM_EXIT_REASON, &exit_reason); if ( hvm_long_mode_active(v) ) - HVMTRACE_ND(VMEXIT64, 0, 1/*cycles*/, 3, exit_reason, - regs->eip, regs->rip >> 32, 0, 0, 0); + HVMTRACE_ND(VMEXIT64, 0, 1 /*cycles*/, 3, exit_reason, regs->eip, + regs->rip >> 32, 0, 0, 0); else - HVMTRACE_ND(VMEXIT, 0, 1/*cycles*/, 2, exit_reason, - regs->eip, 0, 0, 0, 0); + HVMTRACE_ND(VMEXIT, 0, 1 /*cycles*/, 2, exit_reason, regs->eip, 0, 0, 0, + 0); perfc_incra(vmexits, exit_reason); /* Handle the interrupt we missed before allowing any more in. */ - switch ( (uint16_t)exit_reason ) + switch ((uint16_t)exit_reason) { case EXIT_REASON_EXTERNAL_INTERRUPT: vmx_do_extint(regs); @@ -3655,14 +3679,13 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) * If the guest has the ability to switch EPTP without an exit, * figure out whether it has done so and update the altp2m data.
*/ - if ( altp2m_active(v->domain) && - (v->arch.hvm.vmx.secondary_exec_control & - SECONDARY_EXEC_ENABLE_VM_FUNCTIONS) ) + if ( altp2m_active(v->domain) && (v->arch.hvm.vmx.secondary_exec_control & + SECONDARY_EXEC_ENABLE_VM_FUNCTIONS) ) { unsigned long idx; if ( v->arch.hvm.vmx.secondary_exec_control & - SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS ) + SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS ) __vmread(EPTP_INDEX, &idx); else { @@ -3711,14 +3734,13 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) /* Unless this exit was for an interrupt, we've hit something * vm86 can't handle. Try again, using the emulator. */ - switch ( exit_reason ) + switch (exit_reason) { case EXIT_REASON_EXCEPTION_NMI: - if ( vector != TRAP_page_fault - && vector != TRAP_nmi - && vector != TRAP_machine_check ) + if ( vector != TRAP_page_fault && vector != TRAP_nmi && + vector != TRAP_machine_check ) { - default: + default: perfc_incr(realmode_exits); v->arch.hvm.vmx.vmx_emulate = 1; HVMTRACE_0D(REALMODE_EMULATE); @@ -3746,7 +3768,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) if ( exit_reason != EXIT_REASON_TASK_SWITCH ) vmx_idtv_reinject(idtv_info); - switch ( exit_reason ) + switch (exit_reason) { unsigned long ecode; @@ -3776,7 +3798,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) perfc_incra(cause_vector, vector); - switch ( vector ) + switch (vector) { case TRAP_debug: /* @@ -3791,14 +3813,13 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) { unsigned long insn_len = 0; int rc; - unsigned long trap_type = MASK_EXTR(intr_info, - INTR_INFO_INTR_TYPE_MASK); + unsigned long trap_type = + MASK_EXTR(intr_info, INTR_INFO_INTR_TYPE_MASK); if ( trap_type >= X86_EVENTTYPE_SW_INTERRUPT ) __vmread(VM_EXIT_INSTRUCTION_LEN, &insn_len); - rc = hvm_monitor_debug(regs->rip, - HVM_MONITOR_DEBUG_EXCEPTION, + rc = hvm_monitor_debug(regs->rip, HVM_MONITOR_DEBUG_EXCEPTION, trap_type, insn_len); if ( rc < 0 ) @@ -3819,8 +3840,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) __vmread(VM_EXIT_INSTRUCTION_LEN, &insn_len); rc = hvm_monitor_debug(regs->rip, HVM_MONITOR_SOFTWARE_BREAKPOINT, - X86_EVENTTYPE_SW_EXCEPTION, - insn_len); + X86_EVENTTYPE_SW_EXCEPTION, insn_len); if ( rc < 0 ) goto exit_and_crash; @@ -3845,8 +3865,8 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) HVM_DBG_LOG(DBG_LEVEL_VMMU, "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx", - regs->rax, regs->rbx, regs->rcx, - regs->rdx, regs->rsi, regs->rdi); + regs->rax, regs->rbx, regs->rcx, regs->rdx, regs->rsi, + regs->rdi); if ( paging_fault(exit_qualification, regs) ) { @@ -3854,10 +3874,9 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) break; if ( hvm_long_mode_active(v) ) HVMTRACE_LONG_2D(PF_XEN, regs->error_code, - TRC_PAR_LONG(exit_qualification) ); + TRC_PAR_LONG(exit_qualification)); else - HVMTRACE_2D(PF_XEN, - regs->error_code, exit_qualification ); + HVMTRACE_2D(PF_XEN, regs->error_code, exit_qualification); break; } @@ -3904,10 +3923,10 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) v->arch.hvm.vmx.exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; vmx_update_cpu_exec_control(v); break; - case EXIT_REASON_TASK_SWITCH: { + case EXIT_REASON_TASK_SWITCH: + { static const enum hvm_task_switch_reason reasons[] = { - TSW_call_or_int, TSW_iret, TSW_jmp, TSW_call_or_int - }; + TSW_call_or_int, TSW_iret, TSW_jmp, TSW_call_or_int}; unsigned int inst_len, source; __vmread(EXIT_QUALIFICATION, &exit_qualification); @@ -3919,14 +3938,15 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) * - TSW is due to a CALL, IRET 
or JMP instruction. * - TSW is a vectored event due to a SW exception or SW interrupt. */ - inst_len = ((source != 3) || /* CALL, IRET, or JMP? */ - (MASK_EXTR(idtv_info, INTR_INFO_INTR_TYPE_MASK) - > 3)) /* IntrType > 3? */ - ? get_instruction_length() /* Safe: SDM 3B 23.2.4 */ : 0; + inst_len = ((source != 3) || /* CALL, IRET, or JMP? */ + (MASK_EXTR(idtv_info, INTR_INFO_INTR_TYPE_MASK) > + 3)) /* IntrType > 3? */ + ? get_instruction_length() /* Safe: SDM 3B 23.2.4 */ + : 0; if ( (source == 3) && (idtv_info & INTR_INFO_DELIVER_CODE_MASK) ) __vmread(IDT_VECTORING_ERROR_CODE, &ecode); else - ecode = -1; + ecode = -1; regs->rip += inst_len; hvm_task_switch((uint16_t)exit_qualification, reasons[source], ecode); break; @@ -3991,7 +4011,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) { uint64_t msr_content = 0; - switch ( hvm_msr_read_intercept(regs->ecx, &msr_content) ) + switch (hvm_msr_read_intercept(regs->ecx, &msr_content)) { case X86EMUL_OKAY: msr_split(regs, msr_content); @@ -4006,7 +4026,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) } case EXIT_REASON_MSR_WRITE: - switch ( hvm_msr_write_intercept(regs->ecx, msr_fold(regs), true) ) + switch (hvm_msr_write_intercept(regs->ecx, msr_fold(regs), true)) { case X86EMUL_OKAY: update_guest_eip(); /* Safe: WRMSR */ @@ -4120,9 +4140,8 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) vmx_update_cpu_exec_control(v); if ( v->arch.hvm.single_step ) { - hvm_monitor_debug(regs->rip, - HVM_MONITOR_SINGLESTEP_BREAKPOINT, - 0, 0); + hvm_monitor_debug(regs->rip, HVM_MONITOR_SINGLESTEP_BREAKPOINT, 0, + 0); if ( v->domain->debugger_attached ) domain_pause_for_debugger(); @@ -4169,8 +4188,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) gprintk(XENLOG_ERR, "Unexpected vmexit: reason %lu\n", exit_reason); if ( vmx_get_cpl() ) - hvm_inject_hw_exception(TRAP_invalid_op, - X86_EVENT_NO_EC); + hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC); else domain_crash(v->domain); break; @@ -4191,8 +4209,7 @@ out: * already is in most privileged mode. */ mode = vmx_guest_x86_mode(v); - if ( mode == 8 ? !is_canonical_address(regs->rip) - : regs->rip != regs->eip ) + if ( mode == 8 ? !is_canonical_address(regs->rip) : regs->rip != regs->eip ) { gprintk(XENLOG_WARNING, "Bad rIP %lx for mode %u\n", regs->rip, mode); @@ -4203,8 +4220,8 @@ out: hvm_inject_hw_exception(TRAP_gp_fault, 0); /* Need to fix rIP nevertheless. */ if ( mode == 8 ) - regs->rip = (long)(regs->rip << (64 - VADDR_BITS)) >> - (64 - VADDR_BITS); + regs->rip = + (long)(regs->rip << (64 - VADDR_BITS)) >> (64 - VADDR_BITS); else regs->rip = regs->eip; } @@ -4279,8 +4296,8 @@ bool vmx_vmenter_helper(const struct cpu_user_regs *regs) bool_t need_flush; /* Shadow EPTP can't be updated here because irqs are disabled */ - if ( nestedhvm_vcpu_in_guestmode(curr) && vcpu_nestedhvm(curr).stale_np2m ) - return false; + if ( nestedhvm_vcpu_in_guestmode(curr) && vcpu_nestedhvm(curr).stale_np2m ) + return false; if ( curr->domain->arch.hvm.pi_ops.vcpu_block ) vmx_pi_do_resume(curr); @@ -4322,7 +4339,7 @@ bool vmx_vmenter_helper(const struct cpu_user_regs *regs) { struct ept_data *ept = &p2m_get_hostp2m(currd)->ept; unsigned int cpu = smp_processor_id(); - unsigned int inv = 0; /* None => Single => All */ + unsigned int inv = 0; /* None => Single => All */ struct ept_data *single = NULL; /* Single eptp, iff inv == 1 */ if ( cpumask_test_cpu(cpu, ept->invalidate) ) @@ -4355,17 +4372,17 @@ bool vmx_vmenter_helper(const struct cpu_user_regs *regs) if ( inv ) __invept(inv == 1 ? 
INVEPT_SINGLE_CONTEXT : INVEPT_ALL_CONTEXT, - inv == 1 ? single->eptp : 0); + inv == 1 ? single->eptp : 0); } - out: +out: if ( unlikely(curr->arch.hvm.vmx.lbr_flags & LBR_FIXUP_MASK) ) lbr_fixup(); - HVMTRACE_ND(VMENTRY, 0, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0); + HVMTRACE_ND(VMENTRY, 0, 1 /*cycles*/, 0, 0, 0, 0, 0, 0, 0); - __vmwrite(GUEST_RIP, regs->rip); - __vmwrite(GUEST_RSP, regs->rsp); + __vmwrite(GUEST_RIP, regs->rip); + __vmwrite(GUEST_RSP, regs->rsp); __vmwrite(GUEST_RFLAGS, regs->rflags | X86_EFLAGS_MBS); return true; diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c index 9f6ea5c1f7..a2299754ed 100644 --- a/xen/arch/x86/hvm/vmx/vvmx.c +++ b/xen/arch/x86/hvm/vmx/vvmx.c @@ -89,7 +89,8 @@ int nvmx_vcpu_initialise(struct vcpu *v) vmwrite_bitmap = alloc_domheap_page(NULL, 0); if ( !vmwrite_bitmap ) { - gdprintk(XENLOG_ERR, "nest: allocation for vmwrite bitmap failed\n"); + gdprintk(XENLOG_ERR, + "nest: allocation for vmwrite bitmap failed\n"); return -ENOMEM; } v->arch.hvm.vmx.vmwrite_bitmap = vmwrite_bitmap; @@ -124,14 +125,14 @@ int nvmx_vcpu_initialise(struct vcpu *v) INIT_LIST_HEAD(&nvmx->launched_list); return 0; } - + void nvmx_vcpu_destroy(struct vcpu *v) { struct nestedvmx *nvmx = &vcpu_2_nvmx(v); struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); struct vvmcs_list *item, *n; - /* + /* * When destroying the vcpu, it may be running on behalf of L2 guest. * Therefore we need to switch the VMCS pointer back to the L1 VMCS, * in order to avoid double free of L2 VMCS and the possible memory @@ -166,12 +167,12 @@ void nvmx_vcpu_destroy(struct vcpu *v) v->arch.hvm.vmx.vmwrite_bitmap = NULL; } } - + void nvmx_domain_relinquish_resources(struct domain *d) { struct vcpu *v; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) nvmx_purge_vvmcs(v); } @@ -192,14 +193,16 @@ bool_t nvmx_ept_enabled(struct vcpu *v) return !!(nvmx->ept.enabled); } -struct vmx_inst_decoded { +struct vmx_inst_decoded +{ #define VMX_INST_MEMREG_TYPE_MEMORY 0 -#define VMX_INST_MEMREG_TYPE_REG 1 +#define VMX_INST_MEMREG_TYPE_REG 1 int type; union { - struct { + struct + { unsigned long mem; - unsigned int len; + unsigned int len; }; unsigned int reg1; }; @@ -213,7 +216,7 @@ static int vvmcs_offset(u32 width, u32 type, u32 index) offset = (index & 0x1f) | type << 5 | width << 7; - if ( offset == 0 ) /* vpid */ + if ( offset == 0 ) /* vpid */ offset = 0x3f; return offset; @@ -222,7 +225,7 @@ static int vvmcs_offset(u32 width, u32 type, u32 index) u64 get_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding) { union vmcs_encoding enc; - u64 *content = (u64 *) vvmcs; + u64 *content = (u64 *)vvmcs; int offset; u64 res; @@ -230,11 +233,12 @@ u64 get_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding) offset = vvmcs_offset(enc.width, enc.type, enc.index); res = content[offset]; - switch ( enc.width ) { + switch (enc.width) + { case VVMCS_WIDTH_16: res &= 0xffff; break; - case VVMCS_WIDTH_64: + case VVMCS_WIDTH_64: if ( enc.access_type ) res >>= 32; break; @@ -276,7 +280,7 @@ enum vmx_insn_errno get_vvmcs_real_safe(const struct vcpu *v, u32 encoding, void set_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding, u64 val) { union vmcs_encoding enc; - u64 *content = (u64 *) vvmcs; + u64 *content = (u64 *)vvmcs; int offset; u64 res; @@ -284,7 +288,8 @@ void set_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding, u64 val) offset = vvmcs_offset(enc.width, enc.type, enc.index); res = content[offset]; - switch ( enc.width ) { + switch (enc.width) + { case VVMCS_WIDTH_16: res = val & 0xffff; break; @@ -333,14 +338,12 @@ enum vmx_insn_errno 
set_vvmcs_real_safe(const struct vcpu *v, u32 encoding, return virtual_vmcs_vmwrite_safe(v, encoding, val); } -static unsigned long reg_read(struct cpu_user_regs *regs, - unsigned int index) +static unsigned long reg_read(struct cpu_user_regs *regs, unsigned int index) { return *decode_gpr(regs, index); } -static void reg_write(struct cpu_user_regs *regs, - unsigned int index, +static void reg_write(struct cpu_user_regs *regs, unsigned int index, unsigned long value) { *decode_gpr(regs, index) = value; @@ -379,7 +382,8 @@ static int decode_vmx_inst(struct cpu_user_regs *regs, __vmread(VMX_INSTRUCTION_INFO, &offset); info.word = offset; - if ( info.fields.memreg ) { + if ( info.fields.memreg ) + { decode->type = VMX_INST_MEMREG_TYPE_REG; decode->reg1 = info.fields.reg1; if ( poperandS != NULL ) @@ -396,11 +400,13 @@ static int decode_vmx_inst(struct cpu_user_regs *regs, hvm_get_segment_register(v, info.fields.segment, &seg); seg_base = seg.base; - base = info.fields.base_reg_invalid ? 0 : - reg_read(regs, info.fields.base_reg); + base = info.fields.base_reg_invalid + ? 0 + : reg_read(regs, info.fields.base_reg); - index = info.fields.index_reg_invalid ? 0 : - reg_read(regs, info.fields.index_reg); + index = info.fields.index_reg_invalid + ? 0 + : reg_read(regs, info.fields.index_reg); scale = 1 << info.fields.scaling; @@ -409,20 +415,20 @@ static int decode_vmx_inst(struct cpu_user_regs *regs, size = 1 << (info.fields.addr_size + 1); offset = base + index * scale + disp; - base = !mode_64bit || info.fields.segment >= x86_seg_fs ? - seg_base + offset : offset; + base = !mode_64bit || info.fields.segment >= x86_seg_fs + ? seg_base + offset + : offset; if ( offset + size - 1 < offset || - (mode_64bit ? - !is_canonical_address((long)base < 0 ? base : - base + size - 1) : - offset + size - 1 > seg.limit) ) + (mode_64bit ? !is_canonical_address( + (long)base < 0 ? base : base + size - 1) + : offset + size - 1 > seg.limit) ) goto gp_fault; if ( poperandS != NULL ) { pagefault_info_t pfinfo; - int rc = hvm_copy_from_guest_linear(poperandS, base, size, - 0, &pfinfo); + int rc = + hvm_copy_from_guest_linear(poperandS, base, size, 0, &pfinfo); if ( rc == HVMTRANS_bad_linear_to_gfn ) hvm_inject_page_fault(pfinfo.ec, pfinfo.linear); @@ -479,21 +485,21 @@ static void vmfail(struct cpu_user_regs *regs, enum vmx_insn_errno errno) vmfail_invalid(regs); } -bool_t nvmx_intercepts_exception( - struct vcpu *v, unsigned int vector, int error_code) +bool_t nvmx_intercepts_exception(struct vcpu *v, unsigned int vector, + int error_code) { - u32 exception_bitmap, pfec_match=0, pfec_mask=0; + u32 exception_bitmap, pfec_match = 0, pfec_mask = 0; int r; ASSERT(vector < 32); exception_bitmap = get_vvmcs(v, EXCEPTION_BITMAP); - r = exception_bitmap & (1 << vector) ? 1: 0; + r = exception_bitmap & (1 << vector) ? 1 : 0; if ( vector == TRAP_page_fault ) { pfec_match = get_vvmcs(v, PAGE_FAULT_ERROR_CODE_MATCH); - pfec_mask = get_vvmcs(v, PAGE_FAULT_ERROR_CODE_MASK); + pfec_mask = get_vvmcs(v, PAGE_FAULT_ERROR_CODE_MASK); if ( (error_code & pfec_mask) != pfec_match ) r = !r; } @@ -501,18 +507,16 @@ bool_t nvmx_intercepts_exception( } /* - * Nested VMX uses "strict" condition to exit from + * Nested VMX uses "strict" condition to exit from * L2 guest if either L1 VMM or L0 VMM expect to exit. 
*/ -static inline u32 __shadow_control(struct vcpu *v, - unsigned int field, - u32 host_value) +static inline u32 __shadow_control(struct vcpu *v, unsigned int field, + u32 host_value) { return get_vvmcs(v, field) | host_value; } -static void set_shadow_control(struct vcpu *v, - unsigned int field, +static void set_shadow_control(struct vcpu *v, unsigned int field, u32 host_value) { __vmwrite(field, __shadow_control(v, field, host_value)); @@ -533,33 +537,37 @@ unsigned long *_shadow_io_bitmap(struct vcpu *v) void nvmx_update_exec_control(struct vcpu *v, u32 host_cntrl) { - u32 pio_cntrl = (CPU_BASED_ACTIVATE_IO_BITMAP - | CPU_BASED_UNCOND_IO_EXITING); - unsigned long *bitmap; + u32 pio_cntrl = + (CPU_BASED_ACTIVATE_IO_BITMAP | CPU_BASED_UNCOND_IO_EXITING); + unsigned long *bitmap; u32 shadow_cntrl; - + shadow_cntrl = __n2_exec_control(v); pio_cntrl &= shadow_cntrl; /* Enforce the removed features */ - shadow_cntrl &= ~(CPU_BASED_ACTIVATE_MSR_BITMAP - | CPU_BASED_ACTIVATE_IO_BITMAP - | CPU_BASED_UNCOND_IO_EXITING); + shadow_cntrl &= + ~(CPU_BASED_ACTIVATE_MSR_BITMAP | CPU_BASED_ACTIVATE_IO_BITMAP | + CPU_BASED_UNCOND_IO_EXITING); shadow_cntrl |= host_cntrl; - if ( pio_cntrl == CPU_BASED_UNCOND_IO_EXITING ) { + if ( pio_cntrl == CPU_BASED_UNCOND_IO_EXITING ) + { /* L1 VMM intercepts all I/O instructions */ shadow_cntrl |= CPU_BASED_UNCOND_IO_EXITING; shadow_cntrl &= ~CPU_BASED_ACTIVATE_IO_BITMAP; } - else { + else + { /* Use IO_BITMAP in shadow */ - if ( pio_cntrl == 0 ) { - /* + if ( pio_cntrl == 0 ) + { + /* * L1 VMM doesn't intercept IO instruction. * Use host configuration and reset IO_BITMAP */ bitmap = hvm_io_bitmap; } - else { + else + { /* use IO bitmap */ bitmap = _shadow_io_bitmap(v); } @@ -610,10 +618,8 @@ static void nvmx_update_exit_control(struct vcpu *v, unsigned long host_cntrl) u32 shadow_cntrl; shadow_cntrl = get_vvmcs(v, VM_EXIT_CONTROLS); - shadow_cntrl &= ~(VM_EXIT_SAVE_DEBUG_CNTRLS - | VM_EXIT_LOAD_HOST_PAT - | VM_EXIT_LOAD_HOST_EFER - | VM_EXIT_LOAD_PERF_GLOBAL_CTRL); + shadow_cntrl &= ~(VM_EXIT_SAVE_DEBUG_CNTRLS | VM_EXIT_LOAD_HOST_PAT | + VM_EXIT_LOAD_HOST_EFER | VM_EXIT_LOAD_PERF_GLOBAL_CTRL); shadow_cntrl |= host_cntrl; __vmwrite(VM_EXIT_CONTROLS, shadow_cntrl); } @@ -623,9 +629,8 @@ static void nvmx_update_entry_control(struct vcpu *v) u32 shadow_cntrl; shadow_cntrl = get_vvmcs(v, VM_ENTRY_CONTROLS); - shadow_cntrl &= ~(VM_ENTRY_LOAD_GUEST_PAT - | VM_ENTRY_LOAD_GUEST_EFER - | VM_ENTRY_LOAD_PERF_GLOBAL_CTRL); + shadow_cntrl &= ~(VM_ENTRY_LOAD_GUEST_PAT | VM_ENTRY_LOAD_GUEST_EFER | + VM_ENTRY_LOAD_PERF_GLOBAL_CTRL); __vmwrite(VM_ENTRY_CONTROLS, shadow_cntrl); } @@ -697,7 +702,7 @@ static void nvmx_update_pfec(struct vcpu *v) static void __clear_current_vvmcs(struct vcpu *v) { struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); - + if ( nvcpu->nv_n2vmcx_pa ) __vmpclear(nvcpu->nv_n2vmcx_pa); } @@ -761,8 +766,7 @@ static bool_t __must_check _map_io_bitmap(struct vcpu *v, u64 vmcs_reg) static inline bool_t __must_check map_io_bitmap_all(struct vcpu *v) { - return _map_io_bitmap(v, IO_BITMAP_A) && - _map_io_bitmap(v, IO_BITMAP_B); + return _map_io_bitmap(v, IO_BITMAP_A) && _map_io_bitmap(v, IO_BITMAP_B); } static void nvmx_purge_vvmcs(struct vcpu *v) @@ -787,8 +791,7 @@ u64 nvmx_get_tsc_offset(struct vcpu *v) { u64 offset = 0; - if ( get_vvmcs(v, CPU_BASED_VM_EXEC_CONTROL) & - CPU_BASED_USE_TSC_OFFSETING ) + if ( get_vvmcs(v, CPU_BASED_VM_EXEC_CONTROL) & CPU_BASED_USE_TSC_OFFSETING ) offset = get_vvmcs(v, TSC_OFFSET); return offset; @@ -870,20 +873,13 @@ static const u16 
gpdpte_fields[] = { * Context: shadow -> virtual VMCS */ static const u16 vmcs_ro_field[] = { - GUEST_PHYSICAL_ADDRESS, - VM_INSTRUCTION_ERROR, - VM_EXIT_REASON, - VM_EXIT_INTR_INFO, - VM_EXIT_INTR_ERROR_CODE, - IDT_VECTORING_INFO, - IDT_VECTORING_ERROR_CODE, - VM_EXIT_INSTRUCTION_LEN, - VMX_INSTRUCTION_INFO, - EXIT_QUALIFICATION, - GUEST_LINEAR_ADDRESS -}; + GUEST_PHYSICAL_ADDRESS, VM_INSTRUCTION_ERROR, VM_EXIT_REASON, + VM_EXIT_INTR_INFO, VM_EXIT_INTR_ERROR_CODE, IDT_VECTORING_INFO, + IDT_VECTORING_ERROR_CODE, VM_EXIT_INSTRUCTION_LEN, VMX_INSTRUCTION_INFO, + EXIT_QUALIFICATION, GUEST_LINEAR_ADDRESS}; -static struct vmcs_host_to_guest { +static struct vmcs_host_to_guest +{ u16 host_field; u16 guest_field; } const vmcs_h2g_field[] = { @@ -1014,8 +1010,7 @@ static void load_shadow_guest_state(struct vcpu *v) }; /* vvmcs.gstate to shadow vmcs.gstate */ - vvmcs_to_shadow_bulk(v, ARRAY_SIZE(vmcs_gstate_field), - vmcs_gstate_field); + vvmcs_to_shadow_bulk(v, ARRAY_SIZE(vmcs_gstate_field), vmcs_gstate_field); nvcpu->guest_cr[0] = get_vvmcs(v, CR0_READ_SHADOW); nvcpu->guest_cr[4] = get_vvmcs(v, CR4_READ_SHADOW); @@ -1038,7 +1033,8 @@ static void load_shadow_guest_state(struct vcpu *v) if ( control & VM_ENTRY_LOAD_PERF_GLOBAL_CTRL ) { rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL, - get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), false); + get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), + false); if ( rc == X86EMUL_EXCEPTION ) hvm_inject_hw_exception(TRAP_gp_fault, 0); } @@ -1187,14 +1183,12 @@ static void virtual_vmentry(struct cpu_user_regs *regs) nvmx->guest_vpid = new_vpid; } } - } static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs) { /* copy shadow vmcs.gstate back to vvmcs.gstate */ - shadow_to_vvmcs_bulk(v, ARRAY_SIZE(vmcs_gstate_field), - vmcs_gstate_field); + shadow_to_vvmcs_bulk(v, ARRAY_SIZE(vmcs_gstate_field), vmcs_gstate_field); /* RIP, RSP are in user regs */ set_vvmcs(v, GUEST_RIP, regs->rip); set_vvmcs(v, GUEST_RSP, regs->rsp); @@ -1269,14 +1263,13 @@ static void sync_exception_state(struct vcpu *v) if ( !(nvmx->intr.intr_info & INTR_INFO_VALID_MASK) ) return; - switch ( MASK_EXTR(nvmx->intr.intr_info, INTR_INFO_INTR_TYPE_MASK) ) + switch (MASK_EXTR(nvmx->intr.intr_info, INTR_INFO_INTR_TYPE_MASK)) { case X86_EVENTTYPE_EXT_INTR: /* rename exit_reason to EXTERNAL_INTERRUPT */ set_vvmcs(v, VM_EXIT_REASON, EXIT_REASON_EXTERNAL_INTERRUPT); set_vvmcs(v, EXIT_QUALIFICATION, 0); - set_vvmcs(v, VM_EXIT_INTR_INFO, - nvmx->intr.intr_info); + set_vvmcs(v, VM_EXIT_INTR_INFO, nvmx->intr.intr_info); break; case X86_EVENTTYPE_HW_EXCEPTION: @@ -1293,7 +1286,7 @@ static void sync_exception_state(struct vcpu *v) break; default: gdprintk(XENLOG_ERR, "Exception state %lx not handled\n", - nvmx->intr.intr_info); + nvmx->intr.intr_info); break; } } @@ -1386,9 +1379,8 @@ static void nvmx_eptp_update(void) struct vcpu *curr = current; if ( !nestedhvm_vcpu_in_guestmode(curr) || - vcpu_nestedhvm(curr).nv_vmexit_pending || - !vcpu_nestedhvm(curr).stale_np2m || - !nestedhvm_paging_mode_hap(curr) ) + vcpu_nestedhvm(curr).nv_vmexit_pending || + !vcpu_nestedhvm(curr).stale_np2m || !nestedhvm_paging_mode_hap(curr) ) return; /* @@ -1440,7 +1432,7 @@ void nvmx_switch_guest(void) static int nvmx_handle_vmxon(struct cpu_user_regs *regs) { - struct vcpu *v=current; + struct vcpu *v = current; struct nestedvmx *nvmx = &vcpu_2_nvmx(v); struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); struct vmx_inst_decoded decode; @@ -1465,8 +1457,7 @@ static int nvmx_handle_vmxon(struct cpu_user_regs *regs) } rc = 
hvm_copy_from_guest_phys(&nvmcs_revid, gpa, sizeof(nvmcs_revid)); - if ( rc != HVMTRANS_okay || - (nvmcs_revid & ~VMX_BASIC_REVISION_MASK) || + if ( rc != HVMTRANS_okay || (nvmcs_revid & ~VMX_BASIC_REVISION_MASK) || ((nvmcs_revid ^ vmx_basic_msr) & VMX_BASIC_REVISION_MASK) ) { vmfail_invalid(regs); @@ -1492,7 +1483,7 @@ static int nvmx_handle_vmxon(struct cpu_user_regs *regs) static int nvmx_handle_vmxoff(struct cpu_user_regs *regs) { - struct vcpu *v=current; + struct vcpu *v = current; struct nestedvmx *nvmx = &vcpu_2_nvmx(v); nvmx_purge_vvmcs(v); @@ -1509,7 +1500,7 @@ static bool_t vvmcs_launched(struct list_head *launched_list, struct list_head *pos; bool_t launched = 0; - list_for_each(pos, launched_list) + list_for_each (pos, launched_list) { vvmcs = list_entry(pos, struct vvmcs_list, node); if ( vvmcs_mfn == vvmcs->vvmcs_mfn ) @@ -1546,7 +1537,7 @@ static void clear_vvmcs_launched(struct list_head *launched_list, struct vvmcs_list *vvmcs; struct list_head *pos; - list_for_each(pos, launched_list) + list_for_each (pos, launched_list) { vvmcs = list_entry(pos, struct vvmcs_list, node); if ( vvmcs_mfn == vvmcs->vvmcs_mfn ) @@ -1599,7 +1590,7 @@ static int nvmx_handle_vmresume(struct cpu_user_regs *regs) if ( !vvmcx_valid(v) ) { vmfail_invalid(regs); - return X86EMUL_OKAY; + return X86EMUL_OKAY; } __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow); @@ -1652,14 +1643,16 @@ static int nvmx_handle_vmlaunch(struct cpu_user_regs *regs) vmfail_valid(regs, VMX_INSN_VMLAUNCH_NONCLEAR_VMCS); return X86EMUL_OKAY; } - else { + else + { rc = nvmx_vmresume(v); if ( rc ) vmfail_valid(regs, rc); else { - if ( set_vvmcs_launched(&nvmx->launched_list, - PFN_DOWN(v->arch.hvm.vmx.vmcs_shadow_maddr)) < 0 ) + if ( set_vvmcs_launched( + &nvmx->launched_list, + PFN_DOWN(v->arch.hvm.vmx.vmcs_shadow_maddr)) < 0 ) return X86EMUL_UNHANDLEABLE; } rc = X86EMUL_OKAY; @@ -1706,7 +1699,7 @@ static int nvmx_handle_vmptrld(struct cpu_user_regs *regs) struct vmcs_struct *vvmcs = vvmcx; if ( ((vvmcs->vmcs_revision_id ^ vmx_basic_msr) & - VMX_BASIC_REVISION_MASK) || + VMX_BASIC_REVISION_MASK) || (!cpu_has_vmx_vmcs_shadowing && (vvmcs->vmcs_revision_id & ~VMX_BASIC_REVISION_MASK)) ) { @@ -1802,7 +1795,7 @@ static int nvmx_handle_vmclear(struct cpu_user_regs *regs) nvmx_purge_vvmcs(v); vmsucceed(regs); } - else + else { /* Even if this VMCS isn't the current one, we must clear it. 
*/ bool_t writable; @@ -1856,9 +1849,11 @@ static int nvmx_handle_vmread(struct cpu_user_regs *regs) return X86EMUL_OKAY; } - switch ( decode.type ) { + switch (decode.type) + { case VMX_INST_MEMREG_TYPE_MEMORY: - rc = hvm_copy_to_guest_linear(decode.mem, &value, decode.len, 0, &pfinfo); + rc = hvm_copy_to_guest_linear(decode.mem, &value, decode.len, 0, + &pfinfo); if ( rc == HVMTRANS_bad_linear_to_gfn ) hvm_inject_page_fault(pfinfo.ec, pfinfo.linear); if ( rc != HVMTRANS_okay ) @@ -1877,7 +1872,7 @@ static int nvmx_handle_vmwrite(struct cpu_user_regs *regs) { struct vcpu *v = current; struct vmx_inst_decoded decode; - unsigned long operand; + unsigned long operand; u64 vmcs_encoding; enum vmx_insn_errno err; int rc; @@ -1900,7 +1895,7 @@ static int nvmx_handle_vmwrite(struct cpu_user_regs *regs) return X86EMUL_OKAY; } - switch ( vmcs_encoding & ~VMCS_HIGH(0) ) + switch (vmcs_encoding & ~VMCS_HIGH(0)) { case IO_BITMAP_A: unmap_io_bitmap(v, 0); @@ -1927,7 +1922,7 @@ static int nvmx_handle_invept(struct cpu_user_regs *regs) if ( (ret = decode_vmx_inst(regs, &decode, &eptp)) != X86EMUL_OKAY ) return ret; - switch ( reg_read(regs, decode.reg2) ) + switch (reg_read(regs, decode.reg2)) { case INVEPT_SINGLE_CONTEXT: { @@ -1955,7 +1950,7 @@ static int nvmx_handle_invvpid(struct cpu_user_regs *regs) if ( (ret = decode_vmx_inst(regs, &decode, &vpid)) != X86EMUL_OKAY ) return ret; - switch ( reg_read(regs, decode.reg2) ) + switch (reg_read(regs, decode.reg2)) { /* Just invalidate all tlb entries for all types! */ case INVVPID_INDIVIDUAL_ADDR: @@ -2000,7 +1995,7 @@ int nvmx_handle_vmx_insn(struct cpu_user_regs *regs, unsigned int exit_reason) return X86EMUL_UNHANDLEABLE; } - switch ( exit_reason ) + switch (exit_reason) { case EXIT_REASON_VMXOFF: ret = nvmx_handle_vmxoff(regs); @@ -2059,9 +2054,9 @@ int nvmx_handle_vmx_insn(struct cpu_user_regs *regs, unsigned int exit_reason) #define __emul_value(enable1, default1) \ ((enable1 | default1) << 32 | (default1)) -#define gen_vmx_msr(enable1, default1, host_value) \ +#define gen_vmx_msr(enable1, default1, host_value) \ (((__emul_value(enable1, default1) & host_value) & (~0ul << 32)) | \ - ((uint32_t)(__emul_value(enable1, default1) | host_value))) + ((uint32_t)(__emul_value(enable1, default1) | host_value))) /* * Capability reporting @@ -2082,7 +2077,7 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content) * These prerequisites are listed in the Intel 64 and IA-32 * Architectures Software Developer’s Manual, Vol 3, Appendix A. 
*/ - switch ( msr ) + switch (msr) { case MSR_IA32_VMX_PROCBASED_CTLS2: if ( !cpu_has_vmx_secondary_exec_control ) @@ -2113,14 +2108,15 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content) /* * Remove unsupport features from n1 guest capability MSR */ - switch (msr) { + switch (msr) + { case MSR_IA32_VMX_BASIC: { const struct vmcs_struct *vmcs = map_domain_page(_mfn(PFN_DOWN(v->arch.hvm.vmx.vmcs_pa))); - data = (host_data & (~0ul << 32)) | - (vmcs->vmcs_revision_id & 0x7fffffff); + data = + (host_data & (~0ul << 32)) | (vmcs->vmcs_revision_id & 0x7fffffff); unmap_domain_page(vmcs); if ( !cpu_has_vmx_vmcs_shadowing ) @@ -2135,8 +2131,7 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content) case MSR_IA32_VMX_PINBASED_CTLS: case MSR_IA32_VMX_TRUE_PINBASED_CTLS: /* 1-settings */ - data = PIN_BASED_EXT_INTR_MASK | - PIN_BASED_NMI_EXITING | + data = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING | PIN_BASED_PREEMPT_TIMER; data = gen_vmx_msr(data, VMX_PINBASED_CTLS_DEFAULT1, host_data); break; @@ -2145,32 +2140,22 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content) { u32 default1_bits = VMX_PROCBASED_CTLS_DEFAULT1; /* 1-settings */ - data = CPU_BASED_HLT_EXITING | - CPU_BASED_VIRTUAL_INTR_PENDING | - CPU_BASED_CR8_LOAD_EXITING | - CPU_BASED_CR8_STORE_EXITING | - CPU_BASED_INVLPG_EXITING | - CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING | - CPU_BASED_MONITOR_EXITING | - CPU_BASED_MWAIT_EXITING | - CPU_BASED_MOV_DR_EXITING | - CPU_BASED_ACTIVATE_IO_BITMAP | - CPU_BASED_USE_TSC_OFFSETING | - CPU_BASED_UNCOND_IO_EXITING | - CPU_BASED_RDTSC_EXITING | - CPU_BASED_MONITOR_TRAP_FLAG | - CPU_BASED_VIRTUAL_NMI_PENDING | - CPU_BASED_ACTIVATE_MSR_BITMAP | - CPU_BASED_PAUSE_EXITING | - CPU_BASED_RDPMC_EXITING | - CPU_BASED_TPR_SHADOW | + data = CPU_BASED_HLT_EXITING | CPU_BASED_VIRTUAL_INTR_PENDING | + CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | + CPU_BASED_INVLPG_EXITING | CPU_BASED_CR3_LOAD_EXITING | + CPU_BASED_CR3_STORE_EXITING | CPU_BASED_MONITOR_EXITING | + CPU_BASED_MWAIT_EXITING | CPU_BASED_MOV_DR_EXITING | + CPU_BASED_ACTIVATE_IO_BITMAP | CPU_BASED_USE_TSC_OFFSETING | + CPU_BASED_UNCOND_IO_EXITING | CPU_BASED_RDTSC_EXITING | + CPU_BASED_MONITOR_TRAP_FLAG | CPU_BASED_VIRTUAL_NMI_PENDING | + CPU_BASED_ACTIVATE_MSR_BITMAP | CPU_BASED_PAUSE_EXITING | + CPU_BASED_RDPMC_EXITING | CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; if ( msr == MSR_IA32_VMX_TRUE_PROCBASED_CTLS ) - default1_bits &= ~(CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING | - CPU_BASED_INVLPG_EXITING); + default1_bits &= + ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | + CPU_BASED_INVLPG_EXITING); data = gen_vmx_msr(data, default1_bits, host_data); break; @@ -2179,31 +2164,24 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content) /* 1-settings */ data = SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | - SECONDARY_EXEC_ENABLE_VPID | - SECONDARY_EXEC_UNRESTRICTED_GUEST | + SECONDARY_EXEC_ENABLE_VPID | SECONDARY_EXEC_UNRESTRICTED_GUEST | SECONDARY_EXEC_ENABLE_EPT; data = gen_vmx_msr(data, 0, host_data); break; case MSR_IA32_VMX_EXIT_CTLS: case MSR_IA32_VMX_TRUE_EXIT_CTLS: /* 1-settings */ - data = VM_EXIT_ACK_INTR_ON_EXIT | - VM_EXIT_IA32E_MODE | - VM_EXIT_SAVE_PREEMPT_TIMER | - VM_EXIT_SAVE_GUEST_PAT | - VM_EXIT_LOAD_HOST_PAT | - VM_EXIT_SAVE_GUEST_EFER | - VM_EXIT_LOAD_HOST_EFER | - VM_EXIT_LOAD_PERF_GLOBAL_CTRL; + data = VM_EXIT_ACK_INTR_ON_EXIT | VM_EXIT_IA32E_MODE | + 
VM_EXIT_SAVE_PREEMPT_TIMER | VM_EXIT_SAVE_GUEST_PAT | + VM_EXIT_LOAD_HOST_PAT | VM_EXIT_SAVE_GUEST_EFER | + VM_EXIT_LOAD_HOST_EFER | VM_EXIT_LOAD_PERF_GLOBAL_CTRL; data = gen_vmx_msr(data, VMX_EXIT_CTLS_DEFAULT1, host_data); break; case MSR_IA32_VMX_ENTRY_CTLS: case MSR_IA32_VMX_TRUE_ENTRY_CTLS: /* 1-settings */ - data = VM_ENTRY_LOAD_GUEST_PAT | - VM_ENTRY_LOAD_GUEST_EFER | - VM_ENTRY_LOAD_PERF_GLOBAL_CTRL | - VM_ENTRY_IA32E_MODE; + data = VM_ENTRY_LOAD_GUEST_PAT | VM_ENTRY_LOAD_GUEST_EFER | + VM_ENTRY_LOAD_PERF_GLOBAL_CTRL | VM_ENTRY_IA32E_MODE; data = gen_vmx_msr(data, VMX_ENTRY_CTLS_DEFAULT1, host_data); break; @@ -2246,10 +2224,9 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content) * walk is successful, the translated value is returned in * L1_gpa. The result value tells what to do next. */ -int -nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, - unsigned int *page_order, uint8_t *p2m_acc, - bool_t access_r, bool_t access_w, bool_t access_x) +int nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, + unsigned int *page_order, uint8_t *p2m_acc, + bool_t access_r, bool_t access_w, bool_t access_x) { int rc; unsigned long gfn; @@ -2263,7 +2240,7 @@ nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, __vmread(EXIT_QUALIFICATION, &exit_qual); rc = nept_translate_l2ga(v, L2_gpa, page_order, rwx_rights, &gfn, p2m_acc, &exit_qual, &exit_reason); - switch ( rc ) + switch (rc) { case EPT_TRANSLATE_SUCCEED: *L1_gpa = (gfn << PAGE_SHIFT) + (L2_gpa & ~PAGE_MASK); @@ -2305,9 +2282,9 @@ void nvmx_idtv_handling(void) * be reinjected, otherwise, pass to L1. */ __vmread(VM_EXIT_REASON, &reason); - if ( reason != EXIT_REASON_EPT_VIOLATION ? - !(nvmx->intr.intr_info & INTR_INFO_VALID_MASK) : - !nvcpu->nv_vmexit_pending ) + if ( reason != EXIT_REASON_EPT_VIOLATION + ? !(nvmx->intr.intr_info & INTR_INFO_VALID_MASK) + : !nvcpu->nv_vmexit_pending ) { __vmwrite(VM_ENTRY_INTR_INFO, idtv_info & ~INTR_INFO_RESVD_BITS_MASK); if ( idtv_info & INTR_INFO_DELIVER_CODE_MASK ) @@ -2325,7 +2302,7 @@ void nvmx_idtv_handling(void) */ __vmread(VM_EXIT_INSTRUCTION_LEN, &reason); __vmwrite(VM_ENTRY_INSTRUCTION_LEN, reason); - } + } } /* @@ -2335,8 +2312,7 @@ void nvmx_idtv_handling(void) * or it may be already processed here. * 0: Require the normal layer 0 process. 
*/ -int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, - unsigned int exit_reason) +int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, unsigned int exit_reason) { struct vcpu *v = current; struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); @@ -2347,13 +2323,14 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, nvmx->intr.intr_info = 0; nvmx->intr.error_code = 0; - switch (exit_reason) { + switch (exit_reason) + { case EXIT_REASON_EXCEPTION_NMI: { unsigned long intr_info; - u32 valid_mask = MASK_INSR(X86_EVENTTYPE_HW_EXCEPTION, - INTR_INFO_INTR_TYPE_MASK) | - INTR_INFO_VALID_MASK; + u32 valid_mask = + MASK_INSR(X86_EVENTTYPE_HW_EXCEPTION, INTR_INFO_INTR_TYPE_MASK) | + INTR_INFO_VALID_MASK; u64 exec_bitmap; int vector; @@ -2466,7 +2443,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, break; case EXIT_REASON_MONITOR_TRAP_FLAG: ctrl = __n2_exec_control(v); - if ( ctrl & CPU_BASED_MONITOR_TRAP_FLAG) + if ( ctrl & CPU_BASED_MONITOR_TRAP_FLAG ) nvcpu->nv_vmexit_pending = 1; break; case EXIT_REASON_ACCESS_GDTR_OR_IDTR: @@ -2524,8 +2501,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, break; case EXIT_REASON_DR_ACCESS: ctrl = __n2_exec_control(v); - if ( (ctrl & CPU_BASED_MOV_DR_EXITING) && - v->arch.hvm.flag_dr_dirty ) + if ( (ctrl & CPU_BASED_MOV_DR_EXITING) && v->arch.hvm.flag_dr_dirty ) nvcpu->nv_vmexit_pending = 1; break; case EXIT_REASON_INVLPG: @@ -2560,17 +2536,16 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, if ( ctrl & mask ) nvcpu->nv_vmexit_pending = 1; } - else /* CR0, CR4, CLTS, LMSW */ + else /* CR0, CR4, CLTS, LMSW */ { /* - * While getting the VM exit for CR0/CR4 access, check if L1 VMM owns - * the bit. - * If so, inject the VM exit to L1 VMM. - * Otherwise, L0 will handle it and sync the value to L1 virtual VMCS. + * While getting the VM exit for CR0/CR4 access, check if L1 VMM + * owns the bit. If so, inject the VM exit to L1 VMM. Otherwise, L0 + * will handle it and sync the value to L1 virtual VMCS. 
*/ unsigned long old_val, val, changed_bits; - switch ( qual.access_type ) + switch (qual.access_type) { case VMX_CR_ACCESS_TYPE_MOV_TO_CR: { @@ -2589,7 +2564,8 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, u64 guest_cr0 = get_vvmcs(v, GUEST_CR0); set_vvmcs(v, GUEST_CR0, - (guest_cr0 & cr0_gh_mask) | (val & ~cr0_gh_mask)); + (guest_cr0 & cr0_gh_mask) | + (val & ~cr0_gh_mask)); } } else if ( qual.cr == 4 ) @@ -2605,7 +2581,8 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, u64 guest_cr4 = get_vvmcs(v, GUEST_CR4); set_vvmcs(v, GUEST_CR4, - (guest_cr4 & cr4_gh_mask) | (val & ~cr4_gh_mask)); + (guest_cr4 & cr4_gh_mask) | + (val & ~cr4_gh_mask)); } } else @@ -2633,9 +2610,9 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, u64 cr0_gh_mask = get_vvmcs(v, CR0_GUEST_HOST_MASK); __vmread(CR0_READ_SHADOW, &old_val); - old_val &= X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS; + old_val &= X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS; val = qual.lmsw_data & - (X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS); + (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS); changed_bits = old_val ^ val; if ( changed_bits & cr0_gh_mask ) nvcpu->nv_vmexit_pending = 1; @@ -2643,7 +2620,8 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, { u64 guest_cr0 = get_vvmcs(v, GUEST_CR0); - set_vvmcs(v, GUEST_CR0, (guest_cr0 & cr0_gh_mask) | (val & ~cr0_gh_mask)); + set_vvmcs(v, GUEST_CR0, + (guest_cr0 & cr0_gh_mask) | (val & ~cr0_gh_mask)); } break; } @@ -2671,14 +2649,14 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, domain_crash(v->domain); } - return ( nvcpu->nv_vmexit_pending == 1 ); + return (nvcpu->nv_vmexit_pending == 1); } void nvmx_set_cr_read_shadow(struct vcpu *v, unsigned int cr) { unsigned long cr_field, read_shadow_field, mask_field; - switch ( cr ) + switch (cr) { case 0: cr_field = GUEST_CR0; @@ -2697,8 +2675,7 @@ void nvmx_set_cr_read_shadow(struct vcpu *v, unsigned int cr) if ( !nestedhvm_vmswitch_in_progress(v) ) { - unsigned long virtual_cr_mask = - get_vvmcs(v, mask_field); + unsigned long virtual_cr_mask = get_vvmcs(v, mask_field); /* * We get here when L2 changed cr in a way that did not change @@ -2709,8 +2686,7 @@ void nvmx_set_cr_read_shadow(struct vcpu *v, unsigned int cr) * value combined with the L1-owned bits from L1's guest cr. */ v->arch.hvm.guest_cr[cr] &= ~virtual_cr_mask; - v->arch.hvm.guest_cr[cr] |= virtual_cr_mask & - get_vvmcs(v, cr_field); + v->arch.hvm.guest_cr[cr] |= virtual_cr_mask & get_vvmcs(v, cr_field); } /* nvcpu.guest_cr is what L2 write to cr actually. */ diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c index 3f3fb7a4ff..911b4a194e 100644 --- a/xen/arch/x86/hvm/vpic.c +++ b/xen/arch/x86/hvm/vpic.c @@ -1,10 +1,10 @@ /* * i8259 interrupt controller emulation - * + * * Copyright (c) 2003-2004 Fabrice Bellard * Copyright (c) 2005 Intel Corperation * Copyright (c) 2006 Keir Fraser, XenSource Inc. 
- * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the @@ -34,11 +34,11 @@ #include #include -#define vpic_domain(v) (container_of((v), struct domain, \ - arch.hvm.vpic[!vpic->is_master])) -#define __vpic_lock(v) &container_of((v), struct hvm_domain, \ - vpic[!(v)->is_master])->irq_lock -#define vpic_lock(v) spin_lock(__vpic_lock(v)) +#define vpic_domain(v) \ + (container_of((v), struct domain, arch.hvm.vpic[!vpic->is_master])) +#define __vpic_lock(v) \ + &container_of((v), struct hvm_domain, vpic[!(v)->is_master])->irq_lock +#define vpic_lock(v) spin_lock(__vpic_lock(v)) #define vpic_unlock(v) spin_unlock(__vpic_lock(v)) #define vpic_is_locked(v) spin_is_locked(__vpic_lock(v)) #define vpic_elcr_mask(v) (vpic->is_master ? (uint8_t)0xf8 : (uint8_t)0xde); @@ -55,8 +55,9 @@ static int vpic_get_priority(struct hvm_hw_vpic *vpic, uint8_t mask) return VPIC_PRIO_NONE; /* prio = ffs(mask ROR vpic->priority_add); */ - asm ( "ror %%cl,%b1 ; rep; bsf %1,%0" - : "=r" (prio) : "q" ((uint32_t)mask), "c" (vpic->priority_add) ); + asm("ror %%cl,%b1 ; rep; bsf %1,%0" + : "=r"(prio) + : "q"((uint32_t)mask), "c"(vpic->priority_add)); return prio; } @@ -176,13 +177,13 @@ static int vpic_intack(struct hvm_hw_vpic *vpic) irq += 8; } - out: +out: vpic_unlock(vpic); return irq; } -static void vpic_ioport_write( - struct hvm_hw_vpic *vpic, uint32_t addr, uint32_t val) +static void vpic_ioport_write(struct hvm_hw_vpic *vpic, uint32_t addr, + uint32_t val) { int priority, cmd, irq; uint8_t mask, unmasked = 0; @@ -232,7 +233,7 @@ static void vpic_ioport_write( { /* OCW2 */ cmd = val >> 5; - switch ( cmd ) + switch (cmd) { case 0: /* Rotate in AEOI Mode (Clear) */ case 4: /* Rotate in AEOI Mode (Set) */ @@ -261,10 +262,10 @@ static void vpic_ioport_write( vpic_update_int_output(vpic); vpic_unlock(vpic); hvm_dpci_eoi(current->domain, - hvm_isa_irq_to_gsi((addr >> 7) ? (irq|8) : irq), + hvm_isa_irq_to_gsi((addr >> 7) ? (irq | 8) : irq), NULL); return; /* bail immediately */ - case 6: /* Set Priority */ + case 6: /* Set Priority */ vpic->priority_add = (val + 1) & 7; break; } @@ -272,7 +273,7 @@ static void vpic_ioport_write( } else { - switch ( vpic->init_state & 3 ) + switch (vpic->init_state & 3) { case 0: /* OCW1 */ @@ -323,8 +324,8 @@ static uint32_t vpic_ioport_read(struct hvm_hw_vpic *vpic, uint32_t addr) return vpic->imr; } -static int vpic_intercept_pic_io( - int dir, unsigned int port, unsigned int bytes, uint32_t *val) +static int vpic_intercept_pic_io(int dir, unsigned int port, unsigned int bytes, + uint32_t *val) { struct hvm_hw_vpic *vpic; @@ -345,8 +346,8 @@ static int vpic_intercept_pic_io( return X86EMUL_OKAY; } -static int vpic_intercept_elcr_io( - int dir, unsigned int port, unsigned int bytes, uint32_t *val) +static int vpic_intercept_elcr_io(int dir, unsigned int port, + unsigned int bytes, uint32_t *val) { struct hvm_hw_vpic *vpic; uint32_t data; @@ -382,7 +383,7 @@ static int vpic_save(struct vcpu *v, hvm_domain_context_t *h) return 0; /* Save the state of both PICs */ - for ( i = 0; i < 2 ; i++ ) + for ( i = 0; i < 2; i++ ) { s = &d->arch.hvm.vpic[i]; if ( hvm_save_entry(PIC, i, h, s) ) @@ -425,7 +426,7 @@ void vpic_reset(struct domain *d) vpic = &d->arch.hvm.vpic[0]; memset(vpic, 0, sizeof(*vpic)); vpic->is_master = 1; - vpic->elcr = 1 << 2; + vpic->elcr = 1 << 2; /* Slave PIC. 
*/ vpic++; diff --git a/xen/arch/x86/hvm/vpt.c b/xen/arch/x86/hvm/vpt.c index ecd25d7ad4..575648bc6d 100644 --- a/xen/arch/x86/hvm/vpt.c +++ b/xen/arch/x86/hvm/vpt.c @@ -88,8 +88,8 @@ static int pt_irq_vector(struct periodic_time *pt, enum hvm_intsrc src) gsi = hvm_isa_irq_to_gsi(isa_irq); if ( src == hvm_intsrc_pic ) - return (v->domain->arch.hvm.vpic[isa_irq >> 3].irq_base - + (isa_irq & 7)); + return (v->domain->arch.hvm.vpic[isa_irq >> 3].irq_base + + (isa_irq & 7)); ASSERT(src == hvm_intsrc_lapic); vector = vioapic_get_vector(v->domain, gsi); @@ -109,7 +109,7 @@ static int pt_irq_masked(struct periodic_time *pt) struct vcpu *v = pt->vcpu; unsigned int gsi = pt->irq; - switch ( pt->source ) + switch (pt->source) { case PTSRC_lapic: { @@ -156,7 +156,7 @@ static void pt_lock(struct periodic_time *pt) { struct vcpu *v; - for ( ; ; ) + for ( ;; ) { v = pt->vcpu; spin_lock(&v->arch.hvm.tm_lock); @@ -182,7 +182,7 @@ static void pt_process_missed_ticks(struct periodic_time *pt) if ( missed_ticks <= 0 ) return; - missed_ticks = missed_ticks / (s_time_t) pt->period + 1; + missed_ticks = missed_ticks / (s_time_t)pt->period + 1; if ( mode_is(pt->vcpu->domain, no_missed_ticks_pending) ) pt->do_not_freeze = !pt->pending_intr_nr; else @@ -220,7 +220,7 @@ void pt_save_timer(struct vcpu *v) spin_lock(&v->arch.hvm.tm_lock); - list_for_each_entry ( pt, head, list ) + list_for_each_entry (pt, head, list) if ( !pt->do_not_freeze ) stop_timer(&pt->timer); @@ -236,7 +236,7 @@ void pt_restore_timer(struct vcpu *v) spin_lock(&v->arch.hvm.tm_lock); - list_for_each_entry ( pt, head, list ) + list_for_each_entry (pt, head, list) { if ( pt->pending_intr_nr == 0 ) { @@ -312,7 +312,7 @@ int pt_update_irq(struct vcpu *v) earliest_pt = NULL; max_lag = -1ULL; - list_for_each_entry_safe ( pt, temp, head, list ) + list_for_each_entry_safe(pt, temp, head, list) { if ( pt->pending_intr_nr ) { @@ -348,7 +348,7 @@ int pt_update_irq(struct vcpu *v) spin_unlock(&v->arch.hvm.tm_lock); - switch ( earliest_pt->source ) + switch (earliest_pt->source) { case PTSRC_lapic: /* @@ -395,7 +395,7 @@ int pt_update_irq(struct vcpu *v) spin_lock(&v->arch.hvm.tm_lock); /* Make sure the timer is still on the list. 
*/ - list_for_each_entry ( pt, &v->arch.hvm.tm_list, list ) + list_for_each_entry (pt, &v->arch.hvm.tm_list, list) if ( pt == earliest_pt ) { pt_irq_fired(v, pt); @@ -415,13 +415,12 @@ int pt_update_irq(struct vcpu *v) return pt_vector; } -static struct periodic_time *is_pt_irq( - struct vcpu *v, struct hvm_intack intack) +static struct periodic_time *is_pt_irq(struct vcpu *v, struct hvm_intack intack) { struct list_head *head = &v->arch.hvm.tm_list; struct periodic_time *pt; - list_for_each_entry ( pt, head, list ) + list_for_each_entry (pt, head, list) { if ( pt->pending_intr_nr && pt->irq_issued && (intack.vector == pt_irq_vector(pt, intack.source)) ) @@ -467,18 +466,17 @@ void pt_migrate(struct vcpu *v) spin_lock(&v->arch.hvm.tm_lock); - list_for_each_entry ( pt, head, list ) + list_for_each_entry (pt, head, list) migrate_timer(&pt->timer, v->processor); spin_unlock(&v->arch.hvm.tm_lock); } -void create_periodic_time( - struct vcpu *v, struct periodic_time *pt, uint64_t delta, - uint64_t period, uint8_t irq, time_cb *cb, void *data, bool level) +void create_periodic_time(struct vcpu *v, struct periodic_time *pt, + uint64_t delta, uint64_t period, uint8_t irq, + time_cb *cb, void *data, bool level) { - if ( !pt->source || - (irq >= NR_ISAIRQS && pt->source == PTSRC_isa) || + if ( !pt->source || (irq >= NR_ISAIRQS && pt->source == PTSRC_isa) || (level && period) || (pt->source == PTSRC_ioapic ? irq >= hvm_domain_irq(v->domain)->nr_gsis : level) ) @@ -499,8 +497,10 @@ void create_periodic_time( if ( (period < 100000) && period ) { if ( !test_and_set_bool(pt->warned_timeout_too_short) ) - gdprintk(XENLOG_WARNING, "HVM_PlatformTime: program too " - "small period %"PRIu64"\n", period); + gdprintk(XENLOG_WARNING, + "HVM_PlatformTime: program too " + "small period %" PRIu64 "\n", + period); period = 100000; } @@ -617,7 +617,6 @@ void pt_adjust_global_vcpu_target(struct vcpu *v) write_unlock(&pl_time->vhpet.lock); } - static void pt_resume(struct periodic_time *pt) { if ( pt->vcpu == NULL ) diff --git a/xen/arch/x86/hypercall.c b/xen/arch/x86/hypercall.c index 93e78600da..22ab4eccc5 100644 --- a/xen/arch/x86/hypercall.c +++ b/xen/arch/x86/hypercall.c @@ -21,13 +21,10 @@ #include -#define ARGS(x, n) \ - [ __HYPERVISOR_ ## x ] = { n, n } -#define COMP(x, n, c) \ - [ __HYPERVISOR_ ## x ] = { n, c } +#define ARGS(x, n) [__HYPERVISOR_##x] = {n, n} +#define COMP(x, n, c) [__HYPERVISOR_##x] = {n, c} -const hypercall_args_t hypercall_args_table[NR_hypercalls] = -{ +const hypercall_args_t hypercall_args_table[NR_hypercalls] = { ARGS(set_trap_table, 1), ARGS(mmu_update, 4), ARGS(set_gdt, 2), @@ -79,20 +76,29 @@ const hypercall_args_t hypercall_args_table[NR_hypercalls] = #undef COMP #undef ARGS -#define next_arg(fmt, args) ({ \ - unsigned long __arg; \ - switch ( *(fmt)++ ) \ - { \ - case 'i': __arg = (unsigned long)va_arg(args, unsigned int); break; \ - case 'l': __arg = (unsigned long)va_arg(args, unsigned long); break; \ - case 'h': __arg = (unsigned long)va_arg(args, void *); break; \ - default: __arg = 0; BUG(); \ - } \ - __arg; \ -}) +#define next_arg(fmt, args) \ + ({ \ + unsigned long __arg; \ + switch (*(fmt)++) \ + { \ + case 'i': \ + __arg = (unsigned long)va_arg(args, unsigned int); \ + break; \ + case 'l': \ + __arg = (unsigned long)va_arg(args, unsigned long); \ + break; \ + case 'h': \ + __arg = (unsigned long)va_arg(args, void *); \ + break; \ + default: \ + __arg = 0; \ + BUG(); \ + } \ + __arg; \ + }) -unsigned long hypercall_create_continuation( - unsigned int op, const char *format, 
...) +unsigned long hypercall_create_continuation(unsigned int op, const char *format, + ...) { struct vcpu *curr = current; struct mc_state *mcs = &curr->mc_state; @@ -121,14 +127,26 @@ unsigned long hypercall_create_continuation( for ( i = 0; *p != '\0'; i++ ) { arg = next_arg(p, args); - switch ( i ) + switch (i) { - case 0: regs->rdi = arg; break; - case 1: regs->rsi = arg; break; - case 2: regs->rdx = arg; break; - case 3: regs->r10 = arg; break; - case 4: regs->r8 = arg; break; - case 5: regs->r9 = arg; break; + case 0: + regs->rdi = arg; + break; + case 1: + regs->rsi = arg; + break; + case 2: + regs->rdx = arg; + break; + case 3: + regs->r10 = arg; + break; + case 4: + regs->r8 = arg; + break; + case 5: + regs->r9 = arg; + break; } } } @@ -137,14 +155,26 @@ unsigned long hypercall_create_continuation( for ( i = 0; *p != '\0'; i++ ) { arg = next_arg(p, args); - switch ( i ) + switch (i) { - case 0: regs->rbx = arg; break; - case 1: regs->rcx = arg; break; - case 2: regs->rdx = arg; break; - case 3: regs->rsi = arg; break; - case 4: regs->rdi = arg; break; - case 5: regs->rbp = arg; break; + case 0: + regs->rbx = arg; + break; + case 1: + regs->rcx = arg; + break; + case 2: + regs->rdx = arg; + break; + case 3: + regs->rsi = arg; + break; + case 4: + regs->rdi = arg; + break; + case 5: + regs->rbp = arg; + break; } } } @@ -212,15 +242,30 @@ int hypercall_xlat_continuation(unsigned int *id, unsigned int nr, { unsigned long *reg; - switch ( i ) + switch (i) { - case 0: reg = &regs->rbx; break; - case 1: reg = &regs->rcx; break; - case 2: reg = &regs->rdx; break; - case 3: reg = &regs->rsi; break; - case 4: reg = &regs->rdi; break; - case 5: reg = &regs->rbp; break; - default: BUG(); reg = NULL; break; + case 0: + reg = &regs->rbx; + break; + case 1: + reg = &regs->rcx; + break; + case 2: + reg = &regs->rdx; + break; + case 3: + reg = &regs->rsi; + break; + case 4: + reg = &regs->rdi; + break; + case 5: + reg = &regs->rbp; + break; + default: + BUG(); + reg = NULL; + break; } if ( (mask & 1) ) { @@ -268,4 +313,3 @@ enum mc_disposition arch_do_multicall_call(struct mc_state *mc) * indent-tabs-mode: nil * End: */ - diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c index 88178485cb..7ef4d4b38b 100644 --- a/xen/arch/x86/i387.c +++ b/xen/arch/x86/i387.c @@ -27,7 +27,7 @@ static inline void fpu_xrstor(struct vcpu *v, uint64_t mask) ASSERT(v->arch.xsave_area); /* - * XCR0 normally represents what guest OS set. In case of Xen itself, + * XCR0 normally represents what guest OS set. In case of Xen itself, * we set the accumulated feature mask before doing save/restore. */ ok = set_xcr0(v->arch.xcr0_accum | XSTATE_FP_SSE); @@ -52,10 +52,11 @@ static inline void fpu_fxrstor(struct vcpu *v) if ( !(fpu_ctxt->fsw & ~fpu_ctxt->fcw & 0x003f) && boot_cpu_data.x86_vendor == X86_VENDOR_AMD ) { - asm volatile ( "fnclex\n\t" - "ffree %%st(7)\n\t" /* clear stack tag */ - "fildl %0" /* load to clear state */ - : : "m" (*fpu_ctxt) ); + asm volatile("fnclex\n\t" + "ffree %%st(7)\n\t" /* clear stack tag */ + "fildl %0" /* load to clear state */ + : + : "m"(*fpu_ctxt)); } /* @@ -63,48 +64,46 @@ static inline void fpu_fxrstor(struct vcpu *v) * possibility, which may occur if the block was passed to us by control * tools or through VCPUOP_initialise, by silently clearing the block. */ - switch ( __builtin_expect(fpu_ctxt->x[FPU_WORD_SIZE_OFFSET], 8) ) + switch (__builtin_expect(fpu_ctxt->x[FPU_WORD_SIZE_OFFSET], 8)) { default: - asm volatile ( + asm volatile( /* See below for why the operands/constraints are this way.
*/ "1: " REX64_PREFIX "fxrstor (%2)\n" ".section .fixup,\"ax\" \n" - "2: push %%"__OP"ax \n" - " push %%"__OP"cx \n" - " push %%"__OP"di \n" - " mov %2,%%"__OP"di \n" + "2: push %%" __OP "ax \n" + " push %%" __OP "cx \n" + " push %%" __OP "di \n" + " mov %2,%%" __OP "di \n" " mov %1,%%ecx \n" " xor %%eax,%%eax \n" " rep ; stosl \n" - " pop %%"__OP"di \n" - " pop %%"__OP"cx \n" - " pop %%"__OP"ax \n" + " pop %%" __OP "di \n" + " pop %%" __OP "cx \n" + " pop %%" __OP "ax \n" " jmp 1b \n" - ".previous \n" - _ASM_EXTABLE(1b, 2b) + ".previous \n" _ASM_EXTABLE(1b, 2b) : - : "m" (*fpu_ctxt), "i" (sizeof(*fpu_ctxt) / 4), "R" (fpu_ctxt) ); + : "m"(*fpu_ctxt), "i"(sizeof(*fpu_ctxt) / 4), "R"(fpu_ctxt)); break; - case 4: case 2: - asm volatile ( - "1: fxrstor %0 \n" - ".section .fixup,\"ax\"\n" - "2: push %%"__OP"ax \n" - " push %%"__OP"cx \n" - " push %%"__OP"di \n" - " lea %0,%%"__OP"di \n" - " mov %1,%%ecx \n" - " xor %%eax,%%eax \n" - " rep ; stosl \n" - " pop %%"__OP"di \n" - " pop %%"__OP"cx \n" - " pop %%"__OP"ax \n" - " jmp 1b \n" - ".previous \n" - _ASM_EXTABLE(1b, 2b) - : - : "m" (*fpu_ctxt), "i" (sizeof(*fpu_ctxt) / 4) ); + case 4: + case 2: + asm volatile("1: fxrstor %0 \n" + ".section .fixup,\"ax\"\n" + "2: push %%" __OP "ax \n" + " push %%" __OP "cx \n" + " push %%" __OP "di \n" + " lea %0,%%" __OP "di \n" + " mov %1,%%ecx \n" + " xor %%eax,%%eax \n" + " rep ; stosl \n" + " pop %%" __OP "di \n" + " pop %%" __OP "cx \n" + " pop %%" __OP "ax \n" + " jmp 1b \n" + ".previous \n" _ASM_EXTABLE(1b, 2b) + : + : "m"(*fpu_ctxt), "i"(sizeof(*fpu_ctxt) / 4)); break; } } @@ -165,8 +164,9 @@ static inline void fpu_fxsave(struct vcpu *v) * On older versions the rex64 prefix works only if we force an * addressing mode that doesn't require extended registers. */ - asm volatile ( REX64_PREFIX "fxsave (%1)" - : "=m" (*fpu_ctxt) : "R" (fpu_ctxt) ); + asm volatile(REX64_PREFIX "fxsave (%1)" + : "=m"(*fpu_ctxt) + : "R"(fpu_ctxt)); /* * AMD CPUs don't save/restore FDP/FIP/FOP unless an exception @@ -180,12 +180,11 @@ static inline void fpu_fxsave(struct vcpu *v) * If the FIP/FDP[63:32] are both zero, it is safe to use the * 32-bit restore to also restore the selectors. */ - if ( !fip_width && - !((fpu_ctxt->fip.addr | fpu_ctxt->fdp.addr) >> 32) ) + if ( !fip_width && !((fpu_ctxt->fip.addr | fpu_ctxt->fdp.addr) >> 32) ) { struct ix87_env fpu_env; - asm volatile ( "fnstenv %0" : "=m" (fpu_env) ); + asm volatile("fnstenv %0" : "=m"(fpu_env)); fpu_ctxt->fip.sel = fpu_env.fcs; fpu_ctxt->fdp.sel = fpu_env.fds; fip_width = 4; @@ -195,7 +194,7 @@ static inline void fpu_fxsave(struct vcpu *v) } else { - asm volatile ( "fxsave %0" : "=m" (*fpu_ctxt) ); + asm volatile("fxsave %0" : "=m"(*fpu_ctxt)); fip_width = 4; } @@ -241,12 +240,12 @@ void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool need_stts) need_stts = true; } - maybe_stts: +maybe_stts: if ( need_stts ) stts(); } -/* +/* * Restore FPU state when #NM is triggered. */ void vcpu_restore_fpu_lazy(struct vcpu *v) @@ -270,8 +269,8 @@ void vcpu_restore_fpu_lazy(struct vcpu *v) v->fpu_dirtied = 1; } -/* - * On each context switch, save the necessary FPU info of VCPU being switch +/* + * On each context switch, save the necessary FPU info of VCPU being switch * out. It dispatches saving operation based on CPU's capability. 
*/ static bool _vcpu_save_fpu(struct vcpu *v) @@ -310,7 +309,7 @@ void save_fpu_enable(void) int vcpu_init_fpu(struct vcpu *v) { int rc; - + v->arch.fully_eager_fpu = opt_eager_fpu; if ( (rc = xstate_alloc_save_area(v)) != 0 ) diff --git a/xen/arch/x86/i8259.c b/xen/arch/x86/i8259.c index 357ffcd085..ed38926e61 100644 --- a/xen/arch/x86/i8259.c +++ b/xen/arch/x86/i8259.c @@ -1,6 +1,6 @@ /****************************************************************************** * i8259.c - * + * * Well, this is required for SMP systems as well, as it build interrupt * tables for IO APICS as well as uniprocessor 8259-alikes. */ @@ -52,19 +52,18 @@ static unsigned int startup_8259A_irq(struct irq_desc *desc) static void end_8259A_irq(struct irq_desc *desc, u8 vector) { - if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) + if ( !(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS)) ) enable_8259A_irq(desc); } static struct hw_interrupt_type __read_mostly i8259A_irq_type = { .typename = "XT-PIC", - .startup = startup_8259A_irq, + .startup = startup_8259A_irq, .shutdown = disable_8259A_irq, - .enable = enable_8259A_irq, - .disable = disable_8259A_irq, - .ack = mask_and_ack_8259A_irq, - .end = end_8259A_irq -}; + .enable = enable_8259A_irq, + .disable = disable_8259A_irq, + .ack = mask_and_ack_8259A_irq, + .end = end_8259A_irq}; /* * 8259A PIC functions to handle ISA devices: @@ -77,9 +76,9 @@ static struct hw_interrupt_type __read_mostly i8259A_irq_type = { */ static unsigned int cached_irq_mask = 0xffff; -#define __byte(x,y) (((unsigned char *)&(y))[x]) -#define cached_21 (__byte(0,cached_irq_mask)) -#define cached_A1 (__byte(1,cached_irq_mask)) +#define __byte(x, y) (((unsigned char *)&(y))[x]) +#define cached_21 (__byte(0, cached_irq_mask)) +#define cached_A1 (__byte(1, cached_irq_mask)) /* * Not all IRQs can be routed through the IO-APIC, eg. on certain (older) @@ -99,10 +98,10 @@ static void _disable_8259A_irq(unsigned int irq) spin_lock_irqsave(&i8259A_lock, flags); cached_irq_mask |= mask; - if (irq & 8) - outb(cached_A1,0xA1); + if ( irq & 8 ) + outb(cached_A1, 0xA1); else - outb(cached_21,0x21); + outb(cached_21, 0x21); per_cpu(vector_irq, 0)[LEGACY_VECTOR(irq)] = ~irq; spin_unlock_irqrestore(&i8259A_lock, flags); } @@ -120,21 +119,21 @@ void enable_8259A_irq(struct irq_desc *desc) spin_lock_irqsave(&i8259A_lock, flags); cached_irq_mask &= mask; per_cpu(vector_irq, 0)[LEGACY_VECTOR(desc->irq)] = desc->irq; - if (desc->irq & 8) - outb(cached_A1,0xA1); + if ( desc->irq & 8 ) + outb(cached_A1, 0xA1); else - outb(cached_21,0x21); + outb(cached_21, 0x21); spin_unlock_irqrestore(&i8259A_lock, flags); } int i8259A_irq_pending(unsigned int irq) { - unsigned int mask = 1<> 8); @@ -172,17 +171,18 @@ void unmask_8259A(void) static inline int i8259A_irq_real(unsigned int irq) { int value; - int irqmask = 1<> 8); - outb(0x0A,0xA0); /* back to the IRR register */ + outb(0x0A, 0xA0); /* back to the IRR register */ return value; } @@ -216,11 +216,13 @@ static bool _mask_and_ack_8259A_irq(unsigned int irq) * but should be enough to warn the user that there * is something bad going on ... */ - if ((cached_irq_mask & irqmask) && !i8259A_irq_real(irq)) { + if ( (cached_irq_mask & irqmask) && !i8259A_irq_real(irq) ) + { static int spurious_irq_mask; is_real_irq = false; /* Report spurious IRQ, once per IRQ line. 
*/ - if (!(spurious_irq_mask & irqmask)) { + if ( !(spurious_irq_mask & irqmask) ) + { printk("spurious 8259A interrupt: IRQ%d.\n", irq); spurious_irq_mask |= irqmask; } @@ -233,18 +235,22 @@ static bool _mask_and_ack_8259A_irq(unsigned int irq) cached_irq_mask |= irqmask; - if (irq & 8) { - inb(0xA1); /* DUMMY - (do we need this?) */ - outb(cached_A1,0xA1); - if (!aeoi_mode) { - outb(0x60 + (irq & 7), 0xA0);/* 'Specific EOI' to slave */ - outb(0x62,0x20); /* 'Specific EOI' to master-IRQ2 */ + if ( irq & 8 ) + { + inb(0xA1); /* DUMMY - (do we need this?) */ + outb(cached_A1, 0xA1); + if ( !aeoi_mode ) + { + outb(0x60 + (irq & 7), 0xA0); /* 'Specific EOI' to slave */ + outb(0x62, 0x20); /* 'Specific EOI' to master-IRQ2 */ } - } else { - inb(0x21); /* DUMMY - (do we need this?) */ - outb(cached_21,0x21); - if (!aeoi_mode) - outb(0x60 + irq, 0x20);/* 'Specific EOI' to master */ + } + else + { + inb(0x21); /* DUMMY - (do we need this?) */ + outb(cached_21, 0x21); + if ( !aeoi_mode ) + outb(0x60 + irq, 0x20); /* 'Specific EOI' to master */ } spin_unlock_irqrestore(&i8259A_lock, flags); @@ -288,27 +294,27 @@ void init_8259A(int auto_eoi) spin_lock_irqsave(&i8259A_lock, flags); - outb(0xff, 0x21); /* mask all of 8259A-1 */ - outb(0xff, 0xA1); /* mask all of 8259A-2 */ + outb(0xff, 0x21); /* mask all of 8259A-1 */ + outb(0xff, 0xA1); /* mask all of 8259A-2 */ /* * outb_p - this has to work on a wide range of PC hardware. */ - outb_p(0x11, 0x20); /* ICW1: select 8259A-1 init */ + outb_p(0x11, 0x20); /* ICW1: select 8259A-1 init */ outb_p(FIRST_LEGACY_VECTOR + 0, 0x21); /* ICW2: 8259A-1 IR0-7 */ - outb_p(0x04, 0x21); /* 8259A-1 (the master) has a slave on IR2 */ - if (auto_eoi) + outb_p(0x04, 0x21); /* 8259A-1 (the master) has a slave on IR2 */ + if ( auto_eoi ) outb_p(0x03, 0x21); /* master does Auto EOI */ else outb_p(0x01, 0x21); /* master expects normal EOI */ - outb_p(0x11, 0xA0); /* ICW1: select 8259A-2 init */ + outb_p(0x11, 0xA0); /* ICW1: select 8259A-2 init */ outb_p(FIRST_LEGACY_VECTOR + 8, 0xA1); /* ICW2: 8259A-2 IR0-7 */ - outb_p(0x02, 0xA1); /* 8259A-2 is a slave on master's IR2 */ - outb_p(0x01, 0xA1); /* (slave's support for AEOI in flat mode - is to be investigated) */ + outb_p(0x02, 0xA1); /* 8259A-2 is a slave on master's IR2 */ + outb_p(0x01, 0xA1); /* (slave's support for AEOI in flat mode + is to be investigated) */ - if (auto_eoi) + if ( auto_eoi ) /* * in AEOI mode we just have to mask the interrupt * when acking. 
@@ -317,10 +323,10 @@ void init_8259A(int auto_eoi) else i8259A_irq_type.ack = mask_and_ack_8259A_irq; - udelay(100); /* wait for 8259A to initialize */ + udelay(100); /* wait for 8259A to initialize */ - outb(cached_21, 0x21); /* restore master IRQ mask */ - outb(cached_A1, 0xA1); /* restore slave IRQ mask */ + outb(cached_21, 0x21); /* restore master IRQ mask */ + outb(cached_A1, 0xA1); /* restore slave IRQ mask */ spin_unlock_irqrestore(&i8259A_lock, flags); } @@ -331,7 +337,7 @@ void __init make_8259A_irq(unsigned int irq) irq_to_desc(irq)->handler = &i8259A_irq_type; } -static struct irqaction __read_mostly cascade = { no_action, "cascade", NULL}; +static struct irqaction __read_mostly cascade = {no_action, "cascade", NULL}; void __init init_IRQ(void) { @@ -341,9 +347,10 @@ void __init init_IRQ(void) init_8259A(0); - for (irq = 0; platform_legacy_irq(irq); irq++) { + for ( irq = 0; platform_legacy_irq(irq); irq++ ) + { struct irq_desc *desc = irq_to_desc(irq); - + if ( irq == 2 ) /* IRQ2 doesn't exist */ continue; desc->handler = &i8259A_irq_type; @@ -351,11 +358,10 @@ void __init init_IRQ(void) cpumask_copy(desc->arch.cpu_mask, cpumask_of(cpu)); desc->arch.vector = FIRST_LEGACY_VECTOR + irq; } - + per_cpu(vector_irq, cpu)[IRQ0_VECTOR] = 0; apic_intr_init(); setup_irq(2, 0, &cascade); } - diff --git a/xen/arch/x86/io_apic.c b/xen/arch/x86/io_apic.c index daa5e9e5ff..73ce5fc65b 100644 --- a/xen/arch/x86/io_apic.c +++ b/xen/arch/x86/io_apic.c @@ -39,7 +39,10 @@ #include /* Where if anywhere is the i8259 connect in external int mode */ -static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; +static struct +{ + int pin, apic; +} ioapic_i8259 = {-1, -1}; static DEFINE_SPINLOCK(ioapic_lock); @@ -60,7 +63,6 @@ int __read_mostly nr_ioapics; #define MAX_PLUS_SHARED_IRQS nr_irqs_gsi #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + nr_irqs_gsi) - #define ioapic_has_eoi_reg(apic) (mp_ioapics[(apic)].mpc_apicver >= 0x20) static int apic_pin_2_gsi_irq(int apic, int pin); @@ -71,20 +73,21 @@ static void share_vector_maps(unsigned int src, unsigned int dst) { unsigned int pin; - if (vector_map[src] == vector_map[dst]) + if ( vector_map[src] == vector_map[dst] ) return; bitmap_or(vector_map[src]->_bits, vector_map[src]->_bits, vector_map[dst]->_bits, NR_VECTORS); - for (pin = 0; pin < nr_ioapic_entries[dst]; ++pin) { + for ( pin = 0; pin < nr_ioapic_entries[dst]; ++pin ) + { int irq = apic_pin_2_gsi_irq(dst, pin); struct irq_desc *desc; - if (irq < 0) + if ( irq < 0 ) continue; desc = irq_to_desc(irq); - if (desc->arch.used_vectors == vector_map[dst]) + if ( desc->arch.used_vectors == vector_map[dst] ) desc->arch.used_vectors = vector_map[src]; } @@ -98,10 +101,11 @@ static void share_vector_maps(unsigned int src, unsigned int dst) * between pins and IRQs. 
*/ -static struct irq_pin_list { +static struct irq_pin_list +{ int apic, pin; unsigned int next; -} *__read_mostly irq_2_pin; +} * __read_mostly irq_2_pin; static unsigned int irq_2_pin_free_entry; @@ -114,15 +118,17 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin) { struct irq_pin_list *entry = irq_2_pin + irq; - while (entry->next) { + while ( entry->next ) + { BUG_ON((entry->apic == apic) && (entry->pin == pin)); entry = irq_2_pin + entry->next; } - + BUG_ON((entry->apic == apic) && (entry->pin == pin)); - if (entry->pin != -1) { - if (irq_2_pin_free_entry >= PIN_MAP_SIZE) + if ( entry->pin != -1 ) + { + if ( irq_2_pin_free_entry >= PIN_MAP_SIZE ) panic("io_apic.c: whoops\n"); entry->next = irq_2_pin_free_entry; entry = irq_2_pin + entry->next; @@ -138,27 +144,32 @@ static void remove_pin_from_irq(unsigned int irq, int apic, int pin) { struct irq_pin_list *entry, *prev; - for (entry = &irq_2_pin[irq]; ; entry = &irq_2_pin[entry->next]) { - if ((entry->apic == apic) && (entry->pin == pin)) + for ( entry = &irq_2_pin[irq];; entry = &irq_2_pin[entry->next] ) + { + if ( (entry->apic == apic) && (entry->pin == pin) ) break; BUG_ON(!entry->next); } entry->pin = entry->apic = -1; - if (entry != &irq_2_pin[irq]) { + if ( entry != &irq_2_pin[irq] ) + { /* Removed entry is not at head of list. */ prev = &irq_2_pin[irq]; - while (&irq_2_pin[prev->next] != entry) + while ( &irq_2_pin[prev->next] != entry ) prev = &irq_2_pin[prev->next]; prev->next = entry->next; - } else if (entry->next) { + } + else if ( entry->next ) + { /* Removed entry is at head of multi-item list. */ - prev = entry; + prev = entry; entry = &irq_2_pin[entry->next]; *prev = *entry; entry->pin = entry->apic = -1; - } else + } + else return; entry->next = irq_2_pin_free_entry; @@ -168,19 +179,20 @@ static void remove_pin_from_irq(unsigned int irq, int apic, int pin) /* * Reroute an IRQ to a different pin. 
*/ -static void __init replace_pin_at_irq(unsigned int irq, - int oldapic, int oldpin, - int newapic, int newpin) +static void __init replace_pin_at_irq(unsigned int irq, int oldapic, int oldpin, + int newapic, int newpin) { struct irq_pin_list *entry = irq_2_pin + irq; - while (1) { - if (entry->apic == oldapic && entry->pin == oldpin) { + while ( 1 ) + { + if ( entry->apic == oldapic && entry->pin == oldpin ) + { entry->apic = newapic; entry->pin = newpin; share_vector_maps(oldapic, newapic); } - if (!entry->next) + if ( !entry->next ) break; entry = irq_2_pin + entry->next; } @@ -190,7 +202,7 @@ vmask_t *io_apic_get_used_vector_map(unsigned int irq) { struct irq_pin_list *entry = irq_2_pin + irq; - if (entry->pin == -1) + if ( entry->pin == -1 ) return NULL; return vector_map[entry->apic]; @@ -202,21 +214,21 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void) struct IO_APIC_route_entry **ioapic_entries; ioapic_entries = xmalloc_array(struct IO_APIC_route_entry *, nr_ioapics); - if (!ioapic_entries) + if ( !ioapic_entries ) return 0; - for (apic = 0; apic < nr_ioapics; apic++) { + for ( apic = 0; apic < nr_ioapics; apic++ ) + { ioapic_entries[apic] = - xmalloc_array(struct IO_APIC_route_entry, - nr_ioapic_entries[apic]); - if (!ioapic_entries[apic] && nr_ioapic_entries[apic]) + xmalloc_array(struct IO_APIC_route_entry, nr_ioapic_entries[apic]); + if ( !ioapic_entries[apic] && nr_ioapic_entries[apic] ) goto nomem; } return ioapic_entries; nomem: - while (--apic >= 0) + while ( --apic >= 0 ) xfree(ioapic_entries[apic]); xfree(ioapic_entries); @@ -224,23 +236,26 @@ nomem: } union entry_union { - struct { u32 w1, w2; }; + struct + { + u32 w1, w2; + }; struct IO_APIC_route_entry entry; }; -struct IO_APIC_route_entry __ioapic_read_entry( - unsigned int apic, unsigned int pin, bool raw) +struct IO_APIC_route_entry __ioapic_read_entry(unsigned int apic, + unsigned int pin, bool raw) { - unsigned int (*read)(unsigned int, unsigned int) - = raw ? __io_apic_read : io_apic_read; + unsigned int (*read)(unsigned int, unsigned int) = + raw ? __io_apic_read : io_apic_read; union entry_union eu; eu.w1 = (*read)(apic, 0x10 + 2 * pin); eu.w2 = (*read)(apic, 0x11 + 2 * pin); return eu.entry; } -static struct IO_APIC_route_entry ioapic_read_entry( - unsigned int apic, unsigned int pin, bool raw) +static struct IO_APIC_route_entry ioapic_read_entry(unsigned int apic, + unsigned int pin, bool raw) { struct IO_APIC_route_entry entry; unsigned long flags; @@ -251,21 +266,19 @@ static struct IO_APIC_route_entry ioapic_read_entry( return entry; } -void __ioapic_write_entry( - unsigned int apic, unsigned int pin, bool raw, - struct IO_APIC_route_entry e) +void __ioapic_write_entry(unsigned int apic, unsigned int pin, bool raw, + struct IO_APIC_route_entry e) { - void (*write)(unsigned int, unsigned int, unsigned int) - = raw ? __io_apic_write : io_apic_write; - union entry_union eu = { .entry = e }; + void (*write)(unsigned int, unsigned int, unsigned int) = + raw ? 
__io_apic_write : io_apic_write; + union entry_union eu = {.entry = e}; - (*write)(apic, 0x11 + 2*pin, eu.w2); - (*write)(apic, 0x10 + 2*pin, eu.w1); + (*write)(apic, 0x11 + 2 * pin, eu.w2); + (*write)(apic, 0x10 + 2 * pin, eu.w1); } -static void ioapic_write_entry( - unsigned int apic, unsigned int pin, bool raw, - struct IO_APIC_route_entry e) +static void ioapic_write_entry(unsigned int apic, unsigned int pin, bool raw, + struct IO_APIC_route_entry e) { unsigned long flags; spin_lock_irqsave(&ioapic_lock, flags); @@ -278,7 +291,8 @@ static void ioapic_write_entry( * being held, and interrupts are disabled (or there is a good reason not * to), and that if both pin and vector are passed, that they refer to the * same redirection entry in the IO-APIC. */ -static void __io_apic_eoi(unsigned int apic, unsigned int vector, unsigned int pin) +static void __io_apic_eoi(unsigned int apic, unsigned int vector, + unsigned int pin) { /* Prefer the use of the EOI register if available */ if ( ioapic_has_eoi_reg(apic) ) @@ -287,7 +301,7 @@ static void __io_apic_eoi(unsigned int apic, unsigned int vector, unsigned int p if ( vector == IRQ_VECTOR_UNASSIGNED ) vector = __ioapic_read_entry(apic, pin, TRUE).vector; - *(IO_APIC_BASE(apic)+16) = vector; + *(IO_APIC_BASE(apic) + 16) = vector; } else { @@ -298,7 +312,7 @@ static void __io_apic_eoi(unsigned int apic, unsigned int vector, unsigned int p entry = __ioapic_read_entry(apic, pin, TRUE); - if ( ! entry.mask ) + if ( !entry.mask ) { /* If entry is not currently masked, mask it and make * a note to unmask it later */ @@ -329,18 +343,19 @@ int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries) { int apic, pin; - if (!ioapic_entries) + if ( !ioapic_entries ) return -ENOMEM; - for (apic = 0; apic < nr_ioapics; apic++) { - if (!nr_ioapic_entries[apic]) + for ( apic = 0; apic < nr_ioapics; apic++ ) + { + if ( !nr_ioapic_entries[apic] ) continue; - if (!ioapic_entries[apic]) + if ( !ioapic_entries[apic] ) return -ENOMEM; - for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) - ioapic_entries[apic][pin] = __ioapic_read_entry(apic, pin, 1); + for ( pin = 0; pin < nr_ioapic_entries[apic]; pin++ ) + ioapic_entries[apic][pin] = __ioapic_read_entry(apic, pin, 1); } return 0; @@ -353,21 +368,24 @@ void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries) { int apic, pin; - if (!ioapic_entries) + if ( !ioapic_entries ) return; - for (apic = 0; apic < nr_ioapics; apic++) { - if (!nr_ioapic_entries[apic]) + for ( apic = 0; apic < nr_ioapics; apic++ ) + { + if ( !nr_ioapic_entries[apic] ) continue; - if (!ioapic_entries[apic]) + if ( !ioapic_entries[apic] ) break; - for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) { + for ( pin = 0; pin < nr_ioapic_entries[apic]; pin++ ) + { struct IO_APIC_route_entry entry; entry = ioapic_entries[apic][pin]; - if (!entry.mask) { + if ( !entry.mask ) + { entry.mask = 1; ioapic_write_entry(apic, pin, 1, entry); @@ -383,18 +401,19 @@ int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries) { int apic, pin; - if (!ioapic_entries) + if ( !ioapic_entries ) return -ENOMEM; - for (apic = 0; apic < nr_ioapics; apic++) { - if (!nr_ioapic_entries[apic]) + for ( apic = 0; apic < nr_ioapics; apic++ ) + { + if ( !nr_ioapic_entries[apic] ) continue; - if (!ioapic_entries[apic]) + if ( !ioapic_entries[apic] ) return -ENOMEM; - for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) - ioapic_write_entry(apic, pin, 1, ioapic_entries[apic][pin]); + for ( pin = 0; pin < nr_ioapic_entries[apic]; pin++ ) + 
ioapic_write_entry(apic, pin, 1, ioapic_entries[apic][pin]); } return 0; @@ -404,51 +423,53 @@ void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries) { int apic; - for (apic = 0; apic < nr_ioapics; apic++) + for ( apic = 0; apic < nr_ioapics; apic++ ) xfree(ioapic_entries[apic]); xfree(ioapic_entries); } -static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable) +static void __modify_IO_APIC_irq(unsigned int irq, unsigned long enable, + unsigned long disable) { struct irq_pin_list *entry = irq_2_pin + irq; unsigned int pin, reg; - for (;;) { + for ( ;; ) + { pin = entry->pin; - if (pin == -1) + if ( pin == -1 ) break; - reg = io_apic_read(entry->apic, 0x10 + pin*2); + reg = io_apic_read(entry->apic, 0x10 + pin * 2); reg &= ~disable; reg |= enable; - io_apic_modify(entry->apic, 0x10 + pin*2, reg); - if (!entry->next) + io_apic_modify(entry->apic, 0x10 + pin * 2, reg); + if ( !entry->next ) break; entry = irq_2_pin + entry->next; } } /* mask = 1 */ -static void __mask_IO_APIC_irq (unsigned int irq) +static void __mask_IO_APIC_irq(unsigned int irq) { __modify_IO_APIC_irq(irq, 0x00010000, 0); } /* mask = 0 */ -static void __unmask_IO_APIC_irq (unsigned int irq) +static void __unmask_IO_APIC_irq(unsigned int irq) { __modify_IO_APIC_irq(irq, 0, 0x00010000); } /* trigger = 0 */ -static void __edge_IO_APIC_irq (unsigned int irq) +static void __edge_IO_APIC_irq(unsigned int irq) { __modify_IO_APIC_irq(irq, 0, 0x00008000); } /* trigger = 1 */ -static void __level_IO_APIC_irq (unsigned int irq) +static void __level_IO_APIC_irq(unsigned int irq) { __modify_IO_APIC_irq(irq, 0x00008000, 0); } @@ -476,12 +497,13 @@ static void __eoi_IO_APIC_irq(struct irq_desc *desc) struct irq_pin_list *entry = irq_2_pin + desc->irq; unsigned int pin, vector = desc->arch.vector; - for (;;) { + for ( ;; ) + { pin = entry->pin; - if (pin == -1) + if ( pin == -1 ) break; __io_apic_eoi(entry->apic, vector, pin); - if (!entry->next) + if ( !entry->next ) break; entry = irq_2_pin + entry->next; } @@ -501,22 +523,25 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) /* Check delivery_mode to be sure we're not clearing an SMI pin */ entry = __ioapic_read_entry(apic, pin, FALSE); - if (entry.delivery_mode == dest_SMI) + if ( entry.delivery_mode == dest_SMI ) return; /* * Make sure the entry is masked and re-read the contents to check * if it is a level triggered pin and if the remoteIRR is set. */ - if (!entry.mask) { + if ( !entry.mask ) + { entry.mask = 1; __ioapic_write_entry(apic, pin, FALSE, entry); } entry = __ioapic_read_entry(apic, pin, TRUE); - if (entry.irr) { + if ( entry.irr ) + { /* Make sure the trigger mode is set to level. 
*/ - if (!entry.trigger) { + if ( !entry.trigger ) + { entry.trigger = 1; __ioapic_write_entry(apic, pin, TRUE, entry); } @@ -531,23 +556,24 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) __ioapic_write_entry(apic, pin, TRUE, entry); entry = __ioapic_read_entry(apic, pin, TRUE); - if (entry.irr) + if ( entry.irr ) printk(KERN_ERR "IO-APIC%02x-%u: Unable to reset IRR\n", IO_APIC_ID(apic), pin); } -static void clear_IO_APIC (void) +static void clear_IO_APIC(void) { int apic, pin; - for (apic = 0; apic < nr_ioapics; apic++) { - for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) + for ( apic = 0; apic < nr_ioapics; apic++ ) + { + for ( pin = 0; pin < nr_ioapic_entries[apic]; pin++ ) clear_IO_APIC_pin(apic, pin); } } -static void -set_ioapic_affinity_irq(struct irq_desc *desc, const cpumask_t *mask) +static void set_ioapic_affinity_irq(struct irq_desc *desc, + const cpumask_t *mask) { unsigned long flags; unsigned int dest; @@ -558,29 +584,30 @@ set_ioapic_affinity_irq(struct irq_desc *desc, const cpumask_t *mask) spin_lock_irqsave(&ioapic_lock, flags); dest = set_desc_affinity(desc, mask); - if (dest != BAD_APICID) { + if ( dest != BAD_APICID ) + { if ( !x2apic_enabled ) dest = SET_APIC_LOGICAL_ID(dest); entry = irq_2_pin + irq; - for (;;) { + for ( ;; ) + { unsigned int data; pin = entry->pin; - if (pin == -1) + if ( pin == -1 ) break; - io_apic_write(entry->apic, 0x10 + 1 + pin*2, dest); - data = io_apic_read(entry->apic, 0x10 + pin*2); + io_apic_write(entry->apic, 0x10 + 1 + pin * 2, dest); + data = io_apic_read(entry->apic, 0x10 + pin * 2); data &= ~IO_APIC_REDIR_VECTOR_MASK; data |= desc->arch.vector & 0xFF; - io_apic_modify(entry->apic, 0x10 + pin*2, data); + io_apic_modify(entry->apic, 0x10 + pin * 2, data); - if (!entry->next) + if ( !entry->next ) break; entry = irq_2_pin + entry->next; } } spin_unlock_irqrestore(&ioapic_lock, flags); - } /* @@ -590,11 +617,11 @@ static int find_irq_entry(int apic, int pin, int type) { int i; - for (i = 0; i < mp_irq_entries; i++) - if (mp_irqs[i].mpc_irqtype == type && - (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid || - mp_irqs[i].mpc_dstapic == MP_APIC_ALL) && - mp_irqs[i].mpc_dstirq == pin) + for ( i = 0; i < mp_irq_entries; i++ ) + if ( mp_irqs[i].mpc_irqtype == type && + (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid || + mp_irqs[i].mpc_dstapic == MP_APIC_ALL) && + mp_irqs[i].mpc_dstirq == pin ) return i; return -1; @@ -607,16 +634,16 @@ static int __init find_isa_irq_pin(int irq, int type) { int i; - for (i = 0; i < mp_irq_entries; i++) { + for ( i = 0; i < mp_irq_entries; i++ ) + { int lbus = mp_irqs[i].mpc_srcbus; - if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA || - mp_bus_id_to_type[lbus] == MP_BUS_EISA || - mp_bus_id_to_type[lbus] == MP_BUS_MCA || - mp_bus_id_to_type[lbus] == MP_BUS_NEC98 - ) && - (mp_irqs[i].mpc_irqtype == type) && - (mp_irqs[i].mpc_srcbusirq == irq)) + if ( (mp_bus_id_to_type[lbus] == MP_BUS_ISA || + mp_bus_id_to_type[lbus] == MP_BUS_EISA || + mp_bus_id_to_type[lbus] == MP_BUS_MCA || + mp_bus_id_to_type[lbus] == MP_BUS_NEC98) && + (mp_irqs[i].mpc_irqtype == type) && + (mp_irqs[i].mpc_srcbusirq == irq) ) return mp_irqs[i].mpc_dstirq; } @@ -627,24 +654,26 @@ static int __init find_isa_irq_apic(int irq, int type) { int i; - for (i = 0; i < mp_irq_entries; i++) { + for ( i = 0; i < mp_irq_entries; i++ ) + { int lbus = mp_irqs[i].mpc_srcbus; - if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA || - mp_bus_id_to_type[lbus] == MP_BUS_EISA || - mp_bus_id_to_type[lbus] == MP_BUS_MCA || - 
mp_bus_id_to_type[lbus] == MP_BUS_NEC98 - ) && - (mp_irqs[i].mpc_irqtype == type) && - (mp_irqs[i].mpc_srcbusirq == irq)) + if ( (mp_bus_id_to_type[lbus] == MP_BUS_ISA || + mp_bus_id_to_type[lbus] == MP_BUS_EISA || + mp_bus_id_to_type[lbus] == MP_BUS_MCA || + mp_bus_id_to_type[lbus] == MP_BUS_NEC98) && + (mp_irqs[i].mpc_irqtype == type) && + (mp_irqs[i].mpc_srcbusirq == irq) ) break; } - if (i < mp_irq_entries) { + if ( i < mp_irq_entries ) + { int apic; - for(apic = 0; apic < nr_ioapics; apic++) { - if (!nr_ioapic_entries[apic]) + for ( apic = 0; apic < nr_ioapics; apic++ ) + { + if ( !nr_ioapic_entries[apic] ) continue; - if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic) + if ( mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ) return apic; } } @@ -659,30 +688,31 @@ static int __init find_isa_irq_apic(int irq, int type) static int pin_2_irq(int idx, int apic, int pin); /* - * This function currently is only a helper for the i386 smp boot process where - * we need to reprogram the ioredtbls to cater for the cpus which have come online - * so mask in all cases should simply be TARGET_CPUS + * This function currently is only a helper for the i386 smp boot process where + * we need to reprogram the ioredtbls to cater for the cpus which have come + * online so mask in all cases should simply be TARGET_CPUS */ void /*__init*/ setup_ioapic_dest(void) { int pin, ioapic, irq, irq_entry; - if (skip_ioapic_setup) + if ( skip_ioapic_setup ) return; - for (ioapic = 0; ioapic < nr_ioapics; ioapic++) { - for (pin = 0; pin < nr_ioapic_entries[ioapic]; pin++) { + for ( ioapic = 0; ioapic < nr_ioapics; ioapic++ ) + { + for ( pin = 0; pin < nr_ioapic_entries[ioapic]; pin++ ) + { struct irq_desc *desc; irq_entry = find_irq_entry(ioapic, pin, mp_INT); - if (irq_entry == -1) + if ( irq_entry == -1 ) continue; irq = pin_2_irq(irq_entry, ioapic, pin); desc = irq_to_desc(irq); BUG_ON(cpumask_empty(desc->arch.cpu_mask)); set_ioapic_affinity_irq(desc, desc->arch.cpu_mask); } - } } @@ -691,12 +721,13 @@ void /*__init*/ setup_ioapic_dest(void) */ static int EISA_ELCR(unsigned int irq) { - if (platform_legacy_irq(irq)) { + if ( platform_legacy_irq(irq) ) + { unsigned int port = 0x4d0 + (irq >> 3); return (inb(port) >> (irq & 7)) & 1; } - apic_printk(APIC_VERBOSE, KERN_INFO - "Broken MPtable reports ISA irq %d\n", irq); + apic_printk(APIC_VERBOSE, KERN_INFO "Broken MPtable reports ISA irq %d\n", + irq); return 0; } @@ -705,32 +736,32 @@ static int EISA_ELCR(unsigned int irq) * EISA conforming in the MP table, that means its trigger type must * be read in from the ELCR */ -#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq)) -#define default_EISA_polarity(idx) (0) +#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq)) +#define default_EISA_polarity(idx) (0) /* ISA interrupts are always polarity zero edge triggered, * when listed as conforming in the MP table. */ -#define default_ISA_trigger(idx) (0) -#define default_ISA_polarity(idx) (0) +#define default_ISA_trigger(idx) (0) +#define default_ISA_polarity(idx) (0) /* PCI interrupts are always polarity one level triggered, * when listed as conforming in the MP table. */ -#define default_PCI_trigger(idx) (1) -#define default_PCI_polarity(idx) (1) +#define default_PCI_trigger(idx) (1) +#define default_PCI_polarity(idx) (1) /* MCA interrupts are always polarity zero level triggered, * when listed as conforming in the MP table. 
*/ -#define default_MCA_trigger(idx) (1) -#define default_MCA_polarity(idx) (0) +#define default_MCA_trigger(idx) (1) +#define default_MCA_polarity(idx) (0) /* NEC98 interrupts are always polarity zero edge triggered, * when listed as conforming in the MP table. */ -#define default_NEC98_trigger(idx) (0) -#define default_NEC98_polarity(idx) (0) +#define default_NEC98_trigger(idx) (0) +#define default_NEC98_polarity(idx) (0) static int __init MPBIOS_polarity(int idx) { @@ -814,7 +845,7 @@ static int MPBIOS_trigger(int idx) /* * Determine IRQ trigger mode (edge or level sensitive): */ - switch ((mp_irqs[idx].mpc_irqflag>>2) & 3) + switch ((mp_irqs[idx].mpc_irqflag >> 2) & 3) { case 0: /* conforms, ie. bus-type dependent */ { @@ -898,7 +929,7 @@ static int pin_2_irq(int idx, int apic, int pin) /* * Debugging check, we are in big trouble if this message pops up! */ - if (mp_irqs[idx].mpc_dstirq != pin) + if ( mp_irqs[idx].mpc_dstirq != pin ) printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); switch (mp_bus_id_to_type[bus]) @@ -917,14 +948,14 @@ static int pin_2_irq(int idx, int apic, int pin) * PCI IRQs are mapped in order */ i = irq = 0; - while (i < apic) + while ( i < apic ) irq += nr_ioapic_entries[i++]; irq += pin; break; } default: { - printk(KERN_ERR "unknown bus type %d.\n",bus); + printk(KERN_ERR "unknown bus type %d.\n", bus); irq = 0; break; } @@ -937,10 +968,12 @@ static inline int IO_APIC_irq_trigger(int irq) { int apic, idx, pin; - for (apic = 0; apic < nr_ioapics; apic++) { - for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) { - idx = find_irq_entry(apic,pin,mp_INT); - if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin))) + for ( apic = 0; apic < nr_ioapics; apic++ ) + { + for ( pin = 0; pin < nr_ioapic_entries[apic]; pin++ ) + { + idx = find_irq_entry(apic, pin, mp_INT); + if ( (idx != -1) && (irq == pin_2_irq(idx, apic, pin)) ) return irq_trigger(idx); } } @@ -953,21 +986,22 @@ static inline int IO_APIC_irq_trigger(int irq) static struct hw_interrupt_type ioapic_level_type; static hw_irq_controller ioapic_edge_type; -#define IOAPIC_AUTO -1 -#define IOAPIC_EDGE 0 -#define IOAPIC_LEVEL 1 +#define IOAPIC_AUTO -1 +#define IOAPIC_EDGE 0 +#define IOAPIC_LEVEL 1 -#define SET_DEST(ent, mode, val) do { \ - if (x2apic_enabled) \ - (ent).dest.dest32 = (val); \ - else \ - (ent).dest.mode.mode##_dest = (val); \ -} while (0) +#define SET_DEST(ent, mode, val) \ + do { \ + if ( x2apic_enabled ) \ + (ent).dest.dest32 = (val); \ + else \ + (ent).dest.mode.mode##_dest = (val); \ + } while ( 0 ) static inline void ioapic_register_intr(int irq, unsigned long trigger) { - if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || - trigger == IOAPIC_LEVEL) + if ( (trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || + trigger == IOAPIC_LEVEL ) irq_desc[irq].handler = &ioapic_level_type; else irq_desc[irq].handler = &ioapic_edge_type; @@ -981,28 +1015,32 @@ static void __init setup_IO_APIC_irqs(void) apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); - for (apic = 0; apic < nr_ioapics; apic++) { - for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) { + for ( apic = 0; apic < nr_ioapics; apic++ ) + { + for ( pin = 0; pin < nr_ioapic_entries[apic]; pin++ ) + { struct irq_desc *desc; /* * add it to the IO-APIC irq-routing table: */ - memset(&entry,0,sizeof(entry)); + memset(&entry, 0, sizeof(entry)); entry.delivery_mode = INT_DELIVERY_MODE; entry.dest_mode = INT_DEST_MODE; - entry.mask = 0; /* enable IRQ */ - - idx = find_irq_entry(apic,pin,mp_INT); - if (idx == -1) { - if 
(first_notcon) { - apic_printk(APIC_VERBOSE, KERN_DEBUG - " IO-APIC (apicid-pin) %d-%d", - mp_ioapics[apic].mpc_apicid, - pin); + entry.mask = 0; /* enable IRQ */ + + idx = find_irq_entry(apic, pin, mp_INT); + if ( idx == -1 ) + { + if ( first_notcon ) + { + apic_printk(APIC_VERBOSE, + KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", + mp_ioapics[apic].mpc_apicid, pin); first_notcon = 0; - } else + } + else apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin); continue; @@ -1011,7 +1049,8 @@ static void __init setup_IO_APIC_irqs(void) entry.trigger = irq_trigger(idx); entry.polarity = irq_polarity(idx); - if (irq_trigger(idx)) { + if ( irq_trigger(idx) ) + { entry.trigger = 1; entry.mask = 1; } @@ -1021,12 +1060,12 @@ static void __init setup_IO_APIC_irqs(void) * skip adding the timer int on secondary nodes, which causes * a small but painful rift in the time-space continuum */ - if (multi_timer_check(apic, irq)) + if ( multi_timer_check(apic, irq) ) continue; else add_pin_to_irq(irq, apic, pin); - if (!IO_APIC_IRQ(irq)) + if ( !IO_APIC_IRQ(irq) ) continue; vector = assign_irq_vector(irq, NULL); @@ -1034,7 +1073,7 @@ static void __init setup_IO_APIC_irqs(void) entry.vector = vector; ioapic_register_intr(irq, IOAPIC_AUTO); - if (platform_legacy_irq(irq)) + if ( platform_legacy_irq(irq) ) disable_8259A_irq(irq_to_desc(irq)); desc = irq_to_desc(irq); @@ -1046,18 +1085,19 @@ static void __init setup_IO_APIC_irqs(void) } } - if (!first_notcon) + if ( !first_notcon ) apic_printk(APIC_VERBOSE, " not connected.\n"); } /* * Set up the 8259A-master output pin: */ -static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector) +static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, + int vector) { struct IO_APIC_route_entry entry; - memset(&entry,0,sizeof(entry)); + memset(&entry, 0, sizeof(entry)); disable_8259A_irq(irq_to_desc(0)); @@ -1069,7 +1109,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in * to the first CPU. 
*/ entry.dest_mode = INT_DEST_MODE; - entry.mask = 0; /* unmask IRQ now */ + entry.mask = 0; /* unmask IRQ now */ SET_DEST(entry, logical, cpu_mask_to_apicid(TARGET_CPUS)); entry.delivery_mode = INT_DELIVERY_MODE; entry.polarity = 0; @@ -1104,7 +1144,7 @@ static void /*__init*/ __print_IO_APIC(bool boot) unsigned long flags; printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); - for (i = 0; i < nr_ioapics; i++) + for ( i = 0; i < nr_ioapics; i++ ) printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", mp_ioapics[i].mpc_apicid, nr_ioapic_entries[i]); @@ -1114,123 +1154,126 @@ static void /*__init*/ __print_IO_APIC(bool boot) */ printk(KERN_INFO "testing the IO APIC.......................\n"); - for (apic = 0; apic < nr_ioapics; apic++) { + for ( apic = 0; apic < nr_ioapics; apic++ ) + { if ( !boot ) process_pending_softirqs(); - if (!nr_ioapic_entries[apic]) + if ( !nr_ioapic_entries[apic] ) continue; - spin_lock_irqsave(&ioapic_lock, flags); - reg_00.raw = io_apic_read(apic, 0); - reg_01.raw = io_apic_read(apic, 1); - if (reg_01.bits.version >= 0x10) + spin_lock_irqsave(&ioapic_lock, flags); + reg_00.raw = io_apic_read(apic, 0); + reg_01.raw = io_apic_read(apic, 1); + if ( reg_01.bits.version >= 0x10 ) reg_02.raw = io_apic_read(apic, 2); - if (reg_01.bits.version >= 0x20) + if ( reg_01.bits.version >= 0x20 ) reg_03.raw = io_apic_read(apic, 3); - spin_unlock_irqrestore(&ioapic_lock, flags); - - printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid); - printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); - printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); - printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); - printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS); - if (reg_00.bits.ID >= get_physical_broadcast()) + spin_unlock_irqrestore(&ioapic_lock, flags); + + printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid); + printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); + printk(KERN_DEBUG "....... : physical APIC id: %02X\n", + reg_00.bits.ID); + printk(KERN_DEBUG "....... : Delivery Type: %X\n", + reg_00.bits.delivery_type); + printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS); + if ( reg_00.bits.ID >= get_physical_broadcast() ) UNEXPECTED_IO_APIC(); - if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2) + if ( reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2 ) UNEXPECTED_IO_APIC(); - printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw); - printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries); - if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */ - (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */ - (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */ - (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */ - (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */ - (reg_01.bits.entries != 0x2E) && - (reg_01.bits.entries != 0x3F) - ) + printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw); + printk(KERN_DEBUG "....... 
: max redirection entries: %04X\n", + reg_01.bits.entries); + if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */ + (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */ + (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */ + (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */ + (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */ + (reg_01.bits.entries != 0x2E) && (reg_01.bits.entries != 0x3F) ) UNEXPECTED_IO_APIC(); - printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); - printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version); - if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */ - (reg_01.bits.version != 0x10) && /* oldest IO-APICs */ - (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */ - (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */ - (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */ - ) + printk(KERN_DEBUG "....... : PRQ implemented: %X\n", + reg_01.bits.PRQ); + printk(KERN_DEBUG "....... : IO APIC version: %04X\n", + reg_01.bits.version); + if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */ + (reg_01.bits.version != 0x10) && /* oldest IO-APICs */ + (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */ + (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */ + (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */ + ) UNEXPECTED_IO_APIC(); - if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2) + if ( reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2 ) UNEXPECTED_IO_APIC(); - /* - * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02, - * but the value of reg_02 is read as the previous read register - * value, so ignore it if reg_02 == reg_01. - */ - if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) { + /* + * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02, + * but the value of reg_02 is read as the previous read register + * value, so ignore it if reg_02 == reg_01. + */ + if ( reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw ) + { printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw); - printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration); - if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2) + printk(KERN_DEBUG "....... : arbitration: %02X\n", + reg_02.bits.arbitration); + if ( reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2 ) UNEXPECTED_IO_APIC(); - } - - /* - * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02 - * or reg_03, but the value of reg_0[23] is read as the previous read - * register value, so ignore it if reg_03 == reg_0[12]. - */ - if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw && - reg_03.raw != reg_01.raw) { + } + + /* + * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02 + * or reg_03, but the value of reg_0[23] is read as the previous read + * register value, so ignore it if reg_03 == reg_0[12]. + */ + if ( reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw && + reg_03.raw != reg_01.raw ) + { printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw); - printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT); - if (reg_03.bits.__reserved_1) + printk(KERN_DEBUG "....... : Boot DT : %X\n", + reg_03.bits.boot_DT); + if ( reg_03.bits.__reserved_1 ) UNEXPECTED_IO_APIC(); - } + } - printk(KERN_DEBUG ".... IRQ redirection table:\n"); + printk(KERN_DEBUG ".... 
IRQ redirection table:\n"); - printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol" - " Stat Dest Deli Vect: \n"); + printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol" + " Stat Dest Deli Vect: \n"); - for (i = 0; i <= reg_01.bits.entries; i++) { + for ( i = 0; i <= reg_01.bits.entries; i++ ) + { struct IO_APIC_route_entry entry; entry = ioapic_read_entry(apic, i, 0); - printk(KERN_DEBUG " %02x %03X %02X ", - i, + printk(KERN_DEBUG " %02x %03X %02X ", i, entry.dest.logical.logical_dest, - entry.dest.physical.physical_dest - ); + entry.dest.physical.physical_dest); printk("%1d %1d %1d %1d %1d %1d %1d %02X\n", - entry.mask, - entry.trigger, - entry.irr, - entry.polarity, - entry.delivery_status, - entry.dest_mode, - entry.delivery_mode, - entry.vector - ); - } + entry.mask, entry.trigger, entry.irr, entry.polarity, + entry.delivery_status, entry.dest_mode, entry.delivery_mode, + entry.vector); + } } printk(KERN_INFO "Using vector-based indexing\n"); printk(KERN_DEBUG "IRQ to pin mappings:\n"); - for (i = 0; i < nr_irqs_gsi; i++) { + for ( i = 0; i < nr_irqs_gsi; i++ ) + { struct irq_pin_list *entry = irq_2_pin + i; if ( !boot && !(i & 0x1f) ) process_pending_softirqs(); - if (entry->pin < 0) + if ( entry->pin < 0 ) continue; printk(KERN_DEBUG "IRQ%d ", irq_to_desc(i)->arch.vector); - for (;;) { + for ( ;; ) + { printk("-> %d:%d", entry->apic, entry->pin); - if (!entry->next) + if ( !entry->next ) break; entry = irq_2_pin + entry->next; } @@ -1244,7 +1287,7 @@ static void /*__init*/ __print_IO_APIC(bool boot) static void __init print_IO_APIC(void) { - if (apic_verbosity != APIC_QUIET) + if ( apic_verbosity != APIC_QUIET ) __print_IO_APIC(1); } @@ -1260,59 +1303,69 @@ static void __init enable_IO_APIC(void) /* Initialise dynamic irq_2_pin free list. */ irq_2_pin = xzalloc_array(struct irq_pin_list, PIN_MAP_SIZE); - - for (i = 0; i < PIN_MAP_SIZE; i++) + + for ( i = 0; i < PIN_MAP_SIZE; i++ ) irq_2_pin[i].pin = -1; - for (i = irq_2_pin_free_entry = nr_irqs_gsi; i < PIN_MAP_SIZE; i++) + for ( i = irq_2_pin_free_entry = nr_irqs_gsi; i < PIN_MAP_SIZE; i++ ) irq_2_pin[i].next = i + 1; - if (directed_eoi_enabled) { - for (apic = 0; apic < nr_ioapics; apic++) { - if (!nr_ioapic_entries[apic]) + if ( directed_eoi_enabled ) + { + for ( apic = 0; apic < nr_ioapics; apic++ ) + { + if ( !nr_ioapic_entries[apic] ) continue; vector_map[apic] = xzalloc(vmask_t); BUG_ON(!vector_map[apic]); } - } else { + } + else + { vector_map[0] = xzalloc(vmask_t); BUG_ON(!vector_map[0]); - for (apic = 1; apic < nr_ioapics; apic++) + for ( apic = 1; apic < nr_ioapics; apic++ ) vector_map[apic] = vector_map[0]; } - for(apic = 0; apic < nr_ioapics; apic++) { + for ( apic = 0; apic < nr_ioapics; apic++ ) + { int pin; /* See if any of the pins is in ExtINT mode */ - for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) { + for ( pin = 0; pin < nr_ioapic_entries[apic]; pin++ ) + { struct IO_APIC_route_entry entry = ioapic_read_entry(apic, pin, 0); /* If the interrupt line is enabled and in ExtInt mode * I have found the pin where the i8259 is connected. */ - if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) { + if ( (entry.mask == 0) && (entry.delivery_mode == dest_ExtINT) ) + { ioapic_i8259.apic = apic; - ioapic_i8259.pin = pin; + ioapic_i8259.pin = pin; goto found_i8259; } } } - found_i8259: +found_i8259: /* Look to see what if the MP table has reported the ExtINT */ /* If we could not find the appropriate pin by looking at the ioapic * the i8259 probably is not connected the ioapic but give the * mptable a chance anyway. 
*/ - i8259_pin = find_isa_irq_pin(0, mp_ExtINT); + i8259_pin = find_isa_irq_pin(0, mp_ExtINT); i8259_apic = find_isa_irq_apic(0, mp_ExtINT); /* Trust the MP table if nothing is setup in the hardware */ - if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) { - printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n"); - ioapic_i8259.pin = i8259_pin; + if ( (ioapic_i8259.pin == -1) && (i8259_pin >= 0) ) + { + printk(KERN_WARNING + "ExtINT not setup in hardware but reported by MP table\n"); + ioapic_i8259.pin = i8259_pin; ioapic_i8259.apic = i8259_apic; } /* Complain if the MP table and the hardware disagree */ - if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) && - (i8259_pin >= 0) && (ioapic_i8259.pin >= 0)) + if ( ((ioapic_i8259.apic != i8259_apic) || + (ioapic_i8259.pin != i8259_pin)) && + (i8259_pin >= 0) && (ioapic_i8259.pin >= 0) ) { printk(KERN_WARNING "ExtINT in hardware and MP table differ\n"); } @@ -1338,18 +1391,19 @@ void disable_IO_APIC(void) * Put that IOAPIC in virtual wire mode * so legacy interrupts can be delivered. */ - if (ioapic_i8259.pin != -1) { + if ( ioapic_i8259.pin != -1 ) + { struct IO_APIC_route_entry entry; memset(&entry, 0, sizeof(entry)); - entry.mask = 0; /* Enabled */ - entry.trigger = 0; /* Edge */ - entry.irr = 0; - entry.polarity = 0; /* High */ + entry.mask = 0; /* Enabled */ + entry.trigger = 0; /* Edge */ + entry.irr = 0; + entry.polarity = 0; /* High */ entry.delivery_status = 0; - entry.dest_mode = 0; /* Physical */ - entry.delivery_mode = dest_ExtINT; /* ExtInt */ - entry.vector = 0; + entry.dest_mode = 0; /* Physical */ + entry.delivery_mode = dest_ExtINT; /* ExtInt */ + entry.vector = 0; SET_DEST(entry, physical, get_apic_id()); /* @@ -1380,8 +1434,8 @@ static void __init setup_ioapic_ids_from_mpc(void) * Don't check I/O APIC IDs for xAPIC systems. They have * no meaning without the serial APIC bus. */ - if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) - || APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) + if ( !(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) || + APIC_XAPIC(apic_version[boot_cpu_physical_apicid]) ) return; /* @@ -1393,19 +1447,22 @@ static void __init setup_ioapic_ids_from_mpc(void) /* * Set the IOAPIC ID to the value stored in the MPC table. */ - for (apic = 0; apic < nr_ioapics; apic++) { - if (!nr_ioapic_entries[apic]) + for ( apic = 0; apic < nr_ioapics; apic++ ) + { + if ( !nr_ioapic_entries[apic] ) continue; /* Read the register 0 value */ spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(apic, 0); spin_unlock_irqrestore(&ioapic_lock, flags); - + old_id = mp_ioapics[apic].mpc_apicid; - if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) { - printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", + if ( mp_ioapics[apic].mpc_apicid >= get_physical_broadcast() ) + { + printk(KERN_ERR + "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", apic, mp_ioapics[apic].mpc_apicid); printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", reg_00.bits.ID); @@ -1417,20 +1474,23 @@ static void __init setup_ioapic_ids_from_mpc(void) * system must have a unique ID or we get lots of nice * 'stuck on smp_invalidate_needed IPI wait' messages. 
*/ - if (check_apicid_used(&phys_id_present_map, - mp_ioapics[apic].mpc_apicid)) { + if ( check_apicid_used(&phys_id_present_map, + mp_ioapics[apic].mpc_apicid) ) + { printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", apic, mp_ioapics[apic].mpc_apicid); - for (i = 0; i < get_physical_broadcast(); i++) - if (!physid_isset(i, phys_id_present_map)) + for ( i = 0; i < get_physical_broadcast(); i++ ) + if ( !physid_isset(i, phys_id_present_map) ) break; - if (i >= get_physical_broadcast()) + if ( i >= get_physical_broadcast() ) panic("Max APIC ID exceeded\n"); - printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", - i); + printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", i); mp_ioapics[apic].mpc_apicid = i; - } else { - apic_printk(APIC_VERBOSE, "Setting %d in the " + } + else + { + apic_printk(APIC_VERBOSE, + "Setting %d in the " "phys_id_present_map\n", mp_ioapics[apic].mpc_apicid); } @@ -1440,18 +1500,17 @@ static void __init setup_ioapic_ids_from_mpc(void) * We need to adjust the IRQ routing table * if the ID changed. */ - if (old_id != mp_ioapics[apic].mpc_apicid) - for (i = 0; i < mp_irq_entries; i++) - if (mp_irqs[i].mpc_dstapic == old_id) - mp_irqs[i].mpc_dstapic - = mp_ioapics[apic].mpc_apicid; + if ( old_id != mp_ioapics[apic].mpc_apicid ) + for ( i = 0; i < mp_irq_entries; i++ ) + if ( mp_irqs[i].mpc_dstapic == old_id ) + mp_irqs[i].mpc_dstapic = mp_ioapics[apic].mpc_apicid; /* * Read the right value from the MPC table and * write it into the ID register. */ - apic_printk(APIC_VERBOSE, KERN_INFO - "...changing IO-APIC physical APIC ID to %d ...", + apic_printk(APIC_VERBOSE, + KERN_INFO "...changing IO-APIC physical APIC ID to %d ...", mp_ioapics[apic].mpc_apicid); reg_00.bits.ID = mp_ioapics[apic].mpc_apicid; @@ -1465,7 +1524,7 @@ static void __init setup_ioapic_ids_from_mpc(void) spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(apic, 0); spin_unlock_irqrestore(&ioapic_lock, flags); - if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid) + if ( reg_00.bits.ID != mp_ioapics[apic].mpc_apicid ) printk("could not set ID!\n"); else apic_printk(APIC_VERBOSE, " ok.\n"); @@ -1533,9 +1592,10 @@ static unsigned int startup_edge_ioapic_irq(struct irq_desc *desc) unsigned long flags; spin_lock_irqsave(&ioapic_lock, flags); - if (platform_legacy_irq(desc->irq)) { + if ( platform_legacy_irq(desc->irq) ) + { disable_8259A_irq(desc); - if (i8259A_irq_pending(desc->irq)) + if ( i8259A_irq_pending(desc->irq) ) was_pending = 1; } __unmask_IO_APIC_irq(desc->irq); @@ -1554,8 +1614,8 @@ static void ack_edge_ioapic_irq(struct irq_desc *desc) irq_complete_move(desc); move_native_irq(desc); - if ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) - == (IRQ_PENDING | IRQ_DISABLED)) + if ( (desc->status & (IRQ_PENDING | IRQ_DISABLED)) == + (IRQ_PENDING | IRQ_DISABLED) ) mask_IO_APIC_irq(desc); ack_APIC_irq(); } @@ -1607,23 +1667,25 @@ static bool io_apic_level_ack_pending(unsigned int irq) spin_lock_irqsave(&ioapic_lock, flags); entry = &irq_2_pin[irq]; - for (;;) { + for ( ;; ) + { unsigned int reg; int pin; - if (!entry) + if ( !entry ) break; pin = entry->pin; - if (pin == -1) + if ( pin == -1 ) continue; - reg = io_apic_read(entry->apic, 0x10 + pin*2); + reg = io_apic_read(entry->apic, 0x10 + pin * 2); /* Is the remote IRR bit set? 
*/ - if (reg & IO_APIC_REDIR_REMOTE_IRR) { + if ( reg & IO_APIC_REDIR_REMOTE_IRR ) + { spin_unlock_irqrestore(&ioapic_lock, flags); return 1; } - if (!entry->next) + if ( !entry->next ) break; entry = irq_2_pin + entry->next; } @@ -1642,39 +1704,40 @@ static void mask_and_ack_level_ioapic_irq(struct irq_desc *desc) if ( !directed_eoi_enabled ) mask_IO_APIC_irq(desc); -/* - * It appears there is an erratum which affects at least version 0x11 - * of I/O APIC (that's the 82093AA and cores integrated into various - * chipsets). Under certain conditions a level-triggered interrupt is - * erroneously delivered as edge-triggered one but the respective IRR - * bit gets set nevertheless. As a result the I/O unit expects an EOI - * message but it will never arrive and further interrupts are blocked - * from the source. The exact reason is so far unknown, but the - * phenomenon was observed when two consecutive interrupt requests - * from a given source get delivered to the same CPU and the source is - * temporarily disabled in between. - * - * A workaround is to simulate an EOI message manually. We achieve it - * by setting the trigger mode to edge and then to level when the edge - * trigger mode gets detected in the TMR of a local APIC for a - * level-triggered interrupt. We mask the source for the time of the - * operation to prevent an edge-triggered interrupt escaping meanwhile. - * The idea is from Manfred Spraul. --macro - */ + /* + * It appears there is an erratum which affects at least version 0x11 + * of I/O APIC (that's the 82093AA and cores integrated into various + * chipsets). Under certain conditions a level-triggered interrupt is + * erroneously delivered as edge-triggered one but the respective IRR + * bit gets set nevertheless. As a result the I/O unit expects an EOI + * message but it will never arrive and further interrupts are blocked + * from the source. The exact reason is so far unknown, but the + * phenomenon was observed when two consecutive interrupt requests + * from a given source get delivered to the same CPU and the source is + * temporarily disabled in between. + * + * A workaround is to simulate an EOI message manually. We achieve it + * by setting the trigger mode to edge and then to level when the edge + * trigger mode gets detected in the TMR of a local APIC for a + * level-triggered interrupt. We mask the source for the time of the + * operation to prevent an edge-triggered interrupt escaping meanwhile. + * The idea is from Manfred Spraul. 
--macro + */ i = desc->arch.vector; v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); ack_APIC_irq(); - + if ( directed_eoi_enabled ) return; - if ((desc->status & IRQ_MOVE_PENDING) && - !io_apic_level_ack_pending(desc->irq)) + if ( (desc->status & IRQ_MOVE_PENDING) && + !io_apic_level_ack_pending(desc->irq) ) move_masked_irq(desc); - if ( !(v & (1 << (i & 0x1f))) ) { + if ( !(v & (1 << (i & 0x1f))) ) + { spin_lock(&ioapic_lock); __edge_IO_APIC_irq(desc->irq); __level_IO_APIC_irq(desc->irq); @@ -1686,7 +1749,7 @@ static void end_level_ioapic_irq_old(struct irq_desc *desc, u8 vector) { if ( directed_eoi_enabled ) { - if ( !(desc->status & (IRQ_DISABLED|IRQ_MOVE_PENDING)) ) + if ( !(desc->status & (IRQ_DISABLED | IRQ_MOVE_PENDING)) ) { eoi_IO_APIC_irq(desc); return; @@ -1705,25 +1768,25 @@ static void end_level_ioapic_irq_old(struct irq_desc *desc, u8 vector) static void end_level_ioapic_irq_new(struct irq_desc *desc, u8 vector) { -/* - * It appears there is an erratum which affects at least version 0x11 - * of I/O APIC (that's the 82093AA and cores integrated into various - * chipsets). Under certain conditions a level-triggered interrupt is - * erroneously delivered as edge-triggered one but the respective IRR - * bit gets set nevertheless. As a result the I/O unit expects an EOI - * message but it will never arrive and further interrupts are blocked - * from the source. The exact reason is so far unknown, but the - * phenomenon was observed when two consecutive interrupt requests - * from a given source get delivered to the same CPU and the source is - * temporarily disabled in between. - * - * A workaround is to simulate an EOI message manually. We achieve it - * by setting the trigger mode to edge and then to level when the edge - * trigger mode gets detected in the TMR of a local APIC for a - * level-triggered interrupt. We mask the source for the time of the - * operation to prevent an edge-triggered interrupt escaping meanwhile. - * The idea is from Manfred Spraul. --macro - */ + /* + * It appears there is an erratum which affects at least version 0x11 + * of I/O APIC (that's the 82093AA and cores integrated into various + * chipsets). Under certain conditions a level-triggered interrupt is + * erroneously delivered as edge-triggered one but the respective IRR + * bit gets set nevertheless. As a result the I/O unit expects an EOI + * message but it will never arrive and further interrupts are blocked + * from the source. The exact reason is so far unknown, but the + * phenomenon was observed when two consecutive interrupt requests + * from a given source get delivered to the same CPU and the source is + * temporarily disabled in between. + * + * A workaround is to simulate an EOI message manually. We achieve it + * by setting the trigger mode to edge and then to level when the edge + * trigger mode gets detected in the TMR of a local APIC for a + * level-triggered interrupt. We mask the source for the time of the + * operation to prevent an edge-triggered interrupt escaping meanwhile. + * The idea is from Manfred Spraul. 
--macro + */ unsigned int v, i = desc->arch.vector; /* Manually EOI the old vector if we are moving to the new */ @@ -1738,7 +1801,8 @@ static void end_level_ioapic_irq_new(struct irq_desc *desc, u8 vector) !io_apic_level_ack_pending(desc->irq) ) move_native_irq(desc); - if (!(v & (1 << (i & 0x1f)))) { + if ( !(v & (1 << (i & 0x1f))) ) + { spin_lock(&ioapic_lock); __mask_IO_APIC_irq(desc->irq); __edge_IO_APIC_irq(desc->irq); @@ -1758,32 +1822,32 @@ static void end_level_ioapic_irq_new(struct irq_desc *desc, u8 vector) * races. */ static hw_irq_controller ioapic_edge_type = { - .typename = "IO-APIC-edge", - .startup = startup_edge_ioapic_irq, - .shutdown = irq_shutdown_none, - .enable = unmask_IO_APIC_irq, - .disable = irq_disable_none, - .ack = ack_edge_ioapic_irq, - .set_affinity = set_ioapic_affinity_irq, + .typename = "IO-APIC-edge", + .startup = startup_edge_ioapic_irq, + .shutdown = irq_shutdown_none, + .enable = unmask_IO_APIC_irq, + .disable = irq_disable_none, + .ack = ack_edge_ioapic_irq, + .set_affinity = set_ioapic_affinity_irq, }; static struct hw_interrupt_type __read_mostly ioapic_level_type = { - .typename = "IO-APIC-level", - .startup = startup_level_ioapic_irq, - .shutdown = mask_IO_APIC_irq, - .enable = unmask_IO_APIC_irq, - .disable = mask_IO_APIC_irq, - .ack = mask_and_ack_level_ioapic_irq, - .end = end_level_ioapic_irq_old, - .set_affinity = set_ioapic_affinity_irq, + .typename = "IO-APIC-level", + .startup = startup_level_ioapic_irq, + .shutdown = mask_IO_APIC_irq, + .enable = unmask_IO_APIC_irq, + .disable = mask_IO_APIC_irq, + .ack = mask_and_ack_level_ioapic_irq, + .end = end_level_ioapic_irq_old, + .set_affinity = set_ioapic_affinity_irq, }; static inline void init_IO_APIC_traps(void) { int irq; /* Xen: This is way simpler than the Linux implementation. 
*/ - for (irq = 0; platform_legacy_irq(irq); irq++) - if (IO_APIC_IRQ(irq) && !irq_to_vector(irq)) + for ( irq = 0; platform_legacy_irq(irq); irq++ ) + if ( IO_APIC_IRQ(irq) && !irq_to_vector(irq) ) make_8259A_irq(irq); } @@ -1809,12 +1873,12 @@ static void ack_lapic_irq(struct irq_desc *desc) } static hw_irq_controller lapic_irq_type = { - .typename = "local-APIC-edge", - .startup = NULL, /* startup_irq() not used for IRQ0 */ - .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */ - .enable = enable_lapic_irq, - .disable = disable_lapic_irq, - .ack = ack_lapic_irq, + .typename = "local-APIC-edge", + .startup = NULL, /* startup_irq() not used for IRQ0 */ + .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */ + .enable = enable_lapic_irq, + .disable = disable_lapic_irq, + .ack = ack_lapic_irq, }; /* @@ -1840,8 +1904,8 @@ static void __init unlock_ExtINT_logic(void) memset(&entry1, 0, sizeof(entry1)); - entry1.dest_mode = 0; /* physical delivery */ - entry1.mask = 0; /* unmask IRQ now */ + entry1.dest_mode = 0; /* physical delivery */ + entry1.mask = 0; /* unmask IRQ now */ SET_DEST(entry1, physical, get_apic_id()); entry1.delivery_mode = dest_ExtINT; entry1.polarity = entry0.polarity; @@ -1852,14 +1916,14 @@ static void __init unlock_ExtINT_logic(void) save_control = CMOS_READ(RTC_CONTROL); save_freq_select = CMOS_READ(RTC_FREQ_SELECT); - CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6, - RTC_FREQ_SELECT); + CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6, RTC_FREQ_SELECT); CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL); i = 100; - while (i-- > 0) { + while ( i-- > 0 ) + { mdelay(10); - if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF) + if ( (CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF ) i -= 10; } @@ -1893,9 +1957,10 @@ static void __init check_timer(void) clear_irq_vector(0); cpumask_setall(&mask_all); - if ((ret = bind_irq_vector(0, vector, &mask_all))) - printk(KERN_ERR"..IRQ0 is not set correctly with ioapic!!!, err:%d\n", ret); - + if ( (ret = bind_irq_vector(0, vector, &mask_all)) ) + printk(KERN_ERR "..IRQ0 is not set correctly with ioapic!!!, err:%d\n", + ret); + irq_desc[0].status &= ~IRQ_DISABLED; /* @@ -1911,39 +1976,44 @@ static void __init check_timer(void) /*timer_ack = 1;*/ /*enable_8259A_irq(irq_to_desc(0));*/ - pin1 = find_isa_irq_pin(0, mp_INT); + pin1 = find_isa_irq_pin(0, mp_INT); apic1 = find_isa_irq_apic(0, mp_INT); - pin2 = ioapic_i8259.pin; + pin2 = ioapic_i8259.pin; apic2 = ioapic_i8259.apic; - printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n", + printk(KERN_INFO + "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n", vector, apic1, pin1, apic2, pin2); - if (pin1 != -1) { + if ( pin1 != -1 ) + { /* * Ok, does IRQ0 through the IOAPIC work? */ unmask_IO_APIC_irq(irq_to_desc(0)); - if (timer_irq_works()) { + if ( timer_irq_works() ) + { local_irq_restore(flags); return; } clear_IO_APIC_pin(apic1, pin1); printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to " - "IO-APIC\n"); + "IO-APIC\n"); } printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... "); - if (pin2 != -1) { + if ( pin2 != -1 ) + { printk("\n..... 
(found pin %d) ...", pin2); /* * legacy devices should be connected to IO APIC #0 */ setup_ExtINT_IRQ0_pin(apic2, pin2, vector); - if (timer_irq_works()) { + if ( timer_irq_works() ) + { local_irq_restore(flags); printk("works.\n"); - if (pin1 != -1) + if ( pin1 != -1 ) replace_pin_at_irq(0, apic1, pin1, apic2, pin2); else add_pin_to_irq(0, apic2, pin2); @@ -1956,8 +2026,10 @@ static void __init check_timer(void) } printk(" failed.\n"); - if (nmi_watchdog == NMI_IO_APIC) { - printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n"); + if ( nmi_watchdog == NMI_IO_APIC ) + { + printk(KERN_WARNING "timer doesn't work through the IO-APIC - " + "disabling NMI Watchdog!\n"); nmi_watchdog = 0; } @@ -1965,10 +2037,11 @@ static void __init check_timer(void) disable_8259A_irq(irq_to_desc(0)); irq_desc[0].handler = &lapic_irq_type; - apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */ + apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */ enable_8259A_irq(irq_to_desc(0)); - if (timer_irq_works()) { + if ( timer_irq_works() ) + { local_irq_restore(flags); printk(" works.\n"); return; @@ -1987,7 +2060,8 @@ static void __init check_timer(void) local_irq_restore(flags); - if (timer_irq_works()) { + if ( timer_irq_works() ) + { printk(" works.\n"); return; } @@ -2003,7 +2077,7 @@ static void __init check_timer(void) * Linux doesn't really care, as it's not actually used * for any interrupt handling anyway. */ -#define PIC_IRQS (1 << PIC_CASCADE_IR) +#define PIC_IRQS (1 << PIC_CASCADE_IR) static struct IO_APIC_route_entry *ioapic_pm_state; @@ -2011,10 +2085,10 @@ static void __init ioapic_pm_state_alloc(void) { int i, nr_entry = 0; - for (i = 0; i < nr_ioapics; i++) + for ( i = 0; i < nr_ioapics; i++ ) nr_entry += nr_ioapic_entries[i]; - ioapic_pm_state = _xmalloc(sizeof(struct IO_APIC_route_entry)*nr_entry, + ioapic_pm_state = _xmalloc(sizeof(struct IO_APIC_route_entry) * nr_entry, sizeof(struct IO_APIC_route_entry)); BUG_ON(ioapic_pm_state == NULL); } @@ -2023,15 +2097,16 @@ void __init setup_IO_APIC(void) { enable_IO_APIC(); - if (acpi_ioapic) - io_apic_irqs = ~0; /* all IRQs go through IOAPIC */ + if ( acpi_ioapic ) + io_apic_irqs = ~0; /* all IRQs go through IOAPIC */ else io_apic_irqs = ~PIC_IRQS; printk("ENABLING IO-APIC IRQs\n"); printk(" -> Using %s ACK method\n", ioapic_ack_new ? "new" : "old"); - if (ioapic_ack_new) { + if ( ioapic_ack_new ) + { ioapic_level_type.ack = irq_complete_move; ioapic_level_type.end = end_level_ioapic_irq_new; } @@ -2039,7 +2114,7 @@ void __init setup_IO_APIC(void) /* * Set up IO-APIC IRQ routing. 
*/ - if (!acpi_ioapic) + if ( !acpi_ioapic ) setup_ioapic_ids_from_mpc(); sync_Arb_IDs(); setup_IO_APIC_irqs(); @@ -2058,8 +2133,10 @@ void ioapic_suspend(void) int apic, i; spin_lock_irqsave(&ioapic_lock, flags); - for (apic = 0; apic < nr_ioapics; apic++) { - for (i = 0; i < nr_ioapic_entries[apic]; i ++, entry ++ ) { + for ( apic = 0; apic < nr_ioapics; apic++ ) + { + for ( i = 0; i < nr_ioapic_entries[apic]; i++, entry++ ) + { *(((int *)entry) + 1) = __io_apic_read(apic, 0x11 + 2 * i); *(((int *)entry) + 0) = __io_apic_read(apic, 0x10 + 2 * i); } @@ -2075,17 +2152,20 @@ void ioapic_resume(void) int i, apic; spin_lock_irqsave(&ioapic_lock, flags); - for (apic = 0; apic < nr_ioapics; apic++){ - if (!nr_ioapic_entries[apic]) + for ( apic = 0; apic < nr_ioapics; apic++ ) + { + if ( !nr_ioapic_entries[apic] ) continue; reg_00.raw = __io_apic_read(apic, 0); - if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid) { + if ( reg_00.bits.ID != mp_ioapics[apic].mpc_apicid ) + { reg_00.bits.ID = mp_ioapics[apic].mpc_apicid; __io_apic_write(apic, 0, reg_00.raw); } - for (i = 0; i < nr_ioapic_entries[apic]; i++, entry++) { - __io_apic_write(apic, 0x11+2*i, *(((int *)entry)+1)); - __io_apic_write(apic, 0x10+2*i, *(((int *)entry)+0)); + for ( i = 0; i < nr_ioapic_entries[apic]; i++, entry++ ) + { + __io_apic_write(apic, 0x11 + 2 * i, *(((int *)entry) + 1)); + __io_apic_write(apic, 0x10 + 2 * i, *(((int *)entry) + 0)); } } spin_unlock_irqrestore(&ioapic_lock, flags); @@ -2095,8 +2175,7 @@ void ioapic_resume(void) ACPI-based IOAPIC Configuration -------------------------------------------------------------------------- */ - -int __init io_apic_get_unique_id (int ioapic, int apic_id) +int __init io_apic_get_unique_id(int ioapic, int apic_id) { union IO_APIC_reg_00 reg_00; static physid_mask_t __initdata apic_id_map = PHYSID_MASK_NONE; @@ -2104,50 +2183,55 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id) int i = 0; /* - * The P4 platform supports up to 256 APIC IDs on two separate APIC - * buses (one for LAPICs, one for IOAPICs), where predecessors only + * The P4 platform supports up to 256 APIC IDs on two separate APIC + * buses (one for LAPICs, one for IOAPICs), where predecessors only * supports up to 16 on one shared APIC bus. - * + * * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full * advantage of new APIC bus architecture. */ - if (physids_empty(apic_id_map)) + if ( physids_empty(apic_id_map) ) ioapic_phys_id_map(&apic_id_map); spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(ioapic, 0); spin_unlock_irqrestore(&ioapic_lock, flags); - if (apic_id >= get_physical_broadcast()) { + if ( apic_id >= get_physical_broadcast() ) + { printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " - "%d\n", ioapic, apic_id, reg_00.bits.ID); + "%d\n", + ioapic, apic_id, reg_00.bits.ID); apic_id = reg_00.bits.ID; } /* - * Every APIC in a system must have a unique ID or we get lots of nice + * Every APIC in a system must have a unique ID or we get lots of nice * 'stuck on smp_invalidate_needed IPI wait' messages. 
*/ - if (check_apicid_used(&apic_id_map, apic_id)) { - - for (i = 0; i < get_physical_broadcast(); i++) { - if (!check_apicid_used(&apic_id_map, i)) + if ( check_apicid_used(&apic_id_map, apic_id) ) + { + for ( i = 0; i < get_physical_broadcast(); i++ ) + { + if ( !check_apicid_used(&apic_id_map, i) ) break; } - if (i == get_physical_broadcast()) + if ( i == get_physical_broadcast() ) panic("Max apic_id exceeded\n"); printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " - "trying %d\n", ioapic, apic_id, i); + "trying %d\n", + ioapic, apic_id, i); apic_id = i; - } + } set_apicid(apic_id, &apic_id_map); - if (reg_00.bits.ID != apic_id) { + if ( reg_00.bits.ID != apic_id ) + { reg_00.bits.ID = apic_id; spin_lock_irqsave(&ioapic_lock, flags); @@ -2156,22 +2240,22 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id) spin_unlock_irqrestore(&ioapic_lock, flags); /* Sanity check */ - if (reg_00.bits.ID != apic_id) { + if ( reg_00.bits.ID != apic_id ) + { printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); return -1; } } - apic_printk(APIC_VERBOSE, KERN_INFO - "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); + apic_printk(APIC_VERBOSE, KERN_INFO "IOAPIC[%d]: Assigned apic_id %d\n", + ioapic, apic_id); return apic_id; } - -int __init io_apic_get_version (int ioapic) +int __init io_apic_get_version(int ioapic) { - union IO_APIC_reg_01 reg_01; + union IO_APIC_reg_01 reg_01; unsigned long flags; spin_lock_irqsave(&ioapic_lock, flags); @@ -2181,10 +2265,9 @@ int __init io_apic_get_version (int ioapic) return reg_01.bits.version; } - -int __init io_apic_get_redir_entries (int ioapic) +int __init io_apic_get_redir_entries(int ioapic) { - union IO_APIC_reg_01 reg_01; + union IO_APIC_reg_01 reg_01; unsigned long flags; spin_lock_irqsave(&ioapic_lock, flags); @@ -2194,8 +2277,8 @@ int __init io_apic_get_redir_entries (int ioapic) return reg_01.bits.entries; } - -int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low) +int io_apic_set_pci_routing(int ioapic, int pin, int irq, int edge_level, + int active_high_low) { struct irq_desc *desc = irq_to_desc(irq); struct IO_APIC_route_entry entry; @@ -2203,9 +2286,10 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a unsigned long flags; int vector; - if (!IO_APIC_IRQ(irq)) { - printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ %d\n", - ioapic, irq); + if ( !IO_APIC_IRQ(irq) ) + { + printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ %d\n", ioapic, + irq); return -EINVAL; } @@ -2215,39 +2299,40 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a * corresponding device driver registers for this IRQ. */ - memset(&entry,0,sizeof(entry)); + memset(&entry, 0, sizeof(entry)); entry.delivery_mode = INT_DELIVERY_MODE; entry.dest_mode = INT_DEST_MODE; entry.trigger = edge_level; entry.polarity = active_high_low; - entry.mask = 1; + entry.mask = 1; /* * IRQs < 16 are already in the irq_2_pin[] map */ - if (!platform_legacy_irq(irq)) + if ( !platform_legacy_irq(irq) ) add_pin_to_irq(irq, ioapic, pin); vector = assign_irq_vector(irq, NULL); - if (vector < 0) + if ( vector < 0 ) return vector; entry.vector = vector; cpumask_copy(&mask, TARGET_CPUS); /* Don't chance ending up with an empty mask. 
*/ - if (cpumask_intersects(&mask, desc->arch.cpu_mask)) + if ( cpumask_intersects(&mask, desc->arch.cpu_mask) ) cpumask_and(&mask, &mask, desc->arch.cpu_mask); SET_DEST(entry, logical, cpu_mask_to_apicid(&mask)); - apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry " - "(%d-%d -> %#x -> IRQ %d Mode:%i Active:%i)\n", ioapic, - mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq, - edge_level, active_high_low); + apic_printk(APIC_DEBUG, + KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry " + "(%d-%d -> %#x -> IRQ %d Mode:%i Active:%i)\n", + ioapic, mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq, + edge_level, active_high_low); ioapic_register_intr(irq, edge_level); - if (!ioapic && platform_legacy_irq(irq)) + if ( !ioapic && platform_legacy_irq(irq) ) disable_8259A_irq(desc); spin_lock_irqsave(&ioapic_lock, flags); @@ -2256,7 +2341,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a spin_unlock(&ioapic_lock); spin_lock(&desc->lock); - if (!(desc->status & (IRQ_DISABLED | IRQ_GUEST))) + if ( !(desc->status & (IRQ_DISABLED | IRQ_GUEST)) ) desc->handler->startup(desc); spin_unlock_irqrestore(&desc->lock, flags); @@ -2280,13 +2365,12 @@ static int apic_pin_2_gsi_irq(int apic, int pin) { int idx; - if (apic < 0) - return -EINVAL; + if ( apic < 0 ) + return -EINVAL; idx = find_irq_entry(apic, pin, mp_INT); - return idx >= 0 ? pin_2_irq(idx, apic, pin) - : io_apic_gsi_base(apic) + pin; + return idx >= 0 ? pin_2_irq(idx, apic, pin) : io_apic_gsi_base(apic) + pin; } int ioapic_guest_read(unsigned long physbase, unsigned int reg, u32 *pval) @@ -2304,16 +2388,16 @@ int ioapic_guest_read(unsigned long physbase, unsigned int reg, u32 *pval) return 0; } -#define WARN_BOGUS_WRITE(f, a...) \ - dprintk(XENLOG_INFO, "IO-APIC: apic=%d, pin=%d, irq=%d\n" \ - XENLOG_INFO "IO-APIC: new_entry=%08x\n" \ - XENLOG_INFO "IO-APIC: " f "\n", \ - apic, pin, irq, *(u32 *)&rte, ##a ) +#define WARN_BOGUS_WRITE(f, a...) \ + dprintk(XENLOG_INFO, \ + "IO-APIC: apic=%d, pin=%d, irq=%d\n" XENLOG_INFO \ + "IO-APIC: new_entry=%08x\n" XENLOG_INFO "IO-APIC: " f "\n", \ + apic, pin, irq, *(u32 *)&rte, ##a) int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val) { int apic, pin, irq, ret, pirq; - struct IO_APIC_route_entry rte = { 0 }; + struct IO_APIC_route_entry rte = {0}; unsigned long flags; struct irq_desc *desc; @@ -2323,7 +2407,7 @@ int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val) /* Only write to the first half of a route entry. */ if ( (reg < 0x10) || (reg & 1) ) return 0; - + pin = (reg - 0x10) >> 1; /* Write first half from guest; second half is target info. */ @@ -2349,7 +2433,7 @@ int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val) * Apply genapic conventions for this platform. 
*/ rte.delivery_mode = INT_DELIVERY_MODE; - rte.dest_mode = INT_DEST_MODE; + rte.dest_mode = INT_DEST_MODE; irq = apic_pin_2_gsi_irq(apic, pin); if ( irq < 0 ) @@ -2371,17 +2455,18 @@ int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val) } else pirq = -1; - + if ( desc->action ) { spin_lock_irqsave(&ioapic_lock, flags); ret = io_apic_read(apic, 0x10 + 2 * pin); spin_unlock_irqrestore(&ioapic_lock, flags); rte.vector = desc->arch.vector; - if ( *(u32*)&rte != ret ) - WARN_BOGUS_WRITE("old_entry=%08x pirq=%d\n" XENLOG_INFO - "IO-APIC: Attempt to modify IO-APIC pin for in-use IRQ!", - ret, pirq); + if ( *(u32 *)&rte != ret ) + WARN_BOGUS_WRITE( + "old_entry=%08x pirq=%d\n" XENLOG_INFO + "IO-APIC: Attempt to modify IO-APIC pin for in-use IRQ!", + ret, pirq); return 0; } @@ -2408,8 +2493,8 @@ int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val) if ( pirq >= 0 ) { spin_lock(&hardware_domain->event_lock); - ret = map_domain_pirq(hardware_domain, pirq, irq, - MAP_PIRQ_TYPE_GSI, NULL); + ret = map_domain_pirq(hardware_domain, pirq, irq, MAP_PIRQ_TYPE_GSI, + NULL); spin_unlock(&hardware_domain->event_lock); if ( ret < 0 ) return ret; @@ -2417,8 +2502,7 @@ int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val) spin_lock_irqsave(&ioapic_lock, flags); /* Set the correct irq-handling type. */ - desc->handler = rte.trigger ? - &ioapic_level_type: &ioapic_edge_type; + desc->handler = rte.trigger ? &ioapic_level_type : &ioapic_edge_type; /* Mask iff level triggered. */ rte.mask = rte.trigger; @@ -2428,26 +2512,34 @@ int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val) SET_DEST(rte, logical, cpu_mask_to_apicid(desc->arch.cpu_mask)); __ioapic_write_entry(apic, pin, 0, rte); - + spin_unlock_irqrestore(&ioapic_lock, flags); return 0; } -static const char * delivery_mode_2_str( - const enum ioapic_irq_destination_types mode) +static const char * +delivery_mode_2_str(const enum ioapic_irq_destination_types mode) { - switch ( mode ) + switch (mode) { - case dest_Fixed: return "Fixed"; - case dest_LowestPrio: return "LoPri"; - case dest_SMI: return "SMI"; - case dest_NMI: return "NMI"; - case dest_INIT: return "INIT"; - case dest_ExtINT: return "ExINT"; + case dest_Fixed: + return "Fixed"; + case dest_LowestPrio: + return "LoPri"; + case dest_SMI: + return "SMI"; + case dest_NMI: + return "NMI"; + case dest_INIT: + return "INIT"; + case dest_ExtINT: + return "ExINT"; case dest__reserved_1: - case dest__reserved_2: return "Resvd"; - default: return "INVAL"; + case dest__reserved_2: + return "Resvd"; + default: + return "INVAL"; } } @@ -2474,7 +2566,7 @@ void dump_ioapic_irq_info(void) printk(" IRQ%3d Vec%3d:\n", irq, irq_to_vector(irq)); - for ( ; ; ) + for ( ;; ) { pin = entry->pin; @@ -2485,9 +2577,8 @@ void dump_ioapic_irq_info(void) printk("vec=%02x delivery=%-5s dest=%c status=%d " "polarity=%d irr=%d trig=%c mask=%d dest_id:%d\n", rte.vector, delivery_mode_2_str(rte.delivery_mode), - rte.dest_mode ? 'L' : 'P', - rte.delivery_status, rte.polarity, rte.irr, - rte.trigger ? 'L' : 'E', rte.mask, + rte.dest_mode ? 'L' : 'P', rte.delivery_status, rte.polarity, + rte.irr, rte.trigger ? 
'L' : 'E', rte.mask, rte.dest.logical.logical_dest); if ( entry->next == 0 ) @@ -2502,13 +2593,14 @@ integer_param("max_gsi_irqs", max_gsi_irqs); static __init bool bad_ioapic_register(unsigned int idx) { - union IO_APIC_reg_00 reg_00 = { .raw = io_apic_read(idx, 0) }; - union IO_APIC_reg_01 reg_01 = { .raw = io_apic_read(idx, 1) }; - union IO_APIC_reg_02 reg_02 = { .raw = io_apic_read(idx, 2) }; + union IO_APIC_reg_00 reg_00 = {.raw = io_apic_read(idx, 0)}; + union IO_APIC_reg_01 reg_01 = {.raw = io_apic_read(idx, 1)}; + union IO_APIC_reg_02 reg_02 = {.raw = io_apic_read(idx, 2)}; if ( reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1 ) { - printk(KERN_WARNING "I/O APIC %#x registers return all ones, skipping!\n", + printk(KERN_WARNING + "I/O APIC %#x registers return all ones, skipping!\n", mp_ioapics[idx].mpc_apicaddr); return 1; } @@ -2531,7 +2623,8 @@ void __init init_ioapic_mappings(void) ioapic_phys = mp_ioapics[i].mpc_apicaddr; if ( !ioapic_phys ) { - printk(KERN_ERR "WARNING: bogus zero IO-APIC address " + printk(KERN_ERR + "WARNING: bogus zero IO-APIC address " "found in MPTABLE, disabling IO/APIC support!\n"); smp_found_config = false; skip_ioapic_setup = true; @@ -2540,7 +2633,7 @@ void __init init_ioapic_mappings(void) } else { - fake_ioapic_page: + fake_ioapic_page: ioapic_phys = __pa(alloc_xenheap_page()); clear_page(__va(ioapic_phys)); } @@ -2596,14 +2689,14 @@ void __init init_ioapic_mappings(void) } if ( nr_irqs == 0 ) - nr_irqs = cpu_has_apic ? - max(16U + num_present_cpus() * NR_DYNAMIC_VECTORS, - 8 * nr_irqs_gsi) : - nr_irqs_gsi; + nr_irqs = cpu_has_apic + ? max(16U + num_present_cpus() * NR_DYNAMIC_VECTORS, + 8 * nr_irqs_gsi) + : nr_irqs_gsi; else if ( nr_irqs < 16 ) nr_irqs = 16; - printk(XENLOG_INFO "IRQ limits: %u GSI, %u MSI/MSI-X\n", - nr_irqs_gsi, nr_irqs - nr_irqs_gsi); + printk(XENLOG_INFO "IRQ limits: %u GSI, %u MSI/MSI-X\n", nr_irqs_gsi, + nr_irqs - nr_irqs_gsi); } unsigned int arch_hwdom_irqs(domid_t domid) @@ -2615,7 +2708,7 @@ unsigned int arch_hwdom_irqs(domid_t domid) n = min(nr_irqs_gsi + n * NR_DYNAMIC_VECTORS, nr_irqs); /* Bounded by the domain pirq eoi bitmap gfn. 
*/ - n = min_t(unsigned int, n, PAGE_SIZE * BITS_PER_BYTE); + n = min_t(unsigned int, n, PAGE_SIZE *BITS_PER_BYTE); printk("Dom%d has maximum %u PIRQs\n", domid, n); diff --git a/xen/arch/x86/ioport_emulate.c b/xen/arch/x86/ioport_emulate.c index c2aded7668..f0065d997d 100644 --- a/xen/arch/x86/ioport_emulate.c +++ b/xen/arch/x86/ioport_emulate.c @@ -8,8 +8,8 @@ #include #include -static bool ioemul_handle_proliant_quirk( - u8 opcode, char *io_emul_stub, struct cpu_user_regs *regs) +static bool ioemul_handle_proliant_quirk(u8 opcode, char *io_emul_stub, + struct cpu_user_regs *regs) { static const char stub[] = { 0x9c, /* pushf */ @@ -48,69 +48,76 @@ static struct dmi_system_id __initdata ioport_quirks_tbl[] = { { .callback = proliant_quirk, .ident = "HP ProLiant DL3xx", - .matches = { - DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL3"), - }, + .matches = + { + DMI_MATCH(DMI_BIOS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL3"), + }, }, { .callback = proliant_quirk, .ident = "HP ProLiant DL5xx", - .matches = { - DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL5"), - }, + .matches = + { + DMI_MATCH(DMI_BIOS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL5"), + }, }, { .callback = proliant_quirk, .ident = "HP ProLiant DL7xx", - .matches = { - DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL7"), - }, + .matches = + { + DMI_MATCH(DMI_BIOS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL7"), + }, }, { .callback = proliant_quirk, .ident = "HP ProLiant ML3xx", - .matches = { - DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant ML3"), - }, + .matches = + { + DMI_MATCH(DMI_BIOS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant ML3"), + }, }, { .callback = proliant_quirk, .ident = "HP ProLiant ML5xx", - .matches = { - DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant ML5"), - }, + .matches = + { + DMI_MATCH(DMI_BIOS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant ML5"), + }, }, { .callback = proliant_quirk, .ident = "HP ProLiant BL2xx", - .matches = { - DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL2"), - }, + .matches = + { + DMI_MATCH(DMI_BIOS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL2"), + }, }, { .callback = proliant_quirk, .ident = "HP ProLiant BL4xx", - .matches = { - DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL4"), - }, + .matches = + { + DMI_MATCH(DMI_BIOS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL4"), + }, }, { .callback = proliant_quirk, .ident = "HP ProLiant BL6xx", - .matches = { - DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL6"), - }, + .matches = + { + DMI_MATCH(DMI_BIOS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL6"), + }, }, - { } -}; + {}}; static int __init ioport_quirks_init(void) { diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c index 23b4f423e6..71e2190bec 100644 --- a/xen/arch/x86/irq.c +++ b/xen/arch/x86/irq.c @@ -1,6 +1,6 @@ /****************************************************************************** * arch/x86/irq.c - * + * * Portions of this file are: * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar */ @@ -101,8 +101,9 @@ void unlock_vector_lock(void) static void trace_irq_mask(u32 event, int irq, int vector, cpumask_t *mask) { - struct { - unsigned int irq:16, vec:16; + struct + { + unsigned int irq : 16, vec : 16; unsigned int mask[6]; } d; d.irq = 
irq; @@ -112,7 +113,8 @@ static void trace_irq_mask(u32 event, int irq, int vector, cpumask_t *mask) trace_var(event, 1, sizeof(d), &d); } -static int __init __bind_irq_vector(int irq, int vector, const cpumask_t *cpu_mask) +static int __init __bind_irq_vector(int irq, int vector, + const cpumask_t *cpu_mask) { cpumask_t online_mask; int cpu; @@ -122,7 +124,7 @@ static int __init __bind_irq_vector(int irq, int vector, const cpumask_t *cpu_ma BUG_ON((unsigned)vector >= NR_VECTORS); cpumask_and(&online_mask, cpu_mask, &cpu_online_map); - if (cpumask_empty(&online_mask)) + if ( cpumask_empty(&online_mask) ) return -EINVAL; if ( (desc->arch.vector == vector) && cpumask_equal(desc->arch.cpu_mask, &online_mask) ) @@ -130,7 +132,7 @@ static int __init __bind_irq_vector(int irq, int vector, const cpumask_t *cpu_ma if ( desc->arch.vector != IRQ_VECTOR_UNASSIGNED ) return -EBUSY; trace_irq_mask(TRC_HW_IRQ_BIND_VECTOR, irq, vector, &online_mask); - for_each_cpu(cpu, &online_mask) + for_each_cpu (cpu, &online_mask) per_cpu(vector_irq, cpu)[vector] = irq; desc->arch.vector = vector; cpumask_copy(desc->arch.cpu_mask, &online_mask); @@ -162,30 +164,30 @@ int create_irq(nodeid_t node) int irq, ret; struct irq_desc *desc; - for (irq = nr_irqs_gsi; irq < nr_irqs; irq++) + for ( irq = nr_irqs_gsi; irq < nr_irqs; irq++ ) { desc = irq_to_desc(irq); - if (cmpxchg(&desc->arch.used, IRQ_UNUSED, IRQ_RESERVED) == IRQ_UNUSED) - break; + if ( cmpxchg(&desc->arch.used, IRQ_UNUSED, IRQ_RESERVED) == IRQ_UNUSED ) + break; } - if (irq >= nr_irqs) - return -ENOSPC; + if ( irq >= nr_irqs ) + return -ENOSPC; ret = init_one_irq_desc(desc); - if (!ret) + if ( !ret ) { cpumask_t *mask = NULL; if ( node != NUMA_NO_NODE ) { mask = &node_to_cpumask(node); - if (cpumask_empty(mask)) + if ( cpumask_empty(mask) ) mask = NULL; } ret = assign_irq_vector(irq, mask); } - if (ret < 0) + if ( ret < 0 ) { desc->arch.used = IRQ_UNUSED; irq = ret; @@ -221,17 +223,19 @@ void destroy_irq(unsigned int irq) } spin_lock_irqsave(&desc->lock, flags); - desc->status &= ~IRQ_GUEST; + desc->status &= ~IRQ_GUEST; desc->handler->shutdown(desc); desc->status |= IRQ_DISABLED; action = desc->action; - desc->action = NULL; + desc->action = NULL; desc->msi_desc = NULL; cpumask_setall(desc->affinity); spin_unlock_irqrestore(&desc->lock, flags); /* Wait to make sure it's not being used on another CPU */ - do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS ); + do { + smp_mb(); + } while ( desc->status & IRQ_INPROGRESS ); spin_lock_irqsave(&desc->lock, flags); desc->handler = &no_irq_type; @@ -254,8 +258,9 @@ static void __clear_irq_vector(int irq) vector = desc->arch.vector; cpumask_and(&tmp_mask, desc->arch.cpu_mask, &cpu_online_map); - for_each_cpu(cpu, &tmp_mask) { - ASSERT( per_cpu(vector_irq, cpu)[vector] == irq ); + for_each_cpu (cpu, &tmp_mask) + { + ASSERT(per_cpu(vector_irq, cpu)[vector] == irq); per_cpu(vector_irq, cpu)[vector] = ~irq; } @@ -279,8 +284,9 @@ static void __clear_irq_vector(int irq) old_vector = desc->arch.old_vector; cpumask_and(&tmp_mask, desc->arch.old_cpu_mask, &cpu_online_map); - for_each_cpu(cpu, &tmp_mask) { - ASSERT( per_cpu(vector_irq, cpu)[old_vector] == irq ); + for_each_cpu (cpu, &tmp_mask) + { + ASSERT(per_cpu(vector_irq, cpu)[old_vector] == irq); TRACE_3D(TRC_HW_IRQ_MOVE_FINISH, irq, old_vector, cpu); per_cpu(vector_irq, cpu)[old_vector] = ~irq; } @@ -312,13 +318,13 @@ int irq_to_vector(int irq) BUG_ON(irq >= nr_irqs || irq < 0); - if (IO_APIC_IRQ(irq)) + if ( IO_APIC_IRQ(irq) ) { vector = irq_to_desc(irq)->arch.vector; 
- if (vector >= FIRST_LEGACY_VECTOR && vector <= LAST_LEGACY_VECTOR) + if ( vector >= FIRST_LEGACY_VECTOR && vector <= LAST_LEGACY_VECTOR ) vector = 0; } - else if (MSI_IRQ(irq)) + else if ( MSI_IRQ(irq) ) vector = irq_to_desc(irq)->arch.vector; else vector = LEGACY_VECTOR(irq); @@ -359,7 +365,7 @@ int __init init_irq_data(void) this_cpu(vector_irq)[vector] = INT_MIN; irq_desc = xzalloc_array(struct irq_desc, nr_irqs); - + if ( !irq_desc ) return -ENOMEM; @@ -377,7 +383,7 @@ int __init init_irq_data(void) set_bit(LEGACY_SYSCALL_VECTOR, used_vectors); set_bit(HYPERCALL_VECTOR, used_vectors); #endif - + /* IRQ_MOVE_CLEANUP_VECTOR used for clean up vectors */ set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); @@ -392,12 +398,8 @@ static void ack_none(struct irq_desc *desc) } hw_irq_controller no_irq_type = { - "none", - irq_startup_none, - irq_shutdown_none, - irq_enable_none, - irq_disable_none, - ack_none, + "none", irq_startup_none, irq_shutdown_none, + irq_enable_none, irq_disable_none, ack_none, }; static vmask_t *irq_get_used_vector_mask(int irq) @@ -416,13 +418,13 @@ static vmask_t *irq_get_used_vector_mask(int irq) else { int vector; - + vector = irq_to_vector(irq); if ( vector > 0 ) { - printk(XENLOG_INFO "IRQ %d already assigned vector %d\n", - irq, vector); - + printk(XENLOG_INFO "IRQ %d already assigned vector %d\n", irq, + vector); + ASSERT(!test_bit(vector, ret)); set_bit(vector, ret); @@ -438,8 +440,8 @@ static vmask_t *irq_get_used_vector_mask(int irq) return ret; } -static int __assign_irq_vector( - int irq, struct irq_desc *desc, const cpumask_t *mask) +static int __assign_irq_vector(int irq, struct irq_desc *desc, + const cpumask_t *mask) { /* * NOTE! The local APIC isn't very good at handling @@ -458,9 +460,11 @@ static int __assign_irq_vector( vmask_t *irq_used_vectors = NULL; old_vector = irq_to_vector(irq); - if (old_vector > 0) { + if ( old_vector > 0 ) + { cpumask_and(&tmp_mask, mask, &cpu_online_map); - if (cpumask_intersects(&tmp_mask, desc->arch.cpu_mask)) { + if ( cpumask_intersects(&tmp_mask, desc->arch.cpu_mask) ) + { desc->arch.vector = old_vector; return 0; } @@ -481,56 +485,57 @@ static int __assign_irq_vector( else irq_used_vectors = irq_get_used_vector_mask(irq); - for_each_cpu(cpu, mask) { + for_each_cpu (cpu, mask) + { int new_cpu; int vector, offset; /* Only try and allocate irqs on cpus that are present. */ - if (!cpu_online(cpu)) + if ( !cpu_online(cpu) ) continue; - cpumask_and(&tmp_mask, vector_allocation_cpumask(cpu), - &cpu_online_map); + cpumask_and(&tmp_mask, vector_allocation_cpumask(cpu), &cpu_online_map); vector = current_vector; offset = current_offset; -next: + next: vector += 8; - if (vector > LAST_DYNAMIC_VECTOR) { + if ( vector > LAST_DYNAMIC_VECTOR ) + { /* If out of vectors on large boxen, must share them. */ offset = (offset + 1) % 8; vector = FIRST_DYNAMIC_VECTOR + offset; } - if (unlikely(current_vector == vector)) + if ( unlikely(current_vector == vector) ) continue; - if (test_bit(vector, used_vectors)) + if ( test_bit(vector, used_vectors) ) goto next; - if (irq_used_vectors - && test_bit(vector, irq_used_vectors) ) + if ( irq_used_vectors && test_bit(vector, irq_used_vectors) ) goto next; - for_each_cpu(new_cpu, &tmp_mask) - if (per_cpu(vector_irq, new_cpu)[vector] >= 0) + for_each_cpu (new_cpu, &tmp_mask) + if ( per_cpu(vector_irq, new_cpu)[vector] >= 0 ) goto next; /* Found one! 
*/ current_vector = vector; current_offset = offset; - if (old_vector > 0) { + if ( old_vector > 0 ) + { desc->arch.move_in_progress = 1; cpumask_copy(desc->arch.old_cpu_mask, desc->arch.cpu_mask); desc->arch.old_vector = desc->arch.vector; } trace_irq_mask(TRC_HW_IRQ_ASSIGN_VECTOR, irq, vector, &tmp_mask); - for_each_cpu(new_cpu, &tmp_mask) + for_each_cpu (new_cpu, &tmp_mask) per_cpu(vector_irq, new_cpu)[vector] = irq; desc->arch.vector = vector; cpumask_copy(desc->arch.cpu_mask, &tmp_mask); desc->arch.used = IRQ_USED; - ASSERT((desc->arch.used_vectors == NULL) - || (desc->arch.used_vectors == irq_used_vectors)); + ASSERT((desc->arch.used_vectors == NULL) || + (desc->arch.used_vectors == irq_used_vectors)); desc->arch.used_vectors = irq_used_vectors; if ( desc->arch.used_vectors ) @@ -551,12 +556,13 @@ int assign_irq_vector(int irq, const cpumask_t *mask) int ret; unsigned long flags; struct irq_desc *desc = irq_to_desc(irq); - - BUG_ON(irq >= nr_irqs || irq <0); + + BUG_ON(irq >= nr_irqs || irq < 0); spin_lock_irqsave(&vector_lock, flags); ret = __assign_irq_vector(irq, desc, mask ?: TARGET_CPUS); - if (!ret) { + if ( !ret ) + { ret = desc->arch.vector; cpumask_copy(desc->affinity, desc->arch.cpu_mask); } @@ -596,22 +602,22 @@ void move_masked_irq(struct irq_desc *desc) { cpumask_t *pending_mask = desc->arch.pending_mask; - if (likely(!(desc->status & IRQ_MOVE_PENDING))) + if ( likely(!(desc->status & IRQ_MOVE_PENDING)) ) return; - + desc->status &= ~IRQ_MOVE_PENDING; - if (unlikely(cpumask_empty(pending_mask))) + if ( unlikely(cpumask_empty(pending_mask)) ) return; - if (!desc->handler->set_affinity) + if ( !desc->handler->set_affinity ) return; /* - * If there was a valid mask to work with, please do the disable, - * re-program, enable sequence. This is *not* particularly important for - * level triggered but in a edge trigger case, we might be setting rte when - * an active trigger is comming in. This could cause some ioapics to + * If there was a valid mask to work with, please do the disable, + * re-program, enable sequence. This is *not* particularly important for + * level triggered but in a edge trigger case, we might be setting rte when + * an active trigger is comming in. This could cause some ioapics to * mal-function. Being paranoid i guess! * * For correct operation this depends on the caller masking the irqs. 
@@ -624,10 +630,10 @@ void move_masked_irq(struct irq_desc *desc) void move_native_irq(struct irq_desc *desc) { - if (likely(!(desc->status & IRQ_MOVE_PENDING))) + if ( likely(!(desc->status & IRQ_MOVE_PENDING)) ) return; - if (unlikely(desc->status & IRQ_DISABLED)) + if ( unlikely(desc->status & IRQ_DISABLED) ) return; desc->handler->disable(desc); @@ -642,26 +648,26 @@ void irq_move_cleanup_interrupt(struct cpu_user_regs *regs) ack_APIC_irq(); me = smp_processor_id(); - for ( vector = FIRST_DYNAMIC_VECTOR; - vector <= LAST_HIPRIORITY_VECTOR; vector++) + for ( vector = FIRST_DYNAMIC_VECTOR; vector <= LAST_HIPRIORITY_VECTOR; + vector++ ) { unsigned int irq; unsigned int irr; struct irq_desc *desc; irq = __get_cpu_var(vector_irq)[vector]; - if ((int)irq < 0) + if ( (int)irq < 0 ) continue; if ( vector >= FIRST_LEGACY_VECTOR && vector <= LAST_LEGACY_VECTOR ) continue; desc = irq_to_desc(irq); - if (!desc) + if ( !desc ) continue; spin_lock(&desc->lock); - if (!desc->arch.move_cleanup_count) + if ( !desc->arch.move_cleanup_count ) goto unlock; if ( vector == desc->arch.vector && @@ -676,15 +682,15 @@ void irq_move_cleanup_interrupt(struct cpu_user_regs *regs) * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR * to myself. */ - if (irr & (1 << (vector % 32))) { + if ( irr & (1 << (vector % 32)) ) + { send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); - TRACE_3D(TRC_HW_IRQ_MOVE_CLEANUP_DELAY, - irq, vector, smp_processor_id()); + TRACE_3D(TRC_HW_IRQ_MOVE_CLEANUP_DELAY, irq, vector, + smp_processor_id()); goto unlock; } - TRACE_3D(TRC_HW_IRQ_MOVE_CLEANUP, - irq, vector, smp_processor_id()); + TRACE_3D(TRC_HW_IRQ_MOVE_CLEANUP, irq, vector, smp_processor_id()); __get_cpu_var(vector_irq)[vector] = ~irq; desc->arch.move_cleanup_count--; @@ -700,7 +706,7 @@ void irq_move_cleanup_interrupt(struct cpu_user_regs *regs) clear_bit(vector, desc->arch.used_vectors); } } -unlock: + unlock: spin_unlock(&desc->lock); } } @@ -720,7 +726,7 @@ void irq_complete_move(struct irq_desc *desc) { unsigned vector, me; - if (likely(!desc->arch.move_in_progress)) + if ( likely(!desc->arch.move_in_progress) ) return; vector = (u8)get_irq_regs()->entry_vector; @@ -738,7 +744,7 @@ unsigned int set_desc_affinity(struct irq_desc *desc, const cpumask_t *mask) unsigned long flags; cpumask_t dest_mask; - if (!cpumask_intersects(mask, &cpu_online_map)) + if ( !cpumask_intersects(mask, &cpu_online_map) ) return BAD_APICID; irq = desc->irq; @@ -747,7 +753,7 @@ unsigned int set_desc_affinity(struct irq_desc *desc, const cpumask_t *mask) ret = __assign_irq_vector(irq, desc, mask); spin_unlock_irqrestore(&vector_lock, flags); - if (ret < 0) + if ( ret < 0 ) return BAD_APICID; cpumask_copy(desc->affinity, mask); @@ -759,9 +765,9 @@ unsigned int set_desc_affinity(struct irq_desc *desc, const cpumask_t *mask) /* For re-setting irq interrupt affinity for specific irq */ void irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask) { - if (!desc->handler->set_affinity) + if ( !desc->handler->set_affinity ) return; - + ASSERT(spin_is_locked(&desc->lock)); desc->status &= ~IRQ_MOVE_PENDING; smp_wmb(); @@ -792,20 +798,21 @@ uint8_t alloc_hipriority_vector(void) } static void (*direct_apic_vector[NR_VECTORS])(struct cpu_user_regs *); -void set_direct_apic_vector( - uint8_t vector, void (*handler)(struct cpu_user_regs *)) +void set_direct_apic_vector(uint8_t vector, + void (*handler)(struct cpu_user_regs *)) { BUG_ON(direct_apic_vector[vector] != NULL); direct_apic_vector[vector] = handler; } -void alloc_direct_apic_vector( - uint8_t *vector, 
void (*handler)(struct cpu_user_regs *)) +void alloc_direct_apic_vector(uint8_t *vector, + void (*handler)(struct cpu_user_regs *)) { static DEFINE_SPINLOCK(lock); spin_lock(&lock); - if (*vector == 0) { + if ( *vector == 0 ) + { *vector = alloc_hipriority_vector(); set_direct_apic_vector(*vector, handler); } @@ -815,29 +822,33 @@ void alloc_direct_apic_vector( void do_IRQ(struct cpu_user_regs *regs) { struct irqaction *action; - uint32_t tsc_in; - struct irq_desc *desc; - unsigned int vector = (u8)regs->entry_vector; + uint32_t tsc_in; + struct irq_desc *desc; + unsigned int vector = (u8)regs->entry_vector; int irq = __get_cpu_var(vector_irq[vector]); struct cpu_user_regs *old_regs = set_irq_regs(regs); - + perfc_incr(irqs); this_cpu(irq_count)++; irq_enter(); - if (irq < 0) { - if (direct_apic_vector[vector] != NULL) { + if ( irq < 0 ) + { + if ( direct_apic_vector[vector] != NULL ) + { (*direct_apic_vector[vector])(regs); - } else { + } + else + { const char *kind = ", LAPIC"; if ( apic_isr_read(vector) ) ack_APIC_irq(); else kind = ""; - if ( ! ( vector >= FIRST_LEGACY_VECTOR && - vector <= LAST_LEGACY_VECTOR && - bogus_8259A_irq(vector - FIRST_LEGACY_VECTOR) ) ) + if ( !(vector >= FIRST_LEGACY_VECTOR && + vector <= LAST_LEGACY_VECTOR && + bogus_8259A_irq(vector - FIRST_LEGACY_VECTOR)) ) { printk("CPU%u: No irq handler for vector %02x (IRQ %d%s)\n", smp_processor_id(), vector, irq, kind); @@ -845,12 +856,13 @@ void do_IRQ(struct cpu_user_regs *regs) if ( ~irq < nr_irqs && irq_desc_initialized(desc) ) { spin_lock(&desc->lock); - printk("IRQ%d a=%04lx[%04lx,%04lx] v=%02x[%02x] t=%s s=%08x\n", - ~irq, *cpumask_bits(desc->affinity), - *cpumask_bits(desc->arch.cpu_mask), - *cpumask_bits(desc->arch.old_cpu_mask), - desc->arch.vector, desc->arch.old_vector, - desc->handler->typename, desc->status); + printk( + "IRQ%d a=%04lx[%04lx,%04lx] v=%02x[%02x] t=%s s=%08x\n", + ~irq, *cpumask_bits(desc->affinity), + *cpumask_bits(desc->arch.cpu_mask), + *cpumask_bits(desc->arch.old_cpu_mask), + desc->arch.vector, desc->arch.old_vector, + desc->handler->typename, desc->status); spin_unlock(&desc->lock); } } @@ -874,8 +886,8 @@ void do_IRQ(struct cpu_user_regs *regs) { desc->handler->disable(desc); /* - * If handler->disable doesn't actually mask the interrupt, a - * disabled irq still can fire. This check also avoids possible + * If handler->disable doesn't actually mask the interrupt, a + * disabled irq still can fire. This check also avoids possible * deadlocks if ratelimit_timer_fn runs at the same time. */ if ( likely(list_empty(&desc->rl_link)) ) @@ -902,7 +914,7 @@ void do_IRQ(struct cpu_user_regs *regs) desc->status |= IRQ_PENDING; /* - * Since we set PENDING, if another processor is handling a different + * Since we set PENDING, if another processor is handling a different * instance of this same irq, the other processor will take care of it. 
*/ if ( desc->status & (IRQ_DISABLED | IRQ_INPROGRESS) ) @@ -923,12 +935,12 @@ void do_IRQ(struct cpu_user_regs *regs) desc->status &= ~IRQ_INPROGRESS; - out: +out: if ( desc->handler->end ) desc->handler->end(desc, vector); - out_no_end: +out_no_end: spin_unlock(&desc->lock); - out_no_unlock: +out_no_unlock: irq_exit(); set_irq_regs(old_regs); } @@ -940,7 +952,7 @@ static void irq_ratelimit_timer_fn(void *data) spin_lock_irqsave(&irq_ratelimit_lock, flags); - list_for_each_entry_safe ( desc, tmp, &irq_ratelimit_list, rl_link ) + list_for_each_entry_safe(desc, tmp, &irq_ratelimit_list, rl_link) { spin_lock(&desc->lock); desc->handler->enable(desc); @@ -961,10 +973,10 @@ static int __init irq_ratelimit_init(void) __initcall(irq_ratelimit_init); int __init request_irq(unsigned int irq, unsigned int irqflags, - void (*handler)(int, void *, struct cpu_user_regs *), - const char * devname, void *dev_id) + void (*handler)(int, void *, struct cpu_user_regs *), + const char *devname, void *dev_id) { - struct irqaction * action; + struct irqaction *action; int retval; /* @@ -973,13 +985,13 @@ int __init request_irq(unsigned int irq, unsigned int irqflags, * which interrupt is which (messes up the interrupt freeing * logic etc). */ - if (irq >= nr_irqs) + if ( irq >= nr_irqs ) return -EINVAL; - if (!handler) + if ( !handler ) return -EINVAL; action = xmalloc(struct irqaction); - if (!action) + if ( !action ) return -ENOMEM; action->handler = handler; @@ -988,7 +1000,7 @@ int __init request_irq(unsigned int irq, unsigned int irqflags, action->free_on_release = 1; retval = setup_irq(irq, irqflags, action); - if (retval) + if ( retval ) xfree(action); return retval; @@ -1002,17 +1014,19 @@ void __init release_irq(unsigned int irq, const void *dev_id) desc = irq_to_desc(irq); - spin_lock_irqsave(&desc->lock,flags); + spin_lock_irqsave(&desc->lock, flags); action = desc->action; - desc->action = NULL; + desc->action = NULL; desc->handler->shutdown(desc); desc->status |= IRQ_DISABLED; - spin_unlock_irqrestore(&desc->lock,flags); + spin_unlock_irqrestore(&desc->lock, flags); /* Wait to make sure it's not being used on another CPU */ - do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS ); + do { + smp_mb(); + } while ( desc->status & IRQ_INPROGRESS ); - if (action && action->free_on_release) + if ( action && action->free_on_release ) xfree(action); } @@ -1025,38 +1039,38 @@ int __init setup_irq(unsigned int irq, unsigned int irqflags, ASSERT(irqflags == 0); desc = irq_to_desc(irq); - - spin_lock_irqsave(&desc->lock,flags); + + spin_lock_irqsave(&desc->lock, flags); if ( desc->action != NULL ) { - spin_unlock_irqrestore(&desc->lock,flags); + spin_unlock_irqrestore(&desc->lock, flags); return -EBUSY; } - desc->action = new; + desc->action = new; desc->status &= ~IRQ_DISABLED; desc->handler->startup(desc); - spin_unlock_irqrestore(&desc->lock,flags); + spin_unlock_irqrestore(&desc->lock, flags); return 0; } - /* * HANDLING OF GUEST-BOUND PHYSICAL IRQS */ #define IRQ_MAX_GUESTS 7 -typedef struct { +typedef struct +{ u8 nr_guests; u8 in_flight; u8 shareable; u8 ack_type; -#define ACKTYPE_NONE 0 /* No final acknowledgement is required */ -#define ACKTYPE_UNMASK 1 /* Unmask PIC hardware (from any CPU) */ -#define ACKTYPE_EOI 2 /* EOI on the CPU that was interrupted */ +#define ACKTYPE_NONE 0 /* No final acknowledgement is required */ +#define ACKTYPE_UNMASK 1 /* Unmask PIC hardware (from any CPU) */ +#define ACKTYPE_EOI 2 /* EOI on the CPU that was interrupted */ cpumask_var_t cpu_eoi_map; /* CPUs that need to 
EOI this interrupt */ struct timer eoi_timer; struct domain *guest[IRQ_MAX_GUESTS]; @@ -1066,14 +1080,15 @@ typedef struct { * Stack of interrupts awaiting EOI on each CPU. These must be popped in * order, as only the current highest-priority pending irq can be EOIed. */ -struct pending_eoi { - u32 ready:1; /* Ready for EOI now? */ - u32 irq:23; /* irq of the vector */ - u32 vector:8; /* vector awaiting EOI */ +struct pending_eoi +{ + u32 ready : 1; /* Ready for EOI now? */ + u32 irq : 23; /* irq of the vector */ + u32 vector : 8; /* vector awaiting EOI */ }; static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_DYNAMIC_VECTORS]); -#define pending_eoi_sp(p) ((p)[NR_DYNAMIC_VECTORS-1].vector) +#define pending_eoi_sp(p) ((p)[NR_DYNAMIC_VECTORS - 1].vector) bool cpu_has_pending_apic_eoi(void) { @@ -1109,7 +1124,7 @@ static void irq_guest_eoi_timer_fn(void *data) unsigned long flags; spin_lock_irqsave(&desc->lock, flags); - + if ( !(desc->status & IRQ_GUEST) ) goto out; @@ -1130,7 +1145,7 @@ static void irq_guest_eoi_timer_fn(void *data) if ( action->in_flight != 0 ) goto out; - switch ( action->ack_type ) + switch (action->ack_type) { case ACKTYPE_UNMASK: if ( desc->handler->end ) @@ -1144,18 +1159,18 @@ static void irq_guest_eoi_timer_fn(void *data) break; } - out: +out: spin_unlock_irqrestore(&desc->lock, flags); } static void __do_IRQ_guest(int irq) { - struct irq_desc *desc = irq_to_desc(irq); + struct irq_desc *desc = irq_to_desc(irq); irq_guest_action_t *action = (irq_guest_action_t *)desc->action; - struct domain *d; - int i, sp; + struct domain *d; + int i, sp; struct pending_eoi *peoi = this_cpu(pending_eoi); - unsigned int vector = (u8)get_irq_regs()->entry_vector; + unsigned int vector = (u8)get_irq_regs()->entry_vector; if ( unlikely(action->nr_guests == 0) ) { @@ -1170,12 +1185,12 @@ static void __do_IRQ_guest(int irq) if ( action->ack_type == ACKTYPE_EOI ) { sp = pending_eoi_sp(peoi); - ASSERT((sp == 0) || (peoi[sp-1].vector < vector)); - ASSERT(sp < (NR_DYNAMIC_VECTORS-1)); + ASSERT((sp == 0) || (peoi[sp - 1].vector < vector)); + ASSERT(sp < (NR_DYNAMIC_VECTORS - 1)); peoi[sp].irq = irq; peoi[sp].vector = vector; peoi[sp].ready = 0; - pending_eoi_sp(peoi) = sp+1; + pending_eoi_sp(peoi) = sp + 1; cpumask_set_cpu(smp_processor_id(), action->cpu_eoi_map); } @@ -1205,8 +1220,8 @@ static void __do_IRQ_guest(int irq) * The descriptor is returned locked. This function is safe against changes * to the per-domain irq-to-vector mapping. */ -struct irq_desc *domain_spin_lock_irq_desc( - struct domain *d, int pirq, unsigned long *pflags) +struct irq_desc *domain_spin_lock_irq_desc(struct domain *d, int pirq, + unsigned long *pflags) { const struct pirq *info = pirq_info(d, pirq); @@ -1216,13 +1231,13 @@ struct irq_desc *domain_spin_lock_irq_desc( /* * Same with struct pirq already looked up. 
*/ -struct irq_desc *pirq_spin_lock_irq_desc( - const struct pirq *pirq, unsigned long *pflags) +struct irq_desc *pirq_spin_lock_irq_desc(const struct pirq *pirq, + unsigned long *pflags) { struct irq_desc *desc; unsigned long flags; - for ( ; ; ) + for ( ;; ) { int irq = pirq->arch.irq; @@ -1243,10 +1258,10 @@ struct irq_desc *pirq_spin_lock_irq_desc( } static int prepare_domain_irq_pirq(struct domain *d, int irq, int pirq, - struct pirq **pinfo) + struct pirq **pinfo) { - int err = radix_tree_insert(&d->arch.irq_pirq, irq, - radix_tree_int_to_ptr(0)); + int err = + radix_tree_insert(&d->arch.irq_pirq, irq, radix_tree_int_to_ptr(0)); struct pirq *info; if ( err && err != -EEXIST ) @@ -1265,18 +1280,16 @@ static int prepare_domain_irq_pirq(struct domain *d, int irq, int pirq, static void set_domain_irq_pirq(struct domain *d, int irq, struct pirq *pirq) { - radix_tree_replace_slot( - radix_tree_lookup_slot(&d->arch.irq_pirq, irq), - radix_tree_int_to_ptr(pirq->pirq)); + radix_tree_replace_slot(radix_tree_lookup_slot(&d->arch.irq_pirq, irq), + radix_tree_int_to_ptr(pirq->pirq)); pirq->arch.irq = irq; } static void clear_domain_irq_pirq(struct domain *d, int irq, struct pirq *pirq) { pirq->arch.irq = 0; - radix_tree_replace_slot( - radix_tree_lookup_slot(&d->arch.irq_pirq, irq), - radix_tree_int_to_ptr(0)); + radix_tree_replace_slot(radix_tree_lookup_slot(&d->arch.irq_pirq, irq), + radix_tree_int_to_ptr(0)); } static void cleanup_domain_irq_pirq(struct domain *d, int irq, @@ -1324,8 +1337,8 @@ void cleanup_domain_irq_mapping(struct domain *d) struct pirq *alloc_pirq_struct(struct domain *d) { - size_t sz = is_hvm_domain(d) ? sizeof(struct pirq) : - offsetof(struct pirq, arch.hvm); + size_t sz = is_hvm_domain(d) ? sizeof(struct pirq) + : offsetof(struct pirq, arch.hvm); struct pirq *pirq = xzalloc_bytes(sz); if ( pirq ) @@ -1340,7 +1353,7 @@ struct pirq *alloc_pirq_struct(struct domain *d) return pirq; } -void (pirq_cleanup_check)(struct pirq *pirq, struct domain *d) +void(pirq_cleanup_check)(struct pirq *pirq, struct domain *d) { /* * Check whether all fields have their default values, and delete @@ -1367,8 +1380,8 @@ void (pirq_cleanup_check)(struct pirq *pirq, struct domain *d) static void flush_ready_eoi(void) { struct pending_eoi *peoi = this_cpu(pending_eoi); - struct irq_desc *desc; - int irq, sp; + struct irq_desc *desc; + int irq, sp; ASSERT(!local_irq_is_enabled()); @@ -1385,21 +1398,19 @@ static void flush_ready_eoi(void) spin_unlock(&desc->lock); } - pending_eoi_sp(peoi) = sp+1; + pending_eoi_sp(peoi) = sp + 1; } static void __set_eoi_ready(struct irq_desc *desc) { irq_guest_action_t *action = (irq_guest_action_t *)desc->action; struct pending_eoi *peoi = this_cpu(pending_eoi); - int irq, sp; + int irq, sp; irq = desc - irq_desc; - if ( !(desc->status & IRQ_GUEST) || - (action->in_flight != 0) || - !cpumask_test_and_clear_cpu(smp_processor_id(), - action->cpu_eoi_map) ) + if ( !(desc->status & IRQ_GUEST) || (action->in_flight != 0) || + !cpumask_test_and_clear_cpu(smp_processor_id(), action->cpu_eoi_map) ) return; sp = pending_eoi_sp(peoi); @@ -1438,8 +1449,8 @@ void pirq_guest_eoi(struct pirq *pirq) void desc_guest_eoi(struct irq_desc *desc, struct pirq *pirq) { irq_guest_action_t *action; - cpumask_t cpu_eoi_map; - int irq; + cpumask_t cpu_eoi_map; + int irq; if ( !(desc->status & IRQ_GUEST) ) { @@ -1467,7 +1478,7 @@ void desc_guest_eoi(struct irq_desc *desc, struct pirq *pirq) } ASSERT(action->ack_type == ACKTYPE_EOI); - + cpumask_copy(&cpu_eoi_map, action->cpu_eoi_map); if ( 
__cpumask_test_and_clear_cpu(smp_processor_id(), &cpu_eoi_map) ) @@ -1508,7 +1519,7 @@ int pirq_guest_unmask(struct domain *d) static int pirq_acktype(struct domain *d, int pirq) { - struct irq_desc *desc; + struct irq_desc *desc; int irq; irq = domain_pirq_to_irq(d, pirq); @@ -1540,8 +1551,8 @@ static int pirq_acktype(struct domain *d, int pirq) * on which they were received. This is because we tickle the LAPIC to EOI. */ if ( !strcmp(desc->handler->typename, "IO-APIC-level") ) - return desc->handler->ack == irq_complete_move ? - ACKTYPE_EOI : ACKTYPE_UNMASK; + return desc->handler->ack == irq_complete_move ? ACKTYPE_EOI + : ACKTYPE_UNMASK; /* Legacy PIC interrupts can be acknowledged from any CPU. */ if ( !strcmp(desc->handler->typename, "XT-PIC") ) @@ -1555,10 +1566,10 @@ static int pirq_acktype(struct domain *d, int pirq) int pirq_shared(struct domain *d, int pirq) { - struct irq_desc *desc; + struct irq_desc *desc; irq_guest_action_t *action; - unsigned long flags; - int shared; + unsigned long flags; + int shared; desc = domain_spin_lock_irq_desc(d, pirq, &flags); if ( desc == NULL ) @@ -1574,15 +1585,15 @@ int pirq_shared(struct domain *d, int pirq) int pirq_guest_bind(struct vcpu *v, struct pirq *pirq, int will_share) { - unsigned int irq; - struct irq_desc *desc; + unsigned int irq; + struct irq_desc *desc; irq_guest_action_t *action, *newaction = NULL; - int rc = 0; + int rc = 0; WARN_ON(!spin_is_locked(&v->domain->event_lock)); BUG_ON(!local_irq_is_enabled()); - retry: +retry: desc = pirq_spin_lock_irq_desc(pirq, NULL); if ( desc == NULL ) { @@ -1611,8 +1622,7 @@ int pirq_guest_bind(struct vcpu *v, struct pirq *pirq, int will_share) zalloc_cpumask_var(&newaction->cpu_eoi_map) ) goto retry; xfree(newaction); - printk(XENLOG_G_INFO - "Cannot bind IRQ%d to dom%d. Out of memory.\n", + printk(XENLOG_G_INFO "Cannot bind IRQ%d to dom%d. Out of memory.\n", pirq->pirq, v->domain->domain_id); return -ENOMEM; } @@ -1621,10 +1631,10 @@ int pirq_guest_bind(struct vcpu *v, struct pirq *pirq, int will_share) desc->action = (struct irqaction *)action; newaction = NULL; - action->nr_guests = 0; - action->in_flight = 0; - action->shareable = will_share; - action->ack_type = pirq_acktype(v->domain, pirq->pirq); + action->nr_guests = 0; + action->in_flight = 0; + action->shareable = will_share; + action->ack_type = pirq_acktype(v->domain, pirq->pirq); init_timer(&action->eoi_timer, irq_guest_eoi_timer_fn, desc, 0); desc->status |= IRQ_GUEST; @@ -1638,8 +1648,8 @@ int pirq_guest_bind(struct vcpu *v, struct pirq *pirq, int will_share) } else if ( !will_share || !action->shareable ) { - printk(XENLOG_G_INFO "Cannot bind IRQ%d to dom%d. %s.\n", - pirq->pirq, v->domain->domain_id, + printk(XENLOG_G_INFO "Cannot bind IRQ%d to dom%d. %s.\n", pirq->pirq, + v->domain->domain_id, will_share ? "Others do not share" : "Will not share with others"); rc = -EBUSY; @@ -1661,7 +1671,7 @@ int pirq_guest_bind(struct vcpu *v, struct pirq *pirq, int will_share) if ( action->nr_guests == IRQ_MAX_GUESTS ) { printk(XENLOG_G_INFO "Cannot bind IRQ%d to dom%d. 
" - "Already at max share.\n", + "Already at max share.\n", pirq->pirq, v->domain->domain_id); rc = -EBUSY; goto unlock_out; @@ -1674,9 +1684,9 @@ int pirq_guest_bind(struct vcpu *v, struct pirq *pirq, int will_share) else clear_pirq_eoi(v->domain, pirq->pirq); - unlock_out: +unlock_out: spin_unlock_irq(&desc->lock); - out: +out: if ( newaction != NULL ) { free_cpumask_var(newaction->cpu_eoi_map); @@ -1685,13 +1695,13 @@ int pirq_guest_bind(struct vcpu *v, struct pirq *pirq, int will_share) return rc; } -static irq_guest_action_t *__pirq_guest_unbind( - struct domain *d, struct pirq *pirq, struct irq_desc *desc) +static irq_guest_action_t * +__pirq_guest_unbind(struct domain *d, struct pirq *pirq, struct irq_desc *desc) { - unsigned int irq; + unsigned int irq; irq_guest_action_t *action; - cpumask_t cpu_eoi_map; - int i; + cpumask_t cpu_eoi_map; + int i; action = (irq_guest_action_t *)desc->action; irq = desc - irq_desc; @@ -1708,22 +1718,20 @@ static irq_guest_action_t *__pirq_guest_unbind( for ( i = 0; (i < action->nr_guests) && (action->guest[i] != d); i++ ) continue; BUG_ON(i == action->nr_guests); - memmove(&action->guest[i], &action->guest[i+1], - (action->nr_guests-i-1) * sizeof(action->guest[0])); + memmove(&action->guest[i], &action->guest[i + 1], + (action->nr_guests - i - 1) * sizeof(action->guest[0])); action->nr_guests--; - switch ( action->ack_type ) + switch (action->ack_type) { case ACKTYPE_UNMASK: - if ( test_and_clear_bool(pirq->masked) && - (--action->in_flight == 0) && + if ( test_and_clear_bool(pirq->masked) && (--action->in_flight == 0) && desc->handler->end ) - desc->handler->end(desc, 0); + desc->handler->end(desc, 0); break; case ACKTYPE_EOI: /* NB. If #guests == 0 then we clear the eoi_map later on. */ - if ( test_and_clear_bool(pirq->masked) && - (--action->in_flight == 0) && + if ( test_and_clear_bool(pirq->masked) && (--action->in_flight == 0) && (action->nr_guests != 0) ) { cpumask_copy(&cpu_eoi_map, action->cpu_eoi_map); @@ -1768,7 +1776,7 @@ static irq_guest_action_t *__pirq_guest_unbind( BUG_ON(!cpumask_empty(action->cpu_eoi_map)); desc->action = NULL; - desc->status &= ~(IRQ_GUEST|IRQ_INPROGRESS); + desc->status &= ~(IRQ_GUEST | IRQ_INPROGRESS); desc->handler->shutdown(desc); /* Caller frees the old guest descriptor block. 
*/ @@ -1831,7 +1839,7 @@ static bool pirq_guest_force_unbind(struct domain *d, struct pirq *pirq) if ( unlikely(action == NULL) ) { dprintk(XENLOG_G_WARNING, "dom%d: pirq %d: desc->action is NULL!\n", - d->domain_id, pirq->pirq); + d->domain_id, pirq->pirq); goto out; } @@ -1843,7 +1851,7 @@ static bool pirq_guest_force_unbind(struct domain *d, struct pirq *pirq) bound = true; oldaction = __pirq_guest_unbind(d, pirq, desc); - out: +out: spin_unlock_irq(&desc->lock); if ( oldaction != NULL ) @@ -1856,11 +1864,10 @@ static bool pirq_guest_force_unbind(struct domain *d, struct pirq *pirq) return bound; } -static inline bool is_free_pirq(const struct domain *d, - const struct pirq *pirq) +static inline bool is_free_pirq(const struct domain *d, const struct pirq *pirq) { return !pirq || (!pirq->arch.irq && (!is_hvm_domain(d) || - pirq->arch.hvm.emuirq == IRQ_UNBOUND)); + pirq->arch.hvm.emuirq == IRQ_UNBOUND)); } int get_free_pirq(struct domain *d, int type) @@ -1909,8 +1916,7 @@ int get_free_pirqs(struct domain *d, unsigned int nr) #define MAX_MSI_IRQS 32 /* limited by MSI capability struct properties */ -int map_domain_pirq( - struct domain *d, int pirq, int irq, int type, void *data) +int map_domain_pirq(struct domain *d, int pirq, int irq, int type, void *data) { int ret = 0; int old_irq, old_pirq; @@ -1922,7 +1928,7 @@ int map_domain_pirq( ASSERT(spin_is_locked(&d->event_lock)); - if ( !irq_access_permitted(current->domain, irq)) + if ( !irq_access_permitted(current->domain, irq) ) return -EPERM; if ( pirq < 0 || pirq >= d->nr_pirqs || irq <= 0 || irq >= nr_irqs ) @@ -1935,8 +1941,7 @@ int map_domain_pirq( old_irq = domain_pirq_to_irq(d, pirq); old_pirq = domain_irq_to_pirq(d, irq); - if ( (old_irq > 0 && (old_irq != irq) ) || - (old_pirq && (old_pirq != pirq)) ) + if ( (old_irq > 0 && (old_irq != irq)) || (old_pirq && (old_pirq != pirq)) ) { dprintk(XENLOG_G_WARNING, "dom%d: pirq %d or irq %d already mapped (%d,%d)\n", @@ -1947,7 +1952,8 @@ int map_domain_pirq( ret = xsm_map_domain_irq(XSM_HOOK, d, irq, data); if ( ret ) { - dprintk(XENLOG_G_ERR, "dom%d: could not permit access to irq %d mapping to pirq %d\n", + dprintk(XENLOG_G_ERR, + "dom%d: could not permit access to irq %d mapping to pirq %d\n", d->domain_id, irq, pirq); return ret; } @@ -1959,7 +1965,7 @@ int map_domain_pirq( { printk(XENLOG_G_ERR "dom%d: could not permit access to IRQ%d (pirq %d)\n", - d->domain_id, irq, pirq); + d->domain_id, irq, pirq); return ret; } __set_bit(0, granted); @@ -2006,8 +2012,7 @@ int map_domain_pirq( if ( desc->handler != &no_irq_type ) { spin_unlock_irqrestore(&desc->lock, flags); - dprintk(XENLOG_G_ERR, "dom%d: irq %d in use\n", - d->domain_id, irq); + dprintk(XENLOG_G_ERR, "dom%d: irq %d in use\n", d->domain_id, irq); pci_disable_msi(msi_desc); msi_desc->irq = -1; msi_free_irq(msi_desc); @@ -2050,9 +2055,10 @@ int map_domain_pirq( if ( likely(!irq_access_permitted(d, irq)) ) { if ( irq_permit_access(d, irq) ) - printk(XENLOG_G_WARNING - "dom%d: could not permit access to IRQ%d (pirq %d)\n", - d->domain_id, irq, pirq); + printk( + XENLOG_G_WARNING + "dom%d: could not permit access to IRQ%d (pirq %d)\n", + d->domain_id, irq, pirq); else __set_bit(nr, granted); } @@ -2086,9 +2092,10 @@ int map_domain_pirq( { if ( irq >= 0 && test_bit(nr, granted) && irq_deny_access(d, irq) ) - printk(XENLOG_G_ERR - "dom%d: could not revoke access to IRQ%d (pirq %d)\n", - d->domain_id, irq, pirq); + printk( + XENLOG_G_ERR + "dom%d: could not revoke access to IRQ%d (pirq %d)\n", + d->domain_id, irq, pirq); if ( info && 
test_bit(nr, prepared) ) cleanup_domain_irq_pirq(d, irq, info); info = pirq_info(d, pirq + --nr); @@ -2115,7 +2122,7 @@ done: { if ( test_bit(0, prepared) ) cleanup_domain_irq_pirq(d, irq, info); - revoke: + revoke: if ( test_bit(0, granted) && irq_deny_access(d, irq) ) printk(XENLOG_G_ERR "dom%d: could not revoke access to IRQ%d (pirq %d)\n", @@ -2144,8 +2151,8 @@ int unmap_domain_pirq(struct domain *d, int pirq) info = pirq_info(d, pirq); if ( !info || (irq = info->arch.irq) <= 0 ) { - dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n", - d->domain_id, pirq); + dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n", d->domain_id, + pirq); ret = -EINVAL; goto done; } @@ -2165,8 +2172,8 @@ int unmap_domain_pirq(struct domain *d, int pirq) nr = msi_desc->msi.nvec; } - ret = xsm_unmap_domain_irq(XSM_HOOK, d, irq, - msi_desc ? msi_desc->dev : NULL); + ret = + xsm_unmap_domain_irq(XSM_HOOK, d, irq, msi_desc ? msi_desc->dev : NULL); if ( ret ) goto done; @@ -2180,7 +2187,7 @@ int unmap_domain_pirq(struct domain *d, int pirq) spin_lock_irqsave(&desc->lock, flags); - for ( i = 0; ; ) + for ( i = 0;; ) { BUG_ON(irq != domain_pirq_to_irq(d, pirq + i)); @@ -2206,7 +2213,7 @@ int unmap_domain_pirq(struct domain *d, int pirq) spin_unlock_irqrestore(&desc->lock, flags); if ( !forced_unbind ) - cleanup_domain_irq_pirq(d, irq, info); + cleanup_domain_irq_pirq(d, irq, info); rc = irq_deny_access(d, irq); if ( rc ) @@ -2221,8 +2228,8 @@ int unmap_domain_pirq(struct domain *d, int pirq) info = pirq_info(d, pirq + i); if ( info && (irq = info->arch.irq) > 0 ) break; - printk(XENLOG_G_ERR "dom%d: MSI pirq %d not mapped\n", - d->domain_id, pirq + i); + printk(XENLOG_G_ERR "dom%d: MSI pirq %d not mapped\n", d->domain_id, + pirq + i); } while ( ++i < nr ); if ( i == nr ) @@ -2254,10 +2261,10 @@ int unmap_domain_pirq(struct domain *d, int pirq) } } - if (msi_desc) + if ( msi_desc ) msi_free_irq(msi_desc); - done: +done: return ret; } @@ -2302,8 +2309,8 @@ static void dump_irqs(unsigned char key) spin_lock_irqsave(&desc->lock, flags); - printk(" IRQ:%4d affinity:%*pb vec:%02x type=%-15s status=%08x ", - irq, nr_cpu_ids, cpumask_bits(desc->affinity), desc->arch.vector, + printk(" IRQ:%4d affinity:%*pb vec:%02x type=%-15s status=%08x ", irq, + nr_cpu_ids, cpumask_bits(desc->affinity), desc->arch.vector, desc->handler->typename, desc->status); if ( ssid ) @@ -2320,8 +2327,7 @@ static void dump_irqs(unsigned char key) d = action->guest[i]; pirq = domain_irq_to_pirq(d, irq); info = pirq_info(d, pirq); - printk("%u:%3d(%c%c%c)", - d->domain_id, pirq, + printk("%u:%3d(%c%c%c)", d->domain_id, pirq, evtchn_port_is_pending(d, info->evtchn) ? 'P' : '-', evtchn_port_is_masked(d, info->evtchn) ? 'M' : '-', (info->masked ? 
'M' : '-')); @@ -2460,8 +2466,8 @@ int map_domain_emuirq_pirq(struct domain *d, int pirq, int emuirq) if ( !is_hvm_domain(d) ) return -EINVAL; - if ( pirq < 0 || pirq >= d->nr_pirqs || - emuirq == IRQ_UNBOUND || emuirq >= (int) nr_irqs ) + if ( pirq < 0 || pirq >= d->nr_pirqs || emuirq == IRQ_UNBOUND || + emuirq >= (int)nr_irqs ) { dprintk(XENLOG_G_ERR, "dom%d: invalid pirq %d or emuirq %d\n", d->domain_id, pirq, emuirq); @@ -2472,11 +2478,12 @@ int map_domain_emuirq_pirq(struct domain *d, int pirq, int emuirq) if ( emuirq != IRQ_PT ) old_pirq = domain_emuirq_to_pirq(d, emuirq); - if ( (old_emuirq != IRQ_UNBOUND && (old_emuirq != emuirq) ) || + if ( (old_emuirq != IRQ_UNBOUND && (old_emuirq != emuirq)) || (old_pirq != IRQ_UNBOUND && (old_pirq != pirq)) ) { - dprintk(XENLOG_G_WARNING, "dom%d: pirq %d or emuirq %d already mapped\n", - d->domain_id, pirq, emuirq); + dprintk(XENLOG_G_WARNING, + "dom%d: pirq %d or emuirq %d already mapped\n", d->domain_id, + pirq, emuirq); return 0; } @@ -2490,14 +2497,13 @@ int map_domain_emuirq_pirq(struct domain *d, int pirq, int emuirq) int err = radix_tree_insert(&d->arch.hvm.emuirq_pirq, emuirq, radix_tree_int_to_ptr(pirq)); - switch ( err ) + switch (err) { case 0: break; case -EEXIST: radix_tree_replace_slot( - radix_tree_lookup_slot( - &d->arch.hvm.emuirq_pirq, emuirq), + radix_tree_lookup_slot(&d->arch.hvm.emuirq_pirq, emuirq), radix_tree_int_to_ptr(pirq)); break; default: @@ -2526,8 +2532,8 @@ int unmap_domain_pirq_emuirq(struct domain *d, int pirq) emuirq = domain_pirq_to_emuirq(d, pirq); if ( emuirq == IRQ_UNBOUND ) { - dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n", - d->domain_id, pirq); + dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n", d->domain_id, + pirq); ret = -EINVAL; goto done; } @@ -2541,7 +2547,7 @@ int unmap_domain_pirq_emuirq(struct domain *d, int pirq) if ( emuirq != IRQ_PT ) radix_tree_delete(&d->arch.hvm.emuirq_pirq, emuirq); - done: +done: return ret; } @@ -2653,7 +2659,7 @@ int allocate_and_map_gsi_pirq(struct domain *d, int index, int *pirq_p) if ( !ret ) *pirq_p = pirq; - done: +done: spin_unlock(&d->event_lock); return ret; @@ -2664,7 +2670,7 @@ int allocate_and_map_msi_pirq(struct domain *d, int index, int *pirq_p, { int irq, pirq, ret; - switch ( type ) + switch (type) { case MAP_PIRQ_TYPE_MSI: if ( !msi->table_base ) @@ -2672,7 +2678,7 @@ int allocate_and_map_msi_pirq(struct domain *d, int index, int *pirq_p, irq = index; if ( irq == -1 ) { - case MAP_PIRQ_TYPE_MULTI_MSI: + case MAP_PIRQ_TYPE_MULTI_MSI: irq = create_irq(NUMA_NO_NODE); } @@ -2685,8 +2691,8 @@ int allocate_and_map_msi_pirq(struct domain *d, int index, int *pirq_p, break; default: - dprintk(XENLOG_G_ERR, "dom%d: wrong pirq type %x\n", - d->domain_id, type); + dprintk(XENLOG_G_ERR, "dom%d: wrong pirq type %x\n", d->domain_id, + type); ASSERT_UNREACHABLE(); return -EINVAL; } @@ -2707,16 +2713,16 @@ int allocate_and_map_msi_pirq(struct domain *d, int index, int *pirq_p, if ( !ret ) *pirq_p = pirq; - done: +done: spin_unlock(&d->event_lock); pcidevs_unlock(); if ( ret ) { - switch ( type ) + switch (type) { case MAP_PIRQ_TYPE_MSI: if ( index == -1 ) - case MAP_PIRQ_TYPE_MULTI_MSI: + case MAP_PIRQ_TYPE_MULTI_MSI: destroy_irq(irq); break; } diff --git a/xen/arch/x86/livepatch.c b/xen/arch/x86/livepatch.c index 406eb910cc..798b05c18e 100644 --- a/xen/arch/x86/livepatch.c +++ b/xen/arch/x86/livepatch.c @@ -119,11 +119,9 @@ void arch_livepatch_unmask(void) int arch_livepatch_verify_elf(const struct livepatch_elf *elf) { - const Elf_Ehdr *hdr = elf->hdr; - if ( 
hdr->e_machine != EM_X86_64 || - hdr->e_ident[EI_CLASS] != ELFCLASS64 || + if ( hdr->e_machine != EM_X86_64 || hdr->e_ident[EI_CLASS] != ELFCLASS64 || hdr->e_ident[EI_DATA] != ELFDATA2LSB ) { dprintk(XENLOG_ERR, LIVEPATCH "%s: Unsupported ELF Machine type!\n", @@ -178,27 +176,30 @@ int arch_livepatch_perform_rela(struct livepatch_elf *elf, } else if ( symndx >= elf->nsym ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Relative relocation wants symbol@%u which is past end!\n", - elf->name, symndx); + dprintk( + XENLOG_ERR, + LIVEPATCH + "%s: Relative relocation wants symbol@%u which is past end!\n", + elf->name, symndx); return -EINVAL; } else if ( !elf->sym[symndx].sym ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: No symbol@%u\n", - elf->name, symndx); + dprintk(XENLOG_ERR, LIVEPATCH "%s: No symbol@%u\n", elf->name, + symndx); return -EINVAL; } val = r->r_addend + elf->sym[symndx].sym->st_value; - switch ( ELF64_R_TYPE(r->r_info) ) + switch (ELF64_R_TYPE(r->r_info)) { case R_X86_64_NONE: break; case R_X86_64_64: if ( r->r_offset >= base->sec->sh_size || - (r->r_offset + sizeof(uint64_t)) > base->sec->sh_size ) + (r->r_offset + sizeof(uint64_t)) > base->sec->sh_size ) goto bad_offset; *(uint64_t *)dest = val; @@ -215,14 +216,16 @@ int arch_livepatch_perform_rela(struct livepatch_elf *elf, */ case R_X86_64_PC32: if ( r->r_offset >= base->sec->sh_size || - (r->r_offset + sizeof(uint32_t)) > base->sec->sh_size ) + (r->r_offset + sizeof(uint32_t)) > base->sec->sh_size ) goto bad_offset; val -= (uint64_t)dest; *(int32_t *)dest = val; if ( (int64_t)val != *(int32_t *)dest ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Overflow in relocation %u in %s for %s!\n", + dprintk(XENLOG_ERR, + LIVEPATCH + "%s: Overflow in relocation %u in %s for %s!\n", elf->name, i, rela->name, base->name); return -EOVERFLOW; } @@ -237,8 +240,9 @@ int arch_livepatch_perform_rela(struct livepatch_elf *elf, return 0; - bad_offset: - dprintk(XENLOG_ERR, LIVEPATCH "%s: Relative relocation offset is past %s section!\n", +bad_offset: + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Relative relocation offset is past %s section!\n", elf->name, base->name); return -EINVAL; } diff --git a/xen/arch/x86/machine_kexec.c b/xen/arch/x86/machine_kexec.c index b70d5a6a86..29a01f8e96 100644 --- a/xen/arch/x86/machine_kexec.c +++ b/xen/arch/x86/machine_kexec.c @@ -106,7 +106,7 @@ int machine_kexec_load(struct kexec_image *image) void *code_page; int ret; - switch ( image->arch ) + switch (image->arch) { case EM_386: case EM_X86_64: @@ -180,23 +180,23 @@ void machine_kexec(struct kexec_image *image) reloc_flags |= KEXEC_RELOC_FLAG_COMPAT; kexec_reloc(page_to_maddr(image->control_code_page), - page_to_maddr(image->aux_page), - image->head, image->entry_maddr, reloc_flags); + page_to_maddr(image->aux_page), image->head, image->entry_maddr, + reloc_flags); } int machine_kexec_get(xen_kexec_range_t *range) { - if (range->range != KEXEC_RANGE_MA_XEN) - return -EINVAL; - return machine_kexec_get_xen(range); + if ( range->range != KEXEC_RANGE_MA_XEN ) + return -EINVAL; + return machine_kexec_get_xen(range); } void arch_crash_save_vmcoreinfo(void) { - VMCOREINFO_SYMBOL(dom_xen); - VMCOREINFO_SYMBOL(dom_io); + VMCOREINFO_SYMBOL(dom_xen); + VMCOREINFO_SYMBOL(dom_io); - VMCOREINFO_SYMBOL_ALIAS(pgd_l4, idle_pg_table); + VMCOREINFO_SYMBOL_ALIAS(pgd_l4, idle_pg_table); } /* diff --git a/xen/arch/x86/microcode.c b/xen/arch/x86/microcode.c index 4163f50bb7..9c46c1c3b3 100644 --- a/xen/arch/x86/microcode.c +++ b/xen/arch/x86/microcode.c @@ -49,7 +49,8 @@ static bool_t __initdata 
ucode_mod_forced; * and the size of said blob. It is allocated from Xen's heap * memory. */ -struct ucode_mod_blob { +struct ucode_mod_blob +{ void *data; size_t size; }; @@ -77,7 +78,7 @@ static int __init parse_ucode(const char *s) const char *q = NULL; if ( ucode_mod_forced ) /* Forced by EFI */ - return 0; + return 0; if ( !strncmp(s, "scan", 4) ) ucode_scan = 1; @@ -93,9 +94,8 @@ custom_param("ucode", parse_ucode); */ #define MAX_EARLY_CPIO_MICROCODE (8 << 20) -void __init microcode_scan_module( - unsigned long *module_map, - const multiboot_info_t *mbi) +void __init microcode_scan_module(unsigned long *module_map, + const multiboot_info_t *mbi) { module_t *mod = (module_t *)__va(mbi->mods_addr); uint64_t *_blob_start; @@ -128,8 +128,8 @@ void __init microcode_scan_module( _blob_size = mod[i].mod_end; if ( !_blob_start ) { - printk("Could not map multiboot module #%d (size: %ld)\n", - i, _blob_size); + printk("Could not map multiboot module #%d (size: %ld)\n", i, + _blob_size); continue; } cd.data = NULL; @@ -137,23 +137,24 @@ void __init microcode_scan_module( cd = find_cpio_data(p, _blob_start, _blob_size, &offset /* ignore */); if ( cd.data ) { - /* - * This is an arbitrary check - it would be sad if the blob - * consumed most of the memory and did not allow guests - * to launch. - */ - if ( cd.size > MAX_EARLY_CPIO_MICROCODE ) - { - printk("Multiboot %d microcode payload too big! (%ld, we can do %d)\n", - i, cd.size, MAX_EARLY_CPIO_MICROCODE); - goto err; - } - ucode_blob.size = cd.size; - ucode_blob.data = xmalloc_bytes(cd.size); - if ( !ucode_blob.data ) - cd.data = NULL; - else - memcpy(ucode_blob.data, cd.data, cd.size); + /* + * This is an arbitrary check - it would be sad if the blob + * consumed most of the memory and did not allow guests + * to launch. + */ + if ( cd.size > MAX_EARLY_CPIO_MICROCODE ) + { + printk("Multiboot %d microcode payload too big! 
(%ld, we can " + "do %d)\n", + i, cd.size, MAX_EARLY_CPIO_MICROCODE); + goto err; + } + ucode_blob.size = cd.size; + ucode_blob.data = xmalloc_bytes(cd.size); + if ( !ucode_blob.data ) + cd.data = NULL; + else + memcpy(ucode_blob.data, cd.data, cd.size); } bootstrap_map(NULL); if ( cd.data ) @@ -163,9 +164,8 @@ void __init microcode_scan_module( err: bootstrap_map(NULL); } -void __init microcode_grab_module( - unsigned long *module_map, - const multiboot_info_t *mbi) +void __init microcode_grab_module(unsigned long *module_map, + const multiboot_info_t *mbi) { module_t *mod = (module_t *)__va(mbi->mods_addr); @@ -186,7 +186,8 @@ static DEFINE_SPINLOCK(microcode_mutex); DEFINE_PER_CPU(struct ucode_cpu_info, ucode_cpu_info); -struct microcode_info { +struct microcode_info +{ unsigned int cpu; uint32_t buffer_size; int error; @@ -245,7 +246,7 @@ int microcode_resume_cpu(unsigned int cpu) uci->cpu_sig = nsig; err = -EIO; - for_each_online_cpu ( cpu2 ) + for_each_online_cpu (cpu2) { uci = &per_cpu(ucode_cpu_info, cpu2); if ( uci->mc.mc_valid && @@ -362,12 +363,12 @@ static int __init microcode_init(void) } __initcall(microcode_init); -static int microcode_percpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int microcode_percpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - switch ( action ) + switch (action) { case CPU_DEAD: microcode_fini_cpu(cpu); diff --git a/xen/arch/x86/microcode_amd.c b/xen/arch/x86/microcode_amd.c index 7a854c012f..277153455e 100644 --- a/xen/arch/x86/microcode_amd.c +++ b/xen/arch/x86/microcode_amd.c @@ -28,11 +28,12 @@ #define pr_debug(x...) ((void)0) -#define CONT_HDR_SIZE 12 -#define SECTION_HDR_SIZE 8 -#define PATCH_HDR_SIZE 32 +#define CONT_HDR_SIZE 12 +#define SECTION_HDR_SIZE 8 +#define PATCH_HDR_SIZE 32 -struct __packed equiv_cpu_entry { +struct __packed equiv_cpu_entry +{ uint32_t installed_cpu; uint32_t fixed_errata_mask; uint32_t fixed_errata_compare; @@ -40,35 +41,38 @@ struct __packed equiv_cpu_entry { uint16_t reserved; }; -struct __packed microcode_header_amd { +struct __packed microcode_header_amd +{ uint32_t data_code; uint32_t patch_id; - uint8_t mc_patch_data_id[2]; - uint8_t mc_patch_data_len; - uint8_t init_flag; + uint8_t mc_patch_data_id[2]; + uint8_t mc_patch_data_len; + uint8_t init_flag; uint32_t mc_patch_data_checksum; uint32_t nb_dev_id; uint32_t sb_dev_id; uint16_t processor_rev_id; - uint8_t nb_rev_id; - uint8_t sb_rev_id; - uint8_t bios_api_rev; - uint8_t reserved1[3]; + uint8_t nb_rev_id; + uint8_t sb_rev_id; + uint8_t bios_api_rev; + uint8_t reserved1[3]; uint32_t match_reg[8]; }; -#define UCODE_MAGIC 0x00414d44 +#define UCODE_MAGIC 0x00414d44 #define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000 -#define UCODE_UCODE_TYPE 0x00000001 +#define UCODE_UCODE_TYPE 0x00000001 -struct microcode_amd { +struct microcode_amd +{ void *mpb; size_t mpb_size; struct equiv_cpu_entry *equiv_cpu_table; size_t equiv_cpu_table_size; }; -struct mpbhdr { +struct mpbhdr +{ uint32_t type; uint32_t len; uint8_t data[]; @@ -86,15 +90,14 @@ static int collect_cpu_info(unsigned int cpu, struct cpu_signature *csig) if ( (c->x86_vendor != X86_VENDOR_AMD) || (c->x86 < 0x10) ) { - printk(KERN_ERR "microcode: CPU%d not a capable AMD processor\n", - cpu); + printk(KERN_ERR "microcode: CPU%d not a capable AMD processor\n", cpu); return -EINVAL; } rdmsrl(MSR_AMD_PATCHLEVEL, csig->rev); - pr_debug("microcode: CPU%d collect_cpu_info: patch_id=%#x\n", - cpu, csig->rev); + 
pr_debug("microcode: CPU%d collect_cpu_info: patch_id=%#x\n", cpu, + csig->rev); return 0; } @@ -184,7 +187,8 @@ static bool_t microcode_fits(const struct microcode_amd *mc_amd, return 0; } - pr_debug("microcode: CPU%d found a matching microcode update with version %#x (current=%#x)\n", + pr_debug("microcode: CPU%d found a matching microcode update with version " + "%#x (current=%#x)\n", cpu, mc_header->patch_id, uci->cpu_sig.rev); return 1; @@ -228,7 +232,8 @@ static int apply_microcode(unsigned int cpu) if ( hw_err || (rev != hdr->patch_id) ) { printk(KERN_ERR "microcode: CPU%d update from revision " - "%#x to %#x failed\n", cpu, rev, hdr->patch_id); + "%#x to %#x failed\n", + cpu, rev, hdr->patch_id); return -EIO; } @@ -240,11 +245,9 @@ static int apply_microcode(unsigned int cpu) return 0; } -static int get_ucode_from_buffer_amd( - struct microcode_amd *mc_amd, - const void *buf, - size_t bufsize, - size_t *offset) +static int get_ucode_from_buffer_amd(struct microcode_amd *mc_amd, + const void *buf, size_t bufsize, + size_t *offset) { const struct mpbhdr *mpbuf = buf + *offset; @@ -281,7 +284,8 @@ static int get_ucode_from_buffer_amd( } memcpy(mc_amd->mpb, mpbuf->data, mpbuf->len); - pr_debug("microcode: CPU%d size %zu, block size %u offset %zu equivID %#x rev %#x\n", + pr_debug("microcode: CPU%d size %zu, block size %u offset %zu equivID %#x " + "rev %#x\n", raw_smp_processor_id(), bufsize, mpbuf->len, *offset, ((struct microcode_header_amd *)mc_amd->mpb)->processor_rev_id, ((struct microcode_header_amd *)mc_amd->mpb)->patch_id); @@ -291,31 +295,32 @@ static int get_ucode_from_buffer_amd( return 0; } -static int install_equiv_cpu_table( - struct microcode_amd *mc_amd, - const void *data, - size_t *offset) +static int install_equiv_cpu_table(struct microcode_amd *mc_amd, + const void *data, size_t *offset) { const struct mpbhdr *mpbuf = data + *offset + 4; - *offset += mpbuf->len + CONT_HDR_SIZE; /* add header length */ + *offset += mpbuf->len + CONT_HDR_SIZE; /* add header length */ if ( mpbuf->type != UCODE_EQUIV_CPU_TABLE_TYPE ) { - printk(KERN_ERR "microcode: Wrong microcode equivalent cpu table type field\n"); + printk(KERN_ERR + "microcode: Wrong microcode equivalent cpu table type field\n"); return -EINVAL; } if ( mpbuf->len == 0 ) { - printk(KERN_ERR "microcode: Wrong microcode equivalent cpu table length\n"); + printk(KERN_ERR + "microcode: Wrong microcode equivalent cpu table length\n"); return -EINVAL; } mc_amd->equiv_cpu_table = xmalloc_bytes(mpbuf->len); if ( !mc_amd->equiv_cpu_table ) { - printk(KERN_ERR "microcode: Cannot allocate memory for equivalent cpu table\n"); + printk(KERN_ERR + "microcode: Cannot allocate memory for equivalent cpu table\n"); return -ENOMEM; } @@ -325,9 +330,10 @@ static int install_equiv_cpu_table( return 0; } -static int container_fast_forward(const void *data, size_t size_left, size_t *offset) +static int container_fast_forward(const void *data, size_t size_left, + size_t *offset) { - for ( ; ; ) + for ( ;; ) { size_t size; const uint32_t *header; @@ -359,7 +365,7 @@ static int container_fast_forward(const void *data, size_t size_left, size_t *of /* * The 'final_levels' of patch ids have been obtained empirically. - * Refer bug https://bugzilla.suse.com/show_bug.cgi?id=913996 + * Refer bug https://bugzilla.suse.com/show_bug.cgi?id=913996 * for details of the issue. The short version is that people * using certain Fam10h systems noticed system hang issues when * trying to update microcode levels beyond the patch IDs below. 
@@ -368,11 +374,7 @@ static int container_fast_forward(const void *data, size_t size_left, size_t *of * due to hardware issues. Therefore, we need to abort microcode * update process if we hit any of these levels. */ -static const unsigned int final_levels[] = { - 0x01000098, - 0x0100009f, - 0x010000af -}; +static const unsigned int final_levels[] = {0x01000098, 0x0100009f, 0x010000af}; static bool_t check_final_patch_levels(unsigned int cpu) { @@ -419,8 +421,8 @@ static int cpu_request_microcode(unsigned int cpu, const void *buf, if ( check_final_patch_levels(cpu) ) { - printk(XENLOG_INFO - "microcode: Cannot update microcode patch on the cpu as we hit a final level\n"); + printk(XENLOG_INFO "microcode: Cannot update microcode patch on the " + "cpu as we hit a final level\n"); error = -EPERM; goto out; } @@ -428,7 +430,8 @@ static int cpu_request_microcode(unsigned int cpu, const void *buf, mc_amd = xmalloc(struct microcode_amd); if ( !mc_amd ) { - printk(KERN_ERR "microcode: Cannot allocate memory for microcode patch\n"); + printk(KERN_ERR + "microcode: Cannot allocate memory for microcode patch\n"); error = -ENOMEM; goto out; } @@ -443,7 +446,8 @@ static int cpu_request_microcode(unsigned int cpu, const void *buf, error = install_equiv_cpu_table(mc_amd, buf, &offset); if ( error ) { - printk(KERN_ERR "microcode: installing equivalent cpu table failed\n"); + printk(KERN_ERR + "microcode: installing equivalent cpu table failed\n"); break; } @@ -470,9 +474,11 @@ static int cpu_request_microcode(unsigned int cpu, const void *buf, } if ( error ) { - printk(KERN_ERR "microcode: CPU%d incorrect or corrupt container file\n" + printk(KERN_ERR + "microcode: CPU%d incorrect or corrupt container file\n" "microcode: Failed to update patch level. " - "Current lvl:%#x\n", cpu, uci->cpu_sig.rev); + "Current lvl:%#x\n", + cpu, uci->cpu_sig.rev); break; } } @@ -540,8 +546,8 @@ static int cpu_request_microcode(unsigned int cpu, const void *buf, */ if ( applied_offset ) { - save_error = get_ucode_from_buffer_amd( - mc_amd, buf, bufsize, &applied_offset); + save_error = + get_ucode_from_buffer_amd(mc_amd, buf, bufsize, &applied_offset); if ( save_error ) error = save_error; @@ -555,7 +561,7 @@ static int cpu_request_microcode(unsigned int cpu, const void *buf, else xfree(mc_old); - out: +out: #if CONFIG_HVM svm_host_osvw_init(); #endif @@ -634,11 +640,11 @@ static int start_update(void) } static const struct microcode_ops microcode_amd_ops = { - .microcode_resume_match = microcode_resume_match, - .cpu_request_microcode = cpu_request_microcode, - .collect_cpu_info = collect_cpu_info, - .apply_microcode = apply_microcode, - .start_update = start_update, + .microcode_resume_match = microcode_resume_match, + .cpu_request_microcode = cpu_request_microcode, + .collect_cpu_info = collect_cpu_info, + .apply_microcode = apply_microcode, + .start_update = start_update, }; int __init microcode_init_amd(void) diff --git a/xen/arch/x86/microcode_intel.c b/xen/arch/x86/microcode_intel.c index 9657575c29..37142e33e1 100644 --- a/xen/arch/x86/microcode_intel.c +++ b/xen/arch/x86/microcode_intel.c @@ -34,7 +34,8 @@ #define pr_debug(x...) 
((void)0) -struct microcode_header_intel { +struct microcode_header_intel +{ unsigned int hdrver; unsigned int rev; unsigned int date; @@ -47,42 +48,46 @@ struct microcode_header_intel { unsigned int reserved[3]; }; -struct microcode_intel { +struct microcode_intel +{ struct microcode_header_intel hdr; unsigned int bits[0]; }; /* microcode format is extended from prescott processors */ -struct extended_signature { +struct extended_signature +{ unsigned int sig; unsigned int pf; unsigned int cksum; }; -struct extended_sigtable { +struct extended_sigtable +{ unsigned int count; unsigned int cksum; unsigned int reserved[3]; struct extended_signature sigs[0]; }; -#define DEFAULT_UCODE_DATASIZE (2000) -#define MC_HEADER_SIZE (sizeof(struct microcode_header_intel)) +#define DEFAULT_UCODE_DATASIZE (2000) +#define MC_HEADER_SIZE (sizeof(struct microcode_header_intel)) #define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) -#define EXT_HEADER_SIZE (sizeof(struct extended_sigtable)) -#define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature)) -#define DWSIZE (sizeof(u32)) -#define get_totalsize(mc) \ - (((struct microcode_intel *)mc)->hdr.totalsize ? \ - ((struct microcode_intel *)mc)->hdr.totalsize : \ - DEFAULT_UCODE_TOTALSIZE) - -#define get_datasize(mc) \ - (((struct microcode_intel *)mc)->hdr.datasize ? \ - ((struct microcode_intel *)mc)->hdr.datasize : DEFAULT_UCODE_DATASIZE) +#define EXT_HEADER_SIZE (sizeof(struct extended_sigtable)) +#define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature)) +#define DWSIZE (sizeof(u32)) +#define get_totalsize(mc) \ + (((struct microcode_intel *)mc)->hdr.totalsize \ + ? ((struct microcode_intel *)mc)->hdr.totalsize \ + : DEFAULT_UCODE_TOTALSIZE) + +#define get_datasize(mc) \ + (((struct microcode_intel *)mc)->hdr.datasize \ + ? ((struct microcode_intel *)mc)->hdr.datasize \ + : DEFAULT_UCODE_DATASIZE) #define sigmatch(s1, s2, p1, p2) \ - (((s1) == (s2)) && (((p1) & (p2)) || (((p1) == 0) && ((p2) == 0)))) + (((s1) == (s2)) && (((p1) & (p2)) || (((p1) == 0) && ((p2) == 0)))) #define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE) @@ -101,7 +106,8 @@ static int collect_cpu_info(unsigned int cpu_num, struct cpu_signature *csig) if ( (c->x86_vendor != X86_VENDOR_INTEL) || (c->x86 < 6) ) { printk(KERN_ERR "microcode: CPU%d not a capable Intel " - "processor\n", cpu_num); + "processor\n", + cpu_num); return -1; } @@ -127,9 +133,10 @@ static int collect_cpu_info(unsigned int cpu_num, struct cpu_signature *csig) return 0; } -static inline int microcode_update_match( - unsigned int cpu_num, const struct microcode_header_intel *mc_header, - int sig, int pf) +static inline int +microcode_update_match(unsigned int cpu_num, + const struct microcode_header_intel *mc_header, int sig, + int pf) { struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu_num); @@ -151,14 +158,14 @@ static int microcode_sanity_check(void *mc) if ( (data_size + MC_HEADER_SIZE) > total_size ) { printk(KERN_ERR "microcode: error! " - "Bad data size in microcode data file\n"); + "Bad data size in microcode data file\n"); return -EINVAL; } if ( (mc_header->ldrver != 1) || (mc_header->hdrver != 1) ) { printk(KERN_ERR "microcode: error! 
" - "Unknown microcode update format\n"); + "Unknown microcode update format\n"); return -EINVAL; } ext_table_size = total_size - (MC_HEADER_SIZE + data_size); @@ -168,14 +175,14 @@ static int microcode_sanity_check(void *mc) ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE) ) { printk(KERN_ERR "microcode: error! " - "Small exttable size in microcode data file\n"); + "Small exttable size in microcode data file\n"); return -EINVAL; } ext_header = mc + MC_HEADER_SIZE + data_size; if ( ext_table_size != exttable_size(ext_header) ) { printk(KERN_ERR "microcode: error! " - "Bad exttable size in microcode data file\n"); + "Bad exttable size in microcode data file\n"); return -EFAULT; } ext_sigcount = ext_header->count; @@ -193,7 +200,7 @@ static int microcode_sanity_check(void *mc) if ( ext_table_sum ) { printk(KERN_WARNING "microcode: aborting, " - "bad extended signature table checksum\n"); + "bad extended signature table checksum\n"); return -EINVAL; } } @@ -213,11 +220,9 @@ static int microcode_sanity_check(void *mc) /* check extended signature checksum */ for ( i = 0; i < ext_sigcount; i++ ) { - ext_sig = (void *)ext_header + EXT_HEADER_SIZE + - EXT_SIGNATURE_SIZE * i; - sum = orig_sum - - (mc_header->sig + mc_header->pf + mc_header->cksum) - + (ext_sig->sig + ext_sig->pf + ext_sig->cksum); + ext_sig = (void *)ext_header + EXT_HEADER_SIZE + EXT_SIGNATURE_SIZE * i; + sum = orig_sum - (mc_header->sig + mc_header->pf + mc_header->cksum) + + (ext_sig->sig + ext_sig->pf + ext_sig->cksum); if ( sum ) { printk(KERN_ERR "microcode: aborting, bad checksum\n"); @@ -242,8 +247,7 @@ static int get_matching_microcode(const void *mc, unsigned int cpu) struct extended_signature *ext_sig; void *new_mc; - if ( microcode_update_match(cpu, mc_header, - mc_header->sig, mc_header->pf) ) + if ( microcode_update_match(cpu, mc_header, mc_header->sig, mc_header->pf) ) goto find; if ( total_size <= (get_datasize(mc_header) + MC_HEADER_SIZE) ) @@ -254,13 +258,12 @@ static int get_matching_microcode(const void *mc, unsigned int cpu) ext_sig = (void *)ext_header + EXT_HEADER_SIZE; for ( i = 0; i < ext_sigcount; i++ ) { - if ( microcode_update_match(cpu, mc_header, - ext_sig->sig, ext_sig->pf) ) + if ( microcode_update_match(cpu, mc_header, ext_sig->sig, ext_sig->pf) ) goto find; ext_sig++; } return 0; - find: +find: pr_debug("microcode: CPU%d found a matching microcode update with" " version %#x (current=%#x)\n", cpu, mc_header->rev, uci->cpu_sig.rev); @@ -309,12 +312,12 @@ static int apply_microcode(unsigned int cpu) if ( val[1] != uci->mc.mc_intel->hdr.rev ) { printk(KERN_ERR "microcode: CPU%d update from revision " - "%#x to %#x failed. Resulting revision is %#x.\n", cpu_num, - uci->cpu_sig.rev, uci->mc.mc_intel->hdr.rev, val[1]); + "%#x to %#x failed. 
Resulting revision is %#x.\n", + cpu_num, uci->cpu_sig.rev, uci->mc.mc_intel->hdr.rev, val[1]); return -EIO; } printk(KERN_INFO "microcode: CPU%d updated from revision " - "%#x to %#x, date = %04x-%02x-%02x \n", + "%#x to %#x, date = %04x-%02x-%02x \n", cpu_num, uci->cpu_sig.rev, val[1], uci->mc.mc_intel->hdr.date & 0xffff, uci->mc.mc_intel->hdr.date >> 24, @@ -352,8 +355,7 @@ static long get_next_ucode_from_buffer(void **mc, const u8 *buf, return offset + total_size; } -static int cpu_request_microcode(unsigned int cpu, const void *buf, - size_t size) +static int cpu_request_microcode(unsigned int cpu, const void *buf, size_t size) { long offset = 0; int error = 0; @@ -399,10 +401,10 @@ static int microcode_resume_match(unsigned int cpu, const void *mc) } static const struct microcode_ops microcode_intel_ops = { - .microcode_resume_match = microcode_resume_match, - .cpu_request_microcode = cpu_request_microcode, - .collect_cpu_info = collect_cpu_info, - .apply_microcode = apply_microcode, + .microcode_resume_match = microcode_resume_match, + .cpu_request_microcode = cpu_request_microcode, + .collect_cpu_info = collect_cpu_info, + .apply_microcode = apply_microcode, }; int __init microcode_init_intel(void) diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c index 3557cd1178..d4d6285387 100644 --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -153,8 +153,8 @@ #define virt_to_mfn(v) _mfn(__virt_to_mfn(v)) /* Mapping of the fixmap space needed early. */ -l1_pgentry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE) - l1_fixmap[L1_PAGETABLE_ENTRIES]; +l1_pgentry_t __section(".bss.page_aligned") + __aligned(PAGE_SIZE) l1_fixmap[L1_PAGETABLE_ENTRIES]; paddr_t __read_mostly mem_hotplug; @@ -175,18 +175,17 @@ static uint32_t base_disallow_mask; #define L2_DISALLOW_MASK base_disallow_mask -#define l3_disallow_mask(d) (!is_pv_32bit_domain(d) ? \ - base_disallow_mask : 0xFFFFF198U) +#define l3_disallow_mask(d) \ + (!is_pv_32bit_domain(d) ? base_disallow_mask : 0xFFFFF198U) #define L4_DISALLOW_MASK (base_disallow_mask) -#define l1_disallow_mask(d) \ - ((d != dom_io) && \ - (rangeset_is_empty((d)->iomem_caps) && \ - rangeset_is_empty((d)->arch.ioport_caps) && \ - !has_arch_pdevs(d) && \ - is_pv_domain(d)) ? \ - L1_DISALLOW_MASK : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS)) +#define l1_disallow_mask(d) \ + ((d != dom_io) && (rangeset_is_empty((d)->iomem_caps) && \ + rangeset_is_empty((d)->arch.ioport_caps) && \ + !has_arch_pdevs(d) && is_pv_domain(d)) \ + ? L1_DISALLOW_MASK \ + : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS)) static s8 __read_mostly opt_mmio_relax; @@ -216,10 +215,10 @@ static void __init init_frametable_chunk(void *start, void *end) ASSERT(!(s & ((1 << L2_PAGETABLE_SHIFT) - 1))); for ( ; s < e; s += step << PAGE_SHIFT ) { - step = 1UL << (cpu_has_page1gb && - !(s & ((1UL << L3_PAGETABLE_SHIFT) - 1)) ? - L3_PAGETABLE_SHIFT - PAGE_SHIFT : - L2_PAGETABLE_SHIFT - PAGE_SHIFT); + step = + 1UL << (cpu_has_page1gb && !(s & ((1UL << L3_PAGETABLE_SHIFT) - 1)) + ? L3_PAGETABLE_SHIFT - PAGE_SHIFT + : L2_PAGETABLE_SHIFT - PAGE_SHIFT); /* * The hardcoded 4 below is arbitrary - just pick whatever you think * is reasonable to waste as a trade-off for using a large page. 
@@ -243,7 +242,7 @@ void __init init_frametable(void) BUILD_BUG_ON(XEN_VIRT_END > FRAMETABLE_VIRT_START); BUILD_BUG_ON(FRAMETABLE_VIRT_START & ((1UL << L2_PAGETABLE_SHIFT) - 1)); - for ( sidx = 0; ; sidx = nidx ) + for ( sidx = 0;; sidx = nidx ) { eidx = find_next_zero_bit(pdx_group_valid, max_idx, sidx); nidx = find_next_bit(pdx_group_valid, max_idx, eidx); @@ -254,15 +253,15 @@ void __init init_frametable(void) } end_pg = pdx_to_page(max_pdx - 1) + 1; - top_pg = mem_hotplug ? pdx_to_page(max_idx * PDX_GROUP_COUNT - 1) + 1 - : end_pg; + top_pg = + mem_hotplug ? pdx_to_page(max_idx * PDX_GROUP_COUNT - 1) + 1 : end_pg; init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT), top_pg); memset(end_pg, -1, (unsigned long)top_pg - (unsigned long)end_pg); } #ifndef NDEBUG -static unsigned int __read_mostly root_pgt_pv_xen_slots - = ROOT_PAGETABLE_PV_XEN_SLOTS; +static unsigned int __read_mostly root_pgt_pv_xen_slots = + ROOT_PAGETABLE_PV_XEN_SLOTS; static l4_pgentry_t __read_mostly split_l4e; #else #define root_pgt_pv_xen_slots ROOT_PAGETABLE_PV_XEN_SLOTS @@ -277,8 +276,8 @@ void __init arch_init_memory(void) * PRESENT, R/W, USER, A/D, AVAIL[0,1,2], AVAIL_HIGH, NX (if available). */ base_disallow_mask = - ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | - _PAGE_DIRTY | _PAGE_AVAIL | _PAGE_AVAIL_HIGH | _PAGE_NX); + ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY | + _PAGE_AVAIL | _PAGE_AVAIL_HIGH | _PAGE_NX); /* * Initialise our DOMID_XEN domain. @@ -319,8 +318,7 @@ void __init arch_init_memory(void) /* Any areas not specified as RAM by the e820 map are considered I/O. */ for ( i = 0, pfn = 0; pfn < max_page; i++ ) { - while ( (i < e820.nr_map) && - (e820.map[i].type != E820_RAM) && + while ( (i < e820.nr_map) && (e820.map[i].type != E820_RAM) && (e820.map[i].type != E820_UNUSABLE) ) i++; @@ -332,10 +330,10 @@ void __init arch_init_memory(void) else { /* Mark as I/O just up as far as next RAM region. 
*/ - rstart_pfn = min_t(unsigned long, max_page, - PFN_UP(e820.map[i].addr)); - rend_pfn = max_t(unsigned long, rstart_pfn, - PFN_DOWN(e820.map[i].addr + e820.map[i].size)); + rstart_pfn = + min_t(unsigned long, max_page, PFN_UP(e820.map[i].addr)); + rend_pfn = max_t(unsigned long, rstart_pfn, + PFN_DOWN(e820.map[i].addr + e820.map[i].size)); } /* @@ -376,8 +374,8 @@ void __init arch_init_memory(void) if ( split_va < HYPERVISOR_VIRT_END && split_va - 1 == (unsigned long)__va(highmem_start - 1) ) { - root_pgt_pv_xen_slots = l4_table_offset(split_va) - - ROOT_PAGETABLE_FIRST_XEN_SLOT; + root_pgt_pv_xen_slots = + l4_table_offset(split_va) - ROOT_PAGETABLE_FIRST_XEN_SLOT; ASSERT(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS); if ( l4_table_offset(split_va) == l4_table_offset(split_va - 1) ) { @@ -392,8 +390,8 @@ void __init arch_init_memory(void) l3tab[i] = l3idle[i]; for ( ; i < L3_PAGETABLE_ENTRIES; ++i ) l3tab[i] = l3e_empty(); - split_l4e = l4e_from_mfn(virt_to_mfn(l3tab), - __PAGE_HYPERVISOR_RW); + split_l4e = + l4e_from_mfn(virt_to_mfn(l3tab), __PAGE_HYPERVISOR_RW); } else ++root_pgt_pv_xen_slots; @@ -410,7 +408,7 @@ int page_is_ram_type(unsigned long mfn, unsigned long mem_type) for ( i = 0; i < e820.nr_map; i++ ) { - switch ( e820.map[i].type ) + switch (e820.map[i].type) { case E820_RAM: if ( mem_type & RAM_TYPE_CONVENTIONAL ) @@ -458,7 +456,7 @@ unsigned int page_get_ram_type(mfn_t mfn) maddr >= (e820.map[i].addr + e820.map[i].size) ) continue; - switch ( e820.map[i].type ) + switch (e820.map[i].type) { case E820_RAM: type |= RAM_TYPE_CONVENTIONAL; @@ -550,8 +548,8 @@ void write_ptbase(struct vcpu *v) struct cpu_info *cpu_info = get_cpu_info(); unsigned long new_cr4; - new_cr4 = (is_pv_vcpu(v) && !is_idle_vcpu(v)) - ? pv_make_cr4(v) : mmu_cr4_features; + new_cr4 = + (is_pv_vcpu(v) && !is_idle_vcpu(v)) ? pv_make_cr4(v) : mmu_cr4_features; if ( is_pv_vcpu(v) && v->domain->arch.pv.xpti ) { @@ -612,9 +610,8 @@ static inline void set_tlbflush_timestamp(struct page_info *page) page_set_tlbflush_timestamp(page); } -const char __section(".bss.page_aligned.const") __aligned(PAGE_SIZE) - zero_page[PAGE_SIZE]; - +const char __section(".bss.page_aligned.const") + __aligned(PAGE_SIZE) zero_page[PAGE_SIZE]; #ifdef CONFIG_PV static int alloc_segdesc_page(struct page_info *page) @@ -708,87 +705,85 @@ static void dec_linear_uses(struct page_info *pg) static bool __read_mostly opt_pv_linear_pt = true; boolean_param("pv-linear-pt", opt_pv_linear_pt); -#define define_get_linear_pagetable(level) \ -static int \ -get_##level##_linear_pagetable( \ - level##_pgentry_t pde, unsigned long pde_pfn, struct domain *d) \ -{ \ - unsigned long x, y; \ - unsigned long pfn; \ - \ - if ( !opt_pv_linear_pt ) \ - { \ - gdprintk(XENLOG_WARNING, \ - "Attempt to create linear p.t. (feature disabled)\n"); \ - return 0; \ - } \ - \ - if ( (level##e_get_flags(pde) & _PAGE_RW) ) \ - { \ - gdprintk(XENLOG_WARNING, \ - "Attempt to create linear p.t. with write perms\n"); \ - return 0; \ - } \ - \ - if ( (pfn = level##e_get_pfn(pde)) != pde_pfn ) \ - { \ - struct page_info *page, *ptpg = mfn_to_page(_mfn(pde_pfn)); \ - \ - /* Make sure the page table belongs to the correct domain. */ \ - if ( unlikely(page_get_owner(ptpg) != d) ) \ - return 0; \ - \ - /* Make sure the mapped frame belongs to the correct domain. 
*/ \ - page = get_page_from_mfn(_mfn(pfn), d); \ - if ( unlikely(!page) ) \ - return 0; \ - \ - /* \ - * Ensure that the mapped frame is an already-validated page table \ - * and is not itself having linear entries, as well as that the \ - * containing page table is not iself in use as a linear page table \ - * elsewhere. \ - * If so, atomically increment the count (checking for overflow). \ - */ \ - if ( !inc_linear_entries(ptpg) ) \ - { \ - put_page(page); \ - return 0; \ - } \ - if ( !inc_linear_uses(page) ) \ - { \ - dec_linear_entries(ptpg); \ - put_page(page); \ - return 0; \ - } \ - y = page->u.inuse.type_info; \ - do { \ - x = y; \ - if ( unlikely((x & PGT_count_mask) == PGT_count_mask) || \ - unlikely((x & (PGT_type_mask|PGT_validated)) != \ - (PGT_##level##_page_table|PGT_validated)) ) \ - { \ - dec_linear_uses(page); \ - dec_linear_entries(ptpg); \ - put_page(page); \ - return 0; \ - } \ - } \ - while ( (y = cmpxchg(&page->u.inuse.type_info, x, x + 1)) != x ); \ - } \ - \ - return 1; \ -} +#define define_get_linear_pagetable(level) \ + static int get_##level##_linear_pagetable( \ + level##_pgentry_t pde, unsigned long pde_pfn, struct domain *d) \ + { \ + unsigned long x, y; \ + unsigned long pfn; \ + \ + if ( !opt_pv_linear_pt ) \ + { \ + gdprintk(XENLOG_WARNING, \ + "Attempt to create linear p.t. (feature disabled)\n"); \ + return 0; \ + } \ + \ + if ( (level##e_get_flags(pde) & _PAGE_RW) ) \ + { \ + gdprintk(XENLOG_WARNING, \ + "Attempt to create linear p.t. with write perms\n"); \ + return 0; \ + } \ + \ + if ( (pfn = level##e_get_pfn(pde)) != pde_pfn ) \ + { \ + struct page_info *page, *ptpg = mfn_to_page(_mfn(pde_pfn)); \ + \ + /* Make sure the page table belongs to the correct domain. */ \ + if ( unlikely(page_get_owner(ptpg) != d) ) \ + return 0; \ + \ + /* Make sure the mapped frame belongs to the correct domain. */ \ + page = get_page_from_mfn(_mfn(pfn), d); \ + if ( unlikely(!page) ) \ + return 0; \ + \ + /* \ + * Ensure that the mapped frame is an already-validated page table \ + * and is not itself having linear entries, as well as that the \ + * containing page table is not iself in use as a linear page \ + * table elsewhere. If so, atomically increment the count \ + * (checking for overflow). 
\ + */ \ + if ( !inc_linear_entries(ptpg) ) \ + { \ + put_page(page); \ + return 0; \ + } \ + if ( !inc_linear_uses(page) ) \ + { \ + dec_linear_entries(ptpg); \ + put_page(page); \ + return 0; \ + } \ + y = page->u.inuse.type_info; \ + do { \ + x = y; \ + if ( unlikely((x & PGT_count_mask) == PGT_count_mask) || \ + unlikely((x & (PGT_type_mask | PGT_validated)) != \ + (PGT_##level##_page_table | PGT_validated)) ) \ + { \ + dec_linear_uses(page); \ + dec_linear_entries(ptpg); \ + put_page(page); \ + return 0; \ + } \ + } while ( (y = cmpxchg(&page->u.inuse.type_info, x, x + 1)) != \ + x ); \ + } \ + \ + return 1; \ + } #else /* CONFIG_PV_LINEAR_PT */ #define define_get_linear_pagetable(level) \ -static int \ -get_##level##_linear_pagetable( \ + static int get_##level##_linear_pagetable( \ level##_pgentry_t pde, unsigned long pde_pfn, struct domain *d) \ -{ \ + { \ return 0; \ -} + } static void dec_linear_uses(struct page_info *pg) { @@ -820,7 +815,7 @@ static int update_xen_mappings(unsigned long mfn, unsigned int cacheattr) { int err = 0; bool alias = mfn >= PFN_DOWN(xen_phys_start) && - mfn < PFN_UP(xen_phys_start + xen_virt_end - XEN_VIRT_START); + mfn < PFN_UP(xen_phys_start + xen_virt_end - XEN_VIRT_START); unsigned long xen_va = XEN_VIRT_START + ((mfn - PFN_DOWN(xen_phys_start)) << PAGE_SHIFT); @@ -828,14 +823,16 @@ static int update_xen_mappings(unsigned long mfn, unsigned int cacheattr) err = map_pages_to_xen(xen_va, _mfn(mfn), 1, 0); if ( !err ) err = map_pages_to_xen((unsigned long)mfn_to_virt(mfn), _mfn(mfn), 1, - PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr)); + PAGE_HYPERVISOR | + cacheattr_to_pte_flags(cacheattr)); if ( unlikely(alias) && !cacheattr && !err ) err = map_pages_to_xen(xen_va, _mfn(mfn), 1, PAGE_HYPERVISOR); return err; } #ifndef NDEBUG -struct mmio_emul_range_ctxt { +struct mmio_emul_range_ctxt +{ const struct domain *d; unsigned long mfn; }; @@ -880,9 +877,8 @@ static int print_mmio_emul_range(unsigned long s, unsigned long e, void *arg) * <0 => error code * >0 => the page flags to be flipped */ -int -get_page_from_l1e( - l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner) +int get_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner, + struct domain *pg_owner) { unsigned long mfn = l1e_get_pfn(l1e); struct page_info *page = mfn_to_page(_mfn(mfn)); @@ -921,9 +917,11 @@ get_page_from_l1e( { if ( mfn != (PADDR_MASK >> PAGE_SHIFT) ) /* INVALID_MFN? */ { - gdprintk(XENLOG_WARNING, - "d%d non-privileged attempt to map MMIO space %"PRI_mfn"\n", - pg_owner->domain_id, mfn); + gdprintk( + XENLOG_WARNING, + "d%d non-privileged attempt to map MMIO space %" PRI_mfn + "\n", + pg_owner->domain_id, mfn); return -EPERM; } return -EINVAL; @@ -935,7 +933,8 @@ get_page_from_l1e( if ( mfn != (PADDR_MASK >> PAGE_SHIFT) ) /* INVALID_MFN? */ { gdprintk(XENLOG_WARNING, - "d%d attempted to map MMIO space %"PRI_mfn" in d%d to d%d\n", + "d%d attempted to map MMIO space %" PRI_mfn + " in d%d to d%d\n", curr->domain->domain_id, mfn, pg_owner->domain_id, l1e_owner->domain_id); return -EPERM; @@ -946,7 +945,7 @@ get_page_from_l1e( if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn) ) { /* MMIO pages must not be mapped cachable unless requested so. 
*/ - switch ( opt_mmio_relax ) + switch (opt_mmio_relax) { case 0: break; @@ -973,21 +972,19 @@ get_page_from_l1e( "d%d: Forcing read-only access to MFN %lx\n", l1e_owner->domain_id, mfn); else - rangeset_report_ranges(mmio_ro_ranges, 0, ~0UL, - print_mmio_emul_range, - &(struct mmio_emul_range_ctxt){ - .d = l1e_owner, - .mfn = mfn }); + rangeset_report_ranges( + mmio_ro_ranges, 0, ~0UL, print_mmio_emul_range, + &(struct mmio_emul_range_ctxt){.d = l1e_owner, .mfn = mfn}); #endif flip = _PAGE_RW; } - switch ( l1f & PAGE_CACHE_ATTRS ) + switch (l1f & PAGE_CACHE_ATTRS) { case 0: /* WB */ flip |= _PAGE_PWT | _PAGE_PCD; break; - case _PAGE_PWT: /* WT */ + case _PAGE_PWT: /* WT */ case _PAGE_PWT | _PAGE_PAT: /* WP */ flip |= _PAGE_PCD | (l1f & _PAGE_PAT); break; @@ -996,8 +993,7 @@ get_page_from_l1e( return flip; } - if ( unlikely( (real_pg_owner != pg_owner) && - (real_pg_owner != dom_cow) ) ) + if ( unlikely((real_pg_owner != pg_owner) && (real_pg_owner != dom_cow)) ) { /* * Let privileged domains transfer the right to map their target @@ -1056,7 +1052,7 @@ get_page_from_l1e( } do { - x = y; + x = y; nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base); } while ( (y = cmpxchg(&page->count_info, x, nx)) != x ); @@ -1065,7 +1061,7 @@ get_page_from_l1e( { cacheattr = y & PGC_cacheattr_mask; do { - x = y; + x = y; nx = (x & ~PGC_cacheattr_mask) | cacheattr; } while ( (y = cmpxchg(&page->count_info, x, nx)) != x ); @@ -1073,36 +1069,37 @@ get_page_from_l1e( put_page_type(page); put_page(page); - gdprintk(XENLOG_WARNING, "Error updating mappings for mfn %" PRI_mfn + gdprintk(XENLOG_WARNING, + "Error updating mappings for mfn %" PRI_mfn " (pfn %" PRI_pfn ", from L1 entry %" PRIpte ") for d%d\n", - mfn, get_gpfn_from_mfn(mfn), - l1e_get_intpte(l1e), l1e_owner->domain_id); + mfn, get_gpfn_from_mfn(mfn), l1e_get_intpte(l1e), + l1e_owner->domain_id); return err; } } return 0; - could_not_pin: - gdprintk(XENLOG_WARNING, "Error getting mfn %" PRI_mfn " (pfn %" PRI_pfn +could_not_pin: + gdprintk(XENLOG_WARNING, + "Error getting mfn %" PRI_mfn " (pfn %" PRI_pfn ") from L1 entry %" PRIpte " for l1e_owner d%d, pg_owner d%d\n", - mfn, get_gpfn_from_mfn(mfn), - l1e_get_intpte(l1e), l1e_owner->domain_id, pg_owner->domain_id); + mfn, get_gpfn_from_mfn(mfn), l1e_get_intpte(l1e), + l1e_owner->domain_id, pg_owner->domain_id); if ( real_pg_owner != NULL ) put_page(page); return -EBUSY; } #ifdef CONFIG_PV -static int get_page_and_type_from_mfn( - mfn_t mfn, unsigned long type, struct domain *d, - int partial, int preemptible) +static int get_page_and_type_from_mfn(mfn_t mfn, unsigned long type, + struct domain *d, int partial, + int preemptible) { struct page_info *page = mfn_to_page(mfn); int rc; - if ( likely(partial >= 0) && - unlikely(!get_page_from_mfn(mfn, d)) ) + if ( likely(partial >= 0) && unlikely(!get_page_from_mfn(mfn, d)) ) return -EINVAL; rc = _get_page_type(page, type, preemptible); @@ -1115,9 +1112,8 @@ static int get_page_and_type_from_mfn( } define_get_linear_pagetable(l2); -static int -get_page_from_l2e( - l2_pgentry_t l2e, unsigned long pfn, struct domain *d, int partial) +static int get_page_from_l2e(l2_pgentry_t l2e, unsigned long pfn, + struct domain *d, int partial) { unsigned long mfn = l2e_get_pfn(l2e); int rc; @@ -1129,8 +1125,8 @@ get_page_from_l2e( return -EINVAL; } - rc = get_page_and_type_from_mfn(_mfn(mfn), PGT_l1_page_table, d, - partial, false); + rc = get_page_and_type_from_mfn(_mfn(mfn), PGT_l1_page_table, d, partial, + false); if ( unlikely(rc == -EINVAL) && 
get_l2_linear_pagetable(l2e, pfn, d) ) rc = 0; @@ -1138,9 +1134,8 @@ get_page_from_l2e( } define_get_linear_pagetable(l3); -static int -get_page_from_l3e( - l3_pgentry_t l3e, unsigned long pfn, struct domain *d, int partial) +static int get_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn, + struct domain *d, int partial) { int rc; @@ -1151,10 +1146,9 @@ get_page_from_l3e( return -EINVAL; } - rc = get_page_and_type_from_mfn( - l3e_get_mfn(l3e), PGT_l2_page_table, d, partial, 1); - if ( unlikely(rc == -EINVAL) && - !is_pv_32bit_domain(d) && + rc = get_page_and_type_from_mfn(l3e_get_mfn(l3e), PGT_l2_page_table, d, + partial, 1); + if ( unlikely(rc == -EINVAL) && !is_pv_32bit_domain(d) && get_l3_linear_pagetable(l3e, pfn, d) ) rc = 0; @@ -1162,9 +1156,8 @@ get_page_from_l3e( } define_get_linear_pagetable(l4); -static int -get_page_from_l4e( - l4_pgentry_t l4e, unsigned long pfn, struct domain *d, int partial) +static int get_page_from_l4e(l4_pgentry_t l4e, unsigned long pfn, + struct domain *d, int partial) { int rc; @@ -1175,8 +1168,8 @@ get_page_from_l4e( return -EINVAL; } - rc = get_page_and_type_from_mfn( - l4e_get_mfn(l4e), PGT_l3_page_table, d, partial, 1); + rc = get_page_and_type_from_mfn(l4e_get_mfn(l4e), PGT_l3_page_table, d, + partial, 1); if ( unlikely(rc == -EINVAL) && get_l4_linear_pagetable(l4e, pfn, d) ) rc = 0; @@ -1189,9 +1182,9 @@ static int _put_page_type(struct page_info *page, bool preemptible, void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner) { - unsigned long pfn = l1e_get_pfn(l1e); + unsigned long pfn = l1e_get_pfn(l1e); struct page_info *page; - struct domain *pg_owner; + struct domain *pg_owner; if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || is_iomem_page(_mfn(pfn)) ) return; @@ -1244,7 +1237,7 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner) cpumask_clear(mask); - for_each_vcpu ( pg_owner, v ) + for_each_vcpu (pg_owner, v) { unsigned int cpu; @@ -1276,8 +1269,8 @@ static void put_data_page(struct page_info *page, bool writeable) * NB. Virtual address 'l2e' maps to a machine address within frame 'pfn'. * Note also that this automatically deals correctly with linear p.t.'s. 
*/ -static int put_page_from_l2e(l2_pgentry_t l2e, unsigned long pfn, - int partial, bool defer) +static int put_page_from_l2e(l2_pgentry_t l2e, unsigned long pfn, int partial, + bool defer) { int rc = 0; @@ -1321,8 +1314,8 @@ static int put_page_from_l2e(l2_pgentry_t l2e, unsigned long pfn, return rc; } -static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn, - int partial, bool defer) +static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn, int partial, + bool defer) { struct page_info *pg; int rc; @@ -1365,13 +1358,12 @@ static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn, return rc; } -static int put_page_from_l4e(l4_pgentry_t l4e, unsigned long pfn, - int partial, bool defer) +static int put_page_from_l4e(l4_pgentry_t l4e, unsigned long pfn, int partial, + bool defer) { int rc = 1; - if ( (l4e_get_flags(l4e) & _PAGE_PRESENT) && - (l4e_get_pfn(l4e) != pfn) ) + if ( (l4e_get_flags(l4e) & _PAGE_PRESENT) && (l4e_get_pfn(l4e) != pfn) ) { struct page_info *pg = l4e_get_page(l4e); @@ -1399,9 +1391,9 @@ static int put_page_from_l4e(l4_pgentry_t l4e, unsigned long pfn, static int alloc_l1_table(struct page_info *page) { struct domain *d = page_get_owner(page); - l1_pgentry_t *pl1e; - unsigned int i; - int ret = 0; + l1_pgentry_t *pl1e; + unsigned int i; + int ret = 0; pl1e = __map_domain_page(page); @@ -1415,7 +1407,7 @@ static int alloc_l1_table(struct page_info *page) } else { - switch ( ret = get_page_from_l1e(pl1e[i], d, d) ) + switch (ret = get_page_from_l1e(pl1e[i], d, d)) { default: goto fail; @@ -1436,9 +1428,9 @@ static int alloc_l1_table(struct page_info *page) unmap_domain_page(pl1e); return 0; - fail: +fail: gdprintk(XENLOG_WARNING, "Failure in alloc_l1_table: slot %#x\n", i); - out: +out: while ( i-- > 0 ) put_page_from_l1e(pl1e[i], d); @@ -1449,7 +1441,7 @@ static int alloc_l1_table(struct page_info *page) static int create_pae_xen_mappings(struct domain *d, l3_pgentry_t *pl3e) { struct page_info *page; - l3_pgentry_t l3e3; + l3_pgentry_t l3e3; if ( !is_pv_32bit_domain(d) ) return 1; @@ -1489,10 +1481,10 @@ static int create_pae_xen_mappings(struct domain *d, l3_pgentry_t *pl3e) static int alloc_l2_table(struct page_info *page, unsigned long type) { struct domain *d = page_get_owner(page); - unsigned long pfn = mfn_x(page_to_mfn(page)); - l2_pgentry_t *pl2e; - unsigned int i; - int rc = 0, partial = page->partial_pte; + unsigned long pfn = mfn_x(page_to_mfn(page)); + l2_pgentry_t *pl2e; + unsigned int i; + int rc = 0, partial = page->partial_pte; pl2e = map_domain_page(_mfn(pfn)); @@ -1535,7 +1527,8 @@ static int alloc_l2_table(struct page_info *page, unsigned long type) } else if ( rc < 0 && rc != -EINTR ) { - gdprintk(XENLOG_WARNING, "Failure in alloc_l2_table: slot %#x\n", i); + gdprintk(XENLOG_WARNING, "Failure in alloc_l2_table: slot %#x\n", + i); if ( i ) { page->nr_validated_ptes = i; @@ -1560,10 +1553,10 @@ static int alloc_l2_table(struct page_info *page, unsigned long type) static int alloc_l3_table(struct page_info *page) { struct domain *d = page_get_owner(page); - unsigned long pfn = mfn_x(page_to_mfn(page)); - l3_pgentry_t *pl3e; - unsigned int i; - int rc = 0, partial = page->partial_pte; + unsigned long pfn = mfn_x(page_to_mfn(page)); + l3_pgentry_t *pl3e; + unsigned int i; + int rc = 0, partial = page->partial_pte; pl3e = map_domain_page(_mfn(pfn)); @@ -1596,8 +1589,8 @@ static int alloc_l3_table(struct page_info *page) rc = -EINVAL; else rc = get_page_and_type_from_mfn( - l3e_get_mfn(l3e), - PGT_l2_page_table | PGT_pae_xen_l2, d, 
partial, 1); + l3e_get_mfn(l3e), PGT_l2_page_table | PGT_pae_xen_l2, d, + partial, 1); } else if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ) { @@ -1647,10 +1640,10 @@ static int alloc_l3_table(struct page_info *page) void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d) { - memcpy(&l2t[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)], - &compat_idle_pg_table_l2[ - l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)], - COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*l2t)); + memcpy( + &l2t[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)], + &compat_idle_pg_table_l2[l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)], + COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*l2t)); } #endif /* CONFIG_PV */ @@ -1665,8 +1658,8 @@ void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d) * appropriate slots. Pagetables not shared with guests will gain the * extended directmap. */ -void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn, - const struct domain *d, mfn_t sl4mfn, bool ro_mpt) +void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn, const struct domain *d, + mfn_t sl4mfn, bool ro_mpt) { /* * PV vcpus need a shortened directmap. HVM and Idle vcpus get the full @@ -1690,8 +1683,9 @@ void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn, /* Slot 259: Shadow linear mappings (if applicable) .*/ l4t[l4_table_offset(SH_LINEAR_PT_VIRT_START)] = - mfn_eq(sl4mfn, INVALID_MFN) ? l4e_empty() : - l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR_RW); + mfn_eq(sl4mfn, INVALID_MFN) + ? l4e_empty() + : l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR_RW); /* Slot 260: Per-domain mappings (if applicable). */ l4t[l4_table_offset(PERDOMAIN_VIRT_START)] = @@ -1712,27 +1706,27 @@ void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn, memcpy(&l4t[l4_table_offset(XEN_VIRT_START)], &idle_pg_table[l4_table_offset(XEN_VIRT_START)], (ROOT_PAGETABLE_FIRST_XEN_SLOT + root_pgt_pv_xen_slots - - l4_table_offset(XEN_VIRT_START)) * sizeof(*l4t)); + l4_table_offset(XEN_VIRT_START)) * + sizeof(*l4t)); next = &l4t[ROOT_PAGETABLE_FIRST_XEN_SLOT + root_pgt_pv_xen_slots]; if ( l4e_get_intpte(split_l4e) ) *next++ = split_l4e; - memset(next, 0, - _p(&l4t[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next)); + memset(next, 0, _p(&l4t[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next)); } else #endif { - unsigned int slots = (short_directmap - ? ROOT_PAGETABLE_PV_XEN_SLOTS - : ROOT_PAGETABLE_XEN_SLOTS); + unsigned int slots = (short_directmap ? 
ROOT_PAGETABLE_PV_XEN_SLOTS + : ROOT_PAGETABLE_XEN_SLOTS); memcpy(&l4t[l4_table_offset(XEN_VIRT_START)], &idle_pg_table[l4_table_offset(XEN_VIRT_START)], (ROOT_PAGETABLE_FIRST_XEN_SLOT + slots - - l4_table_offset(XEN_VIRT_START)) * sizeof(*l4t)); + l4_table_offset(XEN_VIRT_START)) * + sizeof(*l4t)); } } @@ -1764,10 +1758,10 @@ void zap_ro_mpt(mfn_t mfn) static int alloc_l4_table(struct page_info *page) { struct domain *d = page_get_owner(page); - unsigned long pfn = mfn_x(page_to_mfn(page)); - l4_pgentry_t *pl4e = map_domain_page(_mfn(pfn)); - unsigned int i; - int rc = 0, partial = page->partial_pte; + unsigned long pfn = mfn_x(page_to_mfn(page)); + l4_pgentry_t *pl4e = map_domain_page(_mfn(pfn)); + unsigned int i; + int rc = 0, partial = page->partial_pte; for ( i = page->nr_validated_ptes; i < L4_PAGETABLE_ENTRIES; i++, partial = 0 ) @@ -1824,8 +1818,8 @@ static int alloc_l4_table(struct page_info *page) if ( !rc ) { - init_xen_l4_slots(pl4e, _mfn(pfn), - d, INVALID_MFN, VM_ASSIST(d, m2p_strict)); + init_xen_l4_slots(pl4e, _mfn(pfn), d, INVALID_MFN, + VM_ASSIST(d, m2p_strict)); atomic_inc(&d->arch.pv.nr_l4_pages); } unmap_domain_page(pl4e); @@ -1837,7 +1831,7 @@ static void free_l1_table(struct page_info *page) { struct domain *d = page_get_owner(page); l1_pgentry_t *pl1e; - unsigned int i; + unsigned int i; pl1e = __map_domain_page(page); @@ -1847,7 +1841,6 @@ static void free_l1_table(struct page_info *page) unmap_domain_page(pl1e); } - static int free_l2_table(struct page_info *page) { struct domain *d = page_get_owner(page); @@ -1858,7 +1851,7 @@ static int free_l2_table(struct page_info *page) pl2e = map_domain_page(_mfn(pfn)); - for ( ; ; ) + for ( ;; ) { if ( is_guest_l2_slot(d, page->u.inuse.type_info, i) ) rc = put_page_from_l2e(pl2e[i], pfn, partial, false); @@ -1905,11 +1898,11 @@ static int free_l3_table(struct page_info *page) unsigned long pfn = mfn_x(page_to_mfn(page)); l3_pgentry_t *pl3e; int rc = 0, partial = page->partial_pte; - unsigned int i = page->nr_validated_ptes - !partial; + unsigned int i = page->nr_validated_ptes - !partial; pl3e = map_domain_page(_mfn(pfn)); - for ( ; ; ) + for ( ;; ) { rc = put_page_from_l3e(pl3e[i], pfn, partial, 0); if ( rc < 0 ) @@ -1951,7 +1944,7 @@ static int free_l4_table(struct page_info *page) unsigned long pfn = mfn_x(page_to_mfn(page)); l4_pgentry_t *pl4e = map_domain_page(_mfn(pfn)); int rc = 0, partial = page->partial_pte; - unsigned int i = page->nr_validated_ptes - !partial; + unsigned int i = page->nr_validated_ptes - !partial; do { if ( is_guest_l4_slot(d, i) ) @@ -2008,11 +2001,13 @@ static int free_l4_table(struct page_info *page) */ static DEFINE_PER_CPU(struct page_info *, current_locked_page); -static inline void current_locked_page_set(struct page_info *page) { +static inline void current_locked_page_set(struct page_info *page) +{ this_cpu(current_locked_page) = page; } -static inline bool current_locked_page_check(struct page_info *page) { +static inline bool current_locked_page_check(struct page_info *page) +{ return this_cpu(current_locked_page) == page; } @@ -2020,7 +2015,8 @@ static inline bool current_locked_page_check(struct page_info *page) { * We need a separate "not-equal" check so the non-debug stubs can * always return true. 
*/ -static inline bool current_locked_page_ne_check(struct page_info *page) { +static inline bool current_locked_page_ne_check(struct page_info *page) +{ return this_cpu(current_locked_page) != page; } #else @@ -2039,8 +2035,7 @@ int page_lock(struct page_info *page) while ( (x = page->u.inuse.type_info) & PGT_locked ) cpu_relax(); nx = x + (1 | PGT_locked); - if ( !(x & PGT_validated) || - !(x & PGT_count_mask) || + if ( !(x & PGT_validated) || !(x & PGT_count_mask) || !(nx & PGT_count_mask) ) return 0; } while ( cmpxchg(&page->u.inuse.type_info, x, nx) != x ); @@ -2099,7 +2094,7 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e, if ( unlikely(l1e_get_flags(nl1e) & l1_disallow_mask(pt_dom)) ) { gdprintk(XENLOG_WARNING, "Bad L1 flags %x\n", - l1e_get_flags(nl1e) & l1_disallow_mask(pt_dom)); + l1e_get_flags(nl1e) & l1_disallow_mask(pt_dom)); return -EINVAL; } @@ -2108,8 +2103,9 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e, paging_mode_translate(pg_dom) ) { p2m_type_t p2mt; - p2m_query_t q = l1e_get_flags(nl1e) & _PAGE_RW ? - P2M_ALLOC | P2M_UNSHARE : P2M_ALLOC; + p2m_query_t q = l1e_get_flags(nl1e) & _PAGE_RW + ? P2M_ALLOC | P2M_UNSHARE + : P2M_ALLOC; page = get_page_from_gfn(pg_dom, l1e_get_pfn(nl1e), &p2mt, q); @@ -2149,7 +2145,7 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e, return rc ? 0 : -EBUSY; } - switch ( rc = get_page_from_l1e(nl1e, pt_dom, pg_dom) ) + switch (rc = get_page_from_l1e(nl1e, pt_dom, pg_dom)) { default: if ( page ) @@ -2186,13 +2182,9 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e, return rc; } - /* Update the L2 entry at pl2e to new value nl2e. pl2e is within frame pfn. */ -static int mod_l2_entry(l2_pgentry_t *pl2e, - l2_pgentry_t nl2e, - unsigned long pfn, - int preserve_ad, - struct vcpu *vcpu) +static int mod_l2_entry(l2_pgentry_t *pl2e, l2_pgentry_t nl2e, + unsigned long pfn, int preserve_ad, struct vcpu *vcpu) { l2_pgentry_t ol2e; struct domain *d = vcpu->domain; @@ -2215,7 +2207,7 @@ static int mod_l2_entry(l2_pgentry_t *pl2e, if ( unlikely(l2e_get_flags(nl2e) & L2_DISALLOW_MASK) ) { gdprintk(XENLOG_WARNING, "Bad L2 flags %x\n", - l2e_get_flags(nl2e) & L2_DISALLOW_MASK); + l2e_get_flags(nl2e) & L2_DISALLOW_MASK); return -EINVAL; } @@ -2232,8 +2224,8 @@ static int mod_l2_entry(l2_pgentry_t *pl2e, return rc; nl2e = adjust_guest_l2e(nl2e, d); - if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu, - preserve_ad)) ) + if ( unlikely( + !UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu, preserve_ad)) ) { ol2e = nl2e; rc = -EBUSY; @@ -2241,8 +2233,8 @@ static int mod_l2_entry(l2_pgentry_t *pl2e, } else if ( pv_l1tf_check_l2e(d, nl2e) ) return -ERESTART; - else if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu, - preserve_ad)) ) + else if ( unlikely( + !UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu, preserve_ad)) ) { return -EBUSY; } @@ -2253,11 +2245,8 @@ static int mod_l2_entry(l2_pgentry_t *pl2e, } /* Update the L3 entry at pl3e to new value nl3e. pl3e is within frame pfn. 
*/ -static int mod_l3_entry(l3_pgentry_t *pl3e, - l3_pgentry_t nl3e, - unsigned long pfn, - int preserve_ad, - struct vcpu *vcpu) +static int mod_l3_entry(l3_pgentry_t *pl3e, l3_pgentry_t nl3e, + unsigned long pfn, int preserve_ad, struct vcpu *vcpu) { l3_pgentry_t ol3e; struct domain *d = vcpu->domain; @@ -2278,7 +2267,7 @@ static int mod_l3_entry(l3_pgentry_t *pl3e, if ( unlikely(l3e_get_flags(nl3e) & l3_disallow_mask(d)) ) { gdprintk(XENLOG_WARNING, "Bad L3 flags %x\n", - l3e_get_flags(nl3e) & l3_disallow_mask(d)); + l3e_get_flags(nl3e) & l3_disallow_mask(d)); return -EINVAL; } @@ -2296,8 +2285,8 @@ static int mod_l3_entry(l3_pgentry_t *pl3e, rc = 0; nl3e = adjust_guest_l3e(nl3e, d); - if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, vcpu, - preserve_ad)) ) + if ( unlikely( + !UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, vcpu, preserve_ad)) ) { ol3e = nl3e; rc = -EFAULT; @@ -2305,8 +2294,8 @@ static int mod_l3_entry(l3_pgentry_t *pl3e, } else if ( pv_l1tf_check_l3e(d, nl3e) ) return -ERESTART; - else if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, vcpu, - preserve_ad)) ) + else if ( unlikely( + !UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, vcpu, preserve_ad)) ) { return -EFAULT; } @@ -2320,11 +2309,8 @@ static int mod_l3_entry(l3_pgentry_t *pl3e, } /* Update the L4 entry at pl4e to new value nl4e. pl4e is within frame pfn. */ -static int mod_l4_entry(l4_pgentry_t *pl4e, - l4_pgentry_t nl4e, - unsigned long pfn, - int preserve_ad, - struct vcpu *vcpu) +static int mod_l4_entry(l4_pgentry_t *pl4e, l4_pgentry_t nl4e, + unsigned long pfn, int preserve_ad, struct vcpu *vcpu) { struct domain *d = vcpu->domain; l4_pgentry_t ol4e; @@ -2345,7 +2331,7 @@ static int mod_l4_entry(l4_pgentry_t *pl4e, if ( unlikely(l4e_get_flags(nl4e) & L4_DISALLOW_MASK) ) { gdprintk(XENLOG_WARNING, "Bad L4 flags %x\n", - l4e_get_flags(nl4e) & L4_DISALLOW_MASK); + l4e_get_flags(nl4e) & L4_DISALLOW_MASK); return -EINVAL; } @@ -2363,8 +2349,8 @@ static int mod_l4_entry(l4_pgentry_t *pl4e, rc = 0; nl4e = adjust_guest_l4e(nl4e, d); - if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu, - preserve_ad)) ) + if ( unlikely( + !UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu, preserve_ad)) ) { ol4e = nl4e; rc = -EFAULT; @@ -2372,8 +2358,8 @@ static int mod_l4_entry(l4_pgentry_t *pl4e, } else if ( pv_l1tf_check_l4e(d, nl4e) ) return -ERESTART; - else if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu, - preserve_ad)) ) + else if ( unlikely( + !UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu, preserve_ad)) ) { return -EFAULT; } @@ -2464,22 +2450,20 @@ void put_page(struct page_info *page) do { ASSERT((y & PGC_count_mask) != 0); - x = y; + x = y; nx = x - 1; - } - while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) ); + } while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) ); if ( unlikely((nx & PGC_count_mask) == 0) ) { if ( !cleanup_page_mappings(page) ) free_domheap_page(page); else - gdprintk(XENLOG_WARNING, - "Leaking mfn %" PRI_mfn "\n", mfn_x(page_to_mfn(page))); + gdprintk(XENLOG_WARNING, "Leaking mfn %" PRI_mfn "\n", + mfn_x(page_to_mfn(page))); } } - struct domain *page_get_owner_and_reference(struct page_info *page) { unsigned long x, y = page->count_info; @@ -2494,8 +2478,7 @@ struct domain *page_get_owner_and_reference(struct page_info *page) */ if ( unlikely(((x + 2) & PGC_count_mask) <= 2) ) return NULL; - } - while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x ); + } while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x ); owner = page_get_owner(page); ASSERT(owner); @@ 
-2503,7 +2486,6 @@ struct domain *page_get_owner_and_reference(struct page_info *page) return owner; } - int get_page(struct page_info *page, struct domain *domain) { struct domain *owner = page_get_owner_and_reference(page); @@ -2513,7 +2495,8 @@ int get_page(struct page_info *page, struct domain *domain) if ( !paging_mode_refcounts(domain) && !domain->is_dying ) gprintk(XENLOG_INFO, - "Error mfn %"PRI_mfn": rd=%d od=%d caf=%08lx taf=%" PRtype_info "\n", + "Error mfn %" PRI_mfn + ": rd=%d od=%d caf=%08lx taf=%" PRtype_info "\n", mfn_x(page_to_mfn(page)), domain->domain_id, owner ? owner->domain_id : DOMID_INVALID, page->count_info - !!owner, page->u.inuse.type_info); @@ -2537,13 +2520,12 @@ static void get_page_light(struct page_info *page) unsigned long x, nx, y = page->count_info; do { - x = y; + x = y; nx = x + 1; - BUG_ON(!(x & PGC_count_mask)); /* Not allocated? */ + BUG_ON(!(x & PGC_count_mask)); /* Not allocated? */ BUG_ON(!(nx & PGC_count_mask)); /* Overflow? */ y = cmpxchg(&page->count_info, x, nx); - } - while ( unlikely(y != x) ); + } while ( unlikely(y != x) ); } static int alloc_page_type(struct page_info *page, unsigned long type, @@ -2557,7 +2539,7 @@ static int alloc_page_type(struct page_info *page, unsigned long type, if ( likely(owner != NULL) ) paging_mark_dirty(owner, page_to_mfn(page)); - switch ( type & PGT_type_mask ) + switch (type & PGT_type_mask) { case PGT_l1_page_table: rc = alloc_l1_table(page); @@ -2579,39 +2561,39 @@ static int alloc_page_type(struct page_info *page, unsigned long type, break; default: printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=%lx\n", - type, page->u.inuse.type_info, - page->count_info); + type, page->u.inuse.type_info, page->count_info); rc = -EINVAL; BUG(); } /* No need for atomic update of type_info here: noone else updates it. 
*/ smp_wmb(); - switch ( rc ) + switch (rc) { case 0: page->u.inuse.type_info |= PGT_validated; break; case -EINTR: ASSERT((page->u.inuse.type_info & - (PGT_count_mask|PGT_validated|PGT_partial)) == 1); + (PGT_count_mask | PGT_validated | PGT_partial)) == 1); page->u.inuse.type_info &= ~PGT_count_mask; break; default: ASSERT(rc < 0); - gdprintk(XENLOG_WARNING, "Error while validating mfn %" PRI_mfn - " (pfn %" PRI_pfn ") for type %" PRtype_info - ": caf=%08lx taf=%" PRtype_info "\n", + gdprintk(XENLOG_WARNING, + "Error while validating mfn %" PRI_mfn " (pfn %" PRI_pfn + ") for type %" PRtype_info ": caf=%08lx taf=%" PRtype_info + "\n", mfn_x(page_to_mfn(page)), - get_gpfn_from_mfn(mfn_x(page_to_mfn(page))), - type, page->count_info, page->u.inuse.type_info); + get_gpfn_from_mfn(mfn_x(page_to_mfn(page))), type, + page->count_info, page->u.inuse.type_info); if ( page != current->arch.old_guest_table ) page->u.inuse.type_info = 0; else { ASSERT((page->u.inuse.type_info & (PGT_count_mask | PGT_validated)) == 1); - case -ERESTART: + case -ERESTART: get_page_light(page); page->u.inuse.type_info |= PGT_partial; } @@ -2625,9 +2607,7 @@ static int alloc_page_type(struct page_info *page, unsigned long type, #endif } - -int free_page_type(struct page_info *page, unsigned long type, - int preemptible) +int free_page_type(struct page_info *page, unsigned long type, int preemptible) { #ifdef CONFIG_PV struct domain *owner = page_get_owner(page); @@ -2652,7 +2632,7 @@ int free_page_type(struct page_info *page, unsigned long type, page->partial_pte = 0; } - switch ( type & PGT_type_mask ) + switch (type & PGT_type_mask) { case PGT_l1_page_table: free_l1_table(page); @@ -2684,7 +2664,6 @@ int free_page_type(struct page_info *page, unsigned long type, #endif } - static int _put_final_page_type(struct page_info *page, unsigned long type, bool preemptible, struct page_info *ptpg) { @@ -2706,7 +2685,7 @@ static int _put_final_page_type(struct page_info *page, unsigned long type, else if ( rc == -EINTR ) { ASSERT((page->u.inuse.type_info & - (PGT_count_mask|PGT_validated|PGT_partial)) == 1); + (PGT_count_mask | PGT_validated | PGT_partial)) == 1); smp_wmb(); page->u.inuse.type_info |= PGT_validated; } @@ -2721,7 +2700,6 @@ static int _put_final_page_type(struct page_info *page, unsigned long type, return rc; } - static int _put_page_type(struct page_info *page, bool preemptible, struct page_info *ptpg) { @@ -2729,18 +2707,18 @@ static int _put_page_type(struct page_info *page, bool preemptible, ASSERT(current_locked_page_ne_check(page)); - for ( ; ; ) + for ( ;; ) { - x = y; + x = y; nx = x - 1; ASSERT((x & PGT_count_mask) != 0); - switch ( nx & (PGT_locked | PGT_count_mask) ) + switch (nx & (PGT_locked | PGT_count_mask)) { case 0: if ( unlikely((nx & PGT_type_mask) <= PGT_l4_page_table) && - likely(nx & (PGT_validated|PGT_partial)) ) + likely(nx & (PGT_validated | PGT_partial)) ) { int rc; @@ -2749,9 +2727,9 @@ static int _put_page_type(struct page_info *page, bool preemptible, * 'free' is safe because the refcnt is non-zero and validated * bit is clear => other ops will spin or fail. */ - nx = x & ~(PGT_validated|PGT_partial); - if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, - x, nx)) != x) ) + nx = x & ~(PGT_validated | PGT_partial); + if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != + x) ) break; /* We cleared the 'valid bit' so we do the clean up. 
*/ rc = _put_final_page_type(page, x, preemptible, ptpg); @@ -2807,7 +2785,6 @@ static int _put_page_type(struct page_info *page, bool preemptible, } } - static int _get_page_type(struct page_info *page, unsigned long type, bool preemptible) { @@ -2817,14 +2794,14 @@ static int _get_page_type(struct page_info *page, unsigned long type, ASSERT(!(type & ~(PGT_type_mask | PGT_pae_xen_l2))); ASSERT(!in_irq()); - for ( ; ; ) + for ( ;; ) { - x = y; + x = y; nx = x + 1; if ( unlikely((nx & PGT_count_mask) == 0) ) { gdprintk(XENLOG_WARNING, - "Type count overflow on mfn %"PRI_mfn"\n", + "Type count overflow on mfn %" PRI_mfn "\n", mfn_x(page_to_mfn(page))); return -EINVAL; } @@ -2833,7 +2810,7 @@ static int _get_page_type(struct page_info *page, unsigned long type, struct domain *d = page_get_owner(page); if ( d && shadow_mode_enabled(d) ) - shadow_prepare_page_type_change(d, page, type); + shadow_prepare_page_type_change(d, page, type); ASSERT(!(x & PGT_pae_xen_l2)); if ( (x & PGT_type_mask) != type ) @@ -2872,7 +2849,7 @@ static int _get_page_type(struct page_info *page, unsigned long type, nx |= PGT_validated; } } - else if ( unlikely((x & (PGT_type_mask|PGT_pae_xen_l2)) != type) ) + else if ( unlikely((x & (PGT_type_mask | PGT_pae_xen_l2)) != type) ) { /* Don't log failure if it could be a recursive-mapping attempt. */ if ( ((x & PGT_type_mask) == PGT_l2_page_table) && @@ -2924,13 +2901,12 @@ static int _get_page_type(struct page_info *page, unsigned long type, mfn_t mfn = page_to_mfn(page); if ( (x & PGT_type_mask) == PGT_writable_page ) - iommu_ret = iommu_legacy_unmap(d, _dfn(mfn_x(mfn)), - PAGE_ORDER_4K); + iommu_ret = + iommu_legacy_unmap(d, _dfn(mfn_x(mfn)), PAGE_ORDER_4K); else if ( type == PGT_writable_page ) - iommu_ret = iommu_legacy_map(d, _dfn(mfn_x(mfn)), mfn, - PAGE_ORDER_4K, - IOMMUF_readable | - IOMMUF_writable); + iommu_ret = + iommu_legacy_map(d, _dfn(mfn_x(mfn)), mfn, PAGE_ORDER_4K, + IOMMUF_readable | IOMMUF_writable); if ( unlikely(iommu_ret) ) { @@ -2952,7 +2928,7 @@ static int _get_page_type(struct page_info *page, unsigned long type, rc = alloc_page_type(page, type, preemptible); } - out: +out: if ( (x & PGT_partial) && !(nx & PGT_partial) ) put_page(page); @@ -2995,8 +2971,8 @@ int put_old_guest_table(struct vcpu *v) if ( !v->arch.old_guest_table ) return 0; - switch ( rc = _put_page_type(v->arch.old_guest_table, true, - v->arch.old_guest_ptpg) ) + switch (rc = _put_page_type(v->arch.old_guest_table, true, + v->arch.old_guest_ptpg)) { case -EINTR: case -ERESTART: @@ -3082,12 +3058,11 @@ int new_guest_cr3(mfn_t mfn) l4_pgentry_t *pl4e = map_domain_page(gt_mfn); rc = mod_l4_entry(pl4e, - l4e_from_mfn(mfn, - (_PAGE_PRESENT | _PAGE_RW | - _PAGE_USER | _PAGE_ACCESSED)), + l4e_from_mfn(mfn, (_PAGE_PRESENT | _PAGE_RW | + _PAGE_USER | _PAGE_ACCESSED)), mfn_x(gt_mfn), 0, curr); unmap_domain_page(pl4e); - switch ( rc ) + switch (rc) { case 0: break; @@ -3123,7 +3098,7 @@ int new_guest_cr3(mfn_t mfn) } rc = get_page_and_type_from_mfn(mfn, PGT_root_page_table, d, 0, 1); - switch ( rc ) + switch (rc) { case 0: break; @@ -3153,7 +3128,7 @@ int new_guest_cr3(mfn_t mfn) if ( paging_mode_refcounts(d) ) put_page(page); else - switch ( rc = put_page_and_type_preemptible(page) ) + switch (rc = put_page_and_type_preemptible(page)) { case -EINTR: rc = -ERESTART; @@ -3176,8 +3151,9 @@ int new_guest_cr3(mfn_t mfn) } #ifdef CONFIG_PV -static int vcpumask_to_pcpumask( - struct domain *d, XEN_GUEST_HANDLE_PARAM(const_void) bmap, cpumask_t *pmask) +static int vcpumask_to_pcpumask(struct domain 
*d, + XEN_GUEST_HANDLE_PARAM(const_void) bmap, + cpumask_t *pmask) { unsigned int vcpu_id, vcpu_bias, offs; unsigned long vmask; @@ -3185,16 +3161,15 @@ static int vcpumask_to_pcpumask( bool is_native = !is_pv_32bit_domain(d); cpumask_clear(pmask); - for ( vmask = 0, offs = 0; ; ++offs ) + for ( vmask = 0, offs = 0;; ++offs ) { vcpu_bias = offs * (is_native ? BITS_PER_LONG : 32); if ( vcpu_bias >= d->max_vcpus ) return 0; - if ( unlikely(is_native ? - copy_from_guest_offset(&vmask, bmap, offs, 1) : - copy_from_guest_offset((unsigned int *)&vmask, bmap, - offs, 1)) ) + if ( unlikely(is_native ? copy_from_guest_offset(&vmask, bmap, offs, 1) + : copy_from_guest_offset((unsigned int *)&vmask, + bmap, offs, 1)) ) { cpumask_clear(pmask); return -EFAULT; @@ -3218,11 +3193,8 @@ static int vcpumask_to_pcpumask( } } -long do_mmuext_op( - XEN_GUEST_HANDLE_PARAM(mmuext_op_t) uops, - unsigned int count, - XEN_GUEST_HANDLE_PARAM(uint) pdone, - unsigned int foreigndom) +long do_mmuext_op(XEN_GUEST_HANDLE_PARAM(mmuext_op_t) uops, unsigned int count, + XEN_GUEST_HANDLE_PARAM(uint) pdone, unsigned int foreigndom) { struct mmuext_op op; unsigned long type; @@ -3235,9 +3207,8 @@ long do_mmuext_op( if ( unlikely(rc) ) { if ( likely(rc == -ERESTART) ) - rc = hypercall_create_continuation( - __HYPERVISOR_mmuext_op, "hihi", uops, count, pdone, - foreigndom); + rc = hypercall_create_continuation(__HYPERVISOR_mmuext_op, "hihi", + uops, count, pdone, foreigndom); return rc; } @@ -3295,7 +3266,7 @@ long do_mmuext_op( if ( is_hvm_domain(currd) ) { - switch ( op.cmd ) + switch (op.cmd) { case MMUEXT_PIN_L1_TABLE: case MMUEXT_PIN_L2_TABLE: @@ -3311,7 +3282,7 @@ long do_mmuext_op( rc = 0; - switch ( op.cmd ) + switch (op.cmd) { struct page_info *page; p2m_type_t p2mt; @@ -3355,8 +3326,8 @@ long do_mmuext_op( rc = -ERESTART; else if ( rc != -ERESTART ) gdprintk(XENLOG_WARNING, - "Error %d while pinning mfn %" PRI_mfn "\n", - rc, mfn_x(page_to_mfn(page))); + "Error %d while pinning mfn %" PRI_mfn "\n", rc, + mfn_x(page_to_mfn(page))); if ( page != curr->arch.old_guest_table ) put_page(page); break; @@ -3366,8 +3337,7 @@ long do_mmuext_op( if ( !rc && unlikely(test_and_set_bit(_PGT_pinned, &page->u.inuse.type_info)) ) { - gdprintk(XENLOG_WARNING, - "mfn %" PRI_mfn " already pinned\n", + gdprintk(XENLOG_WARNING, "mfn %" PRI_mfn " already pinned\n", mfn_x(page_to_mfn(page))); rc = -EINVAL; } @@ -3384,13 +3354,13 @@ long do_mmuext_op( bool drop_ref; spin_lock(&pg_owner->page_alloc_lock); - drop_ref = (pg_owner->is_dying && - test_and_clear_bit(_PGT_pinned, - &page->u.inuse.type_info)); + drop_ref = + (pg_owner->is_dying && + test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info)); spin_unlock(&pg_owner->page_alloc_lock); if ( drop_ref ) { - pin_drop: + pin_drop: if ( type == PGT_l1_page_table ) put_page_and_type(page); else @@ -3419,13 +3389,13 @@ long do_mmuext_op( if ( !test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) ) { put_page(page); - gdprintk(XENLOG_WARNING, - "mfn %" PRI_mfn " not pinned\n", op.arg1.mfn); + gdprintk(XENLOG_WARNING, "mfn %" PRI_mfn " not pinned\n", + op.arg1.mfn); rc = -EINVAL; break; } - switch ( rc = put_page_and_type_preemptible(page) ) + switch (rc = put_page_and_type_preemptible(page)) { case -EINTR: case -ERESTART: @@ -3452,7 +3422,8 @@ long do_mmuext_op( rc = new_guest_cr3(_mfn(op.arg1.mfn)); break; - case MMUEXT_NEW_USER_BASEPTR: { + case MMUEXT_NEW_USER_BASEPTR: + { unsigned long old_mfn; if ( unlikely(currd != pg_owner) ) @@ -3496,7 +3467,7 @@ long do_mmuext_op( { page = 
mfn_to_page(_mfn(old_mfn)); - switch ( rc = put_page_and_type_preemptible(page) ) + switch (rc = put_page_and_type_preemptible(page)) { case -EINTR: rc = -ERESTART; @@ -3535,10 +3506,10 @@ long do_mmuext_op( if ( unlikely(currd != pg_owner) ) rc = -EPERM; - else if ( unlikely(vcpumask_to_pcpumask(currd, - guest_handle_to_param(op.arg2.vcpumask, - const_void), - mask)) ) + else if ( unlikely(vcpumask_to_pcpumask( + currd, + guest_handle_to_param(op.arg2.vcpumask, const_void), + mask)) ) rc = -EINVAL; if ( unlikely(rc) ) break; @@ -3582,7 +3553,7 @@ long do_mmuext_op( cpumask_t *mask = this_cpu(scratch_cpumask); cpumask_clear(mask); - for_each_online_cpu(cpu) + for_each_online_cpu (cpu) if ( !cpumask_intersects(mask, per_cpu(cpu_sibling_mask, cpu)) ) __cpumask_set_cpu(cpu, mask); @@ -3632,8 +3603,8 @@ long do_mmuext_op( { if ( page ) put_page(page); - gdprintk(XENLOG_WARNING, - "Error clearing mfn %" PRI_mfn "\n", op.arg1.mfn); + gdprintk(XENLOG_WARNING, "Error clearing mfn %" PRI_mfn "\n", + op.arg1.mfn); rc = -EINVAL; break; } @@ -3650,8 +3621,8 @@ long do_mmuext_op( { struct page_info *src_page, *dst_page; - src_page = get_page_from_gfn(pg_owner, op.arg2.src_mfn, &p2mt, - P2M_ALLOC); + src_page = + get_page_from_gfn(pg_owner, op.arg2.src_mfn, &p2mt, P2M_ALLOC); if ( unlikely(p2mt != p2m_ram_rw) && src_page ) { put_page(src_page); @@ -3666,22 +3637,23 @@ long do_mmuext_op( break; } - dst_page = get_page_from_gfn(pg_owner, op.arg1.mfn, &p2mt, - P2M_ALLOC); + dst_page = + get_page_from_gfn(pg_owner, op.arg1.mfn, &p2mt, P2M_ALLOC); if ( unlikely(p2mt != p2m_ram_rw) && dst_page ) { put_page(dst_page); dst_page = NULL; } - rc = (dst_page && - get_page_type(dst_page, PGT_writable_page)) ? 0 : -EINVAL; + rc = (dst_page && get_page_type(dst_page, PGT_writable_page)) + ? 0 + : -EINVAL; if ( unlikely(rc) ) { put_page(src_page); if ( dst_page ) put_page(dst_page); - gdprintk(XENLOG_WARNING, - "Error copying to mfn %" PRI_mfn "\n", op.arg1.mfn); + gdprintk(XENLOG_WARNING, "Error copying to mfn %" PRI_mfn "\n", + op.arg1.mfn); break; } @@ -3705,7 +3677,7 @@ long do_mmuext_op( break; } - done: + done: if ( unlikely(rc) ) break; @@ -3713,9 +3685,9 @@ long do_mmuext_op( } if ( rc == -ERESTART ) - rc = hypercall_create_continuation( - __HYPERVISOR_mmuext_op, "hihi", - uops, (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom); + rc = hypercall_create_continuation(__HYPERVISOR_mmuext_op, "hihi", uops, + (count - i) | MMU_UPDATE_PREEMPTED, + pdone, foreigndom); else if ( curr->arch.old_guest_table ) { XEN_GUEST_HANDLE_PARAM(void) null; @@ -3727,9 +3699,8 @@ long do_mmuext_op( * our continuation, we pass this in place of "foreigndom", building * on the fact that this argument isn't needed anymore. 
*/ - rc = hypercall_create_continuation( - __HYPERVISOR_mmuext_op, "hihi", null, - MMU_UPDATE_PREEMPTED, null, rc); + rc = hypercall_create_continuation(__HYPERVISOR_mmuext_op, "hihi", null, + MMU_UPDATE_PREEMPTED, null, rc); } put_pg_owner(pg_owner); @@ -3746,11 +3717,9 @@ long do_mmuext_op( return rc; } -long do_mmu_update( - XEN_GUEST_HANDLE_PARAM(mmu_update_t) ureqs, - unsigned int count, - XEN_GUEST_HANDLE_PARAM(uint) pdone, - unsigned int foreigndom) +long do_mmu_update(XEN_GUEST_HANDLE_PARAM(mmu_update_t) ureqs, + unsigned int count, XEN_GUEST_HANDLE_PARAM(uint) pdone, + unsigned int foreigndom) { struct mmu_update req; void *va = NULL; @@ -3768,9 +3737,8 @@ long do_mmu_update( if ( unlikely(rc) ) { if ( likely(rc == -ERESTART) ) - rc = hypercall_create_continuation( - __HYPERVISOR_mmu_update, "hihi", ureqs, count, pdone, - foreigndom); + rc = hypercall_create_continuation(__HYPERVISOR_mmu_update, "hihi", + ureqs, count, pdone, foreigndom); return rc; } @@ -3831,9 +3799,9 @@ long do_mmu_update( break; } - cmd = req.ptr & (sizeof(l1_pgentry_t)-1); + cmd = req.ptr & (sizeof(l1_pgentry_t) - 1); - switch ( cmd ) + switch (cmd) { /* * MMU_NORMAL_PT_UPDATE: Normal update to any level of page table. @@ -3859,7 +3827,8 @@ long do_mmu_update( } if ( xsm_needed != xsm_checked ) { - rc = xsm_mmu_update(XSM_TARGET, d, pt_owner, pg_owner, xsm_needed); + rc = xsm_mmu_update(XSM_TARGET, d, pt_owner, pg_owner, + xsm_needed); if ( rc ) break; xsm_checked = xsm_needed; @@ -3898,11 +3867,11 @@ long do_mmu_update( if ( page_lock(page) ) { - switch ( page->u.inuse.type_info & PGT_type_mask ) + switch (page->u.inuse.type_info & PGT_type_mask) { case PGT_l1_page_table: - rc = mod_l1_entry(va, l1e_from_intpte(req.val), mfn, - cmd, v, pg_owner); + rc = mod_l1_entry(va, l1e_from_intpte(req.val), mfn, cmd, v, + pg_owner); break; case PGT_l2_page_table: @@ -3942,7 +3911,8 @@ long do_mmu_update( if ( (page->u.inuse.type_info & PGT_count_mask) > (1 + !!(page->u.inuse.type_info & PGT_pinned) + (pagetable_get_pfn(curr->arch.guest_table_user) == - mfn) + local_in_use) ) + mfn) + + local_in_use) ) sync_guest = true; } break; @@ -4022,8 +3992,8 @@ long do_mmu_update( if ( rc == -ERESTART ) rc = hypercall_create_continuation( - __HYPERVISOR_mmu_update, "hihi", - ureqs, (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom); + __HYPERVISOR_mmu_update, "hihi", ureqs, + (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom); else if ( curr->arch.old_guest_table ) { XEN_GUEST_HANDLE_PARAM(void) null; @@ -4035,9 +4005,9 @@ long do_mmu_update( * our continuation, we pass this in place of "foreigndom", building * on the fact that this argument isn't needed anymore. 
*/ - rc = hypercall_create_continuation( - __HYPERVISOR_mmu_update, "hihi", null, - MMU_UPDATE_PREEMPTED, null, rc); + rc = + hypercall_create_continuation(__HYPERVISOR_mmu_update, "hihi", null, + MMU_UPDATE_PREEMPTED, null, rc); } put_pg_owner(pg_owner); @@ -4061,7 +4031,7 @@ long do_mmu_update( perfc_add(num_page_updates, i); - out: +out: if ( pt_owner != d ) rcu_unlock_domain(pt_owner); @@ -4076,8 +4046,7 @@ long do_mmu_update( } #endif /* CONFIG_PV */ -int donate_page( - struct domain *d, struct page_info *page, unsigned int memflags) +int donate_page(struct domain *d, struct page_info *page, unsigned int memflags) { const struct domain *owner = dom_xen; @@ -4102,18 +4071,19 @@ int donate_page( page->count_info = PGC_allocated | 1; page_set_owner(page, d); - page_list_add_tail(page,&d->page_list); + page_list_add_tail(page, &d->page_list); spin_unlock(&d->page_alloc_lock); return 0; - fail: +fail: spin_unlock(&d->page_alloc_lock); - gdprintk(XENLOG_WARNING, "Bad donate mfn %" PRI_mfn + gdprintk(XENLOG_WARNING, + "Bad donate mfn %" PRI_mfn " to d%d (owner d%d) caf=%08lx taf=%" PRtype_info "\n", mfn_x(page_to_mfn(page)), d->domain_id, - owner ? owner->domain_id : DOMID_INVALID, - page->count_info, page->u.inuse.type_info); + owner ? owner->domain_id : DOMID_INVALID, page->count_info, + page->u.inuse.type_info); return -EINVAL; } @@ -4134,8 +4104,7 @@ int donate_page( * The caller should either call free_domheap_page() to free the * page, or assign_pages() to put it back on some domain's page list. */ -int steal_page( - struct domain *d, struct page_info *page, unsigned int memflags) +int steal_page(struct domain *d, struct page_info *page, unsigned int memflags) { unsigned long x, y; bool drop_dom_ref = false; @@ -4162,9 +4131,10 @@ int steal_page( y = page->count_info; do { x = y; - if ( (x & (PGC_count_mask|PGC_allocated)) != (2 | PGC_allocated) ) + if ( (x & (PGC_count_mask | PGC_allocated)) != (2 | PGC_allocated) ) goto fail_put; - y = cmpxchg(&page->count_info, x, x & ~(PGC_count_mask|PGC_allocated)); + y = cmpxchg(&page->count_info, x, + x & ~(PGC_count_mask | PGC_allocated)); } while ( y != x ); /* @@ -4192,8 +4162,8 @@ int steal_page( */ spin_lock(&d->page_alloc_lock); - BUG_ON(page->u.inuse.type_info & (PGT_count_mask | PGT_locked | - PGT_pinned)); + BUG_ON(page->u.inuse.type_info & + (PGT_count_mask | PGT_locked | PGT_pinned)); page->u.inuse.type_info = 0; page_set_owner(page, NULL); page_list_del(page, &d->page_list); @@ -4209,30 +4179,31 @@ int steal_page( return 0; - fail_put: +fail_put: put_page(page); - fail: - gdprintk(XENLOG_WARNING, "Bad steal mfn %" PRI_mfn +fail: + gdprintk(XENLOG_WARNING, + "Bad steal mfn %" PRI_mfn " from d%d (owner d%d) caf=%08lx taf=%" PRtype_info "\n", mfn_x(page_to_mfn(page)), d->domain_id, - owner ? owner->domain_id : DOMID_INVALID, - page->count_info, page->u.inuse.type_info); + owner ? 
owner->domain_id : DOMID_INVALID, page->count_info, + page->u.inuse.type_info); return rc; } #ifdef CONFIG_PV -static int __do_update_va_mapping( - unsigned long va, u64 val64, unsigned long flags, struct domain *pg_owner) +static int __do_update_va_mapping(unsigned long va, u64 val64, + unsigned long flags, struct domain *pg_owner) { - l1_pgentry_t val = l1e_from_intpte(val64); - struct vcpu *v = current; - struct domain *d = v->domain; + l1_pgentry_t val = l1e_from_intpte(val64); + struct vcpu *v = current; + struct domain *d = v->domain; struct page_info *gl1pg; - l1_pgentry_t *pl1e; - unsigned long bmap_ptr; - mfn_t gl1mfn; - cpumask_t *mask = NULL; - int rc; + l1_pgentry_t *pl1e; + unsigned long bmap_ptr; + mfn_t gl1mfn; + cpumask_t *mask = NULL; + int rc; perfc_incr(calls_to_update_va); @@ -4265,7 +4236,7 @@ static int __do_update_va_mapping( page_unlock(gl1pg); put_page(gl1pg); - out: +out: if ( pl1e ) unmap_domain_page(pl1e); @@ -4277,10 +4248,10 @@ static int __do_update_va_mapping( if ( rc ) return rc; - switch ( flags & UVMF_FLUSHTYPE_MASK ) + switch (flags & UVMF_FLUSHTYPE_MASK) { case UVMF_TLB_FLUSH: - switch ( (bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK) ) + switch ((bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK)) { case UVMF_LOCAL: flush_tlb_local(); @@ -4290,9 +4261,8 @@ static int __do_update_va_mapping( break; default: mask = this_cpu(scratch_cpumask); - rc = vcpumask_to_pcpumask(d, const_guest_handle_from_ptr(bmap_ptr, - void), - mask); + rc = vcpumask_to_pcpumask( + d, const_guest_handle_from_ptr(bmap_ptr, void), mask); break; } if ( mask ) @@ -4300,7 +4270,7 @@ static int __do_update_va_mapping( break; case UVMF_INVLPG: - switch ( (bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK) ) + switch ((bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK)) { case UVMF_LOCAL: paging_invlpg(v, va); @@ -4310,9 +4280,8 @@ static int __do_update_va_mapping( break; default: mask = this_cpu(scratch_cpumask); - rc = vcpumask_to_pcpumask(d, const_guest_handle_from_ptr(bmap_ptr, - void), - mask); + rc = vcpumask_to_pcpumask( + d, const_guest_handle_from_ptr(bmap_ptr, void), mask); break; } if ( mask ) @@ -4323,21 +4292,19 @@ static int __do_update_va_mapping( return rc; } -long do_update_va_mapping(unsigned long va, u64 val64, - unsigned long flags) +long do_update_va_mapping(unsigned long va, u64 val64, unsigned long flags) { int rc = __do_update_va_mapping(va, val64, flags, current->domain); if ( rc == -ERESTART ) - rc = hypercall_create_continuation( - __HYPERVISOR_update_va_mapping, "lll", va, val64, flags); + rc = hypercall_create_continuation(__HYPERVISOR_update_va_mapping, + "lll", va, val64, flags); return rc; } long do_update_va_mapping_otherdomain(unsigned long va, u64 val64, - unsigned long flags, - domid_t domid) + unsigned long flags, domid_t domid) { struct domain *pg_owner; int rc; @@ -4351,8 +4318,8 @@ long do_update_va_mapping_otherdomain(unsigned long va, u64 val64, if ( rc == -ERESTART ) rc = hypercall_create_continuation( - __HYPERVISOR_update_va_mapping_otherdomain, - "llli", va, val64, flags, domid); + __HYPERVISOR_update_va_mapping_otherdomain, "llli", va, val64, + flags, domid); return rc; } @@ -4360,19 +4327,19 @@ long do_update_va_mapping_otherdomain(unsigned long va, u64 val64, int compat_update_va_mapping(unsigned int va, uint32_t lo, uint32_t hi, unsigned int flags) { - int rc = __do_update_va_mapping(va, ((uint64_t)hi << 32) | lo, - flags, current->domain); + int rc = __do_update_va_mapping(va, ((uint64_t)hi << 32) | lo, flags, + current->domain); if ( rc == -ERESTART ) - rc = 
hypercall_create_continuation( - __HYPERVISOR_update_va_mapping, "iiii", va, lo, hi, flags); + rc = hypercall_create_continuation(__HYPERVISOR_update_va_mapping, + "iiii", va, lo, hi, flags); return rc; } -int compat_update_va_mapping_otherdomain(unsigned int va, - uint32_t lo, uint32_t hi, - unsigned int flags, domid_t domid) +int compat_update_va_mapping_otherdomain(unsigned int va, uint32_t lo, + uint32_t hi, unsigned int flags, + domid_t domid) { struct domain *pg_owner; int rc; @@ -4386,8 +4353,8 @@ int compat_update_va_mapping_otherdomain(unsigned int va, if ( rc == -ERESTART ) rc = hypercall_create_continuation( - __HYPERVISOR_update_va_mapping_otherdomain, - "iiiii", va, lo, hi, flags, domid); + __HYPERVISOR_update_va_mapping_otherdomain, "iiiii", va, lo, hi, + flags, domid); return rc; } @@ -4456,12 +4423,9 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p) return err || s > e ? err : _handle_iomem_range(s, e, p); } -int xenmem_add_to_physmap_one( - struct domain *d, - unsigned int space, - union xen_add_to_physmap_batch_extra extra, - unsigned long idx, - gfn_t gpfn) +int xenmem_add_to_physmap_one(struct domain *d, unsigned int space, + union xen_add_to_physmap_batch_extra extra, + unsigned long idx, gfn_t gpfn) { struct page_info *page = NULL; unsigned long gfn = 0; /* gcc ... */ @@ -4473,38 +4437,38 @@ int xenmem_add_to_physmap_one( if ( !paging_mode_translate(d) ) return -EACCES; - switch ( space ) + switch (space) { - case XENMAPSPACE_shared_info: - if ( idx == 0 ) - mfn = virt_to_mfn(d->shared_info); - break; - case XENMAPSPACE_grant_table: - rc = gnttab_map_frame(d, idx, gpfn, &mfn); - if ( rc ) - return rc; - break; - case XENMAPSPACE_gmfn: - { - p2m_type_t p2mt; + case XENMAPSPACE_shared_info: + if ( idx == 0 ) + mfn = virt_to_mfn(d->shared_info); + break; + case XENMAPSPACE_grant_table: + rc = gnttab_map_frame(d, idx, gpfn, &mfn); + if ( rc ) + return rc; + break; + case XENMAPSPACE_gmfn: + { + p2m_type_t p2mt; - gfn = idx; - mfn = get_gfn_unshare(d, gfn, &p2mt); - /* If the page is still shared, exit early */ - if ( p2m_is_shared(p2mt) ) - { - put_gfn(d, gfn); - return -ENOMEM; - } - page = get_page_from_mfn(mfn, d); - if ( unlikely(!page) ) - mfn = INVALID_MFN; - break; + gfn = idx; + mfn = get_gfn_unshare(d, gfn, &p2mt); + /* If the page is still shared, exit early */ + if ( p2m_is_shared(p2mt) ) + { + put_gfn(d, gfn); + return -ENOMEM; } - case XENMAPSPACE_gmfn_foreign: - return p2m_add_foreign(d, idx, gfn_x(gpfn), extra.foreign_domid); - default: - break; + page = get_page_from_mfn(mfn, d); + if ( unlikely(!page) ) + mfn = INVALID_MFN; + break; + } + case XENMAPSPACE_gmfn_foreign: + return p2m_add_foreign(d, idx, gfn_x(gpfn), extra.foreign_domid); + default: + break; } if ( mfn_eq(mfn, INVALID_MFN) ) @@ -4519,7 +4483,8 @@ int xenmem_add_to_physmap_one( { if ( is_xen_heap_mfn(prev_mfn) ) /* Xen heap frames are simply unhooked from this phys slot. */ - rc = guest_physmap_remove_page(d, gpfn, _mfn(prev_mfn), PAGE_ORDER_4K); + rc = guest_physmap_remove_page(d, gpfn, _mfn(prev_mfn), + PAGE_ORDER_4K); else /* Normal domain memory is freed, to avoid leaking memory. */ rc = guest_remove_page(d, gfn_x(gpfn)); @@ -4545,7 +4510,7 @@ int xenmem_add_to_physmap_one( if ( !rc ) rc = guest_physmap_add_page(d, gpfn, mfn, PAGE_ORDER_4K); - put_both: +put_both: /* In the XENMAPSPACE_gmfn case, we took a ref of the gfn at the top. 
*/ if ( space == XENMAPSPACE_gmfn ) put_gfn(d, gfn); @@ -4556,14 +4521,13 @@ int xenmem_add_to_physmap_one( return rc; } -int arch_acquire_resource(struct domain *d, unsigned int type, - unsigned int id, unsigned long frame, - unsigned int nr_frames, xen_pfn_t mfn_list[], - unsigned int *flags) +int arch_acquire_resource(struct domain *d, unsigned int type, unsigned int id, + unsigned long frame, unsigned int nr_frames, + xen_pfn_t mfn_list[], unsigned int *flags) { int rc; - switch ( type ) + switch (type) { #ifdef CONFIG_HVM case XENMEM_resource_ioreq_server: @@ -4605,7 +4569,7 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) { int rc; - switch ( cmd ) + switch (cmd) { case XENMEM_set_memory_map: { @@ -4714,9 +4678,8 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) if ( s > ctxt.s ) { - rc = rangeset_report_ranges(current->domain->iomem_caps, - ctxt.s, s - 1, - handle_iomem_range, &ctxt); + rc = rangeset_report_ranges(current->domain->iomem_caps, ctxt.s, + s - 1, handle_iomem_range, &ctxt); if ( !rc ) rc = handle_iomem_range(s, s, &ctxt); if ( rc ) @@ -4752,11 +4715,10 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) case XENMEM_machphys_mapping: { - struct xen_machphys_mapping mapping = { - .v_start = MACH2PHYS_VIRT_START, - .v_end = MACH2PHYS_VIRT_END, - .max_mfn = MACH2PHYS_NR_ENTRIES - 1 - }; + struct xen_machphys_mapping mapping = {.v_start = MACH2PHYS_VIRT_START, + .v_end = MACH2PHYS_VIRT_END, + .max_mfn = + MACH2PHYS_NR_ENTRIES - 1}; if ( !mem_hotplug && is_hardware_domain(current->domain) ) mapping.max_mfn = max_page - 1; @@ -4802,19 +4764,19 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) if ( rc == -ERESTART ) { - rc = hypercall_create_continuation( - __HYPERVISOR_memory_op, "lh", cmd, arg); + rc = hypercall_create_continuation(__HYPERVISOR_memory_op, "lh", + cmd, arg); } else if ( rc >= 0 ) { p2m = p2m_get_hostp2m(d); - target.tot_pages = d->tot_pages; + target.tot_pages = d->tot_pages; target.pod_cache_pages = p2m->pod.count; - target.pod_entries = p2m->pod.entry_count; + target.pod_entries = p2m->pod.entry_count; if ( __copy_to_guest(arg, &target, 1) ) { - rc= -EFAULT; + rc = -EFAULT; goto pod_target_out_unlock; } } @@ -4832,12 +4794,9 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) return 0; } -int mmio_ro_emulated_write( - enum x86_segment seg, - unsigned long offset, - void *p_data, - unsigned int bytes, - struct x86_emulate_ctxt *ctxt) +int mmio_ro_emulated_write(enum x86_segment seg, unsigned long offset, + void *p_data, unsigned int bytes, + struct x86_emulate_ctxt *ctxt) { struct mmio_ro_emulate_ctxt *mmio_ro_ctxt = ctxt->data; @@ -4846,19 +4805,16 @@ int mmio_ro_emulated_write( offset != mmio_ro_ctxt->cr2 ) { gdprintk(XENLOG_WARNING, "bad access (cr2=%lx, addr=%lx, bytes=%u)\n", - mmio_ro_ctxt->cr2, offset, bytes); + mmio_ro_ctxt->cr2, offset, bytes); return X86EMUL_UNHANDLEABLE; } return X86EMUL_OKAY; } -int mmcfg_intercept_write( - enum x86_segment seg, - unsigned long offset, - void *p_data, - unsigned int bytes, - struct x86_emulate_ctxt *ctxt) +int mmcfg_intercept_write(enum x86_segment seg, unsigned long offset, + void *p_data, unsigned int bytes, + struct x86_emulate_ctxt *ctxt) { struct mmio_ro_emulate_ctxt *mmio_ctxt = ctxt->data; @@ -4870,13 +4826,13 @@ int mmcfg_intercept_write( offset != mmio_ctxt->cr2 ) { gdprintk(XENLOG_WARNING, "bad write (cr2=%lx, addr=%lx, bytes=%u)\n", - mmio_ctxt->cr2, offset, bytes); + 
mmio_ctxt->cr2, offset, bytes); return X86EMUL_UNHANDLEABLE; } offset &= 0xfff; - if ( pci_conf_write_intercept(mmio_ctxt->seg, mmio_ctxt->bdf, - offset, bytes, p_data) >= 0 ) + if ( pci_conf_write_intercept(mmio_ctxt->seg, mmio_ctxt->bdf, offset, bytes, + p_data) >= 0 ) pci_mmcfg_write(mmio_ctxt->seg, PCI_BUS(mmio_ctxt->bdf), PCI_DEVFN2(mmio_ctxt->bdf), offset, bytes, *(uint32_t *)p_data); @@ -5004,42 +4960,38 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned long v) } /* Convert to from superpage-mapping flags for map_pages_to_xen(). */ -#define l1f_to_lNf(f) (((f) & _PAGE_PRESENT) ? ((f) | _PAGE_PSE) : (f)) -#define lNf_to_l1f(f) (((f) & _PAGE_PRESENT) ? ((f) & ~_PAGE_PSE) : (f)) +#define l1f_to_lNf(f) (((f)&_PAGE_PRESENT) ? ((f) | _PAGE_PSE) : (f)) +#define lNf_to_l1f(f) (((f)&_PAGE_PRESENT) ? ((f) & ~_PAGE_PSE) : (f)) /* * map_pages_to_xen() can be called with interrupts disabled during * early bootstrap. In this case it is safe to use flush_area_local() * and avoid locking because only the local CPU is online. */ -#define flush_area(v,f) (!local_irq_is_enabled() ? \ - flush_area_local((const void *)v, f) : \ - flush_area_all((const void *)v, f)) - -int map_pages_to_xen( - unsigned long virt, - mfn_t mfn, - unsigned long nr_mfns, - unsigned int flags) +#define flush_area(v, f) \ + (!local_irq_is_enabled() ? flush_area_local((const void *)v, f) \ + : flush_area_all((const void *)v, f)) + +int map_pages_to_xen(unsigned long virt, mfn_t mfn, unsigned long nr_mfns, + unsigned int flags) { bool locking = system_state > SYS_STATE_boot; l2_pgentry_t *pl2e, ol2e; l1_pgentry_t *pl1e, ol1e; - unsigned int i; - -#define flush_flags(oldf) do { \ - unsigned int o_ = (oldf); \ - if ( (o_) & _PAGE_GLOBAL ) \ - flush_flags |= FLUSH_TLB_GLOBAL; \ - if ( (flags & _PAGE_PRESENT) && \ - (((o_) ^ flags) & PAGE_CACHE_ATTRS) ) \ - { \ - flush_flags |= FLUSH_CACHE; \ - if ( virt >= DIRECTMAP_VIRT_START && \ - virt < HYPERVISOR_VIRT_END ) \ - flush_flags |= FLUSH_VA_VALID; \ - } \ -} while (0) + unsigned int i; + +#define flush_flags(oldf) \ + do { \ + unsigned int o_ = (oldf); \ + if ( (o_)&_PAGE_GLOBAL ) \ + flush_flags |= FLUSH_TLB_GLOBAL; \ + if ( (flags & _PAGE_PRESENT) && (((o_) ^ flags) & PAGE_CACHE_ATTRS) ) \ + { \ + flush_flags |= FLUSH_CACHE; \ + if ( virt >= DIRECTMAP_VIRT_START && virt < HYPERVISOR_VIRT_END ) \ + flush_flags |= FLUSH_VA_VALID; \ + } \ + } while ( 0 ) while ( nr_mfns != 0 ) { @@ -5099,9 +5051,9 @@ int map_pages_to_xen( } } - virt += 1UL << L3_PAGETABLE_SHIFT; + virt += 1UL << L3_PAGETABLE_SHIFT; if ( !mfn_eq(mfn, INVALID_MFN) ) - mfn = mfn_add(mfn, 1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)); + mfn = mfn_add(mfn, 1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)); nr_mfns -= 1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT); continue; } @@ -5113,19 +5065,21 @@ int map_pages_to_xen( FLUSH_TLB | FLUSH_ORDER(2 * PAGETABLE_ORDER); /* Skip this PTE if there is no change. */ - if ( ((l3e_get_pfn(ol3e) & ~(L2_PAGETABLE_ENTRIES * - L1_PAGETABLE_ENTRIES - 1)) + - (l2_table_offset(virt) << PAGETABLE_ORDER) + - l1_table_offset(virt) == mfn_x(mfn)) && + if ( ((l3e_get_pfn(ol3e) & + ~(L2_PAGETABLE_ENTRIES * L1_PAGETABLE_ENTRIES - 1)) + + (l2_table_offset(virt) << PAGETABLE_ORDER) + + l1_table_offset(virt) == + mfn_x(mfn)) && ((lNf_to_l1f(l3e_get_flags(ol3e)) ^ flags) & - ~(_PAGE_ACCESSED|_PAGE_DIRTY)) == 0 ) + ~(_PAGE_ACCESSED | _PAGE_DIRTY)) == 0 ) { /* We can skip to end of L3 superpage if we got a match. 
*/ i = (1u << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)) - - (mfn_x(mfn) & ((1 << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)) - 1)); + (mfn_x(mfn) & + ((1 << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)) - 1)); if ( i > nr_mfns ) i = nr_mfns; - virt += i << PAGE_SHIFT; + virt += i << PAGE_SHIFT; if ( !mfn_eq(mfn, INVALID_MFN) ) mfn = mfn_add(mfn, i); nr_mfns -= i; @@ -5137,10 +5091,9 @@ int map_pages_to_xen( return -ENOMEM; for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ ) - l2e_write(pl2e + i, - l2e_from_pfn(l3e_get_pfn(ol3e) + - (i << PAGETABLE_ORDER), - l3e_get_flags(ol3e))); + l2e_write(pl2e + i, l2e_from_pfn(l3e_get_pfn(ol3e) + + (i << PAGETABLE_ORDER), + l3e_get_flags(ol3e))); if ( l3e_get_flags(ol3e) & _PAGE_GLOBAL ) flush_flags |= FLUSH_TLB_GLOBAL; @@ -5150,8 +5103,8 @@ int map_pages_to_xen( if ( (l3e_get_flags(*pl3e) & _PAGE_PRESENT) && (l3e_get_flags(*pl3e) & _PAGE_PSE) ) { - l3e_write_atomic(pl3e, l3e_from_mfn(virt_to_mfn(pl2e), - __PAGE_HYPERVISOR)); + l3e_write_atomic( + pl3e, l3e_from_mfn(virt_to_mfn(pl2e), __PAGE_HYPERVISOR)); pl2e = NULL; } if ( locking ) @@ -5168,7 +5121,7 @@ int map_pages_to_xen( if ( ((((virt >> PAGE_SHIFT) | mfn_x(mfn)) & ((1u << PAGETABLE_ORDER) - 1)) == 0) && (nr_mfns >= (1u << PAGETABLE_ORDER)) && - !(flags & (_PAGE_PAT|MAP_SMALL_PAGES)) ) + !(flags & (_PAGE_PAT | MAP_SMALL_PAGES)) ) { /* Super-page mapping. */ ol2e = *pl2e; @@ -5194,7 +5147,7 @@ int map_pages_to_xen( } } - virt += 1UL << L2_PAGETABLE_SHIFT; + virt += 1UL << L2_PAGETABLE_SHIFT; if ( !mfn_eq(mfn, INVALID_MFN) ) mfn = mfn_add(mfn, 1UL << PAGETABLE_ORDER); nr_mfns -= 1UL << PAGETABLE_ORDER; @@ -5217,14 +5170,15 @@ int map_pages_to_xen( if ( (((l2e_get_pfn(*pl2e) & ~(L1_PAGETABLE_ENTRIES - 1)) + l1_table_offset(virt)) == mfn_x(mfn)) && (((lNf_to_l1f(l2e_get_flags(*pl2e)) ^ flags) & - ~(_PAGE_ACCESSED|_PAGE_DIRTY)) == 0) ) + ~(_PAGE_ACCESSED | _PAGE_DIRTY)) == 0) ) { /* We can skip to end of L2 superpage if we got a match. 
*/ i = (1u << (L2_PAGETABLE_SHIFT - PAGE_SHIFT)) - - (mfn_x(mfn) & ((1u << (L2_PAGETABLE_SHIFT - PAGE_SHIFT)) - 1)); + (mfn_x(mfn) & + ((1u << (L2_PAGETABLE_SHIFT - PAGE_SHIFT)) - 1)); if ( i > nr_mfns ) i = nr_mfns; - virt += i << L1_PAGETABLE_SHIFT; + virt += i << L1_PAGETABLE_SHIFT; if ( !mfn_eq(mfn, INVALID_MFN) ) mfn = mfn_add(mfn, i); nr_mfns -= i; @@ -5259,8 +5213,8 @@ int map_pages_to_xen( free_xen_pagetable(pl1e); } - pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(virt); - ol1e = *pl1e; + pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(virt); + ol1e = *pl1e; l1e_write_atomic(pl1e, l1e_from_mfn(mfn, flags)); if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) ) { @@ -5270,15 +5224,14 @@ int map_pages_to_xen( flush_area(virt, flush_flags); } - virt += 1UL << L1_PAGETABLE_SHIFT; + virt += 1UL << L1_PAGETABLE_SHIFT; if ( !mfn_eq(mfn, INVALID_MFN) ) mfn = mfn_add(mfn, 1UL); nr_mfns -= 1UL; if ( (flags == PAGE_HYPERVISOR) && - ((nr_mfns == 0) || - ((((virt >> PAGE_SHIFT) | mfn_x(mfn)) & - ((1u << PAGETABLE_ORDER) - 1)) == 0)) ) + ((nr_mfns == 0) || ((((virt >> PAGE_SHIFT) | mfn_x(mfn)) & + ((1u << PAGETABLE_ORDER) - 1)) == 0)) ) { unsigned long base_mfn; @@ -5312,13 +5265,12 @@ int map_pages_to_xen( break; if ( i == L1_PAGETABLE_ENTRIES ) { - l2e_write_atomic(pl2e, l2e_from_pfn(base_mfn, - l1f_to_lNf(flags))); + l2e_write_atomic(pl2e, + l2e_from_pfn(base_mfn, l1f_to_lNf(flags))); if ( locking ) spin_unlock(&map_pgdir_lock); flush_area(virt - PAGE_SIZE, - FLUSH_TLB_GLOBAL | - FLUSH_ORDER(PAGETABLE_ORDER)); + FLUSH_TLB_GLOBAL | FLUSH_ORDER(PAGETABLE_ORDER)); free_xen_pagetable(l2e_to_l1e(ol2e)); } else if ( locking ) @@ -5326,9 +5278,8 @@ int map_pages_to_xen( } } - check_l3: - if ( cpu_has_page1gb && - (flags == PAGE_HYPERVISOR) && + check_l3: + if ( cpu_has_page1gb && (flags == PAGE_HYPERVISOR) && ((nr_mfns == 0) || !(((virt >> PAGE_SHIFT) | mfn_x(mfn)) & ((1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)) - 1))) ) @@ -5344,7 +5295,7 @@ int map_pages_to_xen( * concurrent paging structure modifications on other CPUs. */ if ( !(l3e_get_flags(ol3e) & _PAGE_PRESENT) || - (l3e_get_flags(ol3e) & _PAGE_PSE) ) + (l3e_get_flags(ol3e) & _PAGE_PSE) ) { if ( locking ) spin_unlock(&map_pgdir_lock); @@ -5352,8 +5303,8 @@ int map_pages_to_xen( } pl2e = l3e_to_l2e(ol3e); - base_mfn = l2e_get_pfn(*pl2e) & ~(L2_PAGETABLE_ENTRIES * - L1_PAGETABLE_ENTRIES - 1); + base_mfn = l2e_get_pfn(*pl2e) & + ~(L2_PAGETABLE_ENTRIES * L1_PAGETABLE_ENTRIES - 1); for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++, pl2e++ ) if ( (l2e_get_pfn(*pl2e) != (base_mfn + (i << PAGETABLE_ORDER))) || @@ -5361,13 +5312,12 @@ int map_pages_to_xen( break; if ( i == L2_PAGETABLE_ENTRIES ) { - l3e_write_atomic(pl3e, l3e_from_pfn(base_mfn, - l1f_to_lNf(flags))); + l3e_write_atomic(pl3e, + l3e_from_pfn(base_mfn, l1f_to_lNf(flags))); if ( locking ) spin_unlock(&map_pgdir_lock); flush_area(virt - PAGE_SIZE, - FLUSH_TLB_GLOBAL | - FLUSH_ORDER(2*PAGETABLE_ORDER)); + FLUSH_TLB_GLOBAL | FLUSH_ORDER(2 * PAGETABLE_ORDER)); free_xen_pagetable(l3e_to_l2e(ol3e)); } else if ( locking ) @@ -5402,11 +5352,11 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf) bool locking = system_state > SYS_STATE_boot; l2_pgentry_t *pl2e; l1_pgentry_t *pl1e; - unsigned int i; + unsigned int i; unsigned long v = s; /* Set of valid PTE bits which may be altered. 
*/ -#define FLAGS_MASK (_PAGE_NX|_PAGE_RW|_PAGE_PRESENT) +#define FLAGS_MASK (_PAGE_NX | _PAGE_RW | _PAGE_PRESENT) nf &= FLAGS_MASK; ASSERT(IS_ALIGNED(s, PAGE_SIZE)); @@ -5428,14 +5378,16 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf) if ( l3e_get_flags(*pl3e) & _PAGE_PSE ) { - if ( l2_table_offset(v) == 0 && - l1_table_offset(v) == 0 && + if ( l2_table_offset(v) == 0 && l1_table_offset(v) == 0 && ((e - v) >= (1UL << L3_PAGETABLE_SHIFT)) ) { /* PAGE1GB: whole superpage is modified. */ - l3_pgentry_t nl3e = !(nf & _PAGE_PRESENT) ? l3e_empty() - : l3e_from_pfn(l3e_get_pfn(*pl3e), - (l3e_get_flags(*pl3e) & ~FLAGS_MASK) | nf); + l3_pgentry_t nl3e = + !(nf & _PAGE_PRESENT) + ? l3e_empty() + : l3e_from_pfn(l3e_get_pfn(*pl3e), + (l3e_get_flags(*pl3e) & ~FLAGS_MASK) | + nf); l3e_write_atomic(pl3e, nl3e); v += 1UL << L3_PAGETABLE_SHIFT; @@ -5447,17 +5399,16 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf) if ( !pl2e ) return -ENOMEM; for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ ) - l2e_write(pl2e + i, - l2e_from_pfn(l3e_get_pfn(*pl3e) + - (i << PAGETABLE_ORDER), - l3e_get_flags(*pl3e))); + l2e_write(pl2e + i, l2e_from_pfn(l3e_get_pfn(*pl3e) + + (i << PAGETABLE_ORDER), + l3e_get_flags(*pl3e))); if ( locking ) spin_lock(&map_pgdir_lock); if ( (l3e_get_flags(*pl3e) & _PAGE_PRESENT) && (l3e_get_flags(*pl3e) & _PAGE_PSE) ) { - l3e_write_atomic(pl3e, l3e_from_mfn(virt_to_mfn(pl2e), - __PAGE_HYPERVISOR)); + l3e_write_atomic( + pl3e, l3e_from_mfn(virt_to_mfn(pl2e), __PAGE_HYPERVISOR)); pl2e = NULL; } if ( locking ) @@ -5485,12 +5436,15 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf) if ( l2e_get_flags(*pl2e) & _PAGE_PSE ) { if ( (l1_table_offset(v) == 0) && - ((e-v) >= (1UL << L2_PAGETABLE_SHIFT)) ) + ((e - v) >= (1UL << L2_PAGETABLE_SHIFT)) ) { /* PSE: whole superpage is modified. */ - l2_pgentry_t nl2e = !(nf & _PAGE_PRESENT) ? l2e_empty() - : l2e_from_pfn(l2e_get_pfn(*pl2e), - (l2e_get_flags(*pl2e) & ~FLAGS_MASK) | nf); + l2_pgentry_t nl2e = + !(nf & _PAGE_PRESENT) + ? l2e_empty() + : l2e_from_pfn(l2e_get_pfn(*pl2e), + (l2e_get_flags(*pl2e) & ~FLAGS_MASK) | + nf); l2e_write_atomic(pl2e, nl2e); v += 1UL << L2_PAGETABLE_SHIFT; @@ -5535,9 +5489,11 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf) if ( !(l1e_get_flags(*pl1e) & _PAGE_PRESENT) ) ASSERT(!(nf & _PAGE_PRESENT)); - nl1e = !(nf & _PAGE_PRESENT) ? l1e_empty() - : l1e_from_pfn(l1e_get_pfn(*pl1e), - (l1e_get_flags(*pl1e) & ~FLAGS_MASK) | nf); + nl1e = + !(nf & _PAGE_PRESENT) + ? l1e_empty() + : l1e_from_pfn(l1e_get_pfn(*pl1e), + (l1e_get_flags(*pl1e) & ~FLAGS_MASK) | nf); l1e_write_atomic(pl1e, nl1e); v += PAGE_SIZE; @@ -5546,7 +5502,8 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf) * If we are not destroying mappings, or not done with the L2E, * skip the empty&free check. */ - if ( (nf & _PAGE_PRESENT) || ((v != e) && (l1_table_offset(v) != 0)) ) + if ( (nf & _PAGE_PRESENT) || + ((v != e) && (l1_table_offset(v) != 0)) ) continue; if ( locking ) spin_lock(&map_pgdir_lock); @@ -5586,7 +5543,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf) spin_unlock(&map_pgdir_lock); } - check_l3: + check_l3: /* * If we are not destroying mappings, or not done with the L3E, * skip the empty&free check. @@ -5602,7 +5559,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf) * concurrent paging structure modifications on other CPUs. 
*/ if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) || - (l3e_get_flags(*pl3e) & _PAGE_PSE) ) + (l3e_get_flags(*pl3e) & _PAGE_PSE) ) { if ( locking ) spin_unlock(&map_pgdir_lock); @@ -5639,8 +5596,8 @@ int destroy_xen_mappings(unsigned long s, unsigned long e) return modify_xen_mappings(s, e, _PAGE_NONE); } -void __set_fixmap( - enum fixed_addresses idx, unsigned long mfn, unsigned long flags) +void __set_fixmap(enum fixed_addresses idx, unsigned long mfn, + unsigned long flags) { BUG_ON(idx >= __end_of_fixed_addresses); map_pages_to_xen(__fix_to_virt(idx), _mfn(mfn), 1, flags); @@ -5666,7 +5623,8 @@ void __iomem *ioremap(paddr_t pa, size_t len) unsigned int offs = pa & (PAGE_SIZE - 1); unsigned int nr = PFN_UP(offs + len); - va = __vmap(&mfn, nr, 1, 1, PAGE_HYPERVISOR_UCMINUS, VMAP_DEFAULT) + offs; + va = __vmap(&mfn, nr, 1, 1, PAGE_HYPERVISOR_UCMINUS, VMAP_DEFAULT) + + offs; } return (void __force __iomem *)va; @@ -5858,7 +5816,7 @@ void free_perdomain_mappings(struct domain *d) l3tab = __map_domain_page(d->arch.perdomain_l3_pg); - for ( i = 0; i < PERDOMAIN_SLOTS; ++i) + for ( i = 0; i < PERDOMAIN_SLOTS; ++i ) if ( l3e_get_flags(l3tab[i]) & _PAGE_PRESENT ) { struct page_info *l2pg = l3e_get_page(l3tab[i]); @@ -5940,8 +5898,9 @@ void memguard_guard_stack(void *p) void memguard_unguard_stack(void *p) { - memguard_unguard_range(p + IST_MAX * PAGE_SIZE, - STACK_SIZE - PRIMARY_STACK_SIZE - IST_MAX * PAGE_SIZE); + memguard_unguard_range(p + IST_MAX * PAGE_SIZE, STACK_SIZE - + PRIMARY_STACK_SIZE - + IST_MAX * PAGE_SIZE); } bool memguard_is_stack_guard_page(unsigned long addr) @@ -5955,25 +5914,22 @@ bool memguard_is_stack_guard_page(unsigned long addr) void arch_dump_shared_mem_info(void) { printk("Shared frames %u -- Saved frames %u\n", - mem_sharing_get_nr_shared_mfns(), - mem_sharing_get_nr_saved_mfns()); + mem_sharing_get_nr_shared_mfns(), mem_sharing_get_nr_saved_mfns()); } -const struct platform_bad_page *__init get_platform_badpages(unsigned int *array_size) +const struct platform_bad_page *__init +get_platform_badpages(unsigned int *array_size) { u32 igd_id; static const struct platform_bad_page __initconst snb_bad_pages[] = { - { .mfn = 0x20050000 >> PAGE_SHIFT }, - { .mfn = 0x20110000 >> PAGE_SHIFT }, - { .mfn = 0x20130000 >> PAGE_SHIFT }, - { .mfn = 0x20138000 >> PAGE_SHIFT }, - { .mfn = 0x40004000 >> PAGE_SHIFT }, + {.mfn = 0x20050000 >> PAGE_SHIFT}, {.mfn = 0x20110000 >> PAGE_SHIFT}, + {.mfn = 0x20130000 >> PAGE_SHIFT}, {.mfn = 0x20138000 >> PAGE_SHIFT}, + {.mfn = 0x40004000 >> PAGE_SHIFT}, }; static const struct platform_bad_page __initconst hle_bad_page = { - .mfn = 0x40000000 >> PAGE_SHIFT, .order = 10 - }; + .mfn = 0x40000000 >> PAGE_SHIFT, .order = 10}; - switch ( cpuid_eax(1) & 0x000f3ff0 ) + switch (cpuid_eax(1) & 0x000f3ff0) { case 0x000406e0: /* erratum SKL167 */ case 0x00050650: /* erratum SKZ63 */ diff --git a/xen/arch/x86/mm/altp2m.c b/xen/arch/x86/mm/altp2m.c index 50768f2547..c20ebe6269 100644 --- a/xen/arch/x86/mm/altp2m.c +++ b/xen/arch/x86/mm/altp2m.c @@ -20,8 +20,7 @@ #include #include -void -altp2m_vcpu_initialise(struct vcpu *v) +void altp2m_vcpu_initialise(struct vcpu *v) { if ( v != current ) vcpu_pause(v); @@ -35,8 +34,7 @@ altp2m_vcpu_initialise(struct vcpu *v) vcpu_unpause(v); } -void -altp2m_vcpu_destroy(struct vcpu *v) +void altp2m_vcpu_destroy(struct vcpu *v) { struct p2m_domain *p2m; @@ -99,7 +97,7 @@ int altp2m_vcpu_enable_ve(struct vcpu *v, gfn_t gfn) return 0; - err: +err: put_page(pg); return rc; diff --git a/xen/arch/x86/mm/guest_walk.c 
b/xen/arch/x86/mm/guest_walk.c index f67aeda3d0..b63e74aee4 100644 --- a/xen/arch/x86/mm/guest_walk.c +++ b/xen/arch/x86/mm/guest_walk.c @@ -79,10 +79,8 @@ static bool set_ad_bits(guest_intpte_t *guest_p, guest_intpte_t *walk_p, * If a translation exists, the accumulated access rights are compared to the * requested walk, to see whether the access is permitted. */ -bool -guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, - unsigned long va, walk_t *gw, - uint32_t walk, mfn_t top_mfn, void *top_map) +bool guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, unsigned long va, + walk_t *gw, uint32_t walk, mfn_t top_mfn, void *top_map) { struct domain *d = v->domain; p2m_type_t p2mt; @@ -97,7 +95,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, p2m_query_t qt = P2M_ALLOC | P2M_UNSHARE; #define AR_ACCUM_AND (_PAGE_USER | _PAGE_RW) -#define AR_ACCUM_OR (_PAGE_NX_BIT) +#define AR_ACCUM_OR (_PAGE_NX_BIT) /* Start with all AND bits set, all OR bits clear. */ uint32_t ar, ar_and = ~0u, ar_or = 0; @@ -108,7 +106,8 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, * inputs to a guest walk, but a whole load of code currently passes in * other PFEC_ constants. */ - walk &= (PFEC_implicit | PFEC_insn_fetch | PFEC_user_mode | PFEC_write_access); + walk &= + (PFEC_implicit | PFEC_insn_fetch | PFEC_user_mode | PFEC_write_access); /* Only implicit supervisor data accesses exist. */ ASSERT(!(walk & PFEC_implicit) || @@ -132,7 +131,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, /* Get the l4e from the top level table and check its flags*/ gw->l4mfn = top_mfn; - l4p = (guest_l4e_t *) top_map; + l4p = (guest_l4e_t *)top_map; gw->l4e = l4p[guest_l4_table_offset(va)]; gflags = guest_l4e_get_flags(gw->l4e); if ( !(gflags & _PAGE_PRESENT) ) @@ -147,14 +146,10 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, /* Accumulate l4e access rights. */ ar_and &= gflags; - ar_or |= gflags; + ar_or |= gflags; /* Map the l3 table */ - l3p = map_domain_gfn(p2m, - guest_l4e_get_gfn(gw->l4e), - &gw->l3mfn, - &p2mt, - qt, + l3p = map_domain_gfn(p2m, guest_l4e_get_gfn(gw->l4e), &gw->l3mfn, &p2mt, qt, &rc); if ( l3p == NULL ) { @@ -177,7 +172,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, /* Accumulate l3e access rights. */ ar_and &= gflags; - ar_or |= gflags; + ar_or |= gflags; if ( gflags & _PAGE_PSE ) { @@ -190,15 +185,15 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, * Grant full access in the l1e, since all the guest entry's * access controls are enforced in the l3e. */ - int flags = (_PAGE_PRESENT|_PAGE_USER|_PAGE_RW| - _PAGE_ACCESSED|_PAGE_DIRTY); + int flags = (_PAGE_PRESENT | _PAGE_USER | _PAGE_RW | _PAGE_ACCESSED | + _PAGE_DIRTY); /* * Import protection key and cache-control bits. Note that _PAGE_PAT * is actually _PAGE_PSE, and it is always set. We will clear it in * case _PAGE_PSE_PAT (bit 12, i.e. first bit of gfn) is clear. */ - flags |= (guest_l3e_get_flags(gw->l3e) - & (_PAGE_PKEY_BITS|_PAGE_PAT|_PAGE_PWT|_PAGE_PCD)); + flags |= (guest_l3e_get_flags(gw->l3e) & + (_PAGE_PKEY_BITS | _PAGE_PAT | _PAGE_PWT | _PAGE_PCD)); if ( !(gfn_x(start) & 1) ) /* _PAGE_PSE_PAT not set: remove _PAGE_PAT from flags. */ flags &= ~_PAGE_PAT; @@ -215,7 +210,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, #else /* PAE only... 
*/ /* Get the l3e and check its flag */ - gw->l3e = ((guest_l3e_t *) top_map)[guest_l3_table_offset(va)]; + gw->l3e = ((guest_l3e_t *)top_map)[guest_l3_table_offset(va)]; gflags = guest_l3e_get_flags(gw->l3e); if ( !(gflags & _PAGE_PRESENT) ) goto out; @@ -229,11 +224,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, #endif /* PAE or 64... */ /* Map the l2 table */ - l2p = map_domain_gfn(p2m, - guest_l3e_get_gfn(gw->l3e), - &gw->l2mfn, - &p2mt, - qt, + l2p = map_domain_gfn(p2m, guest_l3e_get_gfn(gw->l3e), &gw->l2mfn, &p2mt, qt, &rc); if ( l2p == NULL ) { @@ -248,7 +239,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, /* Get l2e from the top level table */ gw->l2mfn = top_mfn; - l2p = (guest_l2e_t *) top_map; + l2p = (guest_l2e_t *)top_map; gw->l2e = l2p[guest_l2_table_offset(va)]; #endif /* All levels... */ @@ -276,7 +267,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, /* Accumulate l2e access rights. */ ar_and &= gflags; - ar_or |= gflags; + ar_or |= gflags; if ( gflags & _PAGE_PSE ) { @@ -295,15 +286,15 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, * Grant full access in the l1e, since all the guest entry's * access controls are enforced in the shadow l2e. */ - int flags = (_PAGE_PRESENT|_PAGE_USER|_PAGE_RW| - _PAGE_ACCESSED|_PAGE_DIRTY); + int flags = (_PAGE_PRESENT | _PAGE_USER | _PAGE_RW | _PAGE_ACCESSED | + _PAGE_DIRTY); /* * Import protection key and cache-control bits. Note that _PAGE_PAT * is actually _PAGE_PSE, and it is always set. We will clear it in * case _PAGE_PSE_PAT (bit 12, i.e. first bit of gfn) is clear. */ - flags |= (guest_l2e_get_flags(gw->l2e) - & (_PAGE_PKEY_BITS|_PAGE_PAT|_PAGE_PWT|_PAGE_PCD)); + flags |= (guest_l2e_get_flags(gw->l2e) & + (_PAGE_PKEY_BITS | _PAGE_PAT | _PAGE_PWT | _PAGE_PCD)); if ( !(gfn_x(start) & 1) ) /* _PAGE_PSE_PAT not set: remove _PAGE_PAT from flags. */ flags &= ~_PAGE_PAT; @@ -312,7 +303,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, start = _gfn((gfn_x(start) & ~GUEST_L2_GFN_MASK) + guest_l1_table_offset(va)); #if GUEST_PAGING_LEVELS == 2 - /* Wider than 32 bits if PSE36 superpage. */ + /* Wider than 32 bits if PSE36 superpage. */ gw->el1e = (gfn_x(start) << PAGE_SHIFT) | flags; #else gw->l1e = guest_l1e_from_gfn(start, flags); @@ -323,11 +314,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, } /* Map the l1 table */ - l1p = map_domain_gfn(p2m, - guest_l2e_get_gfn(gw->l2e), - &gw->l1mfn, - &p2mt, - qt, + l1p = map_domain_gfn(p2m, guest_l2e_get_gfn(gw->l2e), &gw->l1mfn, &p2mt, qt, &rc); if ( l1p == NULL ) { @@ -348,11 +335,11 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, /* Accumulate l1e access rights. */ ar_and &= gflags; - ar_or |= gflags; + ar_or |= gflags; leaf_level = 1; - leaf: +leaf: gw->pfec |= PFEC_page_present; /* @@ -436,7 +423,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, * get set whenever a lower-level PT is used, at least some hardware * walkers behave this way. */ - switch ( leaf_level ) + switch (leaf_level) { default: ASSERT_UNREACHABLE(); @@ -451,7 +438,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, if ( set_ad_bits(&l2p[guest_l2_table_offset(va)].l2, &gw->l2e.l2, (walk & PFEC_write_access) && leaf_level == 2) ) paging_mark_dirty(d, gw->l2mfn); - /* Fallthrough */ + /* Fallthrough */ #if GUEST_PAGING_LEVELS == 4 /* 64-bit only... 
*/ case 3: if ( set_ad_bits(&l3p[guest_l3_table_offset(va)].l3, &gw->l3e.l3, @@ -464,7 +451,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, #endif } - out: +out: #if GUEST_PAGING_LEVELS == 4 if ( l3p ) { diff --git a/xen/arch/x86/mm/hap/guest_walk.c b/xen/arch/x86/mm/hap/guest_walk.c index 3b8ee2efce..e8ba41199c 100644 --- a/xen/arch/x86/mm/hap/guest_walk.c +++ b/xen/arch/x86/mm/hap/guest_walk.c @@ -39,16 +39,20 @@ asm(".file \"" __OBJECT_FILE__ "\""); #include #include -unsigned long hap_gva_to_gfn(GUEST_PAGING_LEVELS)( - struct vcpu *v, struct p2m_domain *p2m, unsigned long gva, uint32_t *pfec) +unsigned long hap_gva_to_gfn(GUEST_PAGING_LEVELS)(struct vcpu *v, + struct p2m_domain *p2m, + unsigned long gva, + uint32_t *pfec) { unsigned long cr3 = v->arch.hvm.guest_cr[3]; return hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(v, p2m, cr3, gva, pfec, NULL); } -unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)( - struct vcpu *v, struct p2m_domain *p2m, unsigned long cr3, - paddr_t ga, uint32_t *pfec, unsigned int *page_order) +unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(struct vcpu *v, + struct p2m_domain *p2m, + unsigned long cr3, + paddr_t ga, uint32_t *pfec, + unsigned int *page_order) { bool walk_ok; mfn_t top_mfn; @@ -126,7 +130,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)( *pfec = gw.pfec; - out_tweak_pfec: +out_tweak_pfec: /* * SDM Intel 64 Volume 3, Chapter Paging, PAGE-FAULT EXCEPTIONS: * The PFEC_insn_fetch flag is set only when NX or SMEP are enabled. @@ -137,7 +141,6 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)( return gfn_x(INVALID_GFN); } - /* * Local variables: * mode: C diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c index 412a442b6a..e7d5618c8e 100644 --- a/xen/arch/x86/mm/hap/hap.c +++ b/xen/arch/x86/mm/hap/hap.c @@ -56,8 +56,7 @@ * page's p2m type looking for pages that have been made writable. */ -int hap_track_dirty_vram(struct domain *d, - unsigned long begin_pfn, +int hap_track_dirty_vram(struct domain *d, unsigned long begin_pfn, unsigned long nr, XEN_GUEST_HANDLE_PARAM(void) guest_dirty_bitmap) { @@ -108,15 +107,15 @@ int hap_track_dirty_vram(struct domain *d, paging_unlock(d); if ( oend > ostart ) - p2m_change_type_range(d, ostart, oend, - p2m_ram_logdirty, p2m_ram_rw); + p2m_change_type_range(d, ostart, oend, p2m_ram_logdirty, + p2m_ram_rw); /* * Switch vram to log dirty mode, either by setting l1e entries of * P2M table to be read-only, or via hardware-assisted log-dirty. */ - p2m_change_type_range(d, begin_pfn, begin_pfn + nr, - p2m_ram_rw, p2m_ram_logdirty); + p2m_change_type_range(d, begin_pfn, begin_pfn + nr, p2m_ram_rw, + p2m_ram_logdirty); flush_tlb_mask(d->dirty_cpumask); @@ -271,7 +270,7 @@ static struct page_info *hap_alloc_p2m_page(struct domain *d) { struct page_info *pg; - /* This is called both from the p2m code (which never holds the + /* This is called both from the p2m code (which never holds the * paging lock) and the log-dirty code (which always does). */ paging_lock_recursive(d); pg = hap_alloc(d); @@ -297,18 +296,18 @@ static void hap_free_p2m_page(struct domain *d, struct page_info *pg) { struct domain *owner = page_get_owner(pg); - /* This is called both from the p2m code (which never holds the + /* This is called both from the p2m code (which never holds the * paging lock) and the log-dirty code (which always does). */ paging_lock_recursive(d); /* Should still have no owner and count zero. 
*/ if ( owner || (pg->count_info & PGC_count_mask) ) { - printk(XENLOG_WARNING - "d%d: Odd p2m page %"PRI_mfn" d=%d c=%lx t=%"PRtype_info"\n", + printk(XENLOG_WARNING "d%d: Odd p2m page %" PRI_mfn + " d=%d c=%lx t=%" PRtype_info "\n", d->domain_id, mfn_x(page_to_mfn(pg)), - owner ? owner->domain_id : DOMID_INVALID, - pg->count_info, pg->u.inuse.type_info); + owner ? owner->domain_id : DOMID_INVALID, pg->count_info, + pg->u.inuse.type_info); WARN(); pg->count_info &= ~PGC_count_mask; page_set_owner(pg, NULL); @@ -321,14 +320,13 @@ static void hap_free_p2m_page(struct domain *d, struct page_info *pg) } /* Return the size of the pool, rounded up to the nearest MB */ -static unsigned int -hap_get_allocation(struct domain *d) +static unsigned int hap_get_allocation(struct domain *d) { - unsigned int pg = d->arch.paging.hap.total_pages - + d->arch.paging.hap.p2m_pages; + unsigned int pg = + d->arch.paging.hap.total_pages + d->arch.paging.hap.p2m_pages; - return ((pg >> (20 - PAGE_SHIFT)) - + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0)); + return ((pg >> (20 - PAGE_SHIFT)) + + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0)); } /* Set the pool of pages to the required number of pages. @@ -344,7 +342,7 @@ int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted) else pages -= d->arch.paging.hap.p2m_pages; - for ( ; ; ) + for ( ;; ) { if ( d->arch.paging.hap.total_pages < pages ) { @@ -407,13 +405,13 @@ static mfn_t hap_make_monitor_table(struct vcpu *v) return m4mfn; - oom: +oom: printk(XENLOG_G_ERR "out of memory building monitor pagetable\n"); domain_crash(d); return INVALID_MFN; } -static void hap_destroy_monitor_table(struct vcpu* v, mfn_t mmfn) +static void hap_destroy_monitor_table(struct vcpu *v, mfn_t mmfn) { struct domain *d = v->domain; @@ -427,9 +425,9 @@ static void hap_destroy_monitor_table(struct vcpu* v, mfn_t mmfn) void hap_domain_init(struct domain *d) { static const struct log_dirty_ops hap_ops = { - .enable = hap_enable_log_dirty, + .enable = hap_enable_log_dirty, .disable = hap_disable_log_dirty, - .clean = hap_clean_dirty_bitmap, + .clean = hap_clean_dirty_bitmap, }; INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist); @@ -473,10 +471,11 @@ int hap_enable(struct domain *d, u32 mode) goto out; } - for (i = 0; i < MAX_NESTEDP2M; i++) { + for ( i = 0; i < MAX_NESTEDP2M; i++ ) + { rv = p2m_alloc_table(d->arch.nested_p2m[i]); if ( rv != 0 ) - goto out; + goto out; } if ( hvm_altp2m_supported() ) @@ -495,7 +494,7 @@ int hap_enable(struct domain *d, u32 mode) { rv = p2m_alloc_table(d->arch.altp2m_p2m[i]); if ( rv != 0 ) - goto out; + goto out; } d->arch.altp2m_active = 0; @@ -504,7 +503,7 @@ int hap_enable(struct domain *d, u32 mode) /* Now let other users see the new mode */ d->arch.paging.mode = mode | PG_HAP_enable; - out: +out: domain_unpause(d); return rv; } @@ -528,7 +527,8 @@ void hap_final_teardown(struct domain *d) } /* Destroy nestedp2m's first */ - for (i = 0; i < MAX_NESTEDP2M; i++) { + for ( i = 0; i < MAX_NESTEDP2M; i++ ) + { p2m_teardown(d->arch.nested_p2m[i]); } @@ -556,7 +556,7 @@ void hap_teardown(struct domain *d, bool *preempted) if ( paging_mode_enabled(d) ) { /* release the monitor table held by each vcpu */ - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { if ( paging_get_hostmode(v) && paging_mode_external(d) ) { @@ -592,7 +592,7 @@ int hap_domctl(struct domain *d, struct xen_domctl_shadow_op *sc, int rc; bool preempted = false; - switch ( sc->op ) + switch (sc->op) { case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION: paging_lock(d); @@ -669,13 
+669,14 @@ static void hap_update_cr3(struct vcpu *v, int do_locking, bool noflush) hvm_update_guest_cr3(v, noflush); } -const struct paging_mode * -hap_paging_get_mode(struct vcpu *v) +const struct paging_mode *hap_paging_get_mode(struct vcpu *v) { - return (!hvm_paging_enabled(v) ? &hap_paging_real_mode : - hvm_long_mode_active(v) ? &hap_paging_long_mode : - hvm_pae_enabled(v) ? &hap_paging_pae_mode : - &hap_paging_protected_mode); + return (!hvm_paging_enabled(v) + ? &hap_paging_real_mode + : hvm_long_mode_active(v) + ? &hap_paging_long_mode + : hvm_pae_enabled(v) ? &hap_paging_pae_mode + : &hap_paging_protected_mode); } static void hap_update_paging_modes(struct vcpu *v) @@ -685,7 +686,7 @@ static void hap_update_paging_modes(struct vcpu *v) p2m_type_t t; /* We hold onto the cr3 as it may be modified later, and - * we need to respect lock ordering. No need for + * we need to respect lock ordering. No need for * checks here as they are performed by vmx_load_pdptrs * (the potential user of the cr3) */ (void)get_gfn(d, cr3_gfn, &t); @@ -708,9 +709,9 @@ static void hap_update_paging_modes(struct vcpu *v) put_gfn(d, cr3_gfn); } -static int -hap_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, l1_pgentry_t *p, - l1_pgentry_t new, unsigned int level) +static int hap_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, + l1_pgentry_t *p, l1_pgentry_t new, + unsigned int level) { struct domain *d = p2m->domain; uint32_t old_flags; @@ -725,15 +726,17 @@ hap_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, l1_pgentry_t *p, paging_lock(d); old_flags = l1e_get_flags(*p); - if ( nestedhvm_enabled(d) && (old_flags & _PAGE_PRESENT) - && !p2m_get_hostp2m(d)->defer_nested_flush ) { + if ( nestedhvm_enabled(d) && (old_flags & _PAGE_PRESENT) && + !p2m_get_hostp2m(d)->defer_nested_flush ) + { /* We are replacing a valid entry so we need to flush nested p2ms, * unless the only change is an increase in access rights. */ mfn_t omfn = l1e_get_mfn(*p); mfn_t nmfn = l1e_get_mfn(new); - flush_nestedp2m = !(mfn_eq(omfn, nmfn) - && perms_strictly_increased(old_flags, l1e_get_flags(new)) ); + flush_nestedp2m = + !(mfn_eq(omfn, nmfn) && + perms_strictly_increased(old_flags, l1e_get_flags(new))); } rc = p2m_entry_modify(p2m, p2m_flags_to_type(l1e_get_flags(new)), @@ -757,15 +760,18 @@ hap_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, l1_pgentry_t *p, return 0; } -static unsigned long hap_gva_to_gfn_real_mode( - struct vcpu *v, struct p2m_domain *p2m, unsigned long gva, uint32_t *pfec) +static unsigned long hap_gva_to_gfn_real_mode(struct vcpu *v, + struct p2m_domain *p2m, + unsigned long gva, uint32_t *pfec) { return ((paddr_t)gva >> PAGE_SHIFT); } -static unsigned long hap_p2m_ga_to_gfn_real_mode( - struct vcpu *v, struct p2m_domain *p2m, unsigned long cr3, - paddr_t ga, uint32_t *pfec, unsigned int *page_order) +static unsigned long hap_p2m_ga_to_gfn_real_mode(struct vcpu *v, + struct p2m_domain *p2m, + unsigned long cr3, paddr_t ga, + uint32_t *pfec, + unsigned int *page_order) { if ( page_order ) *page_order = PAGE_ORDER_4K; @@ -774,48 +780,44 @@ static unsigned long hap_p2m_ga_to_gfn_real_mode( /* Entry points into this mode of the hap code. 
*/ static const struct paging_mode hap_paging_real_mode = { - .page_fault = hap_page_fault, - .invlpg = hap_invlpg, - .gva_to_gfn = hap_gva_to_gfn_real_mode, - .p2m_ga_to_gfn = hap_p2m_ga_to_gfn_real_mode, - .update_cr3 = hap_update_cr3, - .update_paging_modes = hap_update_paging_modes, - .write_p2m_entry = hap_write_p2m_entry, - .guest_levels = 1 -}; + .page_fault = hap_page_fault, + .invlpg = hap_invlpg, + .gva_to_gfn = hap_gva_to_gfn_real_mode, + .p2m_ga_to_gfn = hap_p2m_ga_to_gfn_real_mode, + .update_cr3 = hap_update_cr3, + .update_paging_modes = hap_update_paging_modes, + .write_p2m_entry = hap_write_p2m_entry, + .guest_levels = 1}; static const struct paging_mode hap_paging_protected_mode = { - .page_fault = hap_page_fault, - .invlpg = hap_invlpg, - .gva_to_gfn = hap_gva_to_gfn_2_levels, - .p2m_ga_to_gfn = hap_p2m_ga_to_gfn_2_levels, - .update_cr3 = hap_update_cr3, - .update_paging_modes = hap_update_paging_modes, - .write_p2m_entry = hap_write_p2m_entry, - .guest_levels = 2 -}; + .page_fault = hap_page_fault, + .invlpg = hap_invlpg, + .gva_to_gfn = hap_gva_to_gfn_2_levels, + .p2m_ga_to_gfn = hap_p2m_ga_to_gfn_2_levels, + .update_cr3 = hap_update_cr3, + .update_paging_modes = hap_update_paging_modes, + .write_p2m_entry = hap_write_p2m_entry, + .guest_levels = 2}; static const struct paging_mode hap_paging_pae_mode = { - .page_fault = hap_page_fault, - .invlpg = hap_invlpg, - .gva_to_gfn = hap_gva_to_gfn_3_levels, - .p2m_ga_to_gfn = hap_p2m_ga_to_gfn_3_levels, - .update_cr3 = hap_update_cr3, - .update_paging_modes = hap_update_paging_modes, - .write_p2m_entry = hap_write_p2m_entry, - .guest_levels = 3 -}; + .page_fault = hap_page_fault, + .invlpg = hap_invlpg, + .gva_to_gfn = hap_gva_to_gfn_3_levels, + .p2m_ga_to_gfn = hap_p2m_ga_to_gfn_3_levels, + .update_cr3 = hap_update_cr3, + .update_paging_modes = hap_update_paging_modes, + .write_p2m_entry = hap_write_p2m_entry, + .guest_levels = 3}; static const struct paging_mode hap_paging_long_mode = { - .page_fault = hap_page_fault, - .invlpg = hap_invlpg, - .gva_to_gfn = hap_gva_to_gfn_4_levels, - .p2m_ga_to_gfn = hap_p2m_ga_to_gfn_4_levels, - .update_cr3 = hap_update_cr3, - .update_paging_modes = hap_update_paging_modes, - .write_p2m_entry = hap_write_p2m_entry, - .guest_levels = 4 -}; + .page_fault = hap_page_fault, + .invlpg = hap_invlpg, + .gva_to_gfn = hap_gva_to_gfn_4_levels, + .p2m_ga_to_gfn = hap_p2m_ga_to_gfn_4_levels, + .update_cr3 = hap_update_cr3, + .update_paging_modes = hap_update_paging_modes, + .write_p2m_entry = hap_write_p2m_entry, + .guest_levels = 4}; /* * Local variables: diff --git a/xen/arch/x86/mm/hap/nested_ept.c b/xen/arch/x86/mm/hap/nested_ept.c index 1738df69f6..5720e44001 100644 --- a/xen/arch/x86/mm/hap/nested_ept.c +++ b/xen/arch/x86/mm/hap/nested_ept.c @@ -35,20 +35,19 @@ #include /* Must reserved bits in all level entries */ -#define EPT_MUST_RSV_BITS (((1ull << PADDR_BITS) - 1) & \ - ~((1ull << paddr_bits) - 1)) +#define EPT_MUST_RSV_BITS \ + (((1ull << PADDR_BITS) - 1) & ~((1ull << paddr_bits) - 1)) -#define NEPT_CAP_BITS \ - (VMX_EPT_INVEPT_ALL_CONTEXT | VMX_EPT_INVEPT_SINGLE_CONTEXT | \ - VMX_EPT_INVEPT_INSTRUCTION | VMX_EPT_SUPERPAGE_1GB | \ - VMX_EPT_SUPERPAGE_2MB | VMX_EPT_MEMORY_TYPE_WB | \ - VMX_EPT_MEMORY_TYPE_UC | VMX_EPT_WALK_LENGTH_4_SUPPORTED | \ - VMX_EPT_EXEC_ONLY_SUPPORTED) +#define NEPT_CAP_BITS \ + (VMX_EPT_INVEPT_ALL_CONTEXT | VMX_EPT_INVEPT_SINGLE_CONTEXT | \ + VMX_EPT_INVEPT_INSTRUCTION | VMX_EPT_SUPERPAGE_1GB | \ + VMX_EPT_SUPERPAGE_2MB | VMX_EPT_MEMORY_TYPE_WB | 
VMX_EPT_MEMORY_TYPE_UC | \ + VMX_EPT_WALK_LENGTH_4_SUPPORTED | VMX_EPT_EXEC_ONLY_SUPPORTED) -#define NVPID_CAP_BITS \ - (VMX_VPID_INVVPID_INSTRUCTION | VMX_VPID_INVVPID_INDIVIDUAL_ADDR | \ - VMX_VPID_INVVPID_SINGLE_CONTEXT | VMX_VPID_INVVPID_ALL_CONTEXT | \ - VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL) +#define NVPID_CAP_BITS \ + (VMX_VPID_INVVPID_INSTRUCTION | VMX_VPID_INVVPID_INDIVIDUAL_ADDR | \ + VMX_VPID_INVVPID_SINGLE_CONTEXT | VMX_VPID_INVVPID_ALL_CONTEXT | \ + VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL) #define NEPT_1G_ENTRY_FLAG (1 << 11) #define NEPT_2M_ENTRY_FLAG (1 << 10) @@ -63,13 +62,13 @@ static bool_t nept_rsv_bits_check(ept_entry_t e, uint32_t level) { uint64_t rsv_bits = EPT_MUST_RSV_BITS; - switch ( level ) + switch (level) { case 1: break; case 2 ... 3: if ( nept_sp_entry(e) ) - rsv_bits |= ((1ull << (9 * (level - 1))) - 1) << PAGE_SHIFT; + rsv_bits |= ((1ull << (9 * (level - 1))) - 1) << PAGE_SHIFT; else rsv_bits |= EPTE_EMT_MASK | EPTE_IGMT_MASK; break; @@ -77,7 +76,7 @@ static bool_t nept_rsv_bits_check(ept_entry_t e, uint32_t level) rsv_bits |= EPTE_EMT_MASK | EPTE_IGMT_MASK | EPTE_SUPER_PAGE_MASK; break; default: - gdprintk(XENLOG_ERR,"Unsupported EPT paging level: %d\n", level); + gdprintk(XENLOG_ERR, "Unsupported EPT paging level: %d\n", level); BUG(); break; } @@ -141,19 +140,18 @@ static bool_t nept_rwx_bits_check(ept_entry_t e) /* nept's misconfiguration check */ static bool_t nept_misconfiguration_check(ept_entry_t e, uint32_t level) { - return nept_rsv_bits_check(e, level) || - nept_emt_bits_check(e, level) || + return nept_rsv_bits_check(e, level) || nept_emt_bits_check(e, level) || nept_rwx_bits_check(e); } static int ept_lvl_table_offset(unsigned long gpa, int lvl) { - return (gpa >> (EPT_L4_PAGETABLE_SHIFT -(4 - lvl) * 9)) & + return (gpa >> (EPT_L4_PAGETABLE_SHIFT - (4 - lvl) * 9)) & (EPT_PAGETABLE_ENTRIES - 1); } -static uint32_t -nept_walk_tables(struct vcpu *v, unsigned long l2ga, ept_walk_t *gw) +static uint32_t nept_walk_tables(struct vcpu *v, unsigned long l2ga, + ept_walk_t *gw) { int lvl; p2m_type_t p2mt; @@ -166,7 +164,7 @@ nept_walk_tables(struct vcpu *v, unsigned long l2ga, ept_walk_t *gw) memset(gw, 0, sizeof(*gw)); - for (lvl = 4; lvl > 0; lvl--) + for ( lvl = 4; lvl > 0; lvl-- ) { lxp = map_domain_gfn(p2m, base_gfn, &lxmfn, &p2mt, P2M_ALLOC, &rc); if ( !lxp ) @@ -185,13 +183,13 @@ nept_walk_tables(struct vcpu *v, unsigned long l2ga, ept_walk_t *gw) { /* Generate a fake l1 table entry so callers don't all * have to understand superpages. */ - unsigned long gfn_lvl_mask = (1ull << ((lvl - 1) * 9)) - 1; + unsigned long gfn_lvl_mask = (1ull << ((lvl - 1) * 9)) - 1; gfn_t start = _gfn(gw->lxe[lvl].mfn); /* Increment the pfn by the right number of 4k pages. */ start = _gfn((gfn_x(start) & ~gfn_lvl_mask) + - ((l2ga >> PAGE_SHIFT) & gfn_lvl_mask)); + ((l2ga >> PAGE_SHIFT) & gfn_lvl_mask)); gflags = (gw->lxe[lvl].epte & EPTE_FLAG_MASK) | - (lvl == 3 ? NEPT_1G_ENTRY_FLAG: NEPT_2M_ENTRY_FLAG); + (lvl == 3 ? 
NEPT_1G_ENTRY_FLAG : NEPT_2M_ENTRY_FLAG); gw->lxe[0].epte = (gfn_x(start) << PAGE_SHIFT) | gflags; goto done; } @@ -215,7 +213,7 @@ map_err: } /* fall through to misconfig error */ misconfig_err: - ret = EPT_TRANSLATE_MISCONFIG; + ret = EPT_TRANSLATE_MISCONFIG; goto out; non_present: @@ -227,10 +225,10 @@ out: /* Translate a L2 guest address to L1 gpa via L1 EPT paging structure */ -int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga, - unsigned int *page_order, uint32_t rwx_acc, - unsigned long *l1gfn, uint8_t *p2m_acc, - uint64_t *exit_qual, uint32_t *exit_reason) +int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga, unsigned int *page_order, + uint32_t rwx_acc, unsigned long *l1gfn, + uint8_t *p2m_acc, uint64_t *exit_qual, + uint32_t *exit_reason) { uint32_t rc, rwx_bits = 0; ept_walk_t gw; @@ -239,7 +237,7 @@ int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga, *l1gfn = gfn_x(INVALID_GFN); rc = nept_walk_tables(v, l2ga, &gw); - switch ( rc ) + switch (rc) { case EPT_TRANSLATE_SUCCEED: if ( likely(gw.lxe[0].epte & NEPT_2M_ENTRY_FLAG) ) @@ -254,9 +252,9 @@ int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga, gw.lxe[1].epte & EPTE_RWX_MASK; *page_order = 0; } - else if ( gw.lxe[0].epte & NEPT_1G_ENTRY_FLAG ) + else if ( gw.lxe[0].epte & NEPT_1G_ENTRY_FLAG ) { - rwx_bits = gw.lxe[4].epte & gw.lxe[3].epte & EPTE_RWX_MASK; + rwx_bits = gw.lxe[4].epte & gw.lxe[3].epte & EPTE_RWX_MASK; *page_order = 18; } else diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c index abe5958a52..fe4fc0b2f5 100644 --- a/xen/arch/x86/mm/hap/nested_hap.c +++ b/xen/arch/x86/mm/hap/nested_hap.c @@ -33,8 +33,8 @@ #include "private.h" -/* AlGORITHM for NESTED PAGE FAULT - * +/* AlGORITHM for NESTED PAGE FAULT + * * NOTATION * Levels: L0, L1, L2 * Guests: L1 guest, L2 guest @@ -44,7 +44,7 @@ * On L0, when #NPF happens, the handler function should do: * hap_page_fault(GPA) * { - * 1. If #NPF is from L1 guest, then we crash the guest VM (same as old + * 1. If #NPF is from L1 guest, then we crash the guest VM (same as old * code) * 2. If #NPF is from L2 guest, then we continue from (3) * 3. Get np2m base from L1 guest. Map np2m base into L0 hypervisor address @@ -54,26 +54,25 @@ * back to L1 guest and * re-launch L1 guest (L1 guest will either treat this #NPF as MMIO, * or fix its p2m table for L2 guest) - * 6. - if present, then we will get the a new translated value L1-GPA + * 6. - if present, then we will get the a new translated value L1-GPA * (points to L1 machine memory) * 7. * Use L1-GPA to walk L0 P2M table * 8. - if not present, then crash the guest (should not happen) - * 9. - if present, then we get a new translated value MPA + * 9. - if present, then we get a new translated value MPA * (points to real machine memory) - * 10. * Finally, use GPA and MPA to walk nested_p2m + * 10. * Finally, use GPA and MPA to walk nested_p2m * and fix the bits. 
* } - * + * */ - /********************************************/ /* NESTED VIRT P2M FUNCTIONS */ /********************************************/ -int -nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, - l1_pgentry_t *p, l1_pgentry_t new, unsigned int level) +int nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, + l1_pgentry_t *p, l1_pgentry_t new, + unsigned int level) { struct domain *d = p2m->domain; uint32_t old_flags; @@ -83,7 +82,7 @@ nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, old_flags = l1e_get_flags(*p); safe_write_pte(p, new); - if (old_flags & _PAGE_PRESENT) + if ( old_flags & _PAGE_PRESENT ) flush_tlb_mask(p2m->dirty_cpumask); paging_unlock(d); @@ -94,10 +93,10 @@ nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, /********************************************/ /* NESTED VIRT FUNCTIONS */ /********************************************/ -static void -nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m, - paddr_t L2_gpa, paddr_t L0_gpa, - unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma) +static void nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m, + paddr_t L2_gpa, paddr_t L0_gpa, + unsigned int page_order, p2m_type_t p2mt, + p2m_access_t p2ma) { int rc = 0; unsigned long gfn, mask; @@ -120,7 +119,7 @@ nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m, if ( rc ) { gdprintk(XENLOG_ERR, - "failed to set entry for %#"PRIx64" -> %#"PRIx64" rc:%d\n", + "failed to set entry for %#" PRIx64 " -> %#" PRIx64 " rc:%d\n", L2_gpa, L0_gpa, rc); domain_crash(p2m->domain); } @@ -130,34 +129,32 @@ nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m, * walk is successful, the translated value is returned in * L1_gpa. The result value tells what to do next. */ -int -nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, - unsigned int *page_order, uint8_t *p2m_acc, - bool_t access_r, bool_t access_w, bool_t access_x) +int nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, + unsigned int *page_order, uint8_t *p2m_acc, + bool_t access_r, bool_t access_w, bool_t access_x) { ASSERT(hvm_funcs.nhvm_hap_walk_L1_p2m); - return hvm_funcs.nhvm_hap_walk_L1_p2m(v, L2_gpa, L1_gpa, page_order, - p2m_acc, access_r, access_w, access_x); + return hvm_funcs.nhvm_hap_walk_L1_p2m( + v, L2_gpa, L1_gpa, page_order, p2m_acc, access_r, access_w, access_x); } - /* This function uses L1_gpa to walk the P2M table in L0 hypervisor. If the - * walk is successful, the translated value is returned in L0_gpa. The return + * walk is successful, the translated value is returned in L0_gpa. The return * value tells the upper level what to do. 
*/ -static int -nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa, - p2m_type_t *p2mt, p2m_access_t *p2ma, - unsigned int *page_order, - bool_t access_r, bool_t access_w, bool_t access_x) +static int nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, + paddr_t *L0_gpa, p2m_type_t *p2mt, + p2m_access_t *p2ma, unsigned int *page_order, + bool_t access_r, bool_t access_w, + bool_t access_x) { mfn_t mfn; int rc; /* walk L0 P2M table */ - mfn = get_gfn_type_access(p2m, L1_gpa >> PAGE_SHIFT, p2mt, p2ma, - 0, page_order); + mfn = get_gfn_type_access(p2m, L1_gpa >> PAGE_SHIFT, p2mt, p2ma, 0, + page_order); rc = NESTEDHVM_PAGEFAULT_DIRECT_MMIO; if ( *p2mt == p2m_mmio_direct ) @@ -189,9 +186,9 @@ out: * * Returns: */ -int -nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa, - bool_t access_r, bool_t access_w, bool_t access_x) +int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa, + bool_t access_r, bool_t access_w, + bool_t access_x) { int rv; paddr_t L1_gpa, L0_gpa; @@ -206,10 +203,11 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa, /* walk the L1 P2M table */ rv = nestedhap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21, &p2ma_21, - access_r, access_w, access_x); + access_r, access_w, access_x); /* let caller to handle these two cases */ - switch (rv) { + switch (rv) + { case NESTEDHVM_PAGEFAULT_INJECT: case NESTEDHVM_PAGEFAULT_RETRY: case NESTEDHVM_PAGEFAULT_L1_ERROR: @@ -222,12 +220,12 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa, } /* ==> we have to walk L0 P2M */ - rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa, - &p2mt_10, &p2ma_10, &page_order_10, - access_r, access_w, access_x); + rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa, &p2mt_10, &p2ma_10, + &page_order_10, access_r, access_w, access_x); /* let upper level caller to handle these two cases */ - switch (rv) { + switch (rv) + { case NESTEDHVM_PAGEFAULT_INJECT: return rv; case NESTEDHVM_PAGEFAULT_L0_ERROR: @@ -249,7 +247,7 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa, ASSERT(p2ma_10 <= p2m_access_n2rwx); /*NOTE: if assert fails, needs to handle new access type here */ - switch ( p2ma_10 ) + switch (p2ma_10) { case p2m_access_n ... p2m_access_rwx: break; @@ -269,8 +267,8 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa, /* fix p2m_get_pagetable(nested_p2m) */ nested_p2m = p2m_get_nestedp2m_locked(v); - nestedhap_fix_p2m(v, nested_p2m, *L2_gpa, L0_gpa, page_order_20, - p2mt_10, p2ma_10); + nestedhap_fix_p2m(v, nested_p2m, *L2_gpa, L0_gpa, page_order_20, p2mt_10, + p2ma_10); p2m_unlock(nested_p2m); return NESTEDHVM_PAGEFAULT_DONE; diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c index 56c06a4fc6..f650d6b26e 100644 --- a/xen/arch/x86/mm/mem_access.c +++ b/xen/arch/x86/mm/mem_access.c @@ -1,8 +1,8 @@ /****************************************************************************** * arch/x86/mm/mem_access.c * - * Parts of this code are Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp) - * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices. + * Parts of this code are Copyright (c) 2009 by Citrix Systems, Inc. (Patrick + * Colp) Parts of this code are Copyright (c) 2007 by Advanced Micro Devices. * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc. * Parts of this code are Copyright (c) 2006 by Michael A Fetterman * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al. 
@@ -47,16 +47,8 @@ static int _p2m_get_mem_access(struct p2m_domain *p2m, gfn_t gfn, static const xenmem_access_t memaccess[] = { #define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac - ACCESS(n), - ACCESS(r), - ACCESS(w), - ACCESS(rw), - ACCESS(x), - ACCESS(rx), - ACCESS(wx), - ACCESS(rwx), - ACCESS(rx2rw), - ACCESS(n2rwx), + ACCESS(n), ACCESS(r), ACCESS(w), ACCESS(rw), ACCESS(x), + ACCESS(rx), ACCESS(wx), ACCESS(rwx), ACCESS(rx2rw), ACCESS(n2rwx), #undef ACCESS }; @@ -77,7 +69,7 @@ static int _p2m_get_mem_access(struct p2m_domain *p2m, gfn_t gfn, if ( (unsigned int)a >= ARRAY_SIZE(memaccess) ) return -ERANGE; - *access = memaccess[a]; + *access = memaccess[a]; return 0; } @@ -97,7 +89,7 @@ bool p2m_mem_access_emulate_check(struct vcpu *v, if ( _p2m_get_mem_access(p2m, _gfn(data->gfn), &access) == 0 ) { - switch ( access ) + switch (access) { case XENMEM_access_n: case XENMEM_access_n2rwx: @@ -140,8 +132,7 @@ bool p2m_mem_access_emulate_check(struct vcpu *v, } #ifdef CONFIG_HVM -bool p2m_mem_access_check(paddr_t gpa, unsigned long gla, - struct npfec npfec, +bool p2m_mem_access_check(paddr_t gpa, unsigned long gla, struct npfec npfec, vm_event_request_t **req_ptr) { struct vcpu *v = current; @@ -167,7 +158,8 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla, if ( npfec.write_access && p2ma == p2m_access_rx2rw ) { - rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw, -1); + rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw, + -1); ASSERT(rc == 0); gfn_unlock(p2m, gfn, 0); return true; @@ -175,21 +167,23 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla, else if ( p2ma == p2m_access_n2rwx ) { ASSERT(npfec.write_access || npfec.read_access || npfec.insn_fetch); - rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, - p2mt, p2m_access_rwx, -1); + rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx, + -1); ASSERT(rc == 0); } gfn_unlock(p2m, gfn, 0); - /* Otherwise, check if there is a memory event listener, and send the message along */ + /* Otherwise, check if there is a memory event listener, and send the + * message along */ if ( !vm_event_check_ring(d->vm_event_monitor) || !req_ptr ) { /* No listener */ if ( p2m->access_required ) { - gdprintk(XENLOG_INFO, "Memory access permissions failure, " - "no vm_event listener VCPU %d, dom %d\n", - v->vcpu_id, d->domain_id); + gdprintk(XENLOG_INFO, + "Memory access permissions failure, " + "no vm_event listener VCPU %d, dom %d\n", + v->vcpu_id, d->domain_id); domain_crash(v->domain); return false; } @@ -202,8 +196,8 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla, /* A listener is not required, so clear the access * restrictions. This set must succeed: we have the * gfn locked and just did a successful get_entry(). 
*/ - rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, - p2mt, p2m_access_rwx, -1); + rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, + p2m_access_rwx, -1); ASSERT(rc == 0); } gfn_unlock(p2m, gfn, 0); @@ -214,7 +208,8 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla, d->arch.monitor.inguest_pagefault_disabled && npfec.kind != npfec_kind_with_gla ) /* don't send a mem_event */ { - hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op, X86_EVENT_NO_EC); + hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op, + X86_EVENT_NO_EC); return true; } @@ -235,7 +230,7 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla, req->u.mem_access.gla = gla; } - switch ( npfec.kind ) + switch (npfec.kind) { case npfec_kind_with_gla: req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA; @@ -246,9 +241,9 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla, break; } - req->u.mem_access.flags |= npfec.read_access ? MEM_ACCESS_R : 0; - req->u.mem_access.flags |= npfec.write_access ? MEM_ACCESS_W : 0; - req->u.mem_access.flags |= npfec.insn_fetch ? MEM_ACCESS_X : 0; + req->u.mem_access.flags |= npfec.read_access ? MEM_ACCESS_R : 0; + req->u.mem_access.flags |= npfec.write_access ? MEM_ACCESS_W : 0; + req->u.mem_access.flags |= npfec.insn_fetch ? MEM_ACCESS_X : 0; } /* Return whether vCPU pause is required (aka. sync event) */ @@ -271,7 +266,6 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m, /* Check host p2m if no valid entry in alternate */ if ( !mfn_valid(mfn) ) { - mfn = __get_gfn_type_access(hp2m, gfn_l, &t, &old_a, P2M_ALLOC | P2M_UNSHARE, &page_order, 0); @@ -301,8 +295,7 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m, #endif static int set_mem_access(struct domain *d, struct p2m_domain *p2m, - struct p2m_domain *ap2m, p2m_access_t a, - gfn_t gfn) + struct p2m_domain *ap2m, p2m_access_t a, gfn_t gfn) { int rc = 0; @@ -336,20 +329,12 @@ static bool xenmem_access_to_p2m_access(struct p2m_domain *p2m, { static const p2m_access_t memaccess[] = { #define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac - ACCESS(n), - ACCESS(r), - ACCESS(w), - ACCESS(rw), - ACCESS(x), - ACCESS(rx), - ACCESS(wx), - ACCESS(rwx), - ACCESS(rx2rw), - ACCESS(n2rwx), + ACCESS(n), ACCESS(r), ACCESS(w), ACCESS(rw), ACCESS(x), + ACCESS(rx), ACCESS(wx), ACCESS(rwx), ACCESS(rx2rw), ACCESS(n2rwx), #undef ACCESS }; - switch ( xaccess ) + switch (xaccess) { case 0 ... ARRAY_SIZE(memaccess) - 1: xaccess = array_index_nospec(xaccess, ARRAY_SIZE(memaccess)); diff --git a/xen/arch/x86/mm/mem_paging.c b/xen/arch/x86/mm/mem_paging.c index 54a94fa3a0..112a6c8ff7 100644 --- a/xen/arch/x86/mm/mem_paging.c +++ b/xen/arch/x86/mm/mem_paging.c @@ -19,7 +19,6 @@ * along with this program; If not, see . 
*/ - #include #include #include @@ -47,7 +46,7 @@ int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg) if ( unlikely(!vm_event_check_ring(d->vm_event_paging)) ) goto out; - switch( mpo.op ) + switch (mpo.op) { case XENMEM_paging_op_nominate: rc = p2m_mem_paging_nominate(d, mpo.gfn); @@ -76,7 +75,6 @@ out: return rc; } - /* * Local variables: * mode: C diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c index 5ac9d8f54c..20440ce26e 100644 --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -42,26 +42,26 @@ static shr_handle_t next_handle = 1; -typedef struct pg_lock_data { +typedef struct pg_lock_data +{ int mm_unlock_level; unsigned short recurse_count; } pg_lock_data_t; static DEFINE_PER_CPU(pg_lock_data_t, __pld); -#define MEM_SHARING_DEBUG(_f, _a...) \ +#define MEM_SHARING_DEBUG(_f, _a...) \ debugtrace_printk("mem_sharing_debug: %s(): " _f, __func__, ##_a) /* Reverse map defines */ -#define RMAP_HASHTAB_ORDER 0 -#define RMAP_HASHTAB_SIZE \ - ((PAGE_SIZE << RMAP_HASHTAB_ORDER) / sizeof(struct list_head)) -#define RMAP_USES_HASHTAB(page) \ - ((page)->sharing->hash_table.flag == NULL) -#define RMAP_HEAVY_SHARED_PAGE RMAP_HASHTAB_SIZE +#define RMAP_HASHTAB_ORDER 0 +#define RMAP_HASHTAB_SIZE \ + ((PAGE_SIZE << RMAP_HASHTAB_ORDER) / sizeof(struct list_head)) +#define RMAP_USES_HASHTAB(page) ((page)->sharing->hash_table.flag == NULL) +#define RMAP_HEAVY_SHARED_PAGE RMAP_HASHTAB_SIZE /* A bit of hysteresis. We don't want to be mutating between list and hash * table constantly. */ -#define RMAP_LIGHT_SHARED_PAGE (RMAP_HEAVY_SHARED_PAGE >> 2) +#define RMAP_LIGHT_SHARED_PAGE (RMAP_HEAVY_SHARED_PAGE >> 2) #if MEM_SHARING_AUDIT @@ -88,8 +88,8 @@ static inline void page_sharing_dispose(struct page_info *page) { /* Unlikely given our thresholds, but we should be careful. */ if ( unlikely(RMAP_USES_HASHTAB(page)) ) - free_xenheap_pages(page->sharing->hash_table.bucket, - RMAP_HASHTAB_ORDER); + free_xenheap_pages(page->sharing->hash_table.bucket, + RMAP_HASHTAB_ORDER); spin_lock(&shr_audit_lock); list_del_rcu(&page->sharing->entry); @@ -100,13 +100,13 @@ static inline void page_sharing_dispose(struct page_info *page) #else -#define audit_add_list(p) ((void)0) +#define audit_add_list(p) ((void)0) static inline void page_sharing_dispose(struct page_info *page) { /* Unlikely given our thresholds, but we should be careful. 
*/ if ( unlikely(RMAP_USES_HASHTAB(page)) ) - free_xenheap_pages(page->sharing->hash_table.bucket, - RMAP_HASHTAB_ORDER); + free_xenheap_pages(page->sharing->hash_table.bucket, + RMAP_HASHTAB_ORDER); xfree(page->sharing); } @@ -122,8 +122,7 @@ static inline int mem_sharing_page_lock(struct page_info *pg) if ( rc ) { preempt_disable(); - page_sharing_mm_post_lock(&pld->mm_unlock_level, - &pld->recurse_count); + page_sharing_mm_post_lock(&pld->mm_unlock_level, &pld->recurse_count); } return rc; } @@ -132,28 +131,26 @@ static inline void mem_sharing_page_unlock(struct page_info *pg) { pg_lock_data_t *pld = &(this_cpu(__pld)); - page_sharing_mm_unlock(pld->mm_unlock_level, - &pld->recurse_count); + page_sharing_mm_unlock(pld->mm_unlock_level, &pld->recurse_count); preempt_enable(); page_unlock(pg); } static inline shr_handle_t get_next_handle(void) { - /* Get the next handle get_page style */ + /* Get the next handle get_page style */ uint64_t x, y = next_handle; do { x = y; - } - while ( (y = cmpxchg(&next_handle, x, x + 1)) != x ); + } while ( (y = cmpxchg(&next_handle, x, x + 1)) != x ); return x + 1; } #define mem_sharing_enabled(d) \ (is_hvm_domain(d) && (d)->arch.hvm.mem_sharing_enabled) -static atomic_t nr_saved_mfns = ATOMIC_INIT(0); -static atomic_t nr_shared_mfns = ATOMIC_INIT(0); +static atomic_t nr_saved_mfns = ATOMIC_INIT(0); +static atomic_t nr_shared_mfns = ATOMIC_INIT(0); /** Reverse map **/ /* Every shared frame keeps a reverse map (rmap) of tuples that @@ -164,29 +161,26 @@ static atomic_t nr_shared_mfns = ATOMIC_INIT(0); typedef struct gfn_info { unsigned long gfn; - domid_t domain; + domid_t domain; struct list_head list; } gfn_info_t; -static inline void -rmap_init(struct page_info *page) +static inline void rmap_init(struct page_info *page) { /* We always start off as a doubly linked list. */ INIT_LIST_HEAD(&page->sharing->gfns); } /* Exceedingly simple "hash function" */ -#define HASH(domain, gfn) \ - (((gfn) + (domain)) % RMAP_HASHTAB_SIZE) +#define HASH(domain, gfn) (((gfn) + (domain)) % RMAP_HASHTAB_SIZE) -/* Conversions. Tuned by the thresholds. Should only happen twice +/* Conversions. Tuned by the thresholds. 
Should only happen twice * (once each) during the lifetime of a shared page */ -static inline int -rmap_list_to_hash_table(struct page_info *page) +static inline int rmap_list_to_hash_table(struct page_info *page) { unsigned int i; - struct list_head *pos, *tmp, *b = - alloc_xenheap_pages(RMAP_HASHTAB_ORDER, 0); + struct list_head *pos, *tmp, + *b = alloc_xenheap_pages(RMAP_HASHTAB_ORDER, 0); if ( b == NULL ) return -ENOMEM; @@ -194,7 +188,7 @@ rmap_list_to_hash_table(struct page_info *page) for ( i = 0; i < RMAP_HASHTAB_SIZE; i++ ) INIT_LIST_HEAD(b + i); - list_for_each_safe(pos, tmp, &page->sharing->gfns) + list_for_each_safe (pos, tmp, &page->sharing->gfns) { gfn_info_t *gfn_info = list_entry(pos, gfn_info_t, list); struct list_head *bucket = b + HASH(gfn_info->domain, gfn_info->gfn); @@ -203,13 +197,12 @@ rmap_list_to_hash_table(struct page_info *page) } page->sharing->hash_table.bucket = b; - page->sharing->hash_table.flag = NULL; + page->sharing->hash_table.flag = NULL; return 0; } -static inline void -rmap_hash_table_to_list(struct page_info *page) +static inline void rmap_hash_table_to_list(struct page_info *page) { unsigned int i; struct list_head *bucket = page->sharing->hash_table.bucket; @@ -219,7 +212,7 @@ rmap_hash_table_to_list(struct page_info *page) for ( i = 0; i < RMAP_HASHTAB_SIZE; i++ ) { struct list_head *pos, *tmp, *head = bucket + i; - list_for_each_safe(pos, tmp, head) + list_for_each_safe (pos, tmp, head) { list_del(pos); list_add(pos, &page->sharing->gfns); @@ -230,8 +223,7 @@ rmap_hash_table_to_list(struct page_info *page) } /* Generic accessors to the rmap */ -static inline unsigned long -rmap_count(struct page_info *pg) +static inline unsigned long rmap_count(struct page_info *pg) { unsigned long count; unsigned long t = read_atomic(&pg->u.inuse.type_info); @@ -242,10 +234,10 @@ rmap_count(struct page_info *pg) } /* The page type count is always decreased after removing from the rmap. - * Use a convert flag to avoid mutating the rmap if in the middle of an + * Use a convert flag to avoid mutating the rmap if in the middle of an * iterator, or if the page will be soon destroyed anyways. */ -static inline void -rmap_del(gfn_info_t *gfn_info, struct page_info *page, int convert) +static inline void rmap_del(gfn_info_t *gfn_info, struct page_info *page, + int convert) { if ( RMAP_USES_HASHTAB(page) && convert && (rmap_count(page) <= RMAP_LIGHT_SHARED_PAGE) ) @@ -256,8 +248,7 @@ rmap_del(gfn_info_t *gfn_info, struct page_info *page, int convert) } /* The page type count is always increased before adding to the rmap. */ -static inline void -rmap_add(gfn_info_t *gfn_info, struct page_info *page) +static inline void rmap_add(gfn_info_t *gfn_info, struct page_info *page) { struct list_head *head; @@ -267,27 +258,25 @@ rmap_add(gfn_info_t *gfn_info, struct page_info *page) * but no reason to panic. */ (void)rmap_list_to_hash_table(page); - head = (RMAP_USES_HASHTAB(page)) ? - page->sharing->hash_table.bucket + - HASH(gfn_info->domain, gfn_info->gfn) : - &page->sharing->gfns; + head = (RMAP_USES_HASHTAB(page)) ? 
page->sharing->hash_table.bucket + + HASH(gfn_info->domain, gfn_info->gfn) + : &page->sharing->gfns; INIT_LIST_HEAD(&gfn_info->list); list_add(&gfn_info->list, head); } -static inline gfn_info_t * -rmap_retrieve(uint16_t domain_id, unsigned long gfn, - struct page_info *page) +static inline gfn_info_t *rmap_retrieve(uint16_t domain_id, unsigned long gfn, + struct page_info *page) { gfn_info_t *gfn_info; struct list_head *le, *head; - head = (RMAP_USES_HASHTAB(page)) ? - page->sharing->hash_table.bucket + HASH(domain_id, gfn) : - &page->sharing->gfns; + head = (RMAP_USES_HASHTAB(page)) + ? page->sharing->hash_table.bucket + HASH(domain_id, gfn) + : &page->sharing->gfns; - list_for_each(le, head) + list_for_each (le, head) { gfn_info = list_entry(le, gfn_info_t, list); if ( (gfn_info->gfn == gfn) && (gfn_info->domain == domain_id) ) @@ -312,31 +301,31 @@ static inline int rmap_has_entries(struct page_info *page) /* The iterator hides the details of how the rmap is implemented. This * involves splitting the list_for_each_safe macro into two steps. */ -struct rmap_iterator { +struct rmap_iterator +{ struct list_head *curr; struct list_head *next; unsigned int bucket; }; -static inline void -rmap_seed_iterator(struct page_info *page, struct rmap_iterator *ri) +static inline void rmap_seed_iterator(struct page_info *page, + struct rmap_iterator *ri) { - ri->curr = (RMAP_USES_HASHTAB(page)) ? - page->sharing->hash_table.bucket : - &page->sharing->gfns; - ri->next = ri->curr->next; + ri->curr = (RMAP_USES_HASHTAB(page)) ? page->sharing->hash_table.bucket + : &page->sharing->gfns; + ri->next = ri->curr->next; ri->bucket = 0; } -static inline gfn_info_t * -rmap_iterate(struct page_info *page, struct rmap_iterator *ri) +static inline gfn_info_t *rmap_iterate(struct page_info *page, + struct rmap_iterator *ri) { - struct list_head *head = (RMAP_USES_HASHTAB(page)) ? - page->sharing->hash_table.bucket + ri->bucket : - &page->sharing->gfns; + struct list_head *head = (RMAP_USES_HASHTAB(page)) + ? page->sharing->hash_table.bucket + ri->bucket + : &page->sharing->gfns; retry: - if ( ri->next == head) + if ( ri->next == head ) { if ( RMAP_USES_HASHTAB(page) ) { @@ -348,7 +337,8 @@ retry: ri->curr = head; ri->next = ri->curr->next; goto retry; - } else + } + else /* List exhausted */ return NULL; } @@ -366,7 +356,7 @@ static inline gfn_info_t *mem_sharing_gfn_alloc(struct page_info *page, gfn_info_t *gfn_info = xmalloc(gfn_info_t); if ( gfn_info == NULL ) - return NULL; + return NULL; gfn_info->gfn = gfn; gfn_info->domain = d->domain_id; @@ -391,15 +381,15 @@ static inline void mem_sharing_gfn_destroy(struct page_info *page, xfree(gfn_info); } -static struct page_info* mem_sharing_lookup(unsigned long mfn) +static struct page_info *mem_sharing_lookup(unsigned long mfn) { if ( mfn_valid(_mfn(mfn)) ) { - struct page_info* page = mfn_to_page(_mfn(mfn)); + struct page_info *page = mfn_to_page(_mfn(mfn)); if ( page_get_owner(page) == dom_cow ) { /* Count has to be at least two, because we're called - * with the mfn locked (1) and this is supposed to be + * with the mfn locked (1) and this is supposed to be * a shared page (1). 
*/ unsigned long t = read_atomic(&page->u.inuse.type_info); ASSERT((t & PGT_type_mask) == PGT_shared_page); @@ -440,44 +430,46 @@ static int audit(void) /* If we can't lock it, it's definitely not a shared page */ if ( !mem_sharing_page_lock(pg) ) { - MEM_SHARING_DEBUG("mfn %lx in audit list, but cannot be locked (%lx)!\n", - mfn_x(mfn), pg->u.inuse.type_info); - errors++; - continue; + MEM_SHARING_DEBUG( + "mfn %lx in audit list, but cannot be locked (%lx)!\n", + mfn_x(mfn), pg->u.inuse.type_info); + errors++; + continue; } - /* Check if the MFN has correct type, owner and handle. */ + /* Check if the MFN has correct type, owner and handle. */ if ( (pg->u.inuse.type_info & PGT_type_mask) != PGT_shared_page ) { - MEM_SHARING_DEBUG("mfn %lx in audit list, but not PGT_shared_page (%lx)!\n", - mfn_x(mfn), pg->u.inuse.type_info & PGT_type_mask); - errors++; - continue; + MEM_SHARING_DEBUG( + "mfn %lx in audit list, but not PGT_shared_page (%lx)!\n", + mfn_x(mfn), pg->u.inuse.type_info & PGT_type_mask); + errors++; + continue; } /* Check the page owner. */ if ( page_get_owner(pg) != dom_cow ) { - MEM_SHARING_DEBUG("mfn %lx shared, but wrong owner (%hu)!\n", - mfn_x(mfn), page_get_owner(pg)->domain_id); - errors++; + MEM_SHARING_DEBUG("mfn %lx shared, but wrong owner (%hu)!\n", + mfn_x(mfn), page_get_owner(pg)->domain_id); + errors++; } /* Check the m2p entry */ if ( !SHARED_M2P(get_gpfn_from_mfn(mfn_x(mfn))) ) { - MEM_SHARING_DEBUG("mfn %lx shared, but wrong m2p entry (%lx)!\n", - mfn_x(mfn), get_gpfn_from_mfn(mfn_x(mfn))); - errors++; + MEM_SHARING_DEBUG("mfn %lx shared, but wrong m2p entry (%lx)!\n", + mfn_x(mfn), get_gpfn_from_mfn(mfn_x(mfn))); + errors++; } /* Check we have a list */ if ( (!pg->sharing) || !rmap_has_entries(pg) ) { - MEM_SHARING_DEBUG("mfn %lx shared, but empty gfn list!\n", - mfn_x(mfn)); - errors++; - continue; + MEM_SHARING_DEBUG("mfn %lx shared, but empty gfn list!\n", + mfn_x(mfn)); + errors++; + continue; } /* We've found a page that is shared */ @@ -499,7 +491,7 @@ static int audit(void) errors++; continue; } - o_mfn = get_gfn_query_unlocked(d, g->gfn, &t); + o_mfn = get_gfn_query_unlocked(d, g->gfn, &t); if ( !mfn_eq(o_mfn, mfn) ) { MEM_SHARING_DEBUG("Incorrect P2M for d=%hu, PFN=%lx." @@ -509,9 +501,10 @@ static int audit(void) } if ( t != p2m_ram_shared ) { - MEM_SHARING_DEBUG("Incorrect P2M type for d=%hu, PFN=%lx MFN=%lx." - "Expecting t=%d, got %d\n", - g->domain, g->gfn, mfn_x(mfn), p2m_ram_shared, t); + MEM_SHARING_DEBUG( + "Incorrect P2M type for d=%hu, PFN=%lx MFN=%lx." + "Expecting t=%d, got %d\n", + g->domain, g->gfn, mfn_x(mfn), p2m_ram_shared, t); errors++; } put_domain(d); @@ -522,7 +515,7 @@ static int audit(void) { MEM_SHARING_DEBUG("Mismatched counts for MFN=%lx." 
"nr_gfns in list %lu, in type_info %lx\n", - mfn_x(mfn), nr_gfns, + mfn_x(mfn), nr_gfns, (pg->u.inuse.type_info & PGT_count_mask)); errors++; } @@ -550,15 +543,12 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn, { struct vcpu *v = current; int rc; - vm_event_request_t req = { - .reason = VM_EVENT_REASON_MEM_SHARING, - .vcpu_id = v->vcpu_id, - .u.mem_sharing.gfn = gfn, - .u.mem_sharing.p2mt = p2m_ram_shared - }; - - if ( (rc = __vm_event_claim_slot(d, - d->vm_event_share, allow_sleep)) < 0 ) + vm_event_request_t req = {.reason = VM_EVENT_REASON_MEM_SHARING, + .vcpu_id = v->vcpu_id, + .u.mem_sharing.gfn = gfn, + .u.mem_sharing.p2mt = p2m_ram_shared}; + + if ( (rc = __vm_event_claim_slot(d, d->vm_event_share, allow_sleep)) < 0 ) return rc; if ( v->domain == d ) @@ -583,9 +573,8 @@ unsigned int mem_sharing_get_nr_shared_mfns(void) } /* Functions that change a page's type and ownership */ -static int page_make_sharable(struct domain *d, - struct page_info *page, - int expected_refcnt) +static int page_make_sharable(struct domain *d, struct page_info *page, + int expected_refcnt) { bool_t drop_dom_ref; @@ -638,7 +627,7 @@ static int page_make_private(struct domain *d, struct page_info *page) if ( !get_page(page, dom_cow) ) return -EINVAL; - + spin_lock(&d->page_alloc_lock); if ( d->is_dying ) @@ -650,7 +639,7 @@ static int page_make_private(struct domain *d, struct page_info *page) /* We can only change the type if count is one */ /* Because we are locking pages individually, we need to drop - * the lock here, while the page is typed. We cannot risk the + * the lock here, while the page is typed. We cannot risk the * race of page_unlock and then put_page_type. */ expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2); if ( page->u.inuse.type_info != expected_type ) @@ -688,7 +677,7 @@ static inline struct page_info *__grab_shared_page(mfn_t mfn) return NULL; pg = mfn_to_page(mfn); - /* If the page is not validated we can't lock it, and if it's + /* If the page is not validated we can't lock it, and if it's * not validated it's obviously not shared. 
*/ if ( !mem_sharing_page_lock(pg) ) return NULL; @@ -707,21 +696,18 @@ static int debug_mfn(mfn_t mfn) struct page_info *page; int num_refs; - if ( (page = __grab_shared_page(mfn)) == NULL) + if ( (page = __grab_shared_page(mfn)) == NULL ) { gdprintk(XENLOG_ERR, "Invalid MFN=%lx\n", mfn_x(mfn)); return -EINVAL; } - MEM_SHARING_DEBUG( - "Debug page: MFN=%lx is ci=%lx, ti=%lx, owner_id=%d\n", - mfn_x(page_to_mfn(page)), - page->count_info, - page->u.inuse.type_info, - page_get_owner(page)->domain_id); + MEM_SHARING_DEBUG("Debug page: MFN=%lx is ci=%lx, ti=%lx, owner_id=%d\n", + mfn_x(page_to_mfn(page)), page->count_info, + page->u.inuse.type_info, page_get_owner(page)->domain_id); /* -1 because the page is locked and that's an additional type ref */ - num_refs = ((int) (page->u.inuse.type_info & PGT_count_mask)) - 1; + num_refs = ((int)(page->u.inuse.type_info & PGT_count_mask)) - 1; mem_sharing_page_unlock(page); return num_refs; } @@ -734,8 +720,8 @@ static int debug_gfn(struct domain *d, gfn_t gfn) mfn = get_gfn_query(d, gfn_x(gfn), &p2mt); - MEM_SHARING_DEBUG("Debug for dom%d, gfn=%" PRI_gfn "\n", - d->domain_id, gfn_x(gfn)); + MEM_SHARING_DEBUG("Debug for dom%d, gfn=%" PRI_gfn "\n", d->domain_id, + gfn_x(gfn)); num_refs = debug_mfn(mfn); put_gfn(d, gfn_x(gfn)); @@ -755,16 +741,15 @@ static int debug_gref(struct domain *d, grant_ref_t ref) d->domain_id, ref, rc); return rc; } - - MEM_SHARING_DEBUG( - "==> Grant [dom=%d,ref=%d], status=%x. ", - d->domain_id, ref, status); + + MEM_SHARING_DEBUG("==> Grant [dom=%d,ref=%d], status=%x. ", d->domain_id, + ref, status); return debug_gfn(d, gfn); } -static int nominate_page(struct domain *d, gfn_t gfn, - int expected_refcnt, shr_handle_t *phandle) +static int nominate_page(struct domain *d, gfn_t gfn, int expected_refcnt, + shr_handle_t *phandle) { struct p2m_domain *hp2m = p2m_get_hostp2m(d); p2m_type_t p2mt; @@ -783,12 +768,14 @@ static int nominate_page(struct domain *d, gfn_t gfn, goto out; /* Return the handle if the page is already shared */ - if ( p2m_is_shared(p2mt) ) { + if ( p2m_is_shared(p2mt) ) + { struct page_info *pg = __grab_shared_page(mfn); if ( !pg ) { gprintk(XENLOG_ERR, - "Shared p2m entry gfn %" PRI_gfn ", but could not grab mfn %" PRI_mfn " dom%d\n", + "Shared p2m entry gfn %" PRI_gfn + ", but could not grab mfn %" PRI_mfn " dom%d\n", gfn_x(gfn), mfn_x(mfn), d->domain_id); BUG(); } @@ -820,8 +807,8 @@ static int nominate_page(struct domain *d, gfn_t gfn, if ( !ap2m ) continue; - amfn = __get_gfn_type_access(ap2m, gfn_x(gfn), &ap2mt, &ap2ma, - 0, NULL, false); + amfn = __get_gfn_type_access(ap2m, gfn_x(gfn), &ap2mt, &ap2ma, 0, + NULL, false); if ( mfn_valid(amfn) && (!mfn_eq(amfn, mfn) || ap2ma != p2ma) ) { altp2m_list_unlock(d); @@ -835,12 +822,12 @@ static int nominate_page(struct domain *d, gfn_t gfn, /* Try to convert the mfn to the sharable type */ page = mfn_to_page(mfn); - ret = page_make_sharable(d, page, expected_refcnt); - if ( ret ) + ret = page_make_sharable(d, page, expected_refcnt); + if ( ret ) goto out; - /* Now that the page is validated, we can lock it. There is no - * race because we're holding the p2m entry, so no one else + /* Now that the page is validated, we can lock it. 
There is no + * race because we're holding the p2m entry, so no one else * could be nominating this gfn */ ret = -ENOENT; if ( !mem_sharing_page_lock(page) ) @@ -848,8 +835,7 @@ static int nominate_page(struct domain *d, gfn_t gfn, /* Initialize the shared state */ ret = -ENOMEM; - if ( (page->sharing = - xmalloc(struct page_sharing_info)) == NULL ) + if ( (page->sharing = xmalloc(struct page_sharing_info)) == NULL ) { /* Making a page private atomically unlocks it */ BUG_ON(page_make_private(d, page) != 0); @@ -859,7 +845,7 @@ static int nominate_page(struct domain *d, gfn_t gfn, rmap_init(page); /* Create the handle */ - page->sharing->handle = get_next_handle(); + page->sharing->handle = get_next_handle(); /* Create the local gfn info */ if ( mem_sharing_gfn_alloc(page, d, gfn_x(gfn)) == NULL ) @@ -901,10 +887,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh, struct two_gfns tg; struct rmap_iterator ri; - get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn, - cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg); + get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn, cd, cgfn, &cmfn_type, NULL, + &cmfn, 0, &tg); - /* This tricky business is to avoid two callers deadlocking if + /* This tricky business is to avoid two callers deadlocking if * grabbing pages in opposite client/source order */ if ( mfn_eq(smfn, cmfn) ) { @@ -930,7 +916,9 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh, mem_sharing_page_unlock(spage); goto err_out; } - } else { + } + else + { ret = XENMEM_SHARING_OP_C_HANDLE_INVALID; cpage = firstpg = __grab_shared_page(cmfn); if ( cpage == NULL ) @@ -975,9 +963,9 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh, /* Merge the lists together */ rmap_seed_iterator(cpage, &ri); - while ( (gfn = rmap_iterate(cpage, &ri)) != NULL) + while ( (gfn = rmap_iterate(cpage, &ri)) != NULL ) { - /* Get the source page and type, this should never fail: + /* Get the source page and type, this should never fail: * we are under shr lock, and got a successful lookup */ BUG_ON(!get_page_and_type(spage, dom_cow, PGT_shared_page)); /* Move the gfn_info from client list to source list. 
@@ -1000,7 +988,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh, mem_sharing_page_unlock(firstpg); /* Free the client page */ - if(test_and_clear_bit(_PGC_allocated, &cpage->count_info)) + if ( test_and_clear_bit(_PGC_allocated, &cpage->count_info) ) put_page(cpage); put_page(cpage); @@ -1008,14 +996,15 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh, atomic_dec(&nr_shared_mfns); atomic_inc(&nr_saved_mfns); ret = 0; - + err_out: put_two_gfns(&tg); return ret; } -int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle_t sh, - struct domain *cd, unsigned long cgfn) +int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, + shr_handle_t sh, struct domain *cd, + unsigned long cgfn) { struct page_info *spage; int ret = -EINVAL; @@ -1026,8 +1015,8 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle p2m_access_t a; struct two_gfns tg; - get_two_gfns(sd, _gfn(sgfn), &smfn_type, NULL, &smfn, - cd, _gfn(cgfn), &cmfn_type, &a, &cmfn, 0, &tg); + get_two_gfns(sd, _gfn(sgfn), &smfn_type, NULL, &smfn, cd, _gfn(cgfn), + &cmfn_type, &a, &cmfn, 0, &tg); /* Get the source shared page, check and lock */ ret = XENMEM_SHARING_OP_S_HANDLE_INVALID; @@ -1058,20 +1047,23 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle goto err_unlock; } - ret = p2m_set_entry(p2m, _gfn(cgfn), smfn, PAGE_ORDER_4K, - p2m_ram_shared, a); + ret = + p2m_set_entry(p2m, _gfn(cgfn), smfn, PAGE_ORDER_4K, p2m_ram_shared, a); /* Tempted to turn this into an assert */ if ( ret ) { mem_sharing_gfn_destroy(spage, cd, gfn_info); put_page_and_type(spage); - } else { + } + else + { /* There is a chance we're plugging a hole where a paged out page was */ if ( p2m_is_paging(cmfn_type) && (cmfn_type != p2m_ram_paging_out) ) { atomic_dec(&cd->paged_pages); - /* Further, there is a chance this was a valid page. Don't leak it. */ + /* Further, there is a chance this was a valid page. Don't leak it. + */ if ( mfn_valid(cmfn) ) { struct page_info *cpage = mfn_to_page(cmfn); @@ -1098,34 +1090,33 @@ err_out: return ret; } - /* A note on the rationale for unshare error handling: * 1. Unshare can only fail with ENOMEM. Any other error conditions BUG_ON()'s * 2. We notify a potential dom0 helper through a vm_event ring. But we - * allow the notification to not go to sleep. If the event ring is full + * allow the notification to not go to sleep. If the event ring is full * of ENOMEM warnings, then it's on the ball. * 3. We cannot go to sleep until the unshare is resolved, because we might - * be buried deep into locks (e.g. something -> copy_to_user -> __hvm_copy) + * be buried deep into locks (e.g. something -> copy_to_user -> __hvm_copy) * 4. So, we make sure we: * 4.1. return an error * 4.2. do not corrupt shared memory * 4.3. do not corrupt guest memory * 4.4. let the guest deal with it if the error propagation will reach it */ -int __mem_sharing_unshare_page(struct domain *d, - unsigned long gfn, - uint16_t flags) +int __mem_sharing_unshare_page(struct domain *d, unsigned long gfn, + uint16_t flags) { p2m_type_t p2mt; mfn_t mfn; struct page_info *page, *old_page; int last_gfn; gfn_info_t *gfn_info = NULL; - + mfn = get_gfn(d, gfn, &p2mt); - + /* Has someone already unshared it? 
*/ - if ( !p2m_is_shared(p2mt) ) { + if ( !p2m_is_shared(p2mt) ) + { put_gfn(d, gfn); return 0; } @@ -1133,21 +1124,25 @@ int __mem_sharing_unshare_page(struct domain *d, page = __grab_shared_page(mfn); if ( page == NULL ) { - gdprintk(XENLOG_ERR, "Domain p2m is shared, but page is not: " - "%lx\n", gfn); + gdprintk(XENLOG_ERR, + "Domain p2m is shared, but page is not: " + "%lx\n", + gfn); BUG(); } gfn_info = rmap_retrieve(d->domain_id, gfn, page); if ( unlikely(gfn_info == NULL) ) { - gdprintk(XENLOG_ERR, "Could not find gfn_info for shared gfn: " - "%lx\n", gfn); + gdprintk(XENLOG_ERR, + "Could not find gfn_info for shared gfn: " + "%lx\n", + gfn); BUG(); } /* Do the accounting first. If anything fails below, we have bigger - * bigger fish to fry. First, remove the gfn from the list. */ + * bigger fish to fry. First, remove the gfn from the list. */ last_gfn = rmap_has_one_entry(page); if ( last_gfn ) { @@ -1161,7 +1156,7 @@ int __mem_sharing_unshare_page(struct domain *d, else atomic_dec(&nr_saved_mfns); - /* If the GFN is getting destroyed drop the references to MFN + /* If the GFN is getting destroyed drop the references to MFN * (possibly freeing the page), and exit early */ if ( flags & MEM_SHARING_DESTROY_GFN ) { @@ -1185,7 +1180,7 @@ int __mem_sharing_unshare_page(struct domain *d, return 0; } - + if ( last_gfn ) { /* Making a page private atomically unlocks it */ @@ -1195,7 +1190,7 @@ int __mem_sharing_unshare_page(struct domain *d, old_page = page; page = alloc_domheap_page(d, 0); - if ( !page ) + if ( !page ) { /* Undo dec of nr_saved_mfns, as the retry will decrease again. */ atomic_inc(&nr_saved_mfns); @@ -1213,11 +1208,11 @@ int __mem_sharing_unshare_page(struct domain *d, mem_sharing_page_unlock(old_page); put_page_and_type(old_page); -private_page_found: +private_page_found: if ( p2m_change_type_one(d, gfn, p2m_ram_shared, p2m_ram_rw) ) { - gdprintk(XENLOG_ERR, "Could not change p2m type d %hu gfn %lx.\n", - d->domain_id, gfn); + gdprintk(XENLOG_ERR, "Could not change p2m type d %hu gfn %lx.\n", + d->domain_id, gfn); BUG(); } @@ -1242,8 +1237,8 @@ int relinquish_shared_pages(struct domain *d) return 0; p2m_lock(p2m); - for ( gfn = p2m->next_shared_gfn_to_relinquish; - gfn <= p2m->max_mapped_pfn; gfn++ ) + for ( gfn = p2m->next_shared_gfn_to_relinquish; gfn <= p2m->max_mapped_pfn; + gfn++ ) { p2m_access_t a; p2m_type_t t; @@ -1256,8 +1251,7 @@ int relinquish_shared_pages(struct domain *d) if ( mfn_valid(mfn) && (t == p2m_ram_shared) ) { /* Does not fail with ENOMEM given the DESTROY flag */ - BUG_ON(__mem_sharing_unshare_page(d, gfn, - MEM_SHARING_DESTROY_GFN)); + BUG_ON(__mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN)); /* Clear out the p2m entry so no one else may try to * unshare. Must succeed: we just read the old entry and * we hold the p2m lock. 
*/ @@ -1363,257 +1357,246 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) if ( !hap_enabled(d) || !d->arch.hvm.mem_sharing_enabled ) goto out; - switch ( mso.op ) + switch (mso.op) { - case XENMEM_sharing_op_nominate_gfn: - { - shr_handle_t handle; + case XENMEM_sharing_op_nominate_gfn: + { + shr_handle_t handle; - rc = -EINVAL; - if ( !mem_sharing_enabled(d) ) - goto out; + rc = -EINVAL; + if ( !mem_sharing_enabled(d) ) + goto out; - rc = nominate_page(d, _gfn(mso.u.nominate.u.gfn), 0, &handle); - mso.u.nominate.handle = handle; - } - break; + rc = nominate_page(d, _gfn(mso.u.nominate.u.gfn), 0, &handle); + mso.u.nominate.handle = handle; + } + break; - case XENMEM_sharing_op_nominate_gref: - { - grant_ref_t gref = mso.u.nominate.u.grant_ref; - gfn_t gfn; - shr_handle_t handle; + case XENMEM_sharing_op_nominate_gref: + { + grant_ref_t gref = mso.u.nominate.u.grant_ref; + gfn_t gfn; + shr_handle_t handle; + + rc = -EINVAL; + if ( !mem_sharing_enabled(d) ) + goto out; + rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &gfn, NULL); + if ( rc < 0 ) + goto out; + + rc = nominate_page(d, gfn, 3, &handle); + mso.u.nominate.handle = handle; + } + break; - rc = -EINVAL; - if ( !mem_sharing_enabled(d) ) - goto out; - rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &gfn, NULL); - if ( rc < 0 ) - goto out; + case XENMEM_sharing_op_share: + { + gfn_t sgfn, cgfn; + struct domain *cd; + shr_handle_t sh, ch; - rc = nominate_page(d, gfn, 3, &handle); - mso.u.nominate.handle = handle; - } - break; + rc = -EINVAL; + if ( !mem_sharing_enabled(d) ) + goto out; + + rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain, &cd); + if ( rc ) + goto out; - case XENMEM_sharing_op_share: + rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mso.op); + if ( rc ) { - gfn_t sgfn, cgfn; - struct domain *cd; - shr_handle_t sh, ch; + rcu_unlock_domain(cd); + goto out; + } + if ( !mem_sharing_enabled(cd) ) + { + rcu_unlock_domain(cd); rc = -EINVAL; - if ( !mem_sharing_enabled(d) ) - goto out; - - rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain, - &cd); - if ( rc ) - goto out; + goto out; + } - rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mso.op); - if ( rc ) + if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) ) + { + grant_ref_t gref = (grant_ref_t)( + XENMEM_SHARING_OP_FIELD_GET_GREF(mso.u.share.source_gfn)); + rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &sgfn, NULL); + if ( rc < 0 ) { rcu_unlock_domain(cd); goto out; } + } + else + sgfn = _gfn(mso.u.share.source_gfn); - if ( !mem_sharing_enabled(cd) ) + if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.client_gfn) ) + { + grant_ref_t gref = (grant_ref_t)( + XENMEM_SHARING_OP_FIELD_GET_GREF(mso.u.share.client_gfn)); + rc = mem_sharing_gref_to_gfn(cd->grant_table, gref, &cgfn, NULL); + if ( rc < 0 ) { rcu_unlock_domain(cd); - rc = -EINVAL; goto out; } + } + else + cgfn = _gfn(mso.u.share.client_gfn); - if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) ) - { - grant_ref_t gref = (grant_ref_t) - (XENMEM_SHARING_OP_FIELD_GET_GREF( - mso.u.share.source_gfn)); - rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &sgfn, - NULL); - if ( rc < 0 ) - { - rcu_unlock_domain(cd); - goto out; - } - } - else - sgfn = _gfn(mso.u.share.source_gfn); + sh = mso.u.share.source_handle; + ch = mso.u.share.client_handle; - if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.client_gfn) ) - { - grant_ref_t gref = (grant_ref_t) - (XENMEM_SHARING_OP_FIELD_GET_GREF( - mso.u.share.client_gfn)); - rc = 
mem_sharing_gref_to_gfn(cd->grant_table, gref, &cgfn, - NULL); - if ( rc < 0 ) - { - rcu_unlock_domain(cd); - goto out; - } - } - else - cgfn = _gfn(mso.u.share.client_gfn); + rc = share_pages(d, sgfn, sh, cd, cgfn, ch); - sh = mso.u.share.source_handle; - ch = mso.u.share.client_handle; + rcu_unlock_domain(cd); + } + break; - rc = share_pages(d, sgfn, sh, cd, cgfn, ch); + case XENMEM_sharing_op_add_physmap: + { + unsigned long sgfn, cgfn; + struct domain *cd; + shr_handle_t sh; + rc = -EINVAL; + if ( !mem_sharing_enabled(d) ) + goto out; + + rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain, &cd); + if ( rc ) + goto out; + + rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mso.op); + if ( rc ) + { rcu_unlock_domain(cd); + goto out; } - break; - case XENMEM_sharing_op_add_physmap: + if ( !mem_sharing_enabled(cd) ) { - unsigned long sgfn, cgfn; - struct domain *cd; - shr_handle_t sh; + rcu_unlock_domain(cd); + rc = -EINVAL; + goto out; + } + if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) ) + { + /* Cannot add a gref to the physmap */ + rcu_unlock_domain(cd); rc = -EINVAL; - if ( !mem_sharing_enabled(d) ) - goto out; + goto out; + } - rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain, - &cd); - if ( rc ) - goto out; + sgfn = mso.u.share.source_gfn; + sh = mso.u.share.source_handle; + cgfn = mso.u.share.client_gfn; - rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mso.op); - if ( rc ) - { - rcu_unlock_domain(cd); - goto out; - } + rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn); - if ( !mem_sharing_enabled(cd) ) - { - rcu_unlock_domain(cd); - rc = -EINVAL; - goto out; - } + rcu_unlock_domain(cd); + } + break; - if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) ) - { - /* Cannot add a gref to the physmap */ - rcu_unlock_domain(cd); - rc = -EINVAL; - goto out; - } + case XENMEM_sharing_op_range_share: + { + unsigned long max_sgfn, max_cgfn; + struct domain *cd; - sgfn = mso.u.share.source_gfn; - sh = mso.u.share.source_handle; - cgfn = mso.u.share.client_gfn; + rc = -EINVAL; + if ( mso.u.range._pad[0] || mso.u.range._pad[1] || mso.u.range._pad[2] ) + goto out; - rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn); + /* + * We use opaque for the hypercall continuation value. + * Ideally the user sets this to 0 in the beginning but + * there is no good way of enforcing that here, so we just check + * that it's at least in range. + */ + if ( mso.u.range.opaque && + (mso.u.range.opaque < mso.u.range.first_gfn || + mso.u.range.opaque > mso.u.range.last_gfn) ) + goto out; + + if ( !mem_sharing_enabled(d) ) + goto out; + + rc = rcu_lock_live_remote_domain_by_id(mso.u.range.client_domain, &cd); + if ( rc ) + goto out; + /* + * We reuse XENMEM_sharing_op_share XSM check here as this is + * essentially the same concept repeated over multiple pages. + */ + rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, XENMEM_sharing_op_share); + if ( rc ) + { rcu_unlock_domain(cd); + goto out; } - break; - case XENMEM_sharing_op_range_share: + if ( !mem_sharing_enabled(cd) ) { - unsigned long max_sgfn, max_cgfn; - struct domain *cd; - + rcu_unlock_domain(cd); rc = -EINVAL; - if ( mso.u.range._pad[0] || mso.u.range._pad[1] || - mso.u.range._pad[2] ) - goto out; - - /* - * We use opaque for the hypercall continuation value. - * Ideally the user sets this to 0 in the beginning but - * there is no good way of enforcing that here, so we just check - * that it's at least in range. 
- */ - if ( mso.u.range.opaque && - (mso.u.range.opaque < mso.u.range.first_gfn || - mso.u.range.opaque > mso.u.range.last_gfn) ) - goto out; - - if ( !mem_sharing_enabled(d) ) - goto out; - - rc = rcu_lock_live_remote_domain_by_id(mso.u.range.client_domain, - &cd); - if ( rc ) - goto out; - - /* - * We reuse XENMEM_sharing_op_share XSM check here as this is - * essentially the same concept repeated over multiple pages. - */ - rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, - XENMEM_sharing_op_share); - if ( rc ) - { - rcu_unlock_domain(cd); - goto out; - } - - if ( !mem_sharing_enabled(cd) ) - { - rcu_unlock_domain(cd); - rc = -EINVAL; - goto out; - } - - /* - * Sanity check only, the client should keep the domains paused for - * the duration of this op. - */ - if ( !atomic_read(&d->pause_count) || - !atomic_read(&cd->pause_count) ) - { - rcu_unlock_domain(cd); - rc = -EINVAL; - goto out; - } + goto out; + } - max_sgfn = domain_get_maximum_gpfn(d); - max_cgfn = domain_get_maximum_gpfn(cd); + /* + * Sanity check only, the client should keep the domains paused for + * the duration of this op. + */ + if ( !atomic_read(&d->pause_count) || !atomic_read(&cd->pause_count) ) + { + rcu_unlock_domain(cd); + rc = -EINVAL; + goto out; + } - if ( max_sgfn < mso.u.range.first_gfn || - max_sgfn < mso.u.range.last_gfn || - max_cgfn < mso.u.range.first_gfn || - max_cgfn < mso.u.range.last_gfn ) - { - rcu_unlock_domain(cd); - rc = -EINVAL; - goto out; - } + max_sgfn = domain_get_maximum_gpfn(d); + max_cgfn = domain_get_maximum_gpfn(cd); - rc = range_share(d, cd, &mso.u.range); + if ( max_sgfn < mso.u.range.first_gfn || + max_sgfn < mso.u.range.last_gfn || + max_cgfn < mso.u.range.first_gfn || + max_cgfn < mso.u.range.last_gfn ) + { rcu_unlock_domain(cd); + rc = -EINVAL; + goto out; + } - if ( rc > 0 ) - { - if ( __copy_to_guest(arg, &mso, 1) ) - rc = -EFAULT; - else - rc = hypercall_create_continuation(__HYPERVISOR_memory_op, - "lh", XENMEM_sharing_op, - arg); - } + rc = range_share(d, cd, &mso.u.range); + rcu_unlock_domain(cd); + + if ( rc > 0 ) + { + if ( __copy_to_guest(arg, &mso, 1) ) + rc = -EFAULT; else - mso.u.range.opaque = 0; + rc = hypercall_create_continuation(__HYPERVISOR_memory_op, "lh", + XENMEM_sharing_op, arg); } - break; + else + mso.u.range.opaque = 0; + } + break; - case XENMEM_sharing_op_debug_gfn: - rc = debug_gfn(d, _gfn(mso.u.debug.u.gfn)); - break; + case XENMEM_sharing_op_debug_gfn: + rc = debug_gfn(d, _gfn(mso.u.debug.u.gfn)); + break; - case XENMEM_sharing_op_debug_gref: - rc = debug_gref(d, mso.u.debug.u.gref); - break; + case XENMEM_sharing_op_debug_gref: + rc = debug_gref(d, mso.u.debug.u.gref); + break; - default: - rc = -ENOSYS; - break; + default: + rc = -ENOSYS; + break; } if ( !rc && __copy_to_guest(arg, &mso, 1) ) @@ -1630,22 +1613,22 @@ int mem_sharing_domctl(struct domain *d, struct xen_domctl_mem_sharing_op *mec) /* Only HAP is supported */ if ( !hap_enabled(d) ) - return -ENODEV; + return -ENODEV; - switch(mec->op) + switch (mec->op) { - case XEN_DOMCTL_MEM_SHARING_CONTROL: - { - rc = 0; - if ( unlikely(has_iommu_pt(d) && mec->u.enable) ) - rc = -EXDEV; - else - d->arch.hvm.mem_sharing_enabled = mec->u.enable; - } - break; + case XEN_DOMCTL_MEM_SHARING_CONTROL: + { + rc = 0; + if ( unlikely(has_iommu_pt(d) && mec->u.enable) ) + rc = -EXDEV; + else + d->arch.hvm.mem_sharing_enabled = mec->u.enable; + } + break; - default: - rc = -ENOSYS; + default: + rc = -ENOSYS; } return rc; @@ -1659,4 +1642,3 @@ void __init mem_sharing_init(void) INIT_LIST_HEAD(&shr_audit_list); #endif 
} - diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c index e3044bee2e..8f03974f34 100644 --- a/xen/arch/x86/mm/p2m-ept.c +++ b/xen/arch/x86/mm/p2m-ept.c @@ -34,11 +34,11 @@ #include "mm-locks.h" -#define atomic_read_ept_entry(__pepte) \ - ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } ) +#define atomic_read_ept_entry(__pepte) \ + ((ept_entry_t){.epte = read_atomic(&(__pepte)->epte)}) -#define is_epte_present(ept_entry) ((ept_entry)->epte & 0x7) -#define is_epte_superpage(ept_entry) ((ept_entry)->sp) +#define is_epte_present(ept_entry) ((ept_entry)->epte & 0x7) +#define is_epte_superpage(ept_entry) ((ept_entry)->sp) static inline bool_t is_epte_valid(ept_entry_t *e) { /* suppress_ve alone is not considered valid, so mask it off */ @@ -46,9 +46,8 @@ static inline bool_t is_epte_valid(ept_entry_t *e) } /* returns : 0 for success, -errno otherwise */ -static int atomic_write_ept_entry(struct p2m_domain *p2m, - ept_entry_t *entryptr, ept_entry_t new, - int level) +static int atomic_write_ept_entry(struct p2m_domain *p2m, ept_entry_t *entryptr, + ept_entry_t new, int level) { int rc = p2m_entry_modify(p2m, new.sa_p2mt, entryptr->sa_p2mt, _mfn(new.mfn), _mfn(entryptr->mfn), level); @@ -75,111 +74,107 @@ static void ept_p2m_type_to_flags(struct p2m_domain *p2m, ept_entry_t *entry, * D bit is set for all writable types in EPT leaf entry, except for * log-dirty type with PML. */ - switch(type) + switch (type) { - case p2m_invalid: - case p2m_mmio_dm: - case p2m_populate_on_demand: - case p2m_ram_paging_out: - case p2m_ram_paged: - case p2m_ram_paging_in: - default: - entry->r = entry->w = entry->x = 0; - break; - case p2m_ram_rw: - entry->r = entry->w = entry->x = 1; - entry->a = entry->d = !!cpu_has_vmx_ept_ad; - break; - case p2m_ioreq_server: - entry->r = 1; - entry->w = !(p2m->ioreq.flags & XEN_DMOP_IOREQ_MEM_ACCESS_WRITE); - entry->x = 0; - entry->a = !!cpu_has_vmx_ept_ad; - entry->d = entry->w && entry->a; - break; - case p2m_mmio_direct: - entry->r = entry->x = 1; - entry->w = !rangeset_contains_singleton(mmio_ro_ranges, - entry->mfn); - ASSERT(entry->w || !is_epte_superpage(entry)); - entry->a = !!cpu_has_vmx_ept_ad; - entry->d = entry->w && cpu_has_vmx_ept_ad; - break; - case p2m_ram_logdirty: - entry->r = entry->x = 1; - /* - * In case of PML, we don't have to write protect 4K page, but - * only need to clear D-bit for it, but we still need to write - * protect super page in order to split it to 4K pages in EPT - * violation. 
- */ - if ( vmx_domain_pml_enabled(p2m->domain) && - !is_epte_superpage(entry) ) - entry->w = 1; - else - entry->w = 0; - entry->a = !!cpu_has_vmx_ept_ad; - /* For both PML or non-PML cases we clear D bit anyway */ - entry->d = 0; - break; - case p2m_ram_ro: - case p2m_ram_shared: - entry->r = entry->x = 1; + case p2m_invalid: + case p2m_mmio_dm: + case p2m_populate_on_demand: + case p2m_ram_paging_out: + case p2m_ram_paged: + case p2m_ram_paging_in: + default: + entry->r = entry->w = entry->x = 0; + break; + case p2m_ram_rw: + entry->r = entry->w = entry->x = 1; + entry->a = entry->d = !!cpu_has_vmx_ept_ad; + break; + case p2m_ioreq_server: + entry->r = 1; + entry->w = !(p2m->ioreq.flags & XEN_DMOP_IOREQ_MEM_ACCESS_WRITE); + entry->x = 0; + entry->a = !!cpu_has_vmx_ept_ad; + entry->d = entry->w && entry->a; + break; + case p2m_mmio_direct: + entry->r = entry->x = 1; + entry->w = !rangeset_contains_singleton(mmio_ro_ranges, entry->mfn); + ASSERT(entry->w || !is_epte_superpage(entry)); + entry->a = !!cpu_has_vmx_ept_ad; + entry->d = entry->w && cpu_has_vmx_ept_ad; + break; + case p2m_ram_logdirty: + entry->r = entry->x = 1; + /* + * In case of PML, we don't have to write protect 4K page, but + * only need to clear D-bit for it, but we still need to write + * protect super page in order to split it to 4K pages in EPT + * violation. + */ + if ( vmx_domain_pml_enabled(p2m->domain) && !is_epte_superpage(entry) ) + entry->w = 1; + else entry->w = 0; - entry->a = !!cpu_has_vmx_ept_ad; - entry->d = 0; - break; - case p2m_grant_map_rw: - case p2m_map_foreign: - entry->r = entry->w = 1; - entry->x = 0; - entry->a = entry->d = !!cpu_has_vmx_ept_ad; - break; - case p2m_grant_map_ro: - entry->r = 1; - entry->w = entry->x = 0; - entry->a = !!cpu_has_vmx_ept_ad; - entry->d = 0; - break; + entry->a = !!cpu_has_vmx_ept_ad; + /* For both PML or non-PML cases we clear D bit anyway */ + entry->d = 0; + break; + case p2m_ram_ro: + case p2m_ram_shared: + entry->r = entry->x = 1; + entry->w = 0; + entry->a = !!cpu_has_vmx_ept_ad; + entry->d = 0; + break; + case p2m_grant_map_rw: + case p2m_map_foreign: + entry->r = entry->w = 1; + entry->x = 0; + entry->a = entry->d = !!cpu_has_vmx_ept_ad; + break; + case p2m_grant_map_ro: + entry->r = 1; + entry->w = entry->x = 0; + entry->a = !!cpu_has_vmx_ept_ad; + entry->d = 0; + break; } - /* Then restrict with access permissions */ - switch (access) + switch (access) { - case p2m_access_n: - case p2m_access_n2rwx: - entry->r = entry->w = entry->x = 0; - break; - case p2m_access_r: - entry->w = entry->x = 0; - break; - case p2m_access_w: - entry->r = entry->x = 0; - break; - case p2m_access_x: - entry->r = entry->w = 0; - break; - case p2m_access_rx: - case p2m_access_rx2rw: - entry->w = 0; - break; - case p2m_access_wx: - entry->r = 0; - break; - case p2m_access_rw: - entry->x = 0; - break; - case p2m_access_rwx: - break; + case p2m_access_n: + case p2m_access_n2rwx: + entry->r = entry->w = entry->x = 0; + break; + case p2m_access_r: + entry->w = entry->x = 0; + break; + case p2m_access_w: + entry->r = entry->x = 0; + break; + case p2m_access_x: + entry->r = entry->w = 0; + break; + case p2m_access_rx: + case p2m_access_rx2rw: + entry->w = 0; + break; + case p2m_access_wx: + entry->r = 0; + break; + case p2m_access_rw: + entry->x = 0; + break; + case p2m_access_rwx: + break; } - } -#define GUEST_TABLE_MAP_FAILED 0 +#define GUEST_TABLE_MAP_FAILED 0 #define GUEST_TABLE_NORMAL_PAGE 1 -#define GUEST_TABLE_SUPER_PAGE 2 -#define GUEST_TABLE_POD_PAGE 3 +#define 
GUEST_TABLE_SUPER_PAGE 2 +#define GUEST_TABLE_POD_PAGE 3 /* Fill in middle levels of ept table */ static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry) @@ -213,7 +208,8 @@ static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry) } /* free ept sub tree behind an entry */ -static void ept_free_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry, int level) +static void ept_free_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry, + int level) { /* End if the entry is a leaf entry. */ if ( level == 0 || !is_epte_present(ept_entry) || @@ -227,14 +223,14 @@ static void ept_free_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry, int l ept_free_entry(p2m, epte + i, level - 1); unmap_domain_page(epte); } - + p2m_tlb_flush_sync(p2m); p2m_free_ptp(p2m, mfn_to_page(_mfn(ept_entry->mfn))); } static bool_t ept_split_super_page(struct p2m_domain *p2m, - ept_entry_t *ept_entry, - unsigned int level, unsigned int target) + ept_entry_t *ept_entry, unsigned int level, + unsigned int target) { ept_entry_t new_ept, *table; uint64_t trunk; @@ -284,7 +280,7 @@ static bool_t ept_split_super_page(struct p2m_domain *p2m, /* Take the currently mapped table, find the corresponding gfn entry, * and map the next table, if available. If the entry is empty - * and read_only is set, + * and read_only is set, * Return values: * 0: Failed to map. Either read_only was set and the entry was * empty, or allocating a new page failed. @@ -384,8 +380,7 @@ static bool_t ept_invalidate_emt(struct p2m_domain *p2m, mfn_t mfn, * The passed in range is guaranteed to not cross a page (table) * boundary at the targeted level. */ -static int ept_invalidate_emt_range(struct p2m_domain *p2m, - unsigned int target, +static int ept_invalidate_emt_range(struct p2m_domain *p2m, unsigned int target, unsigned long first_gfn, unsigned long last_gfn) { @@ -444,7 +439,7 @@ static int ept_invalidate_emt_range(struct p2m_domain *p2m, } } - out: +out: unmap_domain_page(table); return rc; @@ -473,7 +468,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn) if ( !mfn ) return 0; - for ( ; ; --level ) + for ( ;; --level ) { ept_entry_t e; unsigned int i; @@ -500,9 +495,9 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn) e.emt = 0; if ( !is_epte_valid(&e) || !is_epte_present(&e) ) continue; - e.emt = epte_get_entry_emt(p2m->domain, gfn + i, - _mfn(e.mfn), 0, &ipat, - e.sa_p2mt == p2m_mmio_direct); + e.emt = + epte_get_entry_emt(p2m->domain, gfn + i, _mfn(e.mfn), 0, + &ipat, e.sa_p2mt == p2m_mmio_direct); e.ipat = ipat; nt = p2m_recalc_type(e.recalc, e.sa_p2mt, p2m, gfn + i); @@ -528,20 +523,19 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn) unsigned long mask = ~0UL << (level * EPT_TABLE_ORDER); ASSERT(e.sa_p2mt != p2m_ioreq_server); - switch ( p2m_is_logdirty_range(p2m, gfn & mask, - gfn | ~mask) ) + switch (p2m_is_logdirty_range(p2m, gfn & mask, gfn | ~mask)) { case 0: - e.sa_p2mt = p2m_ram_rw; - e.recalc = 0; - break; + e.sa_p2mt = p2m_ram_rw; + e.recalc = 0; + break; case 1: - e.sa_p2mt = p2m_ram_logdirty; - e.recalc = 0; - break; + e.sa_p2mt = p2m_ram_logdirty; + e.recalc = 0; + break; default: /* Force split. 
*/ - emt = -1; - break; + emt = -1; + break; } } if ( unlikely(emt < 0) ) @@ -596,7 +590,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn) { struct vcpu *v; - for_each_vcpu ( p2m->domain, v ) + for_each_vcpu (p2m->domain, v) v->arch.hvm.vmx.ept_spurious_misconfig = 1; } @@ -630,16 +624,16 @@ bool_t ept_handle_misconfig(uint64_t gpa) * * Returns: 0 for success, -errno for failure */ -static int -ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, - unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma, - int sve) +static int ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, + unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma, + int sve) { ept_entry_t *table, *ept_entry = NULL; unsigned long gfn = gfn_x(gfn_); unsigned long gfn_remainder = gfn; unsigned int i, target = order / EPT_TABLE_ORDER; - unsigned long fn_mask = !mfn_eq(mfn, INVALID_MFN) ? (gfn | mfn_x(mfn)) : gfn; + unsigned long fn_mask = + !mfn_eq(mfn, INVALID_MFN) ? (gfn | mfn_x(mfn)) : gfn; int ret, rc = 0; bool_t entry_written = 0; bool_t direct_mmio = (p2mt == p2m_mmio_direct); @@ -648,8 +642,8 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, bool_t vtd_pte_present = 0; unsigned int iommu_flags = p2m_get_iommu_flags(p2mt, mfn); bool_t needs_sync = 1; - ept_entry_t old_entry = { .epte = 0 }; - ept_entry_t new_entry = { .epte = 0 }; + ept_entry_t old_entry = {.epte = 0}; + ept_entry_t new_entry = {.epte = 0}; struct ept_data *ept = &p2m->ept; struct domain *d = p2m->domain; @@ -671,8 +665,7 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, if ( ret < 0 ) return ret; - ASSERT((target == 2 && hap_has_1gb) || - (target == 1 && hap_has_2mb) || + ASSERT((target == 2 && hap_has_1gb) || (target == 1 && hap_has_2mb) || (target == 0)); ASSERT(!p2m_is_foreign(p2mt) || target == 0); @@ -695,7 +688,7 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, ept_entry = table + (gfn_remainder >> (i * EPT_TABLE_ORDER)); - /* In case VT-d uses same page table, this flag is needed by VT-d */ + /* In case VT-d uses same page table, this flag is needed by VT-d */ vtd_pte_present = is_epte_present(ept_entry); /* @@ -754,8 +747,8 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) ) { - int emt = epte_get_entry_emt(p2m->domain, gfn, mfn, - i * EPT_TABLE_ORDER, &ipat, direct_mmio); + int emt = epte_get_entry_emt(p2m->domain, gfn, mfn, i * EPT_TABLE_ORDER, + &ipat, direct_mmio); if ( emt >= 0 ) new_entry.emt = emt; @@ -774,7 +767,7 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, /* Safe to read-then-write because we hold the p2m lock */ if ( ept_entry->mfn == new_entry.mfn && p2m_get_iommu_flags(ept_entry->sa_p2mt, _mfn(ept_entry->mfn)) == - iommu_flags ) + iommu_flags ) need_modify_vtd_table = 0; ept_p2m_type_to_flags(p2m, &new_entry, p2mt, p2ma); @@ -783,8 +776,8 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, if ( sve != -1 ) new_entry.suppress_ve = !!sve; else - new_entry.suppress_ve = is_epte_valid(&old_entry) ? - old_entry.suppress_ve : 1; + new_entry.suppress_ve = + is_epte_valid(&old_entry) ? 
old_entry.suppress_ve : 1; rc = atomic_write_ept_entry(p2m, ept_entry, new_entry, target); if ( unlikely(rc) ) @@ -795,7 +788,8 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, if ( p2mt != p2m_invalid && (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) ) - /* Track the highest gfn for which we have ever had a valid mapping */ + /* Track the highest gfn for which we have ever had a valid mapping + */ p2m->max_mapped_pfn = gfn + (1UL << order) - 1; } @@ -804,15 +798,15 @@ out: ept_sync_domain(p2m); /* For host p2m, may need to change VT-d page table.*/ - if ( rc == 0 && p2m_is_hostp2m(p2m) && - need_modify_vtd_table ) + if ( rc == 0 && p2m_is_hostp2m(p2m) && need_modify_vtd_table ) { if ( iommu_use_hap_pt(d) ) - rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present); + rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, + vtd_pte_present); else if ( need_iommu_pt_sync(d) ) - rc = iommu_flags ? - iommu_legacy_map(d, _dfn(gfn), mfn, order, iommu_flags) : - iommu_legacy_unmap(d, _dfn(gfn), order); + rc = iommu_flags + ? iommu_legacy_map(d, _dfn(gfn), mfn, order, iommu_flags) + : iommu_legacy_unmap(d, _dfn(gfn), order); } unmap_domain_page(table); @@ -835,10 +829,9 @@ out: } /* Read ept p2m entries */ -static mfn_t ept_get_entry(struct p2m_domain *p2m, - gfn_t gfn_, p2m_type_t *t, p2m_access_t* a, - p2m_query_t q, unsigned int *page_order, - bool_t *sve) +static mfn_t ept_get_entry(struct p2m_domain *p2m, gfn_t gfn_, p2m_type_t *t, + p2m_access_t *a, p2m_query_t q, + unsigned int *page_order, bool_t *sve) { ept_entry_t *table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m))); @@ -888,7 +881,7 @@ static mfn_t ept_get_entry(struct p2m_domain *p2m, /* Populate this superpage */ ASSERT(i <= 2); - index = gfn_remainder >> ( i * EPT_TABLE_ORDER); + index = gfn_remainder >> (i * EPT_TABLE_ORDER); ept_entry = table + index; if ( p2m_pod_demand_populate(p2m, gfn_, i * EPT_TABLE_ORDER) ) @@ -912,15 +905,15 @@ static mfn_t ept_get_entry(struct p2m_domain *p2m, } ASSERT(i == 0); - + if ( !p2m_pod_demand_populate(p2m, gfn_, PAGE_ORDER_4K) ) goto out; } if ( is_epte_valid(ept_entry) ) { - *t = p2m_recalc_type(recalc || ept_entry->recalc, - ept_entry->sa_p2mt, p2m, gfn); + *t = p2m_recalc_type(recalc || ept_entry->recalc, ept_entry->sa_p2mt, + p2m, gfn); *a = ept_entry->access; if ( sve ) *sve = ept_entry->suppress_ve; @@ -928,18 +921,18 @@ static mfn_t ept_get_entry(struct p2m_domain *p2m, mfn = _mfn(ept_entry->mfn); if ( i ) { - /* + /* * We may meet super pages, and to split into 4k pages * to emulate p2m table */ - unsigned long split_mfn = mfn_x(mfn) + - (gfn_remainder & - ((1 << (i * EPT_TABLE_ORDER)) - 1)); + unsigned long split_mfn = + mfn_x(mfn) + + (gfn_remainder & ((1 << (i * EPT_TABLE_ORDER)) - 1)); mfn = _mfn(split_mfn); } } - out: +out: if ( page_order ) *page_order = i * EPT_TABLE_ORDER; @@ -973,17 +966,17 @@ void ept_walk_table(struct domain *d, unsigned long gfn) u32 index; /* Stolen from ept_next_level */ - index = gfn_remainder >> (i*EPT_TABLE_ORDER); + index = gfn_remainder >> (i * EPT_TABLE_ORDER); ept_entry = table + index; - gprintk(XENLOG_ERR, " epte %"PRIx64"\n", ept_entry->epte); + gprintk(XENLOG_ERR, " epte %" PRIx64 "\n", ept_entry->epte); if ( (i == 0) || !is_epte_present(ept_entry) || is_epte_superpage(ept_entry) ) goto out; else { - gfn_remainder &= (1UL << (i*EPT_TABLE_ORDER)) - 1; + gfn_remainder &= (1UL << (i * EPT_TABLE_ORDER)) - 1; next = map_domain_page(_mfn(ept_entry->mfn)); @@ -998,8 +991,8 @@ out: return; } -static void 
ept_change_entry_type_global(struct p2m_domain *p2m, - p2m_type_t ot, p2m_type_t nt) +static void ept_change_entry_type_global(struct p2m_domain *p2m, p2m_type_t ot, + p2m_type_t nt) { unsigned long mfn = p2m->ept.mfn; @@ -1010,9 +1003,8 @@ static void ept_change_entry_type_global(struct p2m_domain *p2m, ept_sync_domain(p2m); } -static int ept_change_entry_type_range(struct p2m_domain *p2m, - p2m_type_t ot, p2m_type_t nt, - unsigned long first_gfn, +static int ept_change_entry_type_range(struct p2m_domain *p2m, p2m_type_t ot, + p2m_type_t nt, unsigned long first_gfn, unsigned long last_gfn) { unsigned int i, wl = p2m->ept.wl; @@ -1259,13 +1251,9 @@ void ept_p2m_uninit(struct p2m_domain *p2m) static const char *memory_type_to_str(unsigned int x) { static const char memory_types[8][3] = { - [MTRR_TYPE_UNCACHABLE] = "UC", - [MTRR_TYPE_WRCOMB] = "WC", - [MTRR_TYPE_WRTHROUGH] = "WT", - [MTRR_TYPE_WRPROT] = "WP", - [MTRR_TYPE_WRBACK] = "WB", - [MTRR_NUM_TYPES] = "??" - }; + [MTRR_TYPE_UNCACHABLE] = "UC", [MTRR_TYPE_WRCOMB] = "WC", + [MTRR_TYPE_WRTHROUGH] = "WT", [MTRR_TYPE_WRPROT] = "WP", + [MTRR_TYPE_WRBACK] = "WB", [MTRR_NUM_TYPES] = "??"}; ASSERT(x < ARRAY_SIZE(memory_types)); return memory_types[x][0] ? memory_types[x] : "?"; @@ -1283,7 +1271,7 @@ static void ept_dump_p2m_table(unsigned char key) struct p2m_domain *p2m; struct ept_data *ept; - for_each_domain(d) + for_each_domain (d) { if ( !hap_enabled(d) ) continue; @@ -1318,12 +1306,11 @@ static void ept_dump_p2m_table(unsigned char key) else printk("gfn: %13lx order: %2d mfn: %13lx %c%c%c %c%c%c\n", gfn, order, ept_entry->mfn + 0UL, - ept_entry->r ? 'r' : ' ', - ept_entry->w ? 'w' : ' ', + ept_entry->r ? 'r' : ' ', ept_entry->w ? 'w' : ' ', ept_entry->x ? 'x' : ' ', memory_type_to_str(ept_entry->emt)[0], memory_type_to_str(ept_entry->emt)[1] - ?: ept_entry->emt + '0', + ?: ept_entry->emt + '0', c ?: ept_entry->ipat ? '!' : ' '); if ( !(record_counter++ % 100) ) @@ -1379,7 +1366,7 @@ unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp) i = INVALID_ALTP2M; - out: +out: altp2m_list_unlock(d); return i; } diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c index 4313863066..772f5d9ce9 100644 --- a/xen/arch/x86/mm/p2m-pod.c +++ b/xen/arch/x86/mm/p2m-pod.c @@ -29,7 +29,7 @@ #include "mm-locks.h" -#define superpage_aligned(_x) (((_x)&(SUPERPAGE_PAGES-1))==0) +#define superpage_aligned(_x) (((_x) & (SUPERPAGE_PAGES - 1)) == 0) /* Enforce lock ordering when grabbing the "external" page_alloc lock */ static inline void lock_page_alloc(struct p2m_domain *p2m) @@ -50,10 +50,8 @@ static inline void unlock_page_alloc(struct p2m_domain *p2m) * Populate-on-demand functionality */ -static int -p2m_pod_cache_add(struct p2m_domain *p2m, - struct page_info *page, - unsigned int order) +static int p2m_pod_cache_add(struct p2m_domain *p2m, struct page_info *page, + unsigned int order) { unsigned long i; struct page_info *p; @@ -67,22 +65,21 @@ p2m_pod_cache_add(struct p2m_domain *p2m, /* Check to make sure this is a contiguous region */ if ( mfn_x(mfn) & ((1UL << order) - 1) ) { - printk("%s: mfn %lx not aligned order %u! (mask %lx)\n", - __func__, mfn_x(mfn), order, ((1UL << order) - 1)); + printk("%s: mfn %lx not aligned order %u! 
(mask %lx)\n", __func__, + mfn_x(mfn), order, ((1UL << order) - 1)); return -1; } - for ( i = 0; i < 1UL << order ; i++) + for ( i = 0; i < 1UL << order; i++ ) { - struct domain * od; + struct domain *od; p = mfn_to_page(mfn_add(mfn, i)); od = page_get_owner(p); if ( od != d ) { - printk("%s: mfn %lx expected owner d%d, got owner d%d!\n", - __func__, mfn_x(mfn), d->domain_id, - od ? od->domain_id : -1); + printk("%s: mfn %lx expected owner d%d, got owner d%d!\n", __func__, + mfn_x(mfn), d->domain_id, od ? od->domain_id : -1); return -1; } } @@ -100,7 +97,7 @@ p2m_pod_cache_add(struct p2m_domain *p2m, /* First, take all pages off the domain list */ lock_page_alloc(p2m); - for ( i = 0; i < 1UL << order ; i++ ) + for ( i = 0; i < 1UL << order; i++ ) { p = page + i; page_list_del(p, &d->page_list); @@ -109,7 +106,7 @@ p2m_pod_cache_add(struct p2m_domain *p2m, unlock_page_alloc(p2m); /* Then add to the appropriate populate-on-demand list. */ - switch ( order ) + switch (order) { case PAGE_ORDER_1G: for ( i = 0; i < (1UL << PAGE_ORDER_1G); i += 1UL << PAGE_ORDER_2M ) @@ -132,8 +129,8 @@ p2m_pod_cache_add(struct p2m_domain *p2m, /* Get a page of size order from the populate-on-demand cache. Will break * down 2-meg pages into singleton pages automatically. Returns null if * a superpage is requested and no superpages are available. */ -static struct page_info * p2m_pod_cache_get(struct p2m_domain *p2m, - unsigned int order) +static struct page_info *p2m_pod_cache_get(struct p2m_domain *p2m, + unsigned int order) { struct page_info *p = NULL; unsigned long i; @@ -149,7 +146,7 @@ static struct page_info * p2m_pod_cache_get(struct p2m_domain *p2m, unsigned long mfn; struct page_info *q; - BUG_ON( page_list_empty(&p2m->pod.super) ); + BUG_ON(page_list_empty(&p2m->pod.super)); /* * Break up a superpage to make single pages. NB count doesn't @@ -160,20 +157,20 @@ static struct page_info * p2m_pod_cache_get(struct p2m_domain *p2m, for ( i = 0; i < SUPERPAGE_PAGES; i++ ) { - q = mfn_to_page(_mfn(mfn+i)); + q = mfn_to_page(_mfn(mfn + i)); page_list_add_tail(q, &p2m->pod.single); } } - switch ( order ) + switch (order) { case PAGE_ORDER_2M: - BUG_ON( page_list_empty(&p2m->pod.super) ); + BUG_ON(page_list_empty(&p2m->pod.super)); p = page_list_remove_head(&p2m->pod.super); p2m->pod.count -= 1UL << order; break; case PAGE_ORDER_4K: - BUG_ON( page_list_empty(&p2m->pod.single) ); + BUG_ON(page_list_empty(&p2m->pod.single)); p = page_list_remove_head(&p2m->pod.single); p2m->pod.count -= 1UL; break; @@ -183,7 +180,7 @@ static struct page_info * p2m_pod_cache_get(struct p2m_domain *p2m, /* Put the pages back on the domain page_list */ lock_page_alloc(p2m); - for ( i = 0 ; i < (1UL << order); i++ ) + for ( i = 0; i < (1UL << order); i++ ) { BUG_ON(page_get_owner(p + i) != p2m->domain); page_list_add_tail(p + i, &p2m->domain->page_list); @@ -194,8 +191,8 @@ static struct page_info * p2m_pod_cache_get(struct p2m_domain *p2m, } /* Set the size of the cache, allocating or freeing as necessary. 
*/ -static int -p2m_pod_set_cache_target(struct p2m_domain *p2m, unsigned long pod_target, int preemptible) +static int p2m_pod_set_cache_target(struct p2m_domain *p2m, + unsigned long pod_target, int preemptible) { struct domain *d = p2m->domain; int ret = 0; @@ -205,7 +202,7 @@ p2m_pod_set_cache_target(struct p2m_domain *p2m, unsigned long pod_target, int p /* Increasing the target */ while ( pod_target > p2m->pod.count ) { - struct page_info * page; + struct page_info *page; int order; if ( (pod_target - p2m->pod.count) >= SUPERPAGE_PAGES ) @@ -223,7 +220,8 @@ p2m_pod_set_cache_target(struct p2m_domain *p2m, unsigned long pod_target, int p goto retry; } - printk("%s: Unable to allocate page for PoD cache (target=%lu cache=%ld)\n", + printk("%s: Unable to allocate page for PoD cache (target=%lu " + "cache=%ld)\n", __func__, pod_target, p2m->pod.count); ret = -ENOMEM; goto out; @@ -246,12 +244,12 @@ p2m_pod_set_cache_target(struct p2m_domain *p2m, unsigned long pod_target, int p */ while ( pod_target < p2m->pod.count ) { - struct page_info * page; + struct page_info *page; unsigned int order; unsigned long i; - if ( (p2m->pod.count - pod_target) > SUPERPAGE_PAGES - && !page_list_empty(&p2m->pod.super) ) + if ( (p2m->pod.count - pod_target) > SUPERPAGE_PAGES && + !page_list_empty(&p2m->pod.super) ) order = PAGE_ORDER_2M; else order = PAGE_ORDER_4K; @@ -261,20 +259,22 @@ p2m_pod_set_cache_target(struct p2m_domain *p2m, unsigned long pod_target, int p ASSERT(page != NULL); /* Then free them */ - for ( i = 0 ; i < (1UL << order) ; i++ ) + for ( i = 0; i < (1UL << order); i++ ) { /* Copied from common/memory.c:guest_remove_page() */ if ( unlikely(!get_page(page + i, d)) ) { - gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id); + gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", + d->domain_id); ret = -EINVAL; goto out; } - if ( test_and_clear_bit(_PGT_pinned, &(page+i)->u.inuse.type_info) ) + if ( test_and_clear_bit(_PGT_pinned, + &(page + i)->u.inuse.type_info) ) put_page_and_type(page + i); - if ( test_and_clear_bit(_PGC_allocated, &(page+i)->count_info) ) + if ( test_and_clear_bit(_PGC_allocated, &(page + i)->count_info) ) put_page(page + i); put_page(page + i); @@ -328,8 +328,7 @@ out: * entry when pod.entry_count == pod.count requires us to reduce both * pod.entry_count and pod.count. */ -int -p2m_pod_set_mem_target(struct domain *d, unsigned long target) +int p2m_pod_set_mem_target(struct domain *d, unsigned long target) { struct p2m_domain *p2m = p2m_get_hostp2m(d); int ret = 0; @@ -362,9 +361,9 @@ p2m_pod_set_mem_target(struct domain *d, unsigned long target) if ( populated > 0 && pod_target > p2m->pod.entry_count ) pod_target = p2m->pod.entry_count; - ASSERT( pod_target >= p2m->pod.count ); + ASSERT(pod_target >= p2m->pod.count); - ret = p2m_pod_set_cache_target(p2m, pod_target, 1/*preemptible*/); + ret = p2m_pod_set_cache_target(p2m, pod_target, 1 /*preemptible*/); out: pod_unlock(p2m); @@ -386,7 +385,7 @@ int p2m_pod_empty_cache(struct domain *d) while ( (page = page_list_remove_head(&p2m->pod.super)) ) { - for ( i = 0 ; i < SUPERPAGE_PAGES ; i++ ) + for ( i = 0; i < SUPERPAGE_PAGES; i++ ) { BUG_ON(page_get_owner(page + i) != d); page_list_add_tail(page + i, &d->page_list); @@ -411,13 +410,12 @@ int p2m_pod_empty_cache(struct domain *d) BUG_ON(p2m->pod.count != 0); - out: +out: unlock_page_alloc(p2m); return p2m->pod.count ? 
-ERESTART : 0; } -int -p2m_pod_offline_or_broken_hit(struct page_info *p) +int p2m_pod_offline_or_broken_hit(struct page_info *p) { struct domain *d; struct p2m_domain *p2m; @@ -436,7 +434,7 @@ p2m_pod_offline_or_broken_hit(struct page_info *p) { unsigned long i; page_list_del(q, &p2m->pod.super); - for ( i = 0; i < SUPERPAGE_PAGES; i++) + for ( i = 0; i < SUPERPAGE_PAGES; i++ ) { q = mfn_to_page(_mfn(mfn + i)); page_list_add_tail(q, &p2m->pod.single); @@ -470,8 +468,7 @@ pod_hit: return 1; } -void -p2m_pod_offline_or_broken_replace(struct page_info *p) +void p2m_pod_offline_or_broken_replace(struct page_info *p) { struct domain *d; struct p2m_domain *p2m; @@ -492,9 +489,7 @@ p2m_pod_offline_or_broken_replace(struct page_info *p) return; } -static int -p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t gfn); - +static int p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t gfn); /* * This function is needed for two reasons: @@ -505,8 +500,8 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t gfn); * Once both of these functions have been completed, we can return and * allow decrease_reservation() to handle everything else. */ -unsigned long -p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order) +unsigned long p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, + unsigned int order) { unsigned long ret = 0, i, n; struct p2m_domain *p2m = p2m_get_hostp2m(d); @@ -529,7 +524,7 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order) pod = nonpod = ram = 0; /* Figure out if we need to steal some freed memory for our cache */ - steal_for_cache = ( p2m->pod.entry_count > p2m->pod.count ); + steal_for_cache = (p2m->pod.entry_count > p2m->pod.count); for ( i = 0; i < (1UL << order); i += n ) { @@ -586,7 +581,8 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order) * - not all of the pages were RAM (now knowing order < SUPERPAGE_ORDER) */ if ( steal_for_cache && order < SUPERPAGE_ORDER && ram == (1UL << order) && - p2m_pod_zero_check_superpage(p2m, _gfn(gfn_x(gfn) & ~(SUPERPAGE_PAGES - 1))) ) + p2m_pod_zero_check_superpage( + p2m, _gfn(gfn_x(gfn) & ~(SUPERPAGE_PAGES - 1))) ) { pod = 1UL << order; ram = nonpod = 0; @@ -655,7 +651,7 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order) set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY); p2m_pod_cache_add(p2m, page, cur_order); - steal_for_cache = ( p2m->pod.entry_count > p2m->pod.count ); + steal_for_cache = (p2m->pod.entry_count > p2m->pod.count); nonpod -= n; ram -= n; @@ -667,7 +663,8 @@ out_entry_check: /* If we've reduced our "liabilities" beyond our "assets", free some */ if ( p2m->pod.entry_count < p2m->pod.count ) { - p2m_pod_set_cache_target(p2m, p2m->pod.entry_count, 0/*can't preempt*/); + p2m_pod_set_cache_target(p2m, p2m->pod.entry_count, + 0 /*can't preempt*/); } out_unlock: @@ -680,23 +677,21 @@ void p2m_pod_dump_data(struct domain *d) { struct p2m_domain *p2m = p2m_get_hostp2m(d); - printk(" PoD entries=%ld cachesize=%ld\n", - p2m->pod.entry_count, p2m->pod.count); + printk(" PoD entries=%ld cachesize=%ld\n", p2m->pod.entry_count, + p2m->pod.count); } - /* * Search for all-zero superpages to be reclaimed as superpages for the * PoD cache. Must be called w/ pod lock held, must lock the superpage * in the p2m. 
*/ -static int -p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t gfn) +static int p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t gfn) { mfn_t mfn, mfn0 = INVALID_MFN; p2m_type_t type, type0 = 0; - unsigned long * map = NULL; - int ret=0, reset = 0; + unsigned long *map = NULL; + int ret = 0, reset = 0; unsigned long i, n; unsigned int j; int max_ref = 1; @@ -728,8 +723,8 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t gfn) unsigned long k; const struct page_info *page; - mfn = p2m->get_entry(p2m, gfn_add(gfn, i), &type, &a, 0, - &cur_order, NULL); + mfn = p2m->get_entry(p2m, gfn_add(gfn, i), &type, &a, 0, &cur_order, + NULL); /* * Conditions that must be met for superpage-superpage: @@ -778,7 +773,6 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t gfn) if ( j < 16 ) goto out; - } /* Try to remove the page, restoring old mapping if it fails. */ @@ -809,7 +803,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t gfn) map = map_domain_page(mfn_add(mfn0, i)); for ( j = 0; j < (PAGE_SIZE / sizeof(*map)); j++ ) - if ( *(map+j) != 0 ) + if ( *(map + j) != 0 ) { reset = 1; break; @@ -823,9 +817,10 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t gfn) if ( tb_init_done ) { - struct { + struct + { u64 gfn, mfn; - int d:16,order:16; + int d : 16, order : 16; } t; t.gfn = gfn_x(gfn); @@ -851,8 +846,8 @@ out_reset: * on the same gfn succeeded above. If that turns out to be false, crashing * the domain should be the safest way of making sure we don't leak memory. */ - if ( reset && p2m_set_entry(p2m, gfn, mfn0, PAGE_ORDER_2M, - type0, p2m->default_access) ) + if ( reset && p2m_set_entry(p2m, gfn, mfn0, PAGE_ORDER_2M, type0, + p2m->default_access) ) { ASSERT_UNREACHABLE(); domain_crash(d); @@ -864,10 +859,10 @@ out: } #define POD_SWEEP_LIMIT 1024 -#define POD_SWEEP_STRIDE 16 +#define POD_SWEEP_STRIDE 16 -static void -p2m_pod_zero_check(struct p2m_domain *p2m, const gfn_t *gfns, unsigned int count) +static void p2m_pod_zero_check(struct p2m_domain *p2m, const gfn_t *gfns, + unsigned int count) { mfn_t mfns[POD_SWEEP_STRIDE]; p2m_type_t types[POD_SWEEP_STRIDE]; @@ -887,8 +882,7 @@ p2m_pod_zero_check(struct p2m_domain *p2m, const gfn_t *gfns, unsigned int count p2m_access_t a; struct page_info *pg; - mfns[i] = p2m->get_entry(p2m, gfns[i], types + i, &a, - 0, NULL, NULL); + mfns[i] = p2m->get_entry(p2m, gfns[i], types + i, &a, 0, NULL, NULL); pg = mfn_to_page(mfns[i]); /* @@ -932,8 +926,8 @@ p2m_pod_zero_check(struct p2m_domain *p2m, const gfn_t *gfns, unsigned int count * If the previous p2m_set_entry call succeeded, this one shouldn't * be able to fail. If it does, crashing the domain should be safe. */ - if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K, - types[i], p2m->default_access) ) + if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K, types[i], + p2m->default_access) ) { ASSERT_UNREACHABLE(); domain_crash(d); @@ -974,8 +968,8 @@ p2m_pod_zero_check(struct p2m_domain *p2m, const gfn_t *gfns, unsigned int count * If the previous p2m_set_entry call succeeded, this one shouldn't * be able to fail. If it does, crashing the domain should be safe. 
*/ - if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K, - types[i], p2m->default_access) ) + if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K, types[i], + p2m->default_access) ) { ASSERT_UNREACHABLE(); domain_crash(d); @@ -986,9 +980,10 @@ p2m_pod_zero_check(struct p2m_domain *p2m, const gfn_t *gfns, unsigned int count { if ( tb_init_done ) { - struct { + struct + { u64 gfn, mfn; - int d:16,order:16; + int d : 16, order : 16; } t; t.gfn = gfn_x(gfns[i]); @@ -1017,14 +1012,12 @@ out_unmap: unmap_domain_page(map[i]); } -static void -p2m_pod_emergency_sweep(struct p2m_domain *p2m) +static void p2m_pod_emergency_sweep(struct p2m_domain *p2m) { gfn_t gfns[POD_SWEEP_STRIDE]; unsigned long i, j = 0, start, limit; p2m_type_t t; - if ( gfn_eq(p2m->pod.reclaim_single, _gfn(0)) ) p2m->pod.reclaim_single = p2m->pod.max_guest; @@ -1038,7 +1031,7 @@ p2m_pod_emergency_sweep(struct p2m_domain *p2m) * careful about spinlock recursion limits and POD_SWEEP_STRIDE. */ p2m_lock(p2m); - for ( i = gfn_x(p2m->pod.reclaim_single); i > 0 ; i-- ) + for ( i = gfn_x(p2m->pod.reclaim_single); i > 0; i-- ) { p2m_access_t a; (void)p2m->get_entry(p2m, _gfn(i), &t, &a, 0, NULL, NULL); @@ -1069,7 +1062,6 @@ p2m_pod_emergency_sweep(struct p2m_domain *p2m) p2m_unlock(p2m); p2m->pod.reclaim_single = _gfn(i ? i - 1 : i); - } static void pod_eager_reclaim(struct p2m_domain *p2m) @@ -1083,8 +1075,7 @@ static void pod_eager_reclaim(struct p2m_domain *p2m) * If the PoD pool is empty, keep checking some space is found, or all * entries have been exhaused. */ - do - { + do { unsigned int idx = (mrp->idx + i++) % ARRAY_SIZE(mrp->list); gfn_t gfn = _gfn(mrp->list[idx]); @@ -1098,7 +1089,8 @@ static void pod_eager_reclaim(struct p2m_domain *p2m) { unsigned int x; - for ( x = 0; x < SUPERPAGE_PAGES; ++x, gfn = gfn_add(gfn, 1) ) + for ( x = 0; x < SUPERPAGE_PAGES; + ++x, gfn = gfn_add(gfn, 1) ) p2m_pod_zero_check(p2m, &gfn, 1); } } @@ -1123,9 +1115,8 @@ static void pod_eager_record(struct p2m_domain *p2m, gfn_t gfn, mrp->idx %= ARRAY_SIZE(mrp->list); } -bool -p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, - unsigned int order) +bool p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, + unsigned int order) { struct domain *d = p2m->domain; struct page_info *p = NULL; /* Compiler warnings */ @@ -1144,7 +1135,6 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, if ( unlikely(d->is_dying) ) goto out_fail; - /* * Because PoD does not have cache list for 1GB pages, it has to remap * 1GB region to 2MB chunks for a retry. @@ -1201,7 +1191,7 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, goto out_fail; } - for( i = 0; i < (1UL << order); i++ ) + for ( i = 0; i < (1UL << order); i++ ) { set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_x(gfn_aligned) + i); paging_mark_pfn_dirty(d, _pfn(gfn_x(gfn_aligned) + i)); @@ -1214,9 +1204,10 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, if ( tb_init_done ) { - struct { + struct + { u64 gfn, mfn; - int d:16,order:16; + int d : 16, order : 16; } t; t.gfn = gfn_x(gfn); @@ -1232,7 +1223,7 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, out_of_memory: pod_unlock(p2m); - printk("%s: Dom%d out of PoD memory! (tot=%"PRIu32" ents=%ld dom%d)\n", + printk("%s: Dom%d out of PoD memory! 
(tot=%" PRIu32 " ents=%ld dom%d)\n", __func__, d->domain_id, d->tot_pages, p2m->pod.entry_count, current->domain->domain_id); domain_crash(d); @@ -1257,9 +1248,10 @@ remap_and_retry: if ( tb_init_done ) { - struct { + struct + { u64 gfn; - int d:16; + int d : 16; } t; t.gfn = gfn_x(gfn); @@ -1271,10 +1263,8 @@ remap_and_retry: return true; } - -int -guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn_l, - unsigned int order) +int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn_l, + unsigned int order) { struct p2m_domain *p2m = p2m_get_hostp2m(d); gfn_t gfn = _gfn(gfn_l); @@ -1311,8 +1301,8 @@ guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn_l, } /* Now, actually do the two-way mapping */ - rc = p2m_set_entry(p2m, gfn, INVALID_MFN, order, - p2m_populate_on_demand, p2m->default_access); + rc = p2m_set_entry(p2m, gfn, INVALID_MFN, order, p2m_populate_on_demand, + p2m->default_access); if ( rc == 0 ) { pod_lock(p2m); diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c index cafc9f299b..aa7a307569 100644 --- a/xen/arch/x86/mm/p2m-pt.c +++ b/xen/arch/x86/mm/p2m-pt.c @@ -1,7 +1,7 @@ /****************************************************************************** * arch/x86/mm/p2m-pt.c * - * Implementation of p2m datastructures as pagetables, for use by + * Implementation of p2m datastructures as pagetables, for use by * NPT and shadow-pagetable code * * Parts of this code are Copyright (c) 2009-2011 by Citrix Systems, Inc. @@ -46,29 +46,30 @@ * to unclip on the read path, as callers are concerned only with p2m type in * such cases. */ -#define p2m_l1e_from_pfn(pfn, flags) \ +#define p2m_l1e_from_pfn(pfn, flags) \ l1e_from_pfn((pfn) & (PADDR_MASK >> PAGE_SHIFT), (flags)) -#define p2m_l2e_from_pfn(pfn, flags) \ - l2e_from_pfn((pfn) & ((PADDR_MASK & ~(_PAGE_PSE_PAT | 0UL)) \ - >> PAGE_SHIFT), (flags) | _PAGE_PSE) -#define p2m_l3e_from_pfn(pfn, flags) \ - l3e_from_pfn((pfn) & ((PADDR_MASK & ~(_PAGE_PSE_PAT | 0UL)) \ - >> PAGE_SHIFT), (flags) | _PAGE_PSE) +#define p2m_l2e_from_pfn(pfn, flags) \ + l2e_from_pfn((pfn) & \ + ((PADDR_MASK & ~(_PAGE_PSE_PAT | 0UL)) >> PAGE_SHIFT), \ + (flags) | _PAGE_PSE) +#define p2m_l3e_from_pfn(pfn, flags) \ + l3e_from_pfn((pfn) & \ + ((PADDR_MASK & ~(_PAGE_PSE_PAT | 0UL)) >> PAGE_SHIFT), \ + (flags) | _PAGE_PSE) /* PTE flags for the various types of p2m entry */ #define P2M_BASE_FLAGS \ - (_PAGE_PRESENT | _PAGE_USER | _PAGE_DIRTY | _PAGE_ACCESSED) + (_PAGE_PRESENT | _PAGE_USER | _PAGE_DIRTY | _PAGE_ACCESSED) -#define RECALC_FLAGS (_PAGE_USER|_PAGE_ACCESSED) +#define RECALC_FLAGS (_PAGE_USER | _PAGE_ACCESSED) #define set_recalc(level, ent) level##e_remove_flags(ent, RECALC_FLAGS) #define clear_recalc(level, ent) level##e_add_flags(ent, RECALC_FLAGS) -#define _needs_recalc(flags) (!((flags) & _PAGE_USER)) +#define _needs_recalc(flags) (!((flags)&_PAGE_USER)) #define needs_recalc(level, ent) _needs_recalc(level##e_get_flags(ent)) #define valid_recalc(level, ent) (!(level##e_get_flags(ent) & _PAGE_ACCESSED)) static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m, - p2m_type_t t, - mfn_t mfn, + p2m_type_t t, mfn_t mfn, unsigned int level) { unsigned long flags; @@ -79,7 +80,7 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m, */ flags = (unsigned long)(t & 0x7f) << 12; - switch(t) + switch (t) { case p2m_invalid: case p2m_mmio_dm: @@ -117,13 +118,12 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m, } } - // Find the next level's P2M 
entry, checking for out-of-range gfn's... // Returns NULL on error. // -static l1_pgentry_t * -p2m_find_entry(void *table, unsigned long *gfn_remainder, - unsigned long gfn, uint32_t shift, uint32_t max) +static l1_pgentry_t *p2m_find_entry(void *table, unsigned long *gfn_remainder, + unsigned long gfn, uint32_t shift, + uint32_t max) { u32 index; @@ -140,13 +140,13 @@ p2m_find_entry(void *table, unsigned long *gfn_remainder, } /* Free intermediate tables from a p2m sub-tree */ -static void -p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order) +static void p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, + int page_order) { /* End if the entry is a leaf entry. */ - if ( page_order == PAGE_ORDER_4K - || !(l1e_get_flags(*p2m_entry) & _PAGE_PRESENT) - || (l1e_get_flags(*p2m_entry) & _PAGE_PSE) ) + if ( page_order == PAGE_ORDER_4K || + !(l1e_get_flags(*p2m_entry) & _PAGE_PRESENT) || + (l1e_get_flags(*p2m_entry) & _PAGE_PSE) ) return; if ( page_order > PAGE_ORDER_2M ) @@ -166,20 +166,19 @@ p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order) // /* AMD IOMMU: Convert next level bits and r/w bits into 24 bits p2m flags */ -#define iommu_nlevel_to_flags(nl, f) ((((nl) & 0x7) << 9 )|(((f) & 0x3) << 21)) +#define iommu_nlevel_to_flags(nl, f) ((((nl)&0x7) << 9) | (((f)&0x3) << 21)) -static void p2m_add_iommu_flags(l1_pgentry_t *p2m_entry, - unsigned int nlevel, unsigned int flags) +static void p2m_add_iommu_flags(l1_pgentry_t *p2m_entry, unsigned int nlevel, + unsigned int flags) { if ( iommu_hap_pt_share ) l1e_add_flags(*p2m_entry, iommu_nlevel_to_flags(nlevel, flags)); } /* Returns: 0 for success, -errno for failure */ -static int -p2m_next_level(struct p2m_domain *p2m, void **table, - unsigned long *gfn_remainder, unsigned long gfn, u32 shift, - u32 max, unsigned int level, bool_t unmap) +static int p2m_next_level(struct p2m_domain *p2m, void **table, + unsigned long *gfn_remainder, unsigned long gfn, + u32 shift, u32 max, unsigned int level, bool_t unmap) { l1_pgentry_t *p2m_entry, new_entry; void *next; @@ -187,8 +186,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table, int rc; mfn_t mfn; - if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn, - shift, max)) ) + if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn, shift, max)) ) return -ENOENT; flags = l1e_get_flags(*p2m_entry); @@ -203,7 +201,8 @@ p2m_next_level(struct p2m_domain *p2m, void **table, new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW); - p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable); + p2m_add_iommu_flags(&new_entry, level, + IOMMUF_readable | IOMMUF_writable); rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1); if ( rc ) goto error; @@ -215,7 +214,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table, l1_pgentry_t *l1_entry; unsigned int i; - switch ( level ) + switch (level) { case 2: break; @@ -225,8 +224,8 @@ p2m_next_level(struct p2m_domain *p2m, void **table, * New splintered mappings inherit the flags of the old superpage, * with a little reorganisation for the _PAGE_PSE_PAT bit. 
*/ - if ( pfn & 1 ) /* ==> _PAGE_PSE_PAT was set */ - pfn -= 1; /* Clear it; _PAGE_PSE becomes _PAGE_PAT */ + if ( pfn & 1 ) /* ==> _PAGE_PSE_PAT was set */ + pfn -= 1; /* Clear it; _PAGE_PSE becomes _PAGE_PAT */ else flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */ break; @@ -251,8 +250,8 @@ p2m_next_level(struct p2m_domain *p2m, void **table, for ( i = 0; i < (1u << PAGETABLE_ORDER); i++ ) { - new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)), - flags); + new_entry = l1e_from_pfn( + pfn | (i << ((level - 1) * PAGETABLE_ORDER)), flags); rc = p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, level); if ( rc ) { @@ -265,9 +264,8 @@ p2m_next_level(struct p2m_domain *p2m, void **table, new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW); p2m_add_iommu_flags(&new_entry, level, - IOMMUF_readable|IOMMUF_writable); - rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, - level + 1); + IOMMUF_readable | IOMMUF_writable); + rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1); if ( rc ) goto error; } @@ -281,7 +279,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table, return 0; - error: +error: ASSERT(rc && mfn_valid(mfn)); ASSERT_UNREACHABLE(); p2m_free_ptp(p2m, mfn_to_page(mfn)); @@ -293,8 +291,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table, * present entries at the targeted level for the passed in GFN range, which is * guaranteed to not cross a page (table) boundary at that level. */ -static int p2m_pt_set_recalc_range(struct p2m_domain *p2m, - unsigned int level, +static int p2m_pt_set_recalc_range(struct p2m_domain *p2m, unsigned int level, unsigned long first_gfn, unsigned long last_gfn) { @@ -308,8 +305,8 @@ static int p2m_pt_set_recalc_range(struct p2m_domain *p2m, for ( i = 4; i-- > level; ) { remainder = gfn_remainder; - pent = p2m_find_entry(table, &remainder, first_gfn, - i * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER); + pent = p2m_find_entry(table, &remainder, first_gfn, i * PAGETABLE_ORDER, + 1 << PAGETABLE_ORDER); if ( !pent ) { err = -EINVAL; @@ -320,17 +317,16 @@ static int p2m_pt_set_recalc_range(struct p2m_domain *p2m, goto out; err = p2m_next_level(p2m, &table, &gfn_remainder, first_gfn, - i * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER, - i, 1); + i * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER, i, 1); if ( err ) goto out; } remainder = gfn_remainder + (last_gfn - first_gfn); - pent = p2m_find_entry(table, &gfn_remainder, first_gfn, - i * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER); - plast = p2m_find_entry(table, &remainder, last_gfn, - i * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER); + pent = p2m_find_entry(table, &gfn_remainder, first_gfn, i * PAGETABLE_ORDER, + 1 << PAGETABLE_ORDER); + plast = p2m_find_entry(table, &remainder, last_gfn, i * PAGETABLE_ORDER, + 1 << PAGETABLE_ORDER); if ( pent && plast ) for ( ; pent <= plast; ++pent ) { @@ -351,7 +347,7 @@ static int p2m_pt_set_recalc_range(struct p2m_domain *p2m, else err = -EIO; - out: +out: unmap_domain_page(table); return err; @@ -376,8 +372,8 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn) { unsigned long remainder = gfn_remainder; - pent = p2m_find_entry(table, &remainder, gfn, - level * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER); + pent = p2m_find_entry(table, &remainder, gfn, level * PAGETABLE_ORDER, + 1 << PAGETABLE_ORDER); if ( !pent || !(l1e_get_flags(*pent) & _PAGE_PRESENT) ) goto out; @@ -437,8 +433,8 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn) unmap_domain_page((void *)((unsigned long)pent & PAGE_MASK)); } - pent = 
p2m_find_entry(table, &gfn_remainder, gfn, - level * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER); + pent = p2m_find_entry(table, &gfn_remainder, gfn, level * PAGETABLE_ORDER, + 1 << PAGETABLE_ORDER); if ( pent && (l1e_get_flags(*pent) & _PAGE_PRESENT) && needs_recalc(l1, *pent) ) { @@ -454,25 +450,24 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn) if ( nt != ot ) { unsigned long mfn = l1e_get_pfn(e); - unsigned long flags = p2m_type_to_flags(p2m, nt, - _mfn(mfn), level); + unsigned long flags = p2m_type_to_flags(p2m, nt, _mfn(mfn), level); if ( level ) { if ( flags & _PAGE_PAT ) { - BUILD_BUG_ON(_PAGE_PAT != _PAGE_PSE); - mfn |= _PAGE_PSE_PAT >> PAGE_SHIFT; + BUILD_BUG_ON(_PAGE_PAT != _PAGE_PSE); + mfn |= _PAGE_PSE_PAT >> PAGE_SHIFT; } else - mfn &= ~((unsigned long)_PAGE_PSE_PAT >> PAGE_SHIFT); + mfn &= ~((unsigned long)_PAGE_PSE_PAT >> PAGE_SHIFT); flags |= _PAGE_PSE; } e = l1e_from_pfn(mfn, flags); - p2m_add_iommu_flags(&e, level, - (nt == p2m_ram_rw) - ? IOMMUF_readable|IOMMUF_writable : 0); + p2m_add_iommu_flags( + &e, level, + (nt == p2m_ram_rw) ? IOMMUF_readable | IOMMUF_writable : 0); ASSERT(!needs_recalc(l1, e)); } else @@ -481,7 +476,7 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn) ASSERT(!err); } - out: +out: unmap_domain_page(table); return err; @@ -524,10 +519,9 @@ static void check_entry(mfn_t mfn, p2m_type_t new, p2m_type_t old, } /* Returns: 0 for success, -errno for failure */ -static int -p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, - unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma, - int sve) +static int p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, + unsigned int page_order, p2m_type_t p2mt, + p2m_access_t p2ma, int sve) { struct domain *d = p2m->domain; /* XXX -- this might be able to be faster iff current->domain == d */ @@ -559,10 +553,11 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, if ( tb_init_done ) { - struct { + struct + { u64 gfn, mfn; int p2mt; - int d:16,order:16; + int d : 16, order : 16; } t; t.gfn = gfn; @@ -581,8 +576,8 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m))); rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn, - L4_PAGETABLE_SHIFT - PAGE_SHIFT, - L4_PAGETABLE_ENTRIES, 3, 1); + L4_PAGETABLE_SHIFT - PAGE_SHIFT, L4_PAGETABLE_ENTRIES, + 3, 1); if ( rc ) goto out; @@ -601,9 +596,8 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, if ( flags & _PAGE_PSE ) { old_mfn = l1e_get_pfn(*p2m_entry); - iommu_old_flags = - p2m_get_iommu_flags(p2m_flags_to_type(flags), - _mfn(old_mfn)); + iommu_old_flags = p2m_get_iommu_flags(p2m_flags_to_type(flags), + _mfn(old_mfn)); } else { @@ -613,10 +607,11 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, } check_entry(mfn, p2mt, p2m_flags_to_type(flags), page_order); - l3e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) - ? p2m_l3e_from_pfn(mfn_x(mfn), - p2m_type_to_flags(p2m, p2mt, mfn, 2)) - : l3e_empty(); + l3e_content = + mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) + ? 
p2m_l3e_from_pfn(mfn_x(mfn), + p2m_type_to_flags(p2m, p2mt, mfn, 2)) + : l3e_empty(); entry_content.l1 = l3e_content.l3; if ( entry_content.l1 != 0 ) @@ -627,7 +622,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, if ( rc ) goto out; } - else + else { rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn, L3_PAGETABLE_SHIFT - PAGE_SHIFT, @@ -644,17 +639,16 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, if ( rc ) goto out; - p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn, - 0, L1_PAGETABLE_ENTRIES); + p2m_entry = + p2m_find_entry(table, &gfn_remainder, gfn, 0, L1_PAGETABLE_ENTRIES); ASSERT(p2m_entry); old_mfn = l1e_get_pfn(*p2m_entry); - iommu_old_flags = - p2m_get_iommu_flags(p2m_flags_to_type(l1e_get_flags(*p2m_entry)), - _mfn(old_mfn)); + iommu_old_flags = p2m_get_iommu_flags( + p2m_flags_to_type(l1e_get_flags(*p2m_entry)), _mfn(old_mfn)); if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) ) - entry_content = p2m_l1e_from_pfn(mfn_x(mfn), - p2m_type_to_flags(p2m, p2mt, mfn, 0)); + entry_content = p2m_l1e_from_pfn( + mfn_x(mfn), p2m_type_to_flags(p2m, p2mt, mfn, 0)); else entry_content = l1e_empty(); @@ -679,9 +673,8 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, if ( flags & _PAGE_PSE ) { old_mfn = l1e_get_pfn(*p2m_entry); - iommu_old_flags = - p2m_get_iommu_flags(p2m_flags_to_type(flags), - _mfn(old_mfn)); + iommu_old_flags = p2m_get_iommu_flags(p2m_flags_to_type(flags), + _mfn(old_mfn)); } else { @@ -691,10 +684,11 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, } check_entry(mfn, p2mt, p2m_flags_to_type(flags), page_order); - l2e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) - ? p2m_l2e_from_pfn(mfn_x(mfn), - p2m_type_to_flags(p2m, p2mt, mfn, 1)) - : l2e_empty(); + l2e_content = + mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) + ? p2m_l2e_from_pfn(mfn_x(mfn), + p2m_type_to_flags(p2m, p2mt, mfn, 1)) + : l2e_empty(); entry_content.l1 = l2e_content.l2; if ( entry_content.l1 != 0 ) @@ -707,20 +701,19 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, } /* Track the highest gfn for which we have ever had a valid mapping */ - if ( p2mt != p2m_invalid - && (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) ) + if ( p2mt != p2m_invalid && + (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) ) p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1; - if ( iommu_enabled && (iommu_old_flags != iommu_pte_flags || - old_mfn != mfn_x(mfn)) ) + if ( iommu_enabled && + (iommu_old_flags != iommu_pte_flags || old_mfn != mfn_x(mfn)) ) { ASSERT(rc == 0); if ( need_iommu_pt_sync(p2m->domain) ) - rc = iommu_pte_flags ? - iommu_legacy_map(d, _dfn(gfn), mfn, page_order, - iommu_pte_flags) : - iommu_legacy_unmap(d, _dfn(gfn), page_order); + rc = iommu_pte_flags ? 
iommu_legacy_map(d, _dfn(gfn), mfn, + page_order, iommu_pte_flags) + : iommu_legacy_unmap(d, _dfn(gfn), page_order); else if ( iommu_use_hap_pt(d) && iommu_old_flags ) amd_iommu_flush_pages(p2m->domain, gfn, page_order); } @@ -733,15 +726,14 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, if ( l1e_get_flags(intermediate_entry) & _PAGE_PRESENT ) p2m_free_entry(p2m, &intermediate_entry, page_order); - out: +out: unmap_domain_page(table); return rc; } -static mfn_t -p2m_pt_get_entry(struct p2m_domain *p2m, gfn_t gfn_, - p2m_type_t *t, p2m_access_t *a, p2m_query_t q, - unsigned int *page_order, bool_t *sve) +static mfn_t p2m_pt_get_entry(struct p2m_domain *p2m, gfn_t gfn_, p2m_type_t *t, + p2m_access_t *a, p2m_query_t q, + unsigned int *page_order, bool_t *sve) { mfn_t mfn; unsigned long gfn = gfn_x(gfn_); @@ -757,13 +749,13 @@ p2m_pt_get_entry(struct p2m_domain *p2m, gfn_t gfn_, if ( sve ) *sve = 1; - /* XXX This is for compatibility with the old model, where anything not + /* XXX This is for compatibility with the old model, where anything not * XXX marked as RAM was considered to be emulated MMIO space. - * XXX Once we start explicitly registering MMIO regions in the p2m + * XXX Once we start explicitly registering MMIO regions in the p2m * XXX we will return p2m_invalid for unmapped gfns */ *t = p2m_mmio_dm; /* Not implemented except with EPT */ - *a = p2m_access_rwx; + *a = p2m_access_rwx; if ( gfn > p2m->max_mapped_pfn ) { @@ -772,8 +764,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, gfn_t gfn_, { for ( *page_order = 3 * PAGETABLE_ORDER; *page_order; *page_order -= PAGETABLE_ORDER ) - if ( (gfn & ~((1UL << *page_order) - 1)) > - p2m->max_mapped_pfn ) + if ( (gfn & ~((1UL << *page_order) - 1)) > p2m->max_mapped_pfn ) break; } return INVALID_MFN; @@ -801,7 +792,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, gfn_t gfn_, if ( page_order ) *page_order = 2 * PAGETABLE_ORDER; -pod_retry_l3: + pod_retry_l3: flags = l3e_get_flags(*l3e); if ( !(flags & _PAGE_PRESENT) ) { @@ -811,7 +802,8 @@ pod_retry_l3: { if ( p2m_pod_demand_populate(p2m, gfn_, PAGE_ORDER_1G) ) goto pod_retry_l3; - gdprintk(XENLOG_ERR, "%s: Allocate 1GB failed!\n", __func__); + gdprintk(XENLOG_ERR, "%s: Allocate 1GB failed!\n", + __func__); } else *t = p2m_populate_on_demand; @@ -850,13 +842,15 @@ pod_retry_l2: /* PoD: Try to populate a 2-meg chunk */ if ( p2m_flags_to_type(flags) == p2m_populate_on_demand ) { - if ( q & P2M_ALLOC ) { + if ( q & P2M_ALLOC ) + { if ( p2m_pod_demand_populate(p2m, gfn_, PAGE_ORDER_2M) ) goto pod_retry_l2; - } else + } + else *t = p2m_populate_on_demand; } - + unmap_domain_page(l2e); return INVALID_MFN; } @@ -866,7 +860,7 @@ pod_retry_l2: *t = p2m_recalc_type(recalc || _needs_recalc(flags), p2m_flags_to_type(flags), p2m, gfn); unmap_domain_page(l2e); - + ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t)); return (p2m_is_valid(*t)) ? 
mfn : INVALID_MFN; } @@ -889,13 +883,15 @@ pod_retry_l1: /* PoD: Try to populate */ if ( l1t == p2m_populate_on_demand ) { - if ( q & P2M_ALLOC ) { + if ( q & P2M_ALLOC ) + { if ( p2m_pod_demand_populate(p2m, gfn_, PAGE_ORDER_4K) ) goto pod_retry_l1; - } else + } + else *t = p2m_populate_on_demand; } - + unmap_domain_page(l1e); return INVALID_MFN; } @@ -924,8 +920,7 @@ static void p2m_pt_change_entry_type_global(struct p2m_domain *p2m, { l1_pgentry_t e = tab[i]; - if ( (l1e_get_flags(e) & _PAGE_PRESENT) && - !needs_recalc(l1, e) ) + if ( (l1e_get_flags(e) & _PAGE_PRESENT) && !needs_recalc(l1, e) ) { int rc; @@ -943,11 +938,11 @@ static void p2m_pt_change_entry_type_global(struct p2m_domain *p2m, unmap_domain_page(tab); if ( changed ) - flush_tlb_mask(p2m->domain->dirty_cpumask); + flush_tlb_mask(p2m->domain->dirty_cpumask); } -static int p2m_pt_change_entry_type_range(struct p2m_domain *p2m, - p2m_type_t ot, p2m_type_t nt, +static int p2m_pt_change_entry_type_range(struct p2m_domain *p2m, p2m_type_t ot, + p2m_type_t nt, unsigned long first_gfn, unsigned long last_gfn) { @@ -1017,9 +1012,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m) continue; } l3e = map_l3t_from_l4e(l4e[i4]); - for ( i3 = 0; - i3 < L3_PAGETABLE_ENTRIES; - i3++ ) + for ( i3 = 0; i3 < L3_PAGETABLE_ENTRIES; i3++ ) { if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) ) { @@ -1033,17 +1026,17 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m) mfn = l3e_get_pfn(l3e[i3]); ASSERT(mfn_valid(_mfn(mfn))); /* we have to cover 512x512 4K pages */ - for ( i2 = 0; + for ( i2 = 0; i2 < (L2_PAGETABLE_ENTRIES * L1_PAGETABLE_ENTRIES); - i2++) + i2++ ) { - m2pfn = get_gpfn_from_mfn(mfn+i2); + m2pfn = get_gpfn_from_mfn(mfn + i2); if ( m2pfn != (gfn + i2) ) { pmbad++; P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx" - " -> gfn %#lx\n", gfn+i2, mfn+i2, - m2pfn); + " -> gfn %#lx\n", + gfn + i2, mfn + i2, m2pfn); BUG(); } gfn += 1 << (L3_PAGETABLE_SHIFT - PAGE_SHIFT); @@ -1056,29 +1049,29 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m) { if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) ) { - if ( (l2e_get_flags(l2e[i2]) & _PAGE_PSE) - && ( p2m_flags_to_type(l2e_get_flags(l2e[i2])) - == p2m_populate_on_demand ) ) - entry_count+=SUPERPAGE_PAGES; + if ( (l2e_get_flags(l2e[i2]) & _PAGE_PSE) && + (p2m_flags_to_type(l2e_get_flags(l2e[i2])) == + p2m_populate_on_demand) ) + entry_count += SUPERPAGE_PAGES; gfn += 1 << (L2_PAGETABLE_SHIFT - PAGE_SHIFT); continue; } - + /* check for super page */ if ( l2e_get_flags(l2e[i2]) & _PAGE_PSE ) { mfn = l2e_get_pfn(l2e[i2]); ASSERT(mfn_valid(_mfn(mfn))); - for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++) + for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++ ) { - m2pfn = get_gpfn_from_mfn(mfn+i1); + m2pfn = get_gpfn_from_mfn(mfn + i1); /* Allow shared M2Ps */ if ( (m2pfn != (gfn + i1)) && !SHARED_M2P(m2pfn) ) { pmbad++; P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx" - " -> gfn %#lx\n", gfn+i1, mfn+i1, - m2pfn); + " -> gfn %#lx\n", + gfn + i1, mfn + i1, m2pfn); BUG(); } } @@ -1102,16 +1095,16 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m) mfn = l1e_get_pfn(l1e[i1]); ASSERT(mfn_valid(_mfn(mfn))); m2pfn = get_gpfn_from_mfn(mfn); - if ( m2pfn != gfn && - type != p2m_mmio_direct && - !p2m_is_grant(type) && - !p2m_is_shared(type) ) + if ( m2pfn != gfn && type != p2m_mmio_direct && + !p2m_is_grant(type) && !p2m_is_shared(type) ) { pmbad++; printk("mismatch: gfn %#lx -> mfn %#lx" - " -> gfn %#lx\n", gfn, mfn, m2pfn); + " -> gfn %#lx\n", + gfn, mfn, m2pfn); P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx" - " -> gfn %#lx\n", gfn, mfn, m2pfn); + 
" -> gfn %#lx\n", + gfn, mfn, m2pfn); BUG(); } } @@ -1127,17 +1120,15 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m) if ( entry_count != p2m->pod.entry_count ) { - printk("%s: refcounted entry count %ld, audit count %lu!\n", - __func__, - p2m->pod.entry_count, - entry_count); + printk("%s: refcounted entry count %ld, audit count %lu!\n", __func__, + p2m->pod.entry_count, entry_count); BUG(); } return pmbad; } #else -# define p2m_pt_audit_p2m NULL +#define p2m_pt_audit_p2m NULL #endif /* P2M_AUDIT */ /* Set up the p2m function pointers for pagetable format */ @@ -1155,5 +1146,3 @@ void p2m_pt_init(struct p2m_domain *p2m) p2m->audit_p2m = NULL; #endif } - - diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c index b9bbb8f485..4e88dbb5ef 100644 --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -3,8 +3,8 @@ * * physical-to-machine mappings for automatically-translated domains. * - * Parts of this code are Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp) - * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices. + * Parts of this code are Copyright (c) 2009 by Citrix Systems, Inc. (Patrick + * Colp) Parts of this code are Copyright (c) 2007 by Advanced Micro Devices. * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc. * Parts of this code are Copyright (c) 2006 by Michael A Fetterman * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al. @@ -64,8 +64,8 @@ static int p2m_init_logdirty(struct p2m_domain *p2m) if ( p2m->logdirty_ranges ) return 0; - p2m->logdirty_ranges = rangeset_new(p2m->domain, "log-dirty", - RANGESETF_prettyprint_hex); + p2m->logdirty_ranges = + rangeset_new(p2m->domain, "log-dirty", RANGESETF_prettyprint_hex); if ( !p2m->logdirty_ranges ) return -ENOMEM; @@ -285,15 +285,15 @@ int p2m_is_logdirty_range(struct p2m_domain *p2m, unsigned long start, return 0; } -static void change_entry_type_global(struct p2m_domain *p2m, - p2m_type_t ot, p2m_type_t nt) +static void change_entry_type_global(struct p2m_domain *p2m, p2m_type_t ot, + p2m_type_t nt) { p2m->change_entry_type_global(p2m, ot, nt); p2m->global_logdirty = (nt == p2m_ram_logdirty); } -void p2m_change_entry_type_global(struct domain *d, - p2m_type_t ot, p2m_type_t nt) +void p2m_change_entry_type_global(struct domain *d, p2m_type_t ot, + p2m_type_t nt) { struct p2m_domain *hostp2m = p2m_get_hostp2m(d); @@ -359,8 +359,7 @@ void p2m_memory_type_changed(struct domain *d) } #endif -int p2m_set_ioreq_server(struct domain *d, - unsigned int flags, +int p2m_set_ioreq_server(struct domain *d, unsigned int flags, struct hvm_ioreq_server *s) { struct p2m_domain *p2m = p2m_get_hostp2m(d); @@ -403,7 +402,7 @@ int p2m_set_ioreq_server(struct domain *d, rc = 0; - out: +out: spin_unlock(&p2m->ioreq.lock); return rc; @@ -459,7 +458,8 @@ void p2m_flush_hardware_cached_dirty(struct domain *d) */ void p2m_tlb_flush_sync(struct p2m_domain *p2m) { - if ( p2m->need_flush ) { + if ( p2m->need_flush ) + { p2m->need_flush = 0; p2m->tlb_flush(p2m); } @@ -470,17 +470,19 @@ void p2m_tlb_flush_sync(struct p2m_domain *p2m) */ void p2m_unlock_and_tlb_flush(struct p2m_domain *p2m) { - if ( p2m->need_flush ) { + if ( p2m->need_flush ) + { p2m->need_flush = 0; mm_write_unlock(&p2m->lock); p2m->tlb_flush(p2m); - } else + } + else mm_write_unlock(&p2m->lock); } mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l, - p2m_type_t *t, p2m_access_t *a, p2m_query_t q, - unsigned int *page_order, bool_t locked) + p2m_type_t *t, p2m_access_t *a, p2m_query_t q, + unsigned int 
*page_order, bool_t locked) { mfn_t mfn; gfn_t gfn = _gfn(gfn_l); @@ -513,7 +515,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l, mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL); } - if (unlikely((p2m_is_broken(*t)))) + if ( unlikely((p2m_is_broken(*t))) ) { /* Return invalid_mfn to avoid caller's access */ mfn = INVALID_MFN; @@ -536,9 +538,9 @@ void __put_gfn(struct p2m_domain *p2m, unsigned long gfn) } /* Atomically look up a GFN and take a reference count on the backing page. */ -struct page_info *p2m_get_page_from_gfn( - struct p2m_domain *p2m, gfn_t gfn, - p2m_type_t *t, p2m_access_t *a, p2m_query_t q) +struct page_info *p2m_get_page_from_gfn(struct p2m_domain *p2m, gfn_t gfn, + p2m_type_t *t, p2m_access_t *a, + p2m_query_t q) { struct page_info *page = NULL; p2m_access_t _a; @@ -554,8 +556,8 @@ struct page_info *p2m_get_page_from_gfn( /* Fast path: look up and get out */ p2m_read_lock(p2m); mfn = __get_gfn_type_access(p2m, gfn_x(gfn), t, a, 0, NULL, 0); - if ( p2m_is_any_ram(*t) && mfn_valid(mfn) - && !((q & P2M_UNSHARE) && p2m_is_shared(*t)) ) + if ( p2m_is_any_ram(*t) && mfn_valid(mfn) && + !((q & P2M_UNSHARE) && p2m_is_shared(*t)) ) { page = mfn_to_page(mfn); if ( unlikely(p2m_is_foreign(*t)) ) @@ -613,10 +615,12 @@ int p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn, fn_mask |= gfn_x(gfn) | todo; - order = (!(fn_mask & ((1ul << PAGE_ORDER_1G) - 1)) && - hap_has_1gb) ? PAGE_ORDER_1G : - (!(fn_mask & ((1ul << PAGE_ORDER_2M) - 1)) && - hap_has_2mb) ? PAGE_ORDER_2M : PAGE_ORDER_4K; + order = + (!(fn_mask & ((1ul << PAGE_ORDER_1G) - 1)) && hap_has_1gb) + ? PAGE_ORDER_1G + : (!(fn_mask & ((1ul << PAGE_ORDER_2M) - 1)) && hap_has_2mb) + ? PAGE_ORDER_2M + : PAGE_ORDER_4K; } else order = 0; @@ -683,8 +687,7 @@ int p2m_alloc_table(struct p2m_domain *p2m) p2m_lock(p2m); - if ( p2m_is_hostp2m(p2m) - && !page_list_empty(&d->page_list) ) + if ( p2m_is_hostp2m(p2m) && !page_list_empty(&d->page_list) ) { P2M_ERROR("dom %d already has memory allocated\n", d->domain_id); p2m_unlock(p2m); @@ -716,8 +719,8 @@ int p2m_alloc_table(struct p2m_domain *p2m) /* Initialise physmap tables for slot zero. Other code assumes this. 
*/ p2m->defer_nested_flush = 1; - rc = p2m_set_entry(p2m, _gfn(0), INVALID_MFN, PAGE_ORDER_4K, - p2m_invalid, p2m->default_access); + rc = p2m_set_entry(p2m, _gfn(0), INVALID_MFN, PAGE_ORDER_4K, p2m_invalid, + p2m->default_access); p2m->defer_nested_flush = 0; p2m_unlock(p2m); if ( !rc ) @@ -738,7 +741,7 @@ void p2m_teardown(struct p2m_domain *p2m) struct page_info *pg; struct domain *d; - if (p2m == NULL) + if ( p2m == NULL ) return; d = p2m->domain; @@ -767,10 +770,8 @@ void p2m_final_teardown(struct domain *d) p2m_teardown_hostp2m(d); } - -static int -p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn, - unsigned int page_order) +static int p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, + unsigned long mfn, unsigned int page_order) { unsigned long i; gfn_t gfn = _gfn(gfn_l); @@ -789,20 +790,19 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn, { for ( i = 0; i < (1UL << page_order); i++ ) { - mfn_return = p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0, - NULL, NULL); + mfn_return = + p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0, NULL, NULL); if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) ) - set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY); - ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) ); + set_gpfn_from_mfn(mfn + i, INVALID_M2P_ENTRY); + ASSERT(!p2m_is_valid(t) || mfn + i == mfn_x(mfn_return)); } } return p2m_set_entry(p2m, gfn, INVALID_MFN, page_order, p2m_invalid, p2m->default_access); } -int -guest_physmap_remove_page(struct domain *d, gfn_t gfn, - mfn_t mfn, unsigned int page_order) +int guest_physmap_remove_page(struct domain *d, gfn_t gfn, mfn_t mfn, + unsigned int page_order) { struct p2m_domain *p2m = p2m_get_hostp2m(d); int rc; @@ -812,9 +812,8 @@ guest_physmap_remove_page(struct domain *d, gfn_t gfn, return rc; } -int -guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn, - unsigned int page_order, p2m_type_t t) +int guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn, + unsigned int page_order, p2m_type_t t) { struct p2m_domain *p2m = p2m_get_hostp2m(d); unsigned long i; @@ -866,15 +865,14 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn, /* First, remove m->p mappings for existing p->m mappings */ for ( i = 0; i < (1UL << page_order); i++ ) { - omfn = p2m->get_entry(p2m, gfn_add(gfn, i), &ot, - &a, 0, NULL, NULL); + omfn = p2m->get_entry(p2m, gfn_add(gfn, i), &ot, &a, 0, NULL, NULL); if ( p2m_is_shared(ot) ) { - /* Do an unshare to cleanly take care of all corner + /* Do an unshare to cleanly take care of all corner * cases. */ int rc; - rc = mem_sharing_unshare_page(p2m->domain, - gfn_x(gfn_add(gfn, i)), 0); + rc = mem_sharing_unshare_page(p2m->domain, gfn_x(gfn_add(gfn, i)), + 0); if ( rc ) { p2m_unlock(p2m); @@ -887,14 +885,13 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn, * However, all current (changeset 3432abcf9380) code * paths avoid this unsavoury situation. For now. * - * Foreign domains are okay to place an event as they + * Foreign domains are okay to place an event as they * won't go to sleep. 
*/ (void)mem_sharing_notify_enomem(p2m->domain, gfn_x(gfn_add(gfn, i)), false); return rc; } - omfn = p2m->get_entry(p2m, gfn_add(gfn, i), - &ot, &a, 0, NULL, NULL); + omfn = p2m->get_entry(p2m, gfn_add(gfn, i), &ot, &a, 0, NULL, NULL); ASSERT(!p2m_is_shared(ot)); } if ( p2m_is_grant(ot) || p2m_is_foreign(ot) ) @@ -902,7 +899,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn, /* Really shouldn't be unmapping grant/foreign maps this way */ domain_crash(d); p2m_unlock(p2m); - + return -EINVAL; } else if ( p2m_is_ram(ot) && !p2m_is_paged(ot) ) @@ -917,7 +914,8 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn, } else if ( p2m_is_paging(ot) && (ot != p2m_ram_paging_out) ) { - /* We're plugging a hole in the physmap where a paged out page was */ + /* We're plugging a hole in the physmap where a paged out page was + */ atomic_dec(&d->paged_pages); } } @@ -928,7 +926,9 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn, if ( page_get_owner(mfn_to_page(mfn_add(mfn, i))) == dom_cow ) { /* This is no way to add a shared page to your physmap! */ - gdprintk(XENLOG_ERR, "Adding shared mfn %lx directly to dom%d physmap not allowed.\n", + gdprintk(XENLOG_ERR, + "Adding shared mfn %lx directly to dom%d physmap not " + "allowed.\n", mfn_x(mfn_add(mfn, i)), d->domain_id); p2m_unlock(p2m); return -EINVAL; @@ -948,8 +948,8 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn, if ( p2m_is_ram(ot) && !p2m_is_paged(ot) ) { ASSERT(mfn_valid(omfn)); - P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n", - gfn_x(ogfn) , mfn_x(omfn)); + P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n", gfn_x(ogfn), + mfn_x(omfn)); if ( mfn_eq(omfn, mfn_add(mfn, i)) ) p2m_remove_page(p2m, gfn_x(ogfn), mfn_x(mfn_add(mfn, i)), 0); @@ -960,8 +960,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn, /* Now, actually do the two-way mapping */ if ( mfn_valid(mfn) ) { - rc = p2m_set_entry(p2m, gfn, mfn, page_order, t, - p2m->default_access); + rc = p2m_set_entry(p2m, gfn, mfn, page_order, t, p2m->default_access); if ( rc ) goto out; /* Failed to update p2m, bail without updating m2p. */ @@ -976,8 +975,8 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn, { gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n", gfn_x(gfn), mfn_x(mfn)); - rc = p2m_set_entry(p2m, gfn, INVALID_MFN, page_order, - p2m_invalid, p2m->default_access); + rc = p2m_set_entry(p2m, gfn, INVALID_MFN, page_order, p2m_invalid, + p2m->default_access); #ifdef CONFIG_HVM if ( rc == 0 ) { @@ -995,14 +994,13 @@ out: return rc; } - /* * Modify the p2m type of a single gfn from ot to nt. * Returns: 0 for success, -errno for failure. * Resets the access permissions. */ -int p2m_change_type_one(struct domain *d, unsigned long gfn_l, - p2m_type_t ot, p2m_type_t nt) +int p2m_change_type_one(struct domain *d, unsigned long gfn_l, p2m_type_t ot, + p2m_type_t nt) { p2m_access_t a; p2m_type_t pt; @@ -1017,10 +1015,9 @@ int p2m_change_type_one(struct domain *d, unsigned long gfn_l, gfn_lock(p2m, gfn, 0); mfn = p2m->get_entry(p2m, gfn, &pt, &a, 0, NULL, NULL); - rc = likely(pt == ot) - ? p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, - p2m->default_access) - : -EBUSY; + rc = likely(pt == ot) ? p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, + p2m->default_access) + : -EBUSY; gfn_unlock(p2m, gfn, 0); @@ -1028,9 +1025,9 @@ int p2m_change_type_one(struct domain *d, unsigned long gfn_l, } /* Modify the p2m type of [start, end_exclusive) from ot to nt. 
*/ -static void change_type_range(struct p2m_domain *p2m, - unsigned long start, unsigned long end_exclusive, - p2m_type_t ot, p2m_type_t nt) +static void change_type_range(struct p2m_domain *p2m, unsigned long start, + unsigned long end_exclusive, p2m_type_t ot, + p2m_type_t nt) { unsigned long invalidate_start, invalidate_end; struct domain *d = p2m->domain; @@ -1046,7 +1043,7 @@ static void change_type_range(struct p2m_domain *p2m, * in the altp2m. Keep track of and clip the ranges separately. */ invalidate_start = start; - invalidate_end = end; + invalidate_end = end; /* * Clip down to the host p2m. This is probably not the right behavior. @@ -1054,7 +1051,8 @@ static void change_type_range(struct p2m_domain *p2m, */ if ( unlikely(end > host_max_pfn) ) { - printk(XENLOG_G_WARNING "Dom%d logdirty rangeset clipped to max_mapped_pfn\n", + printk(XENLOG_G_WARNING + "Dom%d logdirty rangeset clipped to max_mapped_pfn\n", d->domain_id); end = invalidate_end = host_max_pfn; } @@ -1083,20 +1081,21 @@ static void change_type_range(struct p2m_domain *p2m, * >=. */ ASSERT(invalidate_end <= max_pfn); - if ( !invalidate_start && invalidate_end == max_pfn) + if ( !invalidate_start && invalidate_end == max_pfn ) p2m->change_entry_type_global(p2m, ot, nt); else - rc = p2m->change_entry_type_range(p2m, ot, nt, - invalidate_start, invalidate_end); + rc = p2m->change_entry_type_range(p2m, ot, nt, invalidate_start, + invalidate_end); if ( rc ) { - printk(XENLOG_G_ERR "Error %d changing Dom%d GFNs [%lx,%lx] from %d to %d\n", + printk(XENLOG_G_ERR + "Error %d changing Dom%d GFNs [%lx,%lx] from %d to %d\n", rc, d->domain_id, invalidate_start, invalidate_end, ot, nt); domain_crash(d); } } - switch ( nt ) + switch (nt) { case p2m_ram_rw: if ( ot == p2m_ram_logdirty ) @@ -1117,9 +1116,8 @@ static void change_type_range(struct p2m_domain *p2m, } } -void p2m_change_type_range(struct domain *d, - unsigned long start, unsigned long end, - p2m_type_t ot, p2m_type_t nt) +void p2m_change_type_range(struct domain *d, unsigned long start, + unsigned long end, p2m_type_t ot, p2m_type_t nt) { struct p2m_domain *hostp2m = p2m_get_hostp2m(d); @@ -1160,8 +1158,8 @@ void p2m_change_type_range(struct domain *d, * range for alternate p2ms. 
* Returns: 0/1 for success, negative for failure */ -static int finish_type_change(struct p2m_domain *p2m, - gfn_t first_gfn, unsigned long max_nr) +static int finish_type_change(struct p2m_domain *p2m, gfn_t first_gfn, + unsigned long max_nr) { unsigned long gfn = gfn_x(first_gfn); unsigned long last_gfn = gfn + max_nr - 1; @@ -1191,8 +1189,8 @@ static int finish_type_change(struct p2m_domain *p2m, return rc; } -int p2m_finish_type_change(struct domain *d, - gfn_t first_gfn, unsigned long max_nr) +int p2m_finish_type_change(struct domain *d, gfn_t first_gfn, + unsigned long max_nr) { struct p2m_domain *hostp2m = p2m_get_hostp2m(d); int rc; @@ -1230,7 +1228,7 @@ int p2m_finish_type_change(struct domain *d, } #endif - out: +out: p2m_unlock(hostp2m); return rc; @@ -1243,9 +1241,9 @@ int p2m_finish_type_change(struct domain *d, * 1 + new order for caller to retry with smaller order (guaranteed * to be smaller than order passed in) */ -static int set_typed_p2m_entry(struct domain *d, unsigned long gfn_l, - mfn_t mfn, unsigned int order, - p2m_type_t gfn_p2mt, p2m_access_t access) +static int set_typed_p2m_entry(struct domain *d, unsigned long gfn_l, mfn_t mfn, + unsigned int order, p2m_type_t gfn_p2mt, + p2m_access_t access) { int rc = 0; p2m_access_t a; @@ -1285,7 +1283,7 @@ static int set_typed_p2m_entry(struct domain *d, unsigned long gfn_l, P2M_DEBUG("set %d %lx %lx\n", gfn_p2mt, gfn_l, mfn_x(mfn)); rc = p2m_set_entry(p2m, gfn, mfn, order, gfn_p2mt, access); if ( rc ) - gdprintk(XENLOG_ERR, "p2m_set_entry: %#lx:%u -> %d (0x%"PRI_mfn")\n", + gdprintk(XENLOG_ERR, "p2m_set_entry: %#lx:%u -> %d (0x%" PRI_mfn ")\n", gfn_l, order, rc, mfn_x(mfn)); #ifdef CONFIG_HVM else if ( p2m_is_pod(ot) ) @@ -1352,9 +1350,8 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l, ret = 0; else ret = -EBUSY; - printk(XENLOG_G_WARNING - "Cannot setup identity map d%d:%lx," - " gfn already mapped to %lx.\n", + printk(XENLOG_G_WARNING "Cannot setup identity map d%d:%lx," + " gfn already mapped to %lx.\n", d->domain_id, gfn_l, mfn_x(mfn)); } @@ -1394,18 +1391,17 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn_l, mfn_t mfn, /* Do not use mfn_valid() here as it will usually fail for MMIO pages. */ if ( mfn_eq(actual_mfn, INVALID_MFN) || (t != p2m_mmio_direct) ) { - gdprintk(XENLOG_ERR, - "gfn_to_mfn failed! gfn=%08lx type:%d\n", gfn_l, t); + gdprintk(XENLOG_ERR, "gfn_to_mfn failed! 
gfn=%08lx type:%d\n", gfn_l, + t); goto out; } if ( !mfn_eq(mfn, actual_mfn) ) - gdprintk(XENLOG_WARNING, - "no mapping between mfn %08lx and gfn %08lx\n", + gdprintk(XENLOG_WARNING, "no mapping between mfn %08lx and gfn %08lx\n", mfn_x(mfn), gfn_l); rc = p2m_set_entry(p2m, gfn, INVALID_MFN, order, p2m_invalid, p2m->default_access); - out: +out: gfn_unlock(p2m, gfn, order); return rc; @@ -1432,8 +1428,8 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l) mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL); if ( p2mt == p2m_mmio_direct && mfn_x(mfn) == gfn_l ) { - ret = p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K, - p2m_invalid, p2m->default_access); + ret = p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K, p2m_invalid, + p2m->default_access); gfn_unlock(p2m, gfn, 0); } else @@ -1471,8 +1467,8 @@ int set_shared_p2m_entry(struct domain *d, unsigned long gfn_l, mfn_t mfn) /* Set the m2p entry to invalid only if there are no further type * refs to this page as shared */ pg_type = read_atomic(&(mfn_to_page(omfn)->u.inuse.type_info)); - if ( (pg_type & PGT_count_mask) == 0 - || (pg_type & PGT_type_mask) != PGT_shared_page ) + if ( (pg_type & PGT_count_mask) == 0 || + (pg_type & PGT_type_mask) != PGT_shared_page ) set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY); P2M_DEBUG("set shared %lx %lx\n", gfn_l, mfn_x(mfn)); @@ -1480,8 +1476,7 @@ int set_shared_p2m_entry(struct domain *d, unsigned long gfn_l, mfn_t mfn) p2m->default_access); gfn_unlock(p2m, gfn, 0); if ( rc ) - gdprintk(XENLOG_ERR, - "p2m_set_entry failed! mfn=%08lx rc:%d\n", + gdprintk(XENLOG_ERR, "p2m_set_entry failed! mfn=%08lx rc:%d\n", mfn_x(get_gfn_query_unlocked(p2m->domain, gfn_l, &ot)), rc); return rc; } @@ -1542,7 +1537,7 @@ int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn_l) /* Fix p2m entry */ ret = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_out, a); - out: +out: gfn_unlock(p2m, gfn, 0); return ret; } @@ -1605,8 +1600,7 @@ int p2m_mem_paging_evict(struct domain *d, unsigned long gfn_l) put_page(page); /* Remove mapping from p2m table */ - ret = p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K, - p2m_ram_paged, a); + ret = p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K, p2m_ram_paged, a); /* Clear content before returning the page to Xen */ scrub_one_page(page); @@ -1614,11 +1608,11 @@ int p2m_mem_paging_evict(struct domain *d, unsigned long gfn_l) /* Track number of paged gfns */ atomic_inc(&d->paged_pages); - out_put: +out_put: /* Put the page back so it gets freed */ put_page(page); - out: +out: gfn_unlock(p2m, gfn, 0); return ret; } @@ -1633,12 +1627,10 @@ int p2m_mem_paging_evict(struct domain *d, unsigned long gfn_l) * gfn. */ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn, - p2m_type_t p2mt) + p2m_type_t p2mt) { - vm_event_request_t req = { - .reason = VM_EVENT_REASON_MEM_PAGING, - .u.mem_paging.gfn = gfn - }; + vm_event_request_t req = {.reason = VM_EVENT_REASON_MEM_PAGING, + .u.mem_paging.gfn = gfn}; /* We allow no ring in this unique case, because it won't affect * correctness of the guest execution at this point. 
If this is the only @@ -1685,10 +1677,8 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn, void p2m_mem_paging_populate(struct domain *d, unsigned long gfn_l) { struct vcpu *v = current; - vm_event_request_t req = { - .reason = VM_EVENT_REASON_MEM_PAGING, - .u.mem_paging.gfn = gfn_l - }; + vm_event_request_t req = {.reason = VM_EVENT_REASON_MEM_PAGING, + .u.mem_paging.gfn = gfn_l}; p2m_type_t p2mt; p2m_access_t a; gfn_t gfn = _gfn(gfn_l); @@ -1699,8 +1689,10 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn_l) int rc = vm_event_claim_slot(d, d->vm_event_paging); if ( rc == -ENOSYS ) { - gdprintk(XENLOG_ERR, "Domain %hu paging gfn %lx yet no ring " - "in place\n", d->domain_id, gfn_l); + gdprintk(XENLOG_ERR, + "Domain %hu paging gfn %lx yet no ring " + "in place\n", + d->domain_id, gfn_l); /* Prevent the vcpu from faulting repeatedly on the same gfn */ if ( v->domain == d ) vcpu_pause_nosync(v); @@ -1768,12 +1760,11 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn_l, uint64_t buffer) mfn_t mfn; struct p2m_domain *p2m = p2m_get_hostp2m(d); int ret, page_extant = 1; - const void *user_ptr = (const void *) buffer; + const void *user_ptr = (const void *)buffer; if ( user_ptr ) /* Sanity check the buffer and bail out early if trouble */ - if ( (buffer & (PAGE_SIZE - 1)) || - (!access_ok(user_ptr, PAGE_SIZE)) ) + if ( (buffer & (PAGE_SIZE - 1)) || (!access_ok(user_ptr, PAGE_SIZE)) ) return -EINVAL; gfn_lock(p2m, gfn, 0); @@ -1807,32 +1798,34 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn_l, uint64_t buffer) void *guest_map; int rc; - ASSERT( mfn_valid(mfn) ); + ASSERT(mfn_valid(mfn)); guest_map = map_domain_page(mfn); rc = copy_from_user(guest_map, user_ptr, PAGE_SIZE); unmap_domain_page(guest_map); if ( rc ) { - gdprintk(XENLOG_ERR, "Failed to load paging-in gfn %lx domain %u " - "bytes left %d\n", gfn_l, d->domain_id, rc); + gdprintk(XENLOG_ERR, + "Failed to load paging-in gfn %lx domain %u " + "bytes left %d\n", + gfn_l, d->domain_id, rc); ret = -EFAULT; put_page(page); /* Don't leak pages */ - goto out; + goto out; } } /* Make the page already guest-accessible. If the pager still has a * pending resume operation, it will be idempotent p2m entry-wise, * but will unpause the vcpu */ - ret = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, - paging_mode_log_dirty(d) ? p2m_ram_logdirty - : p2m_ram_rw, a); + ret = p2m_set_entry( + p2m, gfn, mfn, PAGE_ORDER_4K, + paging_mode_log_dirty(d) ? p2m_ram_logdirty : p2m_ram_rw, a); set_gpfn_from_mfn(mfn_x(mfn), gfn_l); if ( !page_extant ) atomic_dec(&d->paged_pages); - out: +out: gfn_unlock(p2m, gfn, 0); return ret; } @@ -1873,9 +1866,9 @@ void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp) */ if ( mfn_valid(mfn) && (p2mt == p2m_ram_paging_in) ) { - int rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, - paging_mode_log_dirty(d) ? p2m_ram_logdirty : - p2m_ram_rw, a); + int rc = p2m_set_entry( + p2m, gfn, mfn, PAGE_ORDER_4K, + paging_mode_log_dirty(d) ? 
p2m_ram_logdirty : p2m_ram_rw, a); if ( !rc ) set_gpfn_from_mfn(mfn_x(mfn), gfn_x(gfn)); @@ -1885,11 +1878,11 @@ void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp) } #ifdef CONFIG_HVM -static struct p2m_domain * -p2m_getlru_nestedp2m(struct domain *d, struct p2m_domain *p2m) +static struct p2m_domain *p2m_getlru_nestedp2m(struct domain *d, + struct p2m_domain *p2m) { struct list_head *lru_list = &p2m_get_hostp2m(d)->np2m_list; - + ASSERT(!list_empty(lru_list)); if ( p2m == NULL ) @@ -1900,8 +1893,7 @@ p2m_getlru_nestedp2m(struct domain *d, struct p2m_domain *p2m) return p2m; } -static void -p2m_flush_table_locked(struct p2m_domain *p2m) +static void p2m_flush_table_locked(struct p2m_domain *p2m) { struct page_info *top, *pg; struct domain *d = p2m->domain; @@ -1947,16 +1939,14 @@ p2m_flush_table_locked(struct p2m_domain *p2m) } /* Reset this p2m table to be empty */ -static void -p2m_flush_table(struct p2m_domain *p2m) +static void p2m_flush_table(struct p2m_domain *p2m) { p2m_lock(p2m); p2m_flush_table_locked(p2m); p2m_unlock(p2m); } -void -p2m_flush(struct vcpu *v, struct p2m_domain *p2m) +void p2m_flush(struct vcpu *v, struct p2m_domain *p2m) { ASSERT(v->domain == p2m->domain); vcpu_nestedhvm(v).nv_p2m = NULL; @@ -1964,8 +1954,7 @@ p2m_flush(struct vcpu *v, struct p2m_domain *p2m) hvm_asid_flush_vcpu(v); } -void -p2m_flush_nestedp2m(struct domain *d) +void p2m_flush_nestedp2m(struct domain *d) { int i; for ( i = 0; i < MAX_NESTEDP2M; i++ ) @@ -2016,8 +2005,7 @@ static void nvcpu_flush(struct vcpu *v) vcpu_nestedhvm(v).stale_np2m = true; } -struct p2m_domain * -p2m_get_nestedp2m_locked(struct vcpu *v) +struct p2m_domain *p2m_get_nestedp2m_locked(struct vcpu *v) { struct nestedvcpu *nv = &vcpu_nestedhvm(v); struct domain *d = v->domain; @@ -2029,13 +2017,14 @@ p2m_get_nestedp2m_locked(struct vcpu *v) /* Mask out low bits; this avoids collisions with P2M_BASE_EADDR */ np2m_base &= ~(0xfffull); - if (nv->nv_flushp2m && nv->nv_p2m) { + if ( nv->nv_flushp2m && nv->nv_p2m ) + { nv->nv_p2m = NULL; } nestedp2m_lock(d); p2m = nv->nv_p2m; - if ( p2m ) + if ( p2m ) { p2m_lock(p2m); if ( p2m->np2m_base == np2m_base ) @@ -2072,7 +2061,7 @@ p2m_get_nestedp2m_locked(struct vcpu *v) p2m_flush_table(p2m); p2m_lock(p2m); - found: +found: if ( needs_flush ) nvcpu_flush(v); p2m->np2m_base = np2m_base; @@ -2090,10 +2079,9 @@ struct p2m_domain *p2m_get_nestedp2m(struct vcpu *v) return p2m; } -struct p2m_domain * -p2m_get_p2m(struct vcpu *v) +struct p2m_domain *p2m_get_p2m(struct vcpu *v) { - if (!nestedhvm_is_n2(v)) + if ( !nestedhvm_is_n2(v) ) return p2m_get_hostp2m(v->domain); return p2m_get_nestedp2m(v); @@ -2144,8 +2132,7 @@ void np2m_schedule(int dir) } #endif -unsigned long paging_gva_to_gfn(struct vcpu *v, - unsigned long va, +unsigned long paging_gva_to_gfn(struct vcpu *v, unsigned long va, uint32_t *pfec) { struct p2m_domain *hostp2m = p2m_get_hostp2m(v->domain); @@ -2170,8 +2157,7 @@ unsigned long paging_gva_to_gfn(struct vcpu *v, /* translate l2 guest gfn into l1 guest gfn */ rv = nestedhap_walk_L1_p2m(v, l2_gfn, &l1_gfn, &l1_page_order, &l1_p2ma, - 1, - !!(*pfec & PFEC_write_access), + 1, !!(*pfec & PFEC_write_access), !!(*pfec & PFEC_insn_fetch)); if ( rv != NESTEDHVM_PAGEFAULT_DONE ) @@ -2237,8 +2223,8 @@ void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn, return map_domain_page(*mfn); } -static unsigned int mmio_order(const struct domain *d, - unsigned long start_fn, unsigned long nr) +static unsigned int mmio_order(const struct domain *d, unsigned long 
start_fn, + unsigned long nr) { /* * Note that the !hap_enabled() here has two effects: @@ -2246,17 +2232,17 @@ static unsigned int mmio_order(const struct domain *d, * - exclude PV guests, should execution reach this code for such. * So be careful when altering this. */ - if ( !hap_enabled(d) || - (start_fn & ((1UL << PAGE_ORDER_2M) - 1)) || !(nr >> PAGE_ORDER_2M) ) + if ( !hap_enabled(d) || (start_fn & ((1UL << PAGE_ORDER_2M) - 1)) || + !(nr >> PAGE_ORDER_2M) ) return PAGE_ORDER_4K; if ( 0 /* * Don't use 1Gb pages, to limit the iteration count in * set_typed_p2m_entry() when it needs to zap M2P entries * for a RAM range. - */ && - !(start_fn & ((1UL << PAGE_ORDER_1G) - 1)) && (nr >> PAGE_ORDER_1G) && - hap_has_1gb ) + */ + && !(start_fn & ((1UL << PAGE_ORDER_1G) - 1)) && + (nr >> PAGE_ORDER_1G) && hap_has_1gb ) return PAGE_ORDER_1G; if ( hap_has_2mb ) @@ -2267,9 +2253,7 @@ static unsigned int mmio_order(const struct domain *d, #define MAP_MMIO_MAX_ITER 64 /* pretty arbitrary */ -int map_mmio_regions(struct domain *d, - gfn_t start_gfn, - unsigned long nr, +int map_mmio_regions(struct domain *d, gfn_t start_gfn, unsigned long nr, mfn_t mfn) { int ret = 0; @@ -2283,12 +2267,12 @@ int map_mmio_regions(struct domain *d, i += 1UL << order, ++iter ) { /* OR'ing gfn and mfn values will return an order suitable to both. */ - for ( order = mmio_order(d, (gfn_x(start_gfn) + i) | (mfn_x(mfn) + i), nr - i); ; - order = ret - 1 ) + for ( order = mmio_order(d, (gfn_x(start_gfn) + i) | (mfn_x(mfn) + i), + nr - i); + ; order = ret - 1 ) { - ret = set_mmio_p2m_entry(d, gfn_x(start_gfn) + i, - mfn_add(mfn, i), order, - p2m_get_hostp2m(d)->default_access); + ret = set_mmio_p2m_entry(d, gfn_x(start_gfn) + i, mfn_add(mfn, i), + order, p2m_get_hostp2m(d)->default_access); if ( ret <= 0 ) break; ASSERT(ret <= order); @@ -2300,9 +2284,7 @@ int map_mmio_regions(struct domain *d, return i == nr ? 0 : i ?: ret; } -int unmap_mmio_regions(struct domain *d, - gfn_t start_gfn, - unsigned long nr, +int unmap_mmio_regions(struct domain *d, gfn_t start_gfn, unsigned long nr, mfn_t mfn) { int ret = 0; @@ -2316,11 +2298,12 @@ int unmap_mmio_regions(struct domain *d, i += 1UL << order, ++iter ) { /* OR'ing gfn and mfn values will return an order suitable to both. 
*/ - for ( order = mmio_order(d, (gfn_x(start_gfn) + i) | (mfn_x(mfn) + i), nr - i); ; - order = ret - 1 ) + for ( order = mmio_order(d, (gfn_x(start_gfn) + i) | (mfn_x(mfn) + i), + nr - i); + ; order = ret - 1 ) { - ret = clear_mmio_p2m_entry(d, gfn_x(start_gfn) + i, - mfn_add(mfn, i), order); + ret = clear_mmio_p2m_entry(d, gfn_x(start_gfn) + i, mfn_add(mfn, i), + order); if ( ret <= 0 ) break; ASSERT(ret <= order); @@ -2375,9 +2358,8 @@ bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx) * indicate that outer handler should handle fault */ -bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa, - unsigned long gla, struct npfec npfec, - struct p2m_domain **ap2m) +bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa, unsigned long gla, + struct npfec npfec, struct p2m_domain **ap2m) { struct p2m_domain *hp2m = p2m_get_hostp2m(v->domain); p2m_type_t p2mt; @@ -2390,15 +2372,14 @@ bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa, *ap2m = p2m_get_altp2m(v); - mfn = get_gfn_type_access(*ap2m, gfn_x(gfn), &p2mt, &p2ma, - 0, &page_order); + mfn = get_gfn_type_access(*ap2m, gfn_x(gfn), &p2mt, &p2ma, 0, &page_order); __put_gfn(*ap2m, gfn_x(gfn)); if ( !mfn_eq(mfn, INVALID_MFN) ) return 0; - mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma, - P2M_ALLOC, &page_order); + mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma, P2M_ALLOC, + &page_order); __put_gfn(hp2m, gfn_x(gfn)); if ( mfn_eq(mfn, INVALID_MFN) ) @@ -2420,15 +2401,17 @@ bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa, if ( rv ) { gdprintk(XENLOG_ERR, - "failed to set entry for %#"PRIx64" -> %#"PRIx64" p2m %#"PRIx64"\n", - gfn_x(gfn), mfn_x(mfn), (unsigned long)*ap2m); + "failed to set entry for %#" PRIx64 " -> %#" PRIx64 + " p2m %#" PRIx64 "\n", + gfn_x(gfn), mfn_x(mfn), (unsigned long)*ap2m); domain_crash(hp2m->domain); } return 1; } -enum altp2m_reset_type { +enum altp2m_reset_type +{ ALTP2M_RESET, ALTP2M_DEACTIVATE }; @@ -2501,7 +2484,7 @@ static int p2m_activate_altp2m(struct domain *d, unsigned int idx) p2m_init_altp2m_ept(d, idx); - out: +out: p2m_unlock(p2m); return rc; @@ -2598,7 +2581,7 @@ int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx) if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) ) { - for_each_vcpu( d, v ) + for_each_vcpu (d, v) if ( idx != vcpu_altp2m(v).p2midx ) { atomic_dec(&p2m_get_altp2m(v)->active_vcpus); @@ -2617,8 +2600,8 @@ int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx) return rc; } -int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx, - gfn_t old_gfn, gfn_t new_gfn) +int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx, gfn_t old_gfn, + gfn_t new_gfn) { struct p2m_domain *hp2m, *ap2m; p2m_access_t a; @@ -2649,8 +2632,8 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx, /* Check host p2m if no valid entry in alternate */ if ( !mfn_valid(mfn) ) { - mfn = __get_gfn_type_access(hp2m, gfn_x(old_gfn), &t, &a, - P2M_ALLOC, &page_order, 0); + mfn = __get_gfn_type_access(hp2m, gfn_x(old_gfn), &t, &a, P2M_ALLOC, + &page_order, 0); if ( !mfn_valid(mfn) || t != p2m_ram_rw ) goto out; @@ -2690,15 +2673,15 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx, ap2m->max_remapped_gfn = gfn_x(new_gfn); } - out: +out: p2m_unlock(ap2m); p2m_unlock(hp2m); return rc; } -int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn, - mfn_t mfn, unsigned int page_order, - p2m_type_t p2mt, p2m_access_t p2ma) +int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn, mfn_t mfn, + unsigned int 
page_order, p2m_type_t p2mt, + p2m_access_t p2ma) { struct p2m_domain *p2m; p2m_access_t a; @@ -2723,8 +2706,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn, m = get_gfn_type_access(p2m, gfn_x(gfn), &t, &a, 0, NULL); /* Check for a dropped page that may impact this altp2m */ - if ( mfn_eq(mfn, INVALID_MFN) && - gfn_x(gfn) >= p2m->min_remapped_gfn && + if ( mfn_eq(mfn, INVALID_MFN) && gfn_x(gfn) >= p2m->min_remapped_gfn && gfn_x(gfn) <= p2m->max_remapped_gfn ) { if ( !reset_count++ ) @@ -2771,10 +2753,8 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn, /*** Audit ***/ #if P2M_AUDIT && defined(CONFIG_HVM) -void audit_p2m(struct domain *d, - uint64_t *orphans, - uint64_t *m2p_bad, - uint64_t *p2m_bad) +void audit_p2m(struct domain *d, uint64_t *orphans, uint64_t *m2p_bad, + uint64_t *p2m_bad) { struct page_info *page; struct domain *od; @@ -2793,13 +2773,13 @@ void audit_p2m(struct domain *d, p2m_lock(p2m); pod_lock(p2m); - if (p2m->audit_p2m) + if ( p2m->audit_p2m ) pmbad = p2m->audit_p2m(p2m); /* Audit part two: walk the domain's page allocation list, checking * the m2p entries. */ spin_lock(&d->page_alloc_lock); - page_list_for_each ( page, &d->page_list ) + page_list_for_each (page, &d->page_list) { mfn = mfn_x(page_to_mfn(page)); @@ -2809,8 +2789,8 @@ void audit_p2m(struct domain *d, if ( od != d ) { - P2M_PRINTK("wrong owner %#lx -> %p(%u) != %p(%u)\n", - mfn, od, (od?od->domain_id:-1), d, d->domain_id); + P2M_PRINTK("wrong owner %#lx -> %p(%u) != %p(%u)\n", mfn, od, + (od ? od->domain_id : -1), d, d->domain_id); continue; } @@ -2818,15 +2798,13 @@ void audit_p2m(struct domain *d, if ( gfn == INVALID_M2P_ENTRY ) { orphans_count++; - P2M_PRINTK("orphaned guest page: mfn=%#lx has invalid gfn\n", - mfn); + P2M_PRINTK("orphaned guest page: mfn=%#lx has invalid gfn\n", mfn); continue; } if ( SHARED_M2P(gfn) ) { - P2M_PRINTK("shared mfn (%lx) on domain page list!\n", - mfn); + P2M_PRINTK("shared mfn (%lx) on domain page list!\n", mfn); continue; } @@ -2834,12 +2812,11 @@ void audit_p2m(struct domain *d, if ( mfn_x(p2mfn) != mfn ) { mpbad++; - P2M_PRINTK("map mismatch mfn %#lx -> gfn %#lx -> mfn %#lx" - " (-> gfn %#lx)\n", - mfn, gfn, mfn_x(p2mfn), - (mfn_valid(p2mfn) - ? get_gpfn_from_mfn(mfn_x(p2mfn)) - : -1u)); + P2M_PRINTK( + "map mismatch mfn %#lx -> gfn %#lx -> mfn %#lx" + " (-> gfn %#lx)\n", + mfn, gfn, mfn_x(p2mfn), + (mfn_valid(p2mfn) ? get_gpfn_from_mfn(mfn_x(p2mfn)) : -1u)); /* This m2p entry is stale: the domain has another frame in * this physical slot. No great disaster, but for neatness, * blow away the m2p entry. 
*/ @@ -2847,28 +2824,28 @@ void audit_p2m(struct domain *d, } __put_gfn(p2m, gfn); - P2M_PRINTK("OK: mfn=%#lx, gfn=%#lx, p2mfn=%#lx\n", - mfn, gfn, mfn_x(p2mfn)); + P2M_PRINTK("OK: mfn=%#lx, gfn=%#lx, p2mfn=%#lx\n", mfn, gfn, + mfn_x(p2mfn)); } spin_unlock(&d->page_alloc_lock); pod_unlock(p2m); p2m_unlock(p2m); - + P2M_PRINTK("p2m audit complete\n"); if ( orphans_count | mpbad | pmbad ) P2M_PRINTK("p2m audit found %lu orphans\n", orphans_count); if ( mpbad | pmbad ) { - P2M_PRINTK("p2m audit found %lu odd p2m, %lu bad m2p entries\n", - pmbad, mpbad); + P2M_PRINTK("p2m audit found %lu odd p2m, %lu bad m2p entries\n", pmbad, + mpbad); WARN(); } out_p2m_audit: - *orphans = (uint64_t) orphans_count; - *m2p_bad = (uint64_t) mpbad; - *p2m_bad = (uint64_t) pmbad; + *orphans = (uint64_t)orphans_count; + *m2p_bad = (uint64_t)mpbad; + *p2m_bad = (uint64_t)pmbad; } #endif /* P2M_AUDIT */ @@ -2888,8 +2865,8 @@ out_p2m_audit: * * Returns: 0 ==> success */ -int p2m_add_foreign(struct domain *tdom, unsigned long fgfn, - unsigned long gpfn, domid_t foreigndom) +int p2m_add_foreign(struct domain *tdom, unsigned long fgfn, unsigned long gpfn, + domid_t foreigndom) { p2m_type_t p2mt, p2mt_prev; mfn_t prev_mfn, mfn; @@ -2927,8 +2904,8 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn, * ram_rw | ram_logdirty | ram_ro | paging_out. */ page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC); - if ( !page || - !p2m_is_ram(p2mt) || p2m_is_shared(p2mt) || p2m_is_hole(p2mt) ) + if ( !page || !p2m_is_ram(p2mt) || p2m_is_shared(p2mt) || + p2m_is_hole(p2mt) ) { if ( page ) put_page(page); @@ -2957,11 +2934,12 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn, */ rc = set_foreign_p2m_entry(tdom, gpfn, mfn); if ( rc ) - gdprintk(XENLOG_WARNING, "set_foreign_p2m_entry failed. " + gdprintk(XENLOG_WARNING, + "set_foreign_p2m_entry failed. " "gpfn:%lx mfn:%lx fgfn:%lx td:%d fd:%d\n", gpfn, mfn_x(mfn), fgfn, tdom->domain_id, fdom->domain_id); - put_both: +put_both: put_page(page); /* diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c index 21db3eceb6..08cb5d4939 100644 --- a/xen/arch/x86/mm/paging.c +++ b/xen/arch/x86/mm/paging.c @@ -34,15 +34,14 @@ #include "mm-locks.h" /* Printouts */ -#define PAGING_PRINTK(_f, _a...) \ +#define PAGING_PRINTK(_f, _a...) \ debugtrace_printk("pg: %s(): " _f, __func__, ##_a) -#define PAGING_ERROR(_f, _a...) \ - printk("pg error: %s(): " _f, __func__, ##_a) +#define PAGING_ERROR(_f, _a...) printk("pg error: %s(): " _f, __func__, ##_a) #define PAGING_DEBUG(flag, _f, _a...) \ do { \ - if (PAGING_DEBUG_ ## flag) \ + if ( PAGING_DEBUG_##flag ) \ debugtrace_printk("pgdebug: %s(): " _f, __func__, ##_a); \ - } while (0) + } while ( 0 ) /* Per-CPU variable for enforcing the lock ordering */ DEFINE_PER_CPU(int, mm_lock_level); @@ -286,11 +285,11 @@ void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn) /* Recursive: this is called from inside the shadow code */ paging_lock_recursive(d); - if ( unlikely(!mfn_valid(d->arch.paging.log_dirty.top)) ) + if ( unlikely(!mfn_valid(d->arch.paging.log_dirty.top)) ) { - d->arch.paging.log_dirty.top = paging_new_log_dirty_node(d); - if ( unlikely(!mfn_valid(d->arch.paging.log_dirty.top)) ) - goto out; + d->arch.paging.log_dirty.top = paging_new_log_dirty_node(d); + if ( unlikely(!mfn_valid(d->arch.paging.log_dirty.top)) ) + goto out; } l4 = paging_map_log_dirty_bitmap(d); @@ -349,7 +348,6 @@ void paging_mark_dirty(struct domain *d, mfn_t gmfn) paging_mark_pfn_dirty(d, pfn); } - /* Is this guest page dirty? 
*/ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn) { @@ -395,12 +393,10 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn) return rv; } - /* Read a domain's log-dirty bitmap and stats. If the operation is a CLEAN, * clear the bitmap and stats as well. */ static int paging_log_dirty_op(struct domain *d, - struct xen_domctl_shadow_op *sc, - bool_t resuming) + struct xen_domctl_shadow_op *sc, bool_t resuming) { int rv = 0, clean = 0, peek = 1; unsigned long pages = 0; @@ -414,8 +410,7 @@ static int paging_log_dirty_op(struct domain *d, * Mark dirty all currently write-mapped pages on e.g. the * final iteration of a save operation. */ - if ( is_hvm_domain(d) && - (sc->mode & XEN_DOMCTL_SHADOW_LOGDIRTY_FINAL) ) + if ( is_hvm_domain(d) && (sc->mode & XEN_DOMCTL_SHADOW_LOGDIRTY_FINAL) ) hvm_mapped_guest_frames_mark_dirty(d); domain_pause(d); @@ -445,8 +440,7 @@ static int paging_log_dirty_op(struct domain *d, clean = (sc->op == XEN_DOMCTL_SHADOW_OP_CLEAN); PAGING_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n", - (clean) ? "clean" : "peek", - d->domain_id, + (clean) ? "clean" : "peek", d->domain_id, d->arch.paging.log_dirty.fault_count, d->arch.paging.log_dirty.dirty_count); @@ -457,7 +451,8 @@ static int paging_log_dirty_op(struct domain *d, /* caller may have wanted just to clean the state or access stats. */ peek = 0; - if ( unlikely(d->arch.paging.log_dirty.failed_allocs) ) { + if ( unlikely(d->arch.paging.log_dirty.failed_allocs) ) + { printk(XENLOG_WARNING "%u failed page allocs while logging dirty pages of d%d\n", d->arch.paging.log_dirty.failed_allocs, d->domain_id); @@ -475,15 +470,13 @@ static int paging_log_dirty_op(struct domain *d, l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(l4[i4]) : NULL; for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ ) { - l2 = ((l3 && mfn_valid(l3[i3])) ? - map_domain_page(l3[i3]) : NULL); - for ( i2 = 0; - (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES); + l2 = ((l3 && mfn_valid(l3[i3])) ? map_domain_page(l3[i3]) : NULL); + for ( i2 = 0; (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES); i2++ ) { unsigned int bytes = PAGE_SIZE; - l1 = ((l2 && mfn_valid(l2[i2])) ? - map_domain_page(l2[i2]) : NULL); + l1 = ((l2 && mfn_valid(l2[i2])) ? map_domain_page(l2[i2]) + : NULL); if ( unlikely(((sc->pages - pages + 7) >> 3) < bytes) ) bytes = (unsigned int)((sc->pages - pages + 7) >> 3); if ( likely(peek) ) @@ -491,8 +484,8 @@ static int paging_log_dirty_op(struct domain *d, if ( (l1 ? 
copy_to_guest_offset(sc->dirty_bitmap, pages >> 3, (uint8_t *)l1, bytes) - : clear_guest_offset(sc->dirty_bitmap, - pages >> 3, bytes)) != 0 ) + : clear_guest_offset(sc->dirty_bitmap, pages >> 3, + bytes)) != 0 ) { rv = -EFAULT; goto out; @@ -569,7 +562,7 @@ static int paging_log_dirty_op(struct domain *d, domain_unpause(d); return rv; - out: +out: d->arch.paging.preempt.dom = NULL; paging_unlock(d); domain_unpause(d); @@ -586,10 +579,8 @@ static int paging_log_dirty_op(struct domain *d, return rv; } -void paging_log_dirty_range(struct domain *d, - unsigned long begin_pfn, - unsigned long nr, - uint8_t *dirty_bitmap) +void paging_log_dirty_range(struct domain *d, unsigned long begin_pfn, + unsigned long nr, uint8_t *dirty_bitmap) { struct p2m_domain *p2m = p2m_get_hostp2m(d); int i; @@ -667,7 +658,6 @@ void paging_vcpu_init(struct vcpu *v) shadow_vcpu_init(v); } - int paging_domctl(struct domain *d, struct xen_domctl_shadow_op *sc, XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl, bool_t resuming) @@ -694,17 +684,17 @@ int paging_domctl(struct domain *d, struct xen_domctl_shadow_op *sc, return -EINVAL; } - if ( resuming - ? (d->arch.paging.preempt.dom != current->domain || - d->arch.paging.preempt.op != sc->op) - : (d->arch.paging.preempt.dom && - sc->op != XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION) ) + if ( resuming ? (d->arch.paging.preempt.dom != current->domain || + d->arch.paging.preempt.op != sc->op) + : (d->arch.paging.preempt.dom && + sc->op != XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION) ) { - printk(XENLOG_G_DEBUG - "%pv: Paging op %#x on Dom%u with unfinished prior op %#x by Dom%u\n", + printk(XENLOG_G_DEBUG "%pv: Paging op %#x on Dom%u with unfinished " + "prior op %#x by Dom%u\n", current, sc->op, d->domain_id, d->arch.paging.preempt.op, d->arch.paging.preempt.dom - ? d->arch.paging.preempt.dom->domain_id : DOMID_INVALID); + ? d->arch.paging.preempt.dom->domain_id + : DOMID_INVALID); return -EBUSY; } @@ -719,9 +709,8 @@ int paging_domctl(struct domain *d, struct xen_domctl_shadow_op *sc, * shadow code. For this reason, we need to further dispatch domctl * to next-level paging code (shadow or hap). */ - switch ( sc->op ) + switch (sc->op) { - case XEN_DOMCTL_SHADOW_OP_ENABLE: if ( !(sc->mode & XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY) ) break; @@ -781,8 +770,7 @@ long paging_domctl_continuation(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) rcu_unlock_domain(d); if ( ret == -ERESTART ) - ret = hypercall_create_continuation(__HYPERVISOR_arch_1, - "h", u_domctl); + ret = hypercall_create_continuation(__HYPERVISOR_arch_1, "h", u_domctl); else if ( __copy_field_to_guest(u_domctl, &op, u.shadow_op) ) ret = -EFAULT; @@ -834,7 +822,7 @@ int paging_enable(struct domain *d, u32 mode) return -EINVAL; /* All of external|translate|refcounts, or none. 
*/ - switch ( mode & (PG_external | PG_translate | PG_refcounts) ) + switch (mode & (PG_external | PG_translate | PG_refcounts)) { case 0: case PG_external | PG_translate | PG_refcounts: @@ -903,8 +891,7 @@ void paging_dump_vcpu_info(struct vcpu *v) printk("not shadowed\n"); } else if ( paging_mode_hap(v->domain) && paging_get_hostmode(v) ) - printk("hap, %u levels\n", - paging_get_hostmode(v)->guest_levels); + printk("hap, %u levels\n", paging_get_hostmode(v)->guest_levels); else printk("none\n"); } @@ -912,7 +899,7 @@ void paging_dump_vcpu_info(struct vcpu *v) const struct paging_mode *paging_get_mode(struct vcpu *v) { - if (!nestedhvm_is_n2(v)) + if ( !nestedhvm_is_n2(v) ) return paging_get_hostmode(v); return paging_get_nestedmode(v); @@ -922,7 +909,7 @@ const struct paging_mode *paging_get_mode(struct vcpu *v) void paging_update_nestedmode(struct vcpu *v) { ASSERT(nestedhvm_enabled(v->domain)); - if (nestedhvm_paging_mode_hap(v)) + if ( nestedhvm_paging_mode_hap(v) ) /* nested-on-nested */ v->arch.paging.nestedmode = hap_paging_get_mode(v); else diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c index 025071a163..3525ec8946 100644 --- a/xen/arch/x86/mm/shadow/common.c +++ b/xen/arch/x86/mm/shadow/common.c @@ -38,7 +38,7 @@ #include #include "private.h" -DEFINE_PER_CPU(uint32_t,trace_shadow_path_flags); +DEFINE_PER_CPU(uint32_t, trace_shadow_path_flags); static int sh_enable_log_dirty(struct domain *, bool log_global); static int sh_disable_log_dirty(struct domain *); @@ -49,9 +49,9 @@ static void sh_clean_dirty_bitmap(struct domain *); int shadow_domain_init(struct domain *d, unsigned int domcr_flags) { static const struct log_dirty_ops sh_ops = { - .enable = sh_enable_log_dirty, + .enable = sh_enable_log_dirty, .disable = sh_disable_log_dirty, - .clean = sh_clean_dirty_bitmap, + .clean = sh_clean_dirty_bitmap, }; INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelist); @@ -60,7 +60,7 @@ int shadow_domain_init(struct domain *d, unsigned int domcr_flags) /* Use shadow pagetables for log-dirty support */ paging_log_dirty_init(d, &sh_ops); -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) d->arch.paging.shadow.oos_active = 0; d->arch.paging.shadow.oos_off = domcr_flags & XEN_DOMCTL_CDF_oos_off; #endif @@ -77,7 +77,7 @@ int shadow_domain_init(struct domain *d, unsigned int domcr_flags) */ void shadow_vcpu_init(struct vcpu *v) { -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) int i, j; for ( i = 0; i < SHADOW_OOS_PAGES; i++ ) @@ -89,9 +89,9 @@ void shadow_vcpu_init(struct vcpu *v) } #endif - v->arch.paging.mode = is_pv_vcpu(v) ? - &SHADOW_INTERNAL_NAME(sh_paging_mode, 4) : - &SHADOW_INTERNAL_NAME(sh_paging_mode, 3); + v->arch.paging.mode = is_pv_vcpu(v) + ? 
&SHADOW_INTERNAL_NAME(sh_paging_mode, 4) + : &SHADOW_INTERNAL_NAME(sh_paging_mode, 3); } #if SHADOW_AUDIT @@ -100,8 +100,7 @@ int shadow_audit_enable = 0; static void shadow_audit_key(unsigned char key) { shadow_audit_enable = !shadow_audit_enable; - printk("%s shadow_audit_enable=%d\n", - __func__, shadow_audit_enable); + printk("%s shadow_audit_enable=%d\n", __func__, shadow_audit_enable); } static int __init shadow_audit_key_init(void) @@ -112,23 +111,20 @@ static int __init shadow_audit_key_init(void) __initcall(shadow_audit_key_init); #endif /* SHADOW_AUDIT */ - #ifdef CONFIG_HVM extern const struct x86_emulate_ops hvm_shadow_emulator_ops; -extern struct segment_register *hvm_get_seg_reg( - enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt); -extern int hvm_translate_virtual_addr( - enum x86_segment seg, - unsigned long offset, - unsigned int bytes, - enum hvm_access_type access_type, - struct sh_emulate_ctxt *sh_ctxt, - unsigned long *linear); +extern struct segment_register * +hvm_get_seg_reg(enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt); +extern int hvm_translate_virtual_addr(enum x86_segment seg, + unsigned long offset, unsigned int bytes, + enum hvm_access_type access_type, + struct sh_emulate_ctxt *sh_ctxt, + unsigned long *linear); #endif -const struct x86_emulate_ops *shadow_init_emulation( - struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs, - unsigned int pte_size) +const struct x86_emulate_ops * +shadow_init_emulation(struct sh_emulate_ctxt *sh_ctxt, + struct cpu_user_regs *regs, unsigned int pte_size) { #ifdef CONFIG_HVM struct segment_register *creg, *sreg; @@ -153,7 +149,7 @@ const struct x86_emulate_ops *shadow_init_emulation( { sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt); sh_ctxt->ctxt.addr_size = creg->db ? 32 : 16; - sh_ctxt->ctxt.sp_size = sreg->db ? 32 : 16; + sh_ctxt->ctxt.sp_size = sreg->db ? 32 : 16; } sh_ctxt->pte_size = pte_size; @@ -161,13 +157,14 @@ const struct x86_emulate_ops *shadow_init_emulation( /* Attempt to prefetch whole instruction. */ sh_ctxt->insn_buf_eip = regs->rip; sh_ctxt->insn_buf_bytes = - (!hvm_translate_virtual_addr( - x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf), - hvm_access_insn_fetch, sh_ctxt, &addr) && - !hvm_copy_from_guest_linear( - sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), - PFEC_insn_fetch, NULL)) - ? sizeof(sh_ctxt->insn_buf) : 0; + (!hvm_translate_virtual_addr(x86_seg_cs, regs->rip, + sizeof(sh_ctxt->insn_buf), + hvm_access_insn_fetch, sh_ctxt, &addr) && + !hvm_copy_from_guest_linear(sh_ctxt->insn_buf, addr, + sizeof(sh_ctxt->insn_buf), PFEC_insn_fetch, + NULL)) + ? sizeof(sh_ctxt->insn_buf) + : 0; return &hvm_shadow_emulator_ops; #else @@ -197,12 +194,13 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt, /* Prefetch more bytes. */ sh_ctxt->insn_buf_bytes = (!hvm_translate_virtual_addr( - x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf), - hvm_access_insn_fetch, sh_ctxt, &addr) && - !hvm_copy_from_guest_linear( - sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), - PFEC_insn_fetch, NULL)) - ? sizeof(sh_ctxt->insn_buf) : 0; + x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf), + hvm_access_insn_fetch, sh_ctxt, &addr) && + !hvm_copy_from_guest_linear(sh_ctxt->insn_buf, addr, + sizeof(sh_ctxt->insn_buf), + PFEC_insn_fetch, NULL)) + ? 
sizeof(sh_ctxt->insn_buf) + : 0; sh_ctxt->insn_buf_eip = regs->rip; } #else @@ -210,8 +208,7 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt, #endif } - -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /**************************************************************************/ /* Out-of-sync shadows. */ @@ -264,7 +261,7 @@ static void sh_oos_audit(struct domain *d) struct page_info *pg; struct vcpu *v; - for_each_vcpu(d, v) + for_each_vcpu (d, v) { for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ ) { @@ -277,15 +274,15 @@ static void sh_oos_audit(struct domain *d) if ( idx != expected_idx && idx != expected_idx_alt ) { printk("%s: idx %x contains gmfn %lx, expected at %x or %x.\n", - __func__, idx, mfn_x(oos[idx]), - expected_idx, expected_idx_alt); + __func__, idx, mfn_x(oos[idx]), expected_idx, + expected_idx_alt); BUG(); } pg = mfn_to_page(oos[idx]); if ( !(pg->count_info & PGC_page_table) ) { - printk("%s: idx %x gmfn %lx not a pt (count %lx)\n", - __func__, idx, mfn_x(oos[idx]), pg->count_info); + printk("%s: idx %x gmfn %lx not a pt (count %lx)\n", __func__, + idx, mfn_x(oos[idx]), pg->count_info); BUG(); } if ( !(pg->shadow_flags & SHF_out_of_sync) ) @@ -313,7 +310,7 @@ void oos_audit_hash_is_present(struct domain *d, mfn_t gmfn) ASSERT(mfn_is_out_of_sync(gmfn)); - for_each_vcpu(d, v) + for_each_vcpu (d, v) { oos = v->arch.paging.shadow.oos; idx = mfn_x(gmfn) % SHADOW_OOS_PAGES; @@ -324,7 +321,7 @@ void oos_audit_hash_is_present(struct domain *d, mfn_t gmfn) return; } - printk(XENLOG_ERR "gmfn %"PRI_mfn" marked OOS but not in hash table\n", + printk(XENLOG_ERR "gmfn %" PRI_mfn " marked OOS but not in hash table\n", mfn_x(gmfn)); BUG(); } @@ -347,7 +344,6 @@ static inline void _sh_resync_l1(struct vcpu *v, mfn_t gmfn, mfn_t snpmfn) SHADOW_INTERNAL_NAME(sh_resync_l1, 4)(v, gmfn, snpmfn); } - /* * Fixup arrays: We limit the maximum number of writable mappings to * SHADOW_OOS_FIXUPS and store enough information to remove them @@ -363,8 +359,7 @@ static inline int oos_fixup_flush_gmfn(struct vcpu *v, mfn_t gmfn, { if ( !mfn_eq(fixup->smfn[i], INVALID_MFN) ) { - sh_remove_write_access_from_sl1p(d, gmfn, - fixup->smfn[i], + sh_remove_write_access_from_sl1p(d, gmfn, fixup->smfn[i], fixup->off[i]); fixup->smfn[i] = INVALID_MFN; } @@ -374,8 +369,7 @@ static inline int oos_fixup_flush_gmfn(struct vcpu *v, mfn_t gmfn, return 1; } -void oos_fixup_add(struct domain *d, mfn_t gmfn, - mfn_t smfn, unsigned long off) +void oos_fixup_add(struct domain *d, mfn_t gmfn, mfn_t smfn, unsigned long off) { int idx, next; mfn_t *oos; @@ -384,7 +378,7 @@ void oos_fixup_add(struct domain *d, mfn_t gmfn, perfc_incr(shadow_oos_fixup_add); - for_each_vcpu(d, v) + for_each_vcpu (d, v) { oos = v->arch.paging.shadow.oos; oos_fixup = v->arch.paging.shadow.oos_fixup; @@ -396,9 +390,9 @@ void oos_fixup_add(struct domain *d, mfn_t gmfn, int i; for ( i = 0; i < SHADOW_OOS_FIXUPS; i++ ) { - if ( mfn_valid(oos_fixup[idx].smfn[i]) - && mfn_eq(oos_fixup[idx].smfn[i], smfn) - && (oos_fixup[idx].off[i] == off) ) + if ( mfn_valid(oos_fixup[idx].smfn[i]) && + mfn_eq(oos_fixup[idx].smfn[i], smfn) && + (oos_fixup[idx].off[i] == off) ) return; } @@ -429,7 +423,7 @@ void oos_fixup_add(struct domain *d, mfn_t gmfn, } } - printk(XENLOG_ERR "gmfn %"PRI_mfn" was OOS but not in hash table\n", + printk(XENLOG_ERR "gmfn %" PRI_mfn " was OOS but not in hash table\n", mfn_x(gmfn)); BUG(); } @@ -442,7 +436,7 @@ static int oos_remove_write_access(struct vcpu *v, mfn_t gmfn, ftlb |= 
oos_fixup_flush_gmfn(v, gmfn, fixup); - switch ( sh_remove_write_access(d, gmfn, 0, 0) ) + switch (sh_remove_write_access(d, gmfn, 0, 0)) { default: case 0: @@ -468,31 +462,30 @@ static int oos_remove_write_access(struct vcpu *v, mfn_t gmfn, return 0; } - static inline void trace_resync(int event, mfn_t gmfn) { if ( tb_init_done ) { /* Convert gmfn to gfn */ unsigned long gfn = mfn_to_gfn(current->domain, gmfn); - __trace_var(event, 0/*!tsc*/, sizeof(gfn), &gfn); + __trace_var(event, 0 /*!tsc*/, sizeof(gfn), &gfn); } } /* Pull all the entries on an out-of-sync page back into sync. */ -static void _sh_resync(struct vcpu *v, mfn_t gmfn, - struct oos_fixup *fixup, mfn_t snp) +static void _sh_resync(struct vcpu *v, mfn_t gmfn, struct oos_fixup *fixup, + mfn_t snp) { struct page_info *pg = mfn_to_page(gmfn); ASSERT(paging_locked_by_me(v->domain)); ASSERT(mfn_is_out_of_sync(gmfn)); /* Guest page must be shadowed *only* as L1 when out of sync. */ - ASSERT(!(mfn_to_page(gmfn)->shadow_flags & SHF_page_type_mask - & ~SHF_L1_ANY)); + ASSERT( + !(mfn_to_page(gmfn)->shadow_flags & SHF_page_type_mask & ~SHF_L1_ANY)); ASSERT(!sh_page_has_multiple_shadows(mfn_to_page(gmfn))); - SHADOW_PRINTK("%pv gmfn=%"PRI_mfn"\n", v, mfn_x(gmfn)); + SHADOW_PRINTK("%pv gmfn=%" PRI_mfn "\n", v, mfn_x(gmfn)); /* Need to pull write access so the page *stays* in sync. */ if ( oos_remove_write_access(v, gmfn, fixup) ) @@ -513,7 +506,6 @@ static void _sh_resync(struct vcpu *v, mfn_t gmfn, trace_resync(TRC_SHADOW_RESYNC_FULL, gmfn); } - /* Add an MFN to the list of out-of-sync guest pagetables */ static void oos_hash_add(struct vcpu *v, mfn_t gmfn) { @@ -522,16 +514,15 @@ static void oos_hash_add(struct vcpu *v, mfn_t gmfn) mfn_t *oos = v->arch.paging.shadow.oos; mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot; struct oos_fixup *oos_fixup = v->arch.paging.shadow.oos_fixup; - struct oos_fixup fixup = { .next = 0 }; + struct oos_fixup fixup = {.next = 0}; - for (i = 0; i < SHADOW_OOS_FIXUPS; i++ ) + for ( i = 0; i < SHADOW_OOS_FIXUPS; i++ ) fixup.smfn[i] = INVALID_MFN; idx = mfn_x(gmfn) % SHADOW_OOS_PAGES; oidx = idx; - if ( mfn_valid(oos[idx]) - && (mfn_x(oos[idx]) % SHADOW_OOS_PAGES) == idx ) + if ( mfn_valid(oos[idx]) && (mfn_x(oos[idx]) % SHADOW_OOS_PAGES) == idx ) { /* Punt the current occupant into the next slot */ SWAP(oos[idx], gmfn); @@ -540,7 +531,7 @@ static void oos_hash_add(struct vcpu *v, mfn_t gmfn) idx = (idx + 1) % SHADOW_OOS_PAGES; } if ( mfn_valid(oos[idx]) ) - { + { /* Crush the current occupant. 
*/ _sh_resync(v, oos[idx], &oos_fixup[idx], oos_snapshot[idx]); perfc_incr(shadow_unsync_evict); @@ -567,7 +558,7 @@ static void oos_hash_remove(struct domain *d, mfn_t gmfn) SHADOW_PRINTK("d%d gmfn %lx\n", d->domain_id, mfn_x(gmfn)); - for_each_vcpu(d, v) + for_each_vcpu (d, v) { oos = v->arch.paging.shadow.oos; idx = mfn_x(gmfn) % SHADOW_OOS_PAGES; @@ -580,7 +571,7 @@ static void oos_hash_remove(struct domain *d, mfn_t gmfn) } } - printk(XENLOG_ERR "gmfn %"PRI_mfn" was OOS but not in hash table\n", + printk(XENLOG_ERR "gmfn %" PRI_mfn " was OOS but not in hash table\n", mfn_x(gmfn)); BUG(); } @@ -592,7 +583,7 @@ mfn_t oos_snapshot_lookup(struct domain *d, mfn_t gmfn) mfn_t *oos_snapshot; struct vcpu *v; - for_each_vcpu(d, v) + for_each_vcpu (d, v) { oos = v->arch.paging.shadow.oos; oos_snapshot = v->arch.paging.shadow.oos_snapshot; @@ -605,7 +596,7 @@ mfn_t oos_snapshot_lookup(struct domain *d, mfn_t gmfn) } } - printk(XENLOG_ERR "gmfn %"PRI_mfn" was OOS but not in hash table\n", + printk(XENLOG_ERR "gmfn %" PRI_mfn " was OOS but not in hash table\n", mfn_x(gmfn)); BUG(); } @@ -619,7 +610,7 @@ void sh_resync(struct domain *d, mfn_t gmfn) struct oos_fixup *oos_fixup; struct vcpu *v; - for_each_vcpu(d, v) + for_each_vcpu (d, v) { oos = v->arch.paging.shadow.oos; oos_fixup = v->arch.paging.shadow.oos_fixup; @@ -636,7 +627,7 @@ void sh_resync(struct domain *d, mfn_t gmfn) } } - printk(XENLOG_ERR "gmfn %"PRI_mfn" was OOS but not in hash table\n", + printk(XENLOG_ERR "gmfn %" PRI_mfn " was OOS but not in hash table\n", mfn_x(gmfn)); BUG(); } @@ -652,12 +643,11 @@ static int sh_skip_sync(struct vcpu *v, mfn_t gl1mfn) return SHADOW_INTERNAL_NAME(sh_safe_not_to_sync, 3)(v, gl1mfn); else if ( pg->shadow_flags & SHF_L1_64 ) return SHADOW_INTERNAL_NAME(sh_safe_not_to_sync, 4)(v, gl1mfn); - printk(XENLOG_ERR "gmfn %"PRI_mfn" was OOS but not shadowed as an l1\n", + printk(XENLOG_ERR "gmfn %" PRI_mfn " was OOS but not shadowed as an l1\n", mfn_x(gl1mfn)); BUG(); } - /* Pull all out-of-sync pages back into sync. Pages brought out of sync * on other vcpus are allowed to remain out of sync, but their contents * will be made safe (TLB flush semantics); pages unsynced by this vcpu @@ -687,12 +677,12 @@ void sh_resync_all(struct vcpu *v, int skip, int this, int others) oos[idx] = INVALID_MFN; } - resync_others: +resync_others: if ( !others ) return; /* Second: make all *other* vcpus' oos pages safe. */ - for_each_vcpu(v->domain, other) + for_each_vcpu (v->domain, other) { if ( v == other ) continue; @@ -732,7 +722,7 @@ int sh_unsync(struct vcpu *v, mfn_t gmfn) ASSERT(paging_locked_by_me(v->domain)); - SHADOW_PRINTK("%pv gmfn=%"PRI_mfn"\n", v, mfn_x(gmfn)); + SHADOW_PRINTK("%pv gmfn=%" PRI_mfn "\n", v, mfn_x(gmfn)); pg = mfn_to_page(gmfn); @@ -740,16 +730,15 @@ int sh_unsync(struct vcpu *v, mfn_t gmfn) * of sync. Also, get out now if it's already out of sync. 
* Also, can't safely unsync if some vcpus have paging disabled.*/ if ( pg->shadow_flags & - ((SHF_page_type_mask & ~SHF_L1_ANY) | SHF_out_of_sync) - || sh_page_has_multiple_shadows(pg) - || is_pv_vcpu(v) - || !v->domain->arch.paging.shadow.oos_active ) + ((SHF_page_type_mask & ~SHF_L1_ANY) | SHF_out_of_sync) || + sh_page_has_multiple_shadows(pg) || is_pv_vcpu(v) || + !v->domain->arch.paging.shadow.oos_active ) return 0; BUILD_BUG_ON(!(typeof(pg->shadow_flags))SHF_out_of_sync); BUILD_BUG_ON(!(typeof(pg->shadow_flags))SHF_oos_may_write); - pg->shadow_flags |= SHF_out_of_sync|SHF_oos_may_write; + pg->shadow_flags |= SHF_out_of_sync | SHF_oos_may_write; oos_hash_add(v, gmfn); perfc_incr(shadow_unsync); TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_UNSYNC); @@ -758,7 +747,6 @@ int sh_unsync(struct vcpu *v, mfn_t gmfn) #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */ - /**************************************************************************/ /* Code for "promoting" a guest page to the point where the shadow code is * willing to let it be treated as a guest page table. This generally @@ -771,16 +759,16 @@ void shadow_promote(struct domain *d, mfn_t gmfn, unsigned int type) ASSERT(mfn_valid(gmfn)); -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* Is the page already shadowed and out of sync? */ if ( page_is_out_of_sync(page) ) sh_resync(d, gmfn); #endif /* We should never try to promote a gmfn that has writeable mappings */ - ASSERT((page->u.inuse.type_info & PGT_type_mask) != PGT_writable_page - || (page->u.inuse.type_info & PGT_count_mask) == 0 - || d->is_shutting_down); + ASSERT((page->u.inuse.type_info & PGT_type_mask) != PGT_writable_page || + (page->u.inuse.type_info & PGT_count_mask) == 0 || + d->is_shutting_down); /* Is the page already shadowed? */ if ( !test_and_set_bit(_PGC_page_table, &page->count_info) ) @@ -806,7 +794,7 @@ void shadow_demote(struct domain *d, mfn_t gmfn, u32 type) if ( (page->shadow_flags & SHF_page_type_mask) == 0 ) { -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* Was the page out of sync? */ if ( page_is_out_of_sync(page) ) { @@ -823,8 +811,7 @@ void shadow_demote(struct domain *d, mfn_t gmfn, u32 type) /* Validate a pagetable change from the guest and update the shadows. * Returns a bitmask of SHADOW_SET_* flags. 
*/ -int -sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size) +int sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size) { int result = 0; struct page_info *page = mfn_to_page(gmfn); @@ -845,50 +832,48 @@ sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size) // if ( !(page->count_info & PGC_page_table) ) - return 0; /* Not shadowed at all */ + return 0; /* Not shadowed at all */ if ( page->shadow_flags & SHF_L1_32 ) - result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 2) - (v, gmfn, entry, size); + result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, + 2)(v, gmfn, entry, size); if ( page->shadow_flags & SHF_L2_32 ) - result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 2) - (v, gmfn, entry, size); + result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, + 2)(v, gmfn, entry, size); if ( page->shadow_flags & SHF_L1_PAE ) - result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 3) - (v, gmfn, entry, size); + result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, + 3)(v, gmfn, entry, size); if ( page->shadow_flags & SHF_L2_PAE ) - result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 3) - (v, gmfn, entry, size); + result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, + 3)(v, gmfn, entry, size); if ( page->shadow_flags & SHF_L2H_PAE ) - result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 3) - (v, gmfn, entry, size); + result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, + 3)(v, gmfn, entry, size); if ( page->shadow_flags & SHF_L1_64 ) - result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 4) - (v, gmfn, entry, size); + result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, + 4)(v, gmfn, entry, size); if ( page->shadow_flags & SHF_L2_64 ) - result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 4) - (v, gmfn, entry, size); + result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, + 4)(v, gmfn, entry, size); if ( page->shadow_flags & SHF_L2H_64 ) - result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 4) - (v, gmfn, entry, size); + result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, + 4)(v, gmfn, entry, size); if ( page->shadow_flags & SHF_L3_64 ) - result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, 4) - (v, gmfn, entry, size); + result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, + 4)(v, gmfn, entry, size); if ( page->shadow_flags & SHF_L4_64 ) - result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, 4) - (v, gmfn, entry, size); + result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, + 4)(v, gmfn, entry, size); - this_cpu(trace_shadow_path_flags) |= (result<<(TRCE_SFLAG_SET_CHANGED)); + this_cpu(trace_shadow_path_flags) |= (result << (TRCE_SFLAG_SET_CHANGED)); return result; } - -void -sh_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn, - void *entry, u32 size) +void sh_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn, void *entry, + u32 size) /* This is the entry point for emulated writes to pagetables in HVM guests and * PV translated guests. */ @@ -912,7 +897,6 @@ sh_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn, } } - /**************************************************************************/ /* Memory management for shadow pages. 
*/ @@ -989,7 +973,7 @@ static unsigned int shadow_min_acceptable_pages(const struct domain *d) void shadow_unhook_mappings(struct domain *d, mfn_t smfn, int user_only) { struct page_info *sp = mfn_to_page(smfn); - switch ( sp->u.sh.type ) + switch (sp->u.sh.type) { case SH_type_l2_32_shadow: SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, 2)(d, smfn, user_only); @@ -1015,7 +999,7 @@ static inline void trace_shadow_prealloc_unpin(struct domain *d, mfn_t smfn) unsigned long gfn; ASSERT(mfn_valid(smfn)); gfn = mfn_to_gfn(d, backpointer(mfn_to_page(smfn))); - __trace_var(TRC_SHADOW_PREALLOC_UNPIN, 0/*!tsc*/, sizeof(gfn), &gfn); + __trace_var(TRC_SHADOW_PREALLOC_UNPIN, 0 /*!tsc*/, sizeof(gfn), &gfn); } } @@ -1028,14 +1012,15 @@ static void _shadow_prealloc(struct domain *d, unsigned int pages) mfn_t smfn; int i; - if ( d->arch.paging.shadow.free_pages >= pages ) return; + if ( d->arch.paging.shadow.free_pages >= pages ) + return; /* Shouldn't have enabled shadows if we've no vcpus. */ ASSERT(d->vcpu && d->vcpu[0]); /* Stage one: walk the list of pinned pages, unpinning them */ perfc_incr(shadow_prealloc_1); - foreach_pinned_shadow(d, sp, t) + foreach_pinned_shadow (d, sp, t) { smfn = page_to_mfn(sp); @@ -1044,7 +1029,8 @@ static void _shadow_prealloc(struct domain *d, unsigned int pages) sh_unpin(d, smfn); /* See if that freed up enough space */ - if ( d->arch.paging.shadow.free_pages >= pages ) return; + if ( d->arch.paging.shadow.free_pages >= pages ) + return; } /* Stage two: all shadow pages are in use in hierarchies that are @@ -1052,14 +1038,14 @@ static void _shadow_prealloc(struct domain *d, unsigned int pages) * mappings. */ perfc_incr(shadow_prealloc_2); - for_each_vcpu(d, v) - for ( i = 0 ; i < 4 ; i++ ) + for_each_vcpu (d, v) + for ( i = 0; i < 4; i++ ) { if ( !pagetable_is_null(v->arch.shadow_table[i]) ) { TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_PREALLOC_UNHOOK); - shadow_unhook_mappings(d, - pagetable_get_mfn(v->arch.shadow_table[i]), 0); + shadow_unhook_mappings( + d, pagetable_get_mfn(v->arch.shadow_table[i]), 0); /* See if that freed up enough space */ if ( d->arch.paging.shadow.free_pages >= pages ) @@ -1073,11 +1059,9 @@ static void _shadow_prealloc(struct domain *d, unsigned int pages) /* Nothing more we can do: all remaining shadows are of pages that * hold Xen mappings for some vcpu. This can never happen. 
*/ printk(XENLOG_ERR "Can't pre-allocate %u shadow pages!\n" - " shadow pages total = %u, free = %u, p2m=%u\n", - pages, - d->arch.paging.shadow.total_pages, - d->arch.paging.shadow.free_pages, - d->arch.paging.shadow.p2m_pages); + " shadow pages total = %u, free = %u, p2m=%u\n", + pages, d->arch.paging.shadow.total_pages, + d->arch.paging.shadow.free_pages, d->arch.paging.shadow.p2m_pages); BUG(); } @@ -1104,18 +1088,18 @@ static void shadow_blow_tables(struct domain *d) ASSERT(d->vcpu && d->vcpu[0]); /* Pass one: unpin all pinned pages */ - foreach_pinned_shadow(d, sp, t) + foreach_pinned_shadow (d, sp, t) { smfn = page_to_mfn(sp); sh_unpin(d, smfn); } /* Second pass: unhook entries of in-use shadows */ - for_each_vcpu(d, v) - for ( i = 0 ; i < 4 ; i++ ) + for_each_vcpu (d, v) + for ( i = 0; i < 4; i++ ) if ( !pagetable_is_null(v->arch.shadow_table[i]) ) - shadow_unhook_mappings(d, - pagetable_get_mfn(v->arch.shadow_table[i]), 0); + shadow_unhook_mappings( + d, pagetable_get_mfn(v->arch.shadow_table[i]), 0); /* Make sure everyone sees the unshadowings */ flush_tlb_mask(d->dirty_cpumask); @@ -1123,7 +1107,8 @@ static void shadow_blow_tables(struct domain *d) void shadow_blow_tables_per_domain(struct domain *d) { - if ( shadow_mode_enabled(d) && d->vcpu != NULL && d->vcpu[0] != NULL ) { + if ( shadow_mode_enabled(d) && d->vcpu != NULL && d->vcpu[0] != NULL ) + { paging_lock(d); shadow_blow_tables(d); paging_unlock(d); @@ -1139,7 +1124,7 @@ static void shadow_blow_all_tables(unsigned char c) struct domain *d; printk("'%c' pressed -> blowing all shadow tables\n", c); rcu_read_lock(&domlist_read_lock); - for_each_domain(d) + for_each_domain (d) { if ( shadow_mode_enabled(d) && d->vcpu != NULL && d->vcpu[0] != NULL ) { @@ -1154,21 +1139,20 @@ static void shadow_blow_all_tables(unsigned char c) /* Register this function in the Xen console keypress table */ static __init int shadow_blow_tables_keyhandler_init(void) { - register_keyhandler('S', shadow_blow_all_tables, "reset shadow pagetables", 1); + register_keyhandler('S', shadow_blow_all_tables, "reset shadow pagetables", + 1); return 0; } __initcall(shadow_blow_tables_keyhandler_init); #endif /* !NDEBUG */ /* Accessors for the singly-linked list that's used for hash chains */ -static inline struct page_info * -next_shadow(const struct page_info *sp) +static inline struct page_info *next_shadow(const struct page_info *sp) { return sp->next_shadow ? pdx_to_page(sp->next_shadow) : NULL; } -static inline void -set_next_shadow(struct page_info *sp, struct page_info *next) +static inline void set_next_shadow(struct page_info *sp, struct page_info *next) { sp->next_shadow = next ? page_to_pdx(next) : 0; } @@ -1176,9 +1160,7 @@ set_next_shadow(struct page_info *sp, struct page_info *next) /* Allocate another shadow's worth of (contiguous, aligned) pages, * and fill in the type and backpointer fields of their page_infos. * Never fails to allocate. 
*/ -mfn_t shadow_alloc(struct domain *d, - u32 shadow_type, - unsigned long backpointer) +mfn_t shadow_alloc(struct domain *d, u32 shadow_type, unsigned long backpointer) { struct page_info *sp = NULL; unsigned int pages = shadow_size(shadow_type); @@ -1217,7 +1199,7 @@ mfn_t shadow_alloc(struct domain *d, INIT_PAGE_LIST_HEAD(&tmp_list); /* Init page info fields and clear the pages */ - for ( i = 0; i < pages ; i++ ) + for ( i = 0; i < pages; i++ ) { sp = page_list_remove_head(&d->arch.paging.shadow.freelist); /* Before we overwrite the old contents of this page, @@ -1241,8 +1223,8 @@ mfn_t shadow_alloc(struct domain *d, set_next_shadow(sp, NULL); perfc_incr(shadow_alloc_count); } - if ( shadow_type >= SH_type_min_shadow - && shadow_type <= SH_type_max_shadow ) + if ( shadow_type >= SH_type_min_shadow && + shadow_type <= SH_type_max_shadow ) sp->u.sh.head = 1; sh_terminate_list(&tmp_list); @@ -1250,7 +1232,6 @@ mfn_t shadow_alloc(struct domain *d, return page_to_mfn(sp); } - /* Return some shadow pages to the pool. */ void shadow_free(struct domain *d, mfn_t smfn) { @@ -1273,12 +1254,12 @@ void shadow_free(struct domain *d, mfn_t smfn) { #if SHADOW_OPTIMIZATIONS & (SHOPT_WRITABLE_HEURISTIC | SHOPT_FAST_EMULATION) struct vcpu *v; - for_each_vcpu(d, v) + for_each_vcpu (d, v) { #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC /* No longer safe to look for a writeable mapping in this shadow */ - if ( v->arch.paging.shadow.last_writeable_pte_smfn - == mfn_x(page_to_mfn(sp)) ) + if ( v->arch.paging.shadow.last_writeable_pte_smfn == + mfn_x(page_to_mfn(sp)) ) v->arch.paging.shadow.last_writeable_pte_smfn = 0; #endif #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION @@ -1308,8 +1289,7 @@ void shadow_free(struct domain *d, mfn_t smfn) * This action is irreversible: the p2m mapping only ever grows. * That's OK because the p2m table only exists for translated domains, * and those domains can't ever turn off shadow mode. */ -static struct page_info * -shadow_alloc_p2m_page(struct domain *d) +static struct page_info *shadow_alloc_p2m_page(struct domain *d) { struct page_info *pg; @@ -1317,14 +1297,15 @@ shadow_alloc_p2m_page(struct domain *d) * paging lock) and the log-dirty code (which always does). */ paging_lock_recursive(d); - if ( d->arch.paging.shadow.total_pages - < shadow_min_acceptable_pages(d) + 1 ) + if ( d->arch.paging.shadow.total_pages < + shadow_min_acceptable_pages(d) + 1 ) { if ( !d->arch.paging.p2m_alloc_failed ) { d->arch.paging.p2m_alloc_failed = 1; dprintk(XENLOG_ERR, - "d%d failed to allocate from shadow pool (tot=%u p2m=%u min=%u)\n", + "d%d failed to allocate from shadow pool (tot=%u p2m=%u " + "min=%u)\n", d->domain_id, d->arch.paging.shadow.total_pages, d->arch.paging.shadow.p2m_pages, shadow_min_acceptable_pages(d)); @@ -1344,19 +1325,18 @@ shadow_alloc_p2m_page(struct domain *d) return pg; } -static void -shadow_free_p2m_page(struct domain *d, struct page_info *pg) +static void shadow_free_p2m_page(struct domain *d, struct page_info *pg) { struct domain *owner = page_get_owner(pg); /* Should still have no owner and count zero. */ if ( owner || (pg->count_info & PGC_count_mask) ) { - printk(XENLOG_ERR - "d%d: Odd p2m page %"PRI_mfn" d=%d c=%lx t=%"PRtype_info"\n", + printk(XENLOG_ERR "d%d: Odd p2m page %" PRI_mfn + " d=%d c=%lx t=%" PRtype_info "\n", d->domain_id, mfn_x(page_to_mfn(pg)), - owner ? owner->domain_id : DOMID_INVALID, - pg->count_info, pg->u.inuse.type_info); + owner ? 
owner->domain_id : DOMID_INVALID, pg->count_info, + pg->u.inuse.type_info); pg->count_info &= ~PGC_count_mask; page_set_owner(pg, NULL); } @@ -1406,16 +1386,15 @@ int shadow_set_allocation(struct domain *d, unsigned int pages, bool *preempted) pages = lower_bound; } - SHADOW_PRINTK("current %i target %i\n", - d->arch.paging.shadow.total_pages, pages); + SHADOW_PRINTK("current %i target %i\n", d->arch.paging.shadow.total_pages, + pages); - for ( ; ; ) + for ( ;; ) { if ( d->arch.paging.shadow.total_pages < pages ) { /* Need to allocate more memory from domheap */ - sp = (struct page_info *) - alloc_domheap_page(d, MEMF_no_owner); + sp = (struct page_info *)alloc_domheap_page(d, MEMF_no_owner); if ( sp == NULL ) { SHADOW_PRINTK("failed to allocate shadow pages.\n"); @@ -1461,10 +1440,10 @@ int shadow_set_allocation(struct domain *d, unsigned int pages, bool *preempted) /* Return the size of the shadow pool, rounded up to the nearest MB */ static unsigned int shadow_get_allocation(struct domain *d) { - unsigned int pg = d->arch.paging.shadow.total_pages - + d->arch.paging.shadow.p2m_pages; - return ((pg >> (20 - PAGE_SHIFT)) - + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0)); + unsigned int pg = + d->arch.paging.shadow.total_pages + d->arch.paging.shadow.p2m_pages; + return ((pg >> (20 - PAGE_SHIFT)) + + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0)); } /**************************************************************************/ @@ -1482,7 +1461,8 @@ static inline key_t sh_hash(unsigned long n, unsigned int t) unsigned char *p = (unsigned char *)&n; key_t k = t; int i; - for ( i = 0; i < sizeof(n) ; i++ ) k = (u32)p[i] + (k<<6) + (k<<16) - k; + for ( i = 0; i < sizeof(n); i++ ) + k = (u32)p[i] + (k << 6) + (k << 16) - k; return k % SHADOW_HASH_BUCKETS; } @@ -1493,7 +1473,7 @@ static void sh_hash_audit_bucket(struct domain *d, int bucket) { struct page_info *sp, *x; - if ( !(SHADOW_AUDIT & (SHADOW_AUDIT_HASH|SHADOW_AUDIT_HASH_FULL)) || + if ( !(SHADOW_AUDIT & (SHADOW_AUDIT_HASH | SHADOW_AUDIT_HASH_FULL)) || !SHADOW_AUDIT_ENABLE ) return; @@ -1501,39 +1481,40 @@ static void sh_hash_audit_bucket(struct domain *d, int bucket) while ( sp ) { /* Not a shadow? */ - BUG_ON( (sp->count_info & PGC_count_mask )!= 0 ) ; + BUG_ON((sp->count_info & PGC_count_mask) != 0); /* Bogus type? */ - BUG_ON( sp->u.sh.type == 0 ); - BUG_ON( sp->u.sh.type > SH_type_max_shadow ); + BUG_ON(sp->u.sh.type == 0); + BUG_ON(sp->u.sh.type > SH_type_max_shadow); /* Wrong page of a multi-page shadow? */ - BUG_ON( !sp->u.sh.head ); + BUG_ON(!sp->u.sh.head); /* Wrong bucket? */ - BUG_ON( sh_hash(__backpointer(sp), sp->u.sh.type) != bucket ); + BUG_ON(sh_hash(__backpointer(sp), sp->u.sh.type) != bucket); /* Duplicate entry? */ for ( x = next_shadow(sp); x; x = next_shadow(x) ) - BUG_ON( x->v.sh.back == sp->v.sh.back && - x->u.sh.type == sp->u.sh.type ); + BUG_ON(x->v.sh.back == sp->v.sh.back && + x->u.sh.type == sp->u.sh.type); /* Follow the backpointer to the guest pagetable */ - if ( sp->u.sh.type != SH_type_fl1_32_shadow - && sp->u.sh.type != SH_type_fl1_pae_shadow - && sp->u.sh.type != SH_type_fl1_64_shadow ) + if ( sp->u.sh.type != SH_type_fl1_32_shadow && + sp->u.sh.type != SH_type_fl1_pae_shadow && + sp->u.sh.type != SH_type_fl1_64_shadow ) { struct page_info *gpg = mfn_to_page(backpointer(sp)); /* Bad shadow flags on guest page? */ - BUG_ON( !(gpg->shadow_flags & (1<u.sh.type)) ); + BUG_ON(!(gpg->shadow_flags & (1 << sp->u.sh.type))); /* Bad type count on guest page? 
*/ -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) - if ( sp->u.sh.type == SH_type_l1_32_shadow - || sp->u.sh.type == SH_type_l1_pae_shadow - || sp->u.sh.type == SH_type_l1_64_shadow ) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) + if ( sp->u.sh.type == SH_type_l1_32_shadow || + sp->u.sh.type == SH_type_l1_pae_shadow || + sp->u.sh.type == SH_type_l1_64_shadow ) { - if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page - && (gpg->u.inuse.type_info & PGT_count_mask) != 0 ) + if ( (gpg->u.inuse.type_info & PGT_type_mask) == + PGT_writable_page && + (gpg->u.inuse.type_info & PGT_count_mask) != 0 ) { if ( !page_is_out_of_sync(gpg) ) { printk(XENLOG_ERR - "MFN %"PRI_mfn" shadowed (by %"PRI_mfn")" + "MFN %" PRI_mfn " shadowed (by %" PRI_mfn ")" " and not OOS but has typecount %#lx\n", __backpointer(sp), mfn_x(page_to_mfn(sp)), gpg->u.inuse.type_info); @@ -1543,11 +1524,12 @@ static void sh_hash_audit_bucket(struct domain *d, int bucket) } else /* Not an l1 */ #endif - if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page - && (gpg->u.inuse.type_info & PGT_count_mask) != 0 ) + if ( (gpg->u.inuse.type_info & PGT_type_mask) == + PGT_writable_page && + (gpg->u.inuse.type_info & PGT_count_mask) != 0 ) { - printk(XENLOG_ERR "MFN %"PRI_mfn" shadowed (by %"PRI_mfn")" - " but has typecount %#lx\n", + printk(XENLOG_ERR "MFN %" PRI_mfn " shadowed (by %" PRI_mfn ")" + " but has typecount %#lx\n", __backpointer(sp), mfn_x(page_to_mfn(sp)), gpg->u.inuse.type_info); BUG(); @@ -1582,7 +1564,8 @@ static int shadow_hash_alloc(struct domain *d) ASSERT(!d->arch.paging.shadow.hash_table); table = xzalloc_array(struct page_info *, SHADOW_HASH_BUCKETS); - if ( !table ) return 1; + if ( !table ) + return 1; d->arch.paging.shadow.hash_table = table; return 0; } @@ -1598,7 +1581,6 @@ static void shadow_hash_teardown(struct domain *d) d->arch.paging.shadow.hash_table = NULL; } - mfn_t shadow_hash_lookup(struct domain *d, unsigned long n, unsigned int t) /* Find an entry in the hash table. Returns the MFN of the shadow, * or INVALID_MFN if it doesn't exist */ @@ -1618,7 +1600,7 @@ mfn_t shadow_hash_lookup(struct domain *d, unsigned long n, unsigned int t) sp = d->arch.paging.shadow.hash_table[key]; prev = NULL; - while(sp) + while ( sp ) { if ( __backpointer(sp) == n && sp->u.sh.type == t ) { @@ -1719,12 +1701,14 @@ void shadow_hash_delete(struct domain *d, unsigned long n, unsigned int t, sh_hash_audit_bucket(d, key); } -typedef int (*hash_vcpu_callback_t)(struct vcpu *v, mfn_t smfn, mfn_t other_mfn); -typedef int (*hash_domain_callback_t)(struct domain *d, mfn_t smfn, mfn_t other_mfn); +typedef int (*hash_vcpu_callback_t)(struct vcpu *v, mfn_t smfn, + mfn_t other_mfn); +typedef int (*hash_domain_callback_t)(struct domain *d, mfn_t smfn, + mfn_t other_mfn); -static void hash_vcpu_foreach(struct vcpu *v, unsigned int callback_mask, - const hash_vcpu_callback_t callbacks[], - mfn_t callback_mfn) +static void hash_vcpu_foreach (struct vcpu *v, unsigned int callback_mask, + const hash_vcpu_callback_t callbacks[], + mfn_t callback_mfn) /* Walk the hash table looking at the types of the entries and * calling the appropriate callback function for each entry. 
* The mask determines which shadow types we call back for, and the array @@ -1759,20 +1743,20 @@ static void hash_vcpu_foreach(struct vcpu *v, unsigned int callback_mask, { ASSERT(x->u.sh.type <= 15); ASSERT(callbacks[x->u.sh.type] != NULL); - done = callbacks[x->u.sh.type](v, page_to_mfn(x), - callback_mfn); - if ( done ) break; + done = callbacks[x->u.sh.type](v, page_to_mfn(x), callback_mfn); + if ( done ) + break; } } - if ( done ) break; + if ( done ) + break; } d->arch.paging.shadow.hash_walking = 0; } -static void hash_domain_foreach(struct domain *d, - unsigned int callback_mask, - const hash_domain_callback_t callbacks[], - mfn_t callback_mfn) +static void hash_domain_foreach (struct domain *d, unsigned int callback_mask, + const hash_domain_callback_t callbacks[], + mfn_t callback_mfn) /* Walk the hash table looking at the types of the entries and * calling the appropriate callback function for each entry. * The mask determines which shadow types we call back for, and the array @@ -1806,17 +1790,17 @@ static void hash_domain_foreach(struct domain *d, { ASSERT(x->u.sh.type <= 15); ASSERT(callbacks[x->u.sh.type] != NULL); - done = callbacks[x->u.sh.type](d, page_to_mfn(x), - callback_mfn); - if ( done ) break; + done = callbacks[x->u.sh.type](d, page_to_mfn(x), callback_mfn); + if ( done ) + break; } } - if ( done ) break; + if ( done ) + break; } d->arch.paging.shadow.hash_walking = 0; } - /**************************************************************************/ /* Destroy a shadow page: simple dispatcher to call the per-type destructor * which will decrement refcounts appropriately and return memory to the @@ -1827,21 +1811,18 @@ void sh_destroy_shadow(struct domain *d, mfn_t smfn) struct page_info *sp = mfn_to_page(smfn); unsigned int t = sp->u.sh.type; - SHADOW_PRINTK("smfn=%#lx\n", mfn_x(smfn)); /* Double-check, if we can, that the shadowed page belongs to this * domain, (by following the back-pointer). */ - ASSERT(t == SH_type_fl1_32_shadow || - t == SH_type_fl1_pae_shadow || - t == SH_type_fl1_64_shadow || - t == SH_type_monitor_table || + ASSERT(t == SH_type_fl1_32_shadow || t == SH_type_fl1_pae_shadow || + t == SH_type_fl1_64_shadow || t == SH_type_monitor_table || (is_pv_32bit_domain(d) && t == SH_type_l4_64_shadow) || (page_get_owner(mfn_to_page(backpointer(sp))) == d)); /* The down-shifts here are so that the switch statement is on nice * small numbers that the compiler will enjoy */ - switch ( t ) + switch (t) { case SH_type_l1_32_shadow: case SH_type_fl1_32_shadow: @@ -1889,7 +1870,7 @@ static inline void trace_shadow_wrmap_bf(mfn_t gmfn) { /* Convert gmfn to gfn */ unsigned long gfn = mfn_to_gfn(current->domain, gmfn); - __trace_var(TRC_SHADOW_WRMAP_BF, 0/*!tsc*/, sizeof(gfn), &gfn); + __trace_var(TRC_SHADOW_WRMAP_BF, 0 /*!tsc*/, sizeof(gfn), &gfn); } } @@ -1900,38 +1881,32 @@ static inline void trace_shadow_wrmap_bf(mfn_t gmfn) * level==0 means we have some other reason for revoking write access. * If level==0 we are allowed to fail, returning -1. 
*/ -int sh_remove_write_access(struct domain *d, mfn_t gmfn, - unsigned int level, +int sh_remove_write_access(struct domain *d, mfn_t gmfn, unsigned int level, unsigned long fault_addr) { /* Dispatch table for getting per-type functions */ static const hash_domain_callback_t callbacks[SH_type_unused] = { - NULL, /* none */ + NULL, /* none */ SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* l1_32 */ SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* fl1_32 */ - NULL, /* l2_32 */ + NULL, /* l2_32 */ SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3), /* l1_pae */ SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3), /* fl1_pae */ - NULL, /* l2_pae */ - NULL, /* l2h_pae */ + NULL, /* l2_pae */ + NULL, /* l2h_pae */ SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4), /* l1_64 */ SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4), /* fl1_64 */ - NULL, /* l2_64 */ - NULL, /* l2h_64 */ - NULL, /* l3_64 */ - NULL, /* l4_64 */ - NULL, /* p2m */ - NULL /* unused */ + NULL, /* l2_64 */ + NULL, /* l2h_64 */ + NULL, /* l3_64 */ + NULL, /* l4_64 */ + NULL, /* p2m */ + NULL /* unused */ }; - static const unsigned int callback_mask = - SHF_L1_32 - | SHF_FL1_32 - | SHF_L1_PAE - | SHF_FL1_PAE - | SHF_L1_64 - | SHF_FL1_64 - ; + static const unsigned int callback_mask = SHF_L1_32 | SHF_FL1_32 | + SHF_L1_PAE | SHF_FL1_PAE | + SHF_L1_64 | SHF_FL1_64; struct page_info *pg = mfn_to_page(gmfn); #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC struct vcpu *curr = current; @@ -1948,12 +1923,12 @@ int sh_remove_write_access(struct domain *d, mfn_t gmfn, /* Early exit if it's already a pagetable, or otherwise not writeable */ if ( (sh_mfn_is_a_page_table(gmfn) -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) - /* Unless they've been allowed to go out of sync with their shadows */ - && !mfn_oos_may_write(gmfn) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) + /* Unless they've been allowed to go out of sync with their shadows */ + && !mfn_oos_may_write(gmfn) #endif - ) - || (pg->u.inuse.type_info & PGT_count_mask) == 0 ) + ) || + (pg->u.inuse.type_info & PGT_count_mask) == 0 ) return 0; TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_WRMAP); @@ -1964,8 +1939,8 @@ int sh_remove_write_access(struct domain *d, mfn_t gmfn, * put pagetables in special memory of some kind. We can't allow that. 
*/ if ( (pg->u.inuse.type_info & PGT_type_mask) != PGT_writable_page ) { - printk(XENLOG_G_ERR "can't remove write access to mfn %"PRI_mfn - ", type_info is %"PRtype_info "\n", + printk(XENLOG_G_ERR "can't remove write access to mfn %" PRI_mfn + ", type_info is %" PRtype_info "\n", mfn_x(gmfn), mfn_to_page(gmfn)->u.inuse.type_info); domain_crash(d); } @@ -1978,16 +1953,16 @@ int sh_remove_write_access(struct domain *d, mfn_t gmfn, * and that mapping is likely to be in the current pagetable, * in the guest's linear map (on non-HIGHPTE linux and windows)*/ -#define GUESS(_a, _h) do { \ - if ( curr->arch.paging.mode->shadow.guess_wrmap( \ - curr, (_a), gmfn) ) \ - perfc_incr(shadow_writeable_h_ ## _h); \ - if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 ) \ - { \ - TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_WRMAP_GUESS_FOUND); \ - return 1; \ - } \ - } while (0) +#define GUESS(_a, _h) \ + do { \ + if ( curr->arch.paging.mode->shadow.guess_wrmap(curr, (_a), gmfn) ) \ + perfc_incr(shadow_writeable_h_##_h); \ + if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 ) \ + { \ + TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_WRMAP_GUESS_FOUND); \ + return 1; \ + } \ + } while ( 0 ) if ( curr->arch.paging.mode->guest_levels == 2 ) { @@ -1996,47 +1971,58 @@ int sh_remove_write_access(struct domain *d, mfn_t gmfn, GUESS(0xC0000000UL + (fault_addr >> 10), 1); /* Linux lowmem: first 896MB is mapped 1-to-1 above 0xC0000000 */ - if ((gfn = mfn_to_gfn(d, gmfn)) < 0x38000 ) + if ( (gfn = mfn_to_gfn(d, gmfn)) < 0x38000 ) GUESS(0xC0000000UL + (gfn << PAGE_SHIFT), 4); /* FreeBSD: Linear map at 0xBFC00000 */ if ( level == 1 ) - GUESS(0xBFC00000UL - + ((fault_addr & VADDR_MASK) >> 10), 6); + GUESS(0xBFC00000UL + ((fault_addr & VADDR_MASK) >> 10), 6); } else if ( curr->arch.paging.mode->guest_levels == 3 ) { /* 32bit PAE w2k3: linear map at 0xC0000000 */ - switch ( level ) + switch (level) { - case 1: GUESS(0xC0000000UL + (fault_addr >> 9), 2); break; - case 2: GUESS(0xC0600000UL + (fault_addr >> 18), 2); break; + case 1: + GUESS(0xC0000000UL + (fault_addr >> 9), 2); + break; + case 2: + GUESS(0xC0600000UL + (fault_addr >> 18), 2); + break; } /* Linux lowmem: first 896MB is mapped 1-to-1 above 0xC0000000 */ - if ((gfn = mfn_to_gfn(d, gmfn)) < 0x38000 ) + if ( (gfn = mfn_to_gfn(d, gmfn)) < 0x38000 ) GUESS(0xC0000000UL + (gfn << PAGE_SHIFT), 4); /* FreeBSD PAE: Linear map at 0xBF800000 */ - switch ( level ) + switch (level) { - case 1: GUESS(0xBF800000UL - + ((fault_addr & VADDR_MASK) >> 9), 6); break; - case 2: GUESS(0xBFDFC000UL - + ((fault_addr & VADDR_MASK) >> 18), 6); break; + case 1: + GUESS(0xBF800000UL + ((fault_addr & VADDR_MASK) >> 9), 6); + break; + case 2: + GUESS(0xBFDFC000UL + ((fault_addr & VADDR_MASK) >> 18), 6); + break; } } else if ( curr->arch.paging.mode->guest_levels == 4 ) { /* 64bit w2k3: linear map at 0xfffff68000000000 */ - switch ( level ) + switch (level) { - case 1: GUESS(0xfffff68000000000UL - + ((fault_addr & VADDR_MASK) >> 9), 3); break; - case 2: GUESS(0xfffff6fb40000000UL - + ((fault_addr & VADDR_MASK) >> 18), 3); break; - case 3: GUESS(0xfffff6fb7da00000UL - + ((fault_addr & VADDR_MASK) >> 27), 3); break; + case 1: + GUESS(0xfffff68000000000UL + ((fault_addr & VADDR_MASK) >> 9), + 3); + break; + case 2: + GUESS(0xfffff6fb40000000UL + ((fault_addr & VADDR_MASK) >> 18), + 3); + break; + case 3: + GUESS(0xfffff6fb7da00000UL + ((fault_addr & VADDR_MASK) >> 27), + 3); + break; } /* 64bit Linux direct map at 0xffff880000000000; older kernels @@ -2053,18 +2039,23 @@ int sh_remove_write_access(struct domain *d, 
mfn_t gmfn, */ GUESS(0xfffffe0000000000UL + (gfn << PAGE_SHIFT), 4); - /* FreeBSD 64bit: linear map 0xffff800000000000 */ - switch ( level ) - { - case 1: GUESS(0xffff800000000000 - + ((fault_addr & VADDR_MASK) >> 9), 6); break; - case 2: GUESS(0xffff804000000000UL - + ((fault_addr & VADDR_MASK) >> 18), 6); break; - case 3: GUESS(0xffff804020000000UL - + ((fault_addr & VADDR_MASK) >> 27), 6); break; - } - /* FreeBSD 64bit: direct map at 0xffffff0000000000 */ - GUESS(0xffffff0000000000 + (gfn << PAGE_SHIFT), 6); + /* FreeBSD 64bit: linear map 0xffff800000000000 */ + switch (level) + { + case 1: + GUESS(0xffff800000000000 + ((fault_addr & VADDR_MASK) >> 9), 6); + break; + case 2: + GUESS(0xffff804000000000UL + ((fault_addr & VADDR_MASK) >> 18), + 6); + break; + case 3: + GUESS(0xffff804020000000UL + ((fault_addr & VADDR_MASK) >> 27), + 6); + break; + } + /* FreeBSD 64bit: direct map at 0xffffff0000000000 */ + GUESS(0xffffff0000000000 + (gfn << PAGE_SHIFT), 6); } #undef GUESS @@ -2083,7 +2074,8 @@ int sh_remove_write_access(struct domain *d, mfn_t gmfn, (curr->arch.paging.shadow.last_writeable_pte_smfn != 0) ) { unsigned long old_count = (pg->u.inuse.type_info & PGT_count_mask); - mfn_t last_smfn = _mfn(curr->arch.paging.shadow.last_writeable_pte_smfn); + mfn_t last_smfn = + _mfn(curr->arch.paging.shadow.last_writeable_pte_smfn); int shtype = mfn_to_page(last_smfn)->u.sh.type; if ( callbacks[shtype] ) @@ -2104,7 +2096,8 @@ int sh_remove_write_access(struct domain *d, mfn_t gmfn, perfc_incr(shadow_writeable_bf_1); else perfc_incr(shadow_writeable_bf); - hash_domain_foreach(d, callback_mask, callbacks, gmfn); + hash_domain_foreach (d, callback_mask, callbacks, gmfn) + ; /* If that didn't catch the mapping, then there's some non-pagetable * mapping -- ioreq page, grant mapping, &c. 
*/ @@ -2113,8 +2106,9 @@ int sh_remove_write_access(struct domain *d, mfn_t gmfn, if ( level == 0 ) return -1; - printk(XENLOG_G_ERR "can't remove write access to mfn %"PRI_mfn - ": guest has %lu special-use mappings\n", mfn_x(gmfn), + printk(XENLOG_G_ERR "can't remove write access to mfn %" PRI_mfn + ": guest has %lu special-use mappings\n", + mfn_x(gmfn), mfn_to_page(gmfn)->u.inuse.type_info & PGT_count_mask); domain_crash(d); } @@ -2123,29 +2117,29 @@ int sh_remove_write_access(struct domain *d, mfn_t gmfn, return 1; } -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) -int sh_remove_write_access_from_sl1p(struct domain *d, mfn_t gmfn, - mfn_t smfn, unsigned long off) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) +int sh_remove_write_access_from_sl1p(struct domain *d, mfn_t gmfn, mfn_t smfn, + unsigned long off) { struct page_info *sp = mfn_to_page(smfn); ASSERT(mfn_valid(smfn)); ASSERT(mfn_valid(gmfn)); - if ( sp->u.sh.type == SH_type_l1_32_shadow - || sp->u.sh.type == SH_type_fl1_32_shadow ) + if ( sp->u.sh.type == SH_type_l1_32_shadow || + sp->u.sh.type == SH_type_fl1_32_shadow ) { - return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,2) - (d, gmfn, smfn, off); + return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p, 2)(d, gmfn, + smfn, off); } - else if ( sp->u.sh.type == SH_type_l1_pae_shadow - || sp->u.sh.type == SH_type_fl1_pae_shadow ) - return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,3) - (d, gmfn, smfn, off); - else if ( sp->u.sh.type == SH_type_l1_64_shadow - || sp->u.sh.type == SH_type_fl1_64_shadow ) - return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,4) - (d, gmfn, smfn, off); + else if ( sp->u.sh.type == SH_type_l1_pae_shadow || + sp->u.sh.type == SH_type_fl1_pae_shadow ) + return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p, 3)(d, gmfn, + smfn, off); + else if ( sp->u.sh.type == SH_type_l1_64_shadow || + sp->u.sh.type == SH_type_fl1_64_shadow ) + return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p, 4)(d, gmfn, + smfn, off); return 0; } @@ -2161,32 +2155,27 @@ static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn, gfn_t gfn) /* Dispatch table for getting per-type functions */ static const hash_domain_callback_t callbacks[SH_type_unused] = { - NULL, /* none */ + NULL, /* none */ SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2), /* l1_32 */ SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2), /* fl1_32 */ - NULL, /* l2_32 */ + NULL, /* l2_32 */ SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3), /* l1_pae */ SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3), /* fl1_pae */ - NULL, /* l2_pae */ - NULL, /* l2h_pae */ + NULL, /* l2_pae */ + NULL, /* l2h_pae */ SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4), /* l1_64 */ SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4), /* fl1_64 */ - NULL, /* l2_64 */ - NULL, /* l2h_64 */ - NULL, /* l3_64 */ - NULL, /* l4_64 */ - NULL, /* p2m */ - NULL /* unused */ + NULL, /* l2_64 */ + NULL, /* l2h_64 */ + NULL, /* l3_64 */ + NULL, /* l4_64 */ + NULL, /* p2m */ + NULL /* unused */ }; - static const unsigned int callback_mask = - SHF_L1_32 - | SHF_FL1_32 - | SHF_L1_PAE - | SHF_FL1_PAE - | SHF_L1_64 - | SHF_FL1_64 - ; + static const unsigned int callback_mask = SHF_L1_32 | SHF_FL1_32 | + SHF_L1_PAE | SHF_FL1_PAE | + SHF_L1_64 | SHF_FL1_64; perfc_incr(shadow_mappings); if ( sh_check_page_has_no_refs(page) ) @@ -2202,7 +2191,8 @@ static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn, gfn_t gfn) /* Brute-force search of all the shadows, by walking the hash */ perfc_incr(shadow_mappings_bf); - 
hash_domain_foreach(d, callback_mask, callbacks, gmfn); + hash_domain_foreach (d, callback_mask, callbacks, gmfn) + ; /* If that didn't catch the mapping, something is very wrong */ if ( !sh_check_page_has_no_refs(page) ) @@ -2215,16 +2205,15 @@ static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn, gfn_t gfn) * - Xen heap pages, to match share_xen_page_with_guest(), * - ioreq server pages, to match prepare_ring_for_helper(). */ - if ( !(shadow_mode_external(d) - && (page->count_info & PGC_count_mask) <= 3 - && ((page->u.inuse.type_info & PGT_count_mask) - == (is_xen_heap_page(page) || - (is_hvm_domain(d) && is_ioreq_server_page(d, page))))) ) - printk(XENLOG_G_ERR "can't find all mappings of mfn %"PRI_mfn - " (gfn %"PRI_gfn"): c=%lx t=%lx x=%d i=%d\n", - mfn_x(gmfn), gfn_x(gfn), - page->count_info, page->u.inuse.type_info, - !!is_xen_heap_page(page), + if ( !(shadow_mode_external(d) && + (page->count_info & PGC_count_mask) <= 3 && + ((page->u.inuse.type_info & PGT_count_mask) == + (is_xen_heap_page(page) || + (is_hvm_domain(d) && is_ioreq_server_page(d, page))))) ) + printk(XENLOG_G_ERR "can't find all mappings of mfn %" PRI_mfn + " (gfn %" PRI_gfn "): c=%lx t=%lx x=%d i=%d\n", + mfn_x(gmfn), gfn_x(gfn), page->count_info, + page->u.inuse.type_info, !!is_xen_heap_page(page), (is_hvm_domain(d) && is_ioreq_server_page(d, page))); } @@ -2234,7 +2223,6 @@ static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn, gfn_t gfn) return 1; } - /**************************************************************************/ /* Remove all shadows of a guest frame from the shadow tables */ @@ -2251,7 +2239,8 @@ static int sh_remove_shadow_via_pointer(struct domain *d, mfn_t smfn) ASSERT(sp->u.sh.type < SH_type_max_shadow); ASSERT(sh_type_has_up_pointer(d, sp->u.sh.type)); - if (sp->up == 0) return 0; + if ( sp->up == 0 ) + return 0; pmfn = maddr_to_mfn(sp->up); ASSERT(mfn_valid(pmfn)); vaddr = map_domain_page(pmfn) + (sp->up & (PAGE_SIZE - 1)); @@ -2279,7 +2268,8 @@ static int sh_remove_shadow_via_pointer(struct domain *d, mfn_t smfn) case SH_type_l4_64_shadow: SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, 4)(d, vaddr, pmfn); break; - default: BUG(); /* Some wierd unknown shadow type */ + default: + BUG(); /* Some wierd unknown shadow type */ } unmap_domain_page(vaddr); @@ -2307,42 +2297,42 @@ void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all) /* Dispatch table for getting per-type functions: each level must * be called with the function to remove a lower-level shadow. 
*/ static const hash_domain_callback_t callbacks[SH_type_unused] = { - NULL, /* none */ - NULL, /* l1_32 */ - NULL, /* fl1_32 */ + NULL, /* none */ + NULL, /* l1_32 */ + NULL, /* fl1_32 */ SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 2), /* l2_32 */ - NULL, /* l1_pae */ - NULL, /* fl1_pae */ + NULL, /* l1_pae */ + NULL, /* fl1_pae */ SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 3), /* l2_pae */ SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 3), /* l2h_pae */ - NULL, /* l1_64 */ - NULL, /* fl1_64 */ + NULL, /* l1_64 */ + NULL, /* fl1_64 */ SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4), /* l2_64 */ SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4), /* l2h_64 */ SHADOW_INTERNAL_NAME(sh_remove_l2_shadow, 4), /* l3_64 */ SHADOW_INTERNAL_NAME(sh_remove_l3_shadow, 4), /* l4_64 */ - NULL, /* p2m */ - NULL /* unused */ + NULL, /* p2m */ + NULL /* unused */ }; /* Another lookup table, for choosing which mask to use */ static const unsigned int masks[SH_type_unused] = { - 0, /* none */ - SHF_L2_32, /* l1_32 */ - 0, /* fl1_32 */ - 0, /* l2_32 */ + 0, /* none */ + SHF_L2_32, /* l1_32 */ + 0, /* fl1_32 */ + 0, /* l2_32 */ SHF_L2H_PAE | SHF_L2_PAE, /* l1_pae */ - 0, /* fl1_pae */ - 0, /* l2_pae */ - 0, /* l2h_pae */ - SHF_L2H_64 | SHF_L2_64, /* l1_64 */ - 0, /* fl1_64 */ - SHF_L3_64, /* l2_64 */ - SHF_L3_64, /* l2h_64 */ - SHF_L4_64, /* l3_64 */ - 0, /* l4_64 */ - 0, /* p2m */ - 0 /* unused */ + 0, /* fl1_pae */ + 0, /* l2_pae */ + 0, /* l2h_pae */ + SHF_L2H_64 | SHF_L2_64, /* l1_64 */ + 0, /* fl1_64 */ + SHF_L3_64, /* l2_64 */ + SHF_L3_64, /* l2h_64 */ + SHF_L4_64, /* l3_64 */ + 0, /* l4_64 */ + 0, /* p2m */ + 0 /* unused */ }; ASSERT(!(all && fast)); @@ -2353,7 +2343,7 @@ void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all) * can be called via put_page_type when we clear a shadow l1e).*/ paging_lock_recursive(d); - SHADOW_PRINTK("d%d gmfn=%"PRI_mfn"\n", d->domain_id, mfn_x(gmfn)); + SHADOW_PRINTK("d%d gmfn=%" PRI_mfn "\n", d->domain_id, mfn_x(gmfn)); /* Bail out now if the page is not shadowed */ if ( (pg->count_info & PGC_page_table) == 0 ) @@ -2369,28 +2359,29 @@ void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all) * This call to hash_vcpu_foreach() looks dangerous but is in fact OK: each * call will remove at most one shadow, and terminate immediately when * it does remove it, so we never walk the hash after doing a deletion. 
*/ -#define DO_UNSHADOW(_type) do { \ - t = (_type); \ - if( !(pg->count_info & PGC_page_table) \ - || !(pg->shadow_flags & (1 << t)) ) \ - break; \ - smfn = shadow_hash_lookup(d, mfn_x(gmfn), t); \ - if ( unlikely(!mfn_valid(smfn)) ) \ - { \ - printk(XENLOG_G_ERR "gmfn %"PRI_mfn" has flags %#x" \ - " but no type-%#x shadow\n", \ - mfn_x(gmfn), pg->shadow_flags, t); \ - break; \ - } \ - if ( sh_type_is_pinnable(d, t) ) \ - sh_unpin(d, smfn); \ - else if ( sh_type_has_up_pointer(d, t) ) \ - sh_remove_shadow_via_pointer(d, smfn); \ - if( !fast \ - && (pg->count_info & PGC_page_table) \ - && (pg->shadow_flags & (1 << t)) ) \ - hash_domain_foreach(d, masks[t], callbacks, smfn); \ -} while (0) +#define DO_UNSHADOW(_type) \ + do { \ + t = (_type); \ + if ( !(pg->count_info & PGC_page_table) || \ + !(pg->shadow_flags & (1 << t)) ) \ + break; \ + smfn = shadow_hash_lookup(d, mfn_x(gmfn), t); \ + if ( unlikely(!mfn_valid(smfn)) ) \ + { \ + printk(XENLOG_G_ERR "gmfn %" PRI_mfn " has flags %#x" \ + " but no type-%#x shadow\n", \ + mfn_x(gmfn), pg->shadow_flags, t); \ + break; \ + } \ + if ( sh_type_is_pinnable(d, t) ) \ + sh_unpin(d, smfn); \ + else if ( sh_type_has_up_pointer(d, t) ) \ + sh_remove_shadow_via_pointer(d, smfn); \ + if ( !fast && (pg->count_info & PGC_page_table) && \ + (pg->shadow_flags & (1 << t)) ) \ + hash_domain_foreach (d, masks[t], callbacks, smfn) \ + ; \ + } while ( 0 ) DO_UNSHADOW(SH_type_l2_32_shadow); DO_UNSHADOW(SH_type_l1_32_shadow); @@ -2408,8 +2399,9 @@ void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all) /* If that didn't catch the shadows, something is wrong */ if ( !fast && all && (pg->count_info & PGC_page_table) ) { - printk(XENLOG_G_ERR "can't find all shadows of mfn %"PRI_mfn - " (shadow_flags=%04x)\n", mfn_x(gmfn), pg->shadow_flags); + printk(XENLOG_G_ERR "can't find all shadows of mfn %" PRI_mfn + " (shadow_flags=%04x)\n", + mfn_x(gmfn), pg->shadow_flags); domain_crash(d); } @@ -2426,7 +2418,7 @@ void shadow_prepare_page_type_change(struct domain *d, struct page_info *page, if ( !(page->count_info & PGC_page_table) ) return; -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* * Normally we should never let a page go from type count 0 to type * count 1 when it is shadowed. One exception: out-of-sync shadowed @@ -2440,8 +2432,7 @@ void shadow_prepare_page_type_change(struct domain *d, struct page_info *page, shadow_remove_all_shadows(d, page_to_mfn(page)); } -static void -sh_remove_all_shadows_and_parents(struct domain *d, mfn_t gmfn) +static void sh_remove_all_shadows_and_parents(struct domain *d, mfn_t gmfn) /* Even harsher: this is a HVM page that we thing is no longer a pagetable. * Unshadow it, and recursively unshadow pages that reference it. 
*/ { @@ -2470,29 +2461,29 @@ static int sh_clear_up_pointer(struct vcpu *v, mfn_t smfn, mfn_t unused) void sh_reset_l3_up_pointers(struct vcpu *v) { static const hash_vcpu_callback_t callbacks[SH_type_unused] = { - NULL, /* none */ - NULL, /* l1_32 */ - NULL, /* fl1_32 */ - NULL, /* l2_32 */ - NULL, /* l1_pae */ - NULL, /* fl1_pae */ - NULL, /* l2_pae */ - NULL, /* l2h_pae */ - NULL, /* l1_64 */ - NULL, /* fl1_64 */ - NULL, /* l2_64 */ - NULL, /* l2h_64 */ + NULL, /* none */ + NULL, /* l1_32 */ + NULL, /* fl1_32 */ + NULL, /* l2_32 */ + NULL, /* l1_pae */ + NULL, /* fl1_pae */ + NULL, /* l2_pae */ + NULL, /* l2h_pae */ + NULL, /* l1_64 */ + NULL, /* fl1_64 */ + NULL, /* l2_64 */ + NULL, /* l2h_64 */ sh_clear_up_pointer, /* l3_64 */ - NULL, /* l4_64 */ - NULL, /* p2m */ - NULL /* unused */ + NULL, /* l4_64 */ + NULL, /* p2m */ + NULL /* unused */ }; static const unsigned int callback_mask = SHF_L3_64; - hash_vcpu_foreach(v, callback_mask, callbacks, INVALID_MFN); + hash_vcpu_foreach (v, callback_mask, callbacks, INVALID_MFN) + ; } - /**************************************************************************/ static void sh_update_paging_modes(struct vcpu *v) @@ -2502,7 +2493,7 @@ static void sh_update_paging_modes(struct vcpu *v) ASSERT(paging_locked_by_me(d)); -#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB ) /* Make sure this vcpu has a virtual TLB array allocated */ if ( unlikely(!v->arch.paging.vtlb) ) { @@ -2517,11 +2508,11 @@ static void sh_update_paging_modes(struct vcpu *v) } #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */ -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) if ( mfn_eq(v->arch.paging.shadow.oos_snapshot[0], INVALID_MFN) ) { int i; - for(i = 0; i < SHADOW_OOS_PAGES; i++) + for ( i = 0; i < SHADOW_OOS_PAGES; i++ ) { shadow_prealloc(d, SH_type_oos_snapshot, 1); v->arch.paging.shadow.oos_snapshot[i] = @@ -2551,7 +2542,7 @@ static void sh_update_paging_modes(struct vcpu *v) ASSERT(shadow_mode_translate(d)); ASSERT(shadow_mode_external(d)); -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* Need to resync all our pages now, because if a page goes out * of sync with paging enabled and is resynced with paging * disabled, the resync will go wrong. */ @@ -2569,20 +2560,17 @@ static void sh_update_paging_modes(struct vcpu *v) else if ( hvm_long_mode_active(v) ) { // long mode guest... - v->arch.paging.mode = - &SHADOW_INTERNAL_NAME(sh_paging_mode, 4); + v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 4); } else if ( hvm_pae_enabled(v) ) { // 32-bit PAE mode guest... - v->arch.paging.mode = - &SHADOW_INTERNAL_NAME(sh_paging_mode, 3); + v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3); } else { // 32-bit 2 level guest... - v->arch.paging.mode = - &SHADOW_INTERNAL_NAME(sh_paging_mode, 2); + v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 2); } if ( pagetable_is_null(v->arch.monitor_table) ) @@ -2597,15 +2585,13 @@ static void sh_update_paging_modes(struct vcpu *v) { SHADOW_PRINTK("new paging mode: %pv pe=%d gl=%u " "sl=%u (was g=%u s=%u)\n", - v, - is_hvm_domain(d) ? hvm_paging_enabled(v) : 1, + v, is_hvm_domain(d) ? hvm_paging_enabled(v) : 1, v->arch.paging.mode->guest_levels, v->arch.paging.mode->shadow.shadow_levels, old_mode ? old_mode->guest_levels : 0, old_mode ? 
old_mode->shadow.shadow_levels : 0); - if ( old_mode && - (v->arch.paging.mode->shadow.shadow_levels != - old_mode->shadow.shadow_levels) ) + if ( old_mode && (v->arch.paging.mode->shadow.shadow_levels != + old_mode->shadow.shadow_levels) ) { /* Need to make a new monitor table for the new mode */ mfn_t new_mfn, old_mfn; @@ -2626,8 +2612,8 @@ static void sh_update_paging_modes(struct vcpu *v) v->arch.monitor_table = pagetable_null(); new_mfn = v->arch.paging.mode->shadow.make_monitor_table(v); v->arch.monitor_table = pagetable_from_mfn(new_mfn); - SHADOW_PRINTK("new monitor table %"PRI_mfn "\n", - mfn_x(new_mfn)); + SHADOW_PRINTK("new monitor table %" PRI_mfn "\n", + mfn_x(new_mfn)); /* Don't be running on the old monitor table when we * pull it down! Switch CR3, and warn the HVM code that @@ -2646,7 +2632,7 @@ static void sh_update_paging_modes(struct vcpu *v) // This *does* happen, at least for CR4.PGE... } -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* We need to check that all the vcpus have paging enabled to * unsync PTs. */ if ( is_hvm_domain(d) && !d->arch.paging.shadow.oos_off ) @@ -2654,7 +2640,7 @@ static void sh_update_paging_modes(struct vcpu *v) int pe = 1; struct vcpu *vptr; - for_each_vcpu(d, vptr) + for_each_vcpu (d, vptr) { if ( !hvm_paging_enabled(vptr) ) { @@ -2698,7 +2684,7 @@ static void sh_new_mode(struct domain *d, u32 new_mode) new_mode |= PG_SH_forced | PG_SH_enable; d->arch.paging.mode = new_mode; - for_each_vcpu(d, v) + for_each_vcpu (d, v) sh_update_paging_modes(v); } @@ -2748,7 +2734,7 @@ int shadow_enable(struct domain *d, u32 mode) if ( mode & PG_translate ) { rv = p2m_alloc_table(p2m); - if (rv != 0) + if ( rv != 0 ) goto out_unlocked; } @@ -2790,7 +2776,7 @@ int shadow_enable(struct domain *d, u32 mode) goto out_locked; } -#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL ) /* We assume we're dealing with an older 64bit linux guest until we * see the guest use more than one l4 per vcpu. 
*/ d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL; @@ -2803,9 +2789,9 @@ int shadow_enable(struct domain *d, u32 mode) /* Update the bits */ sh_new_mode(d, mode); - out_locked: +out_locked: paging_unlock(d); - out_unlocked: +out_unlocked: if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) ) p2m_teardown(p2m); if ( rv != 0 && pg != NULL ) @@ -2834,7 +2820,7 @@ void shadow_teardown(struct domain *d, bool *preempted) if ( shadow_mode_enabled(d) ) { /* Release the shadow and monitor tables held by each vcpu */ - for_each_vcpu(d, v) + for_each_vcpu (d, v) { if ( v->arch.paging.mode ) { @@ -2843,18 +2829,19 @@ void shadow_teardown(struct domain *d, bool *preempted) { mfn = pagetable_get_mfn(v->arch.monitor_table); if ( mfn_valid(mfn) && (mfn_x(mfn) != 0) ) - v->arch.paging.mode->shadow.destroy_monitor_table(v, mfn); + v->arch.paging.mode->shadow.destroy_monitor_table(v, + mfn); v->arch.monitor_table = pagetable_null(); } } } } -#if (SHADOW_OPTIMIZATIONS & (SHOPT_VIRTUAL_TLB|SHOPT_OUT_OF_SYNC)) +#if ( SHADOW_OPTIMIZATIONS & (SHOPT_VIRTUAL_TLB | SHOPT_OUT_OF_SYNC) ) /* Free the virtual-TLB array attached to each vcpu */ - for_each_vcpu(d, v) + for_each_vcpu (d, v) { -#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB ) if ( v->arch.paging.vtlb ) { xfree(v->arch.paging.vtlb); @@ -2862,7 +2849,7 @@ void shadow_teardown(struct domain *d, bool *preempted) } #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */ -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) { int i; mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot; @@ -2886,7 +2873,7 @@ void shadow_teardown(struct domain *d, bool *preempted) goto out; /* Release the hash table back to xenheap */ - if (d->arch.paging.shadow.hash_table) + if ( d->arch.paging.shadow.hash_table ) shadow_hash_teardown(d); ASSERT(d->arch.paging.shadow.total_pages == 0); @@ -2897,7 +2884,7 @@ void shadow_teardown(struct domain *d, bool *preempted) if ( !pagetable_is_null(d->arch.paging.shadow.unpaged_pagetable) ) { ASSERT(is_hvm_domain(d)); - for_each_vcpu(d, v) + for_each_vcpu (d, v) if ( !hvm_paging_enabled(v) ) v->arch.guest_table = pagetable_null(); unpaged_pagetable = @@ -2932,8 +2919,8 @@ out: /* Complain here in cases where shadow_free_p2m_page() won't. */ else if ( !page_get_owner(unpaged_pagetable) && !(unpaged_pagetable->count_info & PGC_count_mask) ) - printk(XENLOG_ERR - "d%d: Odd unpaged pt %"PRI_mfn" c=%lx t=%"PRtype_info"\n", + printk(XENLOG_ERR "d%d: Odd unpaged pt %" PRI_mfn + " c=%lx t=%" PRtype_info "\n", d->domain_id, mfn_x(page_to_mfn(unpaged_pagetable)), unpaged_pagetable->count_info, unpaged_pagetable->u.inuse.type_info); @@ -2945,11 +2932,10 @@ void shadow_final_teardown(struct domain *d) /* Called by arch_domain_destroy(), when it's safe to pull down the p2m map. */ { SHADOW_PRINTK("dom %u final teardown starts." - " Shadow pages total = %u, free = %u, p2m=%u\n", - d->domain_id, - d->arch.paging.shadow.total_pages, - d->arch.paging.shadow.free_pages, - d->arch.paging.shadow.p2m_pages); + " Shadow pages total = %u, free = %u, p2m=%u\n", + d->domain_id, d->arch.paging.shadow.total_pages, + d->arch.paging.shadow.free_pages, + d->arch.paging.shadow.p2m_pages); /* Double-check that the domain didn't have any shadow memory. 
* It is possible for a domain that never got domain_kill()ed @@ -2963,11 +2949,10 @@ void shadow_final_teardown(struct domain *d) paging_lock(d); shadow_set_allocation(d, 0, NULL); SHADOW_PRINTK("dom %u final teardown done." - " Shadow pages total = %u, free = %u, p2m=%u\n", - d->domain_id, - d->arch.paging.shadow.total_pages, - d->arch.paging.shadow.free_pages, - d->arch.paging.shadow.p2m_pages); + " Shadow pages total = %u, free = %u, p2m=%u\n", + d->domain_id, d->arch.paging.shadow.total_pages, + d->arch.paging.shadow.free_pages, + d->arch.paging.shadow.p2m_pages); paging_unlock(d); } @@ -3032,12 +3017,11 @@ static int shadow_one_bit_disable(struct domain *d, u32 mode) { /* Get this domain off shadows */ SHADOW_PRINTK("un-shadowing of domain %u starts." - " Shadow pages total = %u, free = %u, p2m=%u\n", - d->domain_id, - d->arch.paging.shadow.total_pages, - d->arch.paging.shadow.free_pages, - d->arch.paging.shadow.p2m_pages); - for_each_vcpu(d, v) + " Shadow pages total = %u, free = %u, p2m=%u\n", + d->domain_id, d->arch.paging.shadow.total_pages, + d->arch.paging.shadow.free_pages, + d->arch.paging.shadow.p2m_pages); + for_each_vcpu (d, v) { if ( v->arch.paging.mode ) v->arch.paging.mode->shadow.detach_old_tables(v); @@ -3046,7 +3030,7 @@ static int shadow_one_bit_disable(struct domain *d, u32 mode) else make_cr3(v, pagetable_get_mfn(v->arch.guest_table)); -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) { int i; mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot; @@ -3065,11 +3049,10 @@ static int shadow_one_bit_disable(struct domain *d, u32 mode) BUG(); /* In fact, we will have BUG()ed already */ shadow_hash_teardown(d); SHADOW_PRINTK("un-shadowing of domain %u done." - " Shadow pages total = %u, free = %u, p2m=%u\n", - d->domain_id, - d->arch.paging.shadow.total_pages, - d->arch.paging.shadow.free_pages, - d->arch.paging.shadow.p2m_pages); + " Shadow pages total = %u, free = %u, p2m=%u\n", + d->domain_id, d->arch.paging.shadow.total_pages, + d->arch.paging.shadow.free_pages, + d->arch.paging.shadow.p2m_pages); } return 0; @@ -3149,16 +3132,15 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn, cpumask_clear(&flushmask); /* If we're replacing a superpage with a normal L1 page, map it */ - if ( (l1e_get_flags(new) & _PAGE_PRESENT) - && !(l1e_get_flags(new) & _PAGE_PSE) - && mfn_valid(nmfn) ) + if ( (l1e_get_flags(new) & _PAGE_PRESENT) && + !(l1e_get_flags(new) & _PAGE_PSE) && mfn_valid(nmfn) ) npte = map_domain_page(nmfn); for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ ) { - if ( !npte - || !p2m_is_ram(p2m_flags_to_type(l1e_get_flags(npte[i]))) - || !mfn_eq(l1e_get_mfn(npte[i]), omfn) ) + if ( !npte || + !p2m_is_ram(p2m_flags_to_type(l1e_get_flags(npte[i]))) || + !mfn_eq(l1e_get_mfn(npte[i]), omfn) ) { /* This GFN->MFN mapping has gone away */ sh_remove_all_shadows_and_parents(d, omfn); @@ -3176,10 +3158,9 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn, } } -int -shadow_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, - l1_pgentry_t *p, l1_pgentry_t new, - unsigned int level) +int shadow_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, + l1_pgentry_t *p, l1_pgentry_t new, + unsigned int level) { struct domain *d = p2m->domain; int rc; @@ -3189,7 +3170,7 @@ shadow_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, /* If there are any shadows, update them. But if shadow_teardown() * has already been called then it's not safe to try. 
*/ if ( likely(d->arch.paging.shadow.total_pages != 0) ) - sh_unshadow_for_p2m_change(d, gfn, p, new, level); + sh_unshadow_for_p2m_change(d, gfn, p, new, level); rc = p2m_entry_modify(p2m, p2m_flags_to_type(l1e_get_flags(new)), p2m_flags_to_type(l1e_get_flags(*p)), @@ -3203,7 +3184,7 @@ shadow_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, /* Update the entry with new content */ safe_write_pte(p, new); -#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH ) /* If we're doing FAST_FAULT_PATH, then shadow mode may have cached the fact that this is an mmio region in the shadow page tables. Blow the tables away to remove the cache. @@ -3241,7 +3222,7 @@ static int sh_enable_log_dirty(struct domain *d, bool log_global) shadow_blow_tables(d); } -#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL ) /* 32bit PV guests on 64bit xen behave like older 64bit linux: they * change an l4e instead of cr3 to switch tables. Give them the * same optimization */ @@ -3280,11 +3261,9 @@ static void sh_clean_dirty_bitmap(struct domain *d) paging_unlock(d); } - /**************************************************************************/ /* VRAM dirty tracking support */ -int shadow_track_dirty_vram(struct domain *d, - unsigned long begin_pfn, +int shadow_track_dirty_vram(struct domain *d, unsigned long begin_pfn, unsigned long nr, XEN_GUEST_HANDLE_PARAM(void) guest_dirty_bitmap) { @@ -3307,12 +3286,12 @@ int shadow_track_dirty_vram(struct domain *d, dirty_vram = d->arch.hvm.dirty_vram; - if ( dirty_vram && (!nr || - ( begin_pfn != dirty_vram->begin_pfn - || end_pfn != dirty_vram->end_pfn )) ) + if ( dirty_vram && (!nr || (begin_pfn != dirty_vram->begin_pfn || + end_pfn != dirty_vram->end_pfn)) ) { /* Different tracking, tear the previous down. */ - gdprintk(XENLOG_INFO, "stopping tracking VRAM %lx - %lx\n", dirty_vram->begin_pfn, dirty_vram->end_pfn); + gdprintk(XENLOG_INFO, "stopping tracking VRAM %lx - %lx\n", + dirty_vram->begin_pfn, dirty_vram->end_pfn); xfree(dirty_vram->sl1ma); xfree(dirty_vram->dirty_bitmap); xfree(dirty_vram); @@ -3349,7 +3328,8 @@ int shadow_track_dirty_vram(struct domain *d, goto out_dirty_vram; memset(dirty_vram->sl1ma, ~0, sizeof(paddr_t) * nr); - if ( (dirty_vram->dirty_bitmap = xzalloc_array(uint8_t, dirty_size)) == NULL ) + if ( (dirty_vram->dirty_bitmap = xzalloc_array(uint8_t, dirty_size)) == + NULL ) goto out_sl1ma; dirty_vram->last_dirty = NOW(); @@ -3357,7 +3337,7 @@ int shadow_track_dirty_vram(struct domain *d, /* Tell the caller that this time we could not track dirty bits. */ rc = -ENODATA; } - else if (dirty_vram->last_dirty == -1) + else if ( dirty_vram->last_dirty == -1 ) /* still completely clean, just copy our empty bitmap */ memcpy(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size); else @@ -3366,7 +3346,8 @@ int shadow_track_dirty_vram(struct domain *d, void *map_sl1p = NULL; /* Iterate over VRAM to track dirty bits. 
*/ - for ( i = 0; i < nr; i++ ) { + for ( i = 0; i < nr; i++ ) + { mfn_t mfn = get_gfn_query_unlocked(d, begin_pfn + i, &t); struct page_info *page; int dirty = 0; @@ -3392,8 +3373,8 @@ int shadow_track_dirty_vram(struct domain *d, dirty = 1; /* TODO: Heuristics for finding the single mapping of * this gmfn */ - flush_tlb |= sh_remove_all_mappings(d, mfn, - _gfn(begin_pfn + i)); + flush_tlb |= + sh_remove_all_mappings(d, mfn, _gfn(begin_pfn + i)); } else { @@ -3483,14 +3464,13 @@ out: /**************************************************************************/ /* Shadow-control XEN_DOMCTL dispatcher */ -int shadow_domctl(struct domain *d, - struct xen_domctl_shadow_op *sc, +int shadow_domctl(struct domain *d, struct xen_domctl_shadow_op *sc, XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) { int rc; bool preempted = false; - switch ( sc->op ) + switch (sc->op) { case XEN_DOMCTL_SHADOW_OP_OFF: if ( d->arch.paging.mode == PG_SH_enable ) @@ -3514,8 +3494,10 @@ int shadow_domctl(struct domain *d, { /* Can't set the allocation to zero unless the domain stops using * shadow pagetables first */ - dprintk(XENLOG_G_ERR, "Can't set shadow allocation to zero, " - "d%d is still using shadows\n", d->domain_id); + dprintk(XENLOG_G_ERR, + "Can't set shadow allocation to zero, " + "d%d is still using shadows\n", + d->domain_id); paging_unlock(d); return -EINVAL; } @@ -3523,8 +3505,8 @@ int shadow_domctl(struct domain *d, paging_unlock(d); if ( preempted ) /* Not finished. Set up to re-run the call. */ - rc = hypercall_create_continuation( - __HYPERVISOR_domctl, "h", u_domctl); + rc = hypercall_create_continuation(__HYPERVISOR_domctl, "h", + u_domctl); else /* Finished. Return the new allocation */ sc->mb = shadow_get_allocation(d); @@ -3535,7 +3517,6 @@ int shadow_domctl(struct domain *d, } } - /**************************************************************************/ /* Auditing shadow tables */ @@ -3559,7 +3540,7 @@ void shadow_audit_tables(struct vcpu *v) SHADOW_INTERNAL_NAME(sh_audit_l3_table, 4), /* l3_64 */ SHADOW_INTERNAL_NAME(sh_audit_l4_table, 4), /* l4_64 */ #endif - NULL /* All the rest */ + NULL /* All the rest */ }; unsigned int mask; @@ -3569,27 +3550,33 @@ void shadow_audit_tables(struct vcpu *v) if ( SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_FULL ) { -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) sh_oos_audit(v->domain); #endif mask = SHF_page_type_mask; /* Audit every table in the system */ } - else + else { /* Audit only the current mode's tables */ - switch ( v->arch.paging.mode->guest_levels ) + switch (v->arch.paging.mode->guest_levels) { - case 2: mask = (SHF_L1_32|SHF_FL1_32|SHF_L2_32); break; - case 3: mask = (SHF_L1_PAE|SHF_FL1_PAE|SHF_L2_PAE - |SHF_L2H_PAE); break; - case 4: mask = (SHF_L1_64|SHF_FL1_64|SHF_L2_64 - |SHF_L3_64|SHF_L4_64); break; - default: BUG(); + case 2: + mask = (SHF_L1_32 | SHF_FL1_32 | SHF_L2_32); + break; + case 3: + mask = (SHF_L1_PAE | SHF_FL1_PAE | SHF_L2_PAE | SHF_L2H_PAE); + break; + case 4: + mask = (SHF_L1_64 | SHF_FL1_64 | SHF_L2_64 | SHF_L3_64 | SHF_L4_64); + break; + default: + BUG(); } } - hash_vcpu_foreach(v, mask, callbacks, INVALID_MFN); + hash_vcpu_foreach (v, mask, callbacks, INVALID_MFN) + ; } #ifdef CONFIG_PV diff --git a/xen/arch/x86/mm/shadow/hvm.c b/xen/arch/x86/mm/shadow/hvm.c index 8994cb9f87..32d746ccaf 100644 --- a/xen/arch/x86/mm/shadow/hvm.c +++ b/xen/arch/x86/mm/shadow/hvm.c @@ -41,8 +41,8 @@ * indicators. 
*/ #define MAPPING_UNHANDLEABLE ERR_PTR(~(long)X86EMUL_UNHANDLEABLE) -#define MAPPING_EXCEPTION ERR_PTR(~(long)X86EMUL_EXCEPTION) -#define MAPPING_SILENT_FAIL ERR_PTR(~(long)X86EMUL_OKAY) +#define MAPPING_EXCEPTION ERR_PTR(~(long)X86EMUL_EXCEPTION) +#define MAPPING_SILENT_FAIL ERR_PTR(~(long)X86EMUL_OKAY) static void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr, unsigned int bytes, struct sh_emulate_ctxt *sh_ctxt); @@ -54,8 +54,8 @@ static void sh_emulate_unmap_dest(struct vcpu *v, void *addr, * Callers which pass a known in-range x86_segment can rely on the return * pointer being valid. Other callers must explicitly check for errors. */ -struct segment_register *hvm_get_seg_reg( - enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt) +struct segment_register *hvm_get_seg_reg(enum x86_segment seg, + struct sh_emulate_ctxt *sh_ctxt) { unsigned int idx = seg; struct segment_register *seg_reg; @@ -69,13 +69,11 @@ struct segment_register *hvm_get_seg_reg( return seg_reg; } -int hvm_translate_virtual_addr( - enum x86_segment seg, - unsigned long offset, - unsigned int bytes, - enum hvm_access_type access_type, - struct sh_emulate_ctxt *sh_ctxt, - unsigned long *linear) +int hvm_translate_virtual_addr(enum x86_segment seg, unsigned long offset, + unsigned int bytes, + enum hvm_access_type access_type, + struct sh_emulate_ctxt *sh_ctxt, + unsigned long *linear) { const struct segment_register *reg; int okay; @@ -84,9 +82,9 @@ int hvm_translate_virtual_addr( if ( IS_ERR(reg) ) return -PTR_ERR(reg); - okay = hvm_virtual_to_linear_addr( - seg, reg, offset, bytes, access_type, - hvm_get_seg_reg(x86_seg_cs, sh_ctxt), linear); + okay = hvm_virtual_to_linear_addr(seg, reg, offset, bytes, access_type, + hvm_get_seg_reg(x86_seg_cs, sh_ctxt), + linear); if ( !okay ) { @@ -96,38 +94,33 @@ int hvm_translate_virtual_addr( * determine the kind of exception (#GP or #TS) in that case. */ if ( is_x86_user_segment(seg) ) - x86_emul_hw_exception( - (seg == x86_seg_ss) ? TRAP_stack_error : TRAP_gp_fault, - 0, &sh_ctxt->ctxt); + x86_emul_hw_exception((seg == x86_seg_ss) ? TRAP_stack_error + : TRAP_gp_fault, + 0, &sh_ctxt->ctxt); return X86EMUL_EXCEPTION; } return 0; } -static int -hvm_read(enum x86_segment seg, - unsigned long offset, - void *p_data, - unsigned int bytes, - enum hvm_access_type access_type, - struct sh_emulate_ctxt *sh_ctxt) +static int hvm_read(enum x86_segment seg, unsigned long offset, void *p_data, + unsigned int bytes, enum hvm_access_type access_type, + struct sh_emulate_ctxt *sh_ctxt) { pagefault_info_t pfinfo; unsigned long addr; int rc; - rc = hvm_translate_virtual_addr( - seg, offset, bytes, access_type, sh_ctxt, &addr); + rc = hvm_translate_virtual_addr(seg, offset, bytes, access_type, sh_ctxt, + &addr); if ( rc || !bytes ) return rc; - rc = hvm_copy_from_guest_linear(p_data, addr, bytes, - (access_type == hvm_access_insn_fetch - ? PFEC_insn_fetch : 0), - &pfinfo); + rc = hvm_copy_from_guest_linear( + p_data, addr, bytes, + (access_type == hvm_access_insn_fetch ? 
PFEC_insn_fetch : 0), &pfinfo); - switch ( rc ) + switch (rc) { case HVMTRANS_okay: return X86EMUL_OKAY; @@ -146,12 +139,9 @@ hvm_read(enum x86_segment seg, return X86EMUL_UNHANDLEABLE; } -static int -hvm_emulate_read(enum x86_segment seg, - unsigned long offset, - void *p_data, - unsigned int bytes, - struct x86_emulate_ctxt *ctxt) +static int hvm_emulate_read(enum x86_segment seg, unsigned long offset, + void *p_data, unsigned int bytes, + struct x86_emulate_ctxt *ctxt) { if ( !is_x86_user_segment(seg) ) return X86EMUL_UNHANDLEABLE; @@ -159,12 +149,9 @@ hvm_emulate_read(enum x86_segment seg, container_of(ctxt, struct sh_emulate_ctxt, ctxt)); } -static int -hvm_emulate_insn_fetch(enum x86_segment seg, - unsigned long offset, - void *p_data, - unsigned int bytes, - struct x86_emulate_ctxt *ctxt) +static int hvm_emulate_insn_fetch(enum x86_segment seg, unsigned long offset, + void *p_data, unsigned int bytes, + struct x86_emulate_ctxt *ctxt) { struct sh_emulate_ctxt *sh_ctxt = container_of(ctxt, struct sh_emulate_ctxt, ctxt); @@ -174,20 +161,17 @@ hvm_emulate_insn_fetch(enum x86_segment seg, /* Fall back if requested bytes are not in the prefetch cache. */ if ( unlikely((insn_off + bytes) > sh_ctxt->insn_buf_bytes) ) - return hvm_read(seg, offset, p_data, bytes, - hvm_access_insn_fetch, sh_ctxt); + return hvm_read(seg, offset, p_data, bytes, hvm_access_insn_fetch, + sh_ctxt); /* Hit the cache. Simple memcpy. */ memcpy(p_data, &sh_ctxt->insn_buf[insn_off], bytes); return X86EMUL_OKAY; } -static int -hvm_emulate_write(enum x86_segment seg, - unsigned long offset, - void *p_data, - unsigned int bytes, - struct x86_emulate_ctxt *ctxt) +static int hvm_emulate_write(enum x86_segment seg, unsigned long offset, + void *p_data, unsigned int bytes, + struct x86_emulate_ctxt *ctxt) { struct sh_emulate_ctxt *sh_ctxt = container_of(ctxt, struct sh_emulate_ctxt, ctxt); @@ -200,13 +184,13 @@ hvm_emulate_write(enum x86_segment seg, if ( seg == x86_seg_ss ) perfc_incr(shadow_fault_emulate_stack); - rc = hvm_translate_virtual_addr( - seg, offset, bytes, hvm_access_write, sh_ctxt, &addr); + rc = hvm_translate_virtual_addr(seg, offset, bytes, hvm_access_write, + sh_ctxt, &addr); if ( rc || !bytes ) return rc; /* Unaligned writes are only acceptable on HVM */ - if ( (addr & (bytes - 1)) && !is_hvm_vcpu(v) ) + if ( (addr & (bytes - 1)) && !is_hvm_vcpu(v) ) return X86EMUL_UNHANDLEABLE; ptr = sh_emulate_map_dest(v, addr, bytes, sh_ctxt); @@ -217,8 +201,8 @@ hvm_emulate_write(enum x86_segment seg, memcpy(ptr, p_data, bytes); if ( tb_init_done ) - v->arch.paging.mode->shadow.trace_emul_write_val(ptr, addr, - p_data, bytes); + v->arch.paging.mode->shadow.trace_emul_write_val(ptr, addr, p_data, + bytes); sh_emulate_unmap_dest(v, ptr, bytes, sh_ctxt); shadow_audit_tables(v); @@ -227,14 +211,9 @@ hvm_emulate_write(enum x86_segment seg, return X86EMUL_OKAY; } -static int -hvm_emulate_cmpxchg(enum x86_segment seg, - unsigned long offset, - void *p_old, - void *p_new, - unsigned int bytes, - bool lock, - struct x86_emulate_ctxt *ctxt) +static int hvm_emulate_cmpxchg(enum x86_segment seg, unsigned long offset, + void *p_old, void *p_new, unsigned int bytes, + bool lock, struct x86_emulate_ctxt *ctxt) { struct sh_emulate_ctxt *sh_ctxt = container_of(ctxt, struct sh_emulate_ctxt, ctxt); @@ -246,13 +225,13 @@ hvm_emulate_cmpxchg(enum x86_segment seg, if ( bytes > sizeof(long) ) return X86EMUL_UNHANDLEABLE; - rc = hvm_translate_virtual_addr( - seg, offset, bytes, hvm_access_write, sh_ctxt, &addr); + rc = 
hvm_translate_virtual_addr(seg, offset, bytes, hvm_access_write, + sh_ctxt, &addr); if ( rc ) return rc; /* Unaligned writes are only acceptable on HVM */ - if ( (addr & (bytes - 1)) && !is_hvm_vcpu(v) ) + if ( (addr & (bytes - 1)) && !is_hvm_vcpu(v) ) return X86EMUL_UNHANDLEABLE; ptr = sh_emulate_map_dest(v, addr, bytes, sh_ctxt); @@ -264,12 +243,20 @@ hvm_emulate_cmpxchg(enum x86_segment seg, memcpy(&new, p_new, bytes); paging_lock(v->domain); - switch ( bytes ) + switch (bytes) { - case 1: prev = cmpxchg((uint8_t *)ptr, old, new); break; - case 2: prev = cmpxchg((uint16_t *)ptr, old, new); break; - case 4: prev = cmpxchg((uint32_t *)ptr, old, new); break; - case 8: prev = cmpxchg((uint64_t *)ptr, old, new); break; + case 1: + prev = cmpxchg((uint8_t *)ptr, old, new); + break; + case 2: + prev = cmpxchg((uint16_t *)ptr, old, new); + break; + case 4: + prev = cmpxchg((uint32_t *)ptr, old, new); + break; + case 8: + prev = cmpxchg((uint64_t *)ptr, old, new); + break; default: SHADOW_PRINTK("cmpxchg size %u is not supported\n", bytes); prev = ~old; @@ -281,9 +268,10 @@ hvm_emulate_cmpxchg(enum x86_segment seg, rc = X86EMUL_CMPXCHG_FAILED; } - SHADOW_DEBUG(EMULATE, - "va %#lx was %#lx expected %#lx wanted %#lx now %#lx bytes %u\n", - addr, prev, old, new, *(unsigned long *)ptr, bytes); + SHADOW_DEBUG( + EMULATE, + "va %#lx was %#lx expected %#lx wanted %#lx now %#lx bytes %u\n", addr, + prev, old, new, *(unsigned long *)ptr, bytes); sh_emulate_unmap_dest(v, ptr, bytes, sh_ctxt); shadow_audit_tables(v); @@ -293,11 +281,11 @@ hvm_emulate_cmpxchg(enum x86_segment seg, } const struct x86_emulate_ops hvm_shadow_emulator_ops = { - .read = hvm_emulate_read, + .read = hvm_emulate_read, .insn_fetch = hvm_emulate_insn_fetch, - .write = hvm_emulate_write, - .cmpxchg = hvm_emulate_cmpxchg, - .cpuid = hvmemul_cpuid, + .write = hvm_emulate_write, + .cmpxchg = hvm_emulate_cmpxchg, + .cpuid = hvmemul_cpuid, }; /**************************************************************************/ @@ -309,7 +297,7 @@ const struct x86_emulate_ops hvm_shadow_emulator_ops = { */ #define BAD_GVA_TO_GFN (~0UL) #define BAD_GFN_TO_MFN (~1UL) -#define READONLY_GFN (~2UL) +#define READONLY_GFN (~2UL) static mfn_t emulate_gva_to_mfn(struct vcpu *v, unsigned long vaddr, struct sh_emulate_ctxt *sh_ctxt) { @@ -378,7 +366,8 @@ static void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr, if ( is_hvm_domain(d) ? hvm_get_cpl(v) == 3 : !guest_kernel_mode(v, guest_cpu_user_regs()) ) { - gdprintk(XENLOG_DEBUG, "User-mode write to pagetable reached " + gdprintk(XENLOG_DEBUG, + "User-mode write to pagetable reached " "emulate_map_dest(). This should never happen!\n"); return MAPPING_UNHANDLEABLE; } @@ -387,17 +376,20 @@ static void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr, sh_ctxt->mfn[0] = emulate_gva_to_mfn(v, vaddr, sh_ctxt); if ( !mfn_valid(sh_ctxt->mfn[0]) ) { - switch ( mfn_x(sh_ctxt->mfn[0]) ) + switch (mfn_x(sh_ctxt->mfn[0])) { - case BAD_GVA_TO_GFN: return MAPPING_EXCEPTION; - case READONLY_GFN: return MAPPING_SILENT_FAIL; - default: return MAPPING_UNHANDLEABLE; + case BAD_GVA_TO_GFN: + return MAPPING_EXCEPTION; + case READONLY_GFN: + return MAPPING_SILENT_FAIL; + default: + return MAPPING_UNHANDLEABLE; } } /* Unaligned writes mean probably this isn't a pagetable. */ if ( vaddr & (bytes - 1) ) - sh_remove_shadows(d, sh_ctxt->mfn[0], 0, 0 /* Slow, can fail. */ ); + sh_remove_shadows(d, sh_ctxt->mfn[0], 0, 0 /* Slow, can fail. 
*/); if ( likely(((vaddr + bytes - 1) & PAGE_MASK) == (vaddr & PAGE_MASK)) ) { @@ -417,21 +409,24 @@ static void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr, else { /* This write crosses a page boundary. Translate the second page. */ - sh_ctxt->mfn[1] = emulate_gva_to_mfn( - v, (vaddr + bytes - 1) & PAGE_MASK, sh_ctxt); + sh_ctxt->mfn[1] = + emulate_gva_to_mfn(v, (vaddr + bytes - 1) & PAGE_MASK, sh_ctxt); if ( !mfn_valid(sh_ctxt->mfn[1]) ) { put_page(mfn_to_page(sh_ctxt->mfn[0])); - switch ( mfn_x(sh_ctxt->mfn[1]) ) + switch (mfn_x(sh_ctxt->mfn[1])) { - case BAD_GVA_TO_GFN: return MAPPING_EXCEPTION; - case READONLY_GFN: return MAPPING_SILENT_FAIL; - default: return MAPPING_UNHANDLEABLE; + case BAD_GVA_TO_GFN: + return MAPPING_EXCEPTION; + case READONLY_GFN: + return MAPPING_SILENT_FAIL; + default: + return MAPPING_UNHANDLEABLE; } } /* Cross-page writes mean probably not a pagetable. */ - sh_remove_shadows(d, sh_ctxt->mfn[1], 0, 0 /* Slow, can fail. */ ); + sh_remove_shadows(d, sh_ctxt->mfn[1], 0, 0 /* Slow, can fail. */); map = vmap(sh_ctxt->mfn, 2); if ( !map ) @@ -443,7 +438,7 @@ static void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr, map += (vaddr & ~PAGE_MASK); } -#if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY ) /* * Remember if the bottom bit was clear, so we can choose not to run * the change through the verify code if it's still clear afterwards. @@ -477,16 +472,17 @@ static inline void check_for_early_unshadow(struct vcpu *v, mfn_t gmfn) * * Don't bother trying to unshadow if it's not a PT, or if it's > l1. */ - if ( ( v->arch.paging.shadow.pagetable_dying - || ( !d->arch.paging.shadow.pagetable_dying_op - && v->arch.paging.shadow.last_emulated_mfn_for_unshadow == mfn_x(gmfn) ) ) - && sh_mfn_is_a_page_table(gmfn) - && (!d->arch.paging.shadow.pagetable_dying_op || - !(mfn_to_page(gmfn)->shadow_flags - & (SHF_L2_32|SHF_L2_PAE|SHF_L2H_PAE|SHF_L4_64))) ) + if ( (v->arch.paging.shadow.pagetable_dying || + (!d->arch.paging.shadow.pagetable_dying_op && + v->arch.paging.shadow.last_emulated_mfn_for_unshadow == + mfn_x(gmfn))) && + sh_mfn_is_a_page_table(gmfn) && + (!d->arch.paging.shadow.pagetable_dying_op || + !(mfn_to_page(gmfn)->shadow_flags & + (SHF_L2_32 | SHF_L2_PAE | SHF_L2H_PAE | SHF_L4_64))) ) { perfc_incr(shadow_early_unshadow); - sh_remove_shadows(d, gmfn, 1, 0 /* Fast, can fail to unshadow */ ); + sh_remove_shadows(d, gmfn, 1, 0 /* Fast, can fail to unshadow */); TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_EARLY_UNSHADOW); } v->arch.paging.shadow.last_emulated_mfn_for_unshadow = mfn_x(gmfn); @@ -525,24 +521,20 @@ static void sh_emulate_unmap_dest(struct vcpu *v, void *addr, * - _PAGE_PRESENT was clear before and after the write. */ shflags = mfn_to_page(sh_ctxt->mfn[0])->shadow_flags; -#if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY) - if ( sh_ctxt->low_bit_was_clear - && !(*(u8 *)addr & _PAGE_PRESENT) - && ((!(shflags & SHF_32) - /* - * Not shadowed 32-bit: aligned 64-bit writes that leave - * the present bit unset are safe to ignore. - */ - && ((unsigned long)addr & 7) == 0 - && bytes <= 8) - || - (!(shflags & (SHF_PAE|SHF_64)) - /* - * Not shadowed PAE/64-bit: aligned 32-bit writes that - * leave the present bit unset are safe to ignore. 
- */ - && ((unsigned long)addr & 3) == 0 - && bytes <= 4)) ) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY ) + if ( sh_ctxt->low_bit_was_clear && !(*(u8 *)addr & _PAGE_PRESENT) && + ((!(shflags & SHF_32) + /* + * Not shadowed 32-bit: aligned 64-bit writes that leave + * the present bit unset are safe to ignore. + */ + && ((unsigned long)addr & 7) == 0 && bytes <= 8) || + (!(shflags & (SHF_PAE | SHF_64)) + /* + * Not shadowed PAE/64-bit: aligned 32-bit writes that + * leave the present bit unset are safe to ignore. + */ + && ((unsigned long)addr & 3) == 0 && bytes <= 4)) ) { /* Writes with this alignment constraint can't possibly cross pages. */ ASSERT(!mfn_valid(sh_ctxt->mfn[1])); diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c index 3e5651d029..143f17973b 100644 --- a/xen/arch/x86/mm/shadow/multi.c +++ b/xen/arch/x86/mm/shadow/multi.c @@ -67,11 +67,12 @@ asm(".file \"" __OBJECT_FILE__ "\""); */ #define FETCH_TYPE_PREFETCH 1 -#define FETCH_TYPE_DEMAND 2 -#define FETCH_TYPE_WRITE 4 -typedef enum { - ft_prefetch = FETCH_TYPE_PREFETCH, - ft_demand_read = FETCH_TYPE_DEMAND, +#define FETCH_TYPE_DEMAND 2 +#define FETCH_TYPE_WRITE 4 +typedef enum +{ + ft_prefetch = FETCH_TYPE_PREFETCH, + ft_demand_read = FETCH_TYPE_DEMAND, ft_demand_write = FETCH_TYPE_DEMAND | FETCH_TYPE_WRITE, } fetch_type_t; @@ -79,8 +80,8 @@ extern const char *const fetch_type_names[]; #if SHADOW_DEBUG_PROPAGATE && CONFIG_PAGING_LEVELS == GUEST_PAGING_LEVELS const char *const fetch_type_names[] = { - [ft_prefetch] = "prefetch", - [ft_demand_read] = "demand read", + [ft_prefetch] = "prefetch", + [ft_demand_read] = "demand read", [ft_demand_write] = "demand write", }; #endif @@ -93,8 +94,7 @@ const char *const fetch_type_names[] = { * shadow L1 which maps its "splinters". 
*/ -static inline mfn_t -get_fl1_shadow_status(struct domain *d, gfn_t gfn) +static inline mfn_t get_fl1_shadow_status(struct domain *d, gfn_t gfn) /* Look for FL1 shadows in the hash table */ { mfn_t smfn = shadow_hash_lookup(d, gfn_x(gfn), SH_type_fl1_shadow); @@ -102,8 +102,8 @@ get_fl1_shadow_status(struct domain *d, gfn_t gfn) return smfn; } -static inline mfn_t -get_shadow_status(struct domain *d, mfn_t gmfn, u32 shadow_type) +static inline mfn_t get_shadow_status(struct domain *d, mfn_t gmfn, + u32 shadow_type) /* Look for shadows in the hash table */ { mfn_t smfn = shadow_hash_lookup(d, mfn_x(gmfn), shadow_type); @@ -112,25 +112,25 @@ get_shadow_status(struct domain *d, mfn_t gmfn, u32 shadow_type) return smfn; } -static inline void -set_fl1_shadow_status(struct domain *d, gfn_t gfn, mfn_t smfn) +static inline void set_fl1_shadow_status(struct domain *d, gfn_t gfn, + mfn_t smfn) /* Put an FL1 shadow into the hash table */ { - SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%"PRI_mfn"\n", - gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn)); + SHADOW_PRINTK("gfn=%" SH_PRI_gfn ", type=%08x, smfn=%" PRI_mfn "\n", + gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn)); ASSERT(mfn_to_page(smfn)->u.sh.head); shadow_hash_insert(d, gfn_x(gfn), SH_type_fl1_shadow, smfn); } -static inline void -set_shadow_status(struct domain *d, mfn_t gmfn, u32 shadow_type, mfn_t smfn) +static inline void set_shadow_status(struct domain *d, mfn_t gmfn, + u32 shadow_type, mfn_t smfn) /* Put a shadow into the hash table */ { int res; - SHADOW_PRINTK("d%d gmfn=%lx, type=%08x, smfn=%lx\n", - d->domain_id, mfn_x(gmfn), shadow_type, mfn_x(smfn)); + SHADOW_PRINTK("d%d gmfn=%lx, type=%08x, smfn=%lx\n", d->domain_id, + mfn_x(gmfn), shadow_type, mfn_x(smfn)); ASSERT(mfn_to_page(smfn)->u.sh.head); @@ -144,21 +144,21 @@ set_shadow_status(struct domain *d, mfn_t gmfn, u32 shadow_type, mfn_t smfn) shadow_hash_insert(d, mfn_x(gmfn), shadow_type, smfn); } -static inline void -delete_fl1_shadow_status(struct domain *d, gfn_t gfn, mfn_t smfn) +static inline void delete_fl1_shadow_status(struct domain *d, gfn_t gfn, + mfn_t smfn) /* Remove a shadow from the hash table */ { - SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%"PRI_mfn"\n", - gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn)); + SHADOW_PRINTK("gfn=%" SH_PRI_gfn ", type=%08x, smfn=%" PRI_mfn "\n", + gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn)); ASSERT(mfn_to_page(smfn)->u.sh.head); shadow_hash_delete(d, gfn_x(gfn), SH_type_fl1_shadow, smfn); } -static inline void -delete_shadow_status(struct domain *d, mfn_t gmfn, u32 shadow_type, mfn_t smfn) +static inline void delete_shadow_status(struct domain *d, mfn_t gmfn, + u32 shadow_type, mfn_t smfn) /* Remove a shadow from the hash table */ { - SHADOW_PRINTK("d%d gmfn=%"PRI_mfn", type=%08x, smfn=%"PRI_mfn"\n", + SHADOW_PRINTK("d%d gmfn=%" PRI_mfn ", type=%08x, smfn=%" PRI_mfn "\n", d->domain_id, mfn_x(gmfn), shadow_type, mfn_x(smfn)); ASSERT(mfn_to_page(smfn)->u.sh.head); shadow_hash_delete(d, mfn_x(gmfn), shadow_type, smfn); @@ -167,13 +167,11 @@ delete_shadow_status(struct domain *d, mfn_t gmfn, u32 shadow_type, mfn_t smfn) put_page(mfn_to_page(gmfn)); } - /**************************************************************************/ /* Functions for walking the guest page tables */ -static inline bool -sh_walk_guest_tables(struct vcpu *v, unsigned long va, walk_t *gw, - uint32_t pfec) +static inline bool sh_walk_guest_tables(struct vcpu *v, unsigned long va, + walk_t *gw, uint32_t pfec) { #if GUEST_PAGING_LEVELS == 3 /* PAE */ return 
guest_walk_tables(v, p2m_get_hostp2m(v->domain), va, gw, pfec, @@ -181,11 +179,11 @@ sh_walk_guest_tables(struct vcpu *v, unsigned long va, walk_t *gw, #else /* 32 or 64 */ const struct domain *d = v->domain; mfn_t root_mfn = ((v->arch.flags & TF_kernel_mode) || is_pv_32bit_domain(d) - ? pagetable_get_mfn(v->arch.guest_table) - : pagetable_get_mfn(v->arch.guest_table_user)); + ? pagetable_get_mfn(v->arch.guest_table) + : pagetable_get_mfn(v->arch.guest_table_user)); void *root_map = map_domain_page(root_mfn); - bool ok = guest_walk_tables(v, p2m_get_hostp2m(d), va, gw, pfec, - root_mfn, root_map); + bool ok = guest_walk_tables(v, p2m_get_hostp2m(d), va, gw, pfec, root_mfn, + root_map); unmap_domain_page(root_map); @@ -199,8 +197,8 @@ sh_walk_guest_tables(struct vcpu *v, unsigned long va, walk_t *gw, * * Return 1 to indicate success and 0 for inconsistency */ -static inline uint32_t -shadow_check_gwalk(struct vcpu *v, unsigned long va, walk_t *gw, int version) +static inline uint32_t shadow_check_gwalk(struct vcpu *v, unsigned long va, + walk_t *gw, int version) { struct domain *d = v->domain; guest_l1e_t *l1p; @@ -215,7 +213,7 @@ shadow_check_gwalk(struct vcpu *v, unsigned long va, walk_t *gw, int version) /* No need for smp_rmb() here; taking the paging lock was enough. */ if ( version == atomic_read(&d->arch.paging.shadow.gtable_dirty_version) ) - return 1; + return 1; /* We may consider caching guest page mapping from last * guest table walk. However considering this check happens @@ -256,9 +254,8 @@ shadow_check_gwalk(struct vcpu *v, unsigned long va, walk_t *gw, int version) return !mismatch; } -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) -static int -shadow_check_gl1e(struct vcpu *v, walk_t *gw) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) +static int shadow_check_gl1e(struct vcpu *v, walk_t *gw) { guest_l1e_t *l1p, nl1e; @@ -283,17 +280,17 @@ shadow_check_gl1e(struct vcpu *v, walk_t *gw) * pages, re-walk from the beginning. */ #define GW_RMWR_FLUSHTLB 1 -#define GW_RMWR_REWALK 2 +#define GW_RMWR_REWALK 2 -static inline uint32_t -gw_remove_write_accesses(struct vcpu *v, unsigned long va, walk_t *gw) +static inline uint32_t gw_remove_write_accesses(struct vcpu *v, + unsigned long va, walk_t *gw) { struct domain *d = v->domain; uint32_t rc = 0; #if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */ #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... 
*/ -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) if ( mfn_is_out_of_sync(gw->l3mfn) ) { sh_resync(d, gw->l3mfn); @@ -301,11 +298,11 @@ gw_remove_write_accesses(struct vcpu *v, unsigned long va, walk_t *gw) } else #endif /* OOS */ - if ( sh_remove_write_access(d, gw->l3mfn, 3, va) ) - rc = GW_RMWR_FLUSHTLB; + if ( sh_remove_write_access(d, gw->l3mfn, 3, va) ) + rc = GW_RMWR_FLUSHTLB; #endif /* GUEST_PAGING_LEVELS >= 4 */ -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) if ( mfn_is_out_of_sync(gw->l2mfn) ) { sh_resync(d, gw->l2mfn); @@ -313,13 +310,13 @@ gw_remove_write_accesses(struct vcpu *v, unsigned long va, walk_t *gw) } else #endif /* OOS */ - if ( sh_remove_write_access(d, gw->l2mfn, 2, va) ) + if ( sh_remove_write_access(d, gw->l2mfn, 2, va) ) rc |= GW_RMWR_FLUSHTLB; #endif /* GUEST_PAGING_LEVELS >= 3 */ if ( !(guest_can_use_l2_superpages(v) && (guest_l2e_get_flags(gw->l2e) & _PAGE_PSE)) -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) && !mfn_is_out_of_sync(gw->l1mfn) #endif /* OOS */ && sh_remove_write_access(d, gw->l1mfn, 1, va) ) @@ -340,35 +337,35 @@ static void sh_audit_gw(struct vcpu *v, const walk_t *gw) return; #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */ - if ( mfn_valid(gw->l4mfn) - && mfn_valid((smfn = get_shadow_status(d, gw->l4mfn, - SH_type_l4_shadow))) ) - (void) sh_audit_l4_table(v, smfn, INVALID_MFN); - if ( mfn_valid(gw->l3mfn) - && mfn_valid((smfn = get_shadow_status(d, gw->l3mfn, - SH_type_l3_shadow))) ) - (void) sh_audit_l3_table(v, smfn, INVALID_MFN); + if ( mfn_valid(gw->l4mfn) && + mfn_valid( + (smfn = get_shadow_status(d, gw->l4mfn, SH_type_l4_shadow))) ) + (void)sh_audit_l4_table(v, smfn, INVALID_MFN); + if ( mfn_valid(gw->l3mfn) && + mfn_valid( + (smfn = get_shadow_status(d, gw->l3mfn, SH_type_l3_shadow))) ) + (void)sh_audit_l3_table(v, smfn, INVALID_MFN); #endif /* PAE or 64... 
*/ if ( mfn_valid(gw->l2mfn) ) { - if ( mfn_valid((smfn = get_shadow_status(d, gw->l2mfn, - SH_type_l2_shadow))) ) - (void) sh_audit_l2_table(v, smfn, INVALID_MFN); + if ( mfn_valid( + (smfn = get_shadow_status(d, gw->l2mfn, SH_type_l2_shadow))) ) + (void)sh_audit_l2_table(v, smfn, INVALID_MFN); #if GUEST_PAGING_LEVELS == 3 - if ( mfn_valid((smfn = get_shadow_status(d, gw->l2mfn, - SH_type_l2h_shadow))) ) - (void) sh_audit_l2_table(v, smfn, INVALID_MFN); + if ( mfn_valid( + (smfn = get_shadow_status(d, gw->l2mfn, SH_type_l2h_shadow))) ) + (void)sh_audit_l2_table(v, smfn, INVALID_MFN); #endif } - if ( mfn_valid(gw->l1mfn) - && mfn_valid((smfn = get_shadow_status(d, gw->l1mfn, - SH_type_l1_shadow))) ) - (void) sh_audit_l1_table(v, smfn, INVALID_MFN); - else if ( (guest_l2e_get_flags(gw->l2e) & _PAGE_PRESENT) - && (guest_l2e_get_flags(gw->l2e) & _PAGE_PSE) - && mfn_valid( - (smfn = get_fl1_shadow_status(d, guest_l2e_get_gfn(gw->l2e)))) ) - (void) sh_audit_fl1_table(v, smfn, INVALID_MFN); + if ( mfn_valid(gw->l1mfn) && + mfn_valid( + (smfn = get_shadow_status(d, gw->l1mfn, SH_type_l1_shadow))) ) + (void)sh_audit_l1_table(v, smfn, INVALID_MFN); + else if ( (guest_l2e_get_flags(gw->l2e) & _PAGE_PRESENT) && + (guest_l2e_get_flags(gw->l2e) & _PAGE_PSE) && + mfn_valid((smfn = get_fl1_shadow_status( + d, guest_l2e_get_gfn(gw->l2e)))) ) + (void)sh_audit_fl1_table(v, smfn, INVALID_MFN); #endif /* SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES */ } @@ -376,8 +373,8 @@ static void sh_audit_gw(struct vcpu *v, const walk_t *gw) * Write a new value into the guest pagetable, and update the shadows * appropriately. Returns false if we page-faulted, true for success. */ -static bool -sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn) +static bool sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, + mfn_t gmfn) { #if CONFIG_PAGING_LEVELS == GUEST_PAGING_LEVELS int failed; @@ -400,9 +397,8 @@ sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn) * N.B. caller should check the value of "old" to see if the cmpxchg itself * was successful. 
*/ -static bool -sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t *old, - intpte_t new, mfn_t gmfn) +static bool sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t *old, + intpte_t new, mfn_t gmfn) { #if CONFIG_PAGING_LEVELS == GUEST_PAGING_LEVELS int failed; @@ -446,9 +442,9 @@ static inline mfn_t sh_next_page(mfn_t smfn) struct page_info *pg = mfn_to_page(smfn), *next; struct page_list_head h = PAGE_LIST_HEAD_INIT(h); - ASSERT(pg->u.sh.type == SH_type_l1_32_shadow - || pg->u.sh.type == SH_type_fl1_32_shadow - || pg->u.sh.type == SH_type_l2_32_shadow); + ASSERT(pg->u.sh.type == SH_type_l1_32_shadow || + pg->u.sh.type == SH_type_fl1_32_shadow || + pg->u.sh.type == SH_type_l2_32_shadow); ASSERT(pg->u.sh.type == SH_type_l2_32_shadow || pg->u.sh.head); next = page_list_next(pg, &h); @@ -460,16 +456,14 @@ static inline mfn_t sh_next_page(mfn_t smfn) } #endif -static inline u32 -guest_index(void *ptr) +static inline u32 guest_index(void *ptr) { return (u32)((unsigned long)ptr & ~PAGE_MASK) / sizeof(guest_l1e_t); } -static u32 -shadow_l1_index(mfn_t *smfn, u32 guest_index) +static u32 shadow_l1_index(mfn_t *smfn, u32 guest_index) { -#if (GUEST_PAGING_LEVELS == 2) +#if ( GUEST_PAGING_LEVELS == 2 ) ASSERT(mfn_to_page(*smfn)->u.sh.head); if ( guest_index >= SHADOW_L1_PAGETABLE_ENTRIES ) *smfn = sh_next_page(*smfn); @@ -479,10 +473,9 @@ shadow_l1_index(mfn_t *smfn, u32 guest_index) #endif } -static u32 -shadow_l2_index(mfn_t *smfn, u32 guest_index) +static u32 shadow_l2_index(mfn_t *smfn, u32 guest_index) { -#if (GUEST_PAGING_LEVELS == 2) +#if ( GUEST_PAGING_LEVELS == 2 ) int i; ASSERT(mfn_to_page(*smfn)->u.sh.head); // Because we use 2 shadow l2 entries for each guest entry, the number of @@ -499,21 +492,18 @@ shadow_l2_index(mfn_t *smfn, u32 guest_index) #if GUEST_PAGING_LEVELS >= 4 -static u32 -shadow_l3_index(mfn_t *smfn, u32 guest_index) +static u32 shadow_l3_index(mfn_t *smfn, u32 guest_index) { return guest_index; } -static u32 -shadow_l4_index(mfn_t *smfn, u32 guest_index) +static u32 shadow_l4_index(mfn_t *smfn, u32 guest_index) { return guest_index; } #endif // GUEST_PAGING_LEVELS >= 4 - /**************************************************************************/ /* Function which computes shadow entries from their corresponding guest * entries. This is the "heart" of the shadow code. It operates using @@ -521,16 +511,13 @@ shadow_l4_index(mfn_t *smfn, u32 guest_index) * Don't call it directly, but use the four wrappers below. 
*/ -static always_inline void -_sh_propagate(struct vcpu *v, - guest_intpte_t guest_intpte, - mfn_t target_mfn, - void *shadow_entry_ptr, - int level, - fetch_type_t ft, - p2m_type_t p2mt) +static always_inline void _sh_propagate(struct vcpu *v, + guest_intpte_t guest_intpte, + mfn_t target_mfn, + void *shadow_entry_ptr, int level, + fetch_type_t ft, p2m_type_t p2mt) { - guest_l1e_t guest_entry = { guest_intpte }; + guest_l1e_t guest_entry = {guest_intpte}; shadow_l1e_t *sp = shadow_entry_ptr; struct domain *d = v->domain; struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram; @@ -543,8 +530,8 @@ _sh_propagate(struct vcpu *v, ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3); /* Check there's something for the shadows to map to */ - if ( (!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt)) - || !gfn_valid(d, target_gfn) ) + if ( (!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt)) || + !gfn_valid(d, target_gfn) ) { *sp = shadow_l1e_empty(); goto done; @@ -578,12 +565,11 @@ _sh_propagate(struct vcpu *v, // mfn means that we can not usefully shadow anything, and so we // return early. // - mmio_mfn = !mfn_valid(target_mfn) - || (level == 1 - && page_get_owner(mfn_to_page(target_mfn)) == dom_io); - if ( mmio_mfn - && !(level == 1 && (!shadow_mode_refcounts(d) - || p2mt == p2m_mmio_direct)) ) + mmio_mfn = + !mfn_valid(target_mfn) || + (level == 1 && page_get_owner(mfn_to_page(target_mfn)) == dom_io); + if ( mmio_mfn && !(level == 1 && + (!shadow_mode_refcounts(d) || p2mt == p2m_mmio_direct)) ) { ASSERT((ft == ft_prefetch)); *sp = shadow_l1e_empty(); @@ -595,8 +581,7 @@ _sh_propagate(struct vcpu *v, // Since we know the guest's PRESENT bit is set, we also set the shadow's // SHADOW_PRESENT bit. // - pass_thru_flags = (_PAGE_ACCESSED | _PAGE_USER | - _PAGE_RW | _PAGE_PRESENT); + pass_thru_flags = (_PAGE_ACCESSED | _PAGE_USER | _PAGE_RW | _PAGE_PRESENT); if ( guest_nx_enabled(v) ) pass_thru_flags |= _PAGE_NX_BIT; if ( level == 1 && !shadow_mode_refcounts(d) && mmio_mfn ) @@ -626,24 +611,20 @@ _sh_propagate(struct vcpu *v, sflags |= pat_type_2_pte_flags(type); else if ( d->arch.hvm.is_in_uc_mode ) sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE); - else - if ( iomem_access_permitted(d, mfn_x(target_mfn), mfn_x(target_mfn)) ) - { - if ( p2mt == p2m_mmio_direct ) - sflags |= get_pat_flags(v, - gflags, - gfn_to_paddr(target_gfn), - mfn_to_maddr(target_mfn), - MTRR_TYPE_UNCACHABLE); - else if ( iommu_snoop ) - sflags |= pat_type_2_pte_flags(PAT_TYPE_WRBACK); - else - sflags |= get_pat_flags(v, - gflags, - gfn_to_paddr(target_gfn), - mfn_to_maddr(target_mfn), - NO_HARDCODE_MEM_TYPE); - } + else if ( iomem_access_permitted(d, mfn_x(target_mfn), + mfn_x(target_mfn)) ) + { + if ( p2mt == p2m_mmio_direct ) + sflags |= get_pat_flags(v, gflags, gfn_to_paddr(target_gfn), + mfn_to_maddr(target_mfn), + MTRR_TYPE_UNCACHABLE); + else if ( iommu_snoop ) + sflags |= pat_type_2_pte_flags(PAT_TYPE_WRBACK); + else + sflags |= get_pat_flags(v, gflags, gfn_to_paddr(target_gfn), + mfn_to_maddr(target_mfn), + NO_HARDCODE_MEM_TYPE); + } } // Set the A&D bits for higher level shadows. 
@@ -663,11 +644,9 @@ _sh_propagate(struct vcpu *v, sflags &= ~_PAGE_PRESENT; /* D bits exist in L1es and PSE L2es */ - if ( unlikely(((level == 1) || - ((level == 2) && - (gflags & _PAGE_PSE) && - guest_can_use_l2_superpages(v))) - && !(gflags & _PAGE_DIRTY)) ) + if ( unlikely(((level == 1) || ((level == 2) && (gflags & _PAGE_PSE) && + guest_can_use_l2_superpages(v))) && + !(gflags & _PAGE_DIRTY)) ) sflags &= ~_PAGE_RW; // shadow_mode_log_dirty support @@ -679,7 +658,8 @@ _sh_propagate(struct vcpu *v, // p2m_ram_logdirty p2m type: only HAP uses that.) if ( unlikely((level == 1) && shadow_mode_log_dirty(d)) ) { - if ( mfn_valid(target_mfn) ) { + if ( mfn_valid(target_mfn) ) + { if ( ft & FETCH_TYPE_WRITE ) paging_mark_dirty(d, target_mfn); else if ( !paging_mfn_is_dirty(d, target_mfn) ) @@ -687,10 +667,9 @@ _sh_propagate(struct vcpu *v, } } - if ( unlikely((level == 1) && dirty_vram - && dirty_vram->last_dirty == -1 - && gfn_x(target_gfn) >= dirty_vram->begin_pfn - && gfn_x(target_gfn) < dirty_vram->end_pfn) ) + if ( unlikely((level == 1) && dirty_vram && dirty_vram->last_dirty == -1 && + gfn_x(target_gfn) >= dirty_vram->begin_pfn && + gfn_x(target_gfn) < dirty_vram->end_pfn) ) { if ( ft & FETCH_TYPE_WRITE ) dirty_vram->last_dirty = NOW(); @@ -710,46 +689,41 @@ _sh_propagate(struct vcpu *v, // protect guest page tables // - if ( unlikely((level == 1) - && sh_mfn_is_a_page_table(target_mfn) -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) + if ( unlikely((level == 1) && + sh_mfn_is_a_page_table(target_mfn) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* Unless the page is out of sync and the guest is writing to it. */ - && !(mfn_oos_may_write(target_mfn) - && (ft == ft_demand_write)) + && !(mfn_oos_may_write(target_mfn) && (ft == ft_demand_write)) #endif /* OOS */ - ) ) + ) ) sflags &= ~_PAGE_RW; // PV guests in 64-bit mode use two different page tables for user vs // supervisor permissions, making the guest's _PAGE_USER bit irrelevant. // It is always shadowed as present... - if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32bit_domain(d) - && is_pv_domain(d) ) + if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32bit_domain(d) && + is_pv_domain(d) ) { sflags |= _PAGE_USER; } *sp = shadow_l1e_from_mfn(target_mfn, sflags); - done: +done: SHADOW_DEBUG(PROPAGATE, "%s level %u guest %" SH_PRI_gpte " shadow %" SH_PRI_pte "\n", fetch_type_names[ft], level, guest_entry.l1, sp->l1); } - /* These four wrappers give us a little bit of type-safety back around * the use of void-* pointers and intpte types in _sh_propagate(), and * allow the compiler to optimize out some level checks. 
*/ #if GUEST_PAGING_LEVELS >= 4 -static void -l4e_propagate_from_guest(struct vcpu *v, - guest_l4e_t gl4e, - mfn_t sl3mfn, - shadow_l4e_t *sl4e, - fetch_type_t ft) +static void l4e_propagate_from_guest(struct vcpu *v, guest_l4e_t gl4e, + mfn_t sl3mfn, shadow_l4e_t *sl4e, + fetch_type_t ft) { if ( !mfn_eq(sl3mfn, INVALID_MFN) && (guest_l4e_get_flags(gl4e) & _PAGE_PRESENT) ) @@ -758,12 +732,9 @@ l4e_propagate_from_guest(struct vcpu *v, _sh_propagate(v, gl4e.l4, sl3mfn, sl4e, 4, ft, p2m_ram_rw); } -static void -l3e_propagate_from_guest(struct vcpu *v, - guest_l3e_t gl3e, - mfn_t sl2mfn, - shadow_l3e_t *sl3e, - fetch_type_t ft) +static void l3e_propagate_from_guest(struct vcpu *v, guest_l3e_t gl3e, + mfn_t sl2mfn, shadow_l3e_t *sl3e, + fetch_type_t ft) { if ( !mfn_eq(sl2mfn, INVALID_MFN) && (guest_l3e_get_flags(gl3e) & _PAGE_PRESENT) ) @@ -773,12 +744,9 @@ l3e_propagate_from_guest(struct vcpu *v, } #endif // GUEST_PAGING_LEVELS >= 4 -static void -l2e_propagate_from_guest(struct vcpu *v, - guest_l2e_t gl2e, - mfn_t sl1mfn, - shadow_l2e_t *sl2e, - fetch_type_t ft) +static void l2e_propagate_from_guest(struct vcpu *v, guest_l2e_t gl2e, + mfn_t sl1mfn, shadow_l2e_t *sl2e, + fetch_type_t ft) { if ( !mfn_eq(sl1mfn, INVALID_MFN) && (guest_l2e_get_flags(gl2e) & _PAGE_PRESENT) ) @@ -787,13 +755,9 @@ l2e_propagate_from_guest(struct vcpu *v, _sh_propagate(v, gl2e.l2, sl1mfn, sl2e, 2, ft, p2m_ram_rw); } -static void -l1e_propagate_from_guest(struct vcpu *v, - guest_l1e_t gl1e, - mfn_t gmfn, - shadow_l1e_t *sl1e, - fetch_type_t ft, - p2m_type_t p2mt) +static void l1e_propagate_from_guest(struct vcpu *v, guest_l1e_t gl1e, + mfn_t gmfn, shadow_l1e_t *sl1e, + fetch_type_t ft, p2m_type_t p2mt) { if ( !mfn_eq(gmfn, INVALID_MFN) && (guest_l1e_get_flags(gl1e) & _PAGE_PRESENT) ) @@ -802,7 +766,6 @@ l1e_propagate_from_guest(struct vcpu *v, _sh_propagate(v, gl1e.l1, gmfn, sl1e, 1, ft, p2mt); } - /**************************************************************************/ /* These functions update shadow entries (and do bookkeeping on the shadow * tables they are in). It is intended that they are the only @@ -817,16 +780,15 @@ static inline void safe_write_entry(void *dst, void *src) { volatile unsigned long *d = dst; unsigned long *s = src; - ASSERT(!((unsigned long) d & (sizeof (shadow_l1e_t) - 1))); + ASSERT(!((unsigned long)d & (sizeof(shadow_l1e_t) - 1))); /* In 64-bit, sizeof(pte) == sizeof(ulong) == 1 word, * which will be an atomic write, since the entry is aligned. */ - BUILD_BUG_ON(sizeof (shadow_l1e_t) != sizeof (unsigned long)); + BUILD_BUG_ON(sizeof(shadow_l1e_t) != sizeof(unsigned long)); *d = *s; } - -static inline void -shadow_write_entries(void *d, void *s, int entries, mfn_t mfn) +static inline void shadow_write_entries(void *d, void *s, int entries, + mfn_t mfn) /* This function does the actual writes to shadow pages. * It must not be called directly, since it doesn't do the bookkeeping * that shadow_set_l*e() functions do. */ @@ -841,24 +803,24 @@ shadow_write_entries(void *d, void *s, int entries, mfn_t mfn) * no write access through the linear map. * We detect that by writing to the shadow with copy_to_user() and * using map_domain_page() to get a writeable mapping if we need to. 
*/ - if ( __copy_to_user(d, d, sizeof (unsigned long)) != 0 ) + if ( __copy_to_user(d, d, sizeof(unsigned long)) != 0 ) { perfc_incr(shadow_linear_map_failed); map = map_domain_page(mfn); dst = map + ((unsigned long)dst & (PAGE_SIZE - 1)); } - for ( i = 0; i < entries; i++ ) safe_write_entry(dst++, src++); - if ( map != NULL ) unmap_domain_page(map); + if ( map != NULL ) + unmap_domain_page(map); } /* type is only used to distinguish grant map pages from ordinary RAM * i.e. non-p2m_is_grant() pages are treated as p2m_ram_rw. */ -static int inline -shadow_get_page_from_l1e(shadow_l1e_t sl1e, struct domain *d, p2m_type_t type) +static int inline shadow_get_page_from_l1e(shadow_l1e_t sl1e, struct domain *d, + p2m_type_t type) { int res; mfn_t mfn; @@ -874,27 +836,26 @@ shadow_get_page_from_l1e(shadow_l1e_t sl1e, struct domain *d, p2m_type_t type) // If a privileged domain is attempting to install a map of a page it does // not own, we let it succeed anyway. // - if ( unlikely(res < 0) && - !shadow_mode_translate(d) && + if ( unlikely(res < 0) && !shadow_mode_translate(d) && mfn_valid(mfn = shadow_l1e_get_mfn(sl1e)) && - (owner = page_get_owner(mfn_to_page(mfn))) && - (d != owner) ) + (owner = page_get_owner(mfn_to_page(mfn))) && (d != owner) ) { res = xsm_priv_mapping(XSM_TARGET, d, owner); - if ( !res ) { + if ( !res ) + { res = get_page_from_l1e(sl1e, d, owner); - SHADOW_PRINTK("privileged domain %d installs map of mfn %"PRI_mfn" " - "which is owned by d%d: %s\n", - d->domain_id, mfn_x(mfn), owner->domain_id, - res >= 0 ? "success" : "failed"); + SHADOW_PRINTK("privileged domain %d installs map of mfn %" PRI_mfn + " " + "which is owned by d%d: %s\n", + d->domain_id, mfn_x(mfn), owner->domain_id, + res >= 0 ? "success" : "failed"); } } /* Okay, it might still be a grant mapping PTE. Try it. */ - if ( unlikely(res < 0) && - (type == p2m_grant_map_rw || - (type == p2m_grant_map_ro && - !(shadow_l1e_get_flags(sl1e) & _PAGE_RW))) ) + if ( unlikely(res < 0) && (type == p2m_grant_map_rw || + (type == p2m_grant_map_ro && + !(shadow_l1e_get_flags(sl1e) & _PAGE_RW))) ) { /* It's a grant mapping. The grant table implementation will already have checked that we're supposed to have access, so @@ -913,8 +874,7 @@ shadow_get_page_from_l1e(shadow_l1e_t sl1e, struct domain *d, p2m_type_t type) return res; } -static void inline -shadow_put_page_from_l1e(shadow_l1e_t sl1e, struct domain *d) +static void inline shadow_put_page_from_l1e(shadow_l1e_t sl1e, struct domain *d) { if ( !shadow_mode_refcounts(d) ) return; @@ -923,10 +883,8 @@ shadow_put_page_from_l1e(shadow_l1e_t sl1e, struct domain *d) } #if GUEST_PAGING_LEVELS >= 4 -static int shadow_set_l4e(struct domain *d, - shadow_l4e_t *sl4e, - shadow_l4e_t new_sl4e, - mfn_t sl4mfn) +static int shadow_set_l4e(struct domain *d, shadow_l4e_t *sl4e, + shadow_l4e_t new_sl4e, mfn_t sl4mfn) { int flags = 0; shadow_l4e_t old_sl4e; @@ -934,10 +892,11 @@ static int shadow_set_l4e(struct domain *d, ASSERT(sl4e != NULL); old_sl4e = *sl4e; - if ( old_sl4e.l4 == new_sl4e.l4 ) return 0; /* Nothing to do */ + if ( old_sl4e.l4 == new_sl4e.l4 ) + return 0; /* Nothing to do */ - paddr = ((((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT) - | (((unsigned long)sl4e) & ~PAGE_MASK)); + paddr = ((((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT) | + (((unsigned long)sl4e) & ~PAGE_MASK)); if ( shadow_l4e_get_flags(new_sl4e) & _PAGE_PRESENT ) { @@ -966,9 +925,9 @@ static int shadow_set_l4e(struct domain *d, /* We lost a reference to an old mfn. 
*/ mfn_t osl3mfn = shadow_l4e_get_mfn(old_sl4e); - if ( !mfn_eq(osl3mfn, shadow_l4e_get_mfn(new_sl4e)) - || !perms_strictly_increased(shadow_l4e_get_flags(old_sl4e), - shadow_l4e_get_flags(new_sl4e)) ) + if ( !mfn_eq(osl3mfn, shadow_l4e_get_mfn(new_sl4e)) || + !perms_strictly_increased(shadow_l4e_get_flags(old_sl4e), + shadow_l4e_get_flags(new_sl4e)) ) { flags |= SHADOW_SET_FLUSH; } @@ -978,10 +937,8 @@ static int shadow_set_l4e(struct domain *d, return flags; } -static int shadow_set_l3e(struct domain *d, - shadow_l3e_t *sl3e, - shadow_l3e_t new_sl3e, - mfn_t sl3mfn) +static int shadow_set_l3e(struct domain *d, shadow_l3e_t *sl3e, + shadow_l3e_t new_sl3e, mfn_t sl3mfn) { int flags = 0; shadow_l3e_t old_sl3e; @@ -989,10 +946,11 @@ static int shadow_set_l3e(struct domain *d, ASSERT(sl3e != NULL); old_sl3e = *sl3e; - if ( old_sl3e.l3 == new_sl3e.l3 ) return 0; /* Nothing to do */ + if ( old_sl3e.l3 == new_sl3e.l3 ) + return 0; /* Nothing to do */ - paddr = ((((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT) - | (((unsigned long)sl3e) & ~PAGE_MASK)); + paddr = ((((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT) | + (((unsigned long)sl3e) & ~PAGE_MASK)); if ( shadow_l3e_get_flags(new_sl3e) & _PAGE_PRESENT ) { @@ -1025,10 +983,8 @@ static int shadow_set_l3e(struct domain *d, } #endif /* GUEST_PAGING_LEVELS >= 4 */ -static int shadow_set_l2e(struct domain *d, - shadow_l2e_t *sl2e, - shadow_l2e_t new_sl2e, - mfn_t sl2mfn) +static int shadow_set_l2e(struct domain *d, shadow_l2e_t *sl2e, + shadow_l2e_t new_sl2e, mfn_t sl2mfn) { int flags = 0; shadow_l2e_t old_sl2e; @@ -1040,7 +996,7 @@ static int shadow_set_l2e(struct domain *d, * page of the shadow to the first l2e, so make sure that we're * working with those: * Start with a pair of identical entries */ - shadow_l2e_t pair[2] = { new_sl2e, new_sl2e }; + shadow_l2e_t pair[2] = {new_sl2e, new_sl2e}; /* Align the pointer down so it's pointing at the first of the pair */ sl2e = (shadow_l2e_t *)((unsigned long)sl2e & ~(sizeof(shadow_l2e_t))); #endif @@ -1048,10 +1004,11 @@ static int shadow_set_l2e(struct domain *d, ASSERT(sl2e != NULL); old_sl2e = *sl2e; - if ( old_sl2e.l2 == new_sl2e.l2 ) return 0; /* Nothing to do */ + if ( old_sl2e.l2 == new_sl2e.l2 ) + return 0; /* Nothing to do */ - paddr = ((((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT) - | (((unsigned long)sl2e) & ~PAGE_MASK)); + paddr = ((((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT) | + (((unsigned long)sl2e) & ~PAGE_MASK)); if ( shadow_l2e_get_flags(new_sl2e) & _PAGE_PRESENT ) { @@ -1064,7 +1021,7 @@ static int shadow_set_l2e(struct domain *d, domain_crash(d); return SHADOW_SET_ERROR; } -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) { struct page_info *sp = mfn_to_page(sl1mfn); mfn_t gl1mfn; @@ -1074,8 +1031,8 @@ static int shadow_set_l2e(struct domain *d, /* If the shadow is a fl1 then the backpointer contains the GFN instead of the GMFN, and it's definitely not OOS. 
*/ - if ( (sp->u.sh.type != SH_type_fl1_shadow) && mfn_valid(gl1mfn) - && mfn_is_out_of_sync(gl1mfn) ) + if ( (sp->u.sh.type != SH_type_fl1_shadow) && mfn_valid(gl1mfn) && + mfn_is_out_of_sync(gl1mfn) ) sh_resync(d, gl1mfn); } #endif @@ -1111,8 +1068,7 @@ static int shadow_set_l2e(struct domain *d, } static inline void shadow_vram_get_l1e(shadow_l1e_t new_sl1e, - shadow_l1e_t *sl1e, - mfn_t sl1mfn, + shadow_l1e_t *sl1e, mfn_t sl1mfn, struct domain *d) { mfn_t mfn = shadow_l1e_get_mfn(new_sl1e); @@ -1120,7 +1076,7 @@ static inline void shadow_vram_get_l1e(shadow_l1e_t new_sl1e, unsigned long gfn; struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram; - if ( !dirty_vram /* tracking disabled? */ + if ( !dirty_vram /* tracking disabled? */ || !(flags & _PAGE_RW) /* read-only mapping? */ || !mfn_valid(mfn) ) /* mfn can be invalid in mmio_direct */ return; @@ -1136,14 +1092,13 @@ static inline void shadow_vram_get_l1e(shadow_l1e_t new_sl1e, if ( (page->u.inuse.type_info & PGT_count_mask) == 1 ) /* Initial guest reference, record it */ - dirty_vram->sl1ma[i] = mfn_to_maddr(sl1mfn) - | ((unsigned long)sl1e & ~PAGE_MASK); + dirty_vram->sl1ma[i] = + mfn_to_maddr(sl1mfn) | ((unsigned long)sl1e & ~PAGE_MASK); } } static inline void shadow_vram_put_l1e(shadow_l1e_t old_sl1e, - shadow_l1e_t *sl1e, - mfn_t sl1mfn, + shadow_l1e_t *sl1e, mfn_t sl1mfn, struct domain *d) { mfn_t mfn = shadow_l1e_get_mfn(old_sl1e); @@ -1151,7 +1106,7 @@ static inline void shadow_vram_put_l1e(shadow_l1e_t old_sl1e, unsigned long gfn; struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram; - if ( !dirty_vram /* tracking disabled? */ + if ( !dirty_vram /* tracking disabled? */ || !(flags & _PAGE_RW) /* read-only mapping? */ || !mfn_valid(mfn) ) /* mfn can be invalid in mmio_direct */ return; @@ -1165,13 +1120,14 @@ static inline void shadow_vram_put_l1e(shadow_l1e_t old_sl1e, unsigned long i = gfn - dirty_vram->begin_pfn; struct page_info *page = mfn_to_page(mfn); int dirty = 0; - paddr_t sl1ma = mfn_to_maddr(sl1mfn) - | ((unsigned long)sl1e & ~PAGE_MASK); + paddr_t sl1ma = + mfn_to_maddr(sl1mfn) | ((unsigned long)sl1e & ~PAGE_MASK); if ( (page->u.inuse.type_info & PGT_count_mask) == 1 ) { /* Last reference */ - if ( dirty_vram->sl1ma[i] == INVALID_PADDR ) { + if ( dirty_vram->sl1ma[i] == INVALID_PADDR ) + { /* We didn't know it was that one, let's say it is dirty */ dirty = 1; } @@ -1207,10 +1163,8 @@ static inline void shadow_vram_put_l1e(shadow_l1e_t old_sl1e, } } -static int shadow_set_l1e(struct domain *d, - shadow_l1e_t *sl1e, - shadow_l1e_t new_sl1e, - p2m_type_t new_type, +static int shadow_set_l1e(struct domain *d, shadow_l1e_t *sl1e, + shadow_l1e_t new_sl1e, p2m_type_t new_type, mfn_t sl1mfn) { int flags = 0; @@ -1221,18 +1175,19 @@ static int shadow_set_l1e(struct domain *d, ASSERT(sl1e != NULL); #if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC - if ( mfn_valid(new_gmfn) && mfn_oos_may_write(new_gmfn) - && ((shadow_l1e_get_flags(new_sl1e) & (_PAGE_RW|_PAGE_PRESENT)) - == (_PAGE_RW|_PAGE_PRESENT)) ) + if ( mfn_valid(new_gmfn) && mfn_oos_may_write(new_gmfn) && + ((shadow_l1e_get_flags(new_sl1e) & (_PAGE_RW | _PAGE_PRESENT)) == + (_PAGE_RW | _PAGE_PRESENT)) ) oos_fixup_add(d, new_gmfn, sl1mfn, pgentry_ptr_to_slot(sl1e)); #endif old_sl1e = *sl1e; - if ( old_sl1e.l1 == new_sl1e.l1 ) return 0; /* Nothing to do */ + if ( old_sl1e.l1 == new_sl1e.l1 ) + return 0; /* Nothing to do */ - if ( (shadow_l1e_get_flags(new_sl1e) & _PAGE_PRESENT) - && !sh_l1e_is_magic(new_sl1e) ) + if ( (shadow_l1e_get_flags(new_sl1e) & 
_PAGE_PRESENT) && + !sh_l1e_is_magic(new_sl1e) ) { /* About to install a new reference */ if ( shadow_mode_refcounts(d) ) @@ -1241,14 +1196,14 @@ static int shadow_set_l1e(struct domain *d, int rc; TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SHADOW_L1_GET_REF); - switch ( rc = shadow_get_page_from_l1e(new_sl1e, d, new_type) ) + switch (rc = shadow_get_page_from_l1e(new_sl1e, d, new_type)) { default: /* Doesn't look like a pagetable. */ flags |= SHADOW_SET_ERROR; new_sl1e = shadow_l1e_empty(); break; - case PAGE_FLIPPABLE & -PAGE_FLIPPABLE ... PAGE_FLIPPABLE: + case PAGE_FLIPPABLE & -PAGE_FLIPPABLE... PAGE_FLIPPABLE: ASSERT(!(rc & ~PAGE_FLIPPABLE)); new_sl1e = shadow_l1e_flip_flags(new_sl1e, rc); /* fall through */ @@ -1264,8 +1219,8 @@ static int shadow_set_l1e(struct domain *d, shadow_write_entries(sl1e, &new_sl1e, 1, sl1mfn); flags |= SHADOW_SET_CHANGED; - if ( (shadow_l1e_get_flags(old_sl1e) & _PAGE_PRESENT) - && !sh_l1e_is_magic(old_sl1e) ) + if ( (shadow_l1e_get_flags(old_sl1e) & _PAGE_PRESENT) && + !sh_l1e_is_magic(old_sl1e) ) { /* We lost a reference to an old mfn. */ /* N.B. Unlike higher-level sets, never need an extra flush @@ -1282,7 +1237,6 @@ static int shadow_set_l1e(struct domain *d, return flags; } - /**************************************************************************/ /* Macros to walk pagetables. These take the shadow of a pagetable and * walk every "interesting" entry. That is, they don't touch Xen mappings, @@ -1308,175 +1262,190 @@ static inline void increment_ptr_to_guest_entry(void *ptr) } /* All kinds of l1: touch all entries */ -#define _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code) \ -do { \ - int _i; \ - shadow_l1e_t *_sp = map_domain_page((_sl1mfn)); \ - ASSERT(mfn_to_page(_sl1mfn)->u.sh.type == SH_type_l1_shadow \ - || mfn_to_page(_sl1mfn)->u.sh.type == SH_type_fl1_shadow);\ - for ( _i = 0; _i < SHADOW_L1_PAGETABLE_ENTRIES; _i++ ) \ - { \ - (_sl1e) = _sp + _i; \ - if ( shadow_l1e_get_flags(*(_sl1e)) & _PAGE_PRESENT ) \ - {_code} \ - if ( _done ) break; \ - increment_ptr_to_guest_entry(_gl1p); \ - } \ - unmap_domain_page(_sp); \ -} while (0) +#define _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code) \ + do { \ + int _i; \ + shadow_l1e_t *_sp = map_domain_page((_sl1mfn)); \ + ASSERT(mfn_to_page(_sl1mfn)->u.sh.type == SH_type_l1_shadow || \ + mfn_to_page(_sl1mfn)->u.sh.type == SH_type_fl1_shadow); \ + for ( _i = 0; _i < SHADOW_L1_PAGETABLE_ENTRIES; _i++ ) \ + { \ + (_sl1e) = _sp + _i; \ + if ( shadow_l1e_get_flags(*(_sl1e)) & _PAGE_PRESENT ) \ + { \ + _code \ + } \ + if ( _done ) \ + break; \ + increment_ptr_to_guest_entry(_gl1p); \ + } \ + unmap_domain_page(_sp); \ + } while ( 0 ) /* 32-bit l1, on PAE or 64-bit shadows: need to walk both pages of shadow */ #if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2 -#define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code) \ -do { \ - int __done = 0; \ - _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, \ - ({ (__done = _done); }), _code); \ - _sl1mfn = sh_next_page(_sl1mfn); \ - if ( !__done ) \ - _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, \ - ({ (__done = _done); }), _code); \ -} while (0) +#define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code) \ + do { \ + int __done = 0; \ + _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, ({ (__done = _done); }), \ + _code); \ + _sl1mfn = sh_next_page(_sl1mfn); \ + if ( !__done ) \ + _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, \ + ({ (__done = _done); }), _code); \ + } while ( 0 ) #else /* Everything else; l1 shadows are only one page */ -#define 
SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code) \ - _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code) +#define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code) \ + _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code) #endif - #if GUEST_PAGING_LEVELS == 2 /* 32-bit l2 on PAE/64: four pages, touch every second entry */ -#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code) \ -do { \ - int _i, _j, __done = 0; \ - ASSERT(shadow_mode_external(_dom)); \ - ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_32_shadow); \ - for ( _j = 0; _j < 4 && !__done; _j++ ) \ - { \ - shadow_l2e_t *_sp = map_domain_page(_sl2mfn); \ - for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i += 2 ) \ - { \ - (_sl2e) = _sp + _i; \ - if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT ) \ - {_code} \ - if ( (__done = (_done)) ) break; \ - increment_ptr_to_guest_entry(_gl2p); \ - } \ - unmap_domain_page(_sp); \ - if ( _j < 3 ) _sl2mfn = sh_next_page(_sl2mfn); \ - } \ -} while (0) +#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code) \ + do { \ + int _i, _j, __done = 0; \ + ASSERT(shadow_mode_external(_dom)); \ + ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_32_shadow); \ + for ( _j = 0; _j < 4 && !__done; _j++ ) \ + { \ + shadow_l2e_t *_sp = map_domain_page(_sl2mfn); \ + for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i += 2 ) \ + { \ + (_sl2e) = _sp + _i; \ + if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT ) \ + { \ + _code \ + } \ + if ( (__done = (_done)) ) \ + break; \ + increment_ptr_to_guest_entry(_gl2p); \ + } \ + unmap_domain_page(_sp); \ + if ( _j < 3 ) \ + _sl2mfn = sh_next_page(_sl2mfn); \ + } \ + } while ( 0 ) #elif GUEST_PAGING_LEVELS == 3 /* PAE: touch all entries */ #define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code) \ -do { \ - int _i; \ - shadow_l2e_t *_sp = map_domain_page((_sl2mfn)); \ - ASSERT(shadow_mode_external(_dom)); \ - ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_pae_shadow \ - || mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow); \ - for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ ) \ - { \ - (_sl2e) = _sp + _i; \ - if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT ) \ - {_code} \ - if ( _done ) break; \ - increment_ptr_to_guest_entry(_gl2p); \ - } \ - unmap_domain_page(_sp); \ -} while (0) + do { \ + int _i; \ + shadow_l2e_t *_sp = map_domain_page((_sl2mfn)); \ + ASSERT(shadow_mode_external(_dom)); \ + ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_pae_shadow || \ + mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow); \ + for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ ) \ + { \ + (_sl2e) = _sp + _i; \ + if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT ) \ + { \ + _code \ + } \ + if ( _done ) \ + break; \ + increment_ptr_to_guest_entry(_gl2p); \ + } \ + unmap_domain_page(_sp); \ + } while ( 0 ) #else /* 64-bit l2: touch all entries except for PAE compat guests. 
*/ -#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code) \ -do { \ - int _i; \ - int _xen = !shadow_mode_external(_dom); \ - shadow_l2e_t *_sp = map_domain_page((_sl2mfn)); \ - ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_64_shadow ||\ - mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2h_64_shadow);\ - for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ ) \ - { \ - if ( (!(_xen)) \ - || !is_pv_32bit_domain(_dom) \ - || mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow \ - || (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) ) \ - { \ - (_sl2e) = _sp + _i; \ - if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT ) \ - {_code} \ - if ( _done ) break; \ - increment_ptr_to_guest_entry(_gl2p); \ - } \ - } \ - unmap_domain_page(_sp); \ -} while (0) +#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code) \ + do { \ + int _i; \ + int _xen = !shadow_mode_external(_dom); \ + shadow_l2e_t *_sp = map_domain_page((_sl2mfn)); \ + ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_64_shadow || \ + mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2h_64_shadow); \ + for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ ) \ + { \ + if ( (!(_xen)) || !is_pv_32bit_domain(_dom) || \ + mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow || \ + (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) ) \ + { \ + (_sl2e) = _sp + _i; \ + if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT ) \ + { \ + _code \ + } \ + if ( _done ) \ + break; \ + increment_ptr_to_guest_entry(_gl2p); \ + } \ + } \ + unmap_domain_page(_sp); \ + } while ( 0 ) #endif /* different kinds of l2 */ #if GUEST_PAGING_LEVELS == 4 /* 64-bit l3: touch all entries */ -#define SHADOW_FOREACH_L3E(_sl3mfn, _sl3e, _gl3p, _done, _code) \ -do { \ - int _i; \ - shadow_l3e_t *_sp = map_domain_page((_sl3mfn)); \ - ASSERT(mfn_to_page(_sl3mfn)->u.sh.type == SH_type_l3_64_shadow);\ - for ( _i = 0; _i < SHADOW_L3_PAGETABLE_ENTRIES; _i++ ) \ - { \ - (_sl3e) = _sp + _i; \ - if ( shadow_l3e_get_flags(*(_sl3e)) & _PAGE_PRESENT ) \ - {_code} \ - if ( _done ) break; \ - increment_ptr_to_guest_entry(_gl3p); \ - } \ - unmap_domain_page(_sp); \ -} while (0) +#define SHADOW_FOREACH_L3E(_sl3mfn, _sl3e, _gl3p, _done, _code) \ + do { \ + int _i; \ + shadow_l3e_t *_sp = map_domain_page((_sl3mfn)); \ + ASSERT(mfn_to_page(_sl3mfn)->u.sh.type == SH_type_l3_64_shadow); \ + for ( _i = 0; _i < SHADOW_L3_PAGETABLE_ENTRIES; _i++ ) \ + { \ + (_sl3e) = _sp + _i; \ + if ( shadow_l3e_get_flags(*(_sl3e)) & _PAGE_PRESENT ) \ + { \ + _code \ + } \ + if ( _done ) \ + break; \ + increment_ptr_to_guest_entry(_gl3p); \ + } \ + unmap_domain_page(_sp); \ + } while ( 0 ) /* 64-bit l4: avoid Xen mappings */ -#define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _dom, _code) \ -do { \ - shadow_l4e_t *_sp = map_domain_page((_sl4mfn)); \ - int _xen = !shadow_mode_external(_dom); \ - int _i; \ - ASSERT(mfn_to_page(_sl4mfn)->u.sh.type == SH_type_l4_64_shadow);\ - for ( _i = 0; _i < SHADOW_L4_PAGETABLE_ENTRIES; _i++ ) \ - { \ - if ( (!(_xen)) || is_guest_l4_slot(_dom, _i) ) \ - { \ - (_sl4e) = _sp + _i; \ - if ( shadow_l4e_get_flags(*(_sl4e)) & _PAGE_PRESENT ) \ - {_code} \ - if ( _done ) break; \ - } \ - increment_ptr_to_guest_entry(_gl4p); \ - } \ - unmap_domain_page(_sp); \ -} while (0) +#define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _dom, _code) \ + do { \ + shadow_l4e_t *_sp = map_domain_page((_sl4mfn)); \ + int _xen = !shadow_mode_external(_dom); \ + int _i; \ + ASSERT(mfn_to_page(_sl4mfn)->u.sh.type == SH_type_l4_64_shadow); \ + for ( _i = 0; _i < 
SHADOW_L4_PAGETABLE_ENTRIES; _i++ ) \ + { \ + if ( (!(_xen)) || is_guest_l4_slot(_dom, _i) ) \ + { \ + (_sl4e) = _sp + _i; \ + if ( shadow_l4e_get_flags(*(_sl4e)) & _PAGE_PRESENT ) \ + { \ + _code \ + } \ + if ( _done ) \ + break; \ + } \ + increment_ptr_to_guest_entry(_gl4p); \ + } \ + unmap_domain_page(_sp); \ + } while ( 0 ) #endif - /**************************************************************************/ /* Create a shadow of a given guest page. */ -static mfn_t -sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type) +static mfn_t sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type) { struct domain *d = v->domain; mfn_t smfn = shadow_alloc(d, shadow_type, mfn_x(gmfn)); - SHADOW_DEBUG(MAKE_SHADOW, "(%"PRI_mfn", %u)=>%"PRI_mfn"\n", - mfn_x(gmfn), shadow_type, mfn_x(smfn)); + SHADOW_DEBUG(MAKE_SHADOW, "(%" PRI_mfn ", %u)=>%" PRI_mfn "\n", mfn_x(gmfn), + shadow_type, mfn_x(smfn)); if ( sh_type_has_up_pointer(d, shadow_type) ) /* Lower-level shadow, not yet linked form a higher level */ mfn_to_page(smfn)->up = 0; #if GUEST_PAGING_LEVELS == 4 -#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL ) if ( shadow_type == SH_type_l4_64_shadow && unlikely(d->arch.paging.shadow.opt_flags & SHOPT_LINUX_L3_TOPLEVEL) ) { @@ -1489,7 +1458,7 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type) struct page_info *sp, *t; unsigned int l4count = 0; - page_list_for_each(sp, &d->arch.paging.shadow.pinned_shadows) + page_list_for_each (sp, &d->arch.paging.shadow.pinned_shadows) { if ( sp->u.sh.type == SH_type_l4_64_shadow ) l4count++; @@ -1497,7 +1466,8 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type) if ( l4count > 2 * d->max_vcpus ) { /* Unpin all the pinned l3 tables, and don't pin any more. 
*/ - page_list_for_each_safe(sp, t, &d->arch.paging.shadow.pinned_shadows) + page_list_for_each_safe(sp, t, + &d->arch.paging.shadow.pinned_shadows) { if ( sp->u.sh.type == SH_type_l3_64_shadow ) sh_unpin(d, page_to_mfn(sp)); @@ -1521,8 +1491,9 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type) BUILD_BUG_ON(sizeof(l4_pgentry_t) != sizeof(shadow_l4e_t)); - init_xen_l4_slots(l4t, gmfn, d, smfn, (!is_pv_32bit_domain(d) && - VM_ASSIST(d, m2p_strict))); + init_xen_l4_slots( + l4t, gmfn, d, smfn, + (!is_pv_32bit_domain(d) && VM_ASSIST(d, m2p_strict))); unmap_domain_page(l4t); } break; @@ -1539,7 +1510,8 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type) } break; #endif - default: /* Do nothing */ break; + default: /* Do nothing */ + break; } } @@ -1550,22 +1522,19 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type) } /* Make a splintered superpage shadow */ -static mfn_t -make_fl1_shadow(struct domain *d, gfn_t gfn) +static mfn_t make_fl1_shadow(struct domain *d, gfn_t gfn) { mfn_t smfn = shadow_alloc(d, SH_type_fl1_shadow, gfn_x(gfn)); - SHADOW_DEBUG(MAKE_SHADOW, "(%" SH_PRI_gfn ")=>%" PRI_mfn "\n", - gfn_x(gfn), mfn_x(smfn)); + SHADOW_DEBUG(MAKE_SHADOW, "(%" SH_PRI_gfn ")=>%" PRI_mfn "\n", gfn_x(gfn), + mfn_x(smfn)); set_fl1_shadow_status(d, gfn, smfn); return smfn; } - #if SHADOW_PAGING_LEVELS == GUEST_PAGING_LEVELS -mfn_t -sh_make_monitor_table(struct vcpu *v) +mfn_t sh_make_monitor_table(struct vcpu *v) { struct domain *d = v->domain; @@ -1600,8 +1569,8 @@ sh_make_monitor_table(struct vcpu *v) m3mfn = shadow_alloc(d, SH_type_monitor_table, 0); mfn_to_page(m3mfn)->shadow_flags = 3; - l4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)] - = l4e_from_mfn(m3mfn, __PAGE_HYPERVISOR_RW); + l4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)] = + l4e_from_mfn(m3mfn, __PAGE_HYPERVISOR_RW); m2mfn = shadow_alloc(d, SH_type_monitor_table, 0); mfn_to_page(m2mfn)->shadow_flags = 2; @@ -1630,7 +1599,6 @@ sh_make_monitor_table(struct vcpu *v) unmap_domain_page(l3e); } - } #endif /* SHADOW_PAGING_LEVELS < 4 */ @@ -1654,9 +1622,8 @@ sh_make_monitor_table(struct vcpu *v) * to access them. */ #if GUEST_PAGING_LEVELS >= 4 -static shadow_l4e_t * shadow_get_and_create_l4e(struct vcpu *v, - walk_t *gw, - mfn_t *sl4mfn) +static shadow_l4e_t *shadow_get_and_create_l4e(struct vcpu *v, walk_t *gw, + mfn_t *sl4mfn) { /* There is always a shadow of the top level table. Get it. */ *sl4mfn = pagetable_get_mfn(v->arch.shadow_table[0]); @@ -1664,16 +1631,15 @@ static shadow_l4e_t * shadow_get_and_create_l4e(struct vcpu *v, return sh_linear_l4_table(v) + shadow_l4_linear_offset(gw->va); } -static shadow_l3e_t * shadow_get_and_create_l3e(struct vcpu *v, - walk_t *gw, - mfn_t *sl3mfn, - fetch_type_t ft, - int *resync) +static shadow_l3e_t *shadow_get_and_create_l3e(struct vcpu *v, walk_t *gw, + mfn_t *sl3mfn, fetch_type_t ft, + int *resync) { struct domain *d = v->domain; mfn_t sl4mfn; shadow_l4e_t *sl4e; - if ( !mfn_valid(gw->l3mfn) ) return NULL; /* No guest page. */ + if ( !mfn_valid(gw->l3mfn) ) + return NULL; /* No guest page. */ /* Get the l4e */ sl4e = shadow_get_and_create_l4e(v, gw, &sl4mfn); ASSERT(sl4e != NULL); @@ -1700,31 +1666,29 @@ static shadow_l3e_t * shadow_get_and_create_l3e(struct vcpu *v, if ( r & SHADOW_SET_ERROR ) return NULL; -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) *resync |= 1; #endif - } /* Now follow it down a level. Guaranteed to succeed. 
*/ return sh_linear_l3_table(v) + shadow_l3_linear_offset(gw->va); } #endif /* GUEST_PAGING_LEVELS >= 4 */ - -static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v, - walk_t *gw, - mfn_t *sl2mfn, - fetch_type_t ft, - int *resync) +static shadow_l2e_t *shadow_get_and_create_l2e(struct vcpu *v, walk_t *gw, + mfn_t *sl2mfn, fetch_type_t ft, + int *resync) { #if GUEST_PAGING_LEVELS >= 4 /* 64bit... */ struct domain *d = v->domain; mfn_t sl3mfn = INVALID_MFN; shadow_l3e_t *sl3e; - if ( !mfn_valid(gw->l2mfn) ) return NULL; /* No guest page. */ + if ( !mfn_valid(gw->l2mfn) ) + return NULL; /* No guest page. */ /* Get the l3e */ sl3e = shadow_get_and_create_l3e(v, gw, &sl3mfn, ft, resync); - if ( sl3e == NULL ) return NULL; + if ( sl3e == NULL ) + return NULL; if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT ) { *sl2mfn = shadow_l3e_get_mfn(*sl3e); @@ -1737,8 +1701,7 @@ static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v, unsigned int t = SH_type_l2_shadow; /* Tag compat L2 containing hypervisor (m2p) mappings */ - if ( is_pv_32bit_vcpu(v) && - guest_l4_table_offset(gw->va) == 0 && + if ( is_pv_32bit_vcpu(v) && guest_l4_table_offset(gw->va) == 0 && guest_l3_table_offset(gw->va) == 3 ) t = SH_type_l2h_shadow; @@ -1756,40 +1719,36 @@ static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v, if ( r & SHADOW_SET_ERROR ) return NULL; -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) *resync |= 1; #endif - } /* Now follow it down a level. Guaranteed to succeed. */ return sh_linear_l2_table(v) + shadow_l2_linear_offset(gw->va); #elif GUEST_PAGING_LEVELS == 3 /* PAE... */ /* We never demand-shadow PAE l3es: they are only created in * sh_update_cr3(). Check if the relevant sl3e is present. */ - shadow_l3e_t *sl3e = ((shadow_l3e_t *)&v->arch.paging.shadow.l3table) - + shadow_l3_linear_offset(gw->va); + shadow_l3e_t *sl3e = ((shadow_l3e_t *)&v->arch.paging.shadow.l3table) + + shadow_l3_linear_offset(gw->va); if ( !(shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT) ) return NULL; *sl2mfn = shadow_l3e_get_mfn(*sl3e); ASSERT(mfn_valid(*sl2mfn)); return sh_linear_l2_table(v) + shadow_l2_linear_offset(gw->va); -#else /* 32bit... */ +#else /* 32bit... */ /* There is always a shadow of the top level table. Get it. */ *sl2mfn = pagetable_get_mfn(v->arch.shadow_table[0]); /* This next line is important: the guest l2 has a 16k * shadow, we need to return the right mfn of the four. This * call will set it for us as a side-effect. */ - (void) shadow_l2_index(sl2mfn, guest_l2_table_offset(gw->va)); + (void)shadow_l2_index(sl2mfn, guest_l2_table_offset(gw->va)); /* Reading the top level table is always valid. */ return sh_linear_l2_table(v) + shadow_l2_linear_offset(gw->va); #endif } - -static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v, - walk_t *gw, - mfn_t *sl1mfn, - fetch_type_t ft) +static shadow_l1e_t *shadow_get_and_create_l1e(struct vcpu *v, walk_t *gw, + mfn_t *sl1mfn, fetch_type_t ft) { struct domain *d = v->domain; mfn_t sl2mfn; @@ -1798,14 +1757,15 @@ static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v, /* Get the l2e */ sl2e = shadow_get_and_create_l2e(v, gw, &sl2mfn, ft, &resync); - if ( sl2e == NULL ) return NULL; + if ( sl2e == NULL ) + return NULL; /* Install the sl1 in the l2e if it wasn't there or if we need to * re-do it to fix a PSE dirty bit. 
*/ - if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT - && likely(ft != ft_demand_write - || (shadow_l2e_get_flags(*sl2e) & _PAGE_RW) - || !(guest_l2e_get_flags(gw->l2e) & _PAGE_PSE)) ) + if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT && + likely(ft != ft_demand_write || + (shadow_l2e_get_flags(*sl2e) & _PAGE_RW) || + !(guest_l2e_get_flags(gw->l2e) & _PAGE_PSE)) ) { *sl1mfn = shadow_l2e_get_mfn(*sl2e); ASSERT(mfn_valid(*sl1mfn)); @@ -1831,7 +1791,8 @@ static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v, else { /* Shadowing an actual guest l1 table */ - if ( !mfn_valid(gw->l1mfn) ) return NULL; /* No guest page. */ + if ( !mfn_valid(gw->l1mfn) ) + return NULL; /* No guest page. */ *sl1mfn = get_shadow_status(d, gw->l1mfn, SH_type_l1_shadow); if ( !mfn_valid(*sl1mfn) ) { @@ -1851,10 +1812,10 @@ static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v, * the right mfn of the pair. This call will set it for us as a * side-effect. (In all other cases, it's a no-op and will be * compiled out.) */ - (void) shadow_l1_index(sl1mfn, guest_l1_table_offset(gw->va)); + (void)shadow_l1_index(sl1mfn, guest_l1_table_offset(gw->va)); } -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* All pages walked are now pagetables. Safe to resync pages in case level 4 or 3 shadows were set. */ if ( resync ) @@ -1865,8 +1826,6 @@ static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v, return sh_linear_l1_table(v) + shadow_l1_linear_offset(gw->va); } - - /**************************************************************************/ /* Destructors for shadow tables: * Unregister the shadow, decrement refcounts of any entries present in it, @@ -1885,7 +1844,7 @@ void sh_destroy_l4_shadow(struct domain *d, mfn_t smfn) u32 t = sp->u.sh.type; mfn_t gmfn, sl4mfn; - SHADOW_DEBUG(DESTROY_SHADOW, "%"PRI_mfn"\n", mfn_x(smfn)); + SHADOW_DEBUG(DESTROY_SHADOW, "%" PRI_mfn "\n", mfn_x(smfn)); ASSERT(t == SH_type_l4_shadow); ASSERT(sp->u.sh.head); @@ -1899,8 +1858,8 @@ void sh_destroy_l4_shadow(struct domain *d, mfn_t smfn) if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT ) { sh_put_ref(d, shadow_l4e_get_mfn(*sl4e), - (((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT) - | ((unsigned long)sl4e & ~PAGE_MASK)); + (((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT) | + ((unsigned long)sl4e & ~PAGE_MASK)); } }); @@ -1915,7 +1874,7 @@ void sh_destroy_l3_shadow(struct domain *d, mfn_t smfn) u32 t = sp->u.sh.type; mfn_t gmfn, sl3mfn; - SHADOW_DEBUG(DESTROY_SHADOW, "%"PRI_mfn"\n", mfn_x(smfn)); + SHADOW_DEBUG(DESTROY_SHADOW, "%" PRI_mfn "\n", mfn_x(smfn)); ASSERT(t == SH_type_l3_shadow); ASSERT(sp->u.sh.head); @@ -1929,8 +1888,8 @@ void sh_destroy_l3_shadow(struct domain *d, mfn_t smfn) SHADOW_FOREACH_L3E(sl3mfn, sl3e, 0, 0, { if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT ) sh_put_ref(d, shadow_l3e_get_mfn(*sl3e), - (((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT) - | ((unsigned long)sl3e & ~PAGE_MASK)); + (((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT) | + ((unsigned long)sl3e & ~PAGE_MASK)); }); /* Put the memory back in the pool */ @@ -1938,7 +1897,6 @@ void sh_destroy_l3_shadow(struct domain *d, mfn_t smfn) } #endif /* GUEST_PAGING_LEVELS >= 4 */ - void sh_destroy_l2_shadow(struct domain *d, mfn_t smfn) { shadow_l2e_t *sl2e; @@ -1946,7 +1904,7 @@ void sh_destroy_l2_shadow(struct domain *d, mfn_t smfn) u32 t = sp->u.sh.type; mfn_t gmfn, sl2mfn; - SHADOW_DEBUG(DESTROY_SHADOW, "%"PRI_mfn"\n", mfn_x(smfn)); + SHADOW_DEBUG(DESTROY_SHADOW, "%" PRI_mfn "\n", mfn_x(smfn)); #if GUEST_PAGING_LEVELS >= 3 
ASSERT(t == SH_type_l2_shadow || t == SH_type_l2h_shadow); @@ -1965,8 +1923,8 @@ void sh_destroy_l2_shadow(struct domain *d, mfn_t smfn) SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, d, { if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT ) sh_put_ref(d, shadow_l2e_get_mfn(*sl2e), - (((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT) - | ((unsigned long)sl2e & ~PAGE_MASK)); + (((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT) | + ((unsigned long)sl2e & ~PAGE_MASK)); }); /* Put the memory back in the pool */ @@ -1979,7 +1937,7 @@ void sh_destroy_l1_shadow(struct domain *d, mfn_t smfn) struct page_info *sp = mfn_to_page(smfn); u32 t = sp->u.sh.type; - SHADOW_DEBUG(DESTROY_SHADOW, "%"PRI_mfn"\n", mfn_x(smfn)); + SHADOW_DEBUG(DESTROY_SHADOW, "%" PRI_mfn "\n", mfn_x(smfn)); ASSERT(t == SH_type_l1_shadow || t == SH_type_fl1_shadow); ASSERT(sp->u.sh.head); @@ -2001,8 +1959,9 @@ void sh_destroy_l1_shadow(struct domain *d, mfn_t smfn) /* Decrement refcounts of all the old entries */ mfn_t sl1mfn = smfn; SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, 0, { - if ( (shadow_l1e_get_flags(*sl1e) & _PAGE_PRESENT) - && !sh_l1e_is_magic(*sl1e) ) { + if ( (shadow_l1e_get_flags(*sl1e) & _PAGE_PRESENT) && + !sh_l1e_is_magic(*sl1e) ) + { shadow_vram_put_l1e(*sl1e, sl1e, sl1mfn, d); shadow_put_page_from_l1e(*sl1e, d); } @@ -2075,7 +2034,7 @@ void sh_unhook_32b_mappings(struct domain *d, mfn_t sl2mfn, int user_only) shadow_l2e_t *sl2e; SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, d, { if ( !user_only || (sl2e->l2 & _PAGE_USER) ) - (void) shadow_set_l2e(d, sl2e, shadow_l2e_empty(), sl2mfn); + (void)shadow_set_l2e(d, sl2e, shadow_l2e_empty(), sl2mfn); }); } @@ -2087,7 +2046,7 @@ void sh_unhook_pae_mappings(struct domain *d, mfn_t sl2mfn, int user_only) shadow_l2e_t *sl2e; SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, d, { if ( !user_only || (sl2e->l2 & _PAGE_USER) ) - (void) shadow_set_l2e(d, sl2e, shadow_l2e_empty(), sl2mfn); + (void)shadow_set_l2e(d, sl2e, shadow_l2e_empty(), sl2mfn); }); } @@ -2098,7 +2057,7 @@ void sh_unhook_64b_mappings(struct domain *d, mfn_t sl4mfn, int user_only) shadow_l4e_t *sl4e; SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, d, { if ( !user_only || (sl4e->l4 & _PAGE_USER) ) - (void) shadow_set_l4e(d, sl4e, shadow_l4e_empty(), sl4mfn); + (void)shadow_set_l4e(d, sl4e, shadow_l4e_empty(), sl4mfn); }); } @@ -2138,7 +2097,7 @@ static int validate_gl4e(struct vcpu *v, void *new_ge, mfn_t sl4mfn, void *se) else if ( p2mt != p2m_populate_on_demand ) result |= SHADOW_SET_ERROR; -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) if ( mfn_valid(sl3mfn) ) shadow_resync_all(v); #endif @@ -2148,8 +2107,8 @@ static int validate_gl4e(struct vcpu *v, void *new_ge, mfn_t sl4mfn, void *se) // check for updates to xen reserved slots if ( !shadow_mode_external(d) ) { - int shadow_index = (((unsigned long)sl4p & ~PAGE_MASK) / - sizeof(shadow_l4e_t)); + int shadow_index = + (((unsigned long)sl4p & ~PAGE_MASK) / sizeof(shadow_l4e_t)); int reserved_xen_slot = !is_guest_l4_slot(d, shadow_index); if ( unlikely(reserved_xen_slot) ) @@ -2157,7 +2116,7 @@ static int validate_gl4e(struct vcpu *v, void *new_ge, mfn_t sl4mfn, void *se) // attempt by the guest to write to a xen reserved slot // SHADOW_PRINTK("out-of-range update " - "sl4mfn=%"PRI_mfn" index=%#x val=%" SH_PRI_pte "\n", + "sl4mfn=%" PRI_mfn " index=%#x val=%" SH_PRI_pte "\n", mfn_x(sl4mfn), shadow_index, new_sl4e.l4); if ( shadow_l4e_get_flags(new_sl4e) & _PAGE_PRESENT ) { @@ -2174,7 +2133,6 @@ static int validate_gl4e(struct vcpu *v, void *new_ge, mfn_t sl4mfn, void 
*se) return result; } - static int validate_gl3e(struct vcpu *v, void *new_ge, mfn_t sl3mfn, void *se) { struct domain *d = v->domain; @@ -2197,7 +2155,7 @@ static int validate_gl3e(struct vcpu *v, void *new_ge, mfn_t sl3mfn, void *se) else if ( p2mt != p2m_populate_on_demand ) result |= SHADOW_SET_ERROR; -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) if ( mfn_valid(sl2mfn) ) shadow_resync_all(v); #endif @@ -2266,7 +2224,7 @@ static int validate_gl1e(struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se) mfn_t gmfn = INVALID_MFN; p2m_type_t p2mt = p2m_invalid; int result = 0; -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) mfn_t gl1mfn; #endif /* OOS */ @@ -2282,10 +2240,9 @@ static int validate_gl1e(struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se) l1e_propagate_from_guest(v, new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt); result |= shadow_set_l1e(d, sl1p, new_sl1e, p2mt, sl1mfn); -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) gl1mfn = backpointer(mfn_to_page(sl1mfn)); - if ( mfn_valid(gl1mfn) - && mfn_is_out_of_sync(gl1mfn) ) + if ( mfn_valid(gl1mfn) && mfn_is_out_of_sync(gl1mfn) ) { /* Update the OOS snapshot. */ mfn_t snpmfn = oos_snapshot_lookup(d, gl1mfn); @@ -2302,7 +2259,7 @@ static int validate_gl1e(struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se) return result; } -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /**************************************************************************/ /* Special validation function for re-syncing out-of-sync shadows. * Walks the *shadow* page, and for every entry that it finds, @@ -2326,7 +2283,7 @@ void sh_resync_l1(struct vcpu *v, mfn_t gl1mfn, mfn_t snpmfn) gp = map_domain_page(gl1mfn); gl1p = gp; - SHADOW_FOREACH_L1E(sl1mfn, sl1p, &gl1p, 0, { + SHADOW_FOREACH_L1E(sl1mfn, sl1p, &gl1p, 0, { guest_l1e_t gl1e = *gl1p; guest_l1e_t *snpl1p = (guest_l1e_t *)snp + guest_index(gl1p); @@ -2382,7 +2339,7 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn) smfn = maddr_to_mfn(sp->up); ASSERT(mfn_valid(smfn)); -#if (SHADOW_PAGING_LEVELS == 4) +#if ( SHADOW_PAGING_LEVELS == 4 ) /* up to l3 */ sp = mfn_to_page(smfn); ASSERT(sh_type_has_up_pointer(d, SH_type_l2_shadow)); @@ -2393,20 +2350,20 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn) /* up to l4 */ sp = mfn_to_page(smfn); - if ( sp->u.sh.count != 1 - || !sh_type_has_up_pointer(d, SH_type_l3_64_shadow) || !sp->up ) + if ( sp->u.sh.count != 1 || + !sh_type_has_up_pointer(d, SH_type_l3_64_shadow) || !sp->up ) return 0; smfn = maddr_to_mfn(sp->up); ASSERT(mfn_valid(smfn)); #endif if ( pagetable_get_pfn(v->arch.shadow_table[0]) == mfn_x(smfn) -#if (SHADOW_PAGING_LEVELS == 3) - || pagetable_get_pfn(v->arch.shadow_table[1]) == mfn_x(smfn) - || pagetable_get_pfn(v->arch.shadow_table[2]) == mfn_x(smfn) - || pagetable_get_pfn(v->arch.shadow_table[3]) == mfn_x(smfn) +#if ( SHADOW_PAGING_LEVELS == 3 ) + || pagetable_get_pfn(v->arch.shadow_table[1]) == mfn_x(smfn) || + pagetable_get_pfn(v->arch.shadow_table[2]) == mfn_x(smfn) || + pagetable_get_pfn(v->arch.shadow_table[3]) == mfn_x(smfn) #endif - ) + ) return 0; /* Only in use in one toplevel shadow, and it's not the one we're @@ -2415,18 +2372,14 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn) } #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */ - /**************************************************************************/ /* 
Functions which translate and install the shadows of arbitrary guest * entries that we have just seen the guest write. */ - -static inline int -sh_map_and_validate(struct vcpu *v, mfn_t gmfn, - void *new_gp, u32 size, u32 sh_type, - u32 (*shadow_index)(mfn_t *smfn, u32 idx), - int (*validate_ge)(struct vcpu *v, void *ge, - mfn_t smfn, void *se)) +static inline int sh_map_and_validate( + struct vcpu *v, mfn_t gmfn, void *new_gp, u32 size, u32 sh_type, + u32 (*shadow_index)(mfn_t *smfn, u32 idx), + int (*validate_ge)(struct vcpu *v, void *ge, mfn_t smfn, void *se)) /* Generic function for mapping and validating. */ { struct domain *d = v->domain; @@ -2436,9 +2389,9 @@ sh_map_and_validate(struct vcpu *v, mfn_t gmfn, int result = 0; /* Align address and size to guest entry boundaries */ - size += (unsigned long)new_gp & (sizeof (guest_l1e_t) - 1); - new_gp = (void *)((unsigned long)new_gp & ~(sizeof (guest_l1e_t) - 1)); - size = (size + sizeof (guest_l1e_t) - 1) & ~(sizeof (guest_l1e_t) - 1); + size += (unsigned long)new_gp & (sizeof(guest_l1e_t) - 1); + new_gp = (void *)((unsigned long)new_gp & ~(sizeof(guest_l1e_t) - 1)); + size = (size + sizeof(guest_l1e_t) - 1) & ~(sizeof(guest_l1e_t) - 1); ASSERT(size + (((unsigned long)new_gp) & ~PAGE_MASK) <= PAGE_SIZE); /* Map the shadow page */ @@ -2462,10 +2415,7 @@ sh_map_and_validate(struct vcpu *v, mfn_t gmfn, unmap_domain_page(sl1p); sl1p = map_domain_page(map_mfn); } - result |= validate_ge(v, - new_gp, - map_mfn, - &sl1p[shadow_idx]); + result |= validate_ge(v, new_gp, map_mfn, &sl1p[shadow_idx]); size -= sizeof(guest_l1e_t); new_gp += sizeof(guest_l1e_t); } @@ -2473,70 +2423,53 @@ sh_map_and_validate(struct vcpu *v, mfn_t gmfn, return result; } - -int -sh_map_and_validate_gl4e(struct vcpu *v, mfn_t gl4mfn, - void *new_gl4p, u32 size) +int sh_map_and_validate_gl4e(struct vcpu *v, mfn_t gl4mfn, void *new_gl4p, + u32 size) { #if GUEST_PAGING_LEVELS >= 4 - return sh_map_and_validate(v, gl4mfn, new_gl4p, size, - SH_type_l4_shadow, - shadow_l4_index, - validate_gl4e); + return sh_map_and_validate(v, gl4mfn, new_gl4p, size, SH_type_l4_shadow, + shadow_l4_index, validate_gl4e); #else // ! GUEST_PAGING_LEVELS >= 4 BUG(); /* Called in wrong paging mode! */ #endif } -int -sh_map_and_validate_gl3e(struct vcpu *v, mfn_t gl3mfn, - void *new_gl3p, u32 size) +int sh_map_and_validate_gl3e(struct vcpu *v, mfn_t gl3mfn, void *new_gl3p, + u32 size) { #if GUEST_PAGING_LEVELS >= 4 - return sh_map_and_validate(v, gl3mfn, new_gl3p, size, - SH_type_l3_shadow, - shadow_l3_index, - validate_gl3e); + return sh_map_and_validate(v, gl3mfn, new_gl3p, size, SH_type_l3_shadow, + shadow_l3_index, validate_gl3e); #else // ! GUEST_PAGING_LEVELS >= 4 BUG(); /* Called in wrong paging mode! 
*/ #endif } -int -sh_map_and_validate_gl2e(struct vcpu *v, mfn_t gl2mfn, - void *new_gl2p, u32 size) +int sh_map_and_validate_gl2e(struct vcpu *v, mfn_t gl2mfn, void *new_gl2p, + u32 size) { - return sh_map_and_validate(v, gl2mfn, new_gl2p, size, - SH_type_l2_shadow, - shadow_l2_index, - validate_gl2e); + return sh_map_and_validate(v, gl2mfn, new_gl2p, size, SH_type_l2_shadow, + shadow_l2_index, validate_gl2e); } -int -sh_map_and_validate_gl2he(struct vcpu *v, mfn_t gl2mfn, - void *new_gl2p, u32 size) +int sh_map_and_validate_gl2he(struct vcpu *v, mfn_t gl2mfn, void *new_gl2p, + u32 size) { #if GUEST_PAGING_LEVELS >= 3 - return sh_map_and_validate(v, gl2mfn, new_gl2p, size, - SH_type_l2h_shadow, - shadow_l2_index, - validate_gl2e); + return sh_map_and_validate(v, gl2mfn, new_gl2p, size, SH_type_l2h_shadow, + shadow_l2_index, validate_gl2e); #else /* Non-PAE guests don't have different kinds of l2 table */ BUG(); /* Called in wrong paging mode! */ #endif } -int -sh_map_and_validate_gl1e(struct vcpu *v, mfn_t gl1mfn, - void *new_gl1p, u32 size) +int sh_map_and_validate_gl1e(struct vcpu *v, mfn_t gl1mfn, void *new_gl1p, + u32 size) { - return sh_map_and_validate(v, gl1mfn, new_gl1p, size, - SH_type_l1_shadow, - shadow_l1_index, - validate_gl1e); + return sh_map_and_validate(v, gl1mfn, new_gl1p, size, SH_type_l1_shadow, + shadow_l1_index, validate_gl1e); } - /**************************************************************************/ /* Optimization: Prefetch multiple L1 entries. This is called after we have * demand-faulted a shadow l1e in the fault handler, to see if it's @@ -2548,8 +2481,8 @@ sh_map_and_validate_gl1e(struct vcpu *v, mfn_t gl1mfn, /* XXX magic number */ #define PREFETCH_DISTANCE 32 -static void sh_prefetch(struct vcpu *v, walk_t *gw, - shadow_l1e_t *ptr_sl1e, mfn_t sl1mfn) +static void sh_prefetch(struct vcpu *v, walk_t *gw, shadow_l1e_t *ptr_sl1e, + mfn_t sl1mfn) { struct domain *d = v->domain; int i, dist; @@ -2559,11 +2492,10 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw, shadow_l1e_t sl1e; u32 gflags; p2m_type_t p2mt; -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) guest_l1e_t *snpl1p = NULL; #endif /* OOS */ - /* Prefetch no further than the end of the _shadow_ l1 MFN */ dist = (PAGE_SIZE - ((unsigned long)ptr_sl1e & ~PAGE_MASK)) / sizeof sl1e; /* And no more than a maximum fetches-per-fault */ @@ -2576,7 +2508,7 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw, gl1p = map_domain_page(gw->l1mfn); gl1p += guest_l1_table_offset(gw->va); -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) if ( mfn_is_out_of_sync(gw->l1mfn) ) { mfn_t snpmfn = oos_snapshot_lookup(d, gw->l1mfn); @@ -2588,7 +2520,7 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw, #endif /* OOS */ } - for ( i = 1; i < dist ; i++ ) + for ( i = 1; i < dist; i++ ) { /* No point in prefetching if there's already a shadow */ if ( ptr_sl1e[i].l1 != 0 ) @@ -2601,9 +2533,9 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw, /* Not worth continuing if we hit an entry that will need another * fault for A/D-bit propagation anyway */ gflags = guest_l1e_get_flags(gl1e); - if ( (gflags & _PAGE_PRESENT) - && (!(gflags & _PAGE_ACCESSED) - || ((gflags & _PAGE_RW) && !(gflags & _PAGE_DIRTY))) ) + if ( (gflags & _PAGE_PRESENT) && + (!(gflags & _PAGE_ACCESSED) || + ((gflags & _PAGE_RW) && !(gflags & _PAGE_DIRTY))) ) break; } else @@ -2611,9 +2543,9 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw, /* 
Fragmented superpage, unless we've been called wrongly */ ASSERT(guest_l2e_get_flags(gw->l2e) & _PAGE_PSE); /* Increment the l1e's GFN by the right number of guest pages */ - gl1e = guest_l1e_from_gfn( - _gfn(gfn_x(guest_l1e_get_gfn(gw->l1e)) + i), - guest_l1e_get_flags(gw->l1e)); + gl1e = + guest_l1e_from_gfn(_gfn(gfn_x(guest_l1e_get_gfn(gw->l1e)) + i), + guest_l1e_get_flags(gw->l1e)); } /* Look at the gfn that the l1e is pointing at */ @@ -2631,16 +2563,16 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw, /* Propagate the entry. */ l1e_propagate_from_guest(v, gl1e, gmfn, &sl1e, ft_prefetch, p2mt); - (void) shadow_set_l1e(d, ptr_sl1e + i, sl1e, p2mt, sl1mfn); + (void)shadow_set_l1e(d, ptr_sl1e + i, sl1e, p2mt, sl1mfn); -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) if ( snpl1p != NULL ) snpl1p[i] = gl1e; #endif /* OOS */ } if ( gl1p != NULL ) unmap_domain_page(gl1p); -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) if ( snpl1p != NULL ) unmap_domain_page(snpl1p); #endif /* OOS */ @@ -2663,17 +2595,17 @@ static inline void trace_shadow_gen(u32 event, guest_va_t va) { if ( tb_init_done ) { - event |= (GUEST_PAGING_LEVELS-2)<<8; - __trace_var(event, 0/*!tsc*/, sizeof(va), &va); + event |= (GUEST_PAGING_LEVELS - 2) << 8; + __trace_var(event, 0 /*!tsc*/, sizeof(va), &va); } } -static inline void trace_shadow_fixup(guest_l1e_t gl1e, - guest_va_t va) +static inline void trace_shadow_fixup(guest_l1e_t gl1e, guest_va_t va) { if ( tb_init_done ) { - struct __packed { + struct __packed + { /* for PAE, guest_l1e may be 64 while guest_va may be 32; so put it first for alignment sake. */ guest_l1e_t gl1e; @@ -2682,22 +2614,22 @@ static inline void trace_shadow_fixup(guest_l1e_t gl1e, } d; u32 event; - event = TRC_SHADOW_FIXUP | ((GUEST_PAGING_LEVELS-2)<<8); + event = TRC_SHADOW_FIXUP | ((GUEST_PAGING_LEVELS - 2) << 8); d.gl1e = gl1e; d.va = va; d.flags = this_cpu(trace_shadow_path_flags); - __trace_var(event, 0/*!tsc*/, sizeof(d), &d); + __trace_var(event, 0 /*!tsc*/, sizeof(d), &d); } } -static inline void trace_not_shadow_fault(guest_l1e_t gl1e, - guest_va_t va) +static inline void trace_not_shadow_fault(guest_l1e_t gl1e, guest_va_t va) { if ( tb_init_done ) { - struct __packed { + struct __packed + { /* for PAE, guest_l1e may be 64 while guest_va may be 32; so put it first for alignment sake. */ guest_l1e_t gl1e; @@ -2706,23 +2638,23 @@ static inline void trace_not_shadow_fault(guest_l1e_t gl1e, } d; u32 event; - event = TRC_SHADOW_NOT_SHADOW | ((GUEST_PAGING_LEVELS-2)<<8); + event = TRC_SHADOW_NOT_SHADOW | ((GUEST_PAGING_LEVELS - 2) << 8); d.gl1e = gl1e; d.va = va; d.flags = this_cpu(trace_shadow_path_flags); - __trace_var(event, 0/*!tsc*/, sizeof(d), &d); + __trace_var(event, 0 /*!tsc*/, sizeof(d), &d); } } -static inline void trace_shadow_emulate_other(u32 event, - guest_va_t va, - gfn_t gfn) +static inline void trace_shadow_emulate_other(u32 event, guest_va_t va, + gfn_t gfn) { if ( tb_init_done ) { - struct __packed { + struct __packed + { /* for PAE, guest_l1e may be 64 while guest_va may be 32; so put it first for alignment sake. 
*/ #if GUEST_PAGING_LEVELS == 2 @@ -2733,20 +2665,20 @@ static inline void trace_shadow_emulate_other(u32 event, guest_va_t va; } d; - event |= ((GUEST_PAGING_LEVELS-2)<<8); + event |= ((GUEST_PAGING_LEVELS - 2) << 8); - d.gfn=gfn_x(gfn); + d.gfn = gfn_x(gfn); d.va = va; - __trace_var(event, 0/*!tsc*/, sizeof(d), &d); + __trace_var(event, 0 /*!tsc*/, sizeof(d), &d); } } #if GUEST_PAGING_LEVELS == 3 -static DEFINE_PER_CPU(guest_va_t,trace_emulate_initial_va); -static DEFINE_PER_CPU(int,trace_extra_emulation_count); +static DEFINE_PER_CPU(guest_va_t, trace_emulate_initial_va); +static DEFINE_PER_CPU(int, trace_extra_emulation_count); #endif -static DEFINE_PER_CPU(guest_pa_t,trace_emulate_write_val); +static DEFINE_PER_CPU(guest_pa_t, trace_emulate_write_val); static void trace_emulate_write_val(const void *ptr, unsigned long vaddr, const void *src, unsigned int bytes) @@ -2771,16 +2703,17 @@ static inline void trace_shadow_emulate(guest_l1e_t gl1e, unsigned long va) { if ( tb_init_done ) { - struct __packed { + struct __packed + { /* for PAE, guest_l1e may be 64 while guest_va may be 32; so put it first for alignment sake. */ guest_l1e_t gl1e, write_val; guest_va_t va; - unsigned flags:29, emulation_count:3; + unsigned flags : 29, emulation_count : 3; } d; u32 event; - event = TRC_SHADOW_EMULATE | ((GUEST_PAGING_LEVELS-2)<<8); + event = TRC_SHADOW_EMULATE | ((GUEST_PAGING_LEVELS - 2) << 8); d.gl1e = gl1e; d.write_val.l1 = this_cpu(trace_emulate_write_val); @@ -2790,7 +2723,7 @@ static inline void trace_shadow_emulate(guest_l1e_t gl1e, unsigned long va) #endif d.flags = this_cpu(trace_shadow_path_flags); - __trace_var(event, 0/*!tsc*/, sizeof(d), &d); + __trace_var(event, 0 /*!tsc*/, sizeof(d), &d); } } @@ -2802,9 +2735,8 @@ static inline void trace_shadow_emulate(guest_l1e_t gl1e, unsigned long va) * shadow code (and the guest should retry) or 0 if it is not (and the * fault should be handled elsewhere or passed to the guest). */ -static int sh_page_fault(struct vcpu *v, - unsigned long va, - struct cpu_user_regs *regs) +static int sh_page_fault(struct vcpu *v, unsigned long va, + struct cpu_user_regs *regs) { struct domain *d = v->domain; walk_t gw; @@ -2820,20 +2752,19 @@ static int sh_page_fault(struct vcpu *v, bool walk_ok; int version; unsigned int cpl; - const struct npfec access = { - .read_access = 1, - .write_access = !!(regs->error_code & PFEC_write_access), - .gla_valid = 1, - .kind = npfec_kind_with_gla - }; + const struct npfec access = {.read_access = 1, + .write_access = + !!(regs->error_code & PFEC_write_access), + .gla_valid = 1, + .kind = npfec_kind_with_gla}; const fetch_type_t ft = access.write_access ? ft_demand_write : ft_demand_read; #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION int fast_emul = 0; #endif - SHADOW_PRINTK("%pv va=%#lx err=%#x, rip=%lx\n", - v, va, regs->error_code, regs->rip); + SHADOW_PRINTK("%pv va=%#lx err=%#x, rip=%lx\n", v, va, regs->error_code, + regs->rip); perfc_incr(shadow_fault); @@ -2842,8 +2773,8 @@ static int sh_page_fault(struct vcpu *v, * it's highly likely to reach same emulation action for this frame. * Then try to emulate early to avoid lock aquisition. 
*/ - if ( v->arch.paging.last_write_emul_ok - && v->arch.paging.shadow.last_emulated_frame == (va >> PAGE_SHIFT) ) + if ( v->arch.paging.last_write_emul_ok && + v->arch.paging.shadow.last_emulated_frame == (va >> PAGE_SHIFT) ) { /* check whether error code is 3, or else fall back to normal path * in case of some validation is required @@ -2853,7 +2784,7 @@ static int sh_page_fault(struct vcpu *v, fast_emul = 1; gmfn = _mfn(v->arch.paging.shadow.last_emulated_mfn); -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* Fall back to the slow path if we're trying to emulate writes to an out of sync page. */ if ( mfn_valid(gmfn) && mfn_is_out_of_sync(gmfn) ) @@ -2878,48 +2809,47 @@ static int sh_page_fault(struct vcpu *v, // bunch of 4K maps. // -#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH ) if ( (regs->error_code & PFEC_reserved_bit) ) { -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* First, need to check that this isn't an out-of-sync * shadow l1e. If it is, we fall back to the slow path, which * will sync it up again. */ { shadow_l2e_t sl2e; mfn_t gl1mfn; - if ( (__copy_from_user(&sl2e, - (sh_linear_l2_table(v) - + shadow_l2_linear_offset(va)), - sizeof(sl2e)) != 0) - || !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT) - || !mfn_valid(gl1mfn = backpointer(mfn_to_page( - shadow_l2e_get_mfn(sl2e)))) - || unlikely(mfn_is_out_of_sync(gl1mfn)) ) + if ( (__copy_from_user( + &sl2e, + (sh_linear_l2_table(v) + shadow_l2_linear_offset(va)), + sizeof(sl2e)) != 0) || + !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT) || + !mfn_valid(gl1mfn = backpointer( + mfn_to_page(shadow_l2e_get_mfn(sl2e)))) || + unlikely(mfn_is_out_of_sync(gl1mfn)) ) { /* Hit the slow path as if there had been no * shadow entry at all, and let it tidy up */ ASSERT(regs->error_code & PFEC_page_present); - regs->error_code ^= (PFEC_reserved_bit|PFEC_page_present); + regs->error_code ^= (PFEC_reserved_bit | PFEC_page_present); goto page_fault_slow_path; } } #endif /* SHOPT_OUT_OF_SYNC */ /* The only reasons for reserved bits to be set in shadow entries * are the two "magic" shadow_l1e entries. */ - if ( likely((__copy_from_user(&sl1e, - (sh_linear_l1_table(v) - + shadow_l1_linear_offset(va)), - sizeof(sl1e)) == 0) - && sh_l1e_is_magic(sl1e)) ) + if ( likely((__copy_from_user( + &sl1e, + (sh_linear_l1_table(v) + shadow_l1_linear_offset(va)), + sizeof(sl1e)) == 0) && + sh_l1e_is_magic(sl1e)) ) { - if ( sh_l1e_is_gnp(sl1e) ) { /* Not-present in a guest PT: pass to the guest as * a not-present fault (by flipping two bits). */ ASSERT(regs->error_code & PFEC_page_present); - regs->error_code ^= (PFEC_reserved_bit|PFEC_page_present); + regs->error_code ^= (PFEC_reserved_bit | PFEC_page_present); sh_reset_early_unshadow(v); perfc_incr(shadow_fault_fast_gnp); SHADOW_PRINTK("fast path not-present\n"); @@ -2932,11 +2862,12 @@ static int sh_page_fault(struct vcpu *v, ASSERT(is_hvm_vcpu(v)); gpa = gfn_to_gaddr(sh_l1e_mmio_get_gfn(sl1e)) | (va & ~PAGE_MASK); perfc_incr(shadow_fault_fast_mmio); - SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa); + SHADOW_PRINTK("fast path mmio %#" PRIpaddr "\n", gpa); sh_reset_early_unshadow(v); trace_shadow_gen(TRC_SHADOW_FAST_MMIO, va); return handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access) - ? EXCRET_fault_fixed : 0; + ? 
EXCRET_fault_fixed + : 0; #else /* When HVM is not enabled, there shouldn't be MMIO marker */ BUG(); @@ -2954,8 +2885,8 @@ static int sh_page_fault(struct vcpu *v, } } -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) - page_fault_slow_path: +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) +page_fault_slow_path: #endif #endif /* SHOPT_FAST_FAULT_PATH */ @@ -2973,7 +2904,7 @@ static int sh_page_fault(struct vcpu *v, cpl = is_pv_vcpu(v) ? (regs->ss & 3) : hvm_get_cpl(v); - rewalk: +rewalk: error_code = regs->error_code; @@ -3029,7 +2960,7 @@ static int sh_page_fault(struct vcpu *v, * If this corner case comes about accidentally, then a security-relevant * bug has been tickled. */ - if ( !(error_code & (PFEC_insn_fetch|PFEC_user_mode)) && cpl == 3 ) + if ( !(error_code & (PFEC_insn_fetch | PFEC_user_mode)) && cpl == 3 ) error_code |= PFEC_implicit; /* The walk is done in a lock-free style, with some sanity check @@ -3040,7 +2971,7 @@ static int sh_page_fault(struct vcpu *v, smp_rmb(); walk_ok = sh_walk_guest_tables(v, va, &gw, error_code); -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) regs->error_code &= ~PFEC_page_present; if ( gw.pfec & PFEC_page_present ) regs->error_code |= PFEC_page_present; @@ -3074,14 +3005,14 @@ static int sh_page_fault(struct vcpu *v, (!p2m_is_mmio(p2mt) && !mfn_valid(gmfn))) ) { perfc_incr(shadow_fault_bail_bad_gfn); - SHADOW_PRINTK("BAD gfn=%"SH_PRI_gfn" gmfn=%"PRI_mfn"\n", - gfn_x(gfn), mfn_x(gmfn)); + SHADOW_PRINTK("BAD gfn=%" SH_PRI_gfn " gmfn=%" PRI_mfn "\n", gfn_x(gfn), + mfn_x(gmfn)); sh_reset_early_unshadow(v); put_gfn(d, gfn_x(gfn)); goto propagate; } -#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB ) /* Remember this successful VA->GFN translation for later. */ vtlb_insert(v, va >> PAGE_SHIFT, gfn_x(gfn), regs->error_code | PFEC_page_present); @@ -3100,8 +3031,7 @@ static int sh_page_fault(struct vcpu *v, * Preallocate shadow pages *before* removing writable accesses * otherwhise an OOS L1 might be demoted and promoted again with * writable mappings. */ - shadow_prealloc(d, - SH_type_l1_shadow, + shadow_prealloc(d, SH_type_l1_shadow, GUEST_PAGING_LEVELS < 4 ? 1 : GUEST_PAGING_LEVELS - 1); rc = gw_remove_write_accesses(v, va, &gw); @@ -3118,7 +3048,7 @@ static int sh_page_fault(struct vcpu *v, flush_tlb_mask(d->dirty_cpumask); } -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* Second bit set: Resynced a page. Re-walk needed. */ if ( rc & GW_RMWR_REWALK ) { @@ -3164,10 +3094,9 @@ static int sh_page_fault(struct vcpu *v, return 0; } -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* Always unsync when writing to L1 page tables. */ - if ( sh_mfn_is_a_page_table(gmfn) - && ft == ft_demand_write ) + if ( sh_mfn_is_a_page_table(gmfn) && ft == ft_demand_write ) sh_unsync(v, gmfn); if ( unlikely(d->is_shutting_down && d->shutdown_code == SHUTDOWN_crash) ) @@ -3183,7 +3112,7 @@ static int sh_page_fault(struct vcpu *v, /* Final check: if someone has synced a page, it's possible that * our l1e is stale. Compare the entries, and rewalk if necessary. 
*/ - if ( shadow_check_gl1e(v, &gw) ) + if ( shadow_check_gl1e(v, &gw) ) { perfc_incr(shadow_inconsistent_gwalk); paging_unlock(d); @@ -3196,9 +3125,8 @@ static int sh_page_fault(struct vcpu *v, l1e_propagate_from_guest(v, gw.l1e, gmfn, &sl1e, ft, p2mt); r = shadow_set_l1e(d, ptr_sl1e, sl1e, p2mt, sl1mfn); -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) - if ( mfn_valid(gw.l1mfn) - && mfn_is_out_of_sync(gw.l1mfn) ) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) + if ( mfn_valid(gw.l1mfn) && mfn_is_out_of_sync(gw.l1mfn) ) { /* Update the OOS snapshot. */ mfn_t snpmfn = oos_snapshot_lookup(d, gw.l1mfn); @@ -3219,11 +3147,10 @@ static int sh_page_fault(struct vcpu *v, /* Need to emulate accesses to page tables */ if ( sh_mfn_is_a_page_table(gmfn) -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* Unless they've been allowed to go out of sync with their shadows and we don't need to unshadow it. */ - && !(mfn_is_out_of_sync(gmfn) - && !(regs->error_code & PFEC_user_mode)) + && !(mfn_is_out_of_sync(gmfn) && !(regs->error_code & PFEC_user_mode)) #endif && (ft == ft_demand_write) ) { @@ -3243,7 +3170,8 @@ static int sh_page_fault(struct vcpu *v, { static unsigned long lastpage; if ( xchg(&lastpage, va & PAGE_MASK) != (va & PAGE_MASK) ) - gdprintk(XENLOG_DEBUG, "guest attempted write to read-only memory" + gdprintk(XENLOG_DEBUG, + "guest attempted write to read-only memory" " page. va page=%#lx, mfn=%#lx\n", va & PAGE_MASK, mfn_x(gmfn)); goto emulate_readonly; /* skip over the instruction */ @@ -3254,10 +3182,9 @@ static int sh_page_fault(struct vcpu *v, * CR0.WP is clear, we must emulate faulting supervisor writes to * allow the guest to write through read-only PTEs. Emulate if the * fault was a non-user write to a present page. */ - if ( is_hvm_domain(d) - && unlikely(!hvm_wp_enabled(v)) - && regs->error_code == (PFEC_write_access|PFEC_page_present) - && mfn_valid(gmfn) ) + if ( is_hvm_domain(d) && unlikely(!hvm_wp_enabled(v)) && + regs->error_code == (PFEC_write_access | PFEC_page_present) && + mfn_valid(gmfn) ) { perfc_incr(shadow_fault_emulate_wp); goto emulate; @@ -3268,7 +3195,7 @@ static int sh_page_fault(struct vcpu *v, sh_reset_early_unshadow(v); trace_shadow_fixup(gw.l1e, va); - done: +done: sh_audit_gw(v, &gw); SHADOW_PRINTK("fixed\n"); shadow_audit_tables(v); @@ -3276,7 +3203,7 @@ static int sh_page_fault(struct vcpu *v, put_gfn(d, gfn_x(gfn)); return EXCRET_fault_fixed; - emulate: +emulate: if ( !shadow_mode_refcounts(d) || !guest_mode(regs) ) goto not_a_shadow_fault; @@ -3291,8 +3218,7 @@ static int sh_page_fault(struct vcpu *v, mfn_x(gmfn)); perfc_incr(shadow_fault_emulate_failed); sh_remove_shadows(d, gmfn, 0 /* thorough */, 1 /* must succeed */); - trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_USER, - va, gfn); + trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_USER, va, gfn); goto done; } @@ -3300,7 +3226,7 @@ static int sh_page_fault(struct vcpu *v, * Write from userspace to ro-mem needs to jump here to avoid getting * caught by user-mode page-table check above. */ - emulate_readonly: +emulate_readonly: /* Unshadow if we are writing to a toplevel pagetable that is * flagged as a dying process, and that is not currently used. 
*/ @@ -3309,7 +3235,7 @@ static int sh_page_fault(struct vcpu *v, { int used = 0; struct vcpu *tmp; - for_each_vcpu(d, tmp) + for_each_vcpu (d, tmp) { #if GUEST_PAGING_LEVELS == 3 int i; @@ -3348,7 +3274,7 @@ static int sh_page_fault(struct vcpu *v, this_cpu(trace_emulate_write_val) = 0; #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION - early_emulation: +early_emulation: #endif if ( is_hvm_domain(d) ) { @@ -3368,12 +3294,13 @@ static int sh_page_fault(struct vcpu *v, v->arch.paging.last_write_emul_ok = 0; } #endif - gdprintk(XENLOG_DEBUG, "write to pagetable during event " + gdprintk(XENLOG_DEBUG, + "write to pagetable during event " "injection: cr2=%#lx, mfn=%#lx\n", va, mfn_x(gmfn)); sh_remove_shadows(d, gmfn, 0 /* thorough */, 1 /* must succeed */); - trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ, - va, gfn); + trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ, va, + gfn); return EXCRET_fault_fixed; } } @@ -3426,15 +3353,15 @@ static int sh_page_fault(struct vcpu *v, v->arch.paging.last_write_emul_ok = 0; } #endif - SHADOW_PRINTK("emulator failure (rc=%d), unshadowing mfn %#lx\n", - r, mfn_x(gmfn)); + SHADOW_PRINTK("emulator failure (rc=%d), unshadowing mfn %#lx\n", r, + mfn_x(gmfn)); /* If this is actually a page table, then we have a bug, and need * to support more operations in the emulator. More likely, * though, this is a hint that this page should not be shadowed. */ shadow_remove_all_shadows(d, gmfn); - trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_UNHANDLED, - va, gfn); + trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_UNHANDLED, va, + gfn); goto emulate_done; } @@ -3470,10 +3397,10 @@ static int sh_page_fault(struct vcpu *v, */ if ( r == X86EMUL_OKAY && !emul_ctxt.ctxt.retire.raw ) { - int i, emulation_count=0; + int i, emulation_count = 0; this_cpu(trace_emulate_initial_va) = va; - for ( i = 0 ; i < 4 ; i++ ) + for ( i = 0; i < 4; i++ ) { shadow_continue_emulation(&emul_ctxt, regs); v->arch.paging.last_write_was_pt = 0; @@ -3507,35 +3434,36 @@ static int sh_page_fault(struct vcpu *v, break; /* Don't emulate again if we failed! */ } } - this_cpu(trace_extra_emulation_count)=emulation_count; + this_cpu(trace_extra_emulation_count) = emulation_count; } #endif /* PAE guest */ trace_shadow_emulate(gw.l1e, va); - emulate_done: +emulate_done: SHADOW_PRINTK("emulated\n"); return EXCRET_fault_fixed; - mmio: +mmio: if ( !guest_mode(regs) ) goto not_a_shadow_fault; #ifdef CONFIG_HVM ASSERT(is_hvm_vcpu(v)); perfc_incr(shadow_fault_mmio); sh_audit_gw(v, &gw); - SHADOW_PRINTK("mmio %#"PRIpaddr"\n", gpa); + SHADOW_PRINTK("mmio %#" PRIpaddr "\n", gpa); shadow_audit_tables(v); sh_reset_early_unshadow(v); paging_unlock(d); put_gfn(d, gfn_x(gfn)); trace_shadow_gen(TRC_SHADOW_MMIO, va); return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access) - ? EXCRET_fault_fixed : 0); + ? EXCRET_fault_fixed + : 0); #else BUG(); #endif - not_a_shadow_fault: +not_a_shadow_fault: sh_audit_gw(v, &gw); SHADOW_PRINTK("not a shadow fault\n"); shadow_audit_tables(v); @@ -3549,7 +3477,6 @@ propagate: return 0; } - /* * Called when the guest requests an invlpg. 
Returns true if the invlpg * instruction should be issued on the hardware, or false if it's safe not @@ -3562,7 +3489,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long linear) perfc_incr(shadow_invlpg); -#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB ) /* No longer safe to use cached gva->gfn translations */ vtlb_flush(v); #endif @@ -3578,15 +3505,16 @@ static bool sh_invlpg(struct vcpu *v, unsigned long linear) { shadow_l3e_t sl3e; if ( !(shadow_l4e_get_flags( - sh_linear_l4_table(v)[shadow_l4_linear_offset(linear)]) - & _PAGE_PRESENT) ) + sh_linear_l4_table(v)[shadow_l4_linear_offset(linear)]) & + _PAGE_PRESENT) ) return false; /* This must still be a copy-from-user because we don't have the * paging lock, and the higher-level shadows might disappear * under our feet. */ - if ( __copy_from_user(&sl3e, (sh_linear_l3_table(v) - + shadow_l3_linear_offset(linear)), - sizeof (sl3e)) != 0 ) + if ( __copy_from_user( + &sl3e, + (sh_linear_l3_table(v) + shadow_l3_linear_offset(linear)), + sizeof(sl3e)) != 0 ) { perfc_incr(shadow_invlpg_fault); return false; @@ -3595,17 +3523,18 @@ static bool sh_invlpg(struct vcpu *v, unsigned long linear) return false; } #else /* SHADOW_PAGING_LEVELS == 3 */ - if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(linear)]) - & _PAGE_PRESENT) ) + if ( !(l3e_get_flags( + v->arch.paging.shadow.l3table[shadow_l3_linear_offset(linear)]) & + _PAGE_PRESENT) ) // no need to flush anything if there's no SL2... return false; #endif /* This must still be a copy-from-user because we don't have the shadow * lock, and the higher-level shadows might disappear under our feet. */ - if ( __copy_from_user(&sl2e, - sh_linear_l2_table(v) + shadow_l2_linear_offset(linear), - sizeof (sl2e)) != 0 ) + if ( __copy_from_user( + &sl2e, sh_linear_l2_table(v) + shadow_l2_linear_offset(linear), + sizeof(sl2e)) != 0 ) { perfc_incr(shadow_invlpg_fault); return false; @@ -3622,21 +3551,19 @@ static bool sh_invlpg(struct vcpu *v, unsigned long linear) // easier than invalidating all of the individual 4K pages). // sl1mfn = shadow_l2e_get_mfn(sl2e); - if ( mfn_to_page(sl1mfn)->u.sh.type - == SH_type_fl1_shadow ) + if ( mfn_to_page(sl1mfn)->u.sh.type == SH_type_fl1_shadow ) { flush_tlb_local(); return false; } -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* Check to see if the SL1 is out of sync. */ { struct domain *d = v->domain; mfn_t gl1mfn = backpointer(mfn_to_page(sl1mfn)); struct page_info *pg = mfn_to_page(gl1mfn); - if ( mfn_valid(gl1mfn) - && page_is_out_of_sync(pg) ) + if ( mfn_valid(gl1mfn) && page_is_out_of_sync(pg) ) { /* The test above may give false positives, since we don't * hold the paging lock yet. Check again with the lock held. */ @@ -3647,9 +3574,9 @@ static bool sh_invlpg(struct vcpu *v, unsigned long linear) * higher-level shadows might have disappeared under our * feet. 
*/ if ( __copy_from_user(&sl2e, - sh_linear_l2_table(v) - + shadow_l2_linear_offset(linear), - sizeof (sl2e)) != 0 ) + sh_linear_l2_table(v) + + shadow_l2_linear_offset(linear), + sizeof(sl2e)) != 0 ) { perfc_incr(shadow_invlpg_fault); paging_unlock(d); @@ -3666,14 +3593,14 @@ static bool sh_invlpg(struct vcpu *v, unsigned long linear) gl1mfn = backpointer(mfn_to_page(sl1mfn)); pg = mfn_to_page(gl1mfn); - if ( likely(sh_mfn_is_a_page_table(gl1mfn) - && page_is_out_of_sync(pg) ) ) + if ( likely(sh_mfn_is_a_page_table(gl1mfn) && + page_is_out_of_sync(pg)) ) { shadow_l1e_t *sl1; sl1 = sh_linear_l1_table(v) + shadow_l1_linear_offset(linear); /* Remove the shadow entry that maps this VA */ - (void) shadow_set_l1e(d, sl1, shadow_l1e_empty(), - p2m_invalid, sl1mfn); + (void)shadow_set_l1e(d, sl1, shadow_l1e_empty(), p2m_invalid, + sl1mfn); } paging_unlock(d); /* Need the invlpg, to pick up the disappeareance of the sl1e */ @@ -3685,10 +3612,8 @@ static bool sh_invlpg(struct vcpu *v, unsigned long linear) return true; } - -static unsigned long -sh_gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m, - unsigned long va, uint32_t *pfec) +static unsigned long sh_gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m, + unsigned long va, uint32_t *pfec) /* Called to translate a guest virtual address to what the *guest* * pagetables would map it to. */ { @@ -3696,7 +3621,7 @@ sh_gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m, gfn_t gfn; bool walk_ok; -#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB ) /* Check the vTLB cache first */ unsigned long vtlb_gfn = vtlb_lookup(v, va, *pfec); if ( vtlb_gfn != gfn_x(INVALID_GFN) ) @@ -3710,7 +3635,7 @@ sh_gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m, } gfn = guest_walk_to_gfn(&gw); -#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB ) /* Remember this successful VA->GFN translation for later. */ vtlb_insert(v, va >> PAGE_SHIFT, gfn_x(gfn), *pfec); #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */ @@ -3718,9 +3643,7 @@ sh_gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m, return gfn_x(gfn); } - -static inline void -sh_update_linear_entries(struct vcpu *v) +static inline void sh_update_linear_entries(struct vcpu *v) /* Sync up all the linear mappings for this vcpu's pagetables */ { struct domain *d = v->domain; @@ -3756,8 +3679,8 @@ sh_update_linear_entries(struct vcpu *v) */ /* Don't try to update the monitor table if it doesn't exist */ - if ( shadow_mode_external(d) - && pagetable_get_pfn(v->arch.monitor_table) == 0 ) + if ( shadow_mode_external(d) && + pagetable_get_pfn(v->arch.monitor_table) == 0 ) return; #if SHADOW_PAGING_LEVELS == 4 @@ -3807,8 +3730,8 @@ sh_update_linear_entries(struct vcpu *v) /* Use linear mappings if we can; otherwise make new mappings */ if ( v == current ) - ml2e = __linear_l2_table - + l2_linear_offset(SH_LINEAR_PT_VIRT_START); + ml2e = + __linear_l2_table + l2_linear_offset(SH_LINEAR_PT_VIRT_START); else { mfn_t l3mfn, l2mfn; @@ -3833,11 +3756,10 @@ sh_update_linear_entries(struct vcpu *v) for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ ) { - ml2e[i] = - (shadow_l3e_get_flags(sl3e[i]) & _PAGE_PRESENT) - ? l2e_from_mfn(shadow_l3e_get_mfn(sl3e[i]), - __PAGE_HYPERVISOR_RW) - : l2e_empty(); + ml2e[i] = (shadow_l3e_get_flags(sl3e[i]) & _PAGE_PRESENT) + ? 
l2e_from_mfn(shadow_l3e_get_mfn(sl3e[i]), + __PAGE_HYPERVISOR_RW) + : l2e_empty(); } if ( v != current ) @@ -3864,13 +3786,11 @@ sh_update_linear_entries(struct vcpu *v) } } - /* * Removes vcpu->arch.shadow_table[]. * Does all appropriate management/bookkeeping/refcounting/etc... */ -static void -sh_detach_old_tables(struct vcpu *v) +static void sh_detach_old_tables(struct vcpu *v) { struct domain *d = v->domain; mfn_t smfn; @@ -3882,7 +3802,7 @@ sh_detach_old_tables(struct vcpu *v) #if GUEST_PAGING_LEVELS == 3 /* PAE guests have four shadow_table entries */ - for ( i = 0 ; i < 4 ; i++ ) + for ( i = 0; i < 4; i++ ) #endif { smfn = pagetable_get_mfn(v->arch.shadow_table[i]); @@ -3893,11 +3813,8 @@ sh_detach_old_tables(struct vcpu *v) } /* Set up the top-level shadow and install it in slot 'slot' of shadow_table */ -static void -sh_set_toplevel_shadow(struct vcpu *v, - int slot, - mfn_t gmfn, - unsigned int root_type) +static void sh_set_toplevel_shadow(struct vcpu *v, int slot, mfn_t gmfn, + unsigned int root_type) { mfn_t smfn; pagetable_t old_entry, new_entry; @@ -3936,37 +3853,36 @@ sh_set_toplevel_shadow(struct vcpu *v, } else { - printk(XENLOG_G_ERR "can't install %"PRI_mfn" as toplevel shadow\n", + printk(XENLOG_G_ERR "can't install %" PRI_mfn " as toplevel shadow\n", mfn_x(smfn)); domain_crash(d); new_entry = pagetable_null(); } - install_new_entry: +install_new_entry: /* Done. Install it */ - SHADOW_PRINTK("%u/%u [%u] gmfn %#"PRI_mfn" smfn %#"PRI_mfn"\n", - GUEST_PAGING_LEVELS, SHADOW_PAGING_LEVELS, slot, - mfn_x(gmfn), mfn_x(pagetable_get_mfn(new_entry))); + SHADOW_PRINTK("%u/%u [%u] gmfn %#" PRI_mfn " smfn %#" PRI_mfn "\n", + GUEST_PAGING_LEVELS, SHADOW_PAGING_LEVELS, slot, mfn_x(gmfn), + mfn_x(pagetable_get_mfn(new_entry))); v->arch.shadow_table[slot] = new_entry; /* Decrement the refcount of the old contents of this slot */ - if ( !pagetable_is_null(old_entry) ) { + if ( !pagetable_is_null(old_entry) ) + { mfn_t old_smfn = pagetable_get_mfn(old_entry); /* Need to repin the old toplevel shadow if it's been unpinned * by shadow_prealloc(): in PV mode we're still running on this * shadow and it's not safe to free it yet. */ if ( !mfn_to_page(old_smfn)->u.sh.pinned && !sh_pin(d, old_smfn) ) { - printk(XENLOG_G_ERR "can't re-pin %"PRI_mfn"\n", mfn_x(old_smfn)); + printk(XENLOG_G_ERR "can't re-pin %" PRI_mfn "\n", mfn_x(old_smfn)); domain_crash(d); } sh_put_ref(d, old_smfn, 0); } } - -static void -sh_update_cr3(struct vcpu *v, int do_locking, bool noflush) +static void sh_update_cr3(struct vcpu *v, int do_locking, bool noflush) /* Updates vcpu->arch.cr3 after the guest has changed CR3. * Paravirtual guests should set v->arch.guest_table (and guest_table_user, * if appropriate). @@ -3992,9 +3908,10 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush) return; } - if ( do_locking ) paging_lock(v->domain); + if ( do_locking ) + paging_lock(v->domain); -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* Need to resync all the shadow entries on a TLB flush. Resync * current vcpus OOS pages before switching to the new shadow * tables so that the VA hint is still valid. 
*/ @@ -4016,13 +3933,13 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush) if ( hvm_paging_enabled(v) ) ASSERT(pagetable_get_pfn(v->arch.guest_table)); else - ASSERT(v->arch.guest_table.pfn - == d->arch.paging.shadow.unpaged_pagetable.pfn); + ASSERT(v->arch.guest_table.pfn == + d->arch.paging.shadow.unpaged_pagetable.pfn); } #endif - SHADOW_PRINTK("%pv guest_table=%"PRI_mfn"\n", - v, (unsigned long)pagetable_get_pfn(v->arch.guest_table)); + SHADOW_PRINTK("%pv guest_table=%" PRI_mfn "\n", v, + (unsigned long)pagetable_get_pfn(v->arch.guest_table)); #if GUEST_PAGING_LEVELS == 4 if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32bit_domain(d) ) @@ -4046,12 +3963,11 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush) guest_idx = guest_index((void *)v->arch.hvm.guest_cr[3]) & ~3; gl3e = ((guest_l3e_t *)map_domain_page(gmfn)) + guest_idx; - for ( i = 0; i < 4 ; i++ ) + for ( i = 0; i < 4; i++ ) v->arch.paging.shadow.gl3e[i] = gl3e[i]; unmap_domain_page(gl3e); #endif - //// //// vcpu->arch.shadow_table[] //// @@ -4094,9 +4010,9 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush) gl2gfn = guest_l3e_get_gfn(gl3e[i]); gl2mfn = get_gfn_query_unlocked(d, gfn_x(gl2gfn), &p2mt); if ( p2m_is_ram(p2mt) ) - sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3) - ? SH_type_l2h_shadow - : SH_type_l2_shadow); + sh_set_toplevel_shadow(v, i, gl2mfn, + (i == 3) ? SH_type_l2h_shadow + : SH_type_l2_shadow); else sh_set_toplevel_shadow(v, i, INVALID_MFN, 0); } @@ -4122,33 +4038,30 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush) #error This should never happen #endif - /// /// v->arch.paging.shadow.l3table /// #if SHADOW_PAGING_LEVELS == 3 + { + mfn_t smfn = pagetable_get_mfn(v->arch.shadow_table[0]); + int i; + for ( i = 0; i < 4; i++ ) { - mfn_t smfn = pagetable_get_mfn(v->arch.shadow_table[0]); - int i; - for ( i = 0; i < 4; i++ ) - { #if GUEST_PAGING_LEVELS == 2 - /* 2-on-3: make a PAE l3 that points at the four-page l2 */ - if ( i != 0 ) - smfn = sh_next_page(smfn); + /* 2-on-3: make a PAE l3 that points at the four-page l2 */ + if ( i != 0 ) + smfn = sh_next_page(smfn); #else - /* 3-on-3: make a PAE l3 that points at the four l2 pages */ - smfn = pagetable_get_mfn(v->arch.shadow_table[i]); + /* 3-on-3: make a PAE l3 that points at the four l2 pages */ + smfn = pagetable_get_mfn(v->arch.shadow_table[i]); #endif - v->arch.paging.shadow.l3table[i] = - (mfn_x(smfn) == 0) - ? shadow_l3e_empty() - : shadow_l3e_from_mfn(smfn, _PAGE_PRESENT); - } + v->arch.paging.shadow.l3table[i] = + (mfn_x(smfn) == 0) ? shadow_l3e_empty() + : shadow_l3e_from_mfn(smfn, _PAGE_PRESENT); } + } #endif /* SHADOW_PAGING_LEVELS == 3 */ - /// /// v->arch.cr3 /// @@ -4166,7 +4079,6 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush) } #endif - /// /// v->arch.hvm.hw_cr[3] /// @@ -4186,7 +4098,7 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush) /* Fix up the linear pagetable mappings */ sh_update_linear_entries(v); -#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB ) /* No longer safe to use cached gva->gfn translations */ vtlb_flush(v); #endif @@ -4195,7 +4107,7 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush) v->arch.paging.last_write_emul_ok = 0; #endif -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* Need to resync all the shadow entries on a TLB flush. We only * update the shadows, leaving the pages out of sync. 
Also, we try * to skip synchronization of shadows not mapped in the new @@ -4204,16 +4116,16 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush) #endif /* Release the lock, if we took it (otherwise it's the caller's problem) */ - if ( do_locking ) paging_unlock(v->domain); + if ( do_locking ) + paging_unlock(v->domain); } - /**************************************************************************/ /* Functions to revoke guest rights */ #if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC -int sh_rm_write_access_from_sl1p(struct domain *d, mfn_t gmfn, - mfn_t smfn, unsigned long off) +int sh_rm_write_access_from_sl1p(struct domain *d, mfn_t gmfn, mfn_t smfn, + unsigned long off) { struct vcpu *curr = current; int r; @@ -4225,22 +4137,22 @@ int sh_rm_write_access_from_sl1p(struct domain *d, mfn_t gmfn, /* Remember if we've been told that this process is being torn down */ if ( curr->domain == d && is_hvm_domain(d) ) - curr->arch.paging.shadow.pagetable_dying - = mfn_to_page(gmfn)->pagetable_dying; + curr->arch.paging.shadow.pagetable_dying = + mfn_to_page(gmfn)->pagetable_dying; sp = mfn_to_page(smfn); - if ( ((sp->count_info & PGC_count_mask) != 0) - || (sp->u.sh.type != SH_type_l1_shadow - && sp->u.sh.type != SH_type_fl1_shadow) ) + if ( ((sp->count_info & PGC_count_mask) != 0) || + (sp->u.sh.type != SH_type_l1_shadow && + sp->u.sh.type != SH_type_fl1_shadow) ) goto fail; sl1p = map_domain_page(smfn); sl1p += off; sl1e = *sl1p; - if ( ((shadow_l1e_get_flags(sl1e) & (_PAGE_PRESENT|_PAGE_RW)) - != (_PAGE_PRESENT|_PAGE_RW)) - || !mfn_eq(shadow_l1e_get_mfn(sl1e), gmfn) ) + if ( ((shadow_l1e_get_flags(sl1e) & (_PAGE_PRESENT | _PAGE_RW)) != + (_PAGE_PRESENT | _PAGE_RW)) || + !mfn_eq(shadow_l1e_get_mfn(sl1e), gmfn) ) { unmap_domain_page(sl1p); goto fail; @@ -4249,13 +4161,13 @@ int sh_rm_write_access_from_sl1p(struct domain *d, mfn_t gmfn, /* Found it! Need to remove its write permissions. */ sl1e = shadow_l1e_remove_flags(sl1e, _PAGE_RW); r = shadow_set_l1e(d, sl1p, sl1e, p2m_ram_rw, smfn); - ASSERT( !(r & SHADOW_SET_ERROR) ); + ASSERT(!(r & SHADOW_SET_ERROR)); unmap_domain_page(sl1p); perfc_incr(shadow_writeable_h_7); return 1; - fail: +fail: perfc_incr(shadow_writeable_h_8); return 0; } @@ -4286,7 +4198,7 @@ static int sh_guess_wrmap(struct vcpu *v, unsigned long vaddr, mfn_t gmfn) */ if ( paging_mode_external(d) ) { - sl4p = __linear_l4_table + l4_linear_offset(SH_LINEAR_PT_VIRT_START); + sl4p = __linear_l4_table + l4_linear_offset(SH_LINEAR_PT_VIRT_START); if ( !(shadow_l4e_get_flags(*sl4p) & _PAGE_PRESENT) ) return 0; } @@ -4297,8 +4209,8 @@ static int sh_guess_wrmap(struct vcpu *v, unsigned long vaddr, mfn_t gmfn) if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) ) return 0; #else /* SHADOW_PAGING_LEVELS == 3 */ - sl3p = ((shadow_l3e_t *) v->arch.paging.shadow.l3table) - + shadow_l3_linear_offset(vaddr); + sl3p = ((shadow_l3e_t *)v->arch.paging.shadow.l3table) + + shadow_l3_linear_offset(vaddr); if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) ) return 0; #endif @@ -4307,16 +4219,17 @@ static int sh_guess_wrmap(struct vcpu *v, unsigned long vaddr, mfn_t gmfn) return 0; sl1p = sh_linear_l1_table(v) + shadow_l1_linear_offset(vaddr); sl1e = *sl1p; - if ( ((shadow_l1e_get_flags(sl1e) & (_PAGE_PRESENT|_PAGE_RW)) - != (_PAGE_PRESENT|_PAGE_RW)) - || !mfn_eq(shadow_l1e_get_mfn(sl1e), gmfn) ) + if ( ((shadow_l1e_get_flags(sl1e) & (_PAGE_PRESENT | _PAGE_RW)) != + (_PAGE_PRESENT | _PAGE_RW)) || + !mfn_eq(shadow_l1e_get_mfn(sl1e), gmfn) ) return 0; /* Found it! 
Need to remove its write permissions. */ sl1mfn = shadow_l2e_get_mfn(*sl2p); sl1e = shadow_l1e_remove_flags(sl1e, _PAGE_RW); r = shadow_set_l1e(d, sl1p, sl1e, p2m_ram_rw, sl1mfn); - if ( r & SHADOW_SET_ERROR ) { + if ( r & SHADOW_SET_ERROR ) + { /* Can only currently happen if we found a grant-mapped * page. Just make the guess fail. */ return 0; @@ -4338,22 +4251,21 @@ int sh_rm_write_access_from_l1(struct domain *d, mfn_t sl1mfn, mfn_t base_sl1mfn = sl1mfn; /* Because sl1mfn changes in the foreach */ #endif - SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done, - { + SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done, { flags = shadow_l1e_get_flags(*sl1e); - if ( (flags & _PAGE_PRESENT) - && (flags & _PAGE_RW) - && (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(readonly_mfn)) ) + if ( (flags & _PAGE_PRESENT) && (flags & _PAGE_RW) && + (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(readonly_mfn)) ) { shadow_l1e_t ro_sl1e = shadow_l1e_remove_flags(*sl1e, _PAGE_RW); - (void) shadow_set_l1e(d, sl1e, ro_sl1e, p2m_ram_rw, sl1mfn); + (void)shadow_set_l1e(d, sl1e, ro_sl1e, p2m_ram_rw, sl1mfn); #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC /* Remember the last shadow that we shot a writeable mapping in */ if ( curr->domain == d ) - curr->arch.paging.shadow.last_writeable_pte_smfn = mfn_x(base_sl1mfn); + curr->arch.paging.shadow.last_writeable_pte_smfn = + mfn_x(base_sl1mfn); #endif - if ( (mfn_to_page(readonly_mfn)->u.inuse.type_info - & PGT_count_mask) == 0 ) + if ( (mfn_to_page(readonly_mfn)->u.inuse.type_info & + PGT_count_mask) == 0 ) /* This breaks us cleanly out of the FOREACH macro */ done = 1; } @@ -4361,7 +4273,6 @@ int sh_rm_write_access_from_l1(struct domain *d, mfn_t sl1mfn, return done; } - int sh_rm_mappings_from_l1(struct domain *d, mfn_t sl1mfn, mfn_t target_mfn) /* Excises all mappings to guest frame from this shadow l1 table */ { @@ -4369,14 +4280,13 @@ int sh_rm_mappings_from_l1(struct domain *d, mfn_t sl1mfn, mfn_t target_mfn) int done = 0; int flags; - SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done, - { + SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done, { flags = shadow_l1e_get_flags(*sl1e); - if ( (flags & _PAGE_PRESENT) - && (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(target_mfn)) ) + if ( (flags & _PAGE_PRESENT) && + (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(target_mfn)) ) { - (void) shadow_set_l1e(d, sl1e, shadow_l1e_empty(), - p2m_invalid, sl1mfn); + (void)shadow_set_l1e(d, sl1e, shadow_l1e_empty(), p2m_invalid, + sl1mfn); if ( sh_check_page_has_no_refs(mfn_to_page(target_mfn)) ) /* This breaks us cleanly out of the FOREACH macro */ done = 1; @@ -4391,26 +4301,27 @@ int sh_rm_mappings_from_l1(struct domain *d, mfn_t sl1mfn, mfn_t target_mfn) void sh_clear_shadow_entry(struct domain *d, void *ep, mfn_t smfn) /* Blank out a single shadow entry */ { - switch ( mfn_to_page(smfn)->u.sh.type ) + switch (mfn_to_page(smfn)->u.sh.type) { case SH_type_l1_shadow: - (void) shadow_set_l1e(d, ep, shadow_l1e_empty(), p2m_invalid, smfn); + (void)shadow_set_l1e(d, ep, shadow_l1e_empty(), p2m_invalid, smfn); break; case SH_type_l2_shadow: #if GUEST_PAGING_LEVELS >= 3 case SH_type_l2h_shadow: #endif - (void) shadow_set_l2e(d, ep, shadow_l2e_empty(), smfn); + (void)shadow_set_l2e(d, ep, shadow_l2e_empty(), smfn); break; #if GUEST_PAGING_LEVELS >= 4 case SH_type_l3_shadow: - (void) shadow_set_l3e(d, ep, shadow_l3e_empty(), smfn); + (void)shadow_set_l3e(d, ep, shadow_l3e_empty(), smfn); break; case SH_type_l4_shadow: - (void) shadow_set_l4e(d, ep, shadow_l4e_empty(), smfn); + (void)shadow_set_l4e(d, ep, shadow_l4e_empty(), 
smfn); break; #endif - default: BUG(); /* Called with the wrong kind of shadow. */ + default: + BUG(); /* Called with the wrong kind of shadow. */ } } @@ -4421,13 +4332,12 @@ int sh_remove_l1_shadow(struct domain *d, mfn_t sl2mfn, mfn_t sl1mfn) int done = 0; int flags; - SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, done, d, - { + SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, done, d, { flags = shadow_l2e_get_flags(*sl2e); - if ( (flags & _PAGE_PRESENT) - && (mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn)) ) + if ( (flags & _PAGE_PRESENT) && + (mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn)) ) { - (void) shadow_set_l2e(d, sl2e, shadow_l2e_empty(), sl2mfn); + (void)shadow_set_l2e(d, sl2e, shadow_l2e_empty(), sl2mfn); if ( mfn_to_page(sl1mfn)->u.sh.type == 0 ) /* This breaks us cleanly out of the FOREACH macro */ done = 1; @@ -4444,13 +4354,12 @@ int sh_remove_l2_shadow(struct domain *d, mfn_t sl3mfn, mfn_t sl2mfn) int done = 0; int flags; - SHADOW_FOREACH_L3E(sl3mfn, sl3e, 0, done, - { + SHADOW_FOREACH_L3E(sl3mfn, sl3e, 0, done, { flags = shadow_l3e_get_flags(*sl3e); - if ( (flags & _PAGE_PRESENT) - && (mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn)) ) + if ( (flags & _PAGE_PRESENT) && + (mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn)) ) { - (void) shadow_set_l3e(d, sl3e, shadow_l3e_empty(), sl3mfn); + (void)shadow_set_l3e(d, sl3e, shadow_l3e_empty(), sl3mfn); if ( mfn_to_page(sl2mfn)->u.sh.type == 0 ) /* This breaks us cleanly out of the FOREACH macro */ done = 1; @@ -4466,13 +4375,12 @@ int sh_remove_l3_shadow(struct domain *d, mfn_t sl4mfn, mfn_t sl3mfn) int done = 0; int flags; - SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, done, d, - { + SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, done, d, { flags = shadow_l4e_get_flags(*sl4e); - if ( (flags & _PAGE_PRESENT) - && (mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn)) ) + if ( (flags & _PAGE_PRESENT) && + (mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn)) ) { - (void) shadow_set_l4e(d, sl4e, shadow_l4e_empty(), sl4mfn); + (void)shadow_set_l4e(d, sl4e, shadow_l4e_empty(), sl4mfn); if ( mfn_to_page(sl3mfn)->u.sh.type == 0 ) /* This breaks us cleanly out of the FOREACH macro */ done = 1; @@ -4511,7 +4419,7 @@ static void sh_pagetable_dying(paddr_t gpa) l3mfn = get_gfn_query(d, _gfn(l3gfn), &p2mt); if ( !mfn_valid(l3mfn) || !p2m_is_ram(p2mt) ) { - printk(XENLOG_DEBUG "sh_pagetable_dying: gpa not valid %"PRIpaddr"\n", + printk(XENLOG_DEBUG "sh_pagetable_dying: gpa not valid %" PRIpaddr "\n", gpa); goto out_put_gfn; } @@ -4527,7 +4435,8 @@ static void sh_pagetable_dying(paddr_t gpa) { mfn_t smfn, gmfn; - if ( fast_path ) { + if ( fast_path ) + { if ( pagetable_is_null(v->arch.shadow_table[i]) ) smfn = INVALID_MFN; else @@ -4538,16 +4447,17 @@ static void sh_pagetable_dying(paddr_t gpa) /* retrieving the l2s */ gmfn = get_gfn_query_unlocked(d, gfn_x(guest_l3e_get_gfn(gl3e[i])), &p2mt); - smfn = unlikely(mfn_eq(gmfn, INVALID_MFN)) - ? INVALID_MFN - : shadow_hash_lookup(d, mfn_x(gmfn), SH_type_l2_pae_shadow); + smfn = + unlikely(mfn_eq(gmfn, INVALID_MFN)) + ? 
INVALID_MFN + : shadow_hash_lookup(d, mfn_x(gmfn), SH_type_l2_pae_shadow); } if ( mfn_valid(smfn) && is_hvm_domain(d) ) { gmfn = _mfn(mfn_to_page(smfn)->v.sh.back); mfn_to_page(gmfn)->pagetable_dying = true; - shadow_unhook_mappings(d, smfn, 1/* user pages only */); + shadow_unhook_mappings(d, smfn, 1 /* user pages only */); flush = 1; } } @@ -4587,7 +4497,7 @@ static void sh_pagetable_dying(paddr_t gpa) if ( mfn_valid(smfn) && is_hvm_domain(d) ) { mfn_to_page(gmfn)->pagetable_dying = true; - shadow_unhook_mappings(d, smfn, 1/* user pages only */); + shadow_unhook_mappings(d, smfn, 1 /* user pages only */); /* Now flush the TLB: we removed toplevel mappings. */ flush_tlb_mask(d->dirty_cpumask); } @@ -4609,38 +4519,33 @@ static void sh_pagetable_dying(paddr_t gpa) #if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES -#define AUDIT_FAIL(_level, _fmt, _a...) do { \ - printk("Shadow %u-on-%u audit failed at level %i, index %i\n" \ - "gl" #_level "mfn = %" PRI_mfn \ - " sl" #_level "mfn = %" PRI_mfn \ - " &gl" #_level "e = %p &sl" #_level "e = %p" \ - " gl" #_level "e = %" SH_PRI_gpte \ - " sl" #_level "e = %" SH_PRI_pte "\nError: " _fmt "\n", \ - GUEST_PAGING_LEVELS, SHADOW_PAGING_LEVELS, \ - _level, guest_index(gl ## _level ## e), \ - mfn_x(gl ## _level ## mfn), mfn_x(sl ## _level ## mfn), \ - gl ## _level ## e, sl ## _level ## e, \ - gl ## _level ## e->l ## _level, sl ## _level ## e->l ## _level, \ - ##_a); \ - BUG(); \ - done = 1; \ -} while (0) - -#define AUDIT_FAIL_MIN(_level, _fmt, _a...) do { \ - printk("Shadow %u-on-%u audit failed at level %i\n" \ - "gl" #_level "mfn = %" PRI_mfn \ - " sl" #_level "mfn = %" PRI_mfn \ - " Error: " _fmt "\n", \ - GUEST_PAGING_LEVELS, SHADOW_PAGING_LEVELS, \ - _level, \ - mfn_x(gl ## _level ## mfn), mfn_x(sl ## _level ## mfn), \ - ##_a); \ - BUG(); \ - done = 1; \ -} while (0) - -static char * sh_audit_flags(struct vcpu *v, int level, - int gflags, int sflags) +#define AUDIT_FAIL(_level, _fmt, _a...) \ + do { \ + printk("Shadow %u-on-%u audit failed at level %i, index %i\n" \ + "gl" #_level "mfn = %" PRI_mfn " sl" #_level "mfn = %" PRI_mfn \ + " &gl" #_level "e = %p &sl" #_level "e = %p" \ + " gl" #_level "e = %" SH_PRI_gpte " sl" #_level \ + "e = %" SH_PRI_pte "\nError: " _fmt "\n", \ + GUEST_PAGING_LEVELS, SHADOW_PAGING_LEVELS, _level, \ + guest_index(gl##_level##e), mfn_x(gl##_level##mfn), \ + mfn_x(sl##_level##mfn), gl##_level##e, sl##_level##e, \ + gl##_level##e->l##_level, sl##_level##e->l##_level, ##_a); \ + BUG(); \ + done = 1; \ + } while ( 0 ) + +#define AUDIT_FAIL_MIN(_level, _fmt, _a...) 
\ + do { \ + printk("Shadow %u-on-%u audit failed at level %i\n" \ + "gl" #_level "mfn = %" PRI_mfn " sl" #_level "mfn = %" PRI_mfn \ + " Error: " _fmt "\n", \ + GUEST_PAGING_LEVELS, SHADOW_PAGING_LEVELS, _level, \ + mfn_x(gl##_level##mfn), mfn_x(sl##_level##mfn), ##_a); \ + BUG(); \ + done = 1; \ + } while ( 0 ) + +static char *sh_audit_flags(struct vcpu *v, int level, int gflags, int sflags) /* Common code for auditing flag bits */ { if ( (sflags & _PAGE_PRESENT) && !(gflags & _PAGE_PRESENT) ) @@ -4650,12 +4555,13 @@ static char * sh_audit_flags(struct vcpu *v, int level, if ( level == 2 && (sflags & _PAGE_PSE) ) return "PS bit set in shadow"; #if SHADOW_PAGING_LEVELS == 3 - if ( level == 3 ) return NULL; /* All the other bits are blank in PAEl3 */ + if ( level == 3 ) + return NULL; /* All the other bits are blank in PAEl3 */ #endif if ( (sflags & _PAGE_PRESENT) && !(gflags & _PAGE_ACCESSED) ) return "accessed bit not propagated"; - if ( (level == 1 || (level == 2 && (gflags & _PAGE_PSE))) - && ((sflags & _PAGE_RW) && !(gflags & _PAGE_DIRTY)) ) + if ( (level == 1 || (level == 2 && (gflags & _PAGE_PSE))) && + ((sflags & _PAGE_RW) && !(gflags & _PAGE_DIRTY)) ) return "dirty bit not propagated"; if ( (sflags & _PAGE_USER) != (gflags & _PAGE_USER) ) return "user/supervisor bit does not match"; @@ -4680,7 +4586,7 @@ int sh_audit_l1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x) ASSERT(mfn_to_page(sl1mfn)->u.sh.head); gl1mfn = backpointer(mfn_to_page(sl1mfn)); -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* Out-of-sync l1 shadows can contain anything: just check the OOS hash */ if ( page_is_out_of_sync(mfn_to_page(gl1mfn)) ) { @@ -4691,10 +4597,9 @@ int sh_audit_l1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x) gl1e = gp = map_domain_page(gl1mfn); SHADOW_FOREACH_L1E(sl1mfn, sl1e, &gl1e, done, { - if ( sh_l1e_is_magic(*sl1e) ) { -#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH ) if ( sh_l1e_is_gnp(*sl1e) ) { if ( guest_l1e_get_flags(*gl1e) & _PAGE_PRESENT ) @@ -4705,10 +4610,10 @@ int sh_audit_l1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x) ASSERT(sh_l1e_is_mmio(*sl1e)); gfn = sh_l1e_mmio_get_gfn(*sl1e); if ( gfn_x(gfn) != gfn_x(guest_l1e_get_gfn(*gl1e)) ) - AUDIT_FAIL(1, "shadow MMIO gfn is %" SH_PRI_gfn + AUDIT_FAIL(1, + "shadow MMIO gfn is %" SH_PRI_gfn " but guest gfn is %" SH_PRI_gfn, - gfn_x(gfn), - gfn_x(guest_l1e_get_gfn(*gl1e))); + gfn_x(gfn), gfn_x(guest_l1e_get_gfn(*gl1e))); } #endif } @@ -4716,7 +4621,8 @@ int sh_audit_l1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x) { s = sh_audit_flags(v, 1, guest_l1e_get_flags(*gl1e), shadow_l1e_get_flags(*sl1e)); - if ( s ) AUDIT_FAIL(1, "%s", s); + if ( s ) + AUDIT_FAIL(1, "%s", s); if ( SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_MFNS ) { @@ -4724,7 +4630,8 @@ int sh_audit_l1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x) mfn = shadow_l1e_get_mfn(*sl1e); gmfn = get_gfn_query_unlocked(v->domain, gfn_x(gfn), &p2mt); if ( !p2m_is_grant(p2mt) && !mfn_eq(gmfn, mfn) ) - AUDIT_FAIL(1, "bad translation: gfn %" SH_PRI_gfn + AUDIT_FAIL(1, + "bad translation: gfn %" SH_PRI_gfn " --> %" PRI_mfn " != mfn %" PRI_mfn, gfn_x(gfn), mfn_x(gmfn), mfn_x(mfn)); } @@ -4743,18 +4650,19 @@ int sh_audit_fl1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x) int done = 0; /* fl1 has no useful backpointer: all we can check are flags */ - e = guest_l1e_from_gfn(_gfn(0), 0); gl1e = &e; /* Needed for macro */ + e = guest_l1e_from_gfn(_gfn(0), 0); + gl1e = &e; /* Needed for macro */ 
SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done, { f = shadow_l1e_get_flags(*sl1e); - f &= ~(_PAGE_AVAIL0|_PAGE_AVAIL1|_PAGE_AVAIL2); - if ( !(f == 0 - || f == (_PAGE_PRESENT|_PAGE_USER|_PAGE_RW| - _PAGE_ACCESSED) - || f == (_PAGE_PRESENT|_PAGE_USER|_PAGE_ACCESSED) - || f == (_PAGE_PRESENT|_PAGE_USER|_PAGE_RW| - _PAGE_ACCESSED|_PAGE_DIRTY) - || f == (_PAGE_PRESENT|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY) - || sh_l1e_is_magic(*sl1e)) ) + f &= ~(_PAGE_AVAIL0 | _PAGE_AVAIL1 | _PAGE_AVAIL2); + if ( !(f == 0 || + f == (_PAGE_PRESENT | _PAGE_USER | _PAGE_RW | _PAGE_ACCESSED) || + f == (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) || + f == (_PAGE_PRESENT | _PAGE_USER | _PAGE_RW | _PAGE_ACCESSED | + _PAGE_DIRTY) || + f == (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | + _PAGE_DIRTY) || + sh_l1e_is_magic(*sl1e)) ) AUDIT_FAIL(1, "fl1e has bad flags"); }); return 0; @@ -4775,7 +4683,7 @@ int sh_audit_l2_table(struct vcpu *v, mfn_t sl2mfn, mfn_t x) ASSERT(mfn_to_page(sl2mfn)->u.sh.head); gl2mfn = backpointer(mfn_to_page(sl2mfn)); -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* Only L1's may be out of sync. */ if ( page_is_out_of_sync(mfn_to_page(gl2mfn)) ) AUDIT_FAIL_MIN(2, "gmfn %lx is out of sync", mfn_x(gl2mfn)); @@ -4783,28 +4691,30 @@ int sh_audit_l2_table(struct vcpu *v, mfn_t sl2mfn, mfn_t x) gl2e = gp = map_domain_page(gl2mfn); SHADOW_FOREACH_L2E(sl2mfn, sl2e, &gl2e, done, d, { - s = sh_audit_flags(v, 2, guest_l2e_get_flags(*gl2e), - shadow_l2e_get_flags(*sl2e)); - if ( s ) AUDIT_FAIL(2, "%s", s); + shadow_l2e_get_flags(*sl2e)); + if ( s ) + AUDIT_FAIL(2, "%s", s); if ( SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_MFNS ) { gfn = guest_l2e_get_gfn(*gl2e); mfn = shadow_l2e_get_mfn(*sl2e); gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE) - ? get_fl1_shadow_status(d, gfn) - : get_shadow_status(d, - get_gfn_query_unlocked(d, gfn_x(gfn), - &p2mt), SH_type_l1_shadow); + ? get_fl1_shadow_status(d, gfn) + : get_shadow_status( + d, get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt), + SH_type_l1_shadow); if ( !mfn_eq(gmfn, mfn) ) - AUDIT_FAIL(2, "bad translation: gfn %" SH_PRI_gfn - " (--> %" PRI_mfn ")" - " --> %" PRI_mfn " != mfn %" PRI_mfn, - gfn_x(gfn), - (guest_l2e_get_flags(*gl2e) & _PAGE_PSE) ? 0 - : mfn_x(get_gfn_query_unlocked(d, - gfn_x(gfn), &p2mt)), mfn_x(gmfn), mfn_x(mfn)); + AUDIT_FAIL( + 2, + "bad translation: gfn %" SH_PRI_gfn " (--> %" PRI_mfn ")" + " --> %" PRI_mfn " != mfn %" PRI_mfn, + gfn_x(gfn), + (guest_l2e_get_flags(*gl2e) & _PAGE_PSE) + ? 0 + : mfn_x(get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt)), + mfn_x(gmfn), mfn_x(mfn)); } }); unmap_domain_page(gp); @@ -4827,7 +4737,7 @@ int sh_audit_l3_table(struct vcpu *v, mfn_t sl3mfn, mfn_t x) ASSERT(mfn_to_page(sl3mfn)->u.sh.head); gl3mfn = backpointer(mfn_to_page(sl3mfn)); -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* Only L1's may be out of sync. 
*/ if ( page_is_out_of_sync(mfn_to_page(gl3mfn)) ) AUDIT_FAIL_MIN(3, "gmfn %lx is out of sync", mfn_x(gl3mfn)); @@ -4835,26 +4745,25 @@ int sh_audit_l3_table(struct vcpu *v, mfn_t sl3mfn, mfn_t x) gl3e = gp = map_domain_page(gl3mfn); SHADOW_FOREACH_L3E(sl3mfn, sl3e, &gl3e, done, { - s = sh_audit_flags(v, 3, guest_l3e_get_flags(*gl3e), - shadow_l3e_get_flags(*sl3e)); - if ( s ) AUDIT_FAIL(3, "%s", s); + shadow_l3e_get_flags(*sl3e)); + if ( s ) + AUDIT_FAIL(3, "%s", s); if ( SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_MFNS ) { gfn = guest_l3e_get_gfn(*gl3e); mfn = shadow_l3e_get_mfn(*sl3e); - gmfn = get_shadow_status(d, get_gfn_query_unlocked( - d, gfn_x(gfn), &p2mt), - ((GUEST_PAGING_LEVELS == 3 || - is_pv_32bit_domain(d)) - && !shadow_mode_external(d) - && (guest_index(gl3e) % 4) == 3) - ? SH_type_l2h_shadow - : SH_type_l2_shadow); + gmfn = get_shadow_status( + d, get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt), + ((GUEST_PAGING_LEVELS == 3 || is_pv_32bit_domain(d)) && + !shadow_mode_external(d) && (guest_index(gl3e) % 4) == 3) + ? SH_type_l2h_shadow + : SH_type_l2_shadow); if ( !mfn_eq(gmfn, mfn) ) - AUDIT_FAIL(3, "bad translation: gfn %" SH_PRI_gfn - " --> %" PRI_mfn " != mfn %" PRI_mfn, + AUDIT_FAIL(3, + "bad translation: gfn %" SH_PRI_gfn " --> %" PRI_mfn + " != mfn %" PRI_mfn, gfn_x(gfn), mfn_x(gmfn), mfn_x(mfn)); } }); @@ -4877,29 +4786,30 @@ int sh_audit_l4_table(struct vcpu *v, mfn_t sl4mfn, mfn_t x) ASSERT(mfn_to_page(sl4mfn)->u.sh.head); gl4mfn = backpointer(mfn_to_page(sl4mfn)); -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) +#if ( SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) /* Only L1's may be out of sync. */ if ( page_is_out_of_sync(mfn_to_page(gl4mfn)) ) AUDIT_FAIL_MIN(4, "gmfn %lx is out of sync", mfn_x(gl4mfn)); #endif gl4e = gp = map_domain_page(gl4mfn); - SHADOW_FOREACH_L4E(sl4mfn, sl4e, &gl4e, done, d, - { + SHADOW_FOREACH_L4E(sl4mfn, sl4e, &gl4e, done, d, { s = sh_audit_flags(v, 4, guest_l4e_get_flags(*gl4e), - shadow_l4e_get_flags(*sl4e)); - if ( s ) AUDIT_FAIL(4, "%s", s); + shadow_l4e_get_flags(*sl4e)); + if ( s ) + AUDIT_FAIL(4, "%s", s); if ( SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_MFNS ) { gfn = guest_l4e_get_gfn(*gl4e); mfn = shadow_l4e_get_mfn(*sl4e); - gmfn = get_shadow_status(d, get_gfn_query_unlocked( - d, gfn_x(gfn), &p2mt), - SH_type_l3_shadow); + gmfn = get_shadow_status( + d, get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt), + SH_type_l3_shadow); if ( !mfn_eq(gmfn, mfn) ) - AUDIT_FAIL(4, "bad translation: gfn %" SH_PRI_gfn - " --> %" PRI_mfn " != mfn %" PRI_mfn, + AUDIT_FAIL(4, + "bad translation: gfn %" SH_PRI_gfn " --> %" PRI_mfn + " != mfn %" PRI_mfn, gfn_x(gfn), mfn_x(gmfn), mfn_x(mfn)); } }); @@ -4908,7 +4818,6 @@ int sh_audit_l4_table(struct vcpu *v, mfn_t sl4mfn, mfn_t x) } #endif /* GUEST_PAGING_LEVELS >= 4 */ - #undef AUDIT_FAIL #endif /* Audit code */ @@ -4917,24 +4826,24 @@ int sh_audit_l4_table(struct vcpu *v, mfn_t sl4mfn, mfn_t x) /* Entry points into this mode of the shadow code. * This will all be mangled by the preprocessor to uniquify everything. 
*/ const struct paging_mode sh_paging_mode = { - .page_fault = sh_page_fault, - .invlpg = sh_invlpg, - .gva_to_gfn = sh_gva_to_gfn, - .update_cr3 = sh_update_cr3, - .update_paging_modes = shadow_update_paging_modes, - .write_p2m_entry = shadow_write_p2m_entry, - .guest_levels = GUEST_PAGING_LEVELS, - .shadow.detach_old_tables = sh_detach_old_tables, - .shadow.write_guest_entry = sh_write_guest_entry, - .shadow.cmpxchg_guest_entry = sh_cmpxchg_guest_entry, - .shadow.make_monitor_table = sh_make_monitor_table, - .shadow.destroy_monitor_table = sh_destroy_monitor_table, + .page_fault = sh_page_fault, + .invlpg = sh_invlpg, + .gva_to_gfn = sh_gva_to_gfn, + .update_cr3 = sh_update_cr3, + .update_paging_modes = shadow_update_paging_modes, + .write_p2m_entry = shadow_write_p2m_entry, + .guest_levels = GUEST_PAGING_LEVELS, + .shadow.detach_old_tables = sh_detach_old_tables, + .shadow.write_guest_entry = sh_write_guest_entry, + .shadow.cmpxchg_guest_entry = sh_cmpxchg_guest_entry, + .shadow.make_monitor_table = sh_make_monitor_table, + .shadow.destroy_monitor_table = sh_destroy_monitor_table, #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC - .shadow.guess_wrmap = sh_guess_wrmap, + .shadow.guess_wrmap = sh_guess_wrmap, #endif - .shadow.pagetable_dying = sh_pagetable_dying, - .shadow.trace_emul_write_val = trace_emulate_write_val, - .shadow.shadow_levels = SHADOW_PAGING_LEVELS, + .shadow.pagetable_dying = sh_pagetable_dying, + .shadow.trace_emul_write_val = trace_emulate_write_val, + .shadow.shadow_levels = SHADOW_PAGING_LEVELS, }; /* diff --git a/xen/arch/x86/mm/shadow/none.c b/xen/arch/x86/mm/shadow/none.c index a70888bd98..52fa154230 100644 --- a/xen/arch/x86/mm/shadow/none.c +++ b/xen/arch/x86/mm/shadow/none.c @@ -21,9 +21,9 @@ static void _clean_dirty_bitmap(struct domain *d) int shadow_domain_init(struct domain *d, unsigned int domcr_flags) { static const struct log_dirty_ops sh_none_ops = { - .enable = _enable_log_dirty, + .enable = _enable_log_dirty, .disable = _disable_log_dirty, - .clean = _clean_dirty_bitmap, + .clean = _clean_dirty_bitmap, }; paging_log_dirty_init(d, &sh_none_ops); @@ -69,12 +69,12 @@ static int _write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, } static const struct paging_mode sh_paging_none = { - .page_fault = _page_fault, - .invlpg = _invlpg, - .gva_to_gfn = _gva_to_gfn, - .update_cr3 = _update_cr3, - .update_paging_modes = _update_paging_modes, - .write_p2m_entry = _write_p2m_entry, + .page_fault = _page_fault, + .invlpg = _invlpg, + .gva_to_gfn = _gva_to_gfn, + .update_cr3 = _update_cr3, + .update_paging_modes = _update_paging_modes, + .write_p2m_entry = _write_p2m_entry, }; void shadow_vcpu_init(struct vcpu *v) diff --git a/xen/arch/x86/monitor.c b/xen/arch/x86/monitor.c index 3c42e21906..0f32e4611a 100644 --- a/xen/arch/x86/monitor.c +++ b/xen/arch/x86/monitor.c @@ -25,8 +25,8 @@ int arch_monitor_init_domain(struct domain *d) { if ( !d->arch.monitor.msr_bitmap ) - d->arch.monitor.msr_bitmap = xzalloc_array(struct monitor_msr_bitmap, - 2); + d->arch.monitor.msr_bitmap = + xzalloc_array(struct monitor_msr_bitmap, 2); if ( !d->arch.monitor.msr_bitmap ) return -ENOMEM; @@ -46,15 +46,15 @@ static unsigned long *monitor_bitmap_for_msr(const struct domain *d, u32 *msr) { ASSERT(d->arch.monitor.msr_bitmap && msr); - switch ( *msr ) + switch (*msr) { case 0 ... 0x1fff: BUILD_BUG_ON(sizeof(d->arch.monitor.msr_bitmap->low) * 8 <= 0x1fff); return d->arch.monitor.msr_bitmap->low; case 0x40000000 ... 
0x40001fff: - BUILD_BUG_ON( - sizeof(d->arch.monitor.msr_bitmap->hypervisor) * 8 <= 0x1fff); + BUILD_BUG_ON(sizeof(d->arch.monitor.msr_bitmap->hypervisor) * 8 <= + 0x1fff); *msr &= 0x1fff; return d->arch.monitor.msr_bitmap->hypervisor; @@ -146,7 +146,7 @@ int arch_monitor_domctl_event(struct domain *d, struct arch_domain *ad = &d->arch; bool requested_status = (XEN_DOMCTL_MONITOR_OP_ENABLE == mop->op); - switch ( mop->event ) + switch (mop->event) { case XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG: { @@ -180,7 +180,8 @@ int arch_monitor_domctl_event(struct domain *d, if ( requested_status ) { - ad->monitor.write_ctrlreg_mask[mop->u.mov_to_cr.index] = mop->u.mov_to_cr.bitmask; + ad->monitor.write_ctrlreg_mask[mop->u.mov_to_cr.index] = + mop->u.mov_to_cr.bitmask; ad->monitor.write_ctrlreg_enabled |= ctrlreg_bitmask; } else @@ -194,7 +195,7 @@ int arch_monitor_domctl_event(struct domain *d, { struct vcpu *v; /* Latches new CR3 or CR4 mask through CR0 code. */ - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) hvm_update_guest_cr(v, 0); } @@ -269,7 +270,7 @@ int arch_monitor_domctl_event(struct domain *d, domain_pause(d); ad->monitor.descriptor_access_enabled = requested_status; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) hvm_funcs.set_descriptor_access_exiting(v, requested_status); domain_unpause(d); @@ -298,9 +299,8 @@ int arch_monitor_domctl_event(struct domain *d, domain_pause(d); ad->monitor.debug_exception_enabled = requested_status; - ad->monitor.debug_exception_sync = requested_status ? - mop->u.debug_exception.sync : - 0; + ad->monitor.debug_exception_sync = + requested_status ? mop->u.debug_exception.sync : 0; hvm_set_icebp_interception(d, requested_status); diff --git a/xen/arch/x86/mpparse.c b/xen/arch/x86/mpparse.c index 16c93a935d..cc356bfe31 100644 --- a/xen/arch/x86/mpparse.c +++ b/xen/arch/x86/mpparse.c @@ -68,141 +68,150 @@ physid_mask_t phys_cpu_present_map; void __init set_nr_cpu_ids(unsigned int max_cpus) { - unsigned int tot_cpus = num_processors + disabled_cpus; - - if (!max_cpus) - max_cpus = tot_cpus; - if (max_cpus > NR_CPUS) - max_cpus = NR_CPUS; - else if (!max_cpus) - max_cpus = 1; - printk(XENLOG_INFO "SMP: Allowing %u CPUs (%d hotplug CPUs)\n", - max_cpus, max_t(int, max_cpus - num_processors, 0)); - - if (!park_offline_cpus) - tot_cpus = max_cpus; - nr_cpu_ids = min(tot_cpus, NR_CPUS + 0u); - if (park_offline_cpus && nr_cpu_ids < num_processors) - printk(XENLOG_WARNING "SMP: Cannot bring up %u further CPUs\n", - num_processors - nr_cpu_ids); + unsigned int tot_cpus = num_processors + disabled_cpus; + + if ( !max_cpus ) + max_cpus = tot_cpus; + if ( max_cpus > NR_CPUS ) + max_cpus = NR_CPUS; + else if ( !max_cpus ) + max_cpus = 1; + printk(XENLOG_INFO "SMP: Allowing %u CPUs (%d hotplug CPUs)\n", max_cpus, + max_t(int, max_cpus - num_processors, 0)); + + if ( !park_offline_cpus ) + tot_cpus = max_cpus; + nr_cpu_ids = min(tot_cpus, NR_CPUS + 0u); + if ( park_offline_cpus && nr_cpu_ids < num_processors ) + printk(XENLOG_WARNING "SMP: Cannot bring up %u further CPUs\n", + num_processors - nr_cpu_ids); #ifndef nr_cpumask_bits - nr_cpumask_bits = ROUNDUP(nr_cpu_ids, BITS_PER_LONG); - printk(XENLOG_DEBUG "NR_CPUS:%u nr_cpumask_bits:%u\n", - NR_CPUS, nr_cpumask_bits); + nr_cpumask_bits = ROUNDUP(nr_cpu_ids, BITS_PER_LONG); + printk(XENLOG_DEBUG "NR_CPUS:%u nr_cpumask_bits:%u\n", NR_CPUS, + nr_cpumask_bits); #endif } void __init set_nr_sockets(void) { - nr_sockets = last_physid(phys_cpu_present_map) - / boot_cpu_data.x86_max_cores - / boot_cpu_data.x86_num_siblings + 1; - if 
(disabled_cpus) - nr_sockets += (disabled_cpus - 1) - / boot_cpu_data.x86_max_cores - / boot_cpu_data.x86_num_siblings + 1; - printk(XENLOG_DEBUG "nr_sockets: %u\n", nr_sockets); + nr_sockets = last_physid(phys_cpu_present_map) / + boot_cpu_data.x86_max_cores / + boot_cpu_data.x86_num_siblings + + 1; + if ( disabled_cpus ) + nr_sockets += (disabled_cpus - 1) / boot_cpu_data.x86_max_cores / + boot_cpu_data.x86_num_siblings + + 1; + printk(XENLOG_DEBUG "nr_sockets: %u\n", nr_sockets); } /* * Intel MP BIOS table parsing routines: */ - /* * Checksum an MP configuration block. */ static int __init mpf_checksum(unsigned char *mp, int len) { - int sum = 0; + int sum = 0; - while (len--) - sum += *mp++; + while ( len-- ) + sum += *mp++; - return sum & 0xFF; + return sum & 0xFF; } /* Return xen's logical cpu_id of the new added cpu or <0 if error */ -static int MP_processor_info_x(struct mpc_config_processor *m, - u32 apicid, bool hotplug) +static int MP_processor_info_x(struct mpc_config_processor *m, u32 apicid, + bool hotplug) { - int ver, cpu = 0; - - if (!(m->mpc_cpuflag & CPU_ENABLED)) { - if (!hotplug) - ++disabled_cpus; - return -EINVAL; - } - - if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { - Dprintk(" Bootup CPU\n"); - boot_cpu_physical_apicid = apicid; - } - - ver = m->mpc_apicver; - - /* - * Validate version - */ - if (ver == 0x0) { - printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! " - "fixing up to 0x10. (tell your hw vendor)\n", - apicid); - ver = 0x10; - } - apic_version[apicid] = ver; - - set_apicid(apicid, &phys_cpu_present_map); - - if (num_processors >= nr_cpu_ids) { - printk(KERN_WARNING "WARNING: NR_CPUS limit of %u reached." - " Processor ignored.\n", nr_cpu_ids); - return -ENOSPC; - } - - if (num_processors >= 8 && hotplug - && genapic.name == apic_default.name) { - printk(KERN_WARNING "WARNING: CPUs limit of 8 reached." - " Processor ignored.\n"); - return -ENOSPC; - } - - /* Boot cpu has been marked present in smp_prepare_boot_cpu */ - if (!(m->mpc_cpuflag & CPU_BOOTPROCESSOR)) { - cpu = alloc_cpu_id(); - if (cpu < 0) { - printk(KERN_WARNING "WARNING: Can't alloc cpu_id." - " Processor with apicid %i ignored\n", apicid); - return cpu; - } - x86_cpu_to_apicid[cpu] = apicid; - cpumask_set_cpu(cpu, &cpu_present_map); - } - - if (++num_processors > 8) { - /* - * No need for processor or APIC checks: physical delivery - * (bigsmp) mode should always work. - */ - def_to_bigsmp = true; - } - - return cpu; + int ver, cpu = 0; + + if ( !(m->mpc_cpuflag & CPU_ENABLED) ) + { + if ( !hotplug ) + ++disabled_cpus; + return -EINVAL; + } + + if ( m->mpc_cpuflag & CPU_BOOTPROCESSOR ) + { + Dprintk(" Bootup CPU\n"); + boot_cpu_physical_apicid = apicid; + } + + ver = m->mpc_apicver; + + /* + * Validate version + */ + if ( ver == 0x0 ) + { + printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! " + "fixing up to 0x10. (tell your hw vendor)\n", + apicid); + ver = 0x10; + } + apic_version[apicid] = ver; + + set_apicid(apicid, &phys_cpu_present_map); + + if ( num_processors >= nr_cpu_ids ) + { + printk(KERN_WARNING "WARNING: NR_CPUS limit of %u reached." + " Processor ignored.\n", + nr_cpu_ids); + return -ENOSPC; + } + + if ( num_processors >= 8 && hotplug && genapic.name == apic_default.name ) + { + printk(KERN_WARNING "WARNING: CPUs limit of 8 reached." 
+ " Processor ignored.\n"); + return -ENOSPC; + } + + /* Boot cpu has been marked present in smp_prepare_boot_cpu */ + if ( !(m->mpc_cpuflag & CPU_BOOTPROCESSOR) ) + { + cpu = alloc_cpu_id(); + if ( cpu < 0 ) + { + printk(KERN_WARNING "WARNING: Can't alloc cpu_id." + " Processor with apicid %i ignored\n", + apicid); + return cpu; + } + x86_cpu_to_apicid[cpu] = apicid; + cpumask_set_cpu(cpu, &cpu_present_map); + } + + if ( ++num_processors > 8 ) + { + /* + * No need for processor or APIC checks: physical delivery + * (bigsmp) mode should always work. + */ + def_to_bigsmp = true; + } + + return cpu; } static int MP_processor_info(struct mpc_config_processor *m) { - return MP_processor_info_x(m, m->mpc_apicid, 0); + return MP_processor_info_x(m, m->mpc_apicid, 0); } -static void __init MP_bus_info (struct mpc_config_bus *m) +static void __init MP_bus_info(struct mpc_config_bus *m) { - char str[7]; + char str[7]; - memcpy(str, m->mpc_bustype, 6); - str[6] = 0; + memcpy(str, m->mpc_bustype, 6); + str[6] = 0; #if 0 /* size of mpc_busid (8 bits) makes this check unnecessary */ if (m->mpc_busid >= MAX_MP_BUSSES) { @@ -213,74 +222,84 @@ static void __init MP_bus_info (struct mpc_config_bus *m) } #endif - if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) { - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; - } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) { - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; - } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) { - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; - } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) { - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; - } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) { - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98; - } else { - printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); - } + if ( strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0 ) + { + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; + } + else if ( strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0 ) + { + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; + } + else if ( strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0 ) + { + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; + } + else if ( strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0 ) + { + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; + } + else if ( strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98) - 1) == 0 ) + { + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98; + } + else + { + printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); + } } -static void __init MP_ioapic_info (struct mpc_config_ioapic *m) +static void __init MP_ioapic_info(struct mpc_config_ioapic *m) { - if (!(m->mpc_flags & MPC_APIC_USABLE)) - return; - - printk(KERN_INFO "I/O APIC #%d Version %d at %#x.\n", - m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr); - if (nr_ioapics >= MAX_IO_APICS) { - printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n", - MAX_IO_APICS, nr_ioapics); - panic("Recompile kernel with bigger MAX_IO_APICS\n"); - } - if (!m->mpc_apicaddr) { - printk(KERN_ERR "WARNING: bogus zero I/O APIC address" - " found in MP table, skipping!\n"); - return; - } - mp_ioapics[nr_ioapics] = *m; - nr_ioapics++; + if ( !(m->mpc_flags & MPC_APIC_USABLE) ) + return; + + printk(KERN_INFO "I/O APIC #%d Version %d at %#x.\n", m->mpc_apicid, + m->mpc_apicver, m->mpc_apicaddr); + if ( nr_ioapics >= MAX_IO_APICS ) + { + printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n", + MAX_IO_APICS, 
nr_ioapics); + panic("Recompile kernel with bigger MAX_IO_APICS\n"); + } + if ( !m->mpc_apicaddr ) + { + printk(KERN_ERR "WARNING: bogus zero I/O APIC address" + " found in MP table, skipping!\n"); + return; + } + mp_ioapics[nr_ioapics] = *m; + nr_ioapics++; } -static void __init MP_intsrc_info (struct mpc_config_intsrc *m) +static void __init MP_intsrc_info(struct mpc_config_intsrc *m) { - mp_irqs [mp_irq_entries] = *m; - Dprintk("Int: type %d, pol %d, trig %d, bus %d," - " IRQ %02x, APIC ID %x, APIC INT %02x\n", - m->mpc_irqtype, m->mpc_irqflag & 3, - (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, - m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); - if (++mp_irq_entries == MAX_IRQ_SOURCES) - panic("Max # of irq sources exceeded\n"); + mp_irqs[mp_irq_entries] = *m; + Dprintk("Int: type %d, pol %d, trig %d, bus %d," + " IRQ %02x, APIC ID %x, APIC INT %02x\n", + m->mpc_irqtype, m->mpc_irqflag & 3, (m->mpc_irqflag >> 2) & 3, + m->mpc_srcbus, m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); + if ( ++mp_irq_entries == MAX_IRQ_SOURCES ) + panic("Max # of irq sources exceeded\n"); } -static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m) +static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m) { - Dprintk("Lint: type %d, pol %d, trig %d, bus %d," - " IRQ %02x, APIC ID %x, APIC LINT %02x\n", - m->mpc_irqtype, m->mpc_irqflag & 3, - (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid, - m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); - /* - * Well it seems all SMP boards in existence - * use ExtINT/LVT1 == LINT0 and - * NMI/LVT2 == LINT1 - the following check - * will show us if this assumptions is false. - * Until then we do not have to add baggage. - */ - if ((m->mpc_irqtype == mp_ExtINT) && - (m->mpc_destapiclint != 0)) - BUG(); - if ((m->mpc_irqtype == mp_NMI) && - (m->mpc_destapiclint != 1)) - BUG(); + Dprintk("Lint: type %d, pol %d, trig %d, bus %d," + " IRQ %02x, APIC ID %x, APIC LINT %02x\n", + m->mpc_irqtype, m->mpc_irqflag & 3, (m->mpc_irqflag >> 2) & 3, + m->mpc_srcbusid, m->mpc_srcbusirq, m->mpc_destapic, + m->mpc_destapiclint); + /* + * Well it seems all SMP boards in existence + * use ExtINT/LVT1 == LINT0 and + * NMI/LVT2 == LINT1 - the following check + * will show us if this assumptions is false. + * Until then we do not have to add baggage. 
+ */ + if ( (m->mpc_irqtype == mp_ExtINT) && (m->mpc_destapiclint != 0) ) + BUG(); + if ( (m->mpc_irqtype == mp_NMI) && (m->mpc_destapiclint != 1) ) + BUG(); } /* @@ -289,290 +308,298 @@ static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m) static int __init smp_read_mpc(struct mp_config_table *mpc) { - char str[16]; - char oem[10]; - int count=sizeof(*mpc); - unsigned char *mpt=((unsigned char *)mpc)+count; - - if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) { - printk(KERN_ERR "SMP mptable: bad signature [%#x]!\n", - *(u32 *)mpc->mpc_signature); - return 0; - } - if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) { - printk(KERN_ERR "SMP mptable: checksum error!\n"); - return 0; - } - if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) { - printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n", - mpc->mpc_spec); - return 0; - } - if (!mpc->mpc_lapic) { - printk(KERN_ERR "SMP mptable: null local APIC address!\n"); - return 0; - } - memcpy(oem,mpc->mpc_oem,8); - oem[8]=0; - printk(KERN_INFO "OEM ID: %s ",oem); - - memcpy(str,mpc->mpc_productid,12); - str[12]=0; - printk("Product ID: %s ",str); - - mps_oem_check(mpc, oem, str); - - printk("APIC at: %#x\n", mpc->mpc_lapic); - - /* - * Save the local APIC address (it might be non-default) -- but only - * if we're not using ACPI. - */ - if (!acpi_lapic) - mp_lapic_addr = mpc->mpc_lapic; - - /* - * Now process the configuration blocks. - */ - while (count < mpc->mpc_length) { - switch(*mpt) { - case MP_PROCESSOR: - { - struct mpc_config_processor *m= - (struct mpc_config_processor *)mpt; - - mpt += sizeof(*m); - count += sizeof(*m); - - /* ACPI may have already provided this data. */ - if (acpi_lapic) - break; - - printk("Processor #%02x %u:%u APIC version %u%s\n", - m->mpc_apicid, - MASK_EXTR(m->mpc_cpufeature, - CPU_FAMILY_MASK), - MASK_EXTR(m->mpc_cpufeature, - CPU_MODEL_MASK), - m->mpc_apicver, - m->mpc_cpuflag & CPU_ENABLED - ? 
"" : " [disabled]"); - MP_processor_info(m); - break; - } - case MP_BUS: - { - struct mpc_config_bus *m= - (struct mpc_config_bus *)mpt; - MP_bus_info(m); - mpt += sizeof(*m); - count += sizeof(*m); - break; - } - case MP_IOAPIC: - { - struct mpc_config_ioapic *m= - (struct mpc_config_ioapic *)mpt; - MP_ioapic_info(m); - mpt+=sizeof(*m); - count+=sizeof(*m); - break; - } - case MP_INTSRC: - { - struct mpc_config_intsrc *m= - (struct mpc_config_intsrc *)mpt; - - MP_intsrc_info(m); - mpt+=sizeof(*m); - count+=sizeof(*m); - break; - } - case MP_LINTSRC: - { - struct mpc_config_lintsrc *m= - (struct mpc_config_lintsrc *)mpt; - MP_lintsrc_info(m); - mpt+=sizeof(*m); - count+=sizeof(*m); - break; - } - default: - { - count = mpc->mpc_length; - break; - } - } - } - clustered_apic_check(); - if (!num_processors) - printk(KERN_ERR "SMP mptable: no processors registered!\n"); - return num_processors; + char str[16]; + char oem[10]; + int count = sizeof(*mpc); + unsigned char *mpt = ((unsigned char *)mpc) + count; + + if ( memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4) ) + { + printk(KERN_ERR "SMP mptable: bad signature [%#x]!\n", + *(u32 *)mpc->mpc_signature); + return 0; + } + if ( mpf_checksum((unsigned char *)mpc, mpc->mpc_length) ) + { + printk(KERN_ERR "SMP mptable: checksum error!\n"); + return 0; + } + if ( mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04 ) + { + printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n", + mpc->mpc_spec); + return 0; + } + if ( !mpc->mpc_lapic ) + { + printk(KERN_ERR "SMP mptable: null local APIC address!\n"); + return 0; + } + memcpy(oem, mpc->mpc_oem, 8); + oem[8] = 0; + printk(KERN_INFO "OEM ID: %s ", oem); + + memcpy(str, mpc->mpc_productid, 12); + str[12] = 0; + printk("Product ID: %s ", str); + + mps_oem_check(mpc, oem, str); + + printk("APIC at: %#x\n", mpc->mpc_lapic); + + /* + * Save the local APIC address (it might be non-default) -- but only + * if we're not using ACPI. + */ + if ( !acpi_lapic ) + mp_lapic_addr = mpc->mpc_lapic; + + /* + * Now process the configuration blocks. + */ + while ( count < mpc->mpc_length ) + { + switch (*mpt) + { + case MP_PROCESSOR: + { + struct mpc_config_processor *m = (struct mpc_config_processor *)mpt; + + mpt += sizeof(*m); + count += sizeof(*m); + + /* ACPI may have already provided this data. */ + if ( acpi_lapic ) + break; + + printk("Processor #%02x %u:%u APIC version %u%s\n", m->mpc_apicid, + MASK_EXTR(m->mpc_cpufeature, CPU_FAMILY_MASK), + MASK_EXTR(m->mpc_cpufeature, CPU_MODEL_MASK), m->mpc_apicver, + m->mpc_cpuflag & CPU_ENABLED ? 
"" : " [disabled]"); + MP_processor_info(m); + break; + } + case MP_BUS: + { + struct mpc_config_bus *m = (struct mpc_config_bus *)mpt; + MP_bus_info(m); + mpt += sizeof(*m); + count += sizeof(*m); + break; + } + case MP_IOAPIC: + { + struct mpc_config_ioapic *m = (struct mpc_config_ioapic *)mpt; + MP_ioapic_info(m); + mpt += sizeof(*m); + count += sizeof(*m); + break; + } + case MP_INTSRC: + { + struct mpc_config_intsrc *m = (struct mpc_config_intsrc *)mpt; + + MP_intsrc_info(m); + mpt += sizeof(*m); + count += sizeof(*m); + break; + } + case MP_LINTSRC: + { + struct mpc_config_lintsrc *m = (struct mpc_config_lintsrc *)mpt; + MP_lintsrc_info(m); + mpt += sizeof(*m); + count += sizeof(*m); + break; + } + default: + { + count = mpc->mpc_length; + break; + } + } + } + clustered_apic_check(); + if ( !num_processors ) + printk(KERN_ERR "SMP mptable: no processors registered!\n"); + return num_processors; } static int __init ELCR_trigger(unsigned int irq) { - unsigned int port; + unsigned int port; - port = 0x4d0 + (irq >> 3); - return (inb(port) >> (irq & 7)) & 1; + port = 0x4d0 + (irq >> 3); + return (inb(port) >> (irq & 7)) & 1; } static void __init construct_default_ioirq_mptable(int mpc_default_type) { - struct mpc_config_intsrc intsrc; - int i; - int ELCR_fallback = 0; - - intsrc.mpc_type = MP_INTSRC; - intsrc.mpc_irqflag = 0; /* conforming */ - intsrc.mpc_srcbus = 0; - intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid; - - intsrc.mpc_irqtype = mp_INT; - - /* - * If true, we have an ISA/PCI system with no IRQ entries - * in the MP table. To prevent the PCI interrupts from being set up - * incorrectly, we try to use the ELCR. The sanity check to see if - * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can - * never be level sensitive, so we simply see if the ELCR agrees. - * If it does, we assume it's valid. - */ - if (mpc_default_type == 5) { - printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n"); - - if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13)) - printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n"); - else { - printk(KERN_INFO "Using ELCR to identify PCI interrupts\n"); - ELCR_fallback = 1; - } - } - - for (i = 0; platform_legacy_irq(i); i++) { - switch (mpc_default_type) { - case 2: - if (i == 0 || i == 13) - continue; /* IRQ0 & IRQ13 not connected */ - /* fall through */ - default: - if (i == 2) - continue; /* IRQ2 is never connected */ - } - - if (ELCR_fallback) { - /* - * If the ELCR indicates a level-sensitive interrupt, we - * copy that information over to the MP table in the - * irqflag field (level sensitive, active high polarity). - */ - if (ELCR_trigger(i)) - intsrc.mpc_irqflag = 13; - else - intsrc.mpc_irqflag = 0; - } - - intsrc.mpc_srcbusirq = i; - intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ - MP_intsrc_info(&intsrc); - } - - intsrc.mpc_irqtype = mp_ExtINT; - intsrc.mpc_srcbusirq = 0; - intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ - MP_intsrc_info(&intsrc); + struct mpc_config_intsrc intsrc; + int i; + int ELCR_fallback = 0; + + intsrc.mpc_type = MP_INTSRC; + intsrc.mpc_irqflag = 0; /* conforming */ + intsrc.mpc_srcbus = 0; + intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid; + + intsrc.mpc_irqtype = mp_INT; + + /* + * If true, we have an ISA/PCI system with no IRQ entries + * in the MP table. To prevent the PCI interrupts from being set up + * incorrectly, we try to use the ELCR. 
The sanity check to see if + * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can + * never be level sensitive, so we simply see if the ELCR agrees. + * If it does, we assume it's valid. + */ + if ( mpc_default_type == 5 ) + { + printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling " + "back to ELCR\n"); + + if ( ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || + ELCR_trigger(13) ) + printk(KERN_WARNING + "ELCR contains invalid data... not using ELCR\n"); + else + { + printk(KERN_INFO "Using ELCR to identify PCI interrupts\n"); + ELCR_fallback = 1; + } + } + + for ( i = 0; platform_legacy_irq(i); i++ ) + { + switch (mpc_default_type) + { + case 2: + if ( i == 0 || i == 13 ) + continue; /* IRQ0 & IRQ13 not connected */ + /* fall through */ + default: + if ( i == 2 ) + continue; /* IRQ2 is never connected */ + } + + if ( ELCR_fallback ) + { + /* + * If the ELCR indicates a level-sensitive interrupt, we + * copy that information over to the MP table in the + * irqflag field (level sensitive, active high polarity). + */ + if ( ELCR_trigger(i) ) + intsrc.mpc_irqflag = 13; + else + intsrc.mpc_irqflag = 0; + } + + intsrc.mpc_srcbusirq = i; + intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ + MP_intsrc_info(&intsrc); + } + + intsrc.mpc_irqtype = mp_ExtINT; + intsrc.mpc_srcbusirq = 0; + intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ + MP_intsrc_info(&intsrc); } static inline void __init construct_default_ISA_mptable(int mpc_default_type) { - struct mpc_config_processor processor; - struct mpc_config_bus bus; - struct mpc_config_ioapic ioapic; - struct mpc_config_lintsrc lintsrc; - int linttypes[2] = { mp_ExtINT, mp_NMI }; - int i; - - /* - * local APIC has default address - */ - mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; - - /* - * 2 CPUs, numbered 0 & 1. - */ - processor.mpc_type = MP_PROCESSOR; - /* Either an integrated APIC or a discrete 82489DX. */ - processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; - processor.mpc_cpuflag = CPU_ENABLED; - processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | - (boot_cpu_data.x86_model << 4) | - boot_cpu_data.x86_mask; - processor.mpc_featureflag = - boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_FPU)]; - processor.mpc_reserved[0] = 0; - processor.mpc_reserved[1] = 0; - for (i = 0; i < 2; i++) { - processor.mpc_apicid = i; - MP_processor_info(&processor); - } - - bus.mpc_type = MP_BUS; - bus.mpc_busid = 0; - switch (mpc_default_type) { - default: - printk("???\n"); - printk(KERN_ERR "Unknown standard configuration %d\n", - mpc_default_type); - /* fall through */ - case 1: - case 5: - memcpy(bus.mpc_bustype, "ISA ", 6); - break; - case 2: - case 6: - case 3: - memcpy(bus.mpc_bustype, "EISA ", 6); - break; - case 4: - case 7: - memcpy(bus.mpc_bustype, "MCA ", 6); - } - MP_bus_info(&bus); - if (mpc_default_type > 4) { - bus.mpc_busid = 1; - memcpy(bus.mpc_bustype, "PCI ", 6); - MP_bus_info(&bus); - } - - ioapic.mpc_type = MP_IOAPIC; - ioapic.mpc_apicid = 2; - ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; - ioapic.mpc_flags = MPC_APIC_USABLE; - ioapic.mpc_apicaddr = 0xFEC00000; - MP_ioapic_info(&ioapic); - - /* - * We set up most of the low 16 IO-APIC pins according to MPS rules. 
- */ - construct_default_ioirq_mptable(mpc_default_type); - - lintsrc.mpc_type = MP_LINTSRC; - lintsrc.mpc_irqflag = 0; /* conforming */ - lintsrc.mpc_srcbusid = 0; - lintsrc.mpc_srcbusirq = 0; - lintsrc.mpc_destapic = MP_APIC_ALL; - for (i = 0; i < 2; i++) { - lintsrc.mpc_irqtype = linttypes[i]; - lintsrc.mpc_destapiclint = i; - MP_lintsrc_info(&lintsrc); - } + struct mpc_config_processor processor; + struct mpc_config_bus bus; + struct mpc_config_ioapic ioapic; + struct mpc_config_lintsrc lintsrc; + int linttypes[2] = {mp_ExtINT, mp_NMI}; + int i; + + /* + * local APIC has default address + */ + mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; + + /* + * 2 CPUs, numbered 0 & 1. + */ + processor.mpc_type = MP_PROCESSOR; + /* Either an integrated APIC or a discrete 82489DX. */ + processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; + processor.mpc_cpuflag = CPU_ENABLED; + processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | + (boot_cpu_data.x86_model << 4) | + boot_cpu_data.x86_mask; + processor.mpc_featureflag = + boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_FPU)]; + processor.mpc_reserved[0] = 0; + processor.mpc_reserved[1] = 0; + for ( i = 0; i < 2; i++ ) + { + processor.mpc_apicid = i; + MP_processor_info(&processor); + } + + bus.mpc_type = MP_BUS; + bus.mpc_busid = 0; + switch (mpc_default_type) + { + default: + printk("???\n"); + printk(KERN_ERR "Unknown standard configuration %d\n", + mpc_default_type); + /* fall through */ + case 1: + case 5: + memcpy(bus.mpc_bustype, "ISA ", 6); + break; + case 2: + case 6: + case 3: + memcpy(bus.mpc_bustype, "EISA ", 6); + break; + case 4: + case 7: + memcpy(bus.mpc_bustype, "MCA ", 6); + } + MP_bus_info(&bus); + if ( mpc_default_type > 4 ) + { + bus.mpc_busid = 1; + memcpy(bus.mpc_bustype, "PCI ", 6); + MP_bus_info(&bus); + } + + ioapic.mpc_type = MP_IOAPIC; + ioapic.mpc_apicid = 2; + ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; + ioapic.mpc_flags = MPC_APIC_USABLE; + ioapic.mpc_apicaddr = 0xFEC00000; + MP_ioapic_info(&ioapic); + + /* + * We set up most of the low 16 IO-APIC pins according to MPS rules. + */ + construct_default_ioirq_mptable(mpc_default_type); + + lintsrc.mpc_type = MP_LINTSRC; + lintsrc.mpc_irqflag = 0; /* conforming */ + lintsrc.mpc_srcbusid = 0; + lintsrc.mpc_srcbusirq = 0; + lintsrc.mpc_destapic = MP_APIC_ALL; + for ( i = 0; i < 2; i++ ) + { + lintsrc.mpc_irqtype = linttypes[i]; + lintsrc.mpc_destapiclint = i; + MP_lintsrc_info(&lintsrc); + } } static __init void efi_unmap_mpf(void) { - if (efi_enabled(EFI_BOOT)) - clear_fixmap(FIX_EFI_MPF); + if ( efi_enabled(EFI_BOOT) ) + clear_fixmap(FIX_EFI_MPF); } static struct intel_mp_floating *__initdata mpf_found; @@ -580,101 +607,111 @@ static struct intel_mp_floating *__initdata mpf_found; /* * Scan the memory blocks for an SMP configuration block. */ -void __init get_smp_config (void) +void __init get_smp_config(void) { - struct intel_mp_floating *mpf = mpf_found; - - /* - * ACPI supports both logical (e.g. Hyper-Threading) and physical - * processors, where MPS only supports physical. 
- */ - if (acpi_lapic && acpi_ioapic) { - efi_unmap_mpf(); - printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n"); - return; - } - else if (acpi_lapic) - printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n"); - - printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); - if (mpf->mpf_feature2 & (1<<7)) { - printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); - pic_mode = true; - } else { - printk(KERN_INFO " Virtual Wire compatibility mode.\n"); - pic_mode = false; - } - - /* - * Now see if we need to read further. - */ - if (mpf->mpf_feature1 != 0) { - - printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1); - construct_default_ISA_mptable(mpf->mpf_feature1); - - } else if (mpf->mpf_physptr) { - - /* - * Read the physical hardware table. Anything here will - * override the defaults. - */ - if (!smp_read_mpc((void *)(unsigned long)mpf->mpf_physptr)) { - efi_unmap_mpf(); - smp_found_config = false; - printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); - printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n"); - return; - } - /* - * If there are no explicit MP IRQ entries, then we are - * broken. We set up most of the low 16 IO-APIC pins to - * ISA defaults and hope it will work. - */ - if (!mp_irq_entries) { - struct mpc_config_bus bus; - - printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n"); - - bus.mpc_type = MP_BUS; - bus.mpc_busid = 0; - memcpy(bus.mpc_bustype, "ISA ", 6); - MP_bus_info(&bus); - - construct_default_ioirq_mptable(0); - } - - } else - BUG(); - - efi_unmap_mpf(); - - printk(KERN_INFO "Processors: %d\n", num_processors); - /* - * Only use the first configuration found. - */ + struct intel_mp_floating *mpf = mpf_found; + + /* + * ACPI supports both logical (e.g. Hyper-Threading) and physical + * processors, where MPS only supports physical. + */ + if ( acpi_lapic && acpi_ioapic ) + { + efi_unmap_mpf(); + printk(KERN_INFO + "Using ACPI (MADT) for SMP configuration information\n"); + return; + } + else if ( acpi_lapic ) + printk(KERN_INFO + "Using ACPI for processor (LAPIC) configuration information\n"); + + printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", + mpf->mpf_specification); + if ( mpf->mpf_feature2 & (1 << 7) ) + { + printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); + pic_mode = true; + } + else + { + printk(KERN_INFO " Virtual Wire compatibility mode.\n"); + pic_mode = false; + } + + /* + * Now see if we need to read further. + */ + if ( mpf->mpf_feature1 != 0 ) + { + printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1); + construct_default_ISA_mptable(mpf->mpf_feature1); + } + else if ( mpf->mpf_physptr ) + { + /* + * Read the physical hardware table. Anything here will + * override the defaults. + */ + if ( !smp_read_mpc((void *)(unsigned long)mpf->mpf_physptr) ) + { + efi_unmap_mpf(); + smp_found_config = false; + printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); + printk(KERN_ERR + "... disabling SMP support. (tell your hw vendor)\n"); + return; + } + /* + * If there are no explicit MP IRQ entries, then we are + * broken. We set up most of the low 16 IO-APIC pins to + * ISA defaults and hope it will work. + */ + if ( !mp_irq_entries ) + { + struct mpc_config_bus bus; + + printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default " + "mptable. 
(tell your hw vendor)\n"); + + bus.mpc_type = MP_BUS; + bus.mpc_busid = 0; + memcpy(bus.mpc_bustype, "ISA ", 6); + MP_bus_info(&bus); + + construct_default_ioirq_mptable(0); + } + } + else + BUG(); + + efi_unmap_mpf(); + + printk(KERN_INFO "Processors: %d\n", num_processors); + /* + * Only use the first configuration found. + */ } -static int __init smp_scan_config (unsigned long base, unsigned long length) +static int __init smp_scan_config(unsigned long base, unsigned long length) { - unsigned int *bp = maddr_to_virt(base); - struct intel_mp_floating *mpf; - - Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length); - if (sizeof(*mpf) != 16) - printk("Error: MPF size\n"); - - while (length > 0) { - mpf = (struct intel_mp_floating *)bp; - if ((*bp == SMP_MAGIC_IDENT) && - (mpf->mpf_length == 1) && - !mpf_checksum((unsigned char *)bp, 16) && - ((mpf->mpf_specification == 1) - || (mpf->mpf_specification == 4)) ) { - - smp_found_config = true; - printk(KERN_INFO "found SMP MP-table at %08lx\n", - virt_to_maddr(mpf)); + unsigned int *bp = maddr_to_virt(base); + struct intel_mp_floating *mpf; + + Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length); + if ( sizeof(*mpf) != 16 ) + printk("Error: MPF size\n"); + + while ( length > 0 ) + { + mpf = (struct intel_mp_floating *)bp; + if ( (*bp == SMP_MAGIC_IDENT) && (mpf->mpf_length == 1) && + !mpf_checksum((unsigned char *)bp, 16) && + ((mpf->mpf_specification == 1) || (mpf->mpf_specification == 4)) ) + { + smp_found_config = true; + printk(KERN_INFO "found SMP MP-table at %08lx\n", + virt_to_maddr(mpf)); #if 0 reserve_bootmem(virt_to_maddr(mpf), PAGE_SIZE); if (mpf->mpf_physptr) { @@ -694,78 +731,78 @@ static int __init smp_scan_config (unsigned long base, unsigned long length) reserve_bootmem(mpf->mpf_physptr, size); } #endif - mpf_found = mpf; - return 1; - } - bp += 4; - length -= 16; - } - return 0; + mpf_found = mpf; + return 1; + } + bp += 4; + length -= 16; + } + return 0; } static void __init efi_check_config(void) { - struct intel_mp_floating *mpf; - - if (efi.mps == EFI_INVALID_TABLE_ADDR) - return; - - __set_fixmap(FIX_EFI_MPF, PFN_DOWN(efi.mps), __PAGE_HYPERVISOR); - mpf = fix_to_virt(FIX_EFI_MPF) + ((long)efi.mps & (PAGE_SIZE-1)); - - if (memcmp(mpf->mpf_signature, "_MP_", 4) == 0 && - mpf->mpf_length == 1 && - mpf_checksum((void *)mpf, 16) && - (mpf->mpf_specification == 1 || mpf->mpf_specification == 4)) { - smp_found_config = true; - printk(KERN_INFO "SMP MP-table at %08lx\n", efi.mps); - mpf_found = mpf; - } - else - efi_unmap_mpf(); + struct intel_mp_floating *mpf; + + if ( efi.mps == EFI_INVALID_TABLE_ADDR ) + return; + + __set_fixmap(FIX_EFI_MPF, PFN_DOWN(efi.mps), __PAGE_HYPERVISOR); + mpf = fix_to_virt(FIX_EFI_MPF) + ((long)efi.mps & (PAGE_SIZE - 1)); + + if ( memcmp(mpf->mpf_signature, "_MP_", 4) == 0 && mpf->mpf_length == 1 && + mpf_checksum((void *)mpf, 16) && + (mpf->mpf_specification == 1 || mpf->mpf_specification == 4) ) + { + smp_found_config = true; + printk(KERN_INFO "SMP MP-table at %08lx\n", efi.mps); + mpf_found = mpf; + } + else + efi_unmap_mpf(); } -void __init find_smp_config (void) +void __init find_smp_config(void) { - unsigned int address; - - if (efi_enabled(EFI_BOOT)) { - efi_check_config(); - return; - } - - /* - * FIXME: Linux assumes you have 640K of base ram.. - * this continues the error... 
- * - * 1) Scan the bottom 1K for a signature - * 2) Scan the top 1K of base RAM - * 3) Scan the 64K of bios - */ - if (smp_scan_config(0x0,0x400) || - smp_scan_config(639*0x400,0x400) || - smp_scan_config(0xF0000,0x10000)) - return; - /* - * If it is an SMP machine we should know now, unless the - * configuration is in an EISA/MCA bus machine with an - * extended bios data area. - * - * there is a real-mode segmented pointer pointing to the - * 4K EBDA area at 0x40E, calculate and scan it here. - * - * NOTE! There are Linux loaders that will corrupt the EBDA - * area, and as such this kind of SMP config may be less - * trustworthy, simply because the SMP table may have been - * stomped on during early boot. These loaders are buggy and - * should be fixed. - * - * MP1.4 SPEC states to only scan first 1K of 4K EBDA. - */ - - address = get_bios_ebda(); - if (address) - smp_scan_config(address, 0x400); + unsigned int address; + + if ( efi_enabled(EFI_BOOT) ) + { + efi_check_config(); + return; + } + + /* + * FIXME: Linux assumes you have 640K of base ram.. + * this continues the error... + * + * 1) Scan the bottom 1K for a signature + * 2) Scan the top 1K of base RAM + * 3) Scan the 64K of bios + */ + if ( smp_scan_config(0x0, 0x400) || smp_scan_config(639 * 0x400, 0x400) || + smp_scan_config(0xF0000, 0x10000) ) + return; + /* + * If it is an SMP machine we should know now, unless the + * configuration is in an EISA/MCA bus machine with an + * extended bios data area. + * + * there is a real-mode segmented pointer pointing to the + * 4K EBDA area at 0x40E, calculate and scan it here. + * + * NOTE! There are Linux loaders that will corrupt the EBDA + * area, and as such this kind of SMP config may be less + * trustworthy, simply because the SMP table may have been + * stomped on during early boot. These loaders are buggy and + * should be fixed. + * + * MP1.4 SPEC states to only scan first 1K of 4K EBDA. + */ + + address = get_bios_ebda(); + if ( address ) + smp_scan_config(address, 0x400); } /* -------------------------------------------------------------------------- @@ -774,334 +811,335 @@ void __init find_smp_config (void) #ifdef CONFIG_ACPI -void __init mp_register_lapic_address ( - u64 address) +void __init mp_register_lapic_address(u64 address) { - if (!x2apic_enabled) { - mp_lapic_addr = (unsigned long) address; - set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); - } + if ( !x2apic_enabled ) + { + mp_lapic_addr = (unsigned long)address; + set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); + } - if (boot_cpu_physical_apicid == -1U) - boot_cpu_physical_apicid = get_apic_id(); + if ( boot_cpu_physical_apicid == -1U ) + boot_cpu_physical_apicid = get_apic_id(); - Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid); + Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid); } - int mp_register_lapic(u32 id, bool enabled, bool hotplug) { - struct mpc_config_processor processor = { - .mpc_type = MP_PROCESSOR, - /* Note: We don't fill in fields not consumed anywhere. */ - .mpc_apicid = id, - .mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR)), - .mpc_cpuflag = (enabled ? CPU_ENABLED : 0) | - (id == boot_cpu_physical_apicid ? - CPU_BOOTPROCESSOR : 0), - }; - - if (MAX_APICS <= id) { - printk(KERN_WARNING "Processor #%d invalid (max %d)\n", - id, MAX_APICS); - return -EINVAL; - } - - return MP_processor_info_x(&processor, id, hotplug); + struct mpc_config_processor processor = { + .mpc_type = MP_PROCESSOR, + /* Note: We don't fill in fields not consumed anywhere. 
*/ + .mpc_apicid = id, + .mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR)), + .mpc_cpuflag = (enabled ? CPU_ENABLED : 0) | + (id == boot_cpu_physical_apicid ? CPU_BOOTPROCESSOR : 0), + }; + + if ( MAX_APICS <= id ) + { + printk(KERN_WARNING "Processor #%d invalid (max %d)\n", id, MAX_APICS); + return -EINVAL; + } + + return MP_processor_info_x(&processor, id, hotplug); } void mp_unregister_lapic(uint32_t apic_id, uint32_t cpu) { - if (!cpu || (apic_id == boot_cpu_physical_apicid)) - return; + if ( !cpu || (apic_id == boot_cpu_physical_apicid) ) + return; - if (x86_cpu_to_apicid[cpu] != apic_id) - return; + if ( x86_cpu_to_apicid[cpu] != apic_id ) + return; - physid_clear(apic_id, phys_cpu_present_map); + physid_clear(apic_id, phys_cpu_present_map); - x86_cpu_to_apicid[cpu] = BAD_APICID; - cpumask_clear_cpu(cpu, &cpu_present_map); + x86_cpu_to_apicid[cpu] = BAD_APICID; + cpumask_clear_cpu(cpu, &cpu_present_map); } -#define MP_ISA_BUS 0 -#define MP_MAX_IOAPIC_PIN 127 +#define MP_ISA_BUS 0 +#define MP_MAX_IOAPIC_PIN 127 -static struct mp_ioapic_routing { - int gsi_base; - int gsi_end; - unsigned long pin_programmed[BITS_TO_LONGS(MP_MAX_IOAPIC_PIN + 1)]; +static struct mp_ioapic_routing +{ + int gsi_base; + int gsi_end; + unsigned long pin_programmed[BITS_TO_LONGS(MP_MAX_IOAPIC_PIN + 1)]; } mp_ioapic_routing[MAX_IO_APICS]; - -static int mp_find_ioapic ( - int gsi) +static int mp_find_ioapic(int gsi) { - unsigned int i; + unsigned int i; - /* Find the IOAPIC that manages this GSI. */ - for (i = 0; i < nr_ioapics; i++) { - if ((gsi >= mp_ioapic_routing[i].gsi_base) - && (gsi <= mp_ioapic_routing[i].gsi_end)) - return i; - } + /* Find the IOAPIC that manages this GSI. */ + for ( i = 0; i < nr_ioapics; i++ ) + { + if ( (gsi >= mp_ioapic_routing[i].gsi_base) && + (gsi <= mp_ioapic_routing[i].gsi_end) ) + return i; + } - printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); + printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); - return -1; + return -1; } - -void __init mp_register_ioapic ( - u8 id, - u32 address, - u32 gsi_base) +void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base) { - int idx = 0; - int tmpid; - - if (nr_ioapics >= MAX_IO_APICS) { - printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " - "(found %d)\n", MAX_IO_APICS, nr_ioapics); - panic("Recompile kernel with bigger MAX_IO_APICS\n"); - } - if (!address) { - printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" - " found in MADT table, skipping!\n"); - return; - } - - idx = nr_ioapics++; - - mp_ioapics[idx].mpc_type = MP_IOAPIC; - mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE; - mp_ioapics[idx].mpc_apicaddr = address; - - set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); - if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) - && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) - tmpid = io_apic_get_unique_id(idx, id); - else - tmpid = id; - if (tmpid == -1) { - nr_ioapics--; - return; - } - mp_ioapics[idx].mpc_apicid = tmpid; - mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); - - /* - * Build basic GSI lookup table to facilitate gsi->io_apic lookups - * and to prevent reprogramming of IOAPIC pins (PCI GSIs). 
- */ - mp_ioapic_routing[idx].gsi_base = gsi_base; - mp_ioapic_routing[idx].gsi_end = gsi_base + - io_apic_get_redir_entries(idx); - - printk("IOAPIC[%d]: apic_id %d, version %d, address %#x, " - "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, - mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, - mp_ioapic_routing[idx].gsi_base, - mp_ioapic_routing[idx].gsi_end); - - return; + int idx = 0; + int tmpid; + + if ( nr_ioapics >= MAX_IO_APICS ) + { + printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " + "(found %d)\n", + MAX_IO_APICS, nr_ioapics); + panic("Recompile kernel with bigger MAX_IO_APICS\n"); + } + if ( !address ) + { + printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" + " found in MADT table, skipping!\n"); + return; + } + + idx = nr_ioapics++; + + mp_ioapics[idx].mpc_type = MP_IOAPIC; + mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE; + mp_ioapics[idx].mpc_apicaddr = address; + + set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); + if ( (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && + !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]) ) + tmpid = io_apic_get_unique_id(idx, id); + else + tmpid = id; + if ( tmpid == -1 ) + { + nr_ioapics--; + return; + } + mp_ioapics[idx].mpc_apicid = tmpid; + mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); + + /* + * Build basic GSI lookup table to facilitate gsi->io_apic lookups + * and to prevent reprogramming of IOAPIC pins (PCI GSIs). + */ + mp_ioapic_routing[idx].gsi_base = gsi_base; + mp_ioapic_routing[idx].gsi_end = gsi_base + io_apic_get_redir_entries(idx); + + printk("IOAPIC[%d]: apic_id %d, version %d, address %#x, " + "GSI %d-%d\n", + idx, mp_ioapics[idx].mpc_apicid, mp_ioapics[idx].mpc_apicver, + mp_ioapics[idx].mpc_apicaddr, mp_ioapic_routing[idx].gsi_base, + mp_ioapic_routing[idx].gsi_end); + + return; } unsigned __init highest_gsi(void) { - unsigned x, res = 0; - for (x = 0; x < nr_ioapics; x++) - if (res < mp_ioapic_routing[x].gsi_end) - res = mp_ioapic_routing[x].gsi_end; - return res; + unsigned x, res = 0; + for ( x = 0; x < nr_ioapics; x++ ) + if ( res < mp_ioapic_routing[x].gsi_end ) + res = mp_ioapic_routing[x].gsi_end; + return res; } unsigned int io_apic_gsi_base(unsigned int apic) { - return mp_ioapic_routing[apic].gsi_base; + return mp_ioapic_routing[apic].gsi_base; } -void __init mp_override_legacy_irq ( - u8 bus_irq, - u8 polarity, - u8 trigger, - u32 gsi) +void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) { - struct mpc_config_intsrc intsrc; - int ioapic = -1; - int pin = -1; - - /* - * Convert 'gsi' to 'ioapic.pin'. - */ - ioapic = mp_find_ioapic(gsi); - if (ioapic < 0) - return; - pin = gsi - mp_ioapic_routing[ioapic].gsi_base; - - /* - * TBD: This check is for faulty timer entries, where the override - * erroneously sets the trigger to level, resulting in a HUGE - * increase of timer interrupts! 
- */ - if ((bus_irq == 0) && (trigger == 3)) - trigger = 1; - - intsrc.mpc_type = MP_INTSRC; - intsrc.mpc_irqtype = mp_INT; - intsrc.mpc_irqflag = (trigger << 2) | polarity; - intsrc.mpc_srcbus = MP_ISA_BUS; - intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ - intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ - intsrc.mpc_dstirq = pin; /* INTIN# */ - - Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", - intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, - (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, - intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); - - mp_irqs[mp_irq_entries] = intsrc; - if (++mp_irq_entries == MAX_IRQ_SOURCES) - panic("Max # of irq sources exceeded\n"); - - return; + struct mpc_config_intsrc intsrc; + int ioapic = -1; + int pin = -1; + + /* + * Convert 'gsi' to 'ioapic.pin'. + */ + ioapic = mp_find_ioapic(gsi); + if ( ioapic < 0 ) + return; + pin = gsi - mp_ioapic_routing[ioapic].gsi_base; + + /* + * TBD: This check is for faulty timer entries, where the override + * erroneously sets the trigger to level, resulting in a HUGE + * increase of timer interrupts! + */ + if ( (bus_irq == 0) && (trigger == 3) ) + trigger = 1; + + intsrc.mpc_type = MP_INTSRC; + intsrc.mpc_irqtype = mp_INT; + intsrc.mpc_irqflag = (trigger << 2) | polarity; + intsrc.mpc_srcbus = MP_ISA_BUS; + intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ + intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ + intsrc.mpc_dstirq = pin; /* INTIN# */ + + Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", + intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, + (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, + intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); + + mp_irqs[mp_irq_entries] = intsrc; + if ( ++mp_irq_entries == MAX_IRQ_SOURCES ) + panic("Max # of irq sources exceeded\n"); + + return; } -void __init mp_config_acpi_legacy_irqs (void) +void __init mp_config_acpi_legacy_irqs(void) { - struct mpc_config_intsrc intsrc; - int i = 0; - int ioapic = -1; - - /* - * Fabricate the legacy ISA bus (bus #31). - */ - mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; - Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); - - /* - * Locate the IOAPIC that manages the ISA IRQs (0-15). - */ - ioapic = mp_find_ioapic(0); - if (ioapic < 0) - return; - - intsrc.mpc_type = MP_INTSRC; - intsrc.mpc_irqflag = 0; /* Conforming */ - intsrc.mpc_srcbus = MP_ISA_BUS; - intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; - - /* - * Use the default configuration for the IRQs 0-15. Unless - * overriden by (MADT) interrupt source override entries. - */ - for (i = 0; platform_legacy_irq(i); i++) { - int idx; - - for (idx = 0; idx < mp_irq_entries; idx++) { - struct mpc_config_intsrc *irq = mp_irqs + idx; - - /* Do we already have a mapping for this ISA IRQ? 
*/ - if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i) - break; - - /* Do we already have a mapping for this IOAPIC pin */ - if ((irq->mpc_dstapic == intsrc.mpc_dstapic) && - (irq->mpc_dstirq == i)) - break; - } - - if (idx != mp_irq_entries) { - printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); - continue; /* IRQ already used */ - } - - intsrc.mpc_irqtype = mp_INT; - intsrc.mpc_srcbusirq = i; /* Identity mapped */ - intsrc.mpc_dstirq = i; - - Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " - "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, - (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, - intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, - intsrc.mpc_dstirq); - - mp_irqs[mp_irq_entries] = intsrc; - if (++mp_irq_entries == MAX_IRQ_SOURCES) - panic("Max # of irq sources exceeded\n"); - } + struct mpc_config_intsrc intsrc; + int i = 0; + int ioapic = -1; + + /* + * Fabricate the legacy ISA bus (bus #31). + */ + mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; + Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); + + /* + * Locate the IOAPIC that manages the ISA IRQs (0-15). + */ + ioapic = mp_find_ioapic(0); + if ( ioapic < 0 ) + return; + + intsrc.mpc_type = MP_INTSRC; + intsrc.mpc_irqflag = 0; /* Conforming */ + intsrc.mpc_srcbus = MP_ISA_BUS; + intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; + + /* + * Use the default configuration for the IRQs 0-15. Unless + * overriden by (MADT) interrupt source override entries. + */ + for ( i = 0; platform_legacy_irq(i); i++ ) + { + int idx; + + for ( idx = 0; idx < mp_irq_entries; idx++ ) + { + struct mpc_config_intsrc *irq = mp_irqs + idx; + + /* Do we already have a mapping for this ISA IRQ? */ + if ( irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i ) + break; + + /* Do we already have a mapping for this IOAPIC pin */ + if ( (irq->mpc_dstapic == intsrc.mpc_dstapic) && + (irq->mpc_dstirq == i) ) + break; + } + + if ( idx != mp_irq_entries ) + { + printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); + continue; /* IRQ already used */ + } + + intsrc.mpc_irqtype = mp_INT; + intsrc.mpc_srcbusirq = i; /* Identity mapped */ + intsrc.mpc_dstirq = i; + + Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " + "%d-%d\n", + intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, + (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, + intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); + + mp_irqs[mp_irq_entries] = intsrc; + if ( ++mp_irq_entries == MAX_IRQ_SOURCES ) + panic("Max # of irq sources exceeded\n"); + } } -int mp_register_gsi (u32 gsi, int triggering, int polarity) +int mp_register_gsi(u32 gsi, int triggering, int polarity) { - int ioapic; - int ioapic_pin; - struct irq_desc * desc; - unsigned long flags; + int ioapic; + int ioapic_pin; + struct irq_desc *desc; + unsigned long flags; - /* - * Mapping between Global System Interrups, which - * represent all possible interrupts, and IRQs - * assigned to actual devices. - */ + /* + * Mapping between Global System Interrups, which + * represent all possible interrupts, and IRQs + * assigned to actual devices. 
+ */ #ifdef CONFIG_ACPI_BUS - /* Don't set up the ACPI SCI because it's already set up */ - if (acpi_fadt.sci_int == gsi) - return gsi; + /* Don't set up the ACPI SCI because it's already set up */ + if ( acpi_fadt.sci_int == gsi ) + return gsi; #endif - if (!nr_ioapics) { - unsigned int port = 0x4d0 + (gsi >> 3); - u8 val; - - if (!platform_legacy_irq(gsi)) - return -EINVAL; - val = inb(port); - if (triggering) - val |= 1 << (gsi & 7); - else - val &= ~(1 << (gsi & 7)); - outb(val, port); - return 0; - } - - ioapic = mp_find_ioapic(gsi); - if (ioapic < 0) { - printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi); - return -EINVAL; - } - - ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base; - - desc = irq_to_desc(gsi); - spin_lock_irqsave(&desc->lock, flags); - if (!(desc->status & IRQ_DISABLED) && desc->handler != &no_irq_type) { - spin_unlock_irqrestore(&desc->lock, flags); - return -EEXIST; - } - spin_unlock_irqrestore(&desc->lock, flags); - - /* - * Avoid pin reprogramming. PRTs typically include entries - * with redundant pin->gsi mappings (but unique PCI devices); - * we only program the IOAPIC on the first. - */ - if (ioapic_pin > MP_MAX_IOAPIC_PIN) { - printk(KERN_ERR "Invalid reference to IOAPIC pin " - "%d-%d\n", mp_ioapics[ioapic].mpc_apicid, - ioapic_pin); - return -EINVAL; - } - if (test_and_set_bit(ioapic_pin, - mp_ioapic_routing[ioapic].pin_programmed)) { - Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", - mp_ioapics[ioapic].mpc_apicid, ioapic_pin); - return -EEXIST; - } - - return io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, - triggering, polarity); + if ( !nr_ioapics ) + { + unsigned int port = 0x4d0 + (gsi >> 3); + u8 val; + + if ( !platform_legacy_irq(gsi) ) + return -EINVAL; + val = inb(port); + if ( triggering ) + val |= 1 << (gsi & 7); + else + val &= ~(1 << (gsi & 7)); + outb(val, port); + return 0; + } + + ioapic = mp_find_ioapic(gsi); + if ( ioapic < 0 ) + { + printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi); + return -EINVAL; + } + + ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base; + + desc = irq_to_desc(gsi); + spin_lock_irqsave(&desc->lock, flags); + if ( !(desc->status & IRQ_DISABLED) && desc->handler != &no_irq_type ) + { + spin_unlock_irqrestore(&desc->lock, flags); + return -EEXIST; + } + spin_unlock_irqrestore(&desc->lock, flags); + + /* + * Avoid pin reprogramming. PRTs typically include entries + * with redundant pin->gsi mappings (but unique PCI devices); + * we only program the IOAPIC on the first. 
+ */ + if ( ioapic_pin > MP_MAX_IOAPIC_PIN ) + { + printk(KERN_ERR "Invalid reference to IOAPIC pin " + "%d-%d\n", + mp_ioapics[ioapic].mpc_apicid, ioapic_pin); + return -EINVAL; + } + if ( test_and_set_bit(ioapic_pin, + mp_ioapic_routing[ioapic].pin_programmed) ) + { + Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", + mp_ioapics[ioapic].mpc_apicid, ioapic_pin); + return -EEXIST; + } + + return io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, triggering, + polarity); } #endif /* CONFIG_ACPI */ diff --git a/xen/arch/x86/msi.c b/xen/arch/x86/msi.c index babc4147c4..6b40d244e9 100644 --- a/xen/arch/x86/msi.c +++ b/xen/arch/x86/msi.c @@ -54,7 +54,7 @@ static int msix_fixmap_alloc(void) rc = FIX_MSIX_IO_RESERV_BASE + i; set_bit(i, &msix_fixmap_pages); - out: +out: spin_unlock(&msix_fixmap_lock); return rc; } @@ -93,7 +93,7 @@ static int msix_get_fixmap(struct arch_msix *msix, u64 table_paddr, else idx = msix->table_idx[nr_page]; - out: +out: spin_unlock(&msix->table_lock); return idx; } @@ -118,7 +118,7 @@ static void msix_put_fixmap(struct arch_msix *msix, int idx) msix->table_idx[i] = 0; } - out: +out: spin_unlock(&msix->table_lock); } @@ -157,7 +157,8 @@ static bool msix_memory_decoded(const struct pci_dev *dev, unsigned int pos) /* * MSI message composition */ -void msi_compose_msg(unsigned vector, const cpumask_t *cpu_mask, struct msi_msg *msg) +void msi_compose_msg(unsigned vector, const cpumask_t *cpu_mask, + struct msi_msg *msg) { memset(msg, 0, sizeof(*msg)); @@ -176,25 +177,23 @@ void msi_compose_msg(unsigned vector, const cpumask_t *cpu_mask, struct msi_msg } msg->address_hi = MSI_ADDR_BASE_HI; - msg->address_lo = MSI_ADDR_BASE_LO | - (INT_DEST_MODE ? MSI_ADDR_DESTMODE_LOGIC - : MSI_ADDR_DESTMODE_PHYS) | - ((INT_DELIVERY_MODE != dest_LowestPrio) - ? MSI_ADDR_REDIRECTION_CPU - : MSI_ADDR_REDIRECTION_LOWPRI) | - MSI_ADDR_DEST_ID(msg->dest32); - - msg->data = MSI_DATA_TRIGGER_EDGE | - MSI_DATA_LEVEL_ASSERT | - ((INT_DELIVERY_MODE != dest_LowestPrio) - ? MSI_DATA_DELIVERY_FIXED - : MSI_DATA_DELIVERY_LOWPRI) | - MSI_DATA_VECTOR(vector); + msg->address_lo = + MSI_ADDR_BASE_LO | + (INT_DEST_MODE ? MSI_ADDR_DESTMODE_LOGIC : MSI_ADDR_DESTMODE_PHYS) | + ((INT_DELIVERY_MODE != dest_LowestPrio) ? MSI_ADDR_REDIRECTION_CPU + : MSI_ADDR_REDIRECTION_LOWPRI) | + MSI_ADDR_DEST_ID(msg->dest32); + + msg->data = + MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT | + ((INT_DELIVERY_MODE != dest_LowestPrio) ? 
MSI_DATA_DELIVERY_FIXED + : MSI_DATA_DELIVERY_LOWPRI) | + MSI_DATA_VECTOR(vector); } static bool read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { - switch ( entry->msi_attrib.type ) + switch (entry->msi_attrib.type) { case PCI_CAP_ID_MSI: { @@ -205,20 +204,18 @@ static bool read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) u8 slot = PCI_SLOT(dev->devfn); u8 func = PCI_FUNC(dev->devfn); - msg->address_lo = pci_conf_read32(seg, bus, slot, func, - msi_lower_address_reg(pos)); + msg->address_lo = + pci_conf_read32(seg, bus, slot, func, msi_lower_address_reg(pos)); if ( entry->msi_attrib.is_64 ) { msg->address_hi = pci_conf_read32(seg, bus, slot, func, msi_upper_address_reg(pos)); - data = pci_conf_read16(seg, bus, slot, func, - msi_data_reg(pos, 1)); + data = pci_conf_read16(seg, bus, slot, func, msi_data_reg(pos, 1)); } else { msg->address_hi = 0; - data = pci_conf_read16(seg, bus, slot, func, - msi_data_reg(pos, 0)); + data = pci_conf_read16(seg, bus, slot, func, msi_data_reg(pos, 0)); } msg->data = data; break; @@ -227,8 +224,7 @@ static bool read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { void __iomem *base = entry->mask_base; - if ( unlikely(!msix_memory_decoded(entry->dev, - entry->msi_attrib.pos)) ) + if ( unlikely(!msix_memory_decoded(entry->dev, entry->msi_attrib.pos)) ) return false; msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); @@ -259,7 +255,7 @@ static int write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) return rc; } - switch ( entry->msi_attrib.type ) + switch (entry->msi_attrib.type) { case PCI_CAP_ID_MSI: { @@ -293,13 +289,10 @@ static int write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { void __iomem *base = entry->mask_base; - if ( unlikely(!msix_memory_decoded(entry->dev, - entry->msi_attrib.pos)) ) + if ( unlikely(!msix_memory_decoded(entry->dev, entry->msi_attrib.pos)) ) return -ENXIO; - writel(msg->address_lo, - base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); - writel(msg->address_hi, - base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); + writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); + writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET); break; } @@ -380,8 +373,8 @@ static void msix_set_enable(struct pci_dev *dev, int enable) int msi_maskable_irq(const struct msi_desc *entry) { BUG_ON(!entry); - return entry->msi_attrib.type != PCI_CAP_ID_MSI - || entry->msi_attrib.maskbit; + return entry->msi_attrib.type != PCI_CAP_ID_MSI || + entry->msi_attrib.maskbit; } static bool msi_set_mask_bit(struct irq_desc *desc, bool host, bool guest) @@ -399,7 +392,7 @@ static bool msi_set_mask_bit(struct irq_desc *desc, bool host, bool guest) bus = pdev->bus; slot = PCI_SLOT(pdev->devfn); func = PCI_FUNC(pdev->devfn); - switch ( entry->msi_attrib.type ) + switch (entry->msi_attrib.type) { case PCI_CAP_ID_MSI: if ( entry->msi_attrib.maskbit ) @@ -419,10 +412,9 @@ static bool msi_set_mask_bit(struct irq_desc *desc, bool host, bool guest) if ( unlikely(!(control & PCI_MSIX_FLAGS_ENABLE)) ) { pdev->msix->host_maskall = 1; - pci_conf_write16(seg, bus, slot, func, - msix_control_reg(entry->msi_attrib.pos), - control | (PCI_MSIX_FLAGS_ENABLE | - PCI_MSIX_FLAGS_MASKALL)); + pci_conf_write16( + seg, bus, slot, func, msix_control_reg(entry->msi_attrib.pos), + control | (PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL)); } if ( likely(memory_decoded(pdev)) ) { @@ -445,8 +437,8 @@ static bool 
msi_set_mask_bit(struct irq_desc *desc, bool host, bool guest) if ( pdev->msix->warned != domid ) { pdev->msix->warned = domid; - printk(XENLOG_G_WARNING - "cannot mask IRQ %d: masking MSI-X on Dom%d's %04x:%02x:%02x.%u\n", + printk(XENLOG_G_WARNING "cannot mask IRQ %d: masking MSI-X on " + "Dom%d's %04x:%02x:%02x.%u\n", desc->irq, domid, seg, bus, slot, func); } } @@ -470,19 +462,18 @@ static int msi_get_mask_bit(const struct msi_desc *entry) if ( !entry->dev ) return -1; - switch ( entry->msi_attrib.type ) + switch (entry->msi_attrib.type) { case PCI_CAP_ID_MSI: if ( !entry->msi_attrib.maskbit ) break; return (pci_conf_read32(entry->dev->seg, entry->dev->bus, PCI_SLOT(entry->dev->devfn), - PCI_FUNC(entry->dev->devfn), - entry->msi.mpos) >> - entry->msi_attrib.entry_nr) & 1; + PCI_FUNC(entry->dev->devfn), entry->msi.mpos) >> + entry->msi_attrib.entry_nr) & + 1; case PCI_CAP_ID_MSIX: - if ( unlikely(!msix_memory_decoded(entry->dev, - entry->msi_attrib.pos)) ) + if ( unlikely(!msix_memory_decoded(entry->dev, entry->msi_attrib.pos)) ) break; return readl(entry->mask_base + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET) & 1; } @@ -542,27 +533,24 @@ void end_nonmaskable_msi_irq(struct irq_desc *desc, u8 vector) * IRQ chip for MSI PCI/PCI-X/PCI-Express devices, * which implement the MSI or MSI-X capability structure. */ -static hw_irq_controller pci_msi_maskable = { - .typename = "PCI-MSI/-X", - .startup = startup_msi_irq, - .shutdown = shutdown_msi_irq, - .enable = unmask_msi_irq, - .disable = mask_msi_irq, - .ack = ack_maskable_msi_irq, - .set_affinity = set_msi_affinity -}; +static hw_irq_controller pci_msi_maskable = {.typename = "PCI-MSI/-X", + .startup = startup_msi_irq, + .shutdown = shutdown_msi_irq, + .enable = unmask_msi_irq, + .disable = mask_msi_irq, + .ack = ack_maskable_msi_irq, + .set_affinity = set_msi_affinity}; /* As above, but without having masking capability. */ -static hw_irq_controller pci_msi_nonmaskable = { - .typename = "PCI-MSI", - .startup = irq_startup_none, - .shutdown = irq_shutdown_none, - .enable = irq_enable_none, - .disable = irq_disable_none, - .ack = ack_nonmaskable_msi_irq, - .end = end_nonmaskable_msi_irq, - .set_affinity = set_msi_affinity -}; +static hw_irq_controller pci_msi_nonmaskable = {.typename = "PCI-MSI", + .startup = irq_startup_none, + .shutdown = irq_shutdown_none, + .enable = irq_enable_none, + .disable = irq_disable_none, + .ack = ack_nonmaskable_msi_irq, + .end = end_nonmaskable_msi_irq, + .set_affinity = + set_msi_affinity}; static struct msi_desc *alloc_msi_entry(unsigned int nr) { @@ -597,10 +585,10 @@ int setup_msi_irq(struct irq_desc *desc, struct msi_desc *msidesc) control = pci_conf_read16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), cpos); if ( !(control & PCI_MSIX_FLAGS_ENABLE) ) - pci_conf_write16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn), cpos, - control | (PCI_MSIX_FLAGS_ENABLE | - PCI_MSIX_FLAGS_MASKALL)); + pci_conf_write16( + pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn), cpos, + control | (PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL)); } rc = __setup_msi_irq(desc, msidesc, @@ -635,8 +623,8 @@ int __setup_msi_irq(struct irq_desc *desc, struct msi_desc *msidesc, int msi_free_irq(struct msi_desc *entry) { - unsigned int nr = entry->msi_attrib.type != PCI_CAP_ID_MSIX - ? entry->msi.nvec : 1; + unsigned int nr = + entry->msi_attrib.type != PCI_CAP_ID_MSIX ? 
entry->msi.nvec : 1; while ( nr-- ) { @@ -657,12 +645,11 @@ int msi_free_irq(struct msi_desc *entry) return 0; } -static struct msi_desc *find_msi_entry(struct pci_dev *dev, - int irq, int cap_id) +static struct msi_desc *find_msi_entry(struct pci_dev *dev, int irq, int cap_id) { struct msi_desc *entry; - list_for_each_entry( entry, &dev->msi_list, list ) + list_for_each_entry (entry, &dev->msi_list, list) { if ( entry->msi_attrib.type == cap_id && (irq == -1 || entry->irq == irq) ) @@ -681,10 +668,8 @@ static struct msi_desc *find_msi_entry(struct pci_dev *dev, * multiple messages. A return of zero indicates the successful setup * of an entry zero with the new MSI irq or non-zero for otherwise. **/ -static int msi_capability_init(struct pci_dev *dev, - int irq, - struct msi_desc **desc, - unsigned int nvec) +static int msi_capability_init(struct pci_dev *dev, int irq, + struct msi_desc **desc, unsigned int nvec) { struct msi_desc *entry; int pos; @@ -718,8 +703,8 @@ static int msi_capability_init(struct pci_dev *dev, entry[i].msi_attrib.type = PCI_CAP_ID_MSI; entry[i].msi_attrib.is_64 = is_64bit_address(control); entry[i].msi_attrib.entry_nr = i; - entry[i].msi_attrib.host_masked = - entry[i].msi_attrib.maskbit = is_mask_bit_support(control); + entry[i].msi_attrib.host_masked = entry[i].msi_attrib.maskbit = + is_mask_bit_support(control); entry[i].msi_attrib.guest_masked = 0; entry[i].msi_attrib.pos = pos; if ( entry[i].msi_attrib.maskbit ) @@ -766,22 +751,19 @@ static u64 read_pci_mem_bar(u16 seg, u8 bus, u8 slot, u8 func, u8 bir, int vf) if ( vf >= 0 ) { struct pci_dev *pdev = pci_get_pdev(seg, bus, PCI_DEVFN(slot, func)); - unsigned int pos = pci_find_ext_capability(seg, bus, - PCI_DEVFN(slot, func), - PCI_EXT_CAP_ID_SRIOV); + unsigned int pos = pci_find_ext_capability( + seg, bus, PCI_DEVFN(slot, func), PCI_EXT_CAP_ID_SRIOV); u16 ctrl = pci_conf_read16(seg, bus, slot, func, pos + PCI_SRIOV_CTRL); - u16 num_vf = pci_conf_read16(seg, bus, slot, func, - pos + PCI_SRIOV_NUM_VF); - u16 offset = pci_conf_read16(seg, bus, slot, func, - pos + PCI_SRIOV_VF_OFFSET); - u16 stride = pci_conf_read16(seg, bus, slot, func, - pos + PCI_SRIOV_VF_STRIDE); - - if ( !pdev || !pos || - !(ctrl & PCI_SRIOV_CTRL_VFE) || - !(ctrl & PCI_SRIOV_CTRL_MSE) || - !num_vf || !offset || (num_vf > 1 && !stride) || - bir >= PCI_SRIOV_NUM_BARS || + u16 num_vf = + pci_conf_read16(seg, bus, slot, func, pos + PCI_SRIOV_NUM_VF); + u16 offset = + pci_conf_read16(seg, bus, slot, func, pos + PCI_SRIOV_VF_OFFSET); + u16 stride = + pci_conf_read16(seg, bus, slot, func, pos + PCI_SRIOV_VF_STRIDE); + + if ( !pdev || !pos || !(ctrl & PCI_SRIOV_CTRL_VFE) || + !(ctrl & PCI_SRIOV_CTRL_MSE) || !num_vf || !offset || + (num_vf > 1 && !stride) || bir >= PCI_SRIOV_NUM_BARS || !pdev->vf_rlen[bir] ) return 0; base = pos + PCI_SRIOV_BAR; @@ -800,35 +782,36 @@ static u64 read_pci_mem_bar(u16 seg, u8 bus, u8 slot, u8 func, u8 bir, int vf) disp = vf * pdev->vf_rlen[bir]; limit = PCI_SRIOV_NUM_BARS; } - else switch ( pci_conf_read8(seg, bus, slot, func, - PCI_HEADER_TYPE) & 0x7f ) - { - case PCI_HEADER_TYPE_NORMAL: - limit = 6; - break; - case PCI_HEADER_TYPE_BRIDGE: - limit = 2; - break; - case PCI_HEADER_TYPE_CARDBUS: - limit = 1; - break; - default: - return 0; - } + else + switch (pci_conf_read8(seg, bus, slot, func, PCI_HEADER_TYPE) & 0x7f) + { + case PCI_HEADER_TYPE_NORMAL: + limit = 6; + break; + case PCI_HEADER_TYPE_BRIDGE: + limit = 2; + break; + case PCI_HEADER_TYPE_CARDBUS: + limit = 1; + break; + default: + return 0; + } if ( bir >= 
limit ) return 0; addr = pci_conf_read32(seg, bus, slot, func, base + bir * 4); if ( (addr & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO ) return 0; - if ( (addr & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64 ) + if ( (addr & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == + PCI_BASE_ADDRESS_MEM_TYPE_64 ) { addr &= PCI_BASE_ADDRESS_MEM_MASK; if ( ++bir >= limit ) return 0; return addr + disp + - ((u64)pci_conf_read32(seg, bus, slot, func, - base + bir * 4) << 32); + ((u64)pci_conf_read32(seg, bus, slot, func, base + bir * 4) + << 32); } return (addr & PCI_BASE_ADDRESS_MEM_MASK) + disp; } @@ -843,10 +826,8 @@ static u64 read_pci_mem_bar(u16 seg, u8 bus, u8 slot, u8 func, u8 bir, int vf) * number MSI-X irqs. A return of zero indicates the successful setup of * requested MSI-X entries with allocated irqs or non-zero for otherwise. **/ -static int msix_capability_init(struct pci_dev *dev, - unsigned int pos, - struct msi_info *msi, - struct msi_desc **desc, +static int msix_capability_init(struct pci_dev *dev, unsigned int pos, + struct msi_info *msi, struct msi_desc **desc, unsigned int nr_entries) { struct arch_msix *msix = dev->msix; @@ -873,8 +854,8 @@ static int msix_capability_init(struct pci_dev *dev, */ msix->host_maskall = 1; pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos), - control | (PCI_MSIX_FLAGS_ENABLE | - PCI_MSIX_FLAGS_MASKALL)); + control | + (PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL)); if ( unlikely(!memory_decoded(dev)) ) { @@ -896,8 +877,8 @@ static int msix_capability_init(struct pci_dev *dev, } /* Locate MSI-X table region */ - table_offset = pci_conf_read32(seg, bus, slot, func, - msix_table_offset_reg(pos)); + table_offset = + pci_conf_read32(seg, bus, slot, func, msix_table_offset_reg(pos)); bir = (u8)(table_offset & PCI_MSIX_BIRMASK); table_offset &= ~PCI_MSIX_BIRMASK; @@ -938,21 +919,20 @@ static int msix_capability_init(struct pci_dev *dev, msix->nr_entries = nr_entries; msix->table.first = PFN_DOWN(table_paddr); - msix->table.last = PFN_DOWN(table_paddr + - nr_entries * PCI_MSIX_ENTRY_SIZE - 1); + msix->table.last = + PFN_DOWN(table_paddr + nr_entries * PCI_MSIX_ENTRY_SIZE - 1); WARN_ON(rangeset_overlaps_range(mmio_ro_ranges, msix->table.first, msix->table.last)); - pba_offset = pci_conf_read32(seg, bus, slot, func, - msix_pba_offset_reg(pos)); + pba_offset = + pci_conf_read32(seg, bus, slot, func, msix_pba_offset_reg(pos)); bir = (u8)(pba_offset & PCI_MSIX_BIRMASK); pba_paddr = read_pci_mem_bar(seg, pbus, pslot, pfunc, bir, vf); WARN_ON(!pba_paddr); pba_paddr += pba_offset & ~PCI_MSIX_BIRMASK; msix->pba.first = PFN_DOWN(pba_paddr); - msix->pba.last = PFN_DOWN(pba_paddr + - BITS_TO_LONGS(nr_entries) - 1); + msix->pba.last = PFN_DOWN(pba_paddr + BITS_TO_LONGS(nr_entries) - 1); WARN_ON(rangeset_overlaps_range(mmio_ro_ranges, msix->pba.first, msix->pba.last)); } @@ -1013,9 +993,9 @@ static int msix_capability_init(struct pci_dev *dev, if ( !is_hardware_domain(currd) || d != currd ) printk("%s use of MSI-X on %04x:%02x:%02x.%u by Dom%d\n", - is_hardware_domain(currd) - ? XENLOG_WARNING "Potentially insecure" - : XENLOG_ERR "Insecure", + is_hardware_domain(currd) ? XENLOG_WARNING + "Potentially insecure" + : XENLOG_ERR "Insecure", seg, bus, slot, func, d->domain_id); if ( !is_hardware_domain(d) && /* Assume a domain without memory has no mappings yet. 
*/ @@ -1072,8 +1052,8 @@ static int __pci_enable_msi(struct msi_info *msi, struct msi_desc **desc) if ( old_desc ) { printk(XENLOG_ERR "irq %d already mapped to MSI on %04x:%02x:%02x.%u\n", - msi->irq, msi->seg, msi->bus, - PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn)); + msi->irq, msi->seg, msi->bus, PCI_SLOT(msi->devfn), + PCI_FUNC(msi->devfn)); return -EEXIST; } @@ -1081,8 +1061,7 @@ static int __pci_enable_msi(struct msi_info *msi, struct msi_desc **desc) if ( old_desc ) { printk(XENLOG_WARNING "MSI-X already in use on %04x:%02x:%02x.%u\n", - msi->seg, msi->bus, - PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn)); + msi->seg, msi->bus, PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn)); __pci_disable_msix(old_desc); } @@ -1131,8 +1110,8 @@ static int __pci_enable_msix(struct msi_info *msi, struct msi_desc **desc) if ( !pdev || !pos ) return -ENODEV; - control = pci_conf_read16(msi->seg, msi->bus, slot, func, - msix_control_reg(pos)); + control = + pci_conf_read16(msi->seg, msi->bus, slot, func, msix_control_reg(pos)); nr_entries = multi_msix_capable(control); if ( msi->entry_nr >= nr_entries ) return -EINVAL; @@ -1140,7 +1119,8 @@ static int __pci_enable_msix(struct msi_info *msi, struct msi_desc **desc) old_desc = find_msi_entry(pdev, msi->irq, PCI_CAP_ID_MSIX); if ( old_desc ) { - printk(XENLOG_ERR "irq %d already mapped to MSI-X on %04x:%02x:%02x.%u\n", + printk(XENLOG_ERR + "irq %d already mapped to MSI-X on %04x:%02x:%02x.%u\n", msi->irq, msi->seg, msi->bus, slot, func); return -EEXIST; } @@ -1176,8 +1156,8 @@ static void __pci_disable_msix(struct msi_desc *entry) u8 bus = dev->bus; u8 slot = PCI_SLOT(dev->devfn); u8 func = PCI_FUNC(dev->devfn); - unsigned int pos = pci_find_cap_offset(seg, bus, slot, func, - PCI_CAP_ID_MSIX); + unsigned int pos = + pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSIX); u16 control = pci_conf_read16(seg, bus, slot, func, msix_control_reg(entry->msi_attrib.pos)); bool maskall = dev->msix->host_maskall; @@ -1186,8 +1166,8 @@ static void __pci_disable_msix(struct msi_desc *entry) { dev->msix->host_maskall = 1; pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos), - control | (PCI_MSIX_FLAGS_ENABLE | - PCI_MSIX_FLAGS_MASKALL)); + control | + (PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL)); } BUG_ON(list_empty(&dev->msi_list)); @@ -1214,8 +1194,8 @@ int pci_prepare_msix(u16 seg, u8 bus, u8 devfn, bool off) int rc; struct pci_dev *pdev; u8 slot = PCI_SLOT(devfn), func = PCI_FUNC(devfn); - unsigned int pos = pci_find_cap_offset(seg, bus, slot, func, - PCI_CAP_ID_MSIX); + unsigned int pos = + pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSIX); if ( !use_msi ) return 0; @@ -1236,8 +1216,8 @@ int pci_prepare_msix(u16 seg, u8 bus, u8 devfn, bool off) } else { - u16 control = pci_conf_read16(seg, bus, slot, func, - msix_control_reg(pos)); + u16 control = + pci_conf_read16(seg, bus, slot, func, msix_control_reg(pos)); rc = msix_capability_init(pdev, pos, NULL, NULL, multi_msix_capable(control)); @@ -1258,8 +1238,8 @@ int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc) if ( !use_msi ) return -EPERM; - return msi->table_base ? __pci_enable_msix(msi, desc) : - __pci_enable_msi(msi, desc); + return msi->table_base ? 
__pci_enable_msix(msi, desc) + : __pci_enable_msi(msi, desc); } /* @@ -1273,11 +1253,11 @@ void pci_disable_msi(struct msi_desc *msi_desc) __pci_disable_msix(msi_desc); } -static void msi_free_irqs(struct pci_dev* dev) +static void msi_free_irqs(struct pci_dev *dev) { struct msi_desc *entry, *tmp; - list_for_each_entry_safe( entry, tmp, &dev->msi_list, list ) + list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { pci_disable_msi(entry); msi_free_irq(entry); @@ -1305,9 +1285,9 @@ int pci_msi_conf_write_intercept(struct pci_dev *pdev, unsigned int reg, if ( pdev->msix ) { entry = find_msi_entry(pdev, -1, PCI_CAP_ID_MSIX); - pos = entry ? entry->msi_attrib.pos - : pci_find_cap_offset(seg, bus, slot, func, - PCI_CAP_ID_MSIX); + pos = entry + ? entry->msi_attrib.pos + : pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSIX); ASSERT(pos); if ( reg >= pos && reg < msix_pba_offset_reg(pos) + 4 ) @@ -1374,13 +1354,12 @@ int pci_restore_msi_state(struct pci_dev *pdev) if ( !use_msi ) return -EOPNOTSUPP; - ret = xsm_resource_setup_pci(XSM_PRIV, - (pdev->seg << 16) | (pdev->bus << 8) | - pdev->devfn); + ret = xsm_resource_setup_pci(XSM_PRIV, (pdev->seg << 16) | + (pdev->bus << 8) | pdev->devfn); if ( ret ) return ret; - list_for_each_entry_safe( entry, tmp, &pdev->msi_list, list ) + list_for_each_entry_safe(entry, tmp, &pdev->msi_list, list) { unsigned int i = 0, nr = 1; @@ -1391,9 +1370,9 @@ int pci_restore_msi_state(struct pci_dev *pdev) ASSERT(desc->msi_desc == entry); - if (desc->msi_desc != entry) + if ( desc->msi_desc != entry ) { - bogus: + bogus: dprintk(XENLOG_ERR, "Restore MSI for %04x:%02x:%02x:%u entry %u not set?\n", pdev->seg, pdev->bus, slot, func, i); @@ -1416,10 +1395,9 @@ int pci_restore_msi_state(struct pci_dev *pdev) { control = pci_conf_read16(pdev->seg, pdev->bus, slot, func, msix_control_reg(pos)); - pci_conf_write16(pdev->seg, pdev->bus, slot, func, - msix_control_reg(pos), - control | (PCI_MSIX_FLAGS_ENABLE | - PCI_MSIX_FLAGS_MASKALL)); + pci_conf_write16( + pdev->seg, pdev->bus, slot, func, msix_control_reg(pos), + control | (PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL)); if ( unlikely(!memory_decoded(pdev)) ) { spin_unlock_irqrestore(&desc->lock, flags); @@ -1434,7 +1412,7 @@ int pci_restore_msi_state(struct pci_dev *pdev) msg = entry->msg; write_msi_msg(entry, &msg); - for ( i = 0; ; ) + for ( i = 0;; ) { if ( unlikely(!msi_set_mask_bit(desc, entry[i].msi_attrib.host_masked, @@ -1510,15 +1488,23 @@ static void dump_msi(unsigned char key) continue; } - switch ( entry->msi_attrib.type ) + switch (entry->msi_attrib.type) { - case PCI_CAP_ID_MSI: type = "MSI"; break; - case PCI_CAP_ID_MSIX: type = "MSI-X"; break; + case PCI_CAP_ID_MSI: + type = "MSI"; + break; + case PCI_CAP_ID_MSIX: + type = "MSI-X"; + break; case 0: - switch ( entry->msi_attrib.pos ) + switch (entry->msi_attrib.pos) { - case MSI_TYPE_HPET: type = "HPET"; break; - case MSI_TYPE_IOMMU: type = "IOMMU"; break; + case MSI_TYPE_HPET: + type = "HPET"; + break; + case MSI_TYPE_IOMMU: + type = "IOMMU"; + break; } break; } @@ -1546,11 +1532,9 @@ static void dump_msi(unsigned char key) data & MSI_DATA_TRIGGER_LEVEL ? "level" : "edge", data & MSI_DATA_LEVEL_ASSERT ? "" : "de", addr & MSI_ADDR_DESTMODE_LOGIC ? "log" : "phys", - addr & MSI_ADDR_REDIRECTION_LOWPRI ? "lowest" : "cpu", - dest32, attr.maskbit, - attr.host_masked ? 'H' : ' ', - attr.guest_masked ? 'G' : ' ', - mask); + addr & MSI_ADDR_REDIRECTION_LOWPRI ? "lowest" : "cpu", dest32, + attr.maskbit, attr.host_masked ? 
'H' : ' ', + attr.guest_masked ? 'G' : ' ', mask); } vpci_dump_msi(); diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c index 9bb38b6d66..07297a2ea6 100644 --- a/xen/arch/x86/msr.c +++ b/xen/arch/x86/msr.c @@ -29,13 +29,11 @@ DEFINE_PER_CPU(uint32_t, tsc_aux); -struct msr_policy __read_mostly raw_msr_policy, - __read_mostly host_msr_policy, - __read_mostly hvm_max_msr_policy, - __read_mostly pv_max_msr_policy; +struct msr_policy __read_mostly raw_msr_policy, __read_mostly host_msr_policy, + __read_mostly hvm_max_msr_policy, __read_mostly pv_max_msr_policy; struct vcpu_msrs __read_mostly hvm_max_vcpu_msrs, - __read_mostly pv_max_vcpu_msrs; + __read_mostly pv_max_vcpu_msrs; static void __init calculate_raw_policy(void) { @@ -85,8 +83,7 @@ void __init init_guest_msr_policy(void) int init_domain_msr_policy(struct domain *d) { struct msr_policy *mp = - xmemdup(is_pv_domain(d) ? &pv_max_msr_policy - : &hvm_max_msr_policy); + xmemdup(is_pv_domain(d) ? &pv_max_msr_policy : &hvm_max_msr_policy); if ( !mp ) return -ENOMEM; @@ -104,8 +101,7 @@ int init_vcpu_msr_policy(struct vcpu *v) { struct domain *d = v->domain; struct vcpu_msrs *msrs = - xmemdup(is_pv_domain(d) ? &pv_max_vcpu_msrs - : &hvm_max_vcpu_msrs); + xmemdup(is_pv_domain(d) ? &pv_max_vcpu_msrs : &hvm_max_vcpu_msrs); if ( !msrs ) return -ENOMEM; @@ -124,7 +120,7 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) const struct vcpu_msrs *msrs = v->arch.msrs; int ret = X86EMUL_OKAY; - switch ( msr ) + switch (msr) { case MSR_AMD_PATCHLOADER: case MSR_IA32_UCODE_WRITE: @@ -182,20 +178,21 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) if ( !cp->extd.dbext ) goto gp_fault; - /* - * In HVM context when we've allowed the guest direct access to debug - * registers, the value in msrs->dr_mask[] may be stale. Re-read it - * out of hardware. - */ + /* + * In HVM context when we've allowed the guest direct access to + * debug registers, the value in msrs->dr_mask[] may be stale. + * Re-read it out of hardware. + */ #ifdef CONFIG_HVM if ( v == current && is_hvm_domain(d) && v->arch.hvm.flag_dr_dirty ) rdmsrl(msr, *val); else #endif - *val = msrs->dr_mask[ - array_index_nospec((msr == MSR_AMD64_DR0_ADDRESS_MASK) - ? 0 : (msr - MSR_AMD64_DR1_ADDRESS_MASK + 1), - ARRAY_SIZE(msrs->dr_mask))]; + *val = msrs->dr_mask[array_index_nospec( + (msr == MSR_AMD64_DR0_ADDRESS_MASK) + ? 0 + : (msr - MSR_AMD64_DR1_ADDRESS_MASK + 1), + ARRAY_SIZE(msrs->dr_mask))]; break; default: @@ -210,7 +207,7 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) return ret; - gp_fault: +gp_fault: return X86EMUL_EXCEPTION; } @@ -223,7 +220,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) struct vcpu_msrs *msrs = v->arch.msrs; int ret = X86EMUL_OKAY; - switch ( msr ) + switch (msr) { uint64_t rsvd; @@ -237,8 +234,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) * See note on MSR_IA32_UCODE_WRITE below, which may or may not apply * to AMD CPUs as well (at least the architectural/CPUID part does). */ - if ( is_pv_domain(d) || - d->arch.cpuid->x86_vendor != X86_VENDOR_AMD ) + if ( is_pv_domain(d) || d->arch.cpuid->x86_vendor != X86_VENDOR_AMD ) goto gp_fault; break; @@ -249,8 +245,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) * for such attempts. Also the MSR is architectural and not qualified * by any CPUID bit. 
*/ - if ( is_pv_domain(d) || - d->arch.cpuid->x86_vendor != X86_VENDOR_INTEL ) + if ( is_pv_domain(d) || d->arch.cpuid->x86_vendor != X86_VENDOR_INTEL ) goto gp_fault; break; @@ -347,10 +342,11 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) if ( !cp->extd.dbext || val != (uint32_t)val ) goto gp_fault; - msrs->dr_mask[ - array_index_nospec((msr == MSR_AMD64_DR0_ADDRESS_MASK) - ? 0 : (msr - MSR_AMD64_DR1_ADDRESS_MASK + 1), - ARRAY_SIZE(msrs->dr_mask))] = val; + msrs->dr_mask[array_index_nospec( + (msr == MSR_AMD64_DR0_ADDRESS_MASK) + ? 0 + : (msr - MSR_AMD64_DR1_ADDRESS_MASK + 1), + ARRAY_SIZE(msrs->dr_mask))] = val; if ( v == curr && (curr->arch.dr7 & DR7_ACTIVE_MASK) ) wrmsrl(msr, val); @@ -368,7 +364,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) return ret; - gp_fault: +gp_fault: return X86EMUL_EXCEPTION; } diff --git a/xen/arch/x86/nmi.c b/xen/arch/x86/nmi.c index e26121a737..5a9cbfb986 100644 --- a/xen/arch/x86/nmi.c +++ b/xen/arch/x86/nmi.c @@ -35,7 +35,7 @@ unsigned int nmi_watchdog = NMI_NONE; static unsigned int nmi_hz = HZ; -static unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */ +static unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */ static unsigned int nmi_p4_cccr_val; static unsigned int nmi_p6_event_width; static DEFINE_PER_CPU(struct timer, nmi_timer); @@ -55,7 +55,7 @@ static int __init parse_watchdog(const char *s) return 0; } - switch ( parse_bool(s, NULL) ) + switch (parse_bool(s, NULL)) { case 0: opt_watchdog = false; @@ -99,8 +99,8 @@ custom_param("watchdog_timeout", parse_watchdog_timeout); */ static DEFINE_SPINLOCK(lapic_nmi_owner_lock); static unsigned int lapic_nmi_owner; -#define LAPIC_NMI_WATCHDOG (1<<0) -#define LAPIC_NMI_RESERVED (1<<1) +#define LAPIC_NMI_WATCHDOG (1 << 0) +#define LAPIC_NMI_RESERVED (1 << 1) /* nmi_active: * +1: the lapic NMI watchdog is active, but can be disabled @@ -110,43 +110,44 @@ static unsigned int lapic_nmi_owner; */ int nmi_active; -#define K7_EVNTSEL_ENABLE (1 << 22) -#define K7_EVNTSEL_INT (1 << 20) -#define K7_EVNTSEL_OS (1 << 17) -#define K7_EVNTSEL_USR (1 << 16) -#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76 -#define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING -#define K7_EVENT_WIDTH 32 - -#define P6_EVNTSEL0_ENABLE (1 << 22) -#define P6_EVNTSEL_INT (1 << 20) -#define P6_EVNTSEL_OS (1 << 17) -#define P6_EVNTSEL_USR (1 << 16) -#define P6_EVENT_CPU_CLOCKS_NOT_HALTED 0x79 +#define K7_EVNTSEL_ENABLE (1 << 22) +#define K7_EVNTSEL_INT (1 << 20) +#define K7_EVNTSEL_OS (1 << 17) +#define K7_EVNTSEL_USR (1 << 16) +#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76 +#define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING +#define K7_EVENT_WIDTH 32 + +#define P6_EVNTSEL0_ENABLE (1 << 22) +#define P6_EVNTSEL_INT (1 << 20) +#define P6_EVNTSEL_OS (1 << 17) +#define P6_EVNTSEL_USR (1 << 16) +#define P6_EVENT_CPU_CLOCKS_NOT_HALTED 0x79 #define CORE_EVENT_CPU_CLOCKS_NOT_HALTED 0x3c /* Bit width of IA32_PMCx MSRs is reported using CPUID.0AH:EAX[23:16]. 
*/ -#define P6_EVENT_WIDTH_MASK (((1 << 8) - 1) << 16) -#define P6_EVENT_WIDTH_MIN 32 - -#define P4_ESCR_EVENT_SELECT(N) ((N)<<25) -#define P4_CCCR_OVF_PMI0 (1<<26) -#define P4_CCCR_OVF_PMI1 (1<<27) -#define P4_CCCR_OVF (1<<31) -#define P4_CCCR_THRESHOLD(N) ((N)<<20) -#define P4_CCCR_COMPLEMENT (1<<19) -#define P4_CCCR_COMPARE (1<<18) -#define P4_CCCR_REQUIRED (3<<16) -#define P4_CCCR_ESCR_SELECT(N) ((N)<<13) -#define P4_CCCR_ENABLE (1<<12) -/* +#define P6_EVENT_WIDTH_MASK (((1 << 8) - 1) << 16) +#define P6_EVENT_WIDTH_MIN 32 + +#define P4_ESCR_EVENT_SELECT(N) ((N) << 25) +#define P4_CCCR_OVF_PMI0 (1 << 26) +#define P4_CCCR_OVF_PMI1 (1 << 27) +#define P4_CCCR_OVF (1 << 31) +#define P4_CCCR_THRESHOLD(N) ((N) << 20) +#define P4_CCCR_COMPLEMENT (1 << 19) +#define P4_CCCR_COMPARE (1 << 18) +#define P4_CCCR_REQUIRED (3 << 16) +#define P4_CCCR_ESCR_SELECT(N) ((N) << 13) +#define P4_CCCR_ENABLE (1 << 12) +/* * Set up IQ_PERFCTR0 to behave like a clock, by having IQ_CCCR0 filter * CRU_ESCR0 (with any non-null event selector) through a complemented - * max threshold. [IA32-Vol3, Section 14.9.9] + * max threshold. [IA32-Vol3, Section 14.9.9] */ -#define P4_NMI_CRU_ESCR0 P4_ESCR_EVENT_SELECT(0x3F) -#define P4_NMI_IQ_CCCR0 \ - (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ - P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) +#define P4_NMI_CRU_ESCR0 P4_ESCR_EVENT_SELECT(0x3F) +#define P4_NMI_IQ_CCCR0 \ + (P4_CCCR_OVF_PMI0 | P4_CCCR_THRESHOLD(15) | P4_CCCR_COMPLEMENT | \ + P4_CCCR_COMPARE | P4_CCCR_REQUIRED | P4_CCCR_ESCR_SELECT(4) | \ + P4_CCCR_ENABLE) static void __init wait_for_nmis(void *p) { @@ -161,7 +162,7 @@ static void __init wait_for_nmis(void *p) if ( nmi_count(cpu) >= start_count + 2 ) break; e = rdtsc(); - } while( e - s < ticks ); + } while ( e - s < ticks ); } void __init check_nmi_watchdog(void) @@ -175,7 +176,7 @@ void __init check_nmi_watchdog(void) printk("Testing NMI watchdog on all CPUs:"); - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) prev_nmi_count[cpu] = nmi_count(cpu); /* @@ -185,7 +186,7 @@ void __init check_nmi_watchdog(void) */ on_selected_cpus(&cpu_online_map, wait_for_nmis, NULL, 1); - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) { if ( nmi_count(cpu) - prev_nmi_count[cpu] < 2 ) { @@ -202,7 +203,7 @@ void __init check_nmi_watchdog(void) * There's a limit to how slow we can go because writing the perfctr * MSRs only sets the low 32 bits, with the top 8 bits sign-extended * from those, so it's not possible to set up a delay larger than - * 2^31 cycles and smaller than (2^40 - 2^31) cycles. + * 2^31 cycles and smaller than (2^40 - 2^31) cycles. 
* (Intel SDM, section 18.22.2) */ if ( nmi_watchdog == NMI_LOCAL_APIC ) @@ -219,14 +220,16 @@ static void nmi_timer_fn(void *unused) void disable_lapic_nmi_watchdog(void) { - if (nmi_active <= 0) + if ( nmi_active <= 0 ) return; - switch (boot_cpu_data.x86_vendor) { + switch (boot_cpu_data.x86_vendor) + { case X86_VENDOR_AMD: wrmsr(MSR_K7_EVNTSEL0, 0, 0); break; case X86_VENDOR_INTEL: - switch (boot_cpu_data.x86) { + switch (boot_cpu_data.x86) + { case 6: wrmsr(MSR_P6_EVNTSEL(0), 0, 0); break; @@ -244,7 +247,8 @@ void disable_lapic_nmi_watchdog(void) static void enable_lapic_nmi_watchdog(void) { - if (nmi_active < 0) { + if ( nmi_active < 0 ) + { nmi_watchdog = NMI_LOCAL_APIC; setup_apic_nmi_watchdog(); } @@ -258,9 +262,9 @@ int reserve_lapic_nmi(void) old_owner = lapic_nmi_owner; lapic_nmi_owner |= LAPIC_NMI_RESERVED; spin_unlock(&lapic_nmi_owner_lock); - if (old_owner & LAPIC_NMI_RESERVED) + if ( old_owner & LAPIC_NMI_RESERVED ) return -EBUSY; - if (old_owner & LAPIC_NMI_WATCHDOG) + if ( old_owner & LAPIC_NMI_WATCHDOG ) disable_lapic_nmi_watchdog(); return 0; } @@ -273,7 +277,7 @@ void release_lapic_nmi(void) new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED; lapic_nmi_owner = new_owner; spin_unlock(&lapic_nmi_owner_lock); - if (new_owner & LAPIC_NMI_WATCHDOG) + if ( new_owner & LAPIC_NMI_WATCHDOG ) enable_lapic_nmi_watchdog(); } @@ -286,8 +290,8 @@ static void clear_msr_range(unsigned int base, unsigned int n) { unsigned int i; - for (i = 0; i < n; i++) - wrmsr(base+i, 0, 0); + for ( i = 0; i < n; i++ ) + wrmsr(base + i, 0, 0); } static inline void write_watchdog_counter(const char *descr) @@ -295,8 +299,8 @@ static inline void write_watchdog_counter(const char *descr) u64 count = (u64)cpu_khz * 1000; do_div(count, nmi_hz); - if(descr) - Dprintk("setting %s to -%#"PRIx64"\n", descr, count); + if ( descr ) + Dprintk("setting %s to -%#" PRIx64 "\n", descr, count); wrmsrl(nmi_perfctr_msr, 0 - count); } @@ -309,10 +313,7 @@ static void setup_k7_watchdog(void) clear_msr_range(MSR_K7_EVNTSEL0, 4); clear_msr_range(MSR_K7_PERFCTR0, 4); - evntsel = K7_EVNTSEL_INT - | K7_EVNTSEL_OS - | K7_EVNTSEL_USR - | K7_NMI_EVENT; + evntsel = K7_EVNTSEL_INT | K7_EVNTSEL_OS | K7_EVNTSEL_USR | K7_NMI_EVENT; wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); write_watchdog_counter("K7_PERFCTR0"); @@ -339,10 +340,7 @@ static void setup_p6_watchdog(unsigned counter) clear_msr_range(MSR_P6_EVNTSEL(0), 2); clear_msr_range(MSR_P6_PERFCTR(0), 2); - evntsel = P6_EVNTSEL_INT - | P6_EVNTSEL_OS - | P6_EVNTSEL_USR - | counter; + evntsel = P6_EVNTSEL_INT | P6_EVNTSEL_OS | P6_EVNTSEL_USR | counter; wrmsr(MSR_P6_EVNTSEL(0), evntsel, 0); write_watchdog_counter("P6_PERFCTR0"); @@ -356,7 +354,7 @@ static int setup_p4_watchdog(void) uint64_t misc_enable; rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); - if (!(misc_enable & MSR_IA32_MISC_ENABLE_PERF_AVAIL)) + if ( !(misc_enable & MSR_IA32_MISC_ENABLE_PERF_AVAIL) ) return 0; nmi_perfctr_msr = MSR_P4_IQ_PERFCTR0; @@ -364,15 +362,18 @@ static int setup_p4_watchdog(void) if ( boot_cpu_data.x86_num_siblings == 2 ) nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1; - if (!(misc_enable & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL)) + if ( !(misc_enable & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL) ) clear_msr_range(0x3F1, 2); /* MSR 0x3F0 seems to have a default value of 0xFC00, but current docs doesn't fully define it, so leave it alone for now. 
*/ - if (boot_cpu_data.x86_model >= 0x3) { + if ( boot_cpu_data.x86_model >= 0x3 ) + { /* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */ clear_msr_range(0x3A0, 26); clear_msr_range(0x3BC, 3); - } else { + } + else + { clear_msr_range(0x3A0, 31); } clear_msr_range(0x3C0, 6); @@ -380,7 +381,7 @@ static int setup_p4_watchdog(void) clear_msr_range(0x3E0, 2); clear_msr_range(MSR_P4_BPU_CCCR0, 18); clear_msr_range(MSR_P4_BPU_PERFCTR0, 18); - + wrmsrl(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0); wrmsrl(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE); write_watchdog_counter("P4_IQ_COUNTER0"); @@ -394,9 +395,11 @@ void setup_apic_nmi_watchdog(void) if ( nmi_watchdog == NMI_NONE ) return; - switch (boot_cpu_data.x86_vendor) { + switch (boot_cpu_data.x86_vendor) + { case X86_VENDOR_AMD: - switch (boot_cpu_data.x86) { + switch (boot_cpu_data.x86) + { case 6: case 0xf ... 0x17: setup_k7_watchdog(); @@ -406,14 +409,15 @@ void setup_apic_nmi_watchdog(void) } break; case X86_VENDOR_INTEL: - switch (boot_cpu_data.x86) { + switch (boot_cpu_data.x86) + { case 6: - setup_p6_watchdog((boot_cpu_data.x86_model < 14) - ? P6_EVENT_CPU_CLOCKS_NOT_HALTED - : CORE_EVENT_CPU_CLOCKS_NOT_HALTED); + setup_p6_watchdog((boot_cpu_data.x86_model < 14) + ? P6_EVENT_CPU_CLOCKS_NOT_HALTED + : CORE_EVENT_CPU_CLOCKS_NOT_HALTED); break; case 15: - if (!setup_p4_watchdog()) + if ( !setup_p4_watchdog() ) return; break; default: @@ -428,12 +432,12 @@ void setup_apic_nmi_watchdog(void) nmi_active = 1; } -static int cpu_nmi_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_nmi_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - switch ( action ) + switch (action) { case CPU_UP_PREPARE: init_timer(&per_cpu(nmi_timer, cpu), nmi_timer_fn, NULL, cpu); @@ -450,9 +454,7 @@ static int cpu_nmi_callback( return NOTIFY_DONE; } -static struct notifier_block cpu_nmi_nfb = { - .notifier_call = cpu_nmi_callback -}; +static struct notifier_block cpu_nmi_nfb = {.notifier_call = cpu_nmi_callback}; static DEFINE_PER_CPU(unsigned int, last_irq_sums); static DEFINE_PER_CPU(unsigned int, alert_counter); @@ -479,10 +481,10 @@ int __init watchdog_setup(void) unsigned int cpu; /* - * Activate periodic heartbeats. We cannot do this earlier during + * Activate periodic heartbeats. We cannot do this earlier during * setup because the timer infrastructure is not available. */ - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) cpu_nmi_callback(&cpu_nmi_nfb, CPU_UP_PREPARE, (void *)(long)cpu); register_cpu_notifier(&cpu_nmi_nfb); @@ -503,15 +505,15 @@ bool nmi_watchdog_tick(const struct cpu_user_regs *regs) * before doing the oops ... */ this_cpu(alert_counter)++; - if ( this_cpu(alert_counter) == opt_watchdog_timeout*nmi_hz ) + if ( this_cpu(alert_counter) == opt_watchdog_timeout * nmi_hz ) { console_force_unlock(); printk("Watchdog timer detects that CPU%d is stuck!\n", smp_processor_id()); fatal_trap(regs, 1); } - } - else + } + else { this_cpu(last_irq_sums) = sum; this_cpu(alert_counter) = 0; @@ -591,7 +593,7 @@ static void do_nmi_stats(unsigned char key) struct vcpu *v; printk("CPU\tNMI\n"); - for_each_online_cpu ( i ) + for_each_online_cpu (i) printk("%3d\t%3d\n", i, nmi_count(i)); if ( ((d = hardware_domain) == NULL) || (d->vcpu == NULL) || @@ -600,8 +602,7 @@ static void do_nmi_stats(unsigned char key) i = v->async_exception_mask & (1 << VCPU_TRAP_NMI); if ( v->nmi_pending || i ) - printk("dom0 vpu0: NMI %s%s\n", - v->nmi_pending ? 
"pending " : "", + printk("dom0 vpu0: NMI %s%s\n", v->nmi_pending ? "pending " : "", i ? "masked " : ""); else printk("dom0 vcpu0: NMI neither pending nor masked\n"); diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c index b3c9c12d7f..78d7d00af3 100644 --- a/xen/arch/x86/numa.c +++ b/xen/arch/x86/numa.c @@ -1,8 +1,8 @@ -/* +/* * Generic VM initialization for x86-64 NUMA setups. * Copyright 2002,2003 Andi Kleen, SuSE Labs. * Adapted for Xen: Ryan Harper - */ + */ #include #include @@ -26,7 +26,7 @@ custom_param("numa", numa_setup); #endif /* from proto.h */ -#define round_up(x,y) ((((x)+(y))-1) & (~((y)-1))) +#define round_up(x, y) ((((x) + (y)) - 1) & (~((y)-1))) struct node_data node_data[MAX_NUMNODES]; @@ -36,18 +36,16 @@ static typeof(*memnodemap) _memnodemap[64]; unsigned long memnodemapsize; u8 *memnodemap; -nodeid_t cpu_to_node[NR_CPUS] __read_mostly = { - [0 ... NR_CPUS-1] = NUMA_NO_NODE -}; +nodeid_t cpu_to_node[NR_CPUS] __read_mostly = {[0 ... NR_CPUS - 1] = + NUMA_NO_NODE}; /* * Keep BIOS's CPU2node information, should not be used for memory allocaion */ -nodeid_t apicid_to_node[MAX_LOCAL_APIC] = { - [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE -}; +nodeid_t apicid_to_node[MAX_LOCAL_APIC] = {[0 ... MAX_LOCAL_APIC - 1] = + NUMA_NO_NODE}; cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly; -nodemask_t __read_mostly node_online_map = { { [0] = 1UL } }; +nodemask_t __read_mostly node_online_map = {{[0] = 1UL}}; bool numa_off; s8 acpi_numa = 0; @@ -64,8 +62,8 @@ int srat_disabled(void) * 0 if memnodmap[] too small (of shift too small) * -1 if node overlap or lost ram (shift too big) */ -static int __init populate_memnodemap(const struct node *nodes, - int numnodes, int shift, nodeid_t *nodeids) +static int __init populate_memnodemap(const struct node *nodes, int numnodes, + int shift, nodeid_t *nodeids) { unsigned long spdx, epdx; int i, res = -1; @@ -104,8 +102,8 @@ static int __init allocate_cachealigned_memnodemap(void) memnodemap = mfn_to_virt(mfn); mfn <<= PAGE_SHIFT; size <<= PAGE_SHIFT; - printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n", - mfn, mfn + size); + printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n", mfn, + mfn + size); memnodemapsize = size / sizeof(*memnodemap); return 0; @@ -115,8 +113,7 @@ static int __init allocate_cachealigned_memnodemap(void) * The LSB of all start and end addresses in the node map is the value of the * maximum possible shift. 
*/ -static int __init extract_lsb_from_nodes(const struct node *nodes, - int numnodes) +static int __init extract_lsb_from_nodes(const struct node *nodes, int numnodes) { int i, nodes_used = 0; unsigned long spdx, epdx; @@ -136,7 +133,7 @@ static int __init extract_lsb_from_nodes(const struct node *nodes, if ( nodes_used <= 1 ) i = BITS_PER_LONG - 1; else - i = find_first_bit(&bitfield, sizeof(unsigned long)*8); + i = find_first_bit(&bitfield, sizeof(unsigned long) * 8); memnodemapsize = (memtop >> i) + 1; return i; } @@ -156,8 +153,9 @@ int __init compute_hash_shift(struct node *nodes, int numnodes, if ( populate_memnodemap(nodes, numnodes, shift, nodeids) != 1 ) { printk(KERN_INFO "Your memory is not aligned you need to " - "rebuild your hypervisor with a bigger NODEMAPSIZE " - "shift=%d\n", shift); + "rebuild your hypervisor with a bigger NODEMAPSIZE " + "shift=%d\n", + shift); return -1; } @@ -165,7 +163,7 @@ int __init compute_hash_shift(struct node *nodes, int numnodes, } /* initialize NODE_DATA given nodeid and start/end */ void __init setup_node_bootmem(nodeid_t nodeid, u64 start, u64 end) -{ +{ unsigned long start_pfn, end_pfn; start_pfn = start >> PAGE_SHIFT; @@ -175,7 +173,7 @@ void __init setup_node_bootmem(nodeid_t nodeid, u64 start, u64 end) NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn; node_set_online(nodeid); -} +} void __init numa_init_array(void) { @@ -206,7 +204,7 @@ static int __init numa_emulation(u64 start_pfn, u64 end_pfn) { int i; struct node nodes[MAX_NUMNODES]; - u64 sz = ((end_pfn - start_pfn)< 1 ) @@ -214,21 +212,22 @@ static int __init numa_emulation(u64 start_pfn, u64 end_pfn) u64 x = 1; while ( (x << 1) < sz ) x <<= 1; - if ( x < sz/2 ) - printk(KERN_ERR "Numa emulation unbalanced. Complain to maintainer\n"); + if ( x < sz / 2 ) + printk(KERN_ERR + "Numa emulation unbalanced. Complain to maintainer\n"); sz = x; } - memset(&nodes,0,sizeof(nodes)); + memset(&nodes, 0, sizeof(nodes)); for ( i = 0; i < numa_fake; i++ ) { - nodes[i].start = (start_pfn<> 20); node_set_online(i); } @@ -239,8 +238,7 @@ static int __init numa_emulation(u64 start_pfn, u64 end_pfn) printk(KERN_ERR "No NUMA hash function found. Emulation disabled.\n"); return -1; } - for_each_online_node ( i ) - setup_node_bootmem(i, nodes[i].start, nodes[i].end); + for_each_online_node(i) setup_node_bootmem(i, nodes[i].start, nodes[i].end); numa_init_array(); return 0; @@ -248,7 +246,7 @@ static int __init numa_emulation(u64 start_pfn, u64 end_pfn) #endif void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn) -{ +{ int i; #ifdef CONFIG_NUMA_EMU @@ -258,16 +256,15 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn) #ifdef CONFIG_ACPI_NUMA if ( !numa_off && !acpi_scan_nodes((u64)start_pfn << PAGE_SHIFT, - (u64)end_pfn << PAGE_SHIFT) ) + (u64)end_pfn << PAGE_SHIFT) ) return; #endif printk(KERN_INFO "%s\n", numa_off ? 
"NUMA turned off" : "No NUMA configuration found"); - printk(KERN_INFO "Faking a node at %016"PRIx64"-%016"PRIx64"\n", - (u64)start_pfn << PAGE_SHIFT, - (u64)end_pfn << PAGE_SHIFT); + printk(KERN_INFO "Faking a node at %016" PRIx64 "-%016" PRIx64 "\n", + (u64)start_pfn << PAGE_SHIFT, (u64)end_pfn << PAGE_SHIFT); /* setup dummy node covering all memory */ memnode_shift = BITS_PER_LONG - 1; memnodemap = _memnodemap; @@ -277,13 +274,13 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn) numa_set_node(i, 0); cpumask_copy(&node_to_cpumask[0], cpumask_of(0)); setup_node_bootmem(0, (u64)start_pfn << PAGE_SHIFT, - (u64)end_pfn << PAGE_SHIFT); + (u64)end_pfn << PAGE_SHIFT); } void numa_add_cpu(int cpu) { cpumask_set_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]); -} +} void numa_set_node(int cpu, nodeid_t node) { @@ -293,21 +290,21 @@ void numa_set_node(int cpu, nodeid_t node) /* [numa=off] */ static __init int numa_setup(const char *opt) { - if ( !strncmp(opt,"off",3) ) + if ( !strncmp(opt, "off", 3) ) numa_off = true; - else if ( !strncmp(opt,"on",2) ) + else if ( !strncmp(opt, "on", 2) ) numa_off = false; #ifdef CONFIG_NUMA_EMU else if ( !strncmp(opt, "fake=", 5) ) { numa_off = false; - numa_fake = simple_strtoul(opt+5,NULL,0); + numa_fake = simple_strtoul(opt + 5, NULL, 0); if ( numa_fake >= MAX_NUMNODES ) numa_fake = MAX_NUMNODES; } #endif #ifdef CONFIG_ACPI_NUMA - else if ( !strncmp(opt,"noacpi",6) ) + else if ( !strncmp(opt, "noacpi", 6) ) { numa_off = false; acpi_numa = -1; @@ -317,7 +314,7 @@ static __init int numa_setup(const char *opt) return -EINVAL; return 0; -} +} /* * Setup early cpu_to_node. @@ -352,10 +349,9 @@ unsigned int __init arch_get_dma_bitsize(void) { unsigned int node; - for_each_online_node(node) - if ( node_spanned_pages(node) && - !(node_start_pfn(node) >> (32 - PAGE_SHIFT)) ) - break; + for_each_online_node( + node) if ( node_spanned_pages(node) && + !(node_start_pfn(node) >> (32 - PAGE_SHIFT)) ) break; if ( node >= MAX_NUMNODES ) panic("No node with memory below 4Gb\n"); @@ -364,8 +360,9 @@ unsigned int __init arch_get_dma_bitsize(void) * its spanned pages by (arbitrarily chosen) 4. 
*/ return min_t(unsigned int, - flsl(node_start_pfn(node) + node_spanned_pages(node) / 4 - 1) - + PAGE_SHIFT, 32); + flsl(node_start_pfn(node) + node_spanned_pages(node) / 4 - 1) + + PAGE_SHIFT, + 32); } static void dump_numa(unsigned char key) @@ -377,25 +374,24 @@ static void dump_numa(unsigned char key) unsigned int page_num_node[MAX_NUMNODES]; const struct vnuma_info *vnuma; - printk("'%c' pressed -> dumping numa info (now = %"PRI_stime")\n", key, + printk("'%c' pressed -> dumping numa info (now = %" PRI_stime ")\n", key, now); - for_each_online_node ( i ) + for_each_online_node(i) { paddr_t pa = pfn_to_paddr(node_start_pfn(i) + 1); - printk("NODE%u start->%lu size->%lu free->%lu\n", - i, node_start_pfn(i), node_spanned_pages(i), - avail_node_heap_pages(i)); + printk("NODE%u start->%lu size->%lu free->%lu\n", i, node_start_pfn(i), + node_spanned_pages(i), avail_node_heap_pages(i)); /* sanity check phys_to_nid() */ if ( phys_to_nid(pa) != i ) - printk("phys_to_nid(%"PRIpaddr") -> %d should be %u\n", - pa, phys_to_nid(pa), i); + printk("phys_to_nid(%" PRIpaddr ") -> %d should be %u\n", pa, + phys_to_nid(pa), i); } j = cpumask_first(&cpu_online_map); n = 0; - for_each_online_cpu ( i ) + for_each_online_cpu (i) { if ( i != j + n || cpu_to_node[j] != cpu_to_node[i] ) { @@ -417,24 +413,23 @@ static void dump_numa(unsigned char key) rcu_read_lock(&domlist_read_lock); printk("Memory location of each domain:\n"); - for_each_domain ( d ) + for_each_domain (d) { process_pending_softirqs(); printk("Domain %u (total: %u):\n", d->domain_id, d->tot_pages); - for_each_online_node ( i ) - page_num_node[i] = 0; + for_each_online_node(i) page_num_node[i] = 0; spin_lock(&d->page_alloc_lock); - page_list_for_each(page, &d->page_list) + page_list_for_each (page, &d->page_list) { i = phys_to_nid(page_to_maddr(page)); page_num_node[i]++; } spin_unlock(&d->page_alloc_lock); - for_each_online_node ( i ) + for_each_online_node(i) printk(" Node %u: %u\n", i, page_num_node[i]); if ( !read_trylock(&d->vnuma_rwlock) ) @@ -483,7 +478,7 @@ static void dump_numa(unsigned char key) } } - if ( start_cpu != ~0U && start_cpu != j - 1 ) + if ( start_cpu != ~0U && start_cpu != j - 1 ) printk("-%d", j - 1); printk("\n"); @@ -491,9 +486,8 @@ static void dump_numa(unsigned char key) for ( j = 0; j < vnuma->nr_vmemranges; j++ ) { if ( vnuma->vmemrange[j].nid == i ) - printk(" %016"PRIx64" - %016"PRIx64"\n", - vnuma->vmemrange[j].start, - vnuma->vmemrange[j].end); + printk(" %016" PRIx64 " - %016" PRIx64 "\n", + vnuma->vmemrange[j].start, vnuma->vmemrange[j].end); } } @@ -509,4 +503,3 @@ static __init int register_numa_trigger(void) return 0; } __initcall(register_numa_trigger); - diff --git a/xen/arch/x86/oprofile/backtrace.c b/xen/arch/x86/oprofile/backtrace.c index 316821fd34..1eb6330b9a 100644 --- a/xen/arch/x86/oprofile/backtrace.c +++ b/xen/arch/x86/oprofile/backtrace.c @@ -15,14 +15,16 @@ #include #include -struct __packed frame_head { - struct frame_head * ebp; +struct __packed frame_head +{ + struct frame_head *ebp; unsigned long ret; }; typedef struct frame_head frame_head_t; DEFINE_XEN_GUEST_HANDLE(frame_head_t); -struct __packed frame_head_32bit { +struct __packed frame_head_32bit +{ uint32_t ebp; uint32_t ret; }; @@ -33,41 +35,40 @@ static struct frame_head * dump_hypervisor_backtrace(struct vcpu *vcpu, const struct frame_head *head, int mode) { - if (!xenoprof_add_trace(vcpu, head->ret, mode)) + if ( !xenoprof_add_trace(vcpu, head->ret, mode) ) return 0; - + /* frame pointers should strictly progress back up the 
stack * (towards higher addresses) */ - if (head >= head->ebp) + if ( head >= head->ebp ) return NULL; - + return head->ebp; } static inline int is_32bit_vcpu(struct vcpu *vcpu) { - if (is_hvm_vcpu(vcpu)) + if ( is_hvm_vcpu(vcpu) ) return !hvm_long_mode_active(vcpu); else return is_pv_32bit_vcpu(vcpu); } static struct frame_head * -dump_guest_backtrace(struct vcpu *vcpu, const struct frame_head *head, - int mode) +dump_guest_backtrace(struct vcpu *vcpu, const struct frame_head *head, int mode) { frame_head_t bufhead; if ( is_32bit_vcpu(vcpu) ) { - __compat_handle_const_frame_head32_t guest_head = - { .c = (unsigned long)head }; + __compat_handle_const_frame_head32_t guest_head = { + .c = (unsigned long)head}; frame_head32_t bufhead32; /* Also check accessibility of one struct frame_head beyond */ - if (!compat_handle_okay(guest_head, 2)) + if ( !compat_handle_okay(guest_head, 2) ) return 0; - if (__copy_from_compat(&bufhead32, guest_head, 1)) + if ( __copy_from_compat(&bufhead32, guest_head, 1) ) return 0; bufhead.ebp = (struct frame_head *)(unsigned long)bufhead32.ebp; bufhead.ret = bufhead32.ret; @@ -75,26 +76,26 @@ dump_guest_backtrace(struct vcpu *vcpu, const struct frame_head *head, else { XEN_GUEST_HANDLE(const_frame_head_t) guest_head; - XEN_GUEST_HANDLE_PARAM(const_frame_head_t) guest_head_param = - const_guest_handle_from_ptr(head, frame_head_t); - guest_head = guest_handle_from_param(guest_head_param, - const_frame_head_t); + XEN_GUEST_HANDLE_PARAM(const_frame_head_t) + guest_head_param = const_guest_handle_from_ptr(head, frame_head_t); + guest_head = + guest_handle_from_param(guest_head_param, const_frame_head_t); /* Also check accessibility of one struct frame_head beyond */ - if (!guest_handle_okay(guest_head, 2)) + if ( !guest_handle_okay(guest_head, 2) ) return 0; - if (__copy_from_guest(&bufhead, guest_head, 1)) + if ( __copy_from_guest(&bufhead, guest_head, 1) ) return 0; } - - if (!xenoprof_add_trace(vcpu, bufhead.ret, mode)) + + if ( !xenoprof_add_trace(vcpu, bufhead.ret, mode) ) return 0; - + /* frame pointers should strictly progress back up the stack * (towards higher addresses) */ - if (head >= bufhead.ebp) + if ( head >= bufhead.ebp ) return NULL; - + return bufhead.ebp; } @@ -130,7 +131,7 @@ dump_guest_backtrace(struct vcpu *vcpu, const struct frame_head *head, */ #if defined(CONFIG_FRAME_POINTER) static int valid_hypervisor_stack(const struct frame_head *head, - const struct cpu_user_regs *regs) + const struct cpu_user_regs *regs) { unsigned long headaddr = (unsigned long)head; unsigned long stack = (unsigned long)regs->rsp; @@ -141,23 +142,24 @@ static int valid_hypervisor_stack(const struct frame_head *head, #else /* without fp, it's just junk */ static int valid_hypervisor_stack(const struct frame_head *head, - const struct cpu_user_regs *regs) + const struct cpu_user_regs *regs) { return 0; } #endif void xenoprof_backtrace(struct vcpu *vcpu, const struct cpu_user_regs *regs, - unsigned long depth, int mode) + unsigned long depth, int mode) { const struct frame_head *head = (void *)regs->rbp; - if (mode > 1) { - while (depth-- && valid_hypervisor_stack(head, regs)) + if ( mode > 1 ) + { + while ( depth-- && valid_hypervisor_stack(head, regs) ) head = dump_hypervisor_backtrace(vcpu, head, mode); return; } - while (depth-- && head) + while ( depth-- && head ) head = dump_guest_backtrace(vcpu, head, mode); } diff --git a/xen/arch/x86/oprofile/nmi_int.c b/xen/arch/x86/oprofile/nmi_int.c index 3dfb8fef93..8e58ef39e1 100644 --- a/xen/arch/x86/oprofile/nmi_int.c +++ 
b/xen/arch/x86/oprofile/nmi_int.c @@ -37,434 +37,440 @@ static unsigned long saved_lvtpc[NR_CPUS]; static char *cpu_type; -static int passive_domain_msr_op_checks(unsigned int msr, int *typep, int *indexp) +static int passive_domain_msr_op_checks(unsigned int msr, int *typep, + int *indexp) { - struct vpmu_struct *vpmu = vcpu_vpmu(current); - if ( model == NULL ) - return 0; - if ( model->is_arch_pmu_msr == NULL ) - return 0; - if ( !model->is_arch_pmu_msr(msr, typep, indexp) ) - return 0; - - if ( !vpmu_is_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED) ) - if ( ! model->allocated_msr(current) ) - return 0; - return 1; + struct vpmu_struct *vpmu = vcpu_vpmu(current); + if ( model == NULL ) + return 0; + if ( model->is_arch_pmu_msr == NULL ) + return 0; + if ( !model->is_arch_pmu_msr(msr, typep, indexp) ) + return 0; + + if ( !vpmu_is_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED) ) + if ( !model->allocated_msr(current) ) + return 0; + return 1; } int passive_domain_do_rdmsr(unsigned int msr, uint64_t *msr_content) { - int type, index; + int type, index; - if ( !passive_domain_msr_op_checks(msr, &type, &index)) - return 0; + if ( !passive_domain_msr_op_checks(msr, &type, &index) ) + return 0; - model->load_msr(current, type, index, msr_content); - return 1; + model->load_msr(current, type, index, msr_content); + return 1; } int passive_domain_do_wrmsr(unsigned int msr, uint64_t msr_content) { - int type, index; + int type, index; - if ( !passive_domain_msr_op_checks(msr, &type, &index)) - return 0; + if ( !passive_domain_msr_op_checks(msr, &type, &index) ) + return 0; - model->save_msr(current, type, index, msr_content); - return 1; + model->save_msr(current, type, index, msr_content); + return 1; } void passive_domain_destroy(struct vcpu *v) { - struct vpmu_struct *vpmu = vcpu_vpmu(v); - if ( vpmu_is_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED) ) - model->free_msr(v); + struct vpmu_struct *vpmu = vcpu_vpmu(v); + if ( vpmu_is_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED) ) + model->free_msr(v); } static int nmi_callback(const struct cpu_user_regs *regs, int cpu) { - int xen_mode, ovf; + int xen_mode, ovf; - ovf = model->check_ctrs(cpu, &cpu_msrs[cpu], regs); - xen_mode = ring_0(regs); - if ( ovf && is_active(current->domain) && !xen_mode ) - send_guest_vcpu_virq(current, VIRQ_XENOPROF); + ovf = model->check_ctrs(cpu, &cpu_msrs[cpu], regs); + xen_mode = ring_0(regs); + if ( ovf && is_active(current->domain) && !xen_mode ) + send_guest_vcpu_virq(current, VIRQ_XENOPROF); - if ( ovf == 2 ) - current->nmi_pending = 1; - return 1; + if ( ovf == 2 ) + current->nmi_pending = 1; + return 1; } - static void nmi_cpu_save_registers(struct op_msrs *msrs) { - unsigned int const nr_ctrs = model->num_counters; - unsigned int const nr_ctrls = model->num_controls; - struct op_msr *counters = msrs->counters; - struct op_msr *controls = msrs->controls; - unsigned int i; - - for (i = 0; i < nr_ctrs; ++i) { - rdmsrl(counters[i].addr, counters[i].value); - } - - for (i = 0; i < nr_ctrls; ++i) { - rdmsrl(controls[i].addr, controls[i].value); - } + unsigned int const nr_ctrs = model->num_counters; + unsigned int const nr_ctrls = model->num_controls; + struct op_msr *counters = msrs->counters; + struct op_msr *controls = msrs->controls; + unsigned int i; + + for ( i = 0; i < nr_ctrs; ++i ) + { + rdmsrl(counters[i].addr, counters[i].value); + } + + for ( i = 0; i < nr_ctrls; ++i ) + { + rdmsrl(controls[i].addr, controls[i].value); + } } - -static void nmi_save_registers(void * dummy) +static void nmi_save_registers(void *dummy) { - int 
cpu = smp_processor_id(); - struct op_msrs * msrs = &cpu_msrs[cpu]; - model->fill_in_addresses(msrs); - nmi_cpu_save_registers(msrs); + int cpu = smp_processor_id(); + struct op_msrs *msrs = &cpu_msrs[cpu]; + model->fill_in_addresses(msrs); + nmi_cpu_save_registers(msrs); } - static void free_msrs(void) { - int i; - for (i = 0; i < nr_cpu_ids; ++i) { - xfree(cpu_msrs[i].counters); - cpu_msrs[i].counters = NULL; - xfree(cpu_msrs[i].controls); - cpu_msrs[i].controls = NULL; - } + int i; + for ( i = 0; i < nr_cpu_ids; ++i ) + { + xfree(cpu_msrs[i].counters); + cpu_msrs[i].counters = NULL; + xfree(cpu_msrs[i].controls); + cpu_msrs[i].controls = NULL; + } } - static int allocate_msrs(void) { - int success = 1; - size_t controls_size = sizeof(struct op_msr) * model->num_controls; - size_t counters_size = sizeof(struct op_msr) * model->num_counters; - - int i; - for_each_online_cpu (i) { - cpu_msrs[i].counters = xmalloc_bytes(counters_size); - if (!cpu_msrs[i].counters) { - success = 0; - break; - } - cpu_msrs[i].controls = xmalloc_bytes(controls_size); - if (!cpu_msrs[i].controls) { - success = 0; - break; - } - } - - if (!success) - free_msrs(); - - return success; + int success = 1; + size_t controls_size = sizeof(struct op_msr) * model->num_controls; + size_t counters_size = sizeof(struct op_msr) * model->num_counters; + + int i; + for_each_online_cpu (i) + { + cpu_msrs[i].counters = xmalloc_bytes(counters_size); + if ( !cpu_msrs[i].counters ) + { + success = 0; + break; + } + cpu_msrs[i].controls = xmalloc_bytes(controls_size); + if ( !cpu_msrs[i].controls ) + { + success = 0; + break; + } + } + + if ( !success ) + free_msrs(); + + return success; } - -static void nmi_cpu_setup(void * dummy) +static void nmi_cpu_setup(void *dummy) { - int cpu = smp_processor_id(); - struct op_msrs * msrs = &cpu_msrs[cpu]; - model->setup_ctrs(msrs); + int cpu = smp_processor_id(); + struct op_msrs *msrs = &cpu_msrs[cpu]; + model->setup_ctrs(msrs); } - int nmi_setup_events(void) { - on_each_cpu(nmi_cpu_setup, NULL, 1); - return 0; + on_each_cpu(nmi_cpu_setup, NULL, 1); + return 0; } int nmi_reserve_counters(void) { - if (!allocate_msrs()) - return -ENOMEM; - - /* - * We need to be careful to install our NMI handler - * without actually triggering any NMIs as this will - * break the core code horrifically. - */ - if (reserve_lapic_nmi() < 0) { - free_msrs(); - return -EBUSY; - } - /* We need to serialize save and setup for HT because the subset - * of msrs are distinct for save and setup operations - */ - on_each_cpu(nmi_save_registers, NULL, 1); - return 0; + if ( !allocate_msrs() ) + return -ENOMEM; + + /* + * We need to be careful to install our NMI handler + * without actually triggering any NMIs as this will + * break the core code horrifically. 
+ */ + if ( reserve_lapic_nmi() < 0 ) + { + free_msrs(); + return -EBUSY; + } + /* We need to serialize save and setup for HT because the subset + * of msrs are distinct for save and setup operations + */ + on_each_cpu(nmi_save_registers, NULL, 1); + return 0; } int nmi_enable_virq(void) { - set_nmi_callback(nmi_callback); - return 0; + set_nmi_callback(nmi_callback); + return 0; } - void nmi_disable_virq(void) { - unset_nmi_callback(); + unset_nmi_callback(); } - -static void nmi_restore_registers(struct op_msrs * msrs) +static void nmi_restore_registers(struct op_msrs *msrs) { - unsigned int const nr_ctrs = model->num_counters; - unsigned int const nr_ctrls = model->num_controls; - struct op_msr * counters = msrs->counters; - struct op_msr * controls = msrs->controls; - unsigned int i; - - for (i = 0; i < nr_ctrls; ++i) { - wrmsrl(controls[i].addr, controls[i].value); - } - - for (i = 0; i < nr_ctrs; ++i) { - wrmsrl(counters[i].addr, counters[i].value); - } + unsigned int const nr_ctrs = model->num_counters; + unsigned int const nr_ctrls = model->num_controls; + struct op_msr *counters = msrs->counters; + struct op_msr *controls = msrs->controls; + unsigned int i; + + for ( i = 0; i < nr_ctrls; ++i ) + { + wrmsrl(controls[i].addr, controls[i].value); + } + + for ( i = 0; i < nr_ctrs; ++i ) + { + wrmsrl(counters[i].addr, counters[i].value); + } } - -static void nmi_cpu_shutdown(void * dummy) +static void nmi_cpu_shutdown(void *dummy) { - int cpu = smp_processor_id(); - struct op_msrs * msrs = &cpu_msrs[cpu]; - nmi_restore_registers(msrs); + int cpu = smp_processor_id(); + struct op_msrs *msrs = &cpu_msrs[cpu]; + nmi_restore_registers(msrs); } - void nmi_release_counters(void) { - on_each_cpu(nmi_cpu_shutdown, NULL, 1); - release_lapic_nmi(); - free_msrs(); + on_each_cpu(nmi_cpu_shutdown, NULL, 1); + release_lapic_nmi(); + free_msrs(); } - -static void nmi_cpu_start(void * dummy) +static void nmi_cpu_start(void *dummy) { - int cpu = smp_processor_id(); - struct op_msrs const * msrs = &cpu_msrs[cpu]; - saved_lvtpc[cpu] = apic_read(APIC_LVTPC); - apic_write(APIC_LVTPC, APIC_DM_NMI); - model->start(msrs); + int cpu = smp_processor_id(); + struct op_msrs const *msrs = &cpu_msrs[cpu]; + saved_lvtpc[cpu] = apic_read(APIC_LVTPC); + apic_write(APIC_LVTPC, APIC_DM_NMI); + model->start(msrs); } - int nmi_start(void) { - on_each_cpu(nmi_cpu_start, NULL, 1); - return 0; + on_each_cpu(nmi_cpu_start, NULL, 1); + return 0; } - -static void nmi_cpu_stop(void * dummy) +static void nmi_cpu_stop(void *dummy) { - unsigned int v; - int cpu = smp_processor_id(); - struct op_msrs const * msrs = &cpu_msrs[cpu]; - model->stop(msrs); - - /* restoring APIC_LVTPC can trigger an apic error because the delivery - * mode and vector nr combination can be illegal. That's by design: on - * power on apic lvt contain a zero vector nr which are legal only for - * NMI delivery mode. So inhibit apic err before restoring lvtpc - */ - if ( (apic_read(APIC_LVTPC) & APIC_MODE_MASK) != APIC_DM_NMI - || (apic_read(APIC_LVTPC) & APIC_LVT_MASKED) ) - { - printk("nmi_stop: APIC not good %ul\n", apic_read(APIC_LVTPC)); - mdelay(5000); - } - v = apic_read(APIC_LVTERR); - apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); - apic_write(APIC_LVTPC, saved_lvtpc[cpu]); - apic_write(APIC_LVTERR, v); + unsigned int v; + int cpu = smp_processor_id(); + struct op_msrs const *msrs = &cpu_msrs[cpu]; + model->stop(msrs); + + /* restoring APIC_LVTPC can trigger an apic error because the delivery + * mode and vector nr combination can be illegal. 
That's by design: on + * power on apic lvt contain a zero vector nr which are legal only for + * NMI delivery mode. So inhibit apic err before restoring lvtpc + */ + if ( (apic_read(APIC_LVTPC) & APIC_MODE_MASK) != APIC_DM_NMI || + (apic_read(APIC_LVTPC) & APIC_LVT_MASKED) ) + { + printk("nmi_stop: APIC not good %ul\n", apic_read(APIC_LVTPC)); + mdelay(5000); + } + v = apic_read(APIC_LVTERR); + apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); + apic_write(APIC_LVTPC, saved_lvtpc[cpu]); + apic_write(APIC_LVTERR, v); } - void nmi_stop(void) { - on_each_cpu(nmi_cpu_stop, NULL, 1); + on_each_cpu(nmi_cpu_stop, NULL, 1); } - -static int __init p4_init(char ** cpu_type) +static int __init p4_init(char **cpu_type) { - __u8 cpu_model = current_cpu_data.x86_model; - - if ((cpu_model > 6) || (cpu_model == 5)) { - printk("xenoprof: Initialization failed. " - "Intel processor model %d for pentium 4 family is not " - "supported\n", cpu_model); - return 0; - } - - switch (current_cpu_data.x86_num_siblings) { - case 1: - *cpu_type = "i386/p4"; - model = &op_p4_spec; - return 1; - - case 2: - *cpu_type = "i386/p4-ht"; - model = &op_p4_ht2_spec; - return 1; - } - - printk("Xenoprof ERROR: P4 HyperThreading detected with > 2 threads\n"); - - return 0; + __u8 cpu_model = current_cpu_data.x86_model; + + if ( (cpu_model > 6) || (cpu_model == 5) ) + { + printk("xenoprof: Initialization failed. " + "Intel processor model %d for pentium 4 family is not " + "supported\n", + cpu_model); + return 0; + } + + switch (current_cpu_data.x86_num_siblings) + { + case 1: + *cpu_type = "i386/p4"; + model = &op_p4_spec; + return 1; + + case 2: + *cpu_type = "i386/p4-ht"; + model = &op_p4_ht2_spec; + return 1; + } + + printk("Xenoprof ERROR: P4 HyperThreading detected with > 2 threads\n"); + + return 0; } - static int force_arch_perfmon; static int force_cpu_type(const char *str) { - if (!strcmp(str, "arch_perfmon")) { - force_arch_perfmon = 1; - printk(KERN_INFO "oprofile: forcing architectural perfmon\n"); - } - else - return -EINVAL; - - return 0; + if ( !strcmp(str, "arch_perfmon") ) + { + force_arch_perfmon = 1; + printk(KERN_INFO "oprofile: forcing architectural perfmon\n"); + } + else + return -EINVAL; + + return 0; } custom_param("cpu_type", force_cpu_type); -static int __init ppro_init(char ** cpu_type) +static int __init ppro_init(char **cpu_type) { - __u8 cpu_model = current_cpu_data.x86_model; - - if (force_arch_perfmon && cpu_has_arch_perfmon) - return 0; - - switch (cpu_model) { - case 14: - *cpu_type = "i386/core"; - break; - case 15: - *cpu_type = "i386/core_2"; - ppro_has_global_ctrl = 1; - break; - default: - /* Unknown */ - return 0; - } - - model = &op_ppro_spec; - return 1; + __u8 cpu_model = current_cpu_data.x86_model; + + if ( force_arch_perfmon && cpu_has_arch_perfmon ) + return 0; + + switch (cpu_model) + { + case 14: + *cpu_type = "i386/core"; + break; + case 15: + *cpu_type = "i386/core_2"; + ppro_has_global_ctrl = 1; + break; + default: + /* Unknown */ + return 0; + } + + model = &op_ppro_spec; + return 1; } static int __init arch_perfmon_init(char **cpu_type) { - if (!cpu_has_arch_perfmon) - return 0; - *cpu_type = "i386/arch_perfmon"; - model = &op_arch_perfmon_spec; - arch_perfmon_setup_counters(); - ppro_has_global_ctrl = 1; - return 1; + if ( !cpu_has_arch_perfmon ) + return 0; + *cpu_type = "i386/arch_perfmon"; + model = &op_arch_perfmon_spec; + arch_perfmon_setup_counters(); + ppro_has_global_ctrl = 1; + return 1; } static int __init nmi_init(void) { - __u8 vendor = current_cpu_data.x86_vendor; - 
__u8 family = current_cpu_data.x86; - __u8 _model = current_cpu_data.x86_model; - - if (!cpu_has_apic) { - printk("xenoprof: Initialization failed. No APIC\n"); - return -ENODEV; - } - - switch (vendor) { - case X86_VENDOR_AMD: - /* Needs to be at least an Athlon (or hammer in 32bit mode) */ - - switch (family) { - default: - printk("xenoprof: Initialization failed. " - "AMD processor family %d is not " - "supported\n", family); - return -ENODEV; - case 0xf: - model = &op_athlon_spec; - cpu_type = "x86-64/hammer"; - break; - case 0x10: - model = &op_athlon_spec; - cpu_type = "x86-64/family10"; - ibs_init(); - break; - case 0x11: - model = &op_athlon_spec; - cpu_type = "x86-64/family11h"; - break; - case 0x12: - model = &op_athlon_spec; - cpu_type = "x86-64/family12h"; - break; - case 0x14: - model = &op_athlon_spec; - cpu_type = "x86-64/family14h"; - break; - case 0x15: - model = &op_amd_fam15h_spec; - cpu_type = "x86-64/family15h"; - break; - case 0x16: - model = &op_athlon_spec; - cpu_type = "x86-64/family16h"; - break; - } - break; - - case X86_VENDOR_INTEL: - switch (family) { - /* Pentium IV */ - case 0xf: - p4_init(&cpu_type); - break; - - /* A P6-class processor */ - case 6: - ppro_init(&cpu_type); - break; - - default: - break; - } - if (!cpu_type && !arch_perfmon_init(&cpu_type)) { - printk("xenoprof: Initialization failed. " - "Intel processor family %d model %d " - "is not supported\n", family, _model); - return -ENODEV; - } - break; - - default: - printk("xenoprof: Initialization failed. " - "Unsupported processor. Unknown vendor %d\n", - vendor); - return -ENODEV; - } - - return 0; + __u8 vendor = current_cpu_data.x86_vendor; + __u8 family = current_cpu_data.x86; + __u8 _model = current_cpu_data.x86_model; + + if ( !cpu_has_apic ) + { + printk("xenoprof: Initialization failed. No APIC\n"); + return -ENODEV; + } + + switch (vendor) + { + case X86_VENDOR_AMD: + /* Needs to be at least an Athlon (or hammer in 32bit mode) */ + + switch (family) + { + default: + printk("xenoprof: Initialization failed. " + "AMD processor family %d is not " + "supported\n", + family); + return -ENODEV; + case 0xf: + model = &op_athlon_spec; + cpu_type = "x86-64/hammer"; + break; + case 0x10: + model = &op_athlon_spec; + cpu_type = "x86-64/family10"; + ibs_init(); + break; + case 0x11: + model = &op_athlon_spec; + cpu_type = "x86-64/family11h"; + break; + case 0x12: + model = &op_athlon_spec; + cpu_type = "x86-64/family12h"; + break; + case 0x14: + model = &op_athlon_spec; + cpu_type = "x86-64/family14h"; + break; + case 0x15: + model = &op_amd_fam15h_spec; + cpu_type = "x86-64/family15h"; + break; + case 0x16: + model = &op_athlon_spec; + cpu_type = "x86-64/family16h"; + break; + } + break; + + case X86_VENDOR_INTEL: + switch (family) + { + /* Pentium IV */ + case 0xf: + p4_init(&cpu_type); + break; + + /* A P6-class processor */ + case 6: + ppro_init(&cpu_type); + break; + + default: + break; + } + if ( !cpu_type && !arch_perfmon_init(&cpu_type) ) + { + printk("xenoprof: Initialization failed. " + "Intel processor family %d model %d " + "is not supported\n", + family, _model); + return -ENODEV; + } + break; + + default: + printk("xenoprof: Initialization failed. " + "Unsupported processor. 
Unknown vendor %d\n", + vendor); + return -ENODEV; + } + + return 0; } __initcall(nmi_init); int xenoprof_arch_init(int *num_events, char *_cpu_type) { - if (cpu_type == NULL) - return -ENODEV; - *num_events = model->num_counters; - strlcpy(_cpu_type, cpu_type, XENOPROF_CPU_TYPE_SIZE); - return 0; + if ( cpu_type == NULL ) + return -ENODEV; + *num_events = model->num_counters; + strlcpy(_cpu_type, cpu_type, XENOPROF_CPU_TYPE_SIZE); + return 0; } diff --git a/xen/arch/x86/oprofile/op_model_athlon.c b/xen/arch/x86/oprofile/op_model_athlon.c index 3d6e26f636..7957b14c8f 100644 --- a/xen/arch/x86/oprofile/op_model_athlon.c +++ b/xen/arch/x86/oprofile/op_model_athlon.c @@ -33,18 +33,30 @@ #define MAX_COUNTERS FAM15H_NUM_COUNTERS -#define CTR_READ(msr_content,msrs,c) do {rdmsrl(msrs->counters[(c)].addr, (msr_content));} while (0) -#define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1);} while (0) -#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<31))) - -#define CTRL_READ(msr_content,msrs,c) do {rdmsrl(msrs->controls[(c)].addr, (msr_content));} while (0) -#define CTRL_WRITE(msr_content,msrs,c) do {wrmsrl(msrs->controls[(c)].addr, (msr_content));} while (0) -#define CTRL_SET_ACTIVE(n) (n |= (1ULL<<22)) -#define CTRL_SET_INACTIVE(n) (n &= ~(1ULL<<22)) -#define CTRL_CLEAR(val) (val &= (1ULL<<21)) -#define CTRL_SET_ENABLE(val) (val |= 1ULL<<20) -#define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16)) -#define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17)) +#define CTR_READ(msr_content, msrs, c) \ + do { \ + rdmsrl(msrs->counters[(c)].addr, (msr_content)); \ + } while ( 0 ) +#define CTR_WRITE(l, msrs, c) \ + do { \ + wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); \ + } while ( 0 ) +#define CTR_OVERFLOWED(n) (!((n) & (1ULL << 31))) + +#define CTRL_READ(msr_content, msrs, c) \ + do { \ + rdmsrl(msrs->controls[(c)].addr, (msr_content)); \ + } while ( 0 ) +#define CTRL_WRITE(msr_content, msrs, c) \ + do { \ + wrmsrl(msrs->controls[(c)].addr, (msr_content)); \ + } while ( 0 ) +#define CTRL_SET_ACTIVE(n) (n |= (1ULL << 22)) +#define CTRL_SET_INACTIVE(n) (n &= ~(1ULL << 22)) +#define CTRL_CLEAR(val) (val &= (1ULL << 21)) +#define CTRL_SET_ENABLE(val) (val |= 1ULL << 20) +#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16)) +#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17)) #define CTRL_SET_UM(val, m) (val |= ((m & 0xff) << 8)) #define CTRL_SET_EVENT(val, e) (val |= (((e >> 8) & 0xf) | (e & 0xff))) #define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 0x1ULL) << 41)) @@ -58,59 +70,61 @@ u32 ibs_caps = 0; static u64 ibs_op_ctl; /* IBS cpuid feature detection */ -#define IBS_CPUID_FEATURES 0x8000001b +#define IBS_CPUID_FEATURES 0x8000001b /* IBS MSRs */ -#define MSR_AMD64_IBSFETCHCTL 0xc0011030 -#define MSR_AMD64_IBSFETCHLINAD 0xc0011031 -#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 -#define MSR_AMD64_IBSOPCTL 0xc0011033 -#define MSR_AMD64_IBSOPRIP 0xc0011034 -#define MSR_AMD64_IBSOPDATA 0xc0011035 -#define MSR_AMD64_IBSOPDATA2 0xc0011036 -#define MSR_AMD64_IBSOPDATA3 0xc0011037 -#define MSR_AMD64_IBSDCLINAD 0xc0011038 -#define MSR_AMD64_IBSDCPHYSAD 0xc0011039 -#define MSR_AMD64_IBSCTL 0xc001103a +#define MSR_AMD64_IBSFETCHCTL 0xc0011030 +#define MSR_AMD64_IBSFETCHLINAD 0xc0011031 +#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 +#define MSR_AMD64_IBSOPCTL 0xc0011033 +#define MSR_AMD64_IBSOPRIP 0xc0011034 +#define MSR_AMD64_IBSOPDATA 0xc0011035 +#define MSR_AMD64_IBSOPDATA2 0xc0011036 +#define MSR_AMD64_IBSOPDATA3 0xc0011037 +#define MSR_AMD64_IBSDCLINAD 0xc0011038 +#define 
MSR_AMD64_IBSDCPHYSAD 0xc0011039 +#define MSR_AMD64_IBSCTL 0xc001103a /* * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but * bit 0 is used to indicate the existence of IBS. */ -#define IBS_CAPS_AVAIL (1LL<<0) -#define IBS_CAPS_RDWROPCNT (1LL<<3) -#define IBS_CAPS_OPCNT (1LL<<4) +#define IBS_CAPS_AVAIL (1LL << 0) +#define IBS_CAPS_RDWROPCNT (1LL << 3) +#define IBS_CAPS_OPCNT (1LL << 4) /* IBS randomization macros */ -#define IBS_RANDOM_BITS 12 -#define IBS_RANDOM_MASK ((1ULL << IBS_RANDOM_BITS) - 1) -#define IBS_RANDOM_MAXCNT_OFFSET (1ULL << (IBS_RANDOM_BITS - 5)) +#define IBS_RANDOM_BITS 12 +#define IBS_RANDOM_MASK ((1ULL << IBS_RANDOM_BITS) - 1) +#define IBS_RANDOM_MAXCNT_OFFSET (1ULL << (IBS_RANDOM_BITS - 5)) /* IbsFetchCtl bits/masks */ -#define IBS_FETCH_RAND_EN (1ULL<<57) -#define IBS_FETCH_VAL (1ULL<<49) -#define IBS_FETCH_ENABLE (1ULL<<48) -#define IBS_FETCH_CNT 0xFFFF0000ULL -#define IBS_FETCH_MAX_CNT 0x0000FFFFULL +#define IBS_FETCH_RAND_EN (1ULL << 57) +#define IBS_FETCH_VAL (1ULL << 49) +#define IBS_FETCH_ENABLE (1ULL << 48) +#define IBS_FETCH_CNT 0xFFFF0000ULL +#define IBS_FETCH_MAX_CNT 0x0000FFFFULL /* IbsOpCtl bits */ -#define IBS_OP_CNT_CTL (1ULL<<19) -#define IBS_OP_VAL (1ULL<<18) -#define IBS_OP_ENABLE (1ULL<<17) -#define IBS_OP_MAX_CNT 0x0000FFFFULL +#define IBS_OP_CNT_CTL (1ULL << 19) +#define IBS_OP_VAL (1ULL << 18) +#define IBS_OP_ENABLE (1ULL << 17) +#define IBS_OP_MAX_CNT 0x0000FFFFULL /* IBS sample identifier */ -#define IBS_FETCH_CODE 13 -#define IBS_OP_CODE 14 - -#define clamp(val, min, max) ({ \ - typeof(val) __val = (val); \ - typeof(min) __min = (min); \ - typeof(max) __max = (max); \ - (void) (&__val == &__min); \ - (void) (&__val == &__max); \ - __val = __val < __min ? __min: __val; \ - __val > __max ? __max: __val; }) +#define IBS_FETCH_CODE 13 +#define IBS_OP_CODE 14 + +#define clamp(val, min, max) \ + ({ \ + typeof(val) __val = (val); \ + typeof(min) __min = (min); \ + typeof(max) __max = (max); \ + (void)(&__val == &__min); \ + (void)(&__val == &__max); \ + __val = __val < __min ? __min : __val; \ + __val > __max ? 
__max : __val; \ + }) /* * 16-bit Linear Feedback Shift Register (LFSR) @@ -121,10 +135,9 @@ static unsigned int lfsr_random(void) unsigned int bit; /* Compute next bit to shift in */ - bit = ((lfsr_value >> 0) ^ - (lfsr_value >> 2) ^ - (lfsr_value >> 3) ^ - (lfsr_value >> 5)) & 0x0001; + bit = ((lfsr_value >> 0) ^ (lfsr_value >> 2) ^ (lfsr_value >> 3) ^ + (lfsr_value >> 5)) & + 0x0001; /* Advance to next register value */ lfsr_value = (lfsr_value >> 1) | (bit << 15); @@ -143,7 +156,7 @@ static inline u64 op_amd_randomize_ibs_op(u64 val) { unsigned int random = lfsr_random(); - if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) + if ( !(ibs_caps & IBS_CAPS_RDWROPCNT) ) /* * Work around if the hw can not write to IbsOpCurCnt * @@ -164,383 +177,407 @@ static inline u64 op_amd_randomize_ibs_op(u64 val) return val; } -static void athlon_fill_in_addresses(struct op_msrs * const msrs) +static void athlon_fill_in_addresses(struct op_msrs *const msrs) { - msrs->counters[0].addr = MSR_K7_PERFCTR0; - msrs->counters[1].addr = MSR_K7_PERFCTR1; - msrs->counters[2].addr = MSR_K7_PERFCTR2; - msrs->counters[3].addr = MSR_K7_PERFCTR3; - - msrs->controls[0].addr = MSR_K7_EVNTSEL0; - msrs->controls[1].addr = MSR_K7_EVNTSEL1; - msrs->controls[2].addr = MSR_K7_EVNTSEL2; - msrs->controls[3].addr = MSR_K7_EVNTSEL3; + msrs->counters[0].addr = MSR_K7_PERFCTR0; + msrs->counters[1].addr = MSR_K7_PERFCTR1; + msrs->counters[2].addr = MSR_K7_PERFCTR2; + msrs->counters[3].addr = MSR_K7_PERFCTR3; + + msrs->controls[0].addr = MSR_K7_EVNTSEL0; + msrs->controls[1].addr = MSR_K7_EVNTSEL1; + msrs->controls[2].addr = MSR_K7_EVNTSEL2; + msrs->controls[3].addr = MSR_K7_EVNTSEL3; } -static void fam15h_fill_in_addresses(struct op_msrs * const msrs) +static void fam15h_fill_in_addresses(struct op_msrs *const msrs) { - msrs->counters[0].addr = MSR_AMD_FAM15H_PERFCTR0; - msrs->counters[1].addr = MSR_AMD_FAM15H_PERFCTR1; - msrs->counters[2].addr = MSR_AMD_FAM15H_PERFCTR2; - msrs->counters[3].addr = MSR_AMD_FAM15H_PERFCTR3; - msrs->counters[4].addr = MSR_AMD_FAM15H_PERFCTR4; - msrs->counters[5].addr = MSR_AMD_FAM15H_PERFCTR5; - - msrs->controls[0].addr = MSR_AMD_FAM15H_EVNTSEL0; - msrs->controls[1].addr = MSR_AMD_FAM15H_EVNTSEL1; - msrs->controls[2].addr = MSR_AMD_FAM15H_EVNTSEL2; - msrs->controls[3].addr = MSR_AMD_FAM15H_EVNTSEL3; - msrs->controls[4].addr = MSR_AMD_FAM15H_EVNTSEL4; - msrs->controls[5].addr = MSR_AMD_FAM15H_EVNTSEL5; + msrs->counters[0].addr = MSR_AMD_FAM15H_PERFCTR0; + msrs->counters[1].addr = MSR_AMD_FAM15H_PERFCTR1; + msrs->counters[2].addr = MSR_AMD_FAM15H_PERFCTR2; + msrs->counters[3].addr = MSR_AMD_FAM15H_PERFCTR3; + msrs->counters[4].addr = MSR_AMD_FAM15H_PERFCTR4; + msrs->counters[5].addr = MSR_AMD_FAM15H_PERFCTR5; + + msrs->controls[0].addr = MSR_AMD_FAM15H_EVNTSEL0; + msrs->controls[1].addr = MSR_AMD_FAM15H_EVNTSEL1; + msrs->controls[2].addr = MSR_AMD_FAM15H_EVNTSEL2; + msrs->controls[3].addr = MSR_AMD_FAM15H_EVNTSEL3; + msrs->controls[4].addr = MSR_AMD_FAM15H_EVNTSEL4; + msrs->controls[5].addr = MSR_AMD_FAM15H_EVNTSEL5; } -static void athlon_setup_ctrs(struct op_msrs const * const msrs) +static void athlon_setup_ctrs(struct op_msrs const *const msrs) { - uint64_t msr_content; - int i; - unsigned int const nr_ctrs = model->num_counters; - unsigned int const nr_ctrls = model->num_controls; - - /* clear all counters */ - for (i = 0 ; i < nr_ctrls; ++i) { - CTRL_READ(msr_content, msrs, i); - CTRL_CLEAR(msr_content); - CTRL_WRITE(msr_content, msrs, i); - } - - /* avoid a false detection of ctr overflows in NMI handler */ - 
for (i = 0; i < nr_ctrs; ++i) { - CTR_WRITE(1, msrs, i); - } - - /* enable active counters */ - for (i = 0; i < nr_ctrs; ++i) { - if (counter_config[i].enabled) { - reset_value[i] = counter_config[i].count; - - CTR_WRITE(counter_config[i].count, msrs, i); - - CTRL_READ(msr_content, msrs, i); - CTRL_CLEAR(msr_content); - CTRL_SET_ENABLE(msr_content); - CTRL_SET_USR(msr_content, counter_config[i].user); - CTRL_SET_KERN(msr_content, counter_config[i].kernel); - CTRL_SET_UM(msr_content, counter_config[i].unit_mask); - CTRL_SET_EVENT(msr_content, counter_config[i].event); - CTRL_SET_HOST_ONLY(msr_content, 0); - CTRL_SET_GUEST_ONLY(msr_content, 0); - CTRL_WRITE(msr_content, msrs, i); - } else { - reset_value[i] = 0; - } - } + uint64_t msr_content; + int i; + unsigned int const nr_ctrs = model->num_counters; + unsigned int const nr_ctrls = model->num_controls; + + /* clear all counters */ + for ( i = 0; i < nr_ctrls; ++i ) + { + CTRL_READ(msr_content, msrs, i); + CTRL_CLEAR(msr_content); + CTRL_WRITE(msr_content, msrs, i); + } + + /* avoid a false detection of ctr overflows in NMI handler */ + for ( i = 0; i < nr_ctrs; ++i ) + { + CTR_WRITE(1, msrs, i); + } + + /* enable active counters */ + for ( i = 0; i < nr_ctrs; ++i ) + { + if ( counter_config[i].enabled ) + { + reset_value[i] = counter_config[i].count; + + CTR_WRITE(counter_config[i].count, msrs, i); + + CTRL_READ(msr_content, msrs, i); + CTRL_CLEAR(msr_content); + CTRL_SET_ENABLE(msr_content); + CTRL_SET_USR(msr_content, counter_config[i].user); + CTRL_SET_KERN(msr_content, counter_config[i].kernel); + CTRL_SET_UM(msr_content, counter_config[i].unit_mask); + CTRL_SET_EVENT(msr_content, counter_config[i].event); + CTRL_SET_HOST_ONLY(msr_content, 0); + CTRL_SET_GUEST_ONLY(msr_content, 0); + CTRL_WRITE(msr_content, msrs, i); + } + else + { + reset_value[i] = 0; + } + } } static inline void -ibs_log_event(u64 data, struct cpu_user_regs const * const regs, int mode) +ibs_log_event(u64 data, struct cpu_user_regs const *const regs, int mode) { - struct vcpu *v = current; - u32 temp = 0; - - temp = data & 0xFFFFFFFF; - xenoprof_log_event(v, regs, temp, mode, 0); - - temp = (data >> 32) & 0xFFFFFFFF; - xenoprof_log_event(v, regs, temp, mode, 0); - + struct vcpu *v = current; + u32 temp = 0; + + temp = data & 0xFFFFFFFF; + xenoprof_log_event(v, regs, temp, mode, 0); + + temp = (data >> 32) & 0xFFFFFFFF; + xenoprof_log_event(v, regs, temp, mode, 0); } -static inline int handle_ibs(int mode, struct cpu_user_regs const * const regs) +static inline int handle_ibs(int mode, struct cpu_user_regs const *const regs) { - u64 val, ctl; - struct vcpu *v = current; - - if (!ibs_caps) - return 1; - - if (ibs_config.fetch_enabled) { - rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl); - if (ctl & IBS_FETCH_VAL) { - rdmsrl(MSR_AMD64_IBSFETCHLINAD, val); - xenoprof_log_event(v, regs, IBS_FETCH_CODE, mode, 0); - xenoprof_log_event(v, regs, val, mode, 0); - - ibs_log_event(val, regs, mode); - ibs_log_event(ctl, regs, mode); - - rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val); - ibs_log_event(val, regs, mode); - - /* reenable the IRQ */ - ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT); - ctl |= IBS_FETCH_ENABLE; - wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl); - } - } - - if (ibs_config.op_enabled) { - rdmsrl(MSR_AMD64_IBSOPCTL, ctl); - if (ctl & IBS_OP_VAL) { - - rdmsrl(MSR_AMD64_IBSOPRIP, val); - xenoprof_log_event(v, regs, IBS_OP_CODE, mode, 0); - xenoprof_log_event(v, regs, val, mode, 0); - - ibs_log_event(val, regs, mode); - - rdmsrl(MSR_AMD64_IBSOPDATA, val); - ibs_log_event(val, regs, mode); - 
rdmsrl(MSR_AMD64_IBSOPDATA2, val); - ibs_log_event(val, regs, mode); - rdmsrl(MSR_AMD64_IBSOPDATA3, val); - ibs_log_event(val, regs, mode); - rdmsrl(MSR_AMD64_IBSDCLINAD, val); - ibs_log_event(val, regs, mode); - rdmsrl(MSR_AMD64_IBSDCPHYSAD, val); - ibs_log_event(val, regs, mode); - - /* reenable the IRQ */ - ctl = op_amd_randomize_ibs_op(ibs_op_ctl); - wrmsrl(MSR_AMD64_IBSOPCTL, ctl); - } - } + u64 val, ctl; + struct vcpu *v = current; + + if ( !ibs_caps ) + return 1; + + if ( ibs_config.fetch_enabled ) + { + rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl); + if ( ctl & IBS_FETCH_VAL ) + { + rdmsrl(MSR_AMD64_IBSFETCHLINAD, val); + xenoprof_log_event(v, regs, IBS_FETCH_CODE, mode, 0); + xenoprof_log_event(v, regs, val, mode, 0); + + ibs_log_event(val, regs, mode); + ibs_log_event(ctl, regs, mode); + + rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val); + ibs_log_event(val, regs, mode); + + /* reenable the IRQ */ + ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT); + ctl |= IBS_FETCH_ENABLE; + wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl); + } + } + + if ( ibs_config.op_enabled ) + { + rdmsrl(MSR_AMD64_IBSOPCTL, ctl); + if ( ctl & IBS_OP_VAL ) + { + rdmsrl(MSR_AMD64_IBSOPRIP, val); + xenoprof_log_event(v, regs, IBS_OP_CODE, mode, 0); + xenoprof_log_event(v, regs, val, mode, 0); + + ibs_log_event(val, regs, mode); + + rdmsrl(MSR_AMD64_IBSOPDATA, val); + ibs_log_event(val, regs, mode); + rdmsrl(MSR_AMD64_IBSOPDATA2, val); + ibs_log_event(val, regs, mode); + rdmsrl(MSR_AMD64_IBSOPDATA3, val); + ibs_log_event(val, regs, mode); + rdmsrl(MSR_AMD64_IBSDCLINAD, val); + ibs_log_event(val, regs, mode); + rdmsrl(MSR_AMD64_IBSDCPHYSAD, val); + ibs_log_event(val, regs, mode); + + /* reenable the IRQ */ + ctl = op_amd_randomize_ibs_op(ibs_op_ctl); + wrmsrl(MSR_AMD64_IBSOPCTL, ctl); + } + } return 1; } static int athlon_check_ctrs(unsigned int const cpu, - struct op_msrs const * const msrs, - struct cpu_user_regs const * const regs) + struct op_msrs const *const msrs, + struct cpu_user_regs const *const regs) { - uint64_t msr_content; - int i; - int ovf = 0; - unsigned long eip = regs->rip; - int mode = 0; - struct vcpu *v = current; - unsigned int const nr_ctrs = model->num_counters; + uint64_t msr_content; + int i; + int ovf = 0; + unsigned long eip = regs->rip; + int mode = 0; + struct vcpu *v = current; + unsigned int const nr_ctrs = model->num_counters; #if CONFIG_HVM - struct cpu_user_regs *guest_regs = guest_cpu_user_regs(); - - if (!guest_mode(regs) && - (eip == (unsigned long)svm_stgi_label)) { - /* SVM guest was running when NMI occurred */ - ASSERT(is_hvm_vcpu(v)); - eip = guest_regs->rip; - mode = xenoprofile_get_mode(v, guest_regs); - } else + struct cpu_user_regs *guest_regs = guest_cpu_user_regs(); + + if ( !guest_mode(regs) && (eip == (unsigned long)svm_stgi_label) ) + { + /* SVM guest was running when NMI occurred */ + ASSERT(is_hvm_vcpu(v)); + eip = guest_regs->rip; + mode = xenoprofile_get_mode(v, guest_regs); + } + else #endif - mode = xenoprofile_get_mode(v, regs); - - for (i = 0 ; i < nr_ctrs; ++i) { - CTR_READ(msr_content, msrs, i); - if (CTR_OVERFLOWED(msr_content)) { - xenoprof_log_event(current, regs, eip, mode, i); - CTR_WRITE(reset_value[i], msrs, i); - ovf = 1; - } - } - - ovf = handle_ibs(mode, regs); - /* See op_model_ppro.c */ - return ovf; + mode = xenoprofile_get_mode(v, regs); + + for ( i = 0; i < nr_ctrs; ++i ) + { + CTR_READ(msr_content, msrs, i); + if ( CTR_OVERFLOWED(msr_content) ) + { + xenoprof_log_event(current, regs, eip, mode, i); + CTR_WRITE(reset_value[i], msrs, i); + ovf = 1; + } + } + + ovf = 
handle_ibs(mode, regs); + /* See op_model_ppro.c */ + return ovf; } static inline void start_ibs(void) { - u64 val = 0; - - if (!ibs_caps) - return; - - if (ibs_config.fetch_enabled) { - val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT; - val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0; - val |= IBS_FETCH_ENABLE; - wrmsrl(MSR_AMD64_IBSFETCHCTL, val); - } - - if (ibs_config.op_enabled) { - ibs_op_ctl = ibs_config.max_cnt_op >> 4; - if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) { - /* - * IbsOpCurCnt not supported. See - * op_amd_randomize_ibs_op() for details. - */ - ibs_op_ctl = clamp((unsigned long long)ibs_op_ctl, - 0x0081ULL, 0xFF80ULL); - } else { - /* - * The start value is randomized with a - * positive offset, we need to compensate it - * with the half of the randomized range. Also - * avoid underflows. - */ - ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET, - IBS_OP_MAX_CNT); - } - if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops) - ibs_op_ctl |= IBS_OP_CNT_CTL; - ibs_op_ctl |= IBS_OP_ENABLE; - val = op_amd_randomize_ibs_op(ibs_op_ctl); - wrmsrl(MSR_AMD64_IBSOPCTL, val); - } + u64 val = 0; + + if ( !ibs_caps ) + return; + + if ( ibs_config.fetch_enabled ) + { + val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT; + val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0; + val |= IBS_FETCH_ENABLE; + wrmsrl(MSR_AMD64_IBSFETCHCTL, val); + } + + if ( ibs_config.op_enabled ) + { + ibs_op_ctl = ibs_config.max_cnt_op >> 4; + if ( !(ibs_caps & IBS_CAPS_RDWROPCNT) ) + { + /* + * IbsOpCurCnt not supported. See + * op_amd_randomize_ibs_op() for details. + */ + ibs_op_ctl = + clamp((unsigned long long)ibs_op_ctl, 0x0081ULL, 0xFF80ULL); + } + else + { + /* + * The start value is randomized with a + * positive offset, we need to compensate it + * with the half of the randomized range. Also + * avoid underflows. 
+ */ + ibs_op_ctl = + min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET, IBS_OP_MAX_CNT); + } + if ( ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops ) + ibs_op_ctl |= IBS_OP_CNT_CTL; + ibs_op_ctl |= IBS_OP_ENABLE; + val = op_amd_randomize_ibs_op(ibs_op_ctl); + wrmsrl(MSR_AMD64_IBSOPCTL, val); + } } - -static void athlon_start(struct op_msrs const * const msrs) + +static void athlon_start(struct op_msrs const *const msrs) { - uint64_t msr_content; - int i; - unsigned int const nr_ctrs = model->num_counters; - for (i = 0 ; i < nr_ctrs ; ++i) { - if (reset_value[i]) { - CTRL_READ(msr_content, msrs, i); - CTRL_SET_ACTIVE(msr_content); - CTRL_WRITE(msr_content, msrs, i); - } - } - start_ibs(); + uint64_t msr_content; + int i; + unsigned int const nr_ctrs = model->num_counters; + for ( i = 0; i < nr_ctrs; ++i ) + { + if ( reset_value[i] ) + { + CTRL_READ(msr_content, msrs, i); + CTRL_SET_ACTIVE(msr_content); + CTRL_WRITE(msr_content, msrs, i); + } + } + start_ibs(); } static void stop_ibs(void) { - if (!ibs_caps) - return; + if ( !ibs_caps ) + return; - if (ibs_config.fetch_enabled) - /* clear max count and enable */ - wrmsrl(MSR_AMD64_IBSFETCHCTL, 0); + if ( ibs_config.fetch_enabled ) + /* clear max count and enable */ + wrmsrl(MSR_AMD64_IBSFETCHCTL, 0); - if (ibs_config.op_enabled) - /* clear max count and enable */ - wrmsrl(MSR_AMD64_IBSOPCTL, 0); + if ( ibs_config.op_enabled ) + /* clear max count and enable */ + wrmsrl(MSR_AMD64_IBSOPCTL, 0); } -static void athlon_stop(struct op_msrs const * const msrs) +static void athlon_stop(struct op_msrs const *const msrs) { - uint64_t msr_content; - int i; - unsigned int const nr_ctrs = model->num_counters; - - /* Subtle: stop on all counters to avoid race with - * setting our pm callback */ - for (i = 0 ; i < nr_ctrs ; ++i) { - CTRL_READ(msr_content, msrs, i); - CTRL_SET_INACTIVE(msr_content); - CTRL_WRITE(msr_content, msrs, i); - } - - stop_ibs(); + uint64_t msr_content; + int i; + unsigned int const nr_ctrs = model->num_counters; + + /* Subtle: stop on all counters to avoid race with + * setting our pm callback */ + for ( i = 0; i < nr_ctrs; ++i ) + { + CTRL_READ(msr_content, msrs, i); + CTRL_SET_INACTIVE(msr_content); + CTRL_WRITE(msr_content, msrs, i); + } + + stop_ibs(); } -#define IBSCTL_LVTOFFSETVAL (1 << 8) -#define APIC_EILVT_MSG_NMI 0x4 -#define APIC_EILVT_LVTOFF_IBS 1 -#define APIC_EILVTn(n) (0x500 + 0x10 * n) +#define IBSCTL_LVTOFFSETVAL (1 << 8) +#define APIC_EILVT_MSG_NMI 0x4 +#define APIC_EILVT_LVTOFF_IBS 1 +#define APIC_EILVTn(n) (0x500 + 0x10 * n) static inline void __init init_ibs_nmi_per_cpu(void *arg) { - unsigned long reg; + unsigned long reg; - reg = (APIC_EILVT_LVTOFF_IBS << 4) + APIC_EILVTn(0); - apic_write(reg, APIC_EILVT_MSG_NMI << 8); + reg = (APIC_EILVT_LVTOFF_IBS << 4) + APIC_EILVTn(0); + apic_write(reg, APIC_EILVT_MSG_NMI << 8); } -#define PCI_DEVICE_ID_AMD_10H_NB_MISC 0x1203 -#define IBSCTL 0x1cc +#define PCI_DEVICE_ID_AMD_10H_NB_MISC 0x1203 +#define IBSCTL 0x1cc static int __init init_ibs_nmi(void) { - int bus, dev, func; - u32 id, value; - u16 vendor_id, dev_id; - int nodes; - - /* per CPU setup */ - on_each_cpu(init_ibs_nmi_per_cpu, NULL, 1); - - nodes = 0; - for (bus = 0; bus < 256; bus++) { - for (dev = 0; dev < 32; dev++) { - for (func = 0; func < 8; func++) { - id = pci_conf_read32(0, bus, dev, func, PCI_VENDOR_ID); - - vendor_id = id & 0xffff; - dev_id = (id >> 16) & 0xffff; - - if ((vendor_id == PCI_VENDOR_ID_AMD) && - (dev_id == PCI_DEVICE_ID_AMD_10H_NB_MISC)) { - - pci_conf_write32(0, bus, dev, func, IBSCTL, - 
IBSCTL_LVTOFFSETVAL | APIC_EILVT_LVTOFF_IBS); - - value = pci_conf_read32(0, bus, dev, func, IBSCTL); - - if (value != (IBSCTL_LVTOFFSETVAL | - APIC_EILVT_LVTOFF_IBS)) { - printk("Xenoprofile: Failed to setup IBS LVT offset, " - "IBSCTL = %#x\n", value); - return 1; - } - nodes++; - } - } - } - } - - if (!nodes) { - printk("Xenoprofile: No CPU node configured for IBS\n"); - return 1; - } - - return 0; + int bus, dev, func; + u32 id, value; + u16 vendor_id, dev_id; + int nodes; + + /* per CPU setup */ + on_each_cpu(init_ibs_nmi_per_cpu, NULL, 1); + + nodes = 0; + for ( bus = 0; bus < 256; bus++ ) + { + for ( dev = 0; dev < 32; dev++ ) + { + for ( func = 0; func < 8; func++ ) + { + id = pci_conf_read32(0, bus, dev, func, PCI_VENDOR_ID); + + vendor_id = id & 0xffff; + dev_id = (id >> 16) & 0xffff; + + if ( (vendor_id == PCI_VENDOR_ID_AMD) && + (dev_id == PCI_DEVICE_ID_AMD_10H_NB_MISC) ) + { + pci_conf_write32(0, bus, dev, func, IBSCTL, + IBSCTL_LVTOFFSETVAL | + APIC_EILVT_LVTOFF_IBS); + + value = pci_conf_read32(0, bus, dev, func, IBSCTL); + + if ( value != + (IBSCTL_LVTOFFSETVAL | APIC_EILVT_LVTOFF_IBS) ) + { + printk("Xenoprofile: Failed to setup IBS LVT offset, " + "IBSCTL = %#x\n", + value); + return 1; + } + nodes++; + } + } + } + } + + if ( !nodes ) + { + printk("Xenoprofile: No CPU node configured for IBS\n"); + return 1; + } + + return 0; } static void __init get_ibs_caps(void) { - if (!boot_cpu_has(X86_FEATURE_IBS)) - return; + if ( !boot_cpu_has(X86_FEATURE_IBS) ) + return; /* check IBS cpuid feature flags */ - if (current_cpu_data.extended_cpuid_level >= IBS_CPUID_FEATURES) - ibs_caps = cpuid_eax(IBS_CPUID_FEATURES); - if (!(ibs_caps & IBS_CAPS_AVAIL)) - /* cpuid flags not valid */ - ibs_caps = 0; + if ( current_cpu_data.extended_cpuid_level >= IBS_CPUID_FEATURES ) + ibs_caps = cpuid_eax(IBS_CPUID_FEATURES); + if ( !(ibs_caps & IBS_CAPS_AVAIL) ) + /* cpuid flags not valid */ + ibs_caps = 0; } void __init ibs_init(void) { - get_ibs_caps(); + get_ibs_caps(); - if ( !ibs_caps ) - return; + if ( !ibs_caps ) + return; - if (init_ibs_nmi()) { - ibs_caps = 0; - return; - } + if ( init_ibs_nmi() ) + { + ibs_caps = 0; + return; + } - printk("Xenoprofile: AMD IBS detected (%#x)\n", - (unsigned)ibs_caps); + printk("Xenoprofile: AMD IBS detected (%#x)\n", (unsigned)ibs_caps); } struct op_x86_model_spec const op_athlon_spec = { - .num_counters = K7_NUM_COUNTERS, - .num_controls = K7_NUM_CONTROLS, - .fill_in_addresses = &athlon_fill_in_addresses, - .setup_ctrs = &athlon_setup_ctrs, - .check_ctrs = &athlon_check_ctrs, - .start = &athlon_start, - .stop = &athlon_stop -}; + .num_counters = K7_NUM_COUNTERS, + .num_controls = K7_NUM_CONTROLS, + .fill_in_addresses = &athlon_fill_in_addresses, + .setup_ctrs = &athlon_setup_ctrs, + .check_ctrs = &athlon_check_ctrs, + .start = &athlon_start, + .stop = &athlon_stop}; struct op_x86_model_spec const op_amd_fam15h_spec = { - .num_counters = FAM15H_NUM_COUNTERS, - .num_controls = FAM15H_NUM_CONTROLS, - .fill_in_addresses = &fam15h_fill_in_addresses, - .setup_ctrs = &athlon_setup_ctrs, - .check_ctrs = &athlon_check_ctrs, - .start = &athlon_start, - .stop = &athlon_stop -}; + .num_counters = FAM15H_NUM_COUNTERS, + .num_controls = FAM15H_NUM_CONTROLS, + .fill_in_addresses = &fam15h_fill_in_addresses, + .setup_ctrs = &athlon_setup_ctrs, + .check_ctrs = &athlon_check_ctrs, + .start = &athlon_start, + .stop = &athlon_stop}; diff --git a/xen/arch/x86/oprofile/op_model_p4.c b/xen/arch/x86/oprofile/op_model_p4.c index b08ba53cbd..d2edcf9124 100644 --- 
a/xen/arch/x86/oprofile/op_model_p4.c +++ b/xen/arch/x86/oprofile/op_model_p4.c @@ -34,354 +34,325 @@ static unsigned int num_counters = NUM_COUNTERS_NON_HT; - /* this has to be checked dynamically since the hyper-threadedness of a chip is discovered at kernel boot-time. */ static inline void setup_num_counters(void) { - if (boot_cpu_data.x86_num_siblings == 2) /* XXX */ - num_counters = NUM_COUNTERS_HT2; + if ( boot_cpu_data.x86_num_siblings == 2 ) /* XXX */ + num_counters = NUM_COUNTERS_HT2; } static int inline addr_increment(void) { - return boot_cpu_data.x86_num_siblings == 2 ? 2 : 1; + return boot_cpu_data.x86_num_siblings == 2 ? 2 : 1; } - /* tables to simulate simplified hardware view of p4 registers */ -struct p4_counter_binding { - int virt_counter; - int counter_address; - int cccr_address; +struct p4_counter_binding +{ + int virt_counter; + int counter_address; + int cccr_address; }; -struct p4_event_binding { - int escr_select; /* value to put in CCCR */ - int event_select; /* value to put in ESCR */ - struct { - int virt_counter; /* for this counter... */ - int escr_address; /* use this ESCR */ - } bindings[2]; +struct p4_event_binding +{ + int escr_select; /* value to put in CCCR */ + int event_select; /* value to put in ESCR */ + struct + { + int virt_counter; /* for this counter... */ + int escr_address; /* use this ESCR */ + } bindings[2]; }; /* nb: these CTR_* defines are a duplicate of defines in event/i386.p4*events. */ - -#define CTR_BPU_0 (1 << 0) -#define CTR_MS_0 (1 << 1) -#define CTR_FLAME_0 (1 << 2) -#define CTR_IQ_4 (1 << 3) -#define CTR_BPU_2 (1 << 4) -#define CTR_MS_2 (1 << 5) -#define CTR_FLAME_2 (1 << 6) -#define CTR_IQ_5 (1 << 7) - -static struct p4_counter_binding p4_counters [NUM_COUNTERS_NON_HT] = { - { CTR_BPU_0, MSR_P4_BPU_PERFCTR0, MSR_P4_BPU_CCCR0 }, - { CTR_MS_0, MSR_P4_MS_PERFCTR0, MSR_P4_MS_CCCR0 }, - { CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 }, - { CTR_IQ_4, MSR_P4_IQ_PERFCTR4, MSR_P4_IQ_CCCR4 }, - { CTR_BPU_2, MSR_P4_BPU_PERFCTR2, MSR_P4_BPU_CCCR2 }, - { CTR_MS_2, MSR_P4_MS_PERFCTR2, MSR_P4_MS_CCCR2 }, - { CTR_FLAME_2, MSR_P4_FLAME_PERFCTR2, MSR_P4_FLAME_CCCR2 }, - { CTR_IQ_5, MSR_P4_IQ_PERFCTR5, MSR_P4_IQ_CCCR5 } -}; - -#define NUM_UNUSED_CCCRS NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT +#define CTR_BPU_0 (1 << 0) +#define CTR_MS_0 (1 << 1) +#define CTR_FLAME_0 (1 << 2) +#define CTR_IQ_4 (1 << 3) +#define CTR_BPU_2 (1 << 4) +#define CTR_MS_2 (1 << 5) +#define CTR_FLAME_2 (1 << 6) +#define CTR_IQ_5 (1 << 7) + +static struct p4_counter_binding p4_counters[NUM_COUNTERS_NON_HT] = { + {CTR_BPU_0, MSR_P4_BPU_PERFCTR0, MSR_P4_BPU_CCCR0}, + {CTR_MS_0, MSR_P4_MS_PERFCTR0, MSR_P4_MS_CCCR0}, + {CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0}, + {CTR_IQ_4, MSR_P4_IQ_PERFCTR4, MSR_P4_IQ_CCCR4}, + {CTR_BPU_2, MSR_P4_BPU_PERFCTR2, MSR_P4_BPU_CCCR2}, + {CTR_MS_2, MSR_P4_MS_PERFCTR2, MSR_P4_MS_CCCR2}, + {CTR_FLAME_2, MSR_P4_FLAME_PERFCTR2, MSR_P4_FLAME_CCCR2}, + {CTR_IQ_5, MSR_P4_IQ_PERFCTR5, MSR_P4_IQ_CCCR5}}; + +#define NUM_UNUSED_CCCRS NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT /* All cccr we don't use. 
*/ static int p4_unused_cccr[NUM_UNUSED_CCCRS] = { - MSR_P4_BPU_CCCR1, MSR_P4_BPU_CCCR3, - MSR_P4_MS_CCCR1, MSR_P4_MS_CCCR3, - MSR_P4_FLAME_CCCR1, MSR_P4_FLAME_CCCR3, - MSR_P4_IQ_CCCR0, MSR_P4_IQ_CCCR1, - MSR_P4_IQ_CCCR2, MSR_P4_IQ_CCCR3 -}; + MSR_P4_BPU_CCCR1, MSR_P4_BPU_CCCR3, MSR_P4_MS_CCCR1, MSR_P4_MS_CCCR3, + MSR_P4_FLAME_CCCR1, MSR_P4_FLAME_CCCR3, MSR_P4_IQ_CCCR0, MSR_P4_IQ_CCCR1, + MSR_P4_IQ_CCCR2, MSR_P4_IQ_CCCR3}; /* p4 event codes in libop/op_event.h are indices into this table. */ static const struct p4_event_binding p4_events[NUM_EVENTS] = { - - { /* BRANCH_RETIRED */ - 0x05, 0x06, - { {CTR_IQ_4, MSR_P4_CRU_ESCR2}, - {CTR_IQ_5, MSR_P4_CRU_ESCR3} } - }, - - { /* MISPRED_BRANCH_RETIRED */ - 0x04, 0x03, - { { CTR_IQ_4, MSR_P4_CRU_ESCR0}, - { CTR_IQ_5, MSR_P4_CRU_ESCR1} } - }, - - { /* TC_DELIVER_MODE */ - 0x01, 0x01, - { { CTR_MS_0, MSR_P4_TC_ESCR0}, - { CTR_MS_2, MSR_P4_TC_ESCR1} } - }, - - { /* BPU_FETCH_REQUEST */ - 0x00, 0x03, - { { CTR_BPU_0, MSR_P4_BPU_ESCR0}, - { CTR_BPU_2, MSR_P4_BPU_ESCR1} } - }, - - { /* ITLB_REFERENCE */ - 0x03, 0x18, - { { CTR_BPU_0, MSR_P4_ITLB_ESCR0}, - { CTR_BPU_2, MSR_P4_ITLB_ESCR1} } - }, - - { /* MEMORY_CANCEL */ - 0x05, 0x02, - { { CTR_FLAME_0, MSR_P4_DAC_ESCR0}, - { CTR_FLAME_2, MSR_P4_DAC_ESCR1} } - }, - - { /* MEMORY_COMPLETE */ - 0x02, 0x08, - { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0}, - { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} } - }, - - { /* LOAD_PORT_REPLAY */ - 0x02, 0x04, - { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0}, - { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} } - }, - - { /* STORE_PORT_REPLAY */ - 0x02, 0x05, - { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0}, - { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} } - }, - - { /* MOB_LOAD_REPLAY */ - 0x02, 0x03, - { { CTR_BPU_0, MSR_P4_MOB_ESCR0}, - { CTR_BPU_2, MSR_P4_MOB_ESCR1} } - }, - - { /* PAGE_WALK_TYPE */ - 0x04, 0x01, - { { CTR_BPU_0, MSR_P4_PMH_ESCR0}, - { CTR_BPU_2, MSR_P4_PMH_ESCR1} } - }, - - { /* BSQ_CACHE_REFERENCE */ - 0x07, 0x0c, - { { CTR_BPU_0, MSR_P4_BSU_ESCR0}, - { CTR_BPU_2, MSR_P4_BSU_ESCR1} } - }, - - { /* IOQ_ALLOCATION */ - 0x06, 0x03, - { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, - { 0, 0 } } - }, - - { /* IOQ_ACTIVE_ENTRIES */ - 0x06, 0x1a, - { { CTR_BPU_2, MSR_P4_FSB_ESCR1}, - { 0, 0 } } - }, - - { /* FSB_DATA_ACTIVITY */ - 0x06, 0x17, - { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, - { CTR_BPU_2, MSR_P4_FSB_ESCR1} } - }, - - { /* BSQ_ALLOCATION */ - 0x07, 0x05, - { { CTR_BPU_0, MSR_P4_BSU_ESCR0}, - { 0, 0 } } - }, - - { /* BSQ_ACTIVE_ENTRIES */ - 0x07, 0x06, - { { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */}, - { 0, 0 } } - }, - - { /* X87_ASSIST */ - 0x05, 0x03, - { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, - { CTR_IQ_5, MSR_P4_CRU_ESCR3} } - }, - - { /* SSE_INPUT_ASSIST */ - 0x01, 0x34, - { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, - { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } - }, - - { /* PACKED_SP_UOP */ - 0x01, 0x08, - { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, - { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } - }, - - { /* PACKED_DP_UOP */ - 0x01, 0x0c, - { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, - { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } - }, - - { /* SCALAR_SP_UOP */ - 0x01, 0x0a, - { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, - { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } - }, - - { /* SCALAR_DP_UOP */ - 0x01, 0x0e, - { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, - { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } - }, - - { /* 64BIT_MMX_UOP */ - 0x01, 0x02, - { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, - { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } - }, - - { /* 128BIT_MMX_UOP */ - 0x01, 0x1a, - { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, - { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } - }, - - { /* X87_FP_UOP */ - 0x01, 0x04, - { { CTR_FLAME_0, 
MSR_P4_FIRM_ESCR0}, - { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } - }, - - { /* X87_SIMD_MOVES_UOP */ - 0x01, 0x2e, - { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, - { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } - }, - - { /* MACHINE_CLEAR */ - 0x05, 0x02, - { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, - { CTR_IQ_5, MSR_P4_CRU_ESCR3} } - }, - - { /* GLOBAL_POWER_EVENTS */ - 0x06, 0x13 /* older manual says 0x05, newer 0x13 */, - { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, - { CTR_BPU_2, MSR_P4_FSB_ESCR1} } - }, - - { /* TC_MS_XFER */ - 0x00, 0x05, - { { CTR_MS_0, MSR_P4_MS_ESCR0}, - { CTR_MS_2, MSR_P4_MS_ESCR1} } - }, - - { /* UOP_QUEUE_WRITES */ - 0x00, 0x09, - { { CTR_MS_0, MSR_P4_MS_ESCR0}, - { CTR_MS_2, MSR_P4_MS_ESCR1} } - }, - - { /* FRONT_END_EVENT */ - 0x05, 0x08, - { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, - { CTR_IQ_5, MSR_P4_CRU_ESCR3} } - }, - - { /* EXECUTION_EVENT */ - 0x05, 0x0c, - { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, - { CTR_IQ_5, MSR_P4_CRU_ESCR3} } - }, - - { /* REPLAY_EVENT */ - 0x05, 0x09, - { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, - { CTR_IQ_5, MSR_P4_CRU_ESCR3} } - }, - - { /* INSTR_RETIRED */ - 0x04, 0x02, - { { CTR_IQ_4, MSR_P4_CRU_ESCR0}, - { CTR_IQ_5, MSR_P4_CRU_ESCR1} } - }, - - { /* UOPS_RETIRED */ - 0x04, 0x01, - { { CTR_IQ_4, MSR_P4_CRU_ESCR0}, - { CTR_IQ_5, MSR_P4_CRU_ESCR1} } - }, - - { /* UOP_TYPE */ - 0x02, 0x02, - { { CTR_IQ_4, MSR_P4_RAT_ESCR0}, - { CTR_IQ_5, MSR_P4_RAT_ESCR1} } - }, - - { /* RETIRED_MISPRED_BRANCH_TYPE */ - 0x02, 0x05, - { { CTR_MS_0, MSR_P4_TBPU_ESCR0}, - { CTR_MS_2, MSR_P4_TBPU_ESCR1} } - }, - - { /* RETIRED_BRANCH_TYPE */ - 0x02, 0x04, - { { CTR_MS_0, MSR_P4_TBPU_ESCR0}, - { CTR_MS_2, MSR_P4_TBPU_ESCR1} } - } -}; - -#define MISC_PMC_ENABLED_P(x) ((x) & 1ULL << 7) + {/* BRANCH_RETIRED */ + 0x05, + 0x06, + {{CTR_IQ_4, MSR_P4_CRU_ESCR2}, {CTR_IQ_5, MSR_P4_CRU_ESCR3}}}, + + {/* MISPRED_BRANCH_RETIRED */ + 0x04, + 0x03, + {{CTR_IQ_4, MSR_P4_CRU_ESCR0}, {CTR_IQ_5, MSR_P4_CRU_ESCR1}}}, + + {/* TC_DELIVER_MODE */ + 0x01, + 0x01, + {{CTR_MS_0, MSR_P4_TC_ESCR0}, {CTR_MS_2, MSR_P4_TC_ESCR1}}}, + + {/* BPU_FETCH_REQUEST */ + 0x00, + 0x03, + {{CTR_BPU_0, MSR_P4_BPU_ESCR0}, {CTR_BPU_2, MSR_P4_BPU_ESCR1}}}, + + {/* ITLB_REFERENCE */ + 0x03, + 0x18, + {{CTR_BPU_0, MSR_P4_ITLB_ESCR0}, {CTR_BPU_2, MSR_P4_ITLB_ESCR1}}}, + + {/* MEMORY_CANCEL */ + 0x05, + 0x02, + {{CTR_FLAME_0, MSR_P4_DAC_ESCR0}, {CTR_FLAME_2, MSR_P4_DAC_ESCR1}}}, + + {/* MEMORY_COMPLETE */ + 0x02, + 0x08, + {{CTR_FLAME_0, MSR_P4_SAAT_ESCR0}, {CTR_FLAME_2, MSR_P4_SAAT_ESCR1}}}, + + {/* LOAD_PORT_REPLAY */ + 0x02, + 0x04, + {{CTR_FLAME_0, MSR_P4_SAAT_ESCR0}, {CTR_FLAME_2, MSR_P4_SAAT_ESCR1}}}, + + {/* STORE_PORT_REPLAY */ + 0x02, + 0x05, + {{CTR_FLAME_0, MSR_P4_SAAT_ESCR0}, {CTR_FLAME_2, MSR_P4_SAAT_ESCR1}}}, + + {/* MOB_LOAD_REPLAY */ + 0x02, + 0x03, + {{CTR_BPU_0, MSR_P4_MOB_ESCR0}, {CTR_BPU_2, MSR_P4_MOB_ESCR1}}}, + + {/* PAGE_WALK_TYPE */ + 0x04, + 0x01, + {{CTR_BPU_0, MSR_P4_PMH_ESCR0}, {CTR_BPU_2, MSR_P4_PMH_ESCR1}}}, + + {/* BSQ_CACHE_REFERENCE */ + 0x07, + 0x0c, + {{CTR_BPU_0, MSR_P4_BSU_ESCR0}, {CTR_BPU_2, MSR_P4_BSU_ESCR1}}}, + + {/* IOQ_ALLOCATION */ + 0x06, + 0x03, + {{CTR_BPU_0, MSR_P4_FSB_ESCR0}, {0, 0}}}, + + {/* IOQ_ACTIVE_ENTRIES */ + 0x06, + 0x1a, + {{CTR_BPU_2, MSR_P4_FSB_ESCR1}, {0, 0}}}, + + {/* FSB_DATA_ACTIVITY */ + 0x06, + 0x17, + {{CTR_BPU_0, MSR_P4_FSB_ESCR0}, {CTR_BPU_2, MSR_P4_FSB_ESCR1}}}, + + {/* BSQ_ALLOCATION */ + 0x07, + 0x05, + {{CTR_BPU_0, MSR_P4_BSU_ESCR0}, {0, 0}}}, + + {/* BSQ_ACTIVE_ENTRIES */ + 0x07, + 0x06, + {{CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */}, {0, 0}}}, + + {/* X87_ASSIST */ + 0x05, + 0x03, + {{CTR_IQ_4, 
MSR_P4_CRU_ESCR2}, {CTR_IQ_5, MSR_P4_CRU_ESCR3}}}, + + {/* SSE_INPUT_ASSIST */ + 0x01, + 0x34, + {{CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, {CTR_FLAME_2, MSR_P4_FIRM_ESCR1}}}, + + {/* PACKED_SP_UOP */ + 0x01, + 0x08, + {{CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, {CTR_FLAME_2, MSR_P4_FIRM_ESCR1}}}, + + {/* PACKED_DP_UOP */ + 0x01, + 0x0c, + {{CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, {CTR_FLAME_2, MSR_P4_FIRM_ESCR1}}}, + + {/* SCALAR_SP_UOP */ + 0x01, + 0x0a, + {{CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, {CTR_FLAME_2, MSR_P4_FIRM_ESCR1}}}, + + {/* SCALAR_DP_UOP */ + 0x01, + 0x0e, + {{CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, {CTR_FLAME_2, MSR_P4_FIRM_ESCR1}}}, + + {/* 64BIT_MMX_UOP */ + 0x01, + 0x02, + {{CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, {CTR_FLAME_2, MSR_P4_FIRM_ESCR1}}}, + + {/* 128BIT_MMX_UOP */ + 0x01, + 0x1a, + {{CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, {CTR_FLAME_2, MSR_P4_FIRM_ESCR1}}}, + + {/* X87_FP_UOP */ + 0x01, + 0x04, + {{CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, {CTR_FLAME_2, MSR_P4_FIRM_ESCR1}}}, + + {/* X87_SIMD_MOVES_UOP */ + 0x01, + 0x2e, + {{CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, {CTR_FLAME_2, MSR_P4_FIRM_ESCR1}}}, + + {/* MACHINE_CLEAR */ + 0x05, + 0x02, + {{CTR_IQ_4, MSR_P4_CRU_ESCR2}, {CTR_IQ_5, MSR_P4_CRU_ESCR3}}}, + + {/* GLOBAL_POWER_EVENTS */ + 0x06, + 0x13 /* older manual says 0x05, newer 0x13 */, + {{CTR_BPU_0, MSR_P4_FSB_ESCR0}, {CTR_BPU_2, MSR_P4_FSB_ESCR1}}}, + + {/* TC_MS_XFER */ + 0x00, + 0x05, + {{CTR_MS_0, MSR_P4_MS_ESCR0}, {CTR_MS_2, MSR_P4_MS_ESCR1}}}, + + {/* UOP_QUEUE_WRITES */ + 0x00, + 0x09, + {{CTR_MS_0, MSR_P4_MS_ESCR0}, {CTR_MS_2, MSR_P4_MS_ESCR1}}}, + + {/* FRONT_END_EVENT */ + 0x05, + 0x08, + {{CTR_IQ_4, MSR_P4_CRU_ESCR2}, {CTR_IQ_5, MSR_P4_CRU_ESCR3}}}, + + {/* EXECUTION_EVENT */ + 0x05, + 0x0c, + {{CTR_IQ_4, MSR_P4_CRU_ESCR2}, {CTR_IQ_5, MSR_P4_CRU_ESCR3}}}, + + {/* REPLAY_EVENT */ + 0x05, + 0x09, + {{CTR_IQ_4, MSR_P4_CRU_ESCR2}, {CTR_IQ_5, MSR_P4_CRU_ESCR3}}}, + + {/* INSTR_RETIRED */ + 0x04, + 0x02, + {{CTR_IQ_4, MSR_P4_CRU_ESCR0}, {CTR_IQ_5, MSR_P4_CRU_ESCR1}}}, + + {/* UOPS_RETIRED */ + 0x04, + 0x01, + {{CTR_IQ_4, MSR_P4_CRU_ESCR0}, {CTR_IQ_5, MSR_P4_CRU_ESCR1}}}, + + {/* UOP_TYPE */ + 0x02, + 0x02, + {{CTR_IQ_4, MSR_P4_RAT_ESCR0}, {CTR_IQ_5, MSR_P4_RAT_ESCR1}}}, + + {/* RETIRED_MISPRED_BRANCH_TYPE */ + 0x02, + 0x05, + {{CTR_MS_0, MSR_P4_TBPU_ESCR0}, {CTR_MS_2, MSR_P4_TBPU_ESCR1}}}, + + {/* RETIRED_BRANCH_TYPE */ + 0x02, + 0x04, + {{CTR_MS_0, MSR_P4_TBPU_ESCR0}, {CTR_MS_2, MSR_P4_TBPU_ESCR1}}}}; + +#define MISC_PMC_ENABLED_P(x) ((x)&1ULL << 7) #define ESCR_RESERVED_BITS 0x80000003ULL #define ESCR_CLEAR(escr) ((escr) &= ESCR_RESERVED_BITS) -#define ESCR_SET_USR_0(escr, usr) ((escr) |= (((usr) & 1ULL) << 2)) -#define ESCR_SET_OS_0(escr, os) ((escr) |= (((os) & 1ULL) << 3)) -#define ESCR_SET_USR_1(escr, usr) ((escr) |= (((usr) & 1ULL))) -#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1ULL) << 1)) -#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3fULL) << 25)) -#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffffULL) << 9)) -#define ESCR_READ(escr,ev,i) do {rdmsrl(ev->bindings[(i)].escr_address, (escr));} while (0) -#define ESCR_WRITE(escr,ev,i) do {wrmsrl(ev->bindings[(i)].escr_address, (escr));} while (0) +#define ESCR_SET_USR_0(escr, usr) ((escr) |= (((usr)&1ULL) << 2)) +#define ESCR_SET_OS_0(escr, os) ((escr) |= (((os)&1ULL) << 3)) +#define ESCR_SET_USR_1(escr, usr) ((escr) |= (((usr)&1ULL))) +#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os)&1ULL) << 1)) +#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel)&0x3fULL) << 25)) +#define ESCR_SET_EVENT_MASK(escr, 
mask) ((escr) |= (((mask)&0xffffULL) << 9)) +#define ESCR_READ(escr, ev, i) \ + do { \ + rdmsrl(ev->bindings[(i)].escr_address, (escr)); \ + } while ( 0 ) +#define ESCR_WRITE(escr, ev, i) \ + do { \ + wrmsrl(ev->bindings[(i)].escr_address, (escr)); \ + } while ( 0 ) #define CCCR_RESERVED_BITS 0x38030FFFULL #define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) #define CCCR_SET_REQUIRED_BITS(cccr) ((cccr) |= 0x00030000ULL) -#define CCCR_SET_ESCR_SELECT(cccr, sel) ((cccr) |= (((sel) & 0x07ULL) << 13)) -#define CCCR_SET_PMI_OVF_0(cccr) ((cccr) |= (1ULL<<26)) -#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1ULL<<27)) -#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1ULL<<12)) -#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1ULL<<12)) -#define CCCR_READ(msr_content, i) do {rdmsrl(p4_counters[(i)].cccr_address, (msr_content));} while (0) -#define CCCR_WRITE(msr_content, i) do {wrmsrl(p4_counters[(i)].cccr_address, (msr_content));} while (0) -#define CCCR_OVF_P(cccr) ((cccr) & (1ULL<<31)) -#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1ULL<<31))) - -#define CTR_READ(msr_content,i) do {rdmsrl(p4_counters[(i)].counter_address, (msr_content));} while (0) -#define CTR_WRITE(msr_content,i) do {wrmsrl(p4_counters[(i)].counter_address, -(msr_content));} while (0) -#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000ULL)) - +#define CCCR_SET_ESCR_SELECT(cccr, sel) ((cccr) |= (((sel)&0x07ULL) << 13)) +#define CCCR_SET_PMI_OVF_0(cccr) ((cccr) |= (1ULL << 26)) +#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1ULL << 27)) +#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1ULL << 12)) +#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1ULL << 12)) +#define CCCR_READ(msr_content, i) \ + do { \ + rdmsrl(p4_counters[(i)].cccr_address, (msr_content)); \ + } while ( 0 ) +#define CCCR_WRITE(msr_content, i) \ + do { \ + wrmsrl(p4_counters[(i)].cccr_address, (msr_content)); \ + } while ( 0 ) +#define CCCR_OVF_P(cccr) ((cccr) & (1ULL << 31)) +#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1ULL << 31))) + +#define CTR_READ(msr_content, i) \ + do { \ + rdmsrl(p4_counters[(i)].counter_address, (msr_content)); \ + } while ( 0 ) +#define CTR_WRITE(msr_content, i) \ + do { \ + wrmsrl(p4_counters[(i)].counter_address, -(msr_content)); \ + } while ( 0 ) +#define CTR_OVERFLOW_P(ctr) (!((ctr)&0x80000000ULL)) /* this assigns a "stagger" to the current CPU, which is used throughout the code in this module as an extra array offset, to select the "even" or "odd" part of all the divided resources. */ static unsigned int get_stagger(void) { - int cpu = smp_processor_id(); - return (cpu != cpumask_first(per_cpu(cpu_sibling_mask, cpu))); + int cpu = smp_processor_id(); + return (cpu != cpumask_first(per_cpu(cpu_sibling_mask, cpu))); } - /* finally, mediate access to a real hardware counter by passing a "virtual" counter numer to this macro, along with your stagger setting. */ @@ -389,333 +360,365 @@ static unsigned int get_stagger(void) static unsigned long reset_value[NUM_COUNTERS_NON_HT]; - -static void p4_fill_in_addresses(struct op_msrs * const msrs) +static void p4_fill_in_addresses(struct op_msrs *const msrs) { - unsigned int i; - unsigned int addr, stag; - - setup_num_counters(); - stag = get_stagger(); - - /* the counter registers we pay attention to */ - for (i = 0; i < num_counters; ++i) { - msrs->counters[i].addr = - p4_counters[VIRT_CTR(stag, i)].counter_address; - } - - /* FIXME: bad feeling, we don't save the 10 counters we don't use. 
*/ - - /* 18 CCCR registers */ - for (i = 0, addr = MSR_P4_BPU_CCCR0 + stag; - addr <= MSR_P4_IQ_CCCR5; ++i, addr += addr_increment()) { - msrs->controls[i].addr = addr; - } - - /* 43 ESCR registers in three or four discontiguous group */ - for (addr = MSR_P4_BSU_ESCR0 + stag; - addr < MSR_P4_IQ_ESCR0; ++i, addr += addr_increment()) { - msrs->controls[i].addr = addr; - } - - /* no IQ_ESCR0/1 on some models, we save a seconde time BSU_ESCR0/1 - * to avoid special case in nmi_{save|restore}_registers() */ - if (boot_cpu_data.x86_model >= 0x3) { - for (addr = MSR_P4_BSU_ESCR0 + stag; - addr <= MSR_P4_BSU_ESCR1; ++i, addr += addr_increment()) { - msrs->controls[i].addr = addr; - } - } else { - for (addr = MSR_P4_IQ_ESCR0 + stag; - addr <= MSR_P4_IQ_ESCR1; ++i, addr += addr_increment()) { - msrs->controls[i].addr = addr; - } - } - - for (addr = MSR_P4_RAT_ESCR0 + stag; - addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) { - msrs->controls[i].addr = addr; - } - - for (addr = MSR_P4_MS_ESCR0 + stag; - addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) { - msrs->controls[i].addr = addr; - } - - for (addr = MSR_P4_IX_ESCR0 + stag; - addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) { - msrs->controls[i].addr = addr; - } - - /* there are 2 remaining non-contiguously located ESCRs */ - - if (num_counters == NUM_COUNTERS_NON_HT) { - /* standard non-HT CPUs handle both remaining ESCRs*/ - msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; - msrs->controls[i++].addr = MSR_P4_CRU_ESCR4; - - } else if (stag == 0) { - /* HT CPUs give the first remainder to the even thread, as - the 32nd control register */ - msrs->controls[i++].addr = MSR_P4_CRU_ESCR4; - - } else { - /* and two copies of the second to the odd thread, - for the 22st and 23nd control registers */ - msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; - msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; - } + unsigned int i; + unsigned int addr, stag; + + setup_num_counters(); + stag = get_stagger(); + + /* the counter registers we pay attention to */ + for ( i = 0; i < num_counters; ++i ) + { + msrs->counters[i].addr = p4_counters[VIRT_CTR(stag, i)].counter_address; + } + + /* FIXME: bad feeling, we don't save the 10 counters we don't use. 
*/ + + /* 18 CCCR registers */ + for ( i = 0, addr = MSR_P4_BPU_CCCR0 + stag; addr <= MSR_P4_IQ_CCCR5; + ++i, addr += addr_increment() ) + { + msrs->controls[i].addr = addr; + } + + /* 43 ESCR registers in three or four discontiguous group */ + for ( addr = MSR_P4_BSU_ESCR0 + stag; addr < MSR_P4_IQ_ESCR0; + ++i, addr += addr_increment() ) + { + msrs->controls[i].addr = addr; + } + + /* no IQ_ESCR0/1 on some models, we save a seconde time BSU_ESCR0/1 + * to avoid special case in nmi_{save|restore}_registers() */ + if ( boot_cpu_data.x86_model >= 0x3 ) + { + for ( addr = MSR_P4_BSU_ESCR0 + stag; addr <= MSR_P4_BSU_ESCR1; + ++i, addr += addr_increment() ) + { + msrs->controls[i].addr = addr; + } + } + else + { + for ( addr = MSR_P4_IQ_ESCR0 + stag; addr <= MSR_P4_IQ_ESCR1; + ++i, addr += addr_increment() ) + { + msrs->controls[i].addr = addr; + } + } + + for ( addr = MSR_P4_RAT_ESCR0 + stag; addr <= MSR_P4_SSU_ESCR0; + ++i, addr += addr_increment() ) + { + msrs->controls[i].addr = addr; + } + + for ( addr = MSR_P4_MS_ESCR0 + stag; addr <= MSR_P4_TC_ESCR1; + ++i, addr += addr_increment() ) + { + msrs->controls[i].addr = addr; + } + + for ( addr = MSR_P4_IX_ESCR0 + stag; addr <= MSR_P4_CRU_ESCR3; + ++i, addr += addr_increment() ) + { + msrs->controls[i].addr = addr; + } + + /* there are 2 remaining non-contiguously located ESCRs */ + + if ( num_counters == NUM_COUNTERS_NON_HT ) + { + /* standard non-HT CPUs handle both remaining ESCRs*/ + msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; + msrs->controls[i++].addr = MSR_P4_CRU_ESCR4; + } + else if ( stag == 0 ) + { + /* HT CPUs give the first remainder to the even thread, as + the 32nd control register */ + msrs->controls[i++].addr = MSR_P4_CRU_ESCR4; + } + else + { + /* and two copies of the second to the odd thread, + for the 22st and 23nd control registers */ + msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; + msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; + } } - static void pmc_setup_one_p4_counter(unsigned int ctr) { - int i; - int const maxbind = 2; - uint64_t cccr = 0; - uint64_t escr = 0; - unsigned int counter_bit; - const struct p4_event_binding *ev = NULL; - unsigned int stag; - - stag = get_stagger(); - - /* convert from counter *number* to counter *bit* */ - counter_bit = 1 << VIRT_CTR(stag, ctr); - - /* find our event binding structure. 
*/ - if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) { - printk(KERN_ERR "oprofile: P4 event code %#lx out of range\n", - counter_config[ctr].event); - return; - } - - ev = &(p4_events[counter_config[ctr].event - 1]); - - for (i = 0; i < maxbind; i++) { - if (ev->bindings[i].virt_counter & counter_bit) { - - /* modify ESCR */ - ESCR_READ(escr, ev, i); - ESCR_CLEAR(escr); - if (stag == 0) { - ESCR_SET_USR_0(escr, counter_config[ctr].user); - ESCR_SET_OS_0(escr, counter_config[ctr].kernel); - } else { - ESCR_SET_USR_1(escr, counter_config[ctr].user); - ESCR_SET_OS_1(escr, counter_config[ctr].kernel); - } - ESCR_SET_EVENT_SELECT(escr, ev->event_select); - ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); - ESCR_WRITE(escr, ev, i); - - /* modify CCCR */ - CCCR_READ(cccr, VIRT_CTR(stag, ctr)); - CCCR_CLEAR(cccr); - CCCR_SET_REQUIRED_BITS(cccr); - CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); - if (stag == 0) { - CCCR_SET_PMI_OVF_0(cccr); - } else { - CCCR_SET_PMI_OVF_1(cccr); - } - CCCR_WRITE(cccr, VIRT_CTR(stag, ctr)); - return; - } - } - - printk(KERN_ERR - "oprofile: P4 event code %#lx no binding, stag %d ctr %d\n", - counter_config[ctr].event, stag, ctr); + int i; + int const maxbind = 2; + uint64_t cccr = 0; + uint64_t escr = 0; + unsigned int counter_bit; + const struct p4_event_binding *ev = NULL; + unsigned int stag; + + stag = get_stagger(); + + /* convert from counter *number* to counter *bit* */ + counter_bit = 1 << VIRT_CTR(stag, ctr); + + /* find our event binding structure. */ + if ( counter_config[ctr].event <= 0 || + counter_config[ctr].event > NUM_EVENTS ) + { + printk(KERN_ERR "oprofile: P4 event code %#lx out of range\n", + counter_config[ctr].event); + return; + } + + ev = &(p4_events[counter_config[ctr].event - 1]); + + for ( i = 0; i < maxbind; i++ ) + { + if ( ev->bindings[i].virt_counter & counter_bit ) + { + /* modify ESCR */ + ESCR_READ(escr, ev, i); + ESCR_CLEAR(escr); + if ( stag == 0 ) + { + ESCR_SET_USR_0(escr, counter_config[ctr].user); + ESCR_SET_OS_0(escr, counter_config[ctr].kernel); + } + else + { + ESCR_SET_USR_1(escr, counter_config[ctr].user); + ESCR_SET_OS_1(escr, counter_config[ctr].kernel); + } + ESCR_SET_EVENT_SELECT(escr, ev->event_select); + ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); + ESCR_WRITE(escr, ev, i); + + /* modify CCCR */ + CCCR_READ(cccr, VIRT_CTR(stag, ctr)); + CCCR_CLEAR(cccr); + CCCR_SET_REQUIRED_BITS(cccr); + CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); + if ( stag == 0 ) + { + CCCR_SET_PMI_OVF_0(cccr); + } + else + { + CCCR_SET_PMI_OVF_1(cccr); + } + CCCR_WRITE(cccr, VIRT_CTR(stag, ctr)); + return; + } + } + + printk(KERN_ERR "oprofile: P4 event code %#lx no binding, stag %d ctr %d\n", + counter_config[ctr].event, stag, ctr); } - -static void p4_setup_ctrs(struct op_msrs const * const msrs) +static void p4_setup_ctrs(struct op_msrs const *const msrs) { - unsigned int i; - uint64_t msr_content; - unsigned int addr; - unsigned int stag; - - stag = get_stagger(); - - rdmsrl(MSR_IA32_MISC_ENABLE, msr_content); - if (! 
MISC_PMC_ENABLED_P(msr_content)) { - printk(KERN_ERR "oprofile: P4 PMC not available\n"); - return; - } - - /* clear the cccrs we will use */ - for (i = 0 ; i < num_counters ; i++) { - rdmsrl(p4_counters[VIRT_CTR(stag, i)].cccr_address, msr_content); - CCCR_CLEAR(msr_content); - CCCR_SET_REQUIRED_BITS(msr_content); - wrmsrl(p4_counters[VIRT_CTR(stag, i)].cccr_address, msr_content); - } - - /* clear cccrs outside our concern */ - for (i = stag ; i < NUM_UNUSED_CCCRS ; i += addr_increment()) { - rdmsrl(p4_unused_cccr[i], msr_content); - CCCR_CLEAR(msr_content); - CCCR_SET_REQUIRED_BITS(msr_content); - wrmsrl(p4_unused_cccr[i], msr_content); - } - - /* clear all escrs (including those outside our concern) */ - for (addr = MSR_P4_BSU_ESCR0 + stag; - addr < MSR_P4_IQ_ESCR0; addr += addr_increment()) { - wrmsrl(addr, 0x0ULL); - } - - /* On older models clear also MSR_P4_IQ_ESCR0/1 */ - if (boot_cpu_data.x86_model < 0x3) { - wrmsrl(MSR_P4_IQ_ESCR0, 0x0ULL); - wrmsrl(MSR_P4_IQ_ESCR1, 0x0ULL); - } - - for (addr = MSR_P4_RAT_ESCR0 + stag; - addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) { - wrmsrl(addr, 0x0ULL); - } - - for (addr = MSR_P4_MS_ESCR0 + stag; - addr <= MSR_P4_TC_ESCR1; addr += addr_increment()){ - wrmsrl(addr, 0x0ULL); - } - - for (addr = MSR_P4_IX_ESCR0 + stag; - addr <= MSR_P4_CRU_ESCR3; addr += addr_increment()){ - wrmsrl(addr, 0x0ULL); - } - - if (num_counters == NUM_COUNTERS_NON_HT) { - wrmsrl(MSR_P4_CRU_ESCR4, 0x0ULL); - wrmsrl(MSR_P4_CRU_ESCR5, 0x0ULL); - } else if (stag == 0) { - wrmsrl(MSR_P4_CRU_ESCR4, 0x0ULL); - } else { - wrmsrl(MSR_P4_CRU_ESCR5, 0x0ULL); - } - - /* setup all counters */ - for (i = 0 ; i < num_counters ; ++i) { - if (counter_config[i].enabled) { - reset_value[i] = counter_config[i].count; - pmc_setup_one_p4_counter(i); - CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i)); - } else { - reset_value[i] = 0; - } - } + unsigned int i; + uint64_t msr_content; + unsigned int addr; + unsigned int stag; + + stag = get_stagger(); + + rdmsrl(MSR_IA32_MISC_ENABLE, msr_content); + if ( !MISC_PMC_ENABLED_P(msr_content) ) + { + printk(KERN_ERR "oprofile: P4 PMC not available\n"); + return; + } + + /* clear the cccrs we will use */ + for ( i = 0; i < num_counters; i++ ) + { + rdmsrl(p4_counters[VIRT_CTR(stag, i)].cccr_address, msr_content); + CCCR_CLEAR(msr_content); + CCCR_SET_REQUIRED_BITS(msr_content); + wrmsrl(p4_counters[VIRT_CTR(stag, i)].cccr_address, msr_content); + } + + /* clear cccrs outside our concern */ + for ( i = stag; i < NUM_UNUSED_CCCRS; i += addr_increment() ) + { + rdmsrl(p4_unused_cccr[i], msr_content); + CCCR_CLEAR(msr_content); + CCCR_SET_REQUIRED_BITS(msr_content); + wrmsrl(p4_unused_cccr[i], msr_content); + } + + /* clear all escrs (including those outside our concern) */ + for ( addr = MSR_P4_BSU_ESCR0 + stag; addr < MSR_P4_IQ_ESCR0; + addr += addr_increment() ) + { + wrmsrl(addr, 0x0ULL); + } + + /* On older models clear also MSR_P4_IQ_ESCR0/1 */ + if ( boot_cpu_data.x86_model < 0x3 ) + { + wrmsrl(MSR_P4_IQ_ESCR0, 0x0ULL); + wrmsrl(MSR_P4_IQ_ESCR1, 0x0ULL); + } + + for ( addr = MSR_P4_RAT_ESCR0 + stag; addr <= MSR_P4_SSU_ESCR0; + ++i, addr += addr_increment() ) + { + wrmsrl(addr, 0x0ULL); + } + + for ( addr = MSR_P4_MS_ESCR0 + stag; addr <= MSR_P4_TC_ESCR1; + addr += addr_increment() ) + { + wrmsrl(addr, 0x0ULL); + } + + for ( addr = MSR_P4_IX_ESCR0 + stag; addr <= MSR_P4_CRU_ESCR3; + addr += addr_increment() ) + { + wrmsrl(addr, 0x0ULL); + } + + if ( num_counters == NUM_COUNTERS_NON_HT ) + { + wrmsrl(MSR_P4_CRU_ESCR4, 0x0ULL); + 
wrmsrl(MSR_P4_CRU_ESCR5, 0x0ULL); + } + else if ( stag == 0 ) + { + wrmsrl(MSR_P4_CRU_ESCR4, 0x0ULL); + } + else + { + wrmsrl(MSR_P4_CRU_ESCR5, 0x0ULL); + } + + /* setup all counters */ + for ( i = 0; i < num_counters; ++i ) + { + if ( counter_config[i].enabled ) + { + reset_value[i] = counter_config[i].count; + pmc_setup_one_p4_counter(i); + CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i)); + } + else + { + reset_value[i] = 0; + } + } } static int p4_check_ctrs(unsigned int const cpu, - struct op_msrs const * const msrs, - struct cpu_user_regs const * const regs) + struct op_msrs const *const msrs, + struct cpu_user_regs const *const regs) { - unsigned long ctr, stag, real; - uint64_t msr_content; - int i; - int ovf = 0; - unsigned long eip = regs->rip; - int mode = xenoprofile_get_mode(current, regs); - - stag = get_stagger(); - - for (i = 0; i < num_counters; ++i) { - - if (!reset_value[i]) - continue; - - /* - * there is some eccentricity in the hardware which - * requires that we perform 2 extra corrections: - * - * - check both the CCCR:OVF flag for overflow and the - * counter high bit for un-flagged overflows. - * - * - write the counter back twice to ensure it gets - * updated properly. - * - * the former seems to be related to extra NMIs happening - * during the current NMI; the latter is reported as errata - * N15 in intel doc 249199-029, pentium 4 specification - * update, though their suggested work-around does not - * appear to solve the problem. - */ - - real = VIRT_CTR(stag, i); - - CCCR_READ(msr_content, real); - CTR_READ(ctr, real); - if (CCCR_OVF_P(msr_content) || CTR_OVERFLOW_P(ctr)) { - xenoprof_log_event(current, regs, eip, mode, i); - CTR_WRITE(reset_value[i], real); - CCCR_CLEAR_OVF(msr_content); - CCCR_WRITE(msr_content, real); - CTR_WRITE(reset_value[i], real); - ovf = 1; - } - } - - /* P4 quirk: you have to re-unmask the apic vector */ - apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); - - return ovf; + unsigned long ctr, stag, real; + uint64_t msr_content; + int i; + int ovf = 0; + unsigned long eip = regs->rip; + int mode = xenoprofile_get_mode(current, regs); + + stag = get_stagger(); + + for ( i = 0; i < num_counters; ++i ) + { + if ( !reset_value[i] ) + continue; + + /* + * there is some eccentricity in the hardware which + * requires that we perform 2 extra corrections: + * + * - check both the CCCR:OVF flag for overflow and the + * counter high bit for un-flagged overflows. + * + * - write the counter back twice to ensure it gets + * updated properly. + * + * the former seems to be related to extra NMIs happening + * during the current NMI; the latter is reported as errata + * N15 in intel doc 249199-029, pentium 4 specification + * update, though their suggested work-around does not + * appear to solve the problem. 
+ */ + + real = VIRT_CTR(stag, i); + + CCCR_READ(msr_content, real); + CTR_READ(ctr, real); + if ( CCCR_OVF_P(msr_content) || CTR_OVERFLOW_P(ctr) ) + { + xenoprof_log_event(current, regs, eip, mode, i); + CTR_WRITE(reset_value[i], real); + CCCR_CLEAR_OVF(msr_content); + CCCR_WRITE(msr_content, real); + CTR_WRITE(reset_value[i], real); + ovf = 1; + } + } + + /* P4 quirk: you have to re-unmask the apic vector */ + apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); + + return ovf; } - -static void p4_start(struct op_msrs const * const msrs) +static void p4_start(struct op_msrs const *const msrs) { - unsigned int stag; - uint64_t msr_content; - int i; - - stag = get_stagger(); - - for (i = 0; i < num_counters; ++i) { - if (!reset_value[i]) - continue; - CCCR_READ(msr_content, VIRT_CTR(stag, i)); - CCCR_SET_ENABLE(msr_content); - CCCR_WRITE(msr_content, VIRT_CTR(stag, i)); - } + unsigned int stag; + uint64_t msr_content; + int i; + + stag = get_stagger(); + + for ( i = 0; i < num_counters; ++i ) + { + if ( !reset_value[i] ) + continue; + CCCR_READ(msr_content, VIRT_CTR(stag, i)); + CCCR_SET_ENABLE(msr_content); + CCCR_WRITE(msr_content, VIRT_CTR(stag, i)); + } } - -static void p4_stop(struct op_msrs const * const msrs) +static void p4_stop(struct op_msrs const *const msrs) { - unsigned int stag; - uint64_t msr_content; - int i; - - stag = get_stagger(); - - for (i = 0; i < num_counters; ++i) { - CCCR_READ(msr_content, VIRT_CTR(stag, i)); - CCCR_SET_DISABLE(msr_content); - CCCR_WRITE(msr_content, VIRT_CTR(stag, i)); - } + unsigned int stag; + uint64_t msr_content; + int i; + + stag = get_stagger(); + + for ( i = 0; i < num_counters; ++i ) + { + CCCR_READ(msr_content, VIRT_CTR(stag, i)); + CCCR_SET_DISABLE(msr_content); + CCCR_WRITE(msr_content, VIRT_CTR(stag, i)); + } } - struct op_x86_model_spec const op_p4_ht2_spec = { - .num_counters = NUM_COUNTERS_HT2, - .num_controls = NUM_CONTROLS_HT2, - .fill_in_addresses = &p4_fill_in_addresses, - .setup_ctrs = &p4_setup_ctrs, - .check_ctrs = &p4_check_ctrs, - .start = &p4_start, - .stop = &p4_stop -}; - + .num_counters = NUM_COUNTERS_HT2, + .num_controls = NUM_CONTROLS_HT2, + .fill_in_addresses = &p4_fill_in_addresses, + .setup_ctrs = &p4_setup_ctrs, + .check_ctrs = &p4_check_ctrs, + .start = &p4_start, + .stop = &p4_stop}; struct op_x86_model_spec const op_p4_spec = { - .num_counters = NUM_COUNTERS_NON_HT, - .num_controls = NUM_CONTROLS_NON_HT, - .fill_in_addresses = &p4_fill_in_addresses, - .setup_ctrs = &p4_setup_ctrs, - .check_ctrs = &p4_check_ctrs, - .start = &p4_start, - .stop = &p4_stop -}; + .num_counters = NUM_COUNTERS_NON_HT, + .num_controls = NUM_CONTROLS_NON_HT, + .fill_in_addresses = &p4_fill_in_addresses, + .setup_ctrs = &p4_setup_ctrs, + .check_ctrs = &p4_check_ctrs, + .start = &p4_start, + .stop = &p4_stop}; diff --git a/xen/arch/x86/oprofile/op_model_ppro.c b/xen/arch/x86/oprofile/op_model_ppro.c index 72c504a102..3bdd330693 100644 --- a/xen/arch/x86/oprofile/op_model_ppro.c +++ b/xen/arch/x86/oprofile/op_model_ppro.c @@ -24,7 +24,8 @@ #include "op_x86_model.h" #include "op_counter.h" -struct arch_msr_pair { +struct arch_msr_pair +{ u64 counter; u64 control; }; @@ -34,254 +35,268 @@ struct arch_msr_pair { * detection/enumeration details: */ union cpuid10_eax { - struct { - unsigned int version_id:8; - unsigned int num_counters:8; - unsigned int bit_width:8; - unsigned int mask_length:8; - } split; - unsigned int full; + struct + { + unsigned int version_id : 8; + unsigned int num_counters : 8; + unsigned int bit_width : 
8; + unsigned int mask_length : 8; + } split; + unsigned int full; }; static int num_counters = 2; static int counter_width = 32; -#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1)))) - -#define CTRL_READ(msr_content,msrs,c) do {rdmsrl((msrs->controls[(c)].addr), (msr_content));} while (0) -#define CTRL_WRITE(msr_content,msrs,c) do {wrmsrl((msrs->controls[(c)].addr), (msr_content));} while (0) -#define CTRL_SET_ACTIVE(n) (n |= (1ULL<<22)) -#define CTRL_SET_INACTIVE(n) (n &= ~(1ULL<<22)) -#define CTRL_CLEAR(x) (x &= (1ULL<<21)) -#define CTRL_SET_ENABLE(val) (val |= 1ULL<<20) -#define CTRL_SET_USR(val,u) (val |= ((u & 1ULL) << 16)) -#define CTRL_SET_KERN(val,k) (val |= ((k & 1ULL) << 17)) +#define CTR_OVERFLOWED(n) (!((n) & (1ULL << (counter_width - 1)))) + +#define CTRL_READ(msr_content, msrs, c) \ + do { \ + rdmsrl((msrs->controls[(c)].addr), (msr_content)); \ + } while ( 0 ) +#define CTRL_WRITE(msr_content, msrs, c) \ + do { \ + wrmsrl((msrs->controls[(c)].addr), (msr_content)); \ + } while ( 0 ) +#define CTRL_SET_ACTIVE(n) (n |= (1ULL << 22)) +#define CTRL_SET_INACTIVE(n) (n &= ~(1ULL << 22)) +#define CTRL_CLEAR(x) (x &= (1ULL << 21)) +#define CTRL_SET_ENABLE(val) (val |= 1ULL << 20) +#define CTRL_SET_USR(val, u) (val |= ((u & 1ULL) << 16)) +#define CTRL_SET_KERN(val, k) (val |= ((k & 1ULL) << 17)) #define CTRL_SET_UM(val, m) (val |= (m << 8)) #define CTRL_SET_EVENT(val, e) (val |= e) -#define IS_ACTIVE(val) (val & (1ULL << 22) ) -#define IS_ENABLE(val) (val & (1ULL << 20) ) +#define IS_ACTIVE(val) (val & (1ULL << 22)) +#define IS_ENABLE(val) (val & (1ULL << 20)) static unsigned long reset_value[OP_MAX_COUNTER]; int ppro_has_global_ctrl = 0; -static void ppro_fill_in_addresses(struct op_msrs * const msrs) +static void ppro_fill_in_addresses(struct op_msrs *const msrs) { - int i; + int i; - for (i = 0; i < num_counters; i++) - msrs->counters[i].addr = MSR_P6_PERFCTR(i); - for (i = 0; i < num_counters; i++) - msrs->controls[i].addr = MSR_P6_EVNTSEL(i); + for ( i = 0; i < num_counters; i++ ) + msrs->counters[i].addr = MSR_P6_PERFCTR(i); + for ( i = 0; i < num_counters; i++ ) + msrs->controls[i].addr = MSR_P6_EVNTSEL(i); } - -static void ppro_setup_ctrs(struct op_msrs const * const msrs) +static void ppro_setup_ctrs(struct op_msrs const *const msrs) { - uint64_t msr_content; - int i; - - if (cpu_has_arch_perfmon) { - union cpuid10_eax eax; - eax.full = cpuid_eax(0xa); - - /* - * For Core2 (family 6, model 15), don't reset the - * counter width: - */ - if (!(eax.split.version_id == 0 && - current_cpu_data.x86 == 6 && - current_cpu_data.x86_model == 15)) { - - if (counter_width < eax.split.bit_width) - counter_width = eax.split.bit_width; - } - } - - /* clear all counters */ - for (i = 0 ; i < num_counters; ++i) { - CTRL_READ(msr_content, msrs, i); - CTRL_CLEAR(msr_content); - CTRL_WRITE(msr_content, msrs, i); - } - - /* avoid a false detection of ctr overflows in NMI handler */ - for (i = 0; i < num_counters; ++i) - wrmsrl(msrs->counters[i].addr, ~0x0ULL); - - /* enable active counters */ - for (i = 0; i < num_counters; ++i) { - if (counter_config[i].enabled) { - reset_value[i] = counter_config[i].count; - - wrmsrl(msrs->counters[i].addr, -reset_value[i]); - - CTRL_READ(msr_content, msrs, i); - CTRL_CLEAR(msr_content); - CTRL_SET_ENABLE(msr_content); - CTRL_SET_USR(msr_content, counter_config[i].user); - CTRL_SET_KERN(msr_content, counter_config[i].kernel); - CTRL_SET_UM(msr_content, counter_config[i].unit_mask); - CTRL_SET_EVENT(msr_content, counter_config[i].event); - 
CTRL_WRITE(msr_content, msrs, i); - } else { - reset_value[i] = 0; - } - } + uint64_t msr_content; + int i; + + if ( cpu_has_arch_perfmon ) + { + union cpuid10_eax eax; + eax.full = cpuid_eax(0xa); + + /* + * For Core2 (family 6, model 15), don't reset the + * counter width: + */ + if ( !(eax.split.version_id == 0 && current_cpu_data.x86 == 6 && + current_cpu_data.x86_model == 15) ) + { + if ( counter_width < eax.split.bit_width ) + counter_width = eax.split.bit_width; + } + } + + /* clear all counters */ + for ( i = 0; i < num_counters; ++i ) + { + CTRL_READ(msr_content, msrs, i); + CTRL_CLEAR(msr_content); + CTRL_WRITE(msr_content, msrs, i); + } + + /* avoid a false detection of ctr overflows in NMI handler */ + for ( i = 0; i < num_counters; ++i ) + wrmsrl(msrs->counters[i].addr, ~0x0ULL); + + /* enable active counters */ + for ( i = 0; i < num_counters; ++i ) + { + if ( counter_config[i].enabled ) + { + reset_value[i] = counter_config[i].count; + + wrmsrl(msrs->counters[i].addr, -reset_value[i]); + + CTRL_READ(msr_content, msrs, i); + CTRL_CLEAR(msr_content); + CTRL_SET_ENABLE(msr_content); + CTRL_SET_USR(msr_content, counter_config[i].user); + CTRL_SET_KERN(msr_content, counter_config[i].kernel); + CTRL_SET_UM(msr_content, counter_config[i].unit_mask); + CTRL_SET_EVENT(msr_content, counter_config[i].event); + CTRL_WRITE(msr_content, msrs, i); + } + else + { + reset_value[i] = 0; + } + } } static int ppro_check_ctrs(unsigned int const cpu, - struct op_msrs const * const msrs, - struct cpu_user_regs const * const regs) + struct op_msrs const *const msrs, + struct cpu_user_regs const *const regs) { - u64 val; - int i; - int ovf = 0; - unsigned long eip = regs->rip; - int mode = xenoprofile_get_mode(current, regs); - struct arch_msr_pair *msrs_content = vcpu_vpmu(current)->context; - - for (i = 0 ; i < num_counters; ++i) { - if (!reset_value[i]) - continue; - rdmsrl(msrs->counters[i].addr, val); - if (CTR_OVERFLOWED(val)) { - xenoprof_log_event(current, regs, eip, mode, i); - wrmsrl(msrs->counters[i].addr, -reset_value[i]); - if ( is_passive(current->domain) && (mode != 2) && - vpmu_is_set(vcpu_vpmu(current), - VPMU_PASSIVE_DOMAIN_ALLOCATED) ) - { - if ( IS_ACTIVE(msrs_content[i].control) ) - { - msrs_content[i].counter = val; - if ( IS_ENABLE(msrs_content[i].control) ) - ovf = 2; - } - } - if ( !ovf ) - ovf = 1; - } - } - - /* Only P6 based Pentium M need to re-unmask the apic vector but it - * doesn't hurt other P6 variant */ - apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); - - return ovf; -} + u64 val; + int i; + int ovf = 0; + unsigned long eip = regs->rip; + int mode = xenoprofile_get_mode(current, regs); + struct arch_msr_pair *msrs_content = vcpu_vpmu(current)->context; + + for ( i = 0; i < num_counters; ++i ) + { + if ( !reset_value[i] ) + continue; + rdmsrl(msrs->counters[i].addr, val); + if ( CTR_OVERFLOWED(val) ) + { + xenoprof_log_event(current, regs, eip, mode, i); + wrmsrl(msrs->counters[i].addr, -reset_value[i]); + if ( is_passive(current->domain) && (mode != 2) && + vpmu_is_set(vcpu_vpmu(current), + VPMU_PASSIVE_DOMAIN_ALLOCATED) ) + { + if ( IS_ACTIVE(msrs_content[i].control) ) + { + msrs_content[i].counter = val; + if ( IS_ENABLE(msrs_content[i].control) ) + ovf = 2; + } + } + if ( !ovf ) + ovf = 1; + } + } + /* Only P6 based Pentium M need to re-unmask the apic vector but it + * doesn't hurt other P6 variant */ + apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); -static void ppro_start(struct op_msrs const * const msrs) + return ovf; 
+} + +static void ppro_start(struct op_msrs const *const msrs) { - uint64_t msr_content; - int i; - - for (i = 0; i < num_counters; ++i) { - if (reset_value[i]) { - CTRL_READ(msr_content, msrs, i); - CTRL_SET_ACTIVE(msr_content); - CTRL_WRITE(msr_content, msrs, i); - } - } + uint64_t msr_content; + int i; + + for ( i = 0; i < num_counters; ++i ) + { + if ( reset_value[i] ) + { + CTRL_READ(msr_content, msrs, i); + CTRL_SET_ACTIVE(msr_content); + CTRL_WRITE(msr_content, msrs, i); + } + } /* Global Control MSR is enabled by default when system power on. * However, this may not hold true when xenoprof starts to run. */ if ( ppro_has_global_ctrl ) - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, (1ULL<= MSR_IA32_PERFCTR0) && - (msr_index < (MSR_IA32_PERFCTR0 + num_counters)) ) - { - *type = MSR_TYPE_ARCH_COUNTER; - *index = msr_index - MSR_IA32_PERFCTR0; - return 1; - } - if ( (msr_index >= MSR_P6_EVNTSEL(0)) && - (msr_index < (MSR_P6_EVNTSEL(num_counters))) ) - { - *type = MSR_TYPE_ARCH_CTRL; - *index = msr_index - MSR_P6_EVNTSEL(0); - return 1; - } - - return 0; + if ( (msr_index >= MSR_IA32_PERFCTR0) && + (msr_index < (MSR_IA32_PERFCTR0 + num_counters)) ) + { + *type = MSR_TYPE_ARCH_COUNTER; + *index = msr_index - MSR_IA32_PERFCTR0; + return 1; + } + if ( (msr_index >= MSR_P6_EVNTSEL(0)) && + (msr_index < (MSR_P6_EVNTSEL(num_counters))) ) + { + *type = MSR_TYPE_ARCH_CTRL; + *index = msr_index - MSR_P6_EVNTSEL(0); + return 1; + } + + return 0; } static int ppro_allocate_msr(struct vcpu *v) { - struct vpmu_struct *vpmu = vcpu_vpmu(v); - struct arch_msr_pair *msr_content; - - msr_content = xzalloc_array(struct arch_msr_pair, num_counters); - if ( !msr_content ) - goto out; - vpmu->context = (void *)msr_content; - vpmu_clear(vpmu); - vpmu_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED); - return 1; + struct vpmu_struct *vpmu = vcpu_vpmu(v); + struct arch_msr_pair *msr_content; + + msr_content = xzalloc_array(struct arch_msr_pair, num_counters); + if ( !msr_content ) + goto out; + vpmu->context = (void *)msr_content; + vpmu_clear(vpmu); + vpmu_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED); + return 1; out: - printk(XENLOG_G_WARNING "Insufficient memory for oprofile," - " oprofile is unavailable on dom%d vcpu%d\n", - v->vcpu_id, v->domain->domain_id); - return 0; + printk(XENLOG_G_WARNING "Insufficient memory for oprofile," + " oprofile is unavailable on dom%d vcpu%d\n", + v->vcpu_id, v->domain->domain_id); + return 0; } static void ppro_free_msr(struct vcpu *v) { - struct vpmu_struct *vpmu = vcpu_vpmu(v); + struct vpmu_struct *vpmu = vcpu_vpmu(v); - if ( !vpmu_is_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED) ) - return; - xfree(vpmu->context); - vpmu_reset(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED); + if ( !vpmu_is_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED) ) + return; + xfree(vpmu->context); + vpmu_reset(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED); } static void ppro_load_msr(struct vcpu *v, int type, int index, u64 *msr_content) { - struct arch_msr_pair *msrs = vcpu_vpmu(v)->context; - switch ( type ) - { - case MSR_TYPE_ARCH_COUNTER: - *msr_content = msrs[index].counter; - break; - case MSR_TYPE_ARCH_CTRL: - *msr_content = msrs[index].control; - break; - } + struct arch_msr_pair *msrs = vcpu_vpmu(v)->context; + switch (type) + { + case MSR_TYPE_ARCH_COUNTER: + *msr_content = msrs[index].counter; + break; + case MSR_TYPE_ARCH_CTRL: + *msr_content = msrs[index].control; + break; + } } static void ppro_save_msr(struct vcpu *v, int type, int index, u64 msr_content) { - struct arch_msr_pair *msrs = vcpu_vpmu(v)->context; - - switch ( type ) - 
{ - case MSR_TYPE_ARCH_COUNTER: - msrs[index].counter = msr_content; - break; - case MSR_TYPE_ARCH_CTRL: - msrs[index].control = msr_content; - break; - } + struct arch_msr_pair *msrs = vcpu_vpmu(v)->context; + + switch (type) + { + case MSR_TYPE_ARCH_COUNTER: + msrs[index].counter = msr_content; + break; + case MSR_TYPE_ARCH_CTRL: + msrs[index].control = msr_content; + break; + } } /* @@ -294,51 +309,50 @@ static void ppro_save_msr(struct vcpu *v, int type, int index, u64 msr_content) */ void arch_perfmon_setup_counters(void) { - union cpuid10_eax eax; + union cpuid10_eax eax; - eax.full = cpuid_eax(0xa); + eax.full = cpuid_eax(0xa); - /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */ - if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 && - current_cpu_data.x86_model == 15) { - eax.split.version_id = 2; - eax.split.num_counters = 2; - eax.split.bit_width = 40; - } + /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */ + if ( eax.split.version_id == 0 && current_cpu_data.x86 == 6 && + current_cpu_data.x86_model == 15 ) + { + eax.split.version_id = 2; + eax.split.num_counters = 2; + eax.split.bit_width = 40; + } - num_counters = min_t(u8, eax.split.num_counters, OP_MAX_COUNTER); + num_counters = min_t(u8, eax.split.num_counters, OP_MAX_COUNTER); - op_arch_perfmon_spec.num_counters = num_counters; - op_arch_perfmon_spec.num_controls = num_counters; - op_ppro_spec.num_counters = num_counters; - op_ppro_spec.num_controls = num_counters; + op_arch_perfmon_spec.num_counters = num_counters; + op_arch_perfmon_spec.num_controls = num_counters; + op_ppro_spec.num_counters = num_counters; + op_ppro_spec.num_controls = num_counters; } struct op_x86_model_spec __read_mostly op_ppro_spec = { - .num_counters = 2, - .num_controls = 2, - .fill_in_addresses = &ppro_fill_in_addresses, - .setup_ctrs = &ppro_setup_ctrs, - .check_ctrs = &ppro_check_ctrs, - .start = &ppro_start, - .stop = &ppro_stop, - .is_arch_pmu_msr = &ppro_is_arch_pmu_msr, - .allocated_msr = &ppro_allocate_msr, - .free_msr = &ppro_free_msr, - .load_msr = &ppro_load_msr, - .save_msr = &ppro_save_msr -}; + .num_counters = 2, + .num_controls = 2, + .fill_in_addresses = &ppro_fill_in_addresses, + .setup_ctrs = &ppro_setup_ctrs, + .check_ctrs = &ppro_check_ctrs, + .start = &ppro_start, + .stop = &ppro_stop, + .is_arch_pmu_msr = &ppro_is_arch_pmu_msr, + .allocated_msr = &ppro_allocate_msr, + .free_msr = &ppro_free_msr, + .load_msr = &ppro_load_msr, + .save_msr = &ppro_save_msr}; struct op_x86_model_spec __read_mostly op_arch_perfmon_spec = { - /* num_counters/num_controls filled in at runtime */ - .fill_in_addresses = &ppro_fill_in_addresses, - .setup_ctrs = &ppro_setup_ctrs, - .check_ctrs = &ppro_check_ctrs, - .start = &ppro_start, - .stop = &ppro_stop, - .is_arch_pmu_msr = &ppro_is_arch_pmu_msr, - .allocated_msr = &ppro_allocate_msr, - .free_msr = &ppro_free_msr, - .load_msr = &ppro_load_msr, - .save_msr = &ppro_save_msr -}; + /* num_counters/num_controls filled in at runtime */ + .fill_in_addresses = &ppro_fill_in_addresses, + .setup_ctrs = &ppro_setup_ctrs, + .check_ctrs = &ppro_check_ctrs, + .start = &ppro_start, + .stop = &ppro_stop, + .is_arch_pmu_msr = &ppro_is_arch_pmu_msr, + .allocated_msr = &ppro_allocate_msr, + .free_msr = &ppro_free_msr, + .load_msr = &ppro_load_msr, + .save_msr = &ppro_save_msr}; diff --git a/xen/arch/x86/oprofile/xenoprof.c b/xen/arch/x86/oprofile/xenoprof.c index cca759b54f..936ba26f1e 100644 --- a/xen/arch/x86/oprofile/xenoprof.c +++ b/xen/arch/x86/oprofile/xenoprof.c @@ -27,11 +27,11 @@ 
int xenoprof_arch_counter(XEN_GUEST_HANDLE_PARAM(void) arg) if ( counter.ind >= OP_MAX_COUNTER ) return -E2BIG; - counter_config[counter.ind].count = counter.count; - counter_config[counter.ind].enabled = counter.enabled; - counter_config[counter.ind].event = counter.event; - counter_config[counter.ind].kernel = counter.kernel; - counter_config[counter.ind].user = counter.user; + counter_config[counter.ind].count = counter.count; + counter_config[counter.ind].enabled = counter.enabled; + counter_config[counter.ind].event = counter.event; + counter_config[counter.ind].kernel = counter.kernel; + counter_config[counter.ind].user = counter.user; counter_config[counter.ind].unit_mask = counter.unit_mask; return 0; @@ -64,11 +64,11 @@ int compat_oprof_arch_counter(XEN_GUEST_HANDLE_PARAM(void) arg) if ( counter.ind >= OP_MAX_COUNTER ) return -E2BIG; - counter_config[counter.ind].count = counter.count; - counter_config[counter.ind].enabled = counter.enabled; - counter_config[counter.ind].event = counter.event; - counter_config[counter.ind].kernel = counter.kernel; - counter_config[counter.ind].user = counter.user; + counter_config[counter.ind].count = counter.count; + counter_config[counter.ind].enabled = counter.enabled; + counter_config[counter.ind].event = counter.event; + counter_config[counter.ind].kernel = counter.kernel; + counter_config[counter.ind].user = counter.user; counter_config[counter.ind].unit_mask = counter.unit_mask; return 0; @@ -82,7 +82,7 @@ int xenoprofile_get_mode(struct vcpu *curr, const struct cpu_user_regs *regs) if ( !is_hvm_vcpu(curr) ) return guest_kernel_mode(curr, regs); - switch ( hvm_guest_x86_mode(curr) ) + switch (hvm_guest_x86_mode(curr)) { case 0: /* real mode */ return 1; diff --git a/xen/arch/x86/pci.c b/xen/arch/x86/pci.c index a9decd4f33..6a08d95f80 100644 --- a/xen/arch/x86/pci.c +++ b/xen/arch/x86/pci.c @@ -1,6 +1,6 @@ /****************************************************************************** * pci.c - * + * * Architecture-dependent PCI access functions. 
*/ @@ -22,7 +22,7 @@ uint32_t pci_conf_read(uint32_t cf8, uint8_t offset, uint8_t bytes) outl(cf8, 0xcf8); - switch ( bytes ) + switch (bytes) { case 1: value = inb(0xcfc + offset); @@ -53,7 +53,7 @@ void pci_conf_write(uint32_t cf8, uint8_t offset, uint8_t bytes, uint32_t data) outl(cf8, 0xcf8); - switch ( bytes ) + switch (bytes) { case 1: outb((uint8_t)data, 0xcfc + offset); @@ -74,8 +74,8 @@ int pci_conf_write_intercept(unsigned int seg, unsigned int bdf, uint32_t *data) { struct pci_dev *pdev; - int rc = xsm_pci_config_permission(XSM_HOOK, current->domain, bdf, - reg, reg + size - 1, 1); + int rc = xsm_pci_config_permission(XSM_HOOK, current->domain, bdf, reg, + reg + size - 1, 1); if ( rc < 0 ) return rc; diff --git a/xen/arch/x86/percpu.c b/xen/arch/x86/percpu.c index 8be4ebddf4..7f47ffd8e0 100644 --- a/xen/arch/x86/percpu.c +++ b/xen/arch/x86/percpu.c @@ -39,7 +39,8 @@ static int init_percpu_area(unsigned int cpu) return 0; } -struct free_info { +struct free_info +{ unsigned int cpu; struct rcu_head rcu; }; @@ -63,13 +64,13 @@ static void free_percpu_area(unsigned int cpu) call_rcu(&info->rcu, _free_percpu_area); } -static int cpu_percpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_percpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; int rc = 0; - switch ( action ) + switch (action) { case CPU_UP_PREPARE: rc = init_percpu_area(cpu); @@ -89,8 +90,7 @@ static int cpu_percpu_callback( } static struct notifier_block cpu_percpu_nfb = { - .notifier_call = cpu_percpu_callback, - .priority = 100 /* highest priority */ + .notifier_call = cpu_percpu_callback, .priority = 100 /* highest priority */ }; static int __init percpu_presmp_init(void) diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c index 3a3c15890b..eacc8a6cc6 100644 --- a/xen/arch/x86/physdev.c +++ b/xen/arch/x86/physdev.c @@ -27,17 +27,18 @@ int physdev_unmap_pirq(domid_t, int pirq); #ifndef COMPAT typedef long ret_t; -static int physdev_hvm_map_pirq( - struct domain *d, int type, int *index, int *pirq) +static int physdev_hvm_map_pirq(struct domain *d, int type, int *index, + int *pirq) { int ret = 0; ASSERT(!is_hardware_domain(d)); spin_lock(&d->event_lock); - switch ( type ) + switch (type) + { + case MAP_PIRQ_TYPE_GSI: { - case MAP_PIRQ_TYPE_GSI: { const struct hvm_irq_dpci *hvm_irq_dpci; unsigned int machine_gsi = 0; @@ -55,9 +56,7 @@ static int physdev_hvm_map_pirq( const struct hvm_girq_dpci_mapping *girq; BUILD_BUG_ON(ARRAY_SIZE(hvm_irq_dpci->girq) < NR_HVM_DOMU_IRQS); - list_for_each_entry ( girq, - &hvm_irq_dpci->girq[*index], - list ) + list_for_each_entry (girq, &hvm_irq_dpci->girq[*index], list) machine_gsi = girq->machine_gsi; } /* found one, this mean we are dealing with a pt device */ @@ -115,7 +114,7 @@ int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p, goto free_domain; /* Verify or get irq. 
*/ - switch ( type ) + switch (type) { case MAP_PIRQ_TYPE_GSI: ret = allocate_and_map_gsi_pirq(d, *index, pirq_p); @@ -127,13 +126,13 @@ int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p, break; default: - dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n", - d->domain_id, type); + dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n", d->domain_id, + type); ret = -EINVAL; break; } - free_domain: +free_domain: rcu_unlock_domain(d); return ret; } @@ -168,7 +167,7 @@ int physdev_unmap_pirq(domid_t domid, int pirq) spin_unlock(&d->event_lock); pcidevs_unlock(); - free_domain: +free_domain: rcu_unlock_domain(d); return ret; } @@ -180,9 +179,10 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) ret_t ret; struct domain *currd = current->domain; - switch ( cmd ) + switch (cmd) + { + case PHYSDEVOP_eoi: { - case PHYSDEVOP_eoi: { struct physdev_eoi eoi; struct pirq *pirq; @@ -194,7 +194,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; spin_lock(&currd->event_lock); pirq = pirq_info(currd, eoi.irq); - if ( !pirq ) { + if ( !pirq ) + { spin_unlock(&currd->event_lock); break; } @@ -202,16 +203,15 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) evtchn_unmask(pirq->evtchn); if ( is_pv_domain(currd) || domain_pirq_to_irq(currd, eoi.irq) > 0 ) pirq_guest_eoi(pirq); - if ( is_hvm_domain(currd) && - domain_pirq_to_emuirq(currd, eoi.irq) > 0 ) + if ( is_hvm_domain(currd) && domain_pirq_to_emuirq(currd, eoi.irq) > 0 ) { struct hvm_irq *hvm_irq = hvm_domain_irq(currd); int gsi = domain_pirq_to_emuirq(currd, eoi.irq); /* if this is a level irq and count > 0, send another - * notification */ + * notification */ if ( gsi >= NR_ISAIRQS /* ISA irqs are edge triggered */ - && hvm_irq->gsi_assert_count[gsi] ) + && hvm_irq->gsi_assert_count[gsi] ) send_guest_pirq(currd, pirq); } spin_unlock(&currd->event_lock); @@ -220,7 +220,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) } case PHYSDEVOP_pirq_eoi_gmfn_v2: - case PHYSDEVOP_pirq_eoi_gmfn_v1: { + case PHYSDEVOP_pirq_eoi_gmfn_v1: + { struct physdev_pirq_eoi_gmfn info; struct page_info *page; @@ -238,8 +239,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - if ( cmpxchg(&currd->arch.pirq_eoi_map_mfn, - 0, mfn_x(page_to_mfn(page))) != 0 ) + if ( cmpxchg(&currd->arch.pirq_eoi_map_mfn, 0, + mfn_x(page_to_mfn(page))) != 0 ) { put_page_and_type(page); ret = -EBUSY; @@ -262,12 +263,14 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) } /* Legacy since 0x00030202. 
*/ - case PHYSDEVOP_IRQ_UNMASK_NOTIFY: { + case PHYSDEVOP_IRQ_UNMASK_NOTIFY: + { ret = pirq_guest_unmask(currd); break; } - case PHYSDEVOP_irq_status_query: { + case PHYSDEVOP_irq_status_query: + { struct physdev_irq_status_query irq_status_query; ret = -EFAULT; if ( copy_from_guest(&irq_status_query, arg, 1) != 0 ) @@ -277,8 +280,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) if ( (irq < 0) || (irq >= currd->nr_pirqs) ) break; irq_status_query.flags = 0; - if ( is_hvm_domain(currd) && - domain_pirq_to_irq(currd, irq) <= 0 && + if ( is_hvm_domain(currd) && domain_pirq_to_irq(currd, irq) <= 0 && domain_pirq_to_emuirq(currd, irq) == IRQ_UNBOUND ) { ret = -EINVAL; @@ -300,7 +302,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case PHYSDEVOP_map_pirq: { + case PHYSDEVOP_map_pirq: + { physdev_map_pirq_t map; struct msi_info msi; @@ -308,7 +311,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) if ( copy_from_guest(&map, arg, 1) != 0 ) break; - switch ( map.type ) + switch (map.type) { case MAP_PIRQ_TYPE_MSI_SEG: map.type = MAP_PIRQ_TYPE_MSI; @@ -329,8 +332,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) msi.devfn = map.devfn; msi.entry_nr = map.entry_nr; msi.table_base = map.table_base; - ret = physdev_map_pirq(map.domid, map.type, &map.index, &map.pirq, - &msi); + ret = + physdev_map_pirq(map.domid, map.type, &map.index, &map.pirq, &msi); if ( map.type == MAP_PIRQ_TYPE_MULTI_MSI ) map.entry_nr = msi.entry_nr; @@ -339,7 +342,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case PHYSDEVOP_unmap_pirq: { + case PHYSDEVOP_unmap_pirq: + { struct physdev_unmap_pirq unmap; ret = -EFAULT; @@ -350,7 +354,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case PHYSDEVOP_apic_read: { + case PHYSDEVOP_apic_read: + { struct physdev_apic apic; ret = -EFAULT; if ( copy_from_guest(&apic, arg, 1) != 0 ) @@ -364,7 +369,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case PHYSDEVOP_apic_write: { + case PHYSDEVOP_apic_write: + { struct physdev_apic apic; ret = -EFAULT; if ( copy_from_guest(&apic, arg, 1) != 0 ) @@ -376,7 +382,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case PHYSDEVOP_alloc_irq_vector: { + case PHYSDEVOP_alloc_irq_vector: + { struct physdev_irq irq_op; ret = -EFAULT; @@ -391,17 +398,18 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) /* Vector is only used by hypervisor, and dom0 shouldn't touch it in its world, return irq_op.irq as the vecotr, - and make this hypercall dummy, and also defer the vector + and make this hypercall dummy, and also defer the vector allocation when dom0 tries to programe ioapic entry. 
*/ irq_op.vector = irq_op.irq; ret = 0; - + if ( __copy_to_guest(arg, &irq_op, 1) ) ret = -EFAULT; break; } - case PHYSDEVOP_set_iopl: { + case PHYSDEVOP_set_iopl: + { struct vcpu *curr = current; struct physdev_set_iopl set_iopl; @@ -416,7 +424,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case PHYSDEVOP_set_iobitmap: { + case PHYSDEVOP_set_iobitmap: + { struct vcpu *curr = current; struct physdev_set_iobitmap set_iobitmap; @@ -437,18 +446,20 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case PHYSDEVOP_manage_pci_add: { + case PHYSDEVOP_manage_pci_add: + { struct physdev_manage_pci manage_pci; ret = -EFAULT; if ( copy_from_guest(&manage_pci, arg, 1) != 0 ) break; - ret = pci_add_device(0, manage_pci.bus, manage_pci.devfn, - NULL, NUMA_NO_NODE); + ret = pci_add_device(0, manage_pci.bus, manage_pci.devfn, NULL, + NUMA_NO_NODE); break; } - case PHYSDEVOP_manage_pci_remove: { + case PHYSDEVOP_manage_pci_remove: + { struct physdev_manage_pci manage_pci; ret = -EFAULT; if ( copy_from_guest(&manage_pci, arg, 1) != 0 ) @@ -458,7 +469,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case PHYSDEVOP_manage_pci_add_ext: { + case PHYSDEVOP_manage_pci_add_ext: + { struct physdev_manage_pci_ext manage_pci_ext; struct pci_dev_info pdev_info; @@ -474,13 +486,13 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) pdev_info.is_virtfn = manage_pci_ext.is_virtfn; pdev_info.physfn.bus = manage_pci_ext.physfn.bus; pdev_info.physfn.devfn = manage_pci_ext.physfn.devfn; - ret = pci_add_device(0, manage_pci_ext.bus, - manage_pci_ext.devfn, + ret = pci_add_device(0, manage_pci_ext.bus, manage_pci_ext.devfn, &pdev_info, NUMA_NO_NODE); break; } - case PHYSDEVOP_pci_device_add: { + case PHYSDEVOP_pci_device_add: + { struct physdev_pci_device_add add; struct pci_dev_info pdev_info; nodeid_t node; @@ -502,8 +514,9 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) if ( add.flags & XEN_PCI_DEV_PXM ) { uint32_t pxm; - size_t optarr_off = offsetof(struct physdev_pci_device_add, optarr) / - sizeof(add.optarr[0]); + size_t optarr_off = + offsetof(struct physdev_pci_device_add, optarr) / + sizeof(add.optarr[0]); if ( copy_from_guest_offset(&pxm, arg, optarr_off, 1) ) break; @@ -517,7 +530,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case PHYSDEVOP_pci_device_remove: { + case PHYSDEVOP_pci_device_remove: + { struct physdev_pci_device dev; ret = -EFAULT; @@ -529,21 +543,22 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) } case PHYSDEVOP_prepare_msix: - case PHYSDEVOP_release_msix: { + case PHYSDEVOP_release_msix: + { struct physdev_pci_device dev; if ( copy_from_guest(&dev, arg, 1) ) ret = -EFAULT; else - ret = xsm_resource_setup_pci(XSM_PRIV, - (dev.seg << 16) | (dev.bus << 8) | - dev.devfn) ?: - pci_prepare_msix(dev.seg, dev.bus, dev.devfn, - cmd != PHYSDEVOP_prepare_msix); + ret = xsm_resource_setup_pci( + XSM_PRIV, (dev.seg << 16) | (dev.bus << 8) | dev.devfn) + ?: pci_prepare_msix(dev.seg, dev.bus, dev.devfn, + cmd != PHYSDEVOP_prepare_msix); break; } - case PHYSDEVOP_pci_mmcfg_reserved: { + case PHYSDEVOP_pci_mmcfg_reserved: + { struct physdev_pci_mmcfg_reserved info; ret = xsm_resource_setup_misc(XSM_PRIV); @@ -554,23 +569,24 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) if ( copy_from_guest(&info, arg, 1) ) break; - ret = pci_mmcfg_reserved(info.address, info.segment, - info.start_bus, info.end_bus, info.flags); + ret = 
pci_mmcfg_reserved(info.address, info.segment, info.start_bus, + info.end_bus, info.flags); if ( !ret && has_vpci(currd) ) { /* * For HVM (PVH) domains try to add the newly found MMCFG to the * domain. */ - ret = register_vpci_mmcfg_handler(currd, info.address, - info.start_bus, info.end_bus, - info.segment); + ret = + register_vpci_mmcfg_handler(currd, info.address, info.start_bus, + info.end_bus, info.segment); } break; } - case PHYSDEVOP_restore_msi: { + case PHYSDEVOP_restore_msi: + { struct physdev_restore_msi restore_msi; struct pci_dev *pdev; @@ -585,7 +601,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case PHYSDEVOP_restore_msi_ext: { + case PHYSDEVOP_restore_msi_ext: + { struct physdev_pci_device dev; struct pci_dev *pdev; @@ -600,13 +617,14 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case PHYSDEVOP_setup_gsi: { + case PHYSDEVOP_setup_gsi: + { struct physdev_setup_gsi setup_gsi; ret = -EFAULT; if ( copy_from_guest(&setup_gsi, arg, 1) != 0 ) break; - + ret = -EINVAL; if ( setup_gsi.gsi < 0 || setup_gsi.gsi >= nr_irqs_gsi ) break; @@ -617,9 +635,10 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) ret = mp_register_gsi(setup_gsi.gsi, setup_gsi.triggering, setup_gsi.polarity); - break; + break; } - case PHYSDEVOP_get_free_pirq: { + case PHYSDEVOP_get_free_pirq: + { struct physdev_get_free_pirq out; ret = -EFAULT; @@ -650,7 +669,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case PHYSDEVOP_dbgp_op: { + case PHYSDEVOP_dbgp_op: + { struct physdev_dbgp_op op; if ( !is_hardware_domain(currd) ) diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c index b19f6ec4ed..dbe6ebad50 100644 --- a/xen/arch/x86/platform_hypercall.c +++ b/xen/arch/x86/platform_hypercall.c @@ -1,8 +1,8 @@ /****************************************************************************** * platform_hypercall.c - * + * * Hardware platform operations. Intended for use by domain-0 kernel. - * + * * Copyright (c) 2002-2006, K Fraser */ @@ -37,7 +37,8 @@ extern spinlock_t xenpf_lock; #define RESOURCE_ACCESS_MAX_ENTRIES 3 -struct resource_access { +struct resource_access +{ unsigned int nr_done; unsigned int nr_entries; xenpf_resource_entry_t *entries; @@ -50,12 +51,12 @@ void resource_access(void *); #ifndef COMPAT typedef long ret_t; DEFINE_SPINLOCK(xenpf_lock); -# undef copy_from_compat -# define copy_from_compat copy_from_guest -# undef copy_to_compat -# define copy_to_compat copy_to_guest -# undef guest_from_compat_handle -# define guest_from_compat_handle(x,y) ((x)=(y)) +#undef copy_from_compat +#define copy_from_compat copy_from_guest +#undef copy_to_compat +#define copy_to_compat copy_to_guest +#undef guest_from_compat_handle +#define guest_from_compat_handle(x, y) ((x) = (y)) long cpu_frequency_change_helper(void *data) { @@ -64,7 +65,7 @@ long cpu_frequency_change_helper(void *data) static bool allow_access_msr(unsigned int msr) { - switch ( msr ) + switch (msr) { /* MSR for CMT, refer to chapter 17.14 of Intel SDM. 
*/ case MSR_IA32_CMT_EVTSEL: @@ -91,7 +92,7 @@ void check_resource_access(struct resource_access *ra) break; } - switch ( entry->u.cmd ) + switch (entry->u.cmd) { case XEN_RESOURCE_OP_MSR_READ: case XEN_RESOURCE_OP_MSR_WRITE: @@ -107,8 +108,8 @@ void check_resource_access(struct resource_access *ra) if ( ret ) { - entry->u.ret = ret; - break; + entry->u.ret = ret; + break; } } @@ -126,14 +127,14 @@ void resource_access(void *info) int ret; xenpf_resource_entry_t *entry = ra->entries + i; - switch ( entry->u.cmd ) + switch (entry->u.cmd) { case XEN_RESOURCE_OP_MSR_READ: if ( unlikely(entry->idx == MSR_IA32_TSC) ) { /* Return obfuscated scaled time instead of raw timestamp */ - entry->val = get_s_time_fixed(tsc) - + SECONDS(boot_random) - boot_random; + entry->val = + get_s_time_fixed(tsc) + SECONDS(boot_random) - boot_random; ret = 0; } else @@ -202,21 +203,19 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) */ while ( !spin_trylock(&xenpf_lock) ) if ( hypercall_preempt_check() ) - return hypercall_create_continuation( - __HYPERVISOR_platform_op, "h", u_xenpf_op); + return hypercall_create_continuation(__HYPERVISOR_platform_op, "h", + u_xenpf_op); - switch ( op->cmd ) + switch (op->cmd) { case XENPF_settime32: - do_settime(op->u.settime32.secs, - op->u.settime32.nsecs, + do_settime(op->u.settime32.secs, op->u.settime32.nsecs, op->u.settime32.system_time); break; case XENPF_settime64: if ( likely(!op->u.settime64.mbz) ) - do_settime(op->u.settime64.secs, - op->u.settime64.nsecs, + do_settime(op->u.settime64.secs, op->u.settime64.nsecs, op->u.settime64.system_time); else ret = -EINVAL; @@ -224,17 +223,14 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) case XENPF_add_memtype: { - ret = mtrr_add_page( - op->u.add_memtype.mfn, - op->u.add_memtype.nr_mfns, - op->u.add_memtype.type, - 1); + ret = mtrr_add_page(op->u.add_memtype.mfn, op->u.add_memtype.nr_mfns, + op->u.add_memtype.type, 1); if ( ret >= 0 ) { op->u.add_memtype.handle = 0; - op->u.add_memtype.reg = ret; - ret = __copy_field_to_guest(u_xenpf_op, op, u.add_memtype) ? - -EFAULT : 0; + op->u.add_memtype.reg = ret; + ret = __copy_field_to_guest(u_xenpf_op, op, u.add_memtype) ? -EFAULT + : 0; if ( ret != 0 ) mtrr_del_page(ret, 0, 0); } @@ -243,9 +239,9 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) case XENPF_del_memtype: { - if (op->u.del_memtype.handle == 0 - /* mtrr/main.c otherwise does a lookup */ - && (int)op->u.del_memtype.reg >= 0) + if ( op->u.del_memtype.handle == 0 + /* mtrr/main.c otherwise does a lookup */ + && (int)op->u.del_memtype.reg >= 0 ) { ret = mtrr_del_page(op->u.del_memtype.reg, 0, 0); if ( ret > 0 ) @@ -259,17 +255,18 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) case XENPF_read_memtype: { unsigned long mfn, nr_mfns; - mtrr_type type; + mtrr_type type; ret = -EINVAL; if ( op->u.read_memtype.reg < num_var_ranges ) { mtrr_if->get(op->u.read_memtype.reg, &mfn, &nr_mfns, &type); - op->u.read_memtype.mfn = mfn; + op->u.read_memtype.mfn = mfn; op->u.read_memtype.nr_mfns = nr_mfns; - op->u.read_memtype.type = type; + op->u.read_memtype.type = type; ret = __copy_field_to_guest(u_xenpf_op, op, u.read_memtype) - ? -EFAULT : 0; + ? 
-EFAULT + : 0; } } break; @@ -280,9 +277,8 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) guest_from_compat_handle(data, op->u.microcode.data); - ret = microcode_update( - guest_handle_to_param(data, const_void), - op->u.microcode.length); + ret = microcode_update(guest_handle_to_param(data, const_void), + op->u.microcode.length); } break; @@ -290,7 +286,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) { int quirk_id = op->u.platform_quirk.quirk_id; - switch ( quirk_id ) + switch (quirk_id) { case QUIRK_NOIRQBALANCING: printk("Platform quirk -- Disabling IRQ balancing/affinity.\n"); @@ -311,9 +307,10 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) break; case XENPF_firmware_info: - switch ( op->u.firmware_info.type ) + switch (op->u.firmware_info.type) + { + case XEN_FW_DISK_INFO: { - case XEN_FW_DISK_INFO: { const struct edd_info *info; u16 length; @@ -325,20 +322,19 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) /* Transfer the EDD info block. */ ret = -EFAULT; - if ( copy_from_compat(&length, op->u.firmware_info.u. - disk_info.edd_params, 1) ) + if ( copy_from_compat( + &length, op->u.firmware_info.u.disk_info.edd_params, 1) ) break; if ( length > info->edd_device_params.length ) length = info->edd_device_params.length; if ( copy_to_compat(op->u.firmware_info.u.disk_info.edd_params, - (u8 *)&info->edd_device_params, - length) ) + (u8 *)&info->edd_device_params, length) ) break; if ( copy_to_compat(op->u.firmware_info.u.disk_info.edd_params, &length, 1) ) break; - /* Transfer miscellaneous other information values. */ + /* Transfer miscellaneous other information values. */ #define C(x) op->u.firmware_info.u.disk_info.x = info->x C(device); C(version); @@ -350,10 +346,12 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) ret = (__copy_field_to_guest(u_xenpf_op, op, u.firmware_info.u.disk_info) - ? -EFAULT : 0); + ? -EFAULT + : 0); break; } - case XEN_FW_DISK_MBR_SIGNATURE: { + case XEN_FW_DISK_MBR_SIGNATURE: + { const struct mbr_signature *sig; ret = -ESRCH; @@ -368,7 +366,8 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) ret = (__copy_field_to_guest(u_xenpf_op, op, u.firmware_info.u.disk_mbr_signature) - ? -EFAULT : 0); + ? -EFAULT + : 0); break; } case XEN_FW_VBEDDC_INFO: @@ -385,10 +384,12 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) bootsym(boot_edid_caps) >> 8; ret = 0; - if ( __copy_field_to_guest(u_xenpf_op, op, u.firmware_info. - u.vbeddc_info.capabilities) || - __copy_field_to_guest(u_xenpf_op, op, u.firmware_info. 
- u.vbeddc_info.edid_transfer_time) || + if ( __copy_field_to_guest( + u_xenpf_op, op, + u.firmware_info.u.vbeddc_info.capabilities) || + __copy_field_to_guest( + u_xenpf_op, op, + u.firmware_info.u.vbeddc_info.edid_transfer_time) || copy_to_compat(op->u.firmware_info.u.vbeddc_info.edid, bootsym(boot_edid_info), 128) ) ret = -EFAULT; @@ -397,9 +398,8 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) case XEN_FW_EFI_INFO: ret = efi_get_info(op->u.firmware_info.index, &op->u.firmware_info.u.efi_info); - if ( ret == 0 && - __copy_field_to_guest(u_xenpf_op, op, - u.firmware_info.u.efi_info) ) + if ( ret == 0 && __copy_field_to_guest(u_xenpf_op, op, + u.firmware_info.u.efi_info) ) ret = -EFAULT; break; case XEN_FW_KBD_SHIFT_FLAGS: @@ -456,7 +456,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) if ( cpufreq_controller != FREQCTL_dom0_kernel ) break; - ctlmap.nr_bits = op->u.getidletime.cpumap_nr_cpus; + ctlmap.nr_bits = op->u.getidletime.cpumap_nr_cpus; guest_from_compat_handle(cpumap_bitmap, op->u.getidletime.cpumap_bitmap); ctlmap.bitmap.p = cpumap_bitmap.p; /* handle -> handle_64 conversion */ @@ -464,7 +464,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) goto out; guest_from_compat_handle(idletimes, op->u.getidletime.idletime); - for_each_cpu ( cpu, cpumap ) + for_each_cpu (cpu, cpumap) { idletime = get_cpu_idle_time(cpu); @@ -492,7 +492,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) break; case XENPF_set_processor_pminfo: - switch ( op->u.set_pminfo.type ) + switch (op->u.set_pminfo.type) { case XEN_PM_PX: if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_PX) ) @@ -502,7 +502,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) } ret = set_px_pminfo(op->u.set_pminfo.id, &op->u.set_pminfo.u.perf); break; - + case XEN_PM_CX: if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_CX) ) { @@ -526,9 +526,8 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) XEN_GUEST_HANDLE(uint32) pdc; guest_from_compat_handle(pdc, op->u.set_pminfo.u.pdc); - ret = acpi_set_pdc_bits( - op->u.set_pminfo.id, - guest_handle_to_param(pdc, uint32)); + ret = acpi_set_pdc_bits(op->u.set_pminfo.id, + guest_handle_to_param(pdc, uint32)); } break; @@ -561,7 +560,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) g_info->acpi_id = acpi_get_processor_id(g_info->xen_cpuid); ASSERT(g_info->apic_id != BAD_APICID); g_info->flags = 0; - if (cpu_online(g_info->xen_cpuid)) + if ( cpu_online(g_info->xen_cpuid) ) g_info->flags |= XEN_PCPU_FLAGS_ONLINE; } @@ -617,8 +616,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) if ( ret ) break; - if ( cpu >= nr_cpu_ids || !cpu_present(cpu) || - clocksource_is_tsc() ) + if ( cpu >= nr_cpu_ids || !cpu_present(cpu) || clocksource_is_tsc() ) { ret = -EINVAL; break; @@ -630,8 +628,8 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) break; } - ret = continue_hypercall_on_cpu( - 0, cpu_up_helper, (void *)(unsigned long)cpu); + ret = continue_hypercall_on_cpu(0, cpu_up_helper, + (void *)(unsigned long)cpu); break; } @@ -661,8 +659,8 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) break; } - ret = continue_hypercall_on_cpu( - 0, cpu_down_helper, (void *)(unsigned long)cpu); + ret = continue_hypercall_on_cpu(0, cpu_down_helper, + (void *)(unsigned long)cpu); break; } break; @@ -672,38 +670,37 @@ ret_t 
do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) if ( ret ) break; - ret = cpu_add(op->u.cpu_add.apic_id, - op->u.cpu_add.acpi_id, + ret = cpu_add(op->u.cpu_add.apic_id, op->u.cpu_add.acpi_id, op->u.cpu_add.pxm); - break; + break; case XENPF_mem_hotadd: ret = xsm_resource_plug_core(XSM_HOOK); if ( ret ) break; - ret = memory_add(op->u.mem_add.spfn, - op->u.mem_add.epfn, - op->u.mem_add.pxm); + ret = memory_add(op->u.mem_add.spfn, op->u.mem_add.epfn, + op->u.mem_add.pxm); break; case XENPF_core_parking: { uint32_t idle_nums; - switch(op->u.core_parking.type) + switch (op->u.core_parking.type) { case XEN_CORE_PARKING_SET: - idle_nums = min_t(uint32_t, - op->u.core_parking.idle_nums, num_present_cpus() - 1); - ret = continue_hypercall_on_cpu( - 0, core_parking_helper, (void *)(unsigned long)idle_nums); + idle_nums = min_t(uint32_t, op->u.core_parking.idle_nums, + num_present_cpus() - 1); + ret = continue_hypercall_on_cpu(0, core_parking_helper, + (void *)(unsigned long)idle_nums); break; case XEN_CORE_PARKING_GET: op->u.core_parking.idle_nums = get_cur_idle_nums(); - ret = __copy_field_to_guest(u_xenpf_op, op, u.core_parking) ? - -EFAULT : 0; + ret = __copy_field_to_guest(u_xenpf_op, op, u.core_parking) + ? -EFAULT + : 0; break; default: @@ -789,8 +786,8 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) guest_from_compat_handle(nameh, op->u.symdata.name); - ret = xensyms_read(&op->u.symdata.symnum, &op->u.symdata.type, - &addr, name); + ret = xensyms_read(&op->u.symdata.symnum, &op->u.symdata.type, &addr, + name); op->u.symdata.address = addr; namelen = strlen(name) + 1; @@ -813,7 +810,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) break; } - out: +out: spin_unlock(&xenpf_lock); return ret; diff --git a/xen/arch/x86/psr.c b/xen/arch/x86/psr.c index 5866a261e3..d5fd53ffa4 100644 --- a/xen/arch/x86/psr.c +++ b/xen/arch/x86/psr.c @@ -33,10 +33,10 @@ * - THRTL_MAX Max throttle value (delay value) of MBA */ -#define PSR_CMT (1u << 0) -#define PSR_CAT (1u << 1) -#define PSR_CDP (1u << 2) -#define PSR_MBA (1u << 3) +#define PSR_CMT (1u << 0) +#define PSR_CAT (1u << 1) +#define PSR_CDP (1u << 2) +#define PSR_MBA (1u << 3) #define CAT_CBM_LEN_MASK 0x1f #define CAT_COS_MAX_MASK 0xffff @@ -51,7 +51,7 @@ * * So, the maximum COS register count of one feature is 128. */ -#define MAX_COS_REG_CNT 128 +#define MAX_COS_REG_CNT 128 #define ASSOC_REG_SHIFT 32 @@ -63,10 +63,11 @@ */ #define MAX_COS_NUM 2 -#define MBA_LINEAR_MASK (1u << 2) +#define MBA_LINEAR_MASK (1u << 2) #define MBA_THRTL_MAX_MASK 0xfff -enum psr_feat_type { +enum psr_feat_type +{ FEAT_TYPE_L3_CAT, FEAT_TYPE_L3_CDP, FEAT_TYPE_L2_CAT, @@ -85,18 +86,21 @@ enum psr_feat_type { * COS_ID=0 corresponds to cos_reg_val[0] (Data) and * cos_reg_val[1] (Code). */ -struct feat_node { +struct feat_node +{ /* cos_max is common among all features so far. */ unsigned int cos_max; /* Feature specific HW info. */ union { - struct { + struct + { /* The length of CBM got through CPUID. */ unsigned int cbm_len; } cat; - struct { + struct + { /* The max throttling value got through CPUID. */ unsigned int thrtl_max; bool linear; @@ -112,7 +116,8 @@ struct feat_node { * Array 'feat_props' is defined to save every feature's properties. We use * 'enum psr_feat_type' as index. */ -static const struct feat_props { +static const struct feat_props +{ /* * cos_num - COS registers number that feature uses for one COS ID. * It is defined in SDM. 
@@ -133,8 +138,8 @@ static const struct feat_props { enum psr_type alt_type; /* get_feat_info is used to return feature HW info through sysctl. */ - bool (*get_feat_info)(const struct feat_node *feat, - uint32_t data[], unsigned int array_len); + bool (*get_feat_info)(const struct feat_node *feat, uint32_t data[], + unsigned int array_len); /* write_msr is used to write out feature MSR register. */ void (*write_msr)(unsigned int cos, uint32_t val, enum psr_type type); @@ -144,7 +149,7 @@ static const struct feat_props { * And change it to valid value if SDM allows. */ bool (*sanitize)(const struct feat_node *feat, uint32_t *val); -} *feat_props[FEAT_TYPE_NUM]; +} * feat_props[FEAT_TYPE_NUM]; /* * PSR features are managed per socket. Below structure defines the members @@ -156,7 +161,8 @@ static const struct feat_props { * COS ID. Every entry of cos_ref corresponds to one COS ID. * dom_set - A bitmap to indicate which domain's cos id has been set. */ -struct psr_socket_info { +struct psr_socket_info +{ bool feat_init; /* Feature array's index is 'enum psr_feat_type' which is same as 'props' */ struct feat_node *features[FEAT_TYPE_NUM]; @@ -166,7 +172,8 @@ struct psr_socket_info { DECLARE_BITMAP(dom_set, DOMID_IDLE + 1); }; -struct psr_assoc { +struct psr_assoc +{ uint64_t val; uint64_t cos_mask; }; @@ -197,16 +204,14 @@ static struct feat_node *feat_mba; * @feat: the feature node. * @cos: the COS ID. */ -#define get_cdp_data(feat, cos) \ - ((feat)->cos_reg_val[(cos) * 2]) +#define get_cdp_data(feat, cos) ((feat)->cos_reg_val[(cos)*2]) /* * get_cdp_code - get CODE COS register value from input COS ID. * @feat: the feature node. * @cos: the COS ID. */ -#define get_cdp_code(feat, cos) \ - ((feat)->cos_reg_val[(cos) * 2 + 1]) +#define get_cdp_code(feat, cos) ((feat)->cos_reg_val[(cos)*2 + 1]) /* * Use this function to check if any allocation feature has been enabled @@ -246,7 +251,7 @@ static enum psr_feat_type psr_type_to_feat_type(enum psr_type type) { enum psr_feat_type feat_type = FEAT_TYPE_UNKNOWN; - switch ( type ) + switch (type) { case PSR_TYPE_L3_CBM: feat_type = FEAT_TYPE_L3_CAT; @@ -324,7 +329,7 @@ static bool cat_init_feature(const struct cpuid_leaf *regs, feat->cos_max = min(opt_cos_max, regs->d & CAT_COS_MAX_MASK); feat->cat.cbm_len = (regs->a & CAT_CBM_LEN_MASK) + 1; - switch ( type ) + switch (type) { case FEAT_TYPE_L3_CAT: case FEAT_TYPE_L2_CAT: @@ -334,9 +339,8 @@ static bool cat_init_feature(const struct cpuid_leaf *regs, /* We reserve cos=0 as default cbm (all bits within cbm_len are 1). */ feat->cos_reg_val[0] = cat_default_val(feat->cat.cbm_len); - wrmsrl((type == FEAT_TYPE_L3_CAT ? - MSR_IA32_PSR_L3_MASK(0) : - MSR_IA32_PSR_L2_MASK(0)), + wrmsrl((type == FEAT_TYPE_L3_CAT ? MSR_IA32_PSR_L3_MASK(0) + : MSR_IA32_PSR_L2_MASK(0)), cat_default_val(feat->cat.cbm_len)); break; @@ -380,9 +384,9 @@ static bool cat_init_feature(const struct cpuid_leaf *regs, } static bool mba_init_feature(const struct cpuid_leaf *regs, - struct feat_node *feat, - struct psr_socket_info *info, - enum psr_feat_type type) + struct feat_node *feat, + struct psr_socket_info *info, + enum psr_feat_type type) { /* No valid value so do not enable feature. 
*/ if ( !regs->a || !regs->d || type != FEAT_TYPE_MBA ) @@ -408,16 +412,17 @@ static bool mba_init_feature(const struct cpuid_leaf *regs, info->features[type] = feat; if ( opt_cpu_info ) - printk(XENLOG_INFO - "MBA: enabled on socket %u, cos_max:%u, thrtl_max:%u, linear:%d\n", - cpu_to_socket(smp_processor_id()), - feat->cos_max, feat->mba.thrtl_max, feat->mba.linear); + printk( + XENLOG_INFO + "MBA: enabled on socket %u, cos_max:%u, thrtl_max:%u, linear:%d\n", + cpu_to_socket(smp_processor_id()), feat->cos_max, + feat->mba.thrtl_max, feat->mba.linear); return true; } -static bool cat_get_feat_info(const struct feat_node *feat, - uint32_t data[], unsigned int array_len) +static bool cat_get_feat_info(const struct feat_node *feat, uint32_t data[], + unsigned int array_len) { if ( array_len != PSR_INFO_ARRAY_SIZE ) return false; @@ -430,8 +435,7 @@ static bool cat_get_feat_info(const struct feat_node *feat, } /* L3 CAT props */ -static void l3_cat_write_msr(unsigned int cos, uint32_t val, - enum psr_type type) +static void l3_cat_write_msr(unsigned int cos, uint32_t val, enum psr_type type) { wrmsrl(MSR_IA32_PSR_L3_MASK(cos), val); } @@ -446,8 +450,8 @@ static const struct feat_props l3_cat_props = { }; /* L3 CDP props */ -static bool l3_cdp_get_feat_info(const struct feat_node *feat, - uint32_t data[], uint32_t array_len) +static bool l3_cdp_get_feat_info(const struct feat_node *feat, uint32_t data[], + uint32_t array_len) { if ( !cat_get_feat_info(feat, data, array_len) ) return false; @@ -457,12 +461,10 @@ static bool l3_cdp_get_feat_info(const struct feat_node *feat, return true; } -static void l3_cdp_write_msr(unsigned int cos, uint32_t val, - enum psr_type type) +static void l3_cdp_write_msr(unsigned int cos, uint32_t val, enum psr_type type) { - wrmsrl(((type == PSR_TYPE_L3_DATA) ? - MSR_IA32_PSR_L3_MASK_DATA(cos) : - MSR_IA32_PSR_L3_MASK_CODE(cos)), + wrmsrl(((type == PSR_TYPE_L3_DATA) ? 
MSR_IA32_PSR_L3_MASK_DATA(cos) + : MSR_IA32_PSR_L3_MASK_CODE(cos)), val); } @@ -477,8 +479,7 @@ static const struct feat_props l3_cdp_props = { }; /* L2 CAT props */ -static void l2_cat_write_msr(unsigned int cos, uint32_t val, - enum psr_type type) +static void l2_cat_write_msr(unsigned int cos, uint32_t val, enum psr_type type) { wrmsrl(MSR_IA32_PSR_L2_MASK(cos), val); } @@ -493,8 +494,8 @@ static const struct feat_props l2_cat_props = { }; /* MBA props */ -static bool mba_get_feat_info(const struct feat_node *feat, - uint32_t data[], unsigned int array_len) +static bool mba_get_feat_info(const struct feat_node *feat, uint32_t data[], + unsigned int array_len) { ASSERT(array_len == PSR_INFO_ARRAY_SIZE); @@ -507,8 +508,7 @@ static bool mba_get_feat_info(const struct feat_node *feat, return true; } -static void mba_write_msr(unsigned int cos, uint32_t val, - enum psr_type type) +static void mba_write_msr(unsigned int cos, uint32_t val, enum psr_type type) { wrmsrl(MSR_IA32_PSR_MBA_MASK(cos), val); } @@ -736,8 +736,8 @@ static void psr_assoc_init(void) unsigned int cos_max = get_max_cos_max(info); if ( info->feat_init ) - psra->cos_mask = ((1ull << get_count_order(cos_max)) - 1) << - ASSOC_REG_SHIFT; + psra->cos_mask = ((1ull << get_count_order(cos_max)) - 1) + << ASSOC_REG_SHIFT; } if ( psr_cmt_enabled() || psra->cos_mask ) @@ -749,11 +749,9 @@ static inline void psr_assoc_rmid(uint64_t *reg, unsigned int rmid) *reg = (*reg & ~rmid_mask) | (rmid & rmid_mask); } -static uint64_t psr_assoc_cos(uint64_t reg, unsigned int cos, - uint64_t cos_mask) +static uint64_t psr_assoc_cos(uint64_t reg, unsigned int cos, uint64_t cos_mask) { - return (reg & ~cos_mask) | - (((uint64_t)cos << ASSOC_REG_SHIFT) & cos_mask); + return (reg & ~cos_mask) | (((uint64_t)cos << ASSOC_REG_SHIFT) & cos_mask); } void psr_ctxt_switch_to(struct domain *d) @@ -805,8 +803,8 @@ static struct psr_socket_info *get_socket_info(unsigned int socket) return socket_info + socket; } -int psr_get_info(unsigned int socket, enum psr_type type, - uint32_t data[], unsigned int array_len) +int psr_get_info(unsigned int socket, enum psr_type type, uint32_t data[], + unsigned int array_len) { const struct psr_socket_info *info = get_socket_info(socket); const struct feat_node *feat; @@ -837,8 +835,8 @@ int psr_get_info(unsigned int socket, enum psr_type type, return -EINVAL; } -int psr_get_val(struct domain *d, unsigned int socket, - uint32_t *val, enum psr_type type) +int psr_get_val(struct domain *d, unsigned int socket, uint32_t *val, + enum psr_type type) { const struct psr_socket_info *info = get_socket_info(socket); const struct feat_node *feat; @@ -907,8 +905,7 @@ static unsigned int get_cos_num(void) return num; } -static int gather_val_array(uint32_t val[], - unsigned int array_len, +static int gather_val_array(uint32_t val[], unsigned int array_len, const struct psr_socket_info *info, unsigned int old_cos) { @@ -976,12 +973,10 @@ static int skip_prior_features(unsigned int *array_len, return skip_len; } -static int insert_val_into_array(uint32_t val[], - unsigned int array_len, +static int insert_val_into_array(uint32_t val[], unsigned int array_len, const struct psr_socket_info *info, enum psr_feat_type feat_type, - enum psr_type type, - uint32_t new_val) + enum psr_type type, uint32_t new_val) { const struct feat_node *feat; const struct feat_props *props; @@ -1037,10 +1032,8 @@ static int insert_val_into_array(uint32_t val[], return ret; } -static int compare_val(const uint32_t val[], - const struct feat_node *feat, - const 
struct feat_props *props, - unsigned int cos) +static int compare_val(const uint32_t val[], const struct feat_node *feat, + const struct feat_props *props, unsigned int cos) { unsigned int i; @@ -1144,10 +1137,8 @@ static int find_cos(const uint32_t val[], unsigned int array_len, return -ENOENT; } -static bool fits_cos_max(const uint32_t val[], - uint32_t array_len, - const struct psr_socket_info *info, - unsigned int cos) +static bool fits_cos_max(const uint32_t val[], uint32_t array_len, + const struct psr_socket_info *info, unsigned int cos) { unsigned int i; @@ -1191,8 +1182,7 @@ static bool fits_cos_max(const uint32_t val[], static int pick_avail_cos(const struct psr_socket_info *info, const uint32_t val[], unsigned int array_len, - unsigned int old_cos, - enum psr_feat_type feat_type) + unsigned int old_cos, enum psr_feat_type feat_type) { unsigned int cos, cos_max = 0; const struct feat_node *feat; @@ -1210,7 +1200,7 @@ static int pick_avail_cos(const struct psr_socket_info *info, /* We cannot use id 0 because it stores the default values. */ if ( old_cos && ref[old_cos] == 1 && fits_cos_max(val, array_len, info, old_cos) ) - return old_cos; + return old_cos; /* Find an unused one other than cos0. */ for ( cos = 1; cos <= cos_max; cos++ ) @@ -1285,8 +1275,7 @@ static int write_psr_msrs(unsigned int socket, unsigned int cos, enum psr_feat_type feat_type) { struct psr_socket_info *info = get_socket_info(socket); - struct cos_write_info data = - { + struct cos_write_info data = { .cos = cos, .val = val, .array_len = array_len, @@ -1309,8 +1298,8 @@ static int write_psr_msrs(unsigned int socket, unsigned int cos, return 0; } -int psr_set_val(struct domain *d, unsigned int socket, - uint64_t new_val, enum psr_type type) +int psr_set_val(struct domain *d, unsigned int socket, uint64_t new_val, + enum psr_type type) { unsigned int old_cos, array_len; int cos, ret; @@ -1327,8 +1316,7 @@ int psr_set_val(struct domain *d, unsigned int socket, return -EINVAL; feat_type = psr_type_to_feat_type(type); - if ( feat_type >= ARRAY_SIZE(info->features) || - !info->features[feat_type] ) + if ( feat_type >= ARRAY_SIZE(info->features) || !info->features[feat_type] ) return -ENOENT; /* @@ -1368,8 +1356,8 @@ int psr_set_val(struct domain *d, unsigned int socket, if ( (ret = gather_val_array(val_array, array_len, info, old_cos)) != 0 ) goto free_array; - if ( (ret = insert_val_into_array(val_array, array_len, info, - feat_type, type, val)) != 0 ) + if ( (ret = insert_val_into_array(val_array, array_len, info, feat_type, + type, val)) != 0 ) goto free_array; spin_lock(&info->ref_lock); @@ -1436,10 +1424,10 @@ int psr_set_val(struct domain *d, unsigned int socket, goto free_array; - unlock_free_array: +unlock_free_array: spin_unlock(&info->ref_lock); - free_array: +free_array: xfree(val_array); return ret; } @@ -1523,16 +1511,14 @@ static int psr_cpu_prepare(void) return 0; /* Malloc memory for the global feature node here. 
*/ - if ( feat_l3 == NULL && - (feat_l3 = xzalloc(struct feat_node)) == NULL ) + if ( feat_l3 == NULL && (feat_l3 = xzalloc(struct feat_node)) == NULL ) return -ENOMEM; if ( feat_l2_cat == NULL && (feat_l2_cat = xzalloc(struct feat_node)) == NULL ) return -ENOMEM; - if ( feat_mba == NULL && - (feat_mba = xzalloc(struct feat_node)) == NULL ) + if ( feat_mba == NULL && (feat_mba = xzalloc(struct feat_node)) == NULL ) return -ENOMEM; return 0; @@ -1611,7 +1597,7 @@ static void psr_cpu_init(void) info->feat_init = true; - assoc_init: +assoc_init: psr_assoc_init(); } @@ -1630,13 +1616,13 @@ static void psr_cpu_fini(unsigned int cpu) free_socket_resources(socket); } -static int cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { int rc = 0; unsigned int cpu = (unsigned long)hcpu; - switch ( action ) + switch (action) { case CPU_UP_PREPARE: rc = psr_cpu_prepare(); @@ -1660,8 +1646,7 @@ static struct notifier_block cpu_nfb = { * (E.g. our CPU_DEAD notification should be called ahead of * cpu_smpboot_free). */ - .priority = -1 -}; + .priority = -1}; static int __init psr_presmp_init(void) { diff --git a/xen/arch/x86/pv/callback.c b/xen/arch/x86/pv/callback.c index acfd1c70d8..e04362d321 100644 --- a/xen/arch/x86/pv/callback.c +++ b/xen/arch/x86/pv/callback.c @@ -40,10 +40,9 @@ static int register_guest_nmi_callback(unsigned long address) if ( !is_canonical_address(address) ) return -EINVAL; - t->vector = TRAP_nmi; - t->flags = 0; - t->cs = (is_pv_32bit_domain(d) ? - FLAT_COMPAT_KERNEL_CS : FLAT_KERNEL_CS); + t->vector = TRAP_nmi; + t->flags = 0; + t->cs = (is_pv_32bit_domain(d) ? FLAT_COMPAT_KERNEL_CS : FLAT_KERNEL_CS); t->address = address; TI_SET_IF(t, 1); @@ -73,7 +72,7 @@ static long register_guest_callback(struct callback_register *reg) if ( !is_canonical_address(reg->address) ) return -EINVAL; - switch ( reg->type ) + switch (reg->type) { case CALLBACKTYPE_event: curr->arch.pv.event_callback_eip = reg->address; @@ -123,7 +122,7 @@ static long unregister_guest_callback(struct callback_unregister *unreg) { long ret; - switch ( unreg->type ) + switch (unreg->type) { case CALLBACKTYPE_event: case CALLBACKTYPE_failsafe: @@ -150,7 +149,7 @@ long do_callback_op(int cmd, XEN_GUEST_HANDLE_PARAM(const_void) arg) { long ret; - switch ( cmd ) + switch (cmd) { case CALLBACKOP_register: { @@ -215,7 +214,7 @@ static long compat_register_guest_callback(struct compat_callback_register *reg) fixup_guest_code_selector(curr->domain, reg->address.cs); - switch ( reg->type ) + switch (reg->type) { case CALLBACKTYPE_event: curr->arch.pv.event_callback_cs = reg->address.cs; @@ -257,12 +256,12 @@ static long compat_register_guest_callback(struct compat_callback_register *reg) return ret; } -static long compat_unregister_guest_callback( - struct compat_callback_unregister *unreg) +static long +compat_unregister_guest_callback(struct compat_callback_unregister *unreg) { long ret; - switch ( unreg->type ) + switch (unreg->type) { case CALLBACKTYPE_event: case CALLBACKTYPE_failsafe: @@ -288,7 +287,7 @@ long compat_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg) { long ret; - switch ( cmd ) + switch (cmd) { case CALLBACKOP_register: { @@ -329,18 +328,10 @@ long compat_set_callbacks(unsigned long event_selector, { struct compat_callback_register event = { .type = CALLBACKTYPE_event, - .address = { - .cs = event_selector, - .eip = event_address - } - }; + .address = {.cs = event_selector, .eip = 
event_address}}; struct compat_callback_register failsafe = { .type = CALLBACKTYPE_failsafe, - .address = { - .cs = failsafe_selector, - .eip = failsafe_address - } - }; + .address = {.cs = failsafe_selector, .eip = failsafe_address}}; compat_register_guest_callback(&event); compat_register_guest_callback(&failsafe); @@ -362,7 +353,7 @@ long do_set_trap_table(XEN_GUEST_HANDLE_PARAM(const_trap_info_t) traps) return 0; } - for ( ; ; ) + for ( ;; ) { if ( copy_from_guest(&cur, traps, 1) ) { @@ -384,8 +375,8 @@ long do_set_trap_table(XEN_GUEST_HANDLE_PARAM(const_trap_info_t) traps) if ( hypercall_preempt_check() ) { - rc = hypercall_create_continuation( - __HYPERVISOR_set_trap_table, "h", traps); + rc = hypercall_create_continuation(__HYPERVISOR_set_trap_table, "h", + traps); break; } } @@ -407,7 +398,7 @@ int compat_set_trap_table(XEN_GUEST_HANDLE(trap_info_compat_t) traps) return 0; } - for ( ; ; ) + for ( ;; ) { if ( copy_from_guest(&cur, traps, 1) ) { @@ -426,8 +417,8 @@ int compat_set_trap_table(XEN_GUEST_HANDLE(trap_info_compat_t) traps) if ( hypercall_preempt_check() ) { - rc = hypercall_create_continuation( - __HYPERVISOR_set_trap_table, "h", traps); + rc = hypercall_create_continuation(__HYPERVISOR_set_trap_table, "h", + traps); break; } } @@ -440,7 +431,7 @@ long do_nmi_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) struct xennmi_callback cb; long rc = 0; - switch ( cmd ) + switch (cmd) { case XENNMI_register_callback: rc = -EFAULT; @@ -465,7 +456,7 @@ int compat_nmi_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) struct compat_nmi_callback cb; int rc = 0; - switch ( cmd ) + switch (cmd) { case XENNMI_register_callback: rc = -EFAULT; diff --git a/xen/arch/x86/pv/descriptor-tables.c b/xen/arch/x86/pv/descriptor-tables.c index 940804b18a..bd493c3589 100644 --- a/xen/arch/x86/pv/descriptor-tables.c +++ b/xen/arch/x86/pv/descriptor-tables.c @@ -66,7 +66,7 @@ bool pv_destroy_ldt(struct vcpu *v) ASSERT(v->arch.pv.shadow_ldt_mapcnt == mappings_dropped); v->arch.pv.shadow_ldt_mapcnt = 0; - out: +out: spin_unlock(&v->arch.pv.shadow_ldt_lock); #endif @@ -137,7 +137,7 @@ long pv_set_gdt(struct vcpu *v, unsigned long *frames, unsigned int entries) return 0; - fail: +fail: while ( i-- > 0 ) { put_page_and_type(mfn_to_page(_mfn(frames[i]))); @@ -226,7 +226,7 @@ long do_update_descriptor(uint64_t gaddr, seg_desc_t d) mfn = page_to_mfn(page); /* Check if the given frame is in use in an unsafe context. */ - switch ( page->u.inuse.type_info & PGT_type_mask ) + switch (page->u.inuse.type_info & PGT_type_mask) { case PGT_seg_desc_page: if ( unlikely(!get_page_type(page, PGT_seg_desc_page)) ) @@ -249,14 +249,14 @@ long do_update_descriptor(uint64_t gaddr, seg_desc_t d) ret = 0; /* success */ - out: +out: put_page(page); return ret; } -int compat_update_descriptor(uint32_t pa_lo, uint32_t pa_hi, - uint32_t desc_lo, uint32_t desc_hi) +int compat_update_descriptor(uint32_t pa_lo, uint32_t pa_hi, uint32_t desc_lo, + uint32_t desc_hi) { seg_desc_t d; diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c index 837ef7bca1..0d8b6022e6 100644 --- a/xen/arch/x86/pv/dom0_build.c +++ b/xen/arch/x86/pv/dom0_build.c @@ -23,13 +23,13 @@ #include /* Allow ring-3 access in long mode as guest cannot use ring 1 ... */ -#define BASE_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER) -#define L1_PROT (BASE_PROT|_PAGE_GUEST_KERNEL) +#define BASE_PROT (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_USER) +#define L1_PROT (BASE_PROT | _PAGE_GUEST_KERNEL) /* ... 
except for compatibility mode guests. */ -#define COMPAT_L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED) -#define L2_PROT (BASE_PROT|_PAGE_DIRTY) -#define L3_PROT (BASE_PROT|_PAGE_DIRTY) -#define L4_PROT (BASE_PROT|_PAGE_DIRTY) +#define COMPAT_L1_PROT (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED) +#define L2_PROT (BASE_PROT | _PAGE_DIRTY) +#define L3_PROT (BASE_PROT | _PAGE_DIRTY) +#define L4_PROT (BASE_PROT | _PAGE_DIRTY) void __init dom0_update_physmap(struct domain *d, unsigned long pfn, unsigned long mfn, unsigned long vphysmap_s) @@ -67,15 +67,14 @@ static __init void mark_pv_pt_pages_rdonly(struct domain *d, page = mfn_to_page(l1e_get_mfn(*pl1e)); /* Read-only mapping + PGC_allocated + page-table page. */ - page->count_info = PGC_allocated | 3; + page->count_info = PGC_allocated | 3; page->u.inuse.type_info |= PGT_validated | 1; /* Top-level p.t. is pinned. */ if ( (page->u.inuse.type_info & PGT_type_mask) == - (!is_pv_32bit_domain(d) ? - PGT_l4_page_table : PGT_l3_page_table) ) + (!is_pv_32bit_domain(d) ? PGT_l4_page_table : PGT_l3_page_table) ) { - page->count_info += 1; + page->count_info += 1; page->u.inuse.type_info += 1 | PGT_pinned; } @@ -110,8 +109,10 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn, while ( vphysmap_start < vphysmap_end ) { - if ( d->tot_pages + ((round_pgup(vphysmap_end) - vphysmap_start) - >> PAGE_SHIFT) + 3 > nr_pages ) + if ( d->tot_pages + + ((round_pgup(vphysmap_end) - vphysmap_start) >> PAGE_SHIFT) + + 3 > + nr_pages ) panic("Dom0 allocation too small for initial P->M table\n"); if ( pl1e ) @@ -142,7 +143,8 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn, pl3e = __map_domain_page(page); clear_page(pl3e); *pl4e = l4e_from_page(page, L4_PROT); - } else + } + else pl3e = map_l3t_from_l4e(*pl4e); pl3e += l3_table_offset(vphysmap_start); @@ -155,14 +157,14 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn, * logdirty mode on dom0. 
*/ if ( (!IS_ENABLED(CONFIG_SHADOW_PAGING) || - !d->arch.pv.check_l1tf) && cpu_has_page1gb && + !d->arch.pv.check_l1tf) && + cpu_has_page1gb && !(vphysmap_start & ((1UL << L3_PAGETABLE_SHIFT) - 1)) && vphysmap_end >= vphysmap_start + (1UL << L3_PAGETABLE_SHIFT) && - (page = alloc_domheap_pages(d, - L3_PAGETABLE_SHIFT - PAGE_SHIFT, + (page = alloc_domheap_pages(d, L3_PAGETABLE_SHIFT - PAGE_SHIFT, MEMF_no_scrub)) != NULL ) { - *pl3e = l3e_from_page(page, L1_PROT|_PAGE_DIRTY|_PAGE_PSE); + *pl3e = l3e_from_page(page, L1_PROT | _PAGE_DIRTY | _PAGE_PSE); vphysmap_start += 1UL << L3_PAGETABLE_SHIFT; continue; } @@ -184,11 +186,10 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn, { if ( !(vphysmap_start & ((1UL << L2_PAGETABLE_SHIFT) - 1)) && vphysmap_end >= vphysmap_start + (1UL << L2_PAGETABLE_SHIFT) && - (page = alloc_domheap_pages(d, - L2_PAGETABLE_SHIFT - PAGE_SHIFT, + (page = alloc_domheap_pages(d, L2_PAGETABLE_SHIFT - PAGE_SHIFT, MEMF_no_scrub)) != NULL ) { - *pl2e = l2e_from_page(page, L1_PROT|_PAGE_DIRTY|_PAGE_PSE); + *pl2e = l2e_from_page(page, L1_PROT | _PAGE_DIRTY | _PAGE_PSE); vphysmap_start += 1UL << L2_PAGETABLE_SHIFT; continue; } @@ -211,7 +212,7 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn, if ( !page ) break; - *pl1e = l1e_from_page(page, L1_PROT|_PAGE_DIRTY); + *pl1e = l1e_from_page(page, L1_PROT | _PAGE_DIRTY); vphysmap_start += PAGE_SIZE; vphysmap_start &= PAGE_MASK; } @@ -228,8 +229,8 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn, unmap_domain_page(l4start); } -static struct page_info * __init alloc_chunk(struct domain *d, - unsigned long max_pages) +static struct page_info *__init alloc_chunk(struct domain *d, + unsigned long max_pages) { static unsigned int __initdata last_order = MAX_ORDER; struct page_info *page; @@ -239,8 +240,8 @@ static struct page_info * __init alloc_chunk(struct domain *d, order = last_order; else if ( max_pages & (max_pages - 1) ) --order; - while ( (page = alloc_domheap_pages(d, order, dom0_memflags | - MEMF_no_scrub)) == NULL ) + while ( (page = alloc_domheap_pages( + d, order, dom0_memflags | MEMF_no_scrub)) == NULL ) if ( order-- == 0 ) break; if ( page ) @@ -279,10 +280,8 @@ static struct page_info * __init alloc_chunk(struct domain *d, return page; } -int __init dom0_construct_pv(struct domain *d, - const module_t *image, - unsigned long image_headroom, - module_t *initrd, +int __init dom0_construct_pv(struct domain *d, const module_t *image, + unsigned long image_headroom, module_t *initrd, char *cmdline) { int i, cpu, rc, compatible, compat32, order, machine; @@ -354,7 +353,7 @@ int __init dom0_construct_pv(struct domain *d, /* compatibility check */ compatible = 0; - compat32 = 0; + compat32 = 0; machine = elf_uval(&elf, elf.ehdr, e_machine); printk(" Xen kernel: 64-bit, lsb, compat32\n"); if ( elf_32bit(&elf) && parms.pae == XEN_PAE_BIMODAL ) @@ -364,13 +363,11 @@ int __init dom0_construct_pv(struct domain *d, compat32 = 1; compatible = 1; } - if (elf_64bit(&elf) && machine == EM_X86_64) + if ( elf_64bit(&elf) && machine == EM_X86_64 ) compatible = 1; printk(" Dom0 kernel: %s%s, %s, paddr %#" PRIx64 " -> %#" PRIx64 "\n", - elf_64bit(&elf) ? "64-bit" : "32-bit", - parms.pae ? ", PAE" : "", - elf_msb(&elf) ? "msb" : "lsb", - elf.pstart, elf.pend); + elf_64bit(&elf) ? "64-bit" : "32-bit", parms.pae ? ", PAE" : "", + elf_msb(&elf) ? 
"msb" : "lsb", elf.pstart, elf.pend); if ( elf.bsd_symtab_pstart ) printk(" Dom0 symbol map %#" PRIx64 " -> %#" PRIx64 "\n", elf.bsd_symtab_pstart, elf.bsd_symtab_pend); @@ -405,7 +402,7 @@ int __init dom0_construct_pv(struct domain *d, nr_pages = dom0_compute_nr_pages(d, &parms, initrd_len); if ( parms.pae == XEN_PAE_EXTCR3 ) - set_bit(VMASST_TYPE_pae_extended_cr3, &d->vm_assist); + set_bit(VMASST_TYPE_pae_extended_cr3, &d->vm_assist); if ( !pv_shim && (parms.virt_hv_start_low != UNSET_ADDR) && elf_32bit(&elf) ) @@ -434,59 +431,60 @@ int __init dom0_construct_pv(struct domain *d, * read-only). We have a pair of simultaneous equations in two unknowns, * which we solve by exhaustive search. */ - v_start = parms.virt_base; - vkern_start = parms.virt_kstart; - vkern_end = parms.virt_kend; + v_start = parms.virt_base; + vkern_start = parms.virt_kstart; + vkern_end = parms.virt_kend; if ( parms.unmapped_initrd ) { - vinitrd_start = vinitrd_end = 0; + vinitrd_start = vinitrd_end = 0; vphysmap_start = round_pgup(vkern_end); } else { - vinitrd_start = round_pgup(vkern_end); - vinitrd_end = vinitrd_start + initrd_len; + vinitrd_start = round_pgup(vkern_end); + vinitrd_end = vinitrd_start + initrd_len; vphysmap_start = round_pgup(vinitrd_end); } - vphysmap_end = vphysmap_start + (nr_pages * (!is_pv_32bit_domain(d) ? - sizeof(unsigned long) : - sizeof(unsigned int))); + vphysmap_end = vphysmap_start + + (nr_pages * (!is_pv_32bit_domain(d) ? sizeof(unsigned long) + : sizeof(unsigned int))); if ( parms.p2m_base != UNSET_ADDR ) vphysmap_end = vphysmap_start; vstartinfo_start = round_pgup(vphysmap_end); - vstartinfo_end = vstartinfo_start + sizeof(struct start_info); + vstartinfo_end = vstartinfo_start + sizeof(struct start_info); if ( pv_shim ) { - vxenstore_start = round_pgup(vstartinfo_end); - vxenstore_end = vxenstore_start + PAGE_SIZE; - vconsole_start = vxenstore_end; - vconsole_end = vconsole_start + PAGE_SIZE; - vpt_start = vconsole_end; + vxenstore_start = round_pgup(vstartinfo_end); + vxenstore_end = vxenstore_start + PAGE_SIZE; + vconsole_start = vxenstore_end; + vconsole_end = vconsole_start + PAGE_SIZE; + vpt_start = vconsole_end; } else { - vpt_start = round_pgup(vstartinfo_end); - vstartinfo_end += sizeof(struct dom0_vga_console_info); + vpt_start = round_pgup(vstartinfo_end); + vstartinfo_end += sizeof(struct dom0_vga_console_info); } - for ( nr_pt_pages = 2; ; nr_pt_pages++ ) + for ( nr_pt_pages = 2;; nr_pt_pages++ ) { - vpt_end = vpt_start + (nr_pt_pages * PAGE_SIZE); - vstack_start = vpt_end; - vstack_end = vstack_start + PAGE_SIZE; - v_end = (vstack_end + (1UL<<22)-1) & ~((1UL<<22)-1); + vpt_end = vpt_start + (nr_pt_pages * PAGE_SIZE); + vstack_start = vpt_end; + vstack_end = vstack_start + PAGE_SIZE; + v_end = (vstack_end + (1UL << 22) - 1) & ~((1UL << 22) - 1); if ( (v_end - vstack_end) < (512UL << 10) ) v_end += 1UL << 22; /* Add extra 4MB to get >= 512kB padding. */ -#define NR(_l,_h,_s) \ - (((((_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \ - ((_l) & ~((1UL<<(_s))-1))) >> (_s)) - if ( (!is_pv_32bit_domain(d) + /* # L4 */ +#define NR(_l, _h, _s) \ + (((((_h) + ((1UL << (_s)) - 1)) & ~((1UL << (_s)) - 1)) - \ + ((_l) & ~((1UL << (_s)) - 1))) >> \ + (_s)) + if ( (!is_pv_32bit_domain(d) + /* # L4 */ NR(v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */ - (!is_pv_32bit_domain(d) ? - NR(v_start, v_end, L3_PAGETABLE_SHIFT) : /* # L2 */ - 4) + /* # compat L2 */ - NR(v_start, v_end, L2_PAGETABLE_SHIFT)) /* # L1 */ + (!is_pv_32bit_domain(d) ? 
NR(v_start, v_end, L3_PAGETABLE_SHIFT) + : /* # L2 */ + 4) + /* # compat L2 */ + NR(v_start, v_end, L2_PAGETABLE_SHIFT)) /* # L1 */ <= nr_pt_pages ) break; } @@ -501,7 +499,7 @@ int __init dom0_construct_pv(struct domain *d, if ( parms.p2m_base != UNSET_ADDR ) { vphysmap_start = parms.p2m_base; - vphysmap_end = vphysmap_start + nr_pages * sizeof(unsigned long); + vphysmap_end = vphysmap_start + nr_pages * sizeof(unsigned long); } page = alloc_domheap_pages(d, order, MEMF_no_scrub); if ( page == NULL ) @@ -511,9 +509,8 @@ int __init dom0_construct_pv(struct domain *d, if ( initrd_len ) { - initrd_pfn = vinitrd_start ? - (vinitrd_start - v_start) >> PAGE_SHIFT : - d->tot_pages; + initrd_pfn = vinitrd_start ? (vinitrd_start - v_start) >> PAGE_SHIFT + : d->tot_pages; initrd_mfn = mfn = initrd->mod_start; count = PFN_UP(initrd_len); if ( d->arch.physaddr_bitsize && @@ -532,8 +529,7 @@ int __init dom0_construct_pv(struct domain *d, memcpy(page_to_virt(page), mfn_to_virt(initrd->mod_start), initrd_len); mpt_alloc = (paddr_t)initrd->mod_start << PAGE_SHIFT; - init_domheap_pages(mpt_alloc, - mpt_alloc + PAGE_ALIGN(initrd_len)); + init_domheap_pages(mpt_alloc, mpt_alloc + PAGE_ALIGN(initrd_len)); initrd->mod_start = initrd_mfn = mfn_x(page_to_mfn(page)); } else @@ -546,16 +542,15 @@ int __init dom0_construct_pv(struct domain *d, } printk("PHYSICAL MEMORY ARRANGEMENT:\n" - " Dom0 alloc.: %"PRIpaddr"->%"PRIpaddr, + " Dom0 alloc.: %" PRIpaddr "->%" PRIpaddr, pfn_to_paddr(alloc_spfn), pfn_to_paddr(alloc_epfn)); if ( d->tot_pages < nr_pages ) - printk(" (%lu pages to be allocated)", - nr_pages - d->tot_pages); + printk(" (%lu pages to be allocated)", nr_pages - d->tot_pages); if ( initrd ) { mpt_alloc = (paddr_t)initrd->mod_start << PAGE_SHIFT; - printk("\n Init. ramdisk: %"PRIpaddr"->%"PRIpaddr, - mpt_alloc, mpt_alloc + initrd_len); + printk("\n Init. ramdisk: %" PRIpaddr "->%" PRIpaddr, mpt_alloc, + mpt_alloc + initrd_len); } printk("\nVIRTUAL MEMORY ARRANGEMENT:\n" " Loaded kernel: %p->%p\n" @@ -567,15 +562,11 @@ int __init dom0_construct_pv(struct domain *d, " Page tables: %p->%p\n" " Boot stack: %p->%p\n" " TOTAL: %p->%p\n", - _p(vkern_start), _p(vkern_end), - _p(vinitrd_start), _p(vinitrd_end), - _p(vphysmap_start), _p(vphysmap_end), - _p(vstartinfo_start), _p(vstartinfo_end), - _p(vxenstore_start), _p(vxenstore_end), - _p(vconsole_start), _p(vconsole_end), - _p(vpt_start), _p(vpt_end), - _p(vstack_start), _p(vstack_end), - _p(v_start), _p(v_end)); + _p(vkern_start), _p(vkern_end), _p(vinitrd_start), _p(vinitrd_end), + _p(vphysmap_start), _p(vphysmap_end), _p(vstartinfo_start), + _p(vstartinfo_end), _p(vxenstore_start), _p(vxenstore_end), + _p(vconsole_start), _p(vconsole_end), _p(vpt_start), _p(vpt_end), + _p(vstack_start), _p(vstack_end), _p(v_start), _p(v_end)); printk(" ENTRY ADDRESS: %p\n", _p(parms.virt_entry)); process_pending_softirqs(); @@ -585,10 +576,9 @@ int __init dom0_construct_pv(struct domain *d, mpt_alloc -= PAGE_ALIGN(initrd_len); /* Overlap with Xen protected area? */ - if ( !is_pv_32bit_domain(d) ? - ((v_start < HYPERVISOR_VIRT_END) && - (v_end > HYPERVISOR_VIRT_START)) : - (v_end > HYPERVISOR_COMPAT_VIRT_START(d)) ) + if ( !is_pv_32bit_domain(d) ? 
((v_start < HYPERVISOR_VIRT_END) && + (v_end > HYPERVISOR_VIRT_START)) + : (v_end > HYPERVISOR_COMPAT_VIRT_START(d)) ) { printk("DOM0 image overlaps with Xen private area.\n"); rc = -EINVAL; @@ -598,57 +588,61 @@ int __init dom0_construct_pv(struct domain *d, if ( is_pv_32bit_domain(d) ) { v->arch.pv.failsafe_callback_cs = FLAT_COMPAT_KERNEL_CS; - v->arch.pv.event_callback_cs = FLAT_COMPAT_KERNEL_CS; + v->arch.pv.event_callback_cs = FLAT_COMPAT_KERNEL_CS; } /* WARNING: The new domain must have its 'processor' field filled in! */ if ( !is_pv_32bit_domain(d) ) { maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table; - l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE; + l4start = l4tab = __va(mpt_alloc); + mpt_alloc += PAGE_SIZE; } else { page = alloc_domheap_page(d, MEMF_no_owner | MEMF_no_scrub); if ( !page ) panic("Not enough RAM for domain 0 PML4\n"); - page->u.inuse.type_info = PGT_l4_page_table|PGT_validated|1; + page->u.inuse.type_info = PGT_l4_page_table | PGT_validated | 1; l4start = l4tab = page_to_virt(page); maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l3_page_table; - l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE; + l3start = __va(mpt_alloc); + mpt_alloc += PAGE_SIZE; } clear_page(l4tab); - init_xen_l4_slots(l4tab, _mfn(virt_to_mfn(l4start)), - d, INVALID_MFN, true); + init_xen_l4_slots(l4tab, _mfn(virt_to_mfn(l4start)), d, INVALID_MFN, true); v->arch.guest_table = pagetable_from_paddr(__pa(l4start)); if ( is_pv_32bit_domain(d) ) v->arch.guest_table_user = v->arch.guest_table; l4tab += l4_table_offset(v_start); pfn = alloc_spfn; - for ( count = 0; count < ((v_end-v_start) >> PAGE_SHIFT); count++ ) + for ( count = 0; count < ((v_end - v_start) >> PAGE_SHIFT); count++ ) { - if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) ) + if ( !((unsigned long)l1tab & (PAGE_SIZE - 1)) ) { maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l1_page_table; - l1start = l1tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE; + l1start = l1tab = __va(mpt_alloc); + mpt_alloc += PAGE_SIZE; clear_page(l1tab); if ( count == 0 ) l1tab += l1_table_offset(v_start); - if ( !((unsigned long)l2tab & (PAGE_SIZE-1)) ) + if ( !((unsigned long)l2tab & (PAGE_SIZE - 1)) ) { maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table; - l2start = l2tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE; + l2start = l2tab = __va(mpt_alloc); + mpt_alloc += PAGE_SIZE; clear_page(l2tab); if ( count == 0 ) l2tab += l2_table_offset(v_start); - if ( !((unsigned long)l3tab & (PAGE_SIZE-1)) ) + if ( !((unsigned long)l3tab & (PAGE_SIZE - 1)) ) { if ( count || !l3start ) { maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l3_page_table; - l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE; + l3start = __va(mpt_alloc); + mpt_alloc += PAGE_SIZE; } l3tab = l3start; clear_page(l3tab); @@ -667,8 +661,8 @@ int __init dom0_construct_pv(struct domain *d, mfn = pfn++; else mfn = initrd_mfn++; - *l1tab = l1e_from_pfn(mfn, (!is_pv_32bit_domain(d) ? - L1_PROT : COMPAT_L1_PROT)); + *l1tab = l1e_from_pfn( + mfn, (!is_pv_32bit_domain(d) ? 
L1_PROT : COMPAT_L1_PROT)); l1tab++; page = mfn_to_page(_mfn(mfn)); @@ -685,7 +679,8 @@ int __init dom0_construct_pv(struct domain *d, if ( !l3e_get_intpte(*l3tab) ) { maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table; - l2tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE; + l2tab = __va(mpt_alloc); + mpt_alloc += PAGE_SIZE; clear_page(l2tab); *l3tab = l3e_from_paddr(__pa(l2tab), L3_PROT); } @@ -728,7 +723,7 @@ int __init dom0_construct_pv(struct domain *d, mapcache_override_current(v); /* Copy the OS image and free temporary buffer. */ - elf.dest_base = (void*)vkern_start; + elf.dest_base = (void *)vkern_start; elf.dest_size = vkern_end - vkern_start; elf_set_vcpu(&elf, v); rc = elf_load_binary(&elf); @@ -750,8 +745,8 @@ int __init dom0_construct_pv(struct domain *d, rc = -1; goto out; } - hypercall_page_initialise( - d, (void *)(unsigned long)parms.virt_hypercall); + hypercall_page_initialise(d, + (void *)(unsigned long)parms.virt_hypercall); } /* Free temporary buffers. */ @@ -765,13 +760,13 @@ int __init dom0_construct_pv(struct domain *d, si->shared_info = virt_to_maddr(d->shared_info); if ( !pv_shim ) - si->flags = SIF_PRIVILEGED | SIF_INITDOMAIN; + si->flags = SIF_PRIVILEGED | SIF_INITDOMAIN; if ( !vinitrd_start && initrd_len ) - si->flags |= SIF_MOD_START_PFN; - si->flags |= (xen_processor_pmbits << 8) & SIF_PM_MASK; - si->pt_base = vpt_start; + si->flags |= SIF_MOD_START_PFN; + si->flags |= (xen_processor_pmbits << 8) & SIF_PM_MASK; + si->pt_base = vpt_start; si->nr_pt_frames = nr_pt_pages; - si->mfn_list = vphysmap_start; + si->mfn_list = vphysmap_start; snprintf(si->magic, sizeof(si->magic), "xen-3.0-x86_%d%s", elf_64bit(&elf) ? 64 : 32, parms.pae ? "p" : ""); @@ -807,7 +802,7 @@ int __init dom0_construct_pv(struct domain *d, } si->first_p2m_pfn = pfn; si->nr_p2m_frames = d->tot_pages - count; - page_list_for_each ( page, &d->page_list ) + page_list_for_each (page, &d->page_list) { mfn = mfn_x(page_to_mfn(page)); BUG_ON(SHARED_M2P(get_gpfn_from_mfn(mfn))); @@ -840,7 +835,8 @@ int __init dom0_construct_pv(struct domain *d, #endif dom0_update_physmap(d, pfn, mfn, vphysmap_start); #undef pfn - page++; pfn++; + page++; + pfn++; if ( !(pfn & 0xfffff) ) process_pending_softirqs(); } @@ -849,7 +845,7 @@ int __init dom0_construct_pv(struct domain *d, if ( initrd_len != 0 ) { si->mod_start = vinitrd_start ?: initrd_pfn; - si->mod_len = initrd_len; + si->mod_len = initrd_len; } memset(si->cmd_line, 0, sizeof(si->cmd_line)); @@ -859,7 +855,7 @@ int __init dom0_construct_pv(struct domain *d, #ifdef CONFIG_VIDEO if ( !pv_shim && fill_console_start_info((void *)(si + 1)) ) { - si->console.dom0.info_off = sizeof(struct start_info); + si->console.dom0.info_off = sizeof(struct start_info); si->console.dom0.info_size = sizeof(struct dom0_vga_console_info); } #endif @@ -893,10 +889,10 @@ int __init dom0_construct_pv(struct domain *d, regs = &v->arch.user_regs; regs->ds = regs->es = regs->fs = regs->gs = !is_pv_32bit_domain(d) ? FLAT_KERNEL_DS : FLAT_COMPAT_KERNEL_DS; - regs->ss = (!is_pv_32bit_domain(d) ? - FLAT_KERNEL_SS : FLAT_COMPAT_KERNEL_SS); - regs->cs = (!is_pv_32bit_domain(d) ? - FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS); + regs->ss = + (!is_pv_32bit_domain(d) ? FLAT_KERNEL_SS : FLAT_COMPAT_KERNEL_SS); + regs->cs = + (!is_pv_32bit_domain(d) ? 
FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS); regs->rip = parms.virt_entry; regs->rsp = vstack_end; regs->rsi = vstartinfo_start; @@ -929,8 +925,7 @@ int __init dom0_construct_pv(struct domain *d, out: if ( elf_check_broken(&elf) ) - printk(" Xen dom0 kernel broken ELF: %s\n", - elf_check_broken(&elf)); + printk(" Xen dom0 kernel broken ELF: %s\n", elf_check_broken(&elf)); return rc; } diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c index 1060a7ac45..7428a2d8a3 100644 --- a/xen/arch/x86/pv/domain.c +++ b/xen/arch/x86/pv/domain.c @@ -26,7 +26,7 @@ static int parse_pcid(const char *s) { int rc = 0; - switch ( parse_bool(s, NULL) ) + switch (parse_bool(s, NULL)) { case 0: opt_pcid = PCID_OFF; @@ -37,7 +37,7 @@ static int parse_pcid(const char *s) break; default: - switch ( parse_boolean("xpti", s, NULL) ) + switch (parse_boolean("xpti", s, NULL)) { case 0: opt_pcid = PCID_NOXPTI; @@ -102,10 +102,9 @@ unsigned long pv_fixup_guest_cr4(const struct vcpu *v, unsigned long cr4) const struct cpuid_policy *p = v->domain->arch.cpuid; /* Discard attempts to set guest controllable bits outside of the policy. */ - cr4 &= ~((p->basic.tsc ? 0 : X86_CR4_TSD) | - (p->basic.de ? 0 : X86_CR4_DE) | + cr4 &= ~((p->basic.tsc ? 0 : X86_CR4_TSD) | (p->basic.de ? 0 : X86_CR4_DE) | (p->feat.fsgsbase ? 0 : X86_CR4_FSGSBASE) | - (p->basic.xsave ? 0 : X86_CR4_OSXSAVE)); + (p->basic.xsave ? 0 : X86_CR4_OSXSAVE)); /* Masks expected to be disjoint sets. */ BUILD_BUG_ON(PV_CR4_GUEST_MASK & PV_CR4_GUEST_VISIBLE_MASK); @@ -121,8 +120,8 @@ unsigned long pv_fixup_guest_cr4(const struct vcpu *v, unsigned long cr4) unsigned long pv_make_cr4(const struct vcpu *v) { const struct domain *d = v->domain; - unsigned long cr4 = mmu_cr4_features & - ~(X86_CR4_PCIDE | X86_CR4_PGE | X86_CR4_TSD); + unsigned long cr4 = + mmu_cr4_features & ~(X86_CR4_PCIDE | X86_CR4_PGE | X86_CR4_TSD); /* * PCIDE or PGE depends on the PCID/XPTI settings, but must not both be @@ -165,10 +164,9 @@ int switch_compat(struct domain *d) d->arch.has_32bit_shinfo = 1; d->arch.is_32bit_pv = 1; - for_each_vcpu( d, v ) + for_each_vcpu (d, v) { - if ( (rc = setup_compat_arg_xlat(v)) || - (rc = setup_compat_l4(v)) ) + if ( (rc = setup_compat_arg_xlat(v)) || (rc = setup_compat_l4(v)) ) goto undo_and_fail; } @@ -182,9 +180,9 @@ int switch_compat(struct domain *d) return 0; - undo_and_fail: +undo_and_fail: d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0; - for_each_vcpu( d, v ) + for_each_vcpu (d, v) { free_compat_arg_xlat(v); release_compat_l4(v); @@ -197,8 +195,7 @@ static int pv_create_gdt_ldt_l1tab(struct vcpu *v) { return create_perdomain_mapping(v->domain, GDT_VIRT_START(v), 1U << GDT_LDT_VCPU_SHIFT, - v->domain->arch.pv.gdt_ldt_l1tab, - NULL); + v->domain->arch.pv.gdt_ldt_l1tab, NULL); } static void pv_destroy_gdt_ldt_l1tab(struct vcpu *v) @@ -234,8 +231,7 @@ int pv_vcpu_initialise(struct vcpu *v) if ( rc ) return rc; - BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv.trap_ctxt) > - PAGE_SIZE); + BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv.trap_ctxt) > PAGE_SIZE); v->arch.pv.trap_ctxt = xzalloc_array(struct trap_info, NR_VECTORS); if ( !v->arch.pv.trap_ctxt ) { @@ -257,7 +253,7 @@ int pv_vcpu_initialise(struct vcpu *v) goto done; } - done: +done: if ( rc ) pv_vcpu_destroy(v); return rc; @@ -275,12 +271,11 @@ void pv_domain_destroy(struct domain *d) FREE_XENHEAP_PAGE(d->arch.pv.gdt_ldt_l1tab); } - int pv_domain_initialise(struct domain *d) { static const struct arch_csw pv_csw = { .from = paravirt_ctxt_switch_from, - .to = paravirt_ctxt_switch_to, + .to = 
paravirt_ctxt_switch_to, .tail = continue_nonidle_domain, }; int rc = -ENOMEM; @@ -297,9 +292,8 @@ int pv_domain_initialise(struct domain *d) (d->arch.pv.cpuidmasks = xmemdup(&cpuidmask_defaults)) == NULL ) goto fail; - rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START, - GDT_LDT_MBYTES << (20 - PAGE_SHIFT), - NULL, NULL); + rc = create_perdomain_mapping( + d, GDT_LDT_VIRT_START, GDT_LDT_MBYTES << (20 - PAGE_SHIFT), NULL, NULL); if ( rc ) goto fail; @@ -311,7 +305,7 @@ int pv_domain_initialise(struct domain *d) d->arch.pv.xpti = is_hardware_domain(d) ? opt_xpti_hwdom : opt_xpti_domu; if ( !is_pv_32bit_domain(d) && use_invpcid && cpu_has_pcid ) - switch ( ACCESS_ONCE(opt_pcid) ) + switch (ACCESS_ONCE(opt_pcid)) { case PCID_OFF: break; @@ -335,7 +329,7 @@ int pv_domain_initialise(struct domain *d) return 0; - fail: +fail: pv_domain_destroy(d); return rc; @@ -399,7 +393,7 @@ void toggle_guest_mode(struct vcpu *v) else v->arch.pv.gs_base_user = __rdgsbase(); } - asm volatile ( "swapgs" ); + asm volatile("swapgs"); _toggle_guest_pt(v); } diff --git a/xen/arch/x86/pv/emul-gate-op.c b/xen/arch/x86/pv/emul-gate-op.c index 6dbf3c12a0..1f43c365f7 100644 --- a/xen/arch/x86/pv/emul-gate-op.c +++ b/xen/arch/x86/pv/emul-gate-op.c @@ -41,10 +41,8 @@ #include "emulate.h" -static int read_gate_descriptor(unsigned int gate_sel, - const struct vcpu *v, - unsigned int *sel, - unsigned long *off, +static int read_gate_descriptor(unsigned int gate_sel, const struct vcpu *v, + unsigned int *sel, unsigned long *off, unsigned int *ar) { seg_desc_t desc; @@ -71,15 +69,14 @@ static int read_gate_descriptor(unsigned int gate_sel, { if ( (*ar & 0x1f00) != 0x0c00 || (gate_sel >= FIRST_RESERVED_GDT_BYTE - 8 && !(gate_sel & 4)) || - __get_user(desc, pdesc + 1) || - (desc.b & 0x1f00) ) + __get_user(desc, pdesc + 1) || (desc.b & 0x1f00) ) return 0; *off |= (unsigned long)desc.a << 32; return 1; } - switch ( *ar & 0x1f00 ) + switch (*ar & 0x1f00) { case 0x0400: *off &= 0xffff; @@ -100,9 +97,11 @@ static inline bool check_stack_limit(unsigned int ar, unsigned int limit, (!(ar & _SEGMENT_EC) ? 
(esp - 1) <= limit : (esp - decr) > limit)); } -struct gate_op_ctxt { +struct gate_op_ctxt +{ struct x86_emulate_ctxt ctxt; - struct { + struct + { unsigned long base, limit; } cs; bool insn_fetch; @@ -116,7 +115,7 @@ static int read_mem(enum x86_segment seg, unsigned long offset, void *p_data, unsigned int rc = bytes, sel = 0; unsigned long addr = offset, limit = 0; - switch ( seg ) + switch (seg) { case x86_seg_cs: addr += goc->cs.base; @@ -146,8 +145,7 @@ static int read_mem(enum x86_segment seg, unsigned long offset, void *p_data, ASSERT(!goc->insn_fetch); if ( !pv_emul_read_descriptor(sel, current, &addr, &limit, &ar, 0) || - !(ar & _SEGMENT_S) || - !(ar & _SEGMENT_P) || + !(ar & _SEGMENT_S) || !(ar & _SEGMENT_P) || ((ar & _SEGMENT_CODE) && !(ar & _SEGMENT_WR)) ) return X86EMUL_UNHANDLEABLE; addr += offset; @@ -179,7 +177,7 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs) { struct vcpu *v = current; unsigned int sel, ar, dpl, nparm, insn_len; - struct gate_op_ctxt ctxt = { .ctxt.regs = regs, .insn_fetch = true }; + struct gate_op_ctxt ctxt = {.ctxt.regs = regs, .insn_fetch = true}; struct x86_emulate_state *state; unsigned long off, base, limit; uint16_t opnd_sel = 0; @@ -207,9 +205,7 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs) */ if ( !pv_emul_read_descriptor(regs->cs, v, &ctxt.cs.base, &ctxt.cs.limit, &ar, 0) || - !(ar & _SEGMENT_S) || - !(ar & _SEGMENT_P) || - !(ar & _SEGMENT_CODE) ) + !(ar & _SEGMENT_S) || !(ar & _SEGMENT_P) || !(ar & _SEGMENT_CODE) ) { pv_inject_hw_exception(TRAP_gp_fault, regs->error_code); return; @@ -228,7 +224,7 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs) return; } - switch ( ctxt.ctxt.opcode ) + switch (ctxt.ctxt.opcode) { unsigned int modrm_345; @@ -242,7 +238,7 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs) case 0xff: if ( x86_insn_modrm(state, NULL, &modrm_345) >= 3 ) break; - switch ( modrm_345 & 7 ) + switch (modrm_345 & 7) { enum x86_segment seg; @@ -252,8 +248,8 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs) case 3: ++jump; base = x86_insn_operand_ea(state, &seg); - rc = read_mem(seg, base + (x86_insn_opsize(state) >> 3), - &opnd_sel, sizeof(opnd_sel), &ctxt.ctxt); + rc = read_mem(seg, base + (x86_insn_opsize(state) >> 3), &opnd_sel, + sizeof(opnd_sel), &ctxt.ctxt); break; } break; @@ -268,21 +264,17 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs) return; } - if ( rc != X86EMUL_OKAY || - jump < 0 || - (opnd_sel & ~3) != regs->error_code || - dpl < (opnd_sel & 3) ) + if ( rc != X86EMUL_OKAY || jump < 0 || + (opnd_sel & ~3) != regs->error_code || dpl < (opnd_sel & 3) ) { pv_inject_hw_exception(TRAP_gp_fault, regs->error_code); return; } if ( !pv_emul_read_descriptor(sel, v, &base, &limit, &ar, 0) || - !(ar & _SEGMENT_S) || - !(ar & _SEGMENT_CODE) || - (!jump || (ar & _SEGMENT_EC) ? - ((ar >> 13) & 3) > (regs->cs & 3) : - ((ar >> 13) & 3) != (regs->cs & 3)) ) + !(ar & _SEGMENT_S) || !(ar & _SEGMENT_CODE) || + (!jump || (ar & _SEGMENT_EC) ? 
((ar >> 13) & 3) > (regs->cs & 3) + : ((ar >> 13) & 3) != (regs->cs & 3)) ) { pv_inject_hw_exception(TRAP_gp_fault, sel); return; @@ -302,18 +294,18 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs) { unsigned int ss, esp, *stkp; int rc; -#define push(item) do \ - { \ - --stkp; \ - esp -= 4; \ - rc = __put_user(item, stkp); \ - if ( rc ) \ - { \ - pv_inject_page_fault(PFEC_write_access, \ - (unsigned long)(stkp + 1) - rc); \ - return; \ - } \ - } while ( 0 ) +#define push(item) \ + do { \ + --stkp; \ + esp -= 4; \ + rc = __put_user(item, stkp); \ + if ( rc ) \ + { \ + pv_inject_page_fault(PFEC_write_access, \ + (unsigned long)(stkp + 1) - rc); \ + return; \ + } \ + } while ( 0 ) if ( ((ar >> 13) & 3) < (regs->cs & 3) ) { @@ -328,10 +320,8 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs) ss = v->arch.pv.kernel_ss; if ( (ss & 3) != (sel & 3) || !pv_emul_read_descriptor(ss, v, &base, &limit, &ar, 0) || - ((ar >> 13) & 3) != (sel & 3) || - !(ar & _SEGMENT_S) || - (ar & _SEGMENT_CODE) || - !(ar & _SEGMENT_WR) ) + ((ar >> 13) & 3) != (sel & 3) || !(ar & _SEGMENT_S) || + (ar & _SEGMENT_CODE) || !(ar & _SEGMENT_WR) ) { pv_inject_hw_exception(TRAP_invalid_tss, ss & ~3); return; @@ -354,30 +344,29 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs) { const unsigned int *ustkp; - if ( !pv_emul_read_descriptor(regs->ss, v, &base, - &limit, &ar, 0) || - ((ar >> 13) & 3) != (regs->cs & 3) || - !(ar & _SEGMENT_S) || - (ar & _SEGMENT_CODE) || - !(ar & _SEGMENT_WR) || + if ( !pv_emul_read_descriptor(regs->ss, v, &base, &limit, &ar, + 0) || + ((ar >> 13) & 3) != (regs->cs & 3) || !(ar & _SEGMENT_S) || + (ar & _SEGMENT_CODE) || !(ar & _SEGMENT_WR) || !check_stack_limit(ar, limit, esp + nparm * 4, nparm * 4) ) - return pv_inject_hw_exception(TRAP_gp_fault, regs->error_code); - ustkp = (unsigned int *)(unsigned long) - ((unsigned int)base + regs->esp + nparm * 4); + return pv_inject_hw_exception(TRAP_gp_fault, + regs->error_code); + ustkp = (unsigned int *)(unsigned long)((unsigned int)base + + regs->esp + nparm * 4); if ( !compat_access_ok(ustkp - nparm, 0 + nparm * 4) ) { pv_inject_hw_exception(TRAP_gp_fault, regs->error_code); return; } - do - { + do { unsigned int parm; --ustkp; rc = __get_user(parm, ustkp); if ( rc ) { - pv_inject_page_fault(0, (unsigned long)(ustkp + 1) - rc); + pv_inject_page_fault(0, + (unsigned long)(ustkp + 1) - rc); return; } push(parm); diff --git a/xen/arch/x86/pv/emul-inv-op.c b/xen/arch/x86/pv/emul-inv-op.c index 91d05790c2..ccf3d8543a 100644 --- a/xen/arch/x86/pv/emul-inv-op.c +++ b/xen/arch/x86/pv/emul-inv-op.c @@ -61,7 +61,7 @@ static int emulate_forced_invalid_op(struct cpu_user_regs *regs) eip += sizeof(sig); /* We only emulate CPUID. */ - if ( ( rc = copy_from_user(instr, (char *)eip, sizeof(instr))) != 0 ) + if ( (rc = copy_from_user(instr, (char *)eip, sizeof(instr))) != 0 ) { pv_inject_page_fault(0, eip + sizeof(instr) - rc); return EXCRET_fault_fixed; diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c index 3746e2ad54..0e841b6d96 100644 --- a/xen/arch/x86/pv/emul-priv-op.c +++ b/xen/arch/x86/pv/emul-priv-op.c @@ -45,9 +45,11 @@ #include "emulate.h" #include "mm.h" -struct priv_op_ctxt { +struct priv_op_ctxt +{ struct x86_emulate_ctxt ctxt; - struct { + struct + { unsigned long base, limit; } cs; char *io_emul_stub; @@ -101,12 +103,12 @@ static io_emul_stub_t *io_emul_stub_setup(struct priv_op_ctxt *ctxt, u8 opcode, return (void *)stub_va; } - /* Perform IOPL check between the vcpu's shadowed IOPL, and the assumed cpl. 
*/ static bool iopl_ok(const struct vcpu *v, const struct cpu_user_regs *regs) { - unsigned int cpl = guest_kernel_mode(v, regs) ? - (VM_ASSIST(v->domain, architectural_iopl) ? 0 : 1) : 3; + unsigned int cpl = guest_kernel_mode(v, regs) + ? (VM_ASSIST(v->domain, architectural_iopl) ? 0 : 1) + : 3; ASSERT((v->arch.pv.iopl & ~X86_EFLAGS_IOPL) == 0); @@ -114,8 +116,8 @@ static bool iopl_ok(const struct vcpu *v, const struct cpu_user_regs *regs) } /* Has the guest requested sufficient permission for this I/O access? */ -static bool guest_io_okay(unsigned int port, unsigned int bytes, - struct vcpu *v, struct cpu_user_regs *regs) +static bool guest_io_okay(unsigned int port, unsigned int bytes, struct vcpu *v, + struct cpu_user_regs *regs) { /* If in user mode, switch to kernel mode just to read I/O bitmap. */ const bool user_mode = !(v->arch.flags & TF_kernel_mode); @@ -125,7 +127,10 @@ static bool guest_io_okay(unsigned int port, unsigned int bytes, if ( (port + bytes) <= v->arch.pv.iobmp_limit ) { - union { uint8_t bytes[2]; uint16_t mask; } x; + union { + uint8_t bytes[2]; + uint16_t mask; + } x; /* * Grab permission bytes from guest space. Inaccessible bytes are @@ -134,14 +139,17 @@ static bool guest_io_okay(unsigned int port, unsigned int bytes, if ( user_mode ) toggle_guest_pt(v); - switch ( __copy_from_guest_offset(x.bytes, v->arch.pv.iobmp, - port>>3, 2) ) + switch ( + __copy_from_guest_offset(x.bytes, v->arch.pv.iobmp, port >> 3, 2)) { - default: x.bytes[0] = ~0; + default: + x.bytes[0] = ~0; /* fallthrough */ - case 1: x.bytes[1] = ~0; + case 1: + x.bytes[1] = ~0; /* fallthrough */ - case 0: break; + case 0: + break; } if ( user_mode ) @@ -205,10 +213,10 @@ static bool pci_cfg_ok(struct domain *currd, unsigned int start, start |= CF8_ADDR_HI(currd->arch.pci_cf8); } - return !write ? - xsm_pci_config_permission(XSM_HOOK, currd, machine_bdf, - start, start + size - 1, 0) == 0 : - pci_conf_write_intercept(0, machine_bdf, start, size, write) >= 0; + return !write ? 
xsm_pci_config_permission(XSM_HOOK, currd, machine_bdf, + start, start + size - 1, 0) == 0 + : pci_conf_write_intercept(0, machine_bdf, start, size, + write) >= 0; } static uint32_t guest_io_read(unsigned int port, unsigned int bytes, @@ -219,11 +227,14 @@ static uint32_t guest_io_read(unsigned int port, unsigned int bytes, if ( admin_io_okay(port, bytes, currd) ) { - switch ( bytes ) + switch (bytes) { - case 1: return inb(port); - case 2: return inw(port); - case 4: return inl(port); + case 1: + return inb(port); + case 2: + return inw(port); + case 4: + return inl(port); } } @@ -276,8 +287,7 @@ static uint32_t guest_io_read(unsigned int port, unsigned int bytes, return data; } -static unsigned int check_guest_io_breakpoint(struct vcpu *v, - unsigned int port, +static unsigned int check_guest_io_breakpoint(struct vcpu *v, unsigned int port, unsigned int len) { unsigned int width, i, match = 0; @@ -294,13 +304,20 @@ static unsigned int check_guest_io_breakpoint(struct vcpu *v, start = v->arch.dr[i]; width = 0; - switch ( (v->arch.dr7 >> - (DR_CONTROL_SHIFT + i * DR_CONTROL_SIZE)) & 0xc ) + switch ((v->arch.dr7 >> (DR_CONTROL_SHIFT + i * DR_CONTROL_SIZE)) & 0xc) { - case DR_LEN_1: width = 1; break; - case DR_LEN_2: width = 2; break; - case DR_LEN_4: width = 4; break; - case DR_LEN_8: width = 8; break; + case DR_LEN_1: + width = 1; + break; + case DR_LEN_2: + width = 2; + break; + case DR_LEN_4: + width = 4; + break; + case DR_LEN_8: + width = 8; + break; } if ( (start < (port + len)) && ((start + width) > port) ) @@ -310,8 +327,8 @@ static unsigned int check_guest_io_breakpoint(struct vcpu *v, return match; } -static int read_io(unsigned int port, unsigned int bytes, - unsigned long *val, struct x86_emulate_ctxt *ctxt) +static int read_io(unsigned int port, unsigned int bytes, unsigned long *val, + struct x86_emulate_ctxt *ctxt) { struct priv_op_ctxt *poc = container_of(ctxt, struct priv_op_ctxt, ctxt); struct vcpu *curr = current; @@ -339,12 +356,12 @@ static int read_io(unsigned int port, unsigned int bytes, return X86EMUL_OKAY; } -static void guest_io_write(unsigned int port, unsigned int bytes, - uint32_t data, struct domain *currd) +static void guest_io_write(unsigned int port, unsigned int bytes, uint32_t data, + struct domain *currd) { if ( admin_io_okay(port, bytes, currd) ) { - switch ( bytes ) + switch (bytes) { case 1: outb((uint8_t)data, port); @@ -408,8 +425,8 @@ static void guest_io_write(unsigned int port, unsigned int bytes, } } -static int write_io(unsigned int port, unsigned int bytes, - unsigned long val, struct x86_emulate_ctxt *ctxt) +static int write_io(unsigned int port, unsigned int bytes, unsigned long val, + struct x86_emulate_ctxt *ctxt) { struct priv_op_ctxt *poc = container_of(ctxt, struct priv_op_ctxt, ctxt); struct vcpu *curr = current; @@ -439,14 +456,13 @@ static int write_io(unsigned int port, unsigned int bytes, return X86EMUL_OKAY; } -static int read_segment(enum x86_segment seg, - struct segment_register *reg, +static int read_segment(enum x86_segment seg, struct segment_register *reg, struct x86_emulate_ctxt *ctxt) { /* Check if this is an attempt to access the I/O bitmap. */ if ( seg == x86_seg_tr ) { - switch ( ctxt->opcode ) + switch (ctxt->opcode) { case 0x6c ... 0x6f: /* ins / outs */ case 0xe4 ... 
0xe7: /* in / out (immediate port) */ @@ -461,19 +477,32 @@ static int read_segment(enum x86_segment seg, unsigned long limit; unsigned int sel, ar; - switch ( seg ) + switch (seg) { - case x86_seg_cs: sel = ctxt->regs->cs; break; - case x86_seg_ds: sel = read_sreg(ds); break; - case x86_seg_es: sel = read_sreg(es); break; - case x86_seg_fs: sel = read_sreg(fs); break; - case x86_seg_gs: sel = read_sreg(gs); break; - case x86_seg_ss: sel = ctxt->regs->ss; break; - default: return X86EMUL_UNHANDLEABLE; + case x86_seg_cs: + sel = ctxt->regs->cs; + break; + case x86_seg_ds: + sel = read_sreg(ds); + break; + case x86_seg_es: + sel = read_sreg(es); + break; + case x86_seg_fs: + sel = read_sreg(fs); + break; + case x86_seg_gs: + sel = read_sreg(gs); + break; + case x86_seg_ss: + sel = ctxt->regs->ss; + break; + default: + return X86EMUL_UNHANDLEABLE; } - if ( !pv_emul_read_descriptor(sel, current, ®->base, - &limit, &ar, 0) ) + if ( !pv_emul_read_descriptor(sel, current, ®->base, &limit, &ar, + 0) ) return X86EMUL_UNHANDLEABLE; reg->limit = limit; @@ -481,7 +510,7 @@ static int read_segment(enum x86_segment seg, } else { - switch ( seg ) + switch (seg) { default: if ( !is_x86_user_segment(seg) ) @@ -507,10 +536,10 @@ static int read_segment(enum x86_segment seg, } else reg->db = 1; - reg->s = 1; + reg->s = 1; reg->dpl = 3; - reg->p = 1; - reg->g = 1; + reg->p = 1; + reg->g = 1; } /* @@ -518,8 +547,7 @@ static int read_segment(enum x86_segment seg, * Also do this for consistency for non-conforming code segments. */ if ( (seg == x86_seg_ss || - (seg == x86_seg_cs && - !(reg->type & (_SEGMENT_EC >> 8)))) && + (seg == x86_seg_cs && !(reg->type & (_SEGMENT_EC >> 8)))) && guest_kernel_mode(current, ctxt->regs) ) reg->dpl = 0; @@ -546,15 +574,13 @@ static int pv_emul_virt_to_linear(unsigned long base, unsigned long offset, rc = X86EMUL_EXCEPTION; if ( unlikely(rc == X86EMUL_EXCEPTION) ) - x86_emul_hw_exception(seg != x86_seg_ss ? TRAP_gp_fault - : TRAP_stack_error, - 0, ctxt); + x86_emul_hw_exception( + seg != x86_seg_ss ? 
TRAP_gp_fault : TRAP_stack_error, 0, ctxt); return rc; } -static int rep_ins(uint16_t port, - enum x86_segment seg, unsigned long offset, +static int rep_ins(uint16_t port, enum x86_segment seg, unsigned long offset, unsigned int bytes_per_rep, unsigned long *reps, struct x86_emulate_ctxt *ctxt) { @@ -578,8 +604,7 @@ static int rep_ins(uint16_t port, if ( !sreg.p ) return X86EMUL_UNHANDLEABLE; - if ( !sreg.s || - (sreg.type & (_SEGMENT_CODE >> 8)) || + if ( !sreg.s || (sreg.type & (_SEGMENT_CODE >> 8)) || !(sreg.type & (_SEGMENT_WR >> 8)) ) { x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt); @@ -600,8 +625,8 @@ static int rep_ins(uint16_t port, if ( (rc = __copy_to_user((void *)addr, &data, bytes_per_rep)) != 0 ) { - x86_emul_pagefault(PFEC_write_access, - addr + bytes_per_rep - rc, ctxt); + x86_emul_pagefault(PFEC_write_access, addr + bytes_per_rep - rc, + ctxt); return X86EMUL_EXCEPTION; } @@ -620,8 +645,7 @@ static int rep_ins(uint16_t port, return X86EMUL_OKAY; } -static int rep_outs(enum x86_segment seg, unsigned long offset, - uint16_t port, +static int rep_outs(enum x86_segment seg, unsigned long offset, uint16_t port, unsigned int bytes_per_rep, unsigned long *reps, struct x86_emulate_ctxt *ctxt) { @@ -643,13 +667,11 @@ static int rep_outs(enum x86_segment seg, unsigned long offset, if ( !sreg.p ) return X86EMUL_UNHANDLEABLE; - if ( !sreg.s || - ((sreg.type & (_SEGMENT_CODE >> 8)) && - !(sreg.type & (_SEGMENT_WR >> 8))) ) + if ( !sreg.s || ((sreg.type & (_SEGMENT_CODE >> 8)) && + !(sreg.type & (_SEGMENT_WR >> 8))) ) { - x86_emul_hw_exception(seg != x86_seg_ss ? TRAP_gp_fault - : TRAP_stack_error, - 0, ctxt); + x86_emul_hw_exception( + seg != x86_seg_ss ? TRAP_gp_fault : TRAP_stack_error, 0, ctxt); return X86EMUL_EXCEPTION; } @@ -693,7 +715,7 @@ static int read_cr(unsigned int reg, unsigned long *val, { const struct vcpu *curr = current; - switch ( reg ) + switch (reg) { case 0: /* Read CR0 */ *val = (read_cr0() & ~X86_CR0_TS) | curr->arch.pv.ctrlreg[0]; @@ -737,7 +759,7 @@ static int write_cr(unsigned int reg, unsigned long val, { struct vcpu *curr = current; - switch ( reg ) + switch (reg) { case 0: /* Write CR0 */ if ( (val ^ read_cr0()) & ~X86_CR0_TS ) @@ -761,15 +783,15 @@ static int write_cr(unsigned int reg, unsigned long val, struct page_info *page; int rc; - gfn = !is_pv_32bit_domain(currd) - ? xen_cr3_to_pfn(val) : compat_cr3_to_pfn(val); + gfn = !is_pv_32bit_domain(currd) ? xen_cr3_to_pfn(val) + : compat_cr3_to_pfn(val); page = get_page_from_gfn(currd, gfn, NULL, P2M_ALLOC); if ( !page ) break; rc = new_guest_cr3(page_to_mfn(page)); put_page(page); - switch ( rc ) + switch (rc) { case 0: return X86EMUL_OKAY; @@ -832,7 +854,7 @@ static int read_msr(unsigned int reg, uint64_t *val, return ret; } - switch ( reg ) + switch (reg) { int rc; @@ -868,9 +890,9 @@ static int read_msr(unsigned int reg, uint64_t *val, * vendor-dependent behaviour. */ if ( is_pv_32bit_domain(currd) ) - *val &= ~(EFER_LME | EFER_LMA | - (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL - ? EFER_SCE : 0)); + *val &= ~( + EFER_LME | EFER_LMA | + (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ? EFER_SCE : 0)); return X86EMUL_OKAY; case MSR_K7_FID_VID_CTL: @@ -914,16 +936,16 @@ static int read_msr(unsigned int reg, uint64_t *val, *val = 0; return X86EMUL_OKAY; - case MSR_P6_PERFCTR(0) ... MSR_P6_PERFCTR(7): - case MSR_P6_EVNTSEL(0) ... MSR_P6_EVNTSEL(3): + case MSR_P6_PERFCTR(0)... MSR_P6_PERFCTR(7): + case MSR_P6_EVNTSEL(0)... MSR_P6_EVNTSEL(3): case MSR_CORE_PERF_FIXED_CTR0 ... 
MSR_CORE_PERF_FIXED_CTR2: case MSR_CORE_PERF_FIXED_CTR_CTRL ... MSR_CORE_PERF_GLOBAL_OVF_CTRL: if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ) { vpmu_msr = true; /* fall through */ - case MSR_AMD_FAM15H_EVNTSEL0 ... MSR_AMD_FAM15H_PERFCTR5: - case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3: + case MSR_AMD_FAM15H_EVNTSEL0 ... MSR_AMD_FAM15H_PERFCTR5: + case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3: if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) ) { if ( vpmu_do_rdmsr(reg, val) ) @@ -966,7 +988,7 @@ static int write_msr(unsigned int reg, uint64_t val, return ret; } - switch ( reg ) + switch (reg) { uint64_t temp; int rc; @@ -1034,14 +1056,13 @@ static int write_msr(unsigned int reg, uint64_t val, return X86EMUL_OKAY; if ( rdmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, temp) != 0 ) break; - if ( (pci_probe & PCI_PROBE_MASK) == PCI_PROBE_MMCONF ? - temp != val : - ((temp ^ val) & - ~(FAM10H_MMIO_CONF_ENABLE | - (FAM10H_MMIO_CONF_BUSRANGE_MASK << - FAM10H_MMIO_CONF_BUSRANGE_SHIFT) | - ((u64)FAM10H_MMIO_CONF_BASE_MASK << - FAM10H_MMIO_CONF_BASE_SHIFT))) ) + if ( (pci_probe & PCI_PROBE_MASK) == PCI_PROBE_MMCONF + ? temp != val + : ((temp ^ val) & ~(FAM10H_MMIO_CONF_ENABLE | + (FAM10H_MMIO_CONF_BUSRANGE_MASK + << FAM10H_MMIO_CONF_BUSRANGE_SHIFT) | + ((u64)FAM10H_MMIO_CONF_BASE_MASK + << FAM10H_MMIO_CONF_BASE_SHIFT))) ) goto invalid; if ( wrmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, val) == 0 ) return X86EMUL_OKAY; @@ -1091,15 +1112,15 @@ static int write_msr(unsigned int reg, uint64_t val, return X86EMUL_OKAY; break; - case MSR_P6_PERFCTR(0) ... MSR_P6_PERFCTR(7): - case MSR_P6_EVNTSEL(0) ... MSR_P6_EVNTSEL(3): + case MSR_P6_PERFCTR(0)... MSR_P6_PERFCTR(7): + case MSR_P6_EVNTSEL(0)... MSR_P6_EVNTSEL(3): case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR2: case MSR_CORE_PERF_FIXED_CTR_CTRL ... MSR_CORE_PERF_GLOBAL_OVF_CTRL: if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ) { vpmu_msr = true; - case MSR_AMD_FAM15H_EVNTSEL0 ... MSR_AMD_FAM15H_PERFCTR5: - case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3: + case MSR_AMD_FAM15H_EVNTSEL0 ... MSR_AMD_FAM15H_PERFCTR5: + case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3: if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) ) { if ( (vpmu_mode & XENPMU_MODE_ALL) && @@ -1120,9 +1141,10 @@ static int write_msr(unsigned int reg, uint64_t val, return X86EMUL_OKAY; if ( (rdmsr_safe(reg, temp) != 0) || (val != temp) ) - invalid: + invalid: gdprintk(XENLOG_WARNING, - "Domain attempted WRMSR %08x from 0x%016"PRIx64" to 0x%016"PRIx64"\n", + "Domain attempted WRMSR %08x from 0x%016" PRIx64 + " to 0x%016" PRIx64 "\n", reg, temp, val); return X86EMUL_OKAY; } @@ -1146,8 +1168,8 @@ static int _wbinvd(struct x86_emulate_ctxt *ctxt) return X86EMUL_OKAY; } -int pv_emul_cpuid(uint32_t leaf, uint32_t subleaf, - struct cpuid_leaf *res, struct x86_emulate_ctxt *ctxt) +int pv_emul_cpuid(uint32_t leaf, uint32_t subleaf, struct cpuid_leaf *res, + struct x86_emulate_ctxt *ctxt) { guest_cpuid(current, leaf, subleaf, res); @@ -1157,22 +1179,23 @@ int pv_emul_cpuid(uint32_t leaf, uint32_t subleaf, static int validate(const struct x86_emulate_state *state, struct x86_emulate_ctxt *ctxt) { - switch ( ctxt->opcode ) + switch (ctxt->opcode) { - case 0x6c ... 0x6f: /* ins / outs */ - case 0xe4 ... 0xe7: /* in / out (immediate port) */ - case 0xec ... 0xef: /* in / out (port in %dx) */ + case 0x6c ... 0x6f: /* ins / outs */ + case 0xe4 ... 0xe7: /* in / out (immediate port) */ + case 0xec ... 
0xef: /* in / out (port in %dx) */ case X86EMUL_OPC(0x0f, 0x06): /* clts */ case X86EMUL_OPC(0x0f, 0x09): /* wbinvd */ - case X86EMUL_OPC(0x0f, 0x20) ... - X86EMUL_OPC(0x0f, 0x23): /* mov to/from cr/dr */ - case X86EMUL_OPC(0x0f, 0x30): /* wrmsr */ - case X86EMUL_OPC(0x0f, 0x31): /* rdtsc */ - case X86EMUL_OPC(0x0f, 0x32): /* rdmsr */ - case X86EMUL_OPC(0x0f, 0xa2): /* cpuid */ + case X86EMUL_OPC(0x0f, 0x20)... X86EMUL_OPC(0x0f, + 0x23): /* mov to/from cr/dr */ + case X86EMUL_OPC(0x0f, 0x30): /* wrmsr */ + case X86EMUL_OPC(0x0f, 0x31): /* rdtsc */ + case X86EMUL_OPC(0x0f, 0x32): /* rdmsr */ + case X86EMUL_OPC(0x0f, 0xa2): /* cpuid */ return X86EMUL_OKAY; - case 0xfa: case 0xfb: /* cli / sti */ + case 0xfa: + case 0xfb: /* cli / sti */ if ( !iopl_ok(current, ctxt->regs) ) break; /* @@ -1191,7 +1214,7 @@ static int validate(const struct x86_emulate_state *state, if ( x86_insn_modrm(state, &modrm_rm, &modrm_reg) != 3 || (modrm_rm & 7) != 1 ) break; - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 2: /* xsetbv */ case 7: /* rdtscp */ @@ -1204,11 +1227,8 @@ static int validate(const struct x86_emulate_state *state, return X86EMUL_UNHANDLEABLE; } -static int insn_fetch(enum x86_segment seg, - unsigned long offset, - void *p_data, - unsigned int bytes, - struct x86_emulate_ctxt *ctxt) +static int insn_fetch(enum x86_segment seg, unsigned long offset, void *p_data, + unsigned int bytes, struct x86_emulate_ctxt *ctxt) { const struct priv_op_ctxt *poc = container_of(ctxt, struct priv_op_ctxt, ctxt); @@ -1240,25 +1260,24 @@ static int insn_fetch(enum x86_segment seg, return X86EMUL_OKAY; } - static const struct x86_emulate_ops priv_op_ops = { - .insn_fetch = insn_fetch, - .read = x86emul_unhandleable_rw, - .validate = validate, - .read_io = read_io, - .write_io = write_io, - .rep_ins = rep_ins, - .rep_outs = rep_outs, - .read_segment = read_segment, - .read_cr = read_cr, - .write_cr = write_cr, - .read_dr = x86emul_read_dr, - .write_dr = x86emul_write_dr, - .write_xcr = x86emul_write_xcr, - .read_msr = read_msr, - .write_msr = write_msr, - .cpuid = pv_emul_cpuid, - .wbinvd = _wbinvd, + .insn_fetch = insn_fetch, + .read = x86emul_unhandleable_rw, + .validate = validate, + .read_io = read_io, + .write_io = write_io, + .rep_ins = rep_ins, + .rep_outs = rep_outs, + .read_segment = read_segment, + .read_cr = read_cr, + .write_cr = write_cr, + .read_dr = x86emul_read_dr, + .write_dr = x86emul_write_dr, + .write_xcr = x86emul_write_xcr, + .read_msr = read_msr, + .write_msr = write_msr, + .cpuid = pv_emul_cpuid, + .wbinvd = _wbinvd, }; int pv_emulate_privileged_op(struct cpu_user_regs *regs) @@ -1273,11 +1292,9 @@ int pv_emulate_privileged_op(struct cpu_user_regs *regs) int rc; unsigned int eflags, ar; - if ( !pv_emul_read_descriptor(regs->cs, curr, &ctxt.cs.base, - &ctxt.cs.limit, &ar, 1) || - !(ar & _SEGMENT_S) || - !(ar & _SEGMENT_P) || - !(ar & _SEGMENT_CODE) ) + if ( !pv_emul_read_descriptor(regs->cs, curr, &ctxt.cs.base, &ctxt.cs.limit, + &ar, 1) || + !(ar & _SEGMENT_S) || !(ar & _SEGMENT_P) || !(ar & _SEGMENT_CODE) ) return 0; /* Mirror virtualized state into EFLAGS. */ @@ -1302,12 +1319,12 @@ int pv_emulate_privileged_op(struct cpu_user_regs *regs) * Nothing we allow to be emulated can change anything other than the * arithmetic bits, and the resume flag. 
*/ - ASSERT(!((regs->eflags ^ eflags) & - ~(X86_EFLAGS_RF | X86_EFLAGS_ARITH_MASK))); + ASSERT( + !((regs->eflags ^ eflags) & ~(X86_EFLAGS_RF | X86_EFLAGS_ARITH_MASK))); regs->eflags |= X86_EFLAGS_IF; regs->eflags &= ~X86_EFLAGS_IOPL; - switch ( rc ) + switch (rc) { case X86EMUL_OKAY: if ( ctxt.ctxt.retire.singlestep ) diff --git a/xen/arch/x86/pv/emulate.c b/xen/arch/x86/pv/emulate.c index 877dfda75e..6e6e6000ea 100644 --- a/xen/arch/x86/pv/emulate.c +++ b/xen/arch/x86/pv/emulate.c @@ -31,7 +31,7 @@ int pv_emul_read_descriptor(unsigned int sel, const struct vcpu *v, { seg_desc_t desc; - if ( sel < 4) + if ( sel < 4 ) desc.b = desc.a = 0; else if ( __get_user(desc, gdt_ldt_desc_ptr(sel)) ) return 0; @@ -41,8 +41,8 @@ int pv_emul_read_descriptor(unsigned int sel, const struct vcpu *v, *ar = desc.b & 0x00f0ff00; if ( !(desc.b & _SEGMENT_L) ) { - *base = ((desc.a >> 16) + ((desc.b & 0xff) << 16) + - (desc.b & 0xff000000)); + *base = + ((desc.a >> 16) + ((desc.b & 0xff) << 16) + (desc.b & 0xff000000)); *limit = (desc.a & 0xffff) | (desc.b & 0x000f0000); if ( desc.b & _SEGMENT_G ) *limit = ((*limit + 1) << 12) - 1; @@ -52,13 +52,13 @@ int pv_emul_read_descriptor(unsigned int sel, const struct vcpu *v, unsigned int a, l; unsigned char valid; - asm volatile ( - "larl %2,%0 ; setz %1" - : "=r" (a), "=qm" (valid) : "rm" (sel)); + asm volatile("larl %2,%0 ; setz %1" + : "=r"(a), "=qm"(valid) + : "rm"(sel)); BUG_ON(valid && ((a & 0x00f0ff00) != *ar)); - asm volatile ( - "lsll %2,%0 ; setz %1" - : "=r" (l), "=qm" (valid) : "rm" (sel)); + asm volatile("lsll %2,%0 ; setz %1" + : "=r"(l), "=qm"(valid) + : "rm"(sel)); BUG_ON(valid && (l != *limit)); } #endif diff --git a/xen/arch/x86/pv/grant_table.c b/xen/arch/x86/pv/grant_table.c index 5180334f42..283980b8a1 100644 --- a/xen/arch/x86/pv/grant_table.c +++ b/xen/arch/x86/pv/grant_table.c @@ -44,12 +44,12 @@ static unsigned int grant_to_pte_flags(unsigned int grant_flags, return pte_flags; } -int create_grant_pv_mapping(uint64_t addr, mfn_t frame, - unsigned int flags, unsigned int cache_flags) +int create_grant_pv_mapping(uint64_t addr, mfn_t frame, unsigned int flags, + unsigned int cache_flags) { struct vcpu *curr = current; struct domain *currd = curr->domain; - l1_pgentry_t nl1e, ol1e = { }, *pl1e; + l1_pgentry_t nl1e, ol1e = {}, *pl1e; struct page_info *page; mfn_t gl1mfn; int rc = GNTST_general_error; @@ -67,8 +67,8 @@ int create_grant_pv_mapping(uint64_t addr, mfn_t frame, /* addr must be suitably aligned, or we will corrupt adjacent ptes. 
*/ if ( !IS_ALIGNED(addr, sizeof(nl1e)) ) { - gdprintk(XENLOG_WARNING, - "Misaligned PTE address %"PRIx64"\n", addr); + gdprintk(XENLOG_WARNING, "Misaligned PTE address %" PRIx64 "\n", + addr); goto out; } @@ -91,7 +91,7 @@ int create_grant_pv_mapping(uint64_t addr, mfn_t frame, if ( !pl1e ) { gdprintk(XENLOG_WARNING, - "Could not find L1 PTE for linear address %"PRIx64"\n", + "Could not find L1 PTE for linear address %" PRIx64 "\n", addr); goto out; } @@ -111,17 +111,17 @@ int create_grant_pv_mapping(uint64_t addr, mfn_t frame, if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, mfn_x(gl1mfn), curr, 0) ) rc = GNTST_okay; - out_unlock: +out_unlock: page_unlock(page); - out_put: +out_put: put_page(page); - out_unmap: +out_unmap: unmap_domain_page(pl1e); if ( rc == GNTST_okay ) put_page_from_l1e(ol1e, currd); - out: +out: return rc; } @@ -150,7 +150,7 @@ static bool steal_linear_address(unsigned long linear, l1_pgentry_t *out) if ( !pl1e ) { gdprintk(XENLOG_WARNING, - "Could not find L1 PTE for linear %"PRIx64"\n", linear); + "Could not find L1 PTE for linear %" PRIx64 "\n", linear); goto out; } @@ -170,14 +170,14 @@ static bool steal_linear_address(unsigned long linear, l1_pgentry_t *out) if ( okay ) *out = ol1e; - out_unlock: +out_unlock: page_unlock(page); - out_put: +out_put: put_page(page); - out_unmap: +out_unmap: unmap_domain_page(pl1e); - out: +out: return okay; } @@ -186,8 +186,8 @@ static bool steal_linear_address(unsigned long linear, l1_pgentry_t *out) * new_addr has only ever been available via GNTABOP_unmap_and_replace, and * only when !(flags & GNTMAP_contains_pte). */ -int replace_grant_pv_mapping(uint64_t addr, mfn_t frame, - uint64_t new_addr, unsigned int flags) +int replace_grant_pv_mapping(uint64_t addr, mfn_t frame, uint64_t new_addr, + unsigned int flags) { struct vcpu *curr = current; struct domain *currd = curr->domain; @@ -204,8 +204,8 @@ int replace_grant_pv_mapping(uint64_t addr, mfn_t frame, */ if ( !is_pv_32bit_domain(currd) ) grant_pte_flags |= (grant_pte_flags & _PAGE_USER) - ? _PAGE_GLOBAL - : _PAGE_GUEST_KERNEL | _PAGE_USER; + ? _PAGE_GLOBAL + : _PAGE_GUEST_KERNEL | _PAGE_USER; /* * addr comes from Xen's active_entry tracking, and was used successfully @@ -281,7 +281,8 @@ int replace_grant_pv_mapping(uint64_t addr, mfn_t frame, (_PAGE_PRESENT | _PAGE_RW)) ) { gdprintk(XENLOG_ERR, - "PTE %"PRIpte" for %"PRIx64" doesn't match grant (%"PRIpte")\n", + "PTE %" PRIpte " for %" PRIx64 " doesn't match grant (%" PRIpte + ")\n", l1e_get_intpte(ol1e), addr, l1e_get_intpte(l1e_from_mfn(frame, grant_pte_flags))); goto out_unlock; @@ -290,20 +291,20 @@ int replace_grant_pv_mapping(uint64_t addr, mfn_t frame, if ( unlikely((l1e_get_flags(ol1e) ^ grant_pte_flags) & ~(_PAGE_AVAIL | PAGE_CACHE_ATTRS)) ) gdprintk(XENLOG_WARNING, - "PTE flags %x for %"PRIx64" don't match grant (%x)\n", + "PTE flags %x for %" PRIx64 " don't match grant (%x)\n", l1e_get_flags(ol1e), addr, grant_pte_flags); if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, mfn_x(gl1mfn), curr, 0) ) rc = GNTST_okay; - out_unlock: +out_unlock: page_unlock(page); - out_put: +out_put: put_page(page); - out_unmap: +out_unmap: unmap_domain_page(pl1e); - out: +out: /* If there was an error, we are still responsible for the stolen pte. 
*/ if ( rc ) put_page_from_l1e(nl1e, currd); diff --git a/xen/arch/x86/pv/hypercall.c b/xen/arch/x86/pv/hypercall.c index f452dd5c04..3bbe420f3f 100644 --- a/xen/arch/x86/pv/hypercall.c +++ b/xen/arch/x86/pv/hypercall.c @@ -24,14 +24,13 @@ #include #include -#define HYPERCALL(x) \ - [ __HYPERVISOR_ ## x ] = { (hypercall_fn_t *) do_ ## x, \ - (hypercall_fn_t *) do_ ## x } -#define COMPAT_CALL(x) \ - [ __HYPERVISOR_ ## x ] = { (hypercall_fn_t *) do_ ## x, \ - (hypercall_fn_t *) compat_ ## x } +#define HYPERCALL(x) \ + [__HYPERVISOR_##x] = {(hypercall_fn_t *)do_##x, (hypercall_fn_t *)do_##x} +#define COMPAT_CALL(x) \ + [__HYPERVISOR_## \ + x] = {(hypercall_fn_t *)do_##x, (hypercall_fn_t *)compat_##x} -#define do_arch_1 paging_domctl_continuation +#define do_arch_1 paging_domctl_continuation const hypercall_table_t pv_hypercall_table[] = { COMPAT_CALL(set_trap_table), @@ -132,19 +131,25 @@ void pv_hypercall(struct cpu_user_regs *regs) #ifndef NDEBUG /* Deliberately corrupt parameter regs not used by this hypercall. */ - switch ( hypercall_args_table[eax].native ) + switch (hypercall_args_table[eax].native) { - case 0: rdi = 0xdeadbeefdeadf00dUL; - case 1: rsi = 0xdeadbeefdeadf00dUL; - case 2: rdx = 0xdeadbeefdeadf00dUL; - case 3: r10 = 0xdeadbeefdeadf00dUL; - case 4: r8 = 0xdeadbeefdeadf00dUL; - case 5: r9 = 0xdeadbeefdeadf00dUL; + case 0: + rdi = 0xdeadbeefdeadf00dUL; + case 1: + rsi = 0xdeadbeefdeadf00dUL; + case 2: + rdx = 0xdeadbeefdeadf00dUL; + case 3: + r10 = 0xdeadbeefdeadf00dUL; + case 4: + r8 = 0xdeadbeefdeadf00dUL; + case 5: + r9 = 0xdeadbeefdeadf00dUL; } #endif if ( unlikely(tb_init_done) ) { - unsigned long args[6] = { rdi, rsi, rdx, r10, r8, r9 }; + unsigned long args[6] = {rdi, rsi, rdx, r10, r8, r9}; __trace_hypercall(TRC_PV_HYPERCALL_V2, eax, args); } @@ -155,14 +160,20 @@ void pv_hypercall(struct cpu_user_regs *regs) if ( !curr->hcall_preempted ) { /* Deliberately corrupt parameter regs used by this hypercall. */ - switch ( hypercall_args_table[eax].native ) + switch (hypercall_args_table[eax].native) { - case 6: regs->r9 = 0xdeadbeefdeadf00dUL; - case 5: regs->r8 = 0xdeadbeefdeadf00dUL; - case 4: regs->r10 = 0xdeadbeefdeadf00dUL; - case 3: regs->rdx = 0xdeadbeefdeadf00dUL; - case 2: regs->rsi = 0xdeadbeefdeadf00dUL; - case 1: regs->rdi = 0xdeadbeefdeadf00dUL; + case 6: + regs->r9 = 0xdeadbeefdeadf00dUL; + case 5: + regs->r8 = 0xdeadbeefdeadf00dUL; + case 4: + regs->r10 = 0xdeadbeefdeadf00dUL; + case 3: + regs->rdx = 0xdeadbeefdeadf00dUL; + case 2: + regs->rsi = 0xdeadbeefdeadf00dUL; + case 1: + regs->rdi = 0xdeadbeefdeadf00dUL; } } #endif @@ -178,40 +189,53 @@ void pv_hypercall(struct cpu_user_regs *regs) #ifndef NDEBUG /* Deliberately corrupt parameter regs not used by this hypercall. 
*/ - switch ( hypercall_args_table[eax].compat ) + switch (hypercall_args_table[eax].compat) { - case 0: ebx = 0xdeadf00d; - case 1: ecx = 0xdeadf00d; - case 2: edx = 0xdeadf00d; - case 3: esi = 0xdeadf00d; - case 4: edi = 0xdeadf00d; - case 5: ebp = 0xdeadf00d; + case 0: + ebx = 0xdeadf00d; + case 1: + ecx = 0xdeadf00d; + case 2: + edx = 0xdeadf00d; + case 3: + esi = 0xdeadf00d; + case 4: + edi = 0xdeadf00d; + case 5: + ebp = 0xdeadf00d; } #endif if ( unlikely(tb_init_done) ) { - unsigned long args[6] = { ebx, ecx, edx, esi, edi, ebp }; + unsigned long args[6] = {ebx, ecx, edx, esi, edi, ebp}; __trace_hypercall(TRC_PV_HYPERCALL_V2, eax, args); } curr->hcall_compat = true; - regs->eax = pv_hypercall_table[eax].compat(ebx, ecx, edx, esi, edi, ebp); + regs->eax = + pv_hypercall_table[eax].compat(ebx, ecx, edx, esi, edi, ebp); curr->hcall_compat = false; #ifndef NDEBUG if ( !curr->hcall_preempted ) { /* Deliberately corrupt parameter regs used by this hypercall. */ - switch ( hypercall_args_table[eax].compat ) + switch (hypercall_args_table[eax].compat) { - case 6: regs->ebp = 0xdeadf00d; - case 5: regs->edi = 0xdeadf00d; - case 4: regs->esi = 0xdeadf00d; - case 3: regs->edx = 0xdeadf00d; - case 2: regs->ecx = 0xdeadf00d; - case 1: regs->ebx = 0xdeadf00d; + case 6: + regs->ebp = 0xdeadf00d; + case 5: + regs->edi = 0xdeadf00d; + case 4: + regs->esi = 0xdeadf00d; + case 3: + regs->edx = 0xdeadf00d; + case 2: + regs->ecx = 0xdeadf00d; + case 1: + regs->ebx = 0xdeadf00d; } } #endif @@ -241,8 +265,8 @@ enum mc_disposition arch_do_multicall_call(struct mc_state *state) if ( (op < ARRAY_SIZE(pv_hypercall_table)) && pv_hypercall_table[op].native ) call->result = pv_hypercall_table[op].native( - call->args[0], call->args[1], call->args[2], - call->args[3], call->args[4], call->args[5]); + call->args[0], call->args[1], call->args[2], call->args[3], + call->args[4], call->args[5]); else call->result = -ENOSYS; } @@ -255,17 +279,18 @@ enum mc_disposition arch_do_multicall_call(struct mc_state *state) if ( (op < ARRAY_SIZE(pv_hypercall_table)) && pv_hypercall_table[op].compat ) call->result = pv_hypercall_table[op].compat( - call->args[0], call->args[1], call->args[2], - call->args[3], call->args[4], call->args[5]); + call->args[0], call->args[1], call->args[2], call->args[3], + call->args[4], call->args[5]); else call->result = -ENOSYS; } #endif return unlikely(op == __HYPERVISOR_iret) - ? mc_exit - : likely(guest_kernel_mode(curr, guest_cpu_user_regs())) - ? mc_continue : mc_preempt; + ? mc_exit + : likely(guest_kernel_mode(curr, guest_cpu_user_regs())) + ? mc_continue + : mc_preempt; } void hypercall_page_initialise_ring3_kernel(void *hypercall_page) @@ -279,14 +304,14 @@ void hypercall_page_initialise_ring3_kernel(void *hypercall_page) if ( i == __HYPERVISOR_iret ) continue; - *(u8 *)(p+ 0) = 0x51; /* push %rcx */ - *(u16 *)(p+ 1) = 0x5341; /* push %r11 */ - *(u8 *)(p+ 3) = 0xb8; /* mov $,%eax */ - *(u32 *)(p+ 4) = i; - *(u16 *)(p+ 8) = 0x050f; /* syscall */ - *(u16 *)(p+10) = 0x5b41; /* pop %r11 */ - *(u8 *)(p+12) = 0x59; /* pop %rcx */ - *(u8 *)(p+13) = 0xc3; /* ret */ + *(u8 *)(p + 0) = 0x51; /* push %rcx */ + *(u16 *)(p + 1) = 0x5341; /* push %r11 */ + *(u8 *)(p + 3) = 0xb8; /* mov $,%eax */ + *(u32 *)(p + 4) = i; + *(u16 *)(p + 8) = 0x050f; /* syscall */ + *(u16 *)(p + 10) = 0x5b41; /* pop %r11 */ + *(u8 *)(p + 12) = 0x59; /* pop %rcx */ + *(u8 *)(p + 13) = 0xc3; /* ret */ } /* @@ -295,12 +320,12 @@ void hypercall_page_initialise_ring3_kernel(void *hypercall_page) * calling it. 
*/ p = hypercall_page + (__HYPERVISOR_iret * 32); - *(u8 *)(p+ 0) = 0x51; /* push %rcx */ - *(u16 *)(p+ 1) = 0x5341; /* push %r11 */ - *(u8 *)(p+ 3) = 0x50; /* push %rax */ - *(u8 *)(p+ 4) = 0xb8; /* mov $__HYPERVISOR_iret,%eax */ - *(u32 *)(p+ 5) = __HYPERVISOR_iret; - *(u16 *)(p+ 9) = 0x050f; /* syscall */ + *(u8 *)(p + 0) = 0x51; /* push %rcx */ + *(u16 *)(p + 1) = 0x5341; /* push %r11 */ + *(u8 *)(p + 3) = 0x50; /* push %rax */ + *(u8 *)(p + 4) = 0xb8; /* mov $__HYPERVISOR_iret,%eax */ + *(u32 *)(p + 5) = __HYPERVISOR_iret; + *(u16 *)(p + 9) = 0x050f; /* syscall */ } void hypercall_page_initialise_ring1_kernel(void *hypercall_page) @@ -315,10 +340,10 @@ void hypercall_page_initialise_ring1_kernel(void *hypercall_page) if ( i == __HYPERVISOR_iret ) continue; - *(u8 *)(p+ 0) = 0xb8; /* mov $,%eax */ - *(u32 *)(p+ 1) = i; - *(u16 *)(p+ 5) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int $xx */ - *(u8 *)(p+ 7) = 0xc3; /* ret */ + *(u8 *)(p + 0) = 0xb8; /* mov $,%eax */ + *(u32 *)(p + 1) = i; + *(u16 *)(p + 5) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int $xx */ + *(u8 *)(p + 7) = 0xc3; /* ret */ } /* @@ -327,10 +352,10 @@ void hypercall_page_initialise_ring1_kernel(void *hypercall_page) * calling it. */ p = hypercall_page + (__HYPERVISOR_iret * 32); - *(u8 *)(p+ 0) = 0x50; /* push %eax */ - *(u8 *)(p+ 1) = 0xb8; /* mov $__HYPERVISOR_iret,%eax */ - *(u32 *)(p+ 2) = __HYPERVISOR_iret; - *(u16 *)(p+ 6) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int $xx */ + *(u8 *)(p + 0) = 0x50; /* push %eax */ + *(u8 *)(p + 1) = 0xb8; /* mov $__HYPERVISOR_iret,%eax */ + *(u32 *)(p + 2) = __HYPERVISOR_iret; + *(u16 *)(p + 6) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int $xx */ } /* @@ -342,4 +367,3 @@ void hypercall_page_initialise_ring1_kernel(void *hypercall_page) * indent-tabs-mode: nil * End: */ - diff --git a/xen/arch/x86/pv/iret.c b/xen/arch/x86/pv/iret.c index c359a1dbfd..94edfa77ec 100644 --- a/xen/arch/x86/pv/iret.c +++ b/xen/arch/x86/pv/iret.c @@ -53,12 +53,12 @@ unsigned long do_iret(void) if ( VM_ASSIST(v->domain, architectural_iopl) ) v->arch.pv.iopl = iret_saved.rflags & X86_EFLAGS_IOPL; - regs->rip = iret_saved.rip; - regs->cs = iret_saved.cs | 3; /* force guest privilege */ - regs->rflags = ((iret_saved.rflags & ~(X86_EFLAGS_IOPL|X86_EFLAGS_VM)) - | X86_EFLAGS_IF); - regs->rsp = iret_saved.rsp; - regs->ss = iret_saved.ss | 3; /* force guest privilege */ + regs->rip = iret_saved.rip; + regs->cs = iret_saved.cs | 3; /* force guest privilege */ + regs->rflags = ((iret_saved.rflags & ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM)) | + X86_EFLAGS_IF); + regs->rsp = iret_saved.rsp; + regs->ss = iret_saved.ss | 3; /* force guest privilege */ if ( !(iret_saved.flags & VGCF_in_syscall) ) { @@ -75,7 +75,7 @@ unsigned long do_iret(void) /* Saved %rax gets written back to regs->rax in entry.S. */ return iret_saved.rax; - exit_and_crash: +exit_and_crash: domain_crash(v->domain); return 0; } @@ -98,7 +98,7 @@ unsigned int compat_iret(void) /* Restore CS and EIP. 
*/ if ( unlikely(__get_user(regs->eip, (u32 *)regs->rsp + 1)) || - unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) ) + unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) ) { domain_crash(v->domain); return 0; @@ -138,7 +138,7 @@ unsigned int compat_iret(void) regs->esp, ksp); if ( ksp < regs->esp ) { - for (i = 1; i < 10; ++i) + for ( i = 1; i < 10; ++i ) { rc |= __get_user(x, (u32 *)regs->rsp + i); rc |= __put_user(x, (u32 *)(unsigned long)ksp + i); @@ -163,8 +163,8 @@ unsigned int compat_iret(void) ti = &v->arch.pv.trap_ctxt[TRAP_gp_fault]; if ( TI_GET_IF(ti) ) eflags &= ~X86_EFLAGS_IF; - regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF| - X86_EFLAGS_NT|X86_EFLAGS_TF); + regs->eflags &= + ~(X86_EFLAGS_VM | X86_EFLAGS_RF | X86_EFLAGS_NT | X86_EFLAGS_TF); if ( unlikely(__put_user(0, (u32 *)regs->rsp)) ) { domain_crash(v->domain); diff --git a/xen/arch/x86/pv/misc-hypercalls.c b/xen/arch/x86/pv/misc-hypercalls.c index 136fa10c96..82d1dc5335 100644 --- a/xen/arch/x86/pv/misc-hypercalls.c +++ b/xen/arch/x86/pv/misc-hypercalls.c @@ -65,7 +65,7 @@ long set_debugreg(struct vcpu *v, unsigned int reg, unsigned long value) { struct vcpu *curr = current; - switch ( reg ) + switch (reg) { case 0 ... 3: if ( !access_ok(value, sizeof(long)) ) @@ -74,12 +74,20 @@ long set_debugreg(struct vcpu *v, unsigned int reg, unsigned long value) v->arch.dr[reg] = value; if ( v == curr ) { - switch ( reg ) + switch (reg) { - case 0: write_debugreg(0, value); break; - case 1: write_debugreg(1, value); break; - case 2: write_debugreg(2, value); break; - case 3: write_debugreg(3, value); break; + case 0: + write_debugreg(0, value); + break; + case 1: + write_debugreg(1, value); + break; + case 2: + write_debugreg(2, value); + break; + case 3: + write_debugreg(3, value); + break; } } break; @@ -99,7 +107,7 @@ long set_debugreg(struct vcpu *v, unsigned int reg, unsigned long value) * Bit 12 reserved (set to 0). */ value &= ~DR_STATUS_RESERVED_ZERO; /* reserved bits => 0 */ - value |= DR_STATUS_RESERVED_ONE; /* reserved bits => 1 */ + value |= DR_STATUS_RESERVED_ONE; /* reserved bits => 1 */ v->arch.dr6 = value; if ( v == curr ) @@ -121,7 +129,7 @@ long set_debugreg(struct vcpu *v, unsigned int reg, unsigned long value) * Bits 11-12,14-15 reserved (set to 0). */ value &= ~DR_CONTROL_RESERVED_ZERO; /* reserved bits => 0 */ - value |= DR_CONTROL_RESERVED_ONE; /* reserved bits => 1 */ + value |= DR_CONTROL_RESERVED_ONE; /* reserved bits => 1 */ /* * Privileged bits: * GD (bit 13): must be 0. diff --git a/xen/arch/x86/pv/mm.c b/xen/arch/x86/pv/mm.c index f5ea00ca4e..bfb3922c3c 100644 --- a/xen/arch/x86/pv/mm.c +++ b/xen/arch/x86/pv/mm.c @@ -41,8 +41,7 @@ l1_pgentry_t *map_guest_l1e(unsigned long linear, mfn_t *gl1mfn) return NULL; /* Find this l1e and its enclosing l1mfn in the linear map. 
*/ - if ( __copy_from_user(&l2e, - &__linear_l2_table[l2_linear_offset(linear)], + if ( __copy_from_user(&l2e, &__linear_l2_table[l2_linear_offset(linear)], sizeof(l2_pgentry_t)) ) return NULL; diff --git a/xen/arch/x86/pv/ro-page-fault.c b/xen/arch/x86/pv/ro-page-fault.c index e7a7179dda..911d62d0cf 100644 --- a/xen/arch/x86/pv/ro-page-fault.c +++ b/xen/arch/x86/pv/ro-page-fault.c @@ -38,9 +38,10 @@ * Writable Pagetables */ -struct ptwr_emulate_ctxt { +struct ptwr_emulate_ctxt +{ unsigned long cr2; - l1_pgentry_t pte; + l1_pgentry_t pte; }; static int ptwr_emulated_read(enum x86_segment seg, unsigned long offset, @@ -53,7 +54,7 @@ static int ptwr_emulated_read(enum x86_segment seg, unsigned long offset, if ( !__addr_ok(addr) || (rc = __copy_from_user(p_data, (void *)addr, bytes)) ) { - x86_emul_pagefault(0, addr + bytes - rc, ctxt); /* Read fault. */ + x86_emul_pagefault(0, addr + bytes - rc, ctxt); /* Read fault. */ return X86EMUL_EXCEPTION; } @@ -101,28 +102,27 @@ static int ptwr_emulated_update(unsigned long addr, intpte_t *p_old, if ( (rc = copy_from_user(&full, (void *)addr, sizeof(full))) != 0 ) { x86_emul_pagefault(0, /* Read fault. */ - addr + sizeof(full) - rc, - ctxt); + addr + sizeof(full) - rc, ctxt); return X86EMUL_EXCEPTION; } /* Mask out bits provided by caller. */ full &= ~((((intpte_t)1 << (bytes * 8)) - 1) << offset); /* Shift the caller value and OR in the missing bits. */ - val &= (((intpte_t)1 << (bytes * 8)) - 1); + val &= (((intpte_t)1 << (bytes * 8)) - 1); val <<= offset; - val |= full; + val |= full; /* Also fill in missing parts of the cmpxchg old value. */ - old &= (((intpte_t)1 << (bytes * 8)) - 1); + old &= (((intpte_t)1 << (bytes * 8)) - 1); old <<= offset; - old |= full; + old |= full; } - pte = ptwr_ctxt->pte; - mfn = l1e_get_mfn(pte); + pte = ptwr_ctxt->pte; + mfn = l1e_get_mfn(pte); page = mfn_to_page(mfn); /* We are looking only for read-only mappings of p.t. pages. */ - ASSERT((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) == _PAGE_PRESENT); + ASSERT((l1e_get_flags(pte) & (_PAGE_RW | _PAGE_PRESENT)) == _PAGE_PRESENT); ASSERT(mfn_valid(mfn)); ASSERT((page->u.inuse.type_info & PGT_type_mask) == PGT_l1_page_table); ASSERT((page->u.inuse.type_info & PGT_count_mask) != 0); @@ -138,7 +138,7 @@ static int ptwr_emulated_update(unsigned long addr, intpte_t *p_old, } else { - switch ( ret = get_page_from_l1e(nl1e, d, d) ) + switch (ret = get_page_from_l1e(nl1e, d, d)) { default: if ( !is_pv_32bit_domain(d) || (bytes != 4) || @@ -154,8 +154,9 @@ static int ptwr_emulated_update(unsigned long addr, intpte_t *p_old, * zap the PRESENT bit on the assumption that the bottom half will * be written immediately after we return to the guest. 
*/ - gdprintk(XENLOG_DEBUG, "ptwr_emulate: fixing up invalid PAE PTE %" - PRIpte"\n", l1e_get_intpte(nl1e)); + gdprintk(XENLOG_DEBUG, + "ptwr_emulate: fixing up invalid PAE PTE %" PRIpte "\n", + l1e_get_intpte(nl1e)); l1e_remove_flags(nl1e, _PAGE_PRESENT); break; @@ -176,8 +177,8 @@ static int ptwr_emulated_update(unsigned long addr, intpte_t *p_old, if ( p_old ) { ol1e = l1e_from_intpte(old); - if ( !paging_cmpxchg_guest_entry(v, &l1e_get_intpte(*pl1e), - &old, l1e_get_intpte(nl1e), mfn) ) + if ( !paging_cmpxchg_guest_entry(v, &l1e_get_intpte(*pl1e), &old, + l1e_get_intpte(nl1e), mfn) ) ret = X86EMUL_UNHANDLEABLE; else if ( l1e_get_intpte(ol1e) == old ) ret = X86EMUL_OKAY; @@ -254,17 +255,17 @@ static int ptwr_emulated_cmpxchg(enum x86_segment seg, unsigned long offset, } static const struct x86_emulate_ops ptwr_emulate_ops = { - .read = ptwr_emulated_read, + .read = ptwr_emulated_read, .insn_fetch = ptwr_emulated_read, - .write = ptwr_emulated_write, - .cmpxchg = ptwr_emulated_cmpxchg, - .validate = pv_emul_is_mem_write, - .cpuid = pv_emul_cpuid, + .write = ptwr_emulated_write, + .cmpxchg = ptwr_emulated_cmpxchg, + .validate = pv_emul_is_mem_write, + .cpuid = pv_emul_cpuid, }; /* Write page fault handler: check if guest is trying to modify a PTE. */ -static int ptwr_do_page_fault(struct x86_emulate_ctxt *ctxt, - unsigned long addr, l1_pgentry_t pte) +static int ptwr_do_page_fault(struct x86_emulate_ctxt *ctxt, unsigned long addr, + l1_pgentry_t pte) { struct ptwr_emulate_ctxt ptwr_ctxt = { .cr2 = addr, @@ -304,26 +305,26 @@ static int ptwr_do_page_fault(struct x86_emulate_ctxt *ctxt, */ static const struct x86_emulate_ops mmio_ro_emulate_ops = { - .read = x86emul_unhandleable_rw, + .read = x86emul_unhandleable_rw, .insn_fetch = ptwr_emulated_read, - .write = mmio_ro_emulated_write, - .validate = pv_emul_is_mem_write, - .cpuid = pv_emul_cpuid, + .write = mmio_ro_emulated_write, + .validate = pv_emul_is_mem_write, + .cpuid = pv_emul_cpuid, }; static const struct x86_emulate_ops mmcfg_intercept_ops = { - .read = x86emul_unhandleable_rw, + .read = x86emul_unhandleable_rw, .insn_fetch = ptwr_emulated_read, - .write = mmcfg_intercept_write, - .validate = pv_emul_is_mem_write, - .cpuid = pv_emul_cpuid, + .write = mmcfg_intercept_write, + .validate = pv_emul_is_mem_write, + .cpuid = pv_emul_cpuid, }; /* Check if guest is trying to modify a r/o MMIO page. */ static int mmio_ro_do_page_fault(struct x86_emulate_ctxt *ctxt, unsigned long addr, l1_pgentry_t pte) { - struct mmio_ro_emulate_ctxt mmio_ro_ctxt = { .cr2 = addr }; + struct mmio_ro_emulate_ctxt mmio_ro_ctxt = {.cr2 = addr}; mfn_t mfn = l1e_get_mfn(pte); if ( mfn_valid(mfn) ) @@ -350,11 +351,11 @@ int pv_ro_page_fault(unsigned long addr, struct cpu_user_regs *regs) const struct domain *currd = current->domain; unsigned int addr_size = is_pv_32bit_domain(currd) ? 
32 : BITS_PER_LONG; struct x86_emulate_ctxt ctxt = { - .regs = regs, - .vendor = currd->arch.cpuid->x86_vendor, + .regs = regs, + .vendor = currd->arch.cpuid->x86_vendor, .addr_size = addr_size, - .sp_size = addr_size, - .lma = addr_size > 32, + .sp_size = addr_size, + .lma = addr_size > 32, }; int rc; bool mmio_ro; @@ -373,7 +374,7 @@ int pv_ro_page_fault(unsigned long addr, struct cpu_user_regs *regs) else rc = ptwr_do_page_fault(&ctxt, addr, pte); - switch ( rc ) + switch (rc) { case X86EMUL_EXCEPTION: /* diff --git a/xen/arch/x86/pv/shim.c b/xen/arch/x86/pv/shim.c index 324ca27f93..8d9b315c35 100644 --- a/xen/arch/x86/pv/shim.c +++ b/xen/arch/x86/pv/shim.c @@ -85,9 +85,9 @@ static int __init parse_shim_mem(const char *s) { do { if ( !strncmp(s, "min:", 4) ) - shim_min_nrpages = parse_size_and_unit(s+4, &s) >> PAGE_SHIFT; + shim_min_nrpages = parse_size_and_unit(s + 4, &s) >> PAGE_SHIFT; else if ( !strncmp(s, "max:", 4) ) - shim_max_nrpages = parse_size_and_unit(s+4, &s) >> PAGE_SHIFT; + shim_max_nrpages = parse_size_and_unit(s + 4, &s) >> PAGE_SHIFT; else shim_nrpages = parse_size_and_unit(s, &s) >> PAGE_SHIFT; } while ( *s++ == ',' ); @@ -123,8 +123,8 @@ static void __init mark_pfn_as_ram(struct e820map *e820, uint64_t pfn) if ( !e820_add_range(e820, pfn << PAGE_SHIFT, (pfn << PAGE_SHIFT) + PAGE_SIZE, E820_RAM) && !e820_change_range_type(e820, pfn << PAGE_SHIFT, - (pfn << PAGE_SHIFT) + PAGE_SIZE, - E820_RESERVED, E820_RAM) ) + (pfn << PAGE_SHIFT) + PAGE_SIZE, E820_RESERVED, + E820_RAM) ) panic("Unable to add/change memory type of pfn %#lx to RAM\n", pfn); } @@ -136,21 +136,23 @@ void __init pv_shim_fixup_e820(struct e820map *e820) ASSERT(xen_guest); -#define MARK_PARAM_RAM(p) ({ \ - rc = xen_hypercall_hvm_get_param(p, &pfn); \ - if ( rc ) \ - panic("Unable to get " #p "\n"); \ - mark_pfn_as_ram(e820, pfn); \ - ASSERT(i < ARRAY_SIZE(reserved_pages)); \ - reserved_pages[i++].mfn = pfn; \ -}) +#define MARK_PARAM_RAM(p) \ + ({ \ + rc = xen_hypercall_hvm_get_param(p, &pfn); \ + if ( rc ) \ + panic("Unable to get " #p "\n"); \ + mark_pfn_as_ram(e820, pfn); \ + ASSERT(i < ARRAY_SIZE(reserved_pages)); \ + reserved_pages[i++].mfn = pfn; \ + }) MARK_PARAM_RAM(HVM_PARAM_STORE_PFN); if ( !pv_console ) MARK_PARAM_RAM(HVM_PARAM_CONSOLE_PFN); #undef MARK_PARAM_RAM } -const struct platform_bad_page *__init pv_shim_reserved_pages(unsigned int *size) +const struct platform_bad_page *__init +pv_shim_reserved_pages(unsigned int *size) { ASSERT(xen_guest); @@ -159,9 +161,10 @@ const struct platform_bad_page *__init pv_shim_reserved_pages(unsigned int *size return reserved_pages; } -#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER| \ - _PAGE_GUEST_KERNEL) -#define COMPAT_L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED) +#define L1_PROT \ + (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_USER | \ + _PAGE_GUEST_KERNEL) +#define COMPAT_L1_PROT (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED) static void __init replace_va_mapping(struct domain *d, l4_pgentry_t *l4start, unsigned long va, mfn_t mfn) @@ -174,8 +177,8 @@ static void __init replace_va_mapping(struct domain *d, l4_pgentry_t *l4start, put_page_and_type(page); - *pl1e = l1e_from_mfn(mfn, (!is_pv_32bit_domain(d) ? L1_PROT - : COMPAT_L1_PROT)); + *pl1e = + l1e_from_mfn(mfn, (!is_pv_32bit_domain(d) ? 
L1_PROT : COMPAT_L1_PROT)); } static void evtchn_reserve(struct domain *d, unsigned int port) @@ -208,23 +211,24 @@ void __init pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start, uint64_t param = 0; long rc; -#define SET_AND_MAP_PARAM(p, si, va) ({ \ - rc = xen_hypercall_hvm_get_param(p, ¶m); \ - if ( rc ) \ - panic("Unable to get " #p "\n"); \ - (si) = param; \ - if ( va ) \ - { \ - share_xen_page_with_guest(mfn_to_page(_mfn(param)), d, SHARE_rw); \ - replace_va_mapping(d, l4start, va, _mfn(param)); \ - dom0_update_physmap(d, PFN_DOWN((va) - va_start), param, vphysmap); \ - } \ - else \ - { \ - BUG_ON(evtchn_allocate_port(d, param)); \ - evtchn_reserve(d, param); \ - } \ -}) +#define SET_AND_MAP_PARAM(p, si, va) \ + ({ \ + rc = xen_hypercall_hvm_get_param(p, ¶m); \ + if ( rc ) \ + panic("Unable to get " #p "\n"); \ + (si) = param; \ + if ( va ) \ + { \ + share_xen_page_with_guest(mfn_to_page(_mfn(param)), d, SHARE_rw); \ + replace_va_mapping(d, l4start, va, _mfn(param)); \ + dom0_update_physmap(d, PFN_DOWN((va)-va_start), param, vphysmap); \ + } \ + else \ + { \ + BUG_ON(evtchn_allocate_port(d, param)); \ + evtchn_reserve(d, param); \ + } \ + }) SET_AND_MAP_PARAM(HVM_PARAM_STORE_PFN, si->store_mfn, store_va); SET_AND_MAP_PARAM(HVM_PARAM_STORE_EVTCHN, si->store_evtchn, 0); SET_AND_MAP_PARAM(HVM_PARAM_CONSOLE_EVTCHN, si->console.domU.evtchn, 0); @@ -256,11 +260,11 @@ void __init pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start, rw_pv_hypercall_table = __va(__pa(pv_hypercall_table)); rw_pv_hypercall_table[__HYPERVISOR_event_channel_op].native = rw_pv_hypercall_table[__HYPERVISOR_event_channel_op].compat = - (hypercall_fn_t *)pv_shim_event_channel_op; + (hypercall_fn_t *)pv_shim_event_channel_op; rw_pv_hypercall_table[__HYPERVISOR_grant_table_op].native = rw_pv_hypercall_table[__HYPERVISOR_grant_table_op].compat = - (hypercall_fn_t *)pv_shim_grant_table_op; + (hypercall_fn_t *)pv_shim_grant_table_op; guest = d; @@ -274,8 +278,8 @@ void __init pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start, static void write_start_info(struct domain *d) { struct cpu_user_regs *regs = guest_cpu_user_regs(); - start_info_t *si = map_domain_page(_mfn(is_pv_32bit_domain(d) ? regs->edx - : regs->rdx)); + start_info_t *si = + map_domain_page(_mfn(is_pv_32bit_domain(d) ? regs->edx : regs->rdx)); uint64_t param; snprintf(si->magic, sizeof(si->magic), "xen-3.0-x86_%s", @@ -321,14 +325,14 @@ int pv_shim_shutdown(uint8_t reason) &old_console_pfn)); /* Pause the other vcpus before starting the migration. 
*/ - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) if ( v != current ) vcpu_pause_by_systemcontroller(v); rc = xen_hypercall_shutdown(SHUTDOWN_suspend); if ( rc ) { - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) if ( v != current ) vcpu_unpause_by_systemcontroller(v); @@ -346,8 +350,8 @@ int pv_shim_shutdown(uint8_t reason) BUG_ON(old_store_pfn != store_pfn); if ( !pv_console ) { - BUG_ON(xen_hypercall_hvm_get_param(HVM_PARAM_CONSOLE_PFN, - &console_pfn)); + BUG_ON( + xen_hypercall_hvm_get_param(HVM_PARAM_CONSOLE_PFN, &console_pfn)); BUG_ON(old_console_pfn != console_pfn); } @@ -378,8 +382,8 @@ int pv_shim_shutdown(uint8_t reason) BUG_ON(xen_hypercall_hvm_get_param(HVM_PARAM_STORE_EVTCHN, &store_evtchn)); BUG_ON(evtchn_allocate_port(d, store_evtchn)); evtchn_reserve(d, store_evtchn); - BUG_ON(xen_hypercall_hvm_get_param(HVM_PARAM_CONSOLE_EVTCHN, - &console_evtchn)); + BUG_ON( + xen_hypercall_hvm_get_param(HVM_PARAM_CONSOLE_EVTCHN, &console_evtchn)); BUG_ON(evtchn_allocate_port(d, console_evtchn)); evtchn_reserve(d, console_evtchn); @@ -404,7 +408,7 @@ int pv_shim_shutdown(uint8_t reason) */ write_start_info(d); - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { /* Unmap guest vcpu_info pages. */ unmap_vcpu_info(v); @@ -432,10 +436,11 @@ static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) struct evtchn_close close; long rc; - switch ( cmd ) + switch (cmd) { #define EVTCHN_FORWARD(cmd, port_field) \ - case EVTCHNOP_##cmd: { \ + case EVTCHNOP_##cmd: \ + { \ struct evtchn_##cmd op; \ \ if ( copy_from_guest(&op, arg, 1) != 0 ) \ @@ -460,13 +465,14 @@ static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) rc = -EFAULT; \ \ break; \ - } + } - EVTCHN_FORWARD(alloc_unbound, port) - EVTCHN_FORWARD(bind_interdomain, local_port) + EVTCHN_FORWARD(alloc_unbound, port) + EVTCHN_FORWARD(bind_interdomain, local_port) #undef EVTCHN_FORWARD - case EVTCHNOP_bind_virq: { + case EVTCHNOP_bind_virq: + { struct evtchn_bind_virq virq; struct evtchn_alloc_unbound alloc = { .dom = DOMID_SELF, @@ -501,7 +507,8 @@ static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_status: { + case EVTCHNOP_status: + { struct evtchn_status status; if ( copy_from_guest(&status, arg, 1) != 0 ) @@ -519,7 +526,8 @@ static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_bind_vcpu: { + case EVTCHNOP_bind_vcpu: + { struct evtchn_bind_vcpu vcpu; if ( copy_from_guest(&vcpu, arg, 1) != 0 ) @@ -540,7 +548,8 @@ static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_close: { + case EVTCHNOP_close: + { if ( copy_from_guest(&close, arg, 1) != 0 ) return -EFAULT; @@ -569,7 +578,8 @@ static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_bind_ipi: { + case EVTCHNOP_bind_ipi: + { struct evtchn_bind_ipi ipi; if ( copy_from_guest(&ipi, arg, 1) != 0 ) @@ -600,7 +610,8 @@ static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_unmask: { + case EVTCHNOP_unmask: + { struct evtchn_unmask unmask; if ( copy_from_guest(&unmask, arg, 1) != 0 ) @@ -612,7 +623,8 @@ static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_send: { + case EVTCHNOP_send: + { struct evtchn_send send; if ( copy_from_guest(&send, arg, 1) != 0 ) @@ -629,7 +641,8 @@ static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) 
break; } - case EVTCHNOP_reset: { + case EVTCHNOP_reset: + { struct evtchn_reset reset; if ( copy_from_guest(&reset, arg, 1) != 0 ) @@ -669,7 +682,7 @@ static long pv_shim_grant_table_op(unsigned int cmd, if ( count != 1 ) return -EINVAL; - switch ( cmd ) + switch (cmd) { case GNTTABOP_setup_table: { @@ -680,10 +693,9 @@ static long pv_shim_grant_table_op(unsigned int cmd, if ( unlikely(compat ? copy_from_guest(&cmp, uop, 1) : copy_from_guest(&nat, uop, 1)) || - unlikely(compat ? !compat_handle_okay(cmp.frame_list, - cmp.nr_frames) - : !guest_handle_okay(nat.frame_list, - nat.nr_frames)) ) + unlikely(compat + ? !compat_handle_okay(cmp.frame_list, cmp.nr_frames) + : !guest_handle_okay(nat.frame_list, nat.nr_frames)) ) { rc = -EFAULT; break; @@ -702,8 +714,8 @@ static long pv_shim_grant_table_op(unsigned int cmd, .dom = DOMID_SELF, }; - rc = xen_hypercall_grant_table_op(GNTTABOP_query_size, - &query_size, 1); + rc = xen_hypercall_grant_table_op(GNTTABOP_query_size, &query_size, + 1); if ( rc ) { spin_unlock(&grant_lock); @@ -711,8 +723,8 @@ static long pv_shim_grant_table_op(unsigned int cmd, } ASSERT(!grant_frames); - grant_frames = xzalloc_array(unsigned long, - query_size.max_nr_frames); + grant_frames = + xzalloc_array(unsigned long, query_size.max_nr_frames); if ( !grant_frames ) { spin_unlock(&grant_lock); @@ -835,8 +847,8 @@ long pv_shim_cpu_up(void *data) if ( rc ) { domain_unlock(d); - gprintk(XENLOG_ERR, "Failed to bring up CPU#%u: %ld\n", - v->vcpu_id, rc); + gprintk(XENLOG_ERR, "Failed to bring up CPU#%u: %ld\n", v->vcpu_id, + rc); return rc; } } @@ -888,7 +900,7 @@ static unsigned long batch_memory_op(unsigned int cmd, unsigned int order, unsigned long done = 0; set_xen_guest_handle(xmr.extent_start, pfns); - page_list_for_each ( pg, list ) + page_list_for_each (pg, list) { pfns[xmr.nr_extents++] = mfn_x(page_to_mfn(pg)); if ( xmr.nr_extents == ARRAY_SIZE(pfns) || !page_list_next(pg, list) ) @@ -911,7 +923,7 @@ void pv_shim_online_memory(unsigned int nr, unsigned int order) PAGE_LIST_HEAD(list); spin_lock(&balloon_lock); - page_list_for_each_safe ( page, tmp, &balloon ) + page_list_for_each_safe(page, tmp, &balloon) { /* TODO: add support for splitting high order memory chunks. 
*/ if ( page->v.free.order != order ) @@ -926,8 +938,8 @@ void pv_shim_online_memory(unsigned int nr, unsigned int order) if ( nr ) gprintk(XENLOG_WARNING, - "failed to allocate %u extents of order %u for onlining\n", - nr, order); + "failed to allocate %u extents of order %u for onlining\n", nr, + order); nr = batch_memory_op(XENMEM_populate_physmap, order, &list); while ( nr-- ) @@ -966,7 +978,6 @@ void pv_shim_offline_memory(unsigned int nr, unsigned int order) "failed to reserve %u extents of order %u for offlining\n", nr + 1, order); - nr = batch_memory_op(XENMEM_decrease_reservation, order, &list); spin_lock(&balloon_lock); while ( nr-- ) diff --git a/xen/arch/x86/pv/traps.c b/xen/arch/x86/pv/traps.c index 1740784ff2..05a7cba1dc 100644 --- a/xen/arch/x86/pv/traps.c +++ b/xen/arch/x86/pv/traps.c @@ -67,8 +67,8 @@ void pv_inject_event(const struct x86_event *event) ti = &curr->arch.pv.trap_ctxt[vector]; tb->flags = TBF_EXCEPTION; - tb->cs = ti->cs; - tb->eip = ti->address; + tb->cs = ti->cs; + tb->eip = ti->address; if ( event->type == X86_EVENTTYPE_HW_EXCEPTION && vector == TRAP_page_fault ) @@ -97,8 +97,7 @@ void pv_inject_event(const struct x86_event *event) if ( unlikely(null_trap_bounce(curr, tb)) ) { - gprintk(XENLOG_WARNING, - "Unhandled %s fault/trap [#%d, ec=%04x]\n", + gprintk(XENLOG_WARNING, "Unhandled %s fault/trap [#%d, ec=%04x]\n", trapstr(vector), vector, error_code); if ( vector == TRAP_page_fault ) @@ -136,10 +135,11 @@ bool set_guest_nmi_trapbounce(void) return !null_trap_bounce(curr, tb); } -struct softirq_trap { - struct domain *domain; /* domain to inject trap */ - struct vcpu *vcpu; /* vcpu to inject trap */ - unsigned int processor; /* physical cpu to inject trap */ +struct softirq_trap +{ + struct domain *domain; /* domain to inject trap */ + struct vcpu *vcpu; /* vcpu to inject trap */ + unsigned int processor; /* physical cpu to inject trap */ }; static DEFINE_PER_CPU(struct softirq_trap, softirq_trap); @@ -155,13 +155,10 @@ static void nmi_mce_softirq(void) * Set the tmp value unconditionally, so that the check in the iret * hypercall works. */ - cpumask_copy(st->vcpu->cpu_hard_affinity_tmp, - st->vcpu->cpu_hard_affinity); + cpumask_copy(st->vcpu->cpu_hard_affinity_tmp, st->vcpu->cpu_hard_affinity); - if ( (cpu != st->processor) || - (st->processor != st->vcpu->processor) ) + if ( (cpu != st->processor) || (st->processor != st->vcpu->processor) ) { - /* * We are on a different physical cpu. Make sure to wakeup the vcpu on * the specified processor. @@ -182,8 +179,7 @@ static void nmi_mce_softirq(void) void __init pv_trap_init(void) { /* The 32-on-64 hypercall vector is only accessible from ring 1. */ - _set_gate(idt_table + HYPERCALL_VECTOR, - SYS_DESC_trap_gate, 1, entry_int82); + _set_gate(idt_table + HYPERCALL_VECTOR, SYS_DESC_trap_gate, 1, entry_int82); /* Fast trap for int80 (faster than taking the #GP-fixup path). 
*/ _set_gate(idt_table + LEGACY_SYSCALL_VECTOR, SYS_DESC_trap_gate, 3, @@ -196,7 +192,7 @@ int pv_raise_interrupt(struct vcpu *v, uint8_t vector) { struct softirq_trap *st = &per_cpu(softirq_trap, smp_processor_id()); - switch ( vector ) + switch (vector) { case TRAP_nmi: if ( cmpxchgptr(&st->vcpu, NULL, v) ) diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c index 3440794275..54870027aa 100644 --- a/xen/arch/x86/setup.c +++ b/xen/arch/x86/setup.c @@ -44,7 +44,7 @@ #include #include #include -#include /* for bzimage_headroom */ +#include /* for bzimage_headroom */ #include /* for generic_apic_probe */ #include #include @@ -103,10 +103,10 @@ unsigned long __read_mostly xen_virt_end; DEFINE_PER_CPU(struct tss_struct, init_tss); -char __section(".bss.stack_aligned") __aligned(STACK_SIZE) - cpu0_stack[STACK_SIZE]; +char __section(".bss.stack_aligned") + __aligned(STACK_SIZE) cpu0_stack[STACK_SIZE]; -struct cpuinfo_x86 __read_mostly boot_cpu_data = { 0, 0, 0, 0, -1 }; +struct cpuinfo_x86 __read_mostly boot_cpu_data = {0, 0, 0, 0, -1}; unsigned long __read_mostly mmu_cr4_features = XEN_MINIMAL_CR4; @@ -128,7 +128,7 @@ static int __init parse_smep_param(const char *s) return 0; } - switch ( parse_bool(s, NULL) ) + switch (parse_bool(s, NULL)) { case 0: opt_smep = 0; @@ -159,7 +159,7 @@ static int __init parse_smap_param(const char *s) return 0; } - switch ( parse_bool(s, NULL) ) + switch (parse_bool(s, NULL)) { case 0: opt_smap = 0; @@ -289,7 +289,7 @@ static void __init normalise_cpu_order(void) unsigned int i, j, min_cpu; uint32_t apicid, diff, min_diff; - for_each_present_cpu ( i ) + for_each_present_cpu (i) { apicid = x86_cpu_to_apicid[i]; min_diff = min_cpu = ~0u; @@ -298,13 +298,12 @@ static void __init normalise_cpu_order(void) * Find remaining CPU with longest-prefix match on APIC ID. * Among identical longest-prefix matches, pick the smallest APIC ID. 
*/ - for ( j = cpumask_next(i, &cpu_present_map); - j < nr_cpu_ids; + for ( j = cpumask_next(i, &cpu_present_map); j < nr_cpu_ids; j = cpumask_next(j, &cpu_present_map) ) { diff = x86_cpu_to_apicid[j] ^ apicid; - while ( diff & (diff-1) ) - diff &= diff-1; + while ( diff & (diff - 1) ) + diff &= diff - 1; if ( (diff < min_diff) || ((diff == min_diff) && (x86_cpu_to_apicid[j] < x86_cpu_to_apicid[min_cpu])) ) @@ -329,7 +328,7 @@ static void __init normalise_cpu_order(void) } } -#define BOOTSTRAP_MAP_BASE (16UL << 20) +#define BOOTSTRAP_MAP_BASE (16UL << 20) #define BOOTSTRAP_MAP_LIMIT (1UL << L3_PAGETABLE_SHIFT) /* @@ -363,14 +362,14 @@ void *__init bootstrap_map(const module_t *mod) if ( end - start > BOOTSTRAP_MAP_LIMIT - map_cur ) return NULL; - map_pages_to_xen(map_cur, maddr_to_mfn(start), - PFN_DOWN(end - start), PAGE_HYPERVISOR); + map_pages_to_xen(map_cur, maddr_to_mfn(start), PFN_DOWN(end - start), + PAGE_HYPERVISOR); map_cur += end - start; return ret; } -static void *__init move_memory( - uint64_t dst, uint64_t src, unsigned int size, bool keep) +static void *__init move_memory(uint64_t dst, uint64_t src, unsigned int size, + bool keep) { unsigned int blksz = BOOTSTRAP_MAP_LIMIT - BOOTSTRAP_MAP_BASE; unsigned int mask = (1L << L2_PAGETABLE_SHIFT) - 1; @@ -418,16 +417,17 @@ static void *__init move_memory( #undef BOOTSTRAP_MAP_LIMIT -static uint64_t __init consider_modules( - uint64_t s, uint64_t e, uint32_t size, const module_t *mod, - unsigned int nr_mods, unsigned int this_mod) +static uint64_t __init consider_modules(uint64_t s, uint64_t e, uint32_t size, + const module_t *mod, + unsigned int nr_mods, + unsigned int this_mod) { unsigned int i; if ( s > e || e - s < size ) return 0; - for ( i = 0; i < nr_mods ; ++i ) + for ( i = 0; i < nr_mods; ++i ) { uint64_t start = (uint64_t)mod[i].mod_start << PAGE_SHIFT; uint64_t end = start + PAGE_ALIGN(mod[i].mod_end); @@ -437,8 +437,8 @@ static uint64_t __init consider_modules( if ( s < end && start < e ) { - end = consider_modules(end, e, size, mod + i + 1, - nr_mods - i - 1, this_mod - i - 1); + end = consider_modules(end, e, size, mod + i + 1, nr_mods - i - 1, + this_mod - i - 1); if ( end ) return end; @@ -475,34 +475,35 @@ static void __init setup_max_pdx(unsigned long top_page) static struct e820map __initdata boot_e820; #ifdef CONFIG_VIDEO -struct boot_video_info { - u8 orig_x; /* 0x00 */ - u8 orig_y; /* 0x01 */ - u8 orig_video_mode; /* 0x02 */ - u8 orig_video_cols; /* 0x03 */ - u8 orig_video_lines; /* 0x04 */ - u8 orig_video_isVGA; /* 0x05 */ - u16 orig_video_points; /* 0x06 */ +struct boot_video_info +{ + u8 orig_x; /* 0x00 */ + u8 orig_y; /* 0x01 */ + u8 orig_video_mode; /* 0x02 */ + u8 orig_video_cols; /* 0x03 */ + u8 orig_video_lines; /* 0x04 */ + u8 orig_video_isVGA; /* 0x05 */ + u16 orig_video_points; /* 0x06 */ /* VESA graphic mode -- linear frame buffer */ - u32 capabilities; /* 0x08 */ - u16 lfb_linelength; /* 0x0c */ - u16 lfb_width; /* 0x0e */ - u16 lfb_height; /* 0x10 */ - u16 lfb_depth; /* 0x12 */ - u32 lfb_base; /* 0x14 */ - u32 lfb_size; /* 0x18 */ - u8 red_size; /* 0x1c */ - u8 red_pos; /* 0x1d */ - u8 green_size; /* 0x1e */ - u8 green_pos; /* 0x1f */ - u8 blue_size; /* 0x20 */ - u8 blue_pos; /* 0x21 */ - u8 rsvd_size; /* 0x22 */ - u8 rsvd_pos; /* 0x23 */ - u16 vesapm_seg; /* 0x24 */ - u16 vesapm_off; /* 0x26 */ - u16 vesa_attrib; /* 0x28 */ + u32 capabilities; /* 0x08 */ + u16 lfb_linelength; /* 0x0c */ + u16 lfb_width; /* 0x0e */ + u16 lfb_height; /* 0x10 */ + u16 lfb_depth; /* 0x12 */ + u32 lfb_base; /* 0x14 
*/ + u32 lfb_size; /* 0x18 */ + u8 red_size; /* 0x1c */ + u8 red_pos; /* 0x1d */ + u8 green_size; /* 0x1e */ + u8 green_pos; /* 0x1f */ + u8 blue_size; /* 0x20 */ + u8 blue_pos; /* 0x21 */ + u8 rsvd_size; /* 0x22 */ + u8 rsvd_pos; /* 0x23 */ + u16 vesapm_seg; /* 0x24 */ + u16 vesapm_off; /* 0x26 */ + u16 vesa_attrib; /* 0x28 */ }; extern struct boot_video_info boot_vid_info; #endif @@ -552,7 +553,7 @@ static void __init kexec_reserve_area(struct e820map *e820) { #ifdef CONFIG_KEXEC unsigned long kdump_start = kexec_crash_area.start; - unsigned long kdump_size = kexec_crash_area.size; + unsigned long kdump_size = kexec_crash_area.size; static bool __initdata is_reserved = false; kdump_size = (kdump_size + PAGE_SIZE - 1) & PAGE_MASK; @@ -565,13 +566,14 @@ static void __init kexec_reserve_area(struct e820map *e820) if ( !reserve_e820_ram(e820, kdump_start, kdump_start + kdump_size) ) { printk("Kdump: DISABLED (failed to reserve %luMB (%lukB) at %#lx)" - "\n", kdump_size >> 20, kdump_size >> 10, kdump_start); + "\n", + kdump_size >> 20, kdump_size >> 10, kdump_start); kexec_crash_area.start = kexec_crash_area.size = 0; } else { - printk("Kdump: %luMB (%lukB) at %#lx\n", - kdump_size >> 20, kdump_size >> 10, kdump_start); + printk("Kdump: %luMB (%lukB) at %#lx\n", kdump_size >> 20, + kdump_size >> 10, kdump_start); } #endif } @@ -607,12 +609,12 @@ static void noinline init_done(void) if ( using_2M_mapping() ) { start = (unsigned long)&__2M_init_start, - end = (unsigned long)&__2M_init_end; + end = (unsigned long)&__2M_init_end; } else { start = (unsigned long)&__init_begin; - end = (unsigned long)&__init_end; + end = (unsigned long)&__init_end; } destroy_xen_mappings(start, end); @@ -625,7 +627,7 @@ static void noinline init_done(void) /* Reinitalise all state referring to the old virtual address of the stack. */ static void __init noreturn reinit_bsp_stack(void) { - unsigned long *stack = (void*)(get_stack_bottom() & ~(STACK_SIZE - 1)); + unsigned long *stack = (void *)(get_stack_bottom() & ~(STACK_SIZE - 1)); /* Update TSS and ISTs */ load_system_tables(); @@ -652,15 +654,16 @@ static bool __init loader_is_grub2(const char *loader_name) return (p != NULL) && (p[5] != '0'); } -static char * __init cmdline_cook(char *p, const char *loader_name) +static char *__init cmdline_cook(char *p, const char *loader_name) { - p = p ? : ""; + p = p ?: ""; /* Strip leading whitespace. */ while ( *p == ' ' ) p++; - /* GRUB2 and PVH don't not include image name as first item on command line. */ + /* GRUB2 and PVH don't not include image name as first item on command line. + */ if ( xen_guest || loader_is_grub2(loader_name) ) return p; @@ -685,10 +688,7 @@ void __init noreturn __start_xen(unsigned long mbi_p) bool acpi_boot_table_init_done = false, relocated = false; int ret; struct ns16550_defaults ns16550 = { - .data_bits = 8, - .parity = 'n', - .stop_bits = 1 - }; + .data_bits = 8, .parity = 'n', .stop_bits = 1}; struct xen_domctl_createdomain dom0_cfg = { .flags = XEN_DOMCTL_CDF_s3_integrity, .max_evtchn_port = -1, @@ -734,13 +734,12 @@ void __init noreturn __start_xen(unsigned long mbi_p) mod = __va(mbi->mods_addr); } - loader = (mbi->flags & MBI_LOADERNAME) - ? (char *)__va(mbi->boot_loader_name) : "unknown"; + loader = (mbi->flags & MBI_LOADERNAME) ? (char *)__va(mbi->boot_loader_name) + : "unknown"; /* Parse the command-line options. */ - cmdline = cmdline_cook((mbi->flags & MBI_CMDLINE) ? - __va(mbi->cmdline) : NULL, - loader); + cmdline = cmdline_cook( + (mbi->flags & MBI_CMDLINE) ? 
__va(mbi->cmdline) : NULL, loader); if ( (kextra = strstr(cmdline, " -- ")) != NULL ) { /* @@ -750,7 +749,8 @@ void __init noreturn __start_xen(unsigned long mbi_p) */ *kextra = '\0'; kextra += 3; - while ( kextra[1] == ' ' ) kextra++; + while ( kextra[1] == ' ' ) + kextra++; } cmdline_parse(cmdline); @@ -763,14 +763,14 @@ void __init noreturn __start_xen(unsigned long mbi_p) parse_video_info(); rdmsrl(MSR_EFER, this_cpu(efer)); - asm volatile ( "mov %%cr4,%0" : "=r" (get_cpu_info()->cr4) ); + asm volatile("mov %%cr4,%0" : "=r"(get_cpu_info()->cr4)); /* We initialise the serial devices very early so we can get debugging. */ ns16550.io_base = 0x3f8; - ns16550.irq = 4; + ns16550.irq = 4; ns16550_init(0, &ns16550); ns16550.io_base = 0x2f8; - ns16550.irq = 3; + ns16550.irq = 3; ns16550_init(1, &ns16550); ehci_dbgp_init(); console_init_preirq(); @@ -788,7 +788,7 @@ void __init noreturn __start_xen(unsigned long mbi_p) printk("Video information:\n"); /* Print VGA display mode information. */ - switch ( vga_console_info.video_type ) + switch (vga_console_info.video_type) { case XEN_VGATYPE_TEXT_MODE_3: printk(" VGA is text mode %dx%d, font 8x%d\n", @@ -812,10 +812,8 @@ void __init noreturn __start_xen(unsigned long mbi_p) if ( bootsym(boot_edid_caps) != 0x1313 ) { u16 caps = bootsym(boot_edid_caps); - printk(" VBE/DDC methods:%s%s%s; ", - (caps & 1) ? " V1" : "", - (caps & 2) ? " V2" : "", - !(caps & 3) ? " none" : ""); + printk(" VBE/DDC methods:%s%s%s; ", (caps & 1) ? " V1" : "", + (caps & 2) ? " V2" : "", !(caps & 3) ? " none" : ""); printk("EDID transfer time: %d seconds\n", caps >> 8); if ( *(u32 *)bootsym(boot_edid_info) == 0x13131313 ) { @@ -831,10 +829,8 @@ void __init noreturn __start_xen(unsigned long mbi_p) #endif printk("Disc information:\n"); - printk(" Found %d MBR signatures\n", - bootsym(boot_mbr_signature_nr)); - printk(" Found %d EDD information structures\n", - bootsym(boot_edd_info_nr)); + printk(" Found %d MBR signatures\n", bootsym(boot_mbr_signature_nr)); + printk(" Found %d EDD information structures\n", bootsym(boot_edd_info_nr)); /* Check that we have at least one Multiboot module. */ if ( !(mbi->flags & MBI_MODULES) || (mbi->mods_count == 0) ) @@ -862,9 +858,8 @@ void __init noreturn __start_xen(unsigned long mbi_p) } else if ( efi_enabled(EFI_BOOT) ) memmap_type = "EFI"; - else if ( (e820_raw.nr_map = - copy_bios_e820(e820_raw.map, - ARRAY_SIZE(e820_raw.map))) != 0 ) + else if ( (e820_raw.nr_map = copy_bios_e820( + e820_raw.map, ARRAY_SIZE(e820_raw.map))) != 0 ) { memmap_type = "Xen-e820"; } @@ -940,10 +935,10 @@ void __init noreturn __start_xen(unsigned long mbi_p) * Supplement the heuristics in l1tf_calculations() by assuming that * anything referenced in the E820 may be cacheable. */ - l1tf_safe_maddr = - max(l1tf_safe_maddr, - ROUNDUP(e820_raw.map[e820_raw.nr_map - 1].addr + - e820_raw.map[e820_raw.nr_map - 1].size, PAGE_SIZE)); + l1tf_safe_maddr = max( + l1tf_safe_maddr, ROUNDUP(e820_raw.map[e820_raw.nr_map - 1].addr + + e820_raw.map[e820_raw.nr_map - 1].size, + PAGE_SIZE)); } /* Create a temporary copy of the E820 map. */ @@ -1005,7 +1000,7 @@ void __init noreturn __start_xen(unsigned long mbi_p) * we can relocate the dom0 kernel and other multiboot modules. Also, on * x86/64, we relocate Xen to higher memory. 
*/ - for ( i = boot_e820.nr_map-1; i >= 0; i-- ) + for ( i = boot_e820.nr_map - 1; i >= 0; i-- ) { uint64_t s, e, mask = (1UL << L2_PAGETABLE_SHIFT) - 1; uint64_t end, limit = ARRAY_SIZE(l2_identmap) << L2_PAGETABLE_SHIFT; @@ -1034,8 +1029,8 @@ void __init noreturn __start_xen(unsigned long mbi_p) if ( !xen_phys_start && e <= limit ) { /* Don't overlap with modules. */ - end = consider_modules(s, e, reloc_size + mask, - mod, mbi->mods_count, -1); + end = consider_modules(s, e, reloc_size + mask, mod, + mbi->mods_count, -1); end &= ~mask; } else @@ -1059,10 +1054,11 @@ void __init noreturn __start_xen(unsigned long mbi_p) bootsym(trampoline_xen_phys_start) = e; /* - * No PTEs pointing above this address are candidates for relocation. - * Due to possibility of partial overlap of the end of source image - * and the beginning of region for destination image some PTEs may - * point to addresses in range [e, e + XEN_IMG_OFFSET). + * No PTEs pointing above this address are candidates for + * relocation. Due to possibility of partial overlap of the end of + * source image and the beginning of region for destination image + * some PTEs may point to addresses in range [e, e + + * XEN_IMG_OFFSET). */ pte_update_limit = PFN_DOWN(e + XEN_IMG_OFFSET); @@ -1077,12 +1073,11 @@ void __init noreturn __start_xen(unsigned long mbi_p) /* Walk initial pagetables, relocating page directory entries. */ pl4e = __va(__pa(idle_pg_table)); - for ( i = 0 ; i < L4_PAGETABLE_ENTRIES; i++, pl4e++ ) + for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++, pl4e++ ) { if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) ) continue; - *pl4e = l4e_from_intpte(l4e_get_intpte(*pl4e) + - xen_phys_start); + *pl4e = l4e_from_intpte(l4e_get_intpte(*pl4e) + xen_phys_start); pl3e = l4e_to_l3e(*pl4e); for ( j = 0; j < L3_PAGETABLE_ENTRIES; j++, pl3e++ ) { @@ -1091,8 +1086,8 @@ void __init noreturn __start_xen(unsigned long mbi_p) (l3e_get_flags(*pl3e) & _PAGE_PSE) || (l3e_get_pfn(*pl3e) >= pte_update_limit) ) continue; - *pl3e = l3e_from_intpte(l3e_get_intpte(*pl3e) + - xen_phys_start); + *pl3e = + l3e_from_intpte(l3e_get_intpte(*pl3e) + xen_phys_start); pl2e = l3e_to_l2e(*pl3e); for ( k = 0; k < L2_PAGETABLE_ENTRIES; k++, pl2e++ ) { @@ -1115,7 +1110,7 @@ void __init noreturn __start_xen(unsigned long mbi_p) */ BUG_ON(using_2M_mapping() && l2_table_offset((unsigned long)_erodata) == - l2_table_offset((unsigned long)_stext)); + l2_table_offset((unsigned long)_stext)); *pl2e++ = l2e_from_pfn(xen_phys_start >> PAGE_SHIFT, PAGE_HYPERVISOR_RX | _PAGE_PSE); for ( i = 1; i < L2_PAGETABLE_ENTRIES; i++, pl2e++ ) @@ -1128,8 +1123,8 @@ void __init noreturn __start_xen(unsigned long mbi_p) if ( !using_2M_mapping() ) { - *pl2e = l2e_from_intpte(l2e_get_intpte(*pl2e) + - xen_phys_start); + *pl2e = + l2e_from_intpte(l2e_get_intpte(*pl2e) + xen_phys_start); continue; } @@ -1137,18 +1132,22 @@ void __init noreturn __start_xen(unsigned long mbi_p) { flags = PAGE_HYPERVISOR_RX | _PAGE_PSE; } - else if ( i >= l2_table_offset((unsigned long)&__2M_rodata_start) && - i < l2_table_offset((unsigned long)&__2M_rodata_end) ) + else if ( i >= l2_table_offset( + (unsigned long)&__2M_rodata_start) && + i < l2_table_offset((unsigned long)&__2M_rodata_end) ) { flags = PAGE_HYPERVISOR_RO | _PAGE_PSE; } - else if ( i >= l2_table_offset((unsigned long)&__2M_init_start) && - i < l2_table_offset((unsigned long)&__2M_init_end) ) + else if ( i >= l2_table_offset( + (unsigned long)&__2M_init_start) && + i < l2_table_offset((unsigned long)&__2M_init_end) ) { flags = PAGE_HYPERVISOR_RWX | 
_PAGE_PSE; } - else if ( (i >= l2_table_offset((unsigned long)&__2M_rwdata_start) && - i < l2_table_offset((unsigned long)&__2M_rwdata_end)) ) + else if ( (i >= l2_table_offset( + (unsigned long)&__2M_rwdata_start) && + i < l2_table_offset( + (unsigned long)&__2M_rwdata_end)) ) { flags = PAGE_HYPERVISOR_RW | _PAGE_PSE; } @@ -1158,23 +1157,23 @@ void __init noreturn __start_xen(unsigned long mbi_p) continue; } - *pl2e = l2e_from_paddr( - l2e_get_paddr(*pl2e) + xen_phys_start, flags); + *pl2e = l2e_from_paddr(l2e_get_paddr(*pl2e) + xen_phys_start, + flags); } /* Re-sync the stack and then switch to relocated pagetables. */ - asm volatile ( - "rep movsq ; " /* re-sync the stack */ - "movq %%cr4,%%rsi ; " - "andb $0x7f,%%sil ; " - "movq %%rsi,%%cr4 ; " /* CR4.PGE == 0 */ - "movq %[pg],%%cr3 ; " /* CR3 == new pagetables */ - "orb $0x80,%%sil ; " - "movq %%rsi,%%cr4 " /* CR4.PGE == 1 */ - : "=&S" (i), "=&D" (i), "=&c" (i) /* All outputs discarded. */ - : [pg] "r" (__pa(idle_pg_table)), "0" (cpu0_stack), - "1" (__va(__pa(cpu0_stack))), "2" (STACK_SIZE / 8) - : "memory" ); + asm volatile("rep movsq ; " /* re-sync the stack */ + "movq %%cr4,%%rsi ; " + "andb $0x7f,%%sil ; " + "movq %%rsi,%%cr4 ; " /* CR4.PGE == 0 */ + "movq %[pg],%%cr3 ; " /* CR3 == new pagetables */ + "orb $0x80,%%sil ; " + "movq %%rsi,%%cr4 " /* CR4.PGE == 1 */ + : "=&S"(i), "=&D"(i), + "=&c"(i) /* All outputs discarded. */ + : [pg] "r"(__pa(idle_pg_table)), "0"(cpu0_stack), + "1"(__va(__pa(cpu0_stack))), "2"(STACK_SIZE / 8) + : "memory"); bootstrap_map(NULL); @@ -1191,15 +1190,14 @@ void __init noreturn __start_xen(unsigned long mbi_p) continue; /* Don't overlap with other modules (or Xen itself). */ - end = consider_modules(s, e, size, mod, - mbi->mods_count + relocated, j); + end = consider_modules(s, e, size, mod, mbi->mods_count + relocated, + j); if ( highmem_start && end > highmem_start ) continue; if ( s < end && - (headroom || - ((end - size) >> PAGE_SHIFT) > mod[j].mod_start) ) + (headroom || ((end - size) >> PAGE_SHIFT) > mod[j].mod_start) ) { move_memory(end - size + headroom, (uint64_t)mod[j].mod_start << PAGE_SHIFT, @@ -1266,12 +1264,11 @@ void __init noreturn __start_xen(unsigned long mbi_p) /* Only page alignment required now. 
*/ s = (boot_e820.map[i].addr + mask) & ~mask; e = (boot_e820.map[i].addr + boot_e820.map[i].size) & ~mask; - s = max_t(uint64_t, s, 1<<20); + s = max_t(uint64_t, s, 1 << 20); if ( (boot_e820.map[i].type != E820_RAM) || (s >= e) ) continue; - if ( !acpi_boot_table_init_done && - s >= (1ULL << 32) && + if ( !acpi_boot_table_init_done && s >= (1ULL << 32) && !acpi_boot_table_init() ) { acpi_boot_table_init_done = true; @@ -1283,7 +1280,7 @@ void __init noreturn __start_xen(unsigned long mbi_p) { if ( pfn_to_pdx(s >> PAGE_SHIFT) >= max_pdx ) { - for ( j = i - 1; ; --j ) + for ( j = i - 1;; --j ) { if ( boot_e820.map[j].type == E820_RAM ) break; @@ -1292,8 +1289,8 @@ void __init noreturn __start_xen(unsigned long mbi_p) map_e = boot_e820.map[j].addr + boot_e820.map[j].size; for ( j = 0; j < mbi->mods_count; ++j ) { - uint64_t end = pfn_to_paddr(mod[j].mod_start) + - mod[j].mod_end; + uint64_t end = + pfn_to_paddr(mod[j].mod_start) + mod[j].mod_end; if ( map_e < end ) map_e = end; @@ -1304,14 +1301,14 @@ void __init noreturn __start_xen(unsigned long mbi_p) max_pdx = pfn_to_pdx(max_page - 1) + 1; } printk(XENLOG_WARNING "Ignoring inaccessible memory range" - " %013"PRIx64"-%013"PRIx64"\n", + " %013" PRIx64 "-%013" PRIx64 "\n", s, e); continue; } map_e = e; e = (pdx_to_pfn(max_pdx - 1) + 1ULL) << PAGE_SHIFT; printk(XENLOG_WARNING "Ignoring inaccessible memory range" - " %013"PRIx64"-%013"PRIx64"\n", + " %013" PRIx64 "-%013" PRIx64 "\n", e, map_e); } @@ -1319,8 +1316,8 @@ void __init noreturn __start_xen(unsigned long mbi_p) /* Need to create mappings above BOOTSTRAP_MAP_BASE. */ map_s = max_t(uint64_t, s, BOOTSTRAP_MAP_BASE); - map_e = min_t(uint64_t, e, - ARRAY_SIZE(l2_identmap) << L2_PAGETABLE_SHIFT); + map_e = + min_t(uint64_t, e, ARRAY_SIZE(l2_identmap) << L2_PAGETABLE_SHIFT); /* Pass mapped memory to allocator /before/ creating new mappings. */ init_boot_pages(s, min(map_s, e)); @@ -1345,8 +1342,9 @@ void __init noreturn __start_xen(unsigned long mbi_p) if ( map_e < end ) { - map_pages_to_xen((unsigned long)__va(map_e), maddr_to_mfn(map_e), - PFN_DOWN(end - map_e), PAGE_HYPERVISOR); + map_pages_to_xen((unsigned long)__va(map_e), + maddr_to_mfn(map_e), PFN_DOWN(end - map_e), + PAGE_HYPERVISOR); init_boot_pages(map_e, end); map_e = end; } @@ -1371,8 +1369,8 @@ void __init noreturn __start_xen(unsigned long mbi_p) set_pdx_range(mod[i].mod_start, mod[i].mod_start + PFN_UP(mod[i].mod_end)); map_pages_to_xen((unsigned long)mfn_to_virt(mod[i].mod_start), - _mfn(mod[i].mod_start), - PFN_UP(mod[i].mod_end), PAGE_HYPERVISOR); + _mfn(mod[i].mod_start), PFN_UP(mod[i].mod_end), + PAGE_HYPERVISOR); } #ifdef CONFIG_KEXEC @@ -1382,7 +1380,7 @@ void __init noreturn __start_xen(unsigned long mbi_p) unsigned long e = min(s + PFN_UP(kexec_crash_area.size), PFN_UP(__pa(HYPERVISOR_VIRT_END - 1))); - if ( e > s ) + if ( e > s ) map_pages_to_xen((unsigned long)__va(kexec_crash_area.start), _mfn(s), e - s, PAGE_HYPERVISOR); } @@ -1401,8 +1399,7 @@ void __init noreturn __start_xen(unsigned long mbi_p) { /* Mark .text as RX (avoiding the first 2M superpage). */ modify_xen_mappings(XEN_VIRT_START + MB(2), - (unsigned long)&__2M_text_end, - PAGE_HYPERVISOR_RX); + (unsigned long)&__2M_text_end, PAGE_HYPERVISOR_RX); /* Mark .rodata as RO. 
*/ modify_xen_mappings((unsigned long)&__2M_rodata_start, @@ -1423,8 +1420,7 @@ void __init noreturn __start_xen(unsigned long mbi_p) for ( i = 0; i < e820.nr_map; i++ ) if ( e820.map[i].type == E820_RAM ) nr_pages += e820.map[i].size >> PAGE_SHIFT; - printk("System RAM: %luMB (%lukB)\n", - nr_pages >> (20 - PAGE_SHIFT), + printk("System RAM: %luMB (%lukB)\n", nr_pages >> (20 - PAGE_SHIFT), nr_pages << (PAGE_SHIFT - 10)); total_pages = nr_pages; @@ -1447,9 +1443,9 @@ void __init noreturn __start_xen(unsigned long mbi_p) /* Check definitions in public headers match internal defs. */ BUILD_BUG_ON(__HYPERVISOR_VIRT_START != HYPERVISOR_VIRT_START); - BUILD_BUG_ON(__HYPERVISOR_VIRT_END != HYPERVISOR_VIRT_END); + BUILD_BUG_ON(__HYPERVISOR_VIRT_END != HYPERVISOR_VIRT_END); BUILD_BUG_ON(MACH2PHYS_VIRT_START != RO_MPT_VIRT_START); - BUILD_BUG_ON(MACH2PHYS_VIRT_END != RO_MPT_VIRT_END); + BUILD_BUG_ON(MACH2PHYS_VIRT_END != RO_MPT_VIRT_END); init_frametable(); @@ -1488,9 +1484,9 @@ void __init noreturn __start_xen(unsigned long mbi_p) if ( tmem_enabled() ) { - printk(XENLOG_WARNING - "TMEM physical RAM limit exceeded, disabling TMEM\n"); - tmem_disable(); + printk(XENLOG_WARNING + "TMEM physical RAM limit exceeded, disabling TMEM\n"); + tmem_disable(); } } else @@ -1518,7 +1514,7 @@ void __init noreturn __start_xen(unsigned long mbi_p) open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period); - if ( opt_watchdog ) + if ( opt_watchdog ) nmi_watchdog = NMI_LOCAL_APIC; find_smp_config(); @@ -1527,8 +1523,8 @@ void __init noreturn __start_xen(unsigned long mbi_p) generic_apic_probe(); - mmio_ro_ranges = rangeset_new(NULL, "r/o mmio ranges", - RANGESETF_prettyprint_hex); + mmio_ro_ranges = + rangeset_new(NULL, "r/o mmio ranges", RANGESETF_prettyprint_hex); acpi_boot_init(); @@ -1620,8 +1616,8 @@ void __init noreturn __start_xen(unsigned long mbi_p) init_idle_domain(); - this_cpu(stubs.addr) = alloc_stub_page(smp_processor_id(), - &this_cpu(stubs).mfn); + this_cpu(stubs.addr) = + alloc_stub_page(smp_processor_id(), &this_cpu(stubs).mfn); BUG_ON(!this_cpu(stubs.addr)); trap_init(); @@ -1640,7 +1636,7 @@ void __init noreturn __start_xen(unsigned long mbi_p) early_msi_init(); - iommu_setup(); /* setup iommu if available */ + iommu_setup(); /* setup iommu if available */ smp_prepare_cpus(); @@ -1668,7 +1664,7 @@ void __init noreturn __start_xen(unsigned long mbi_p) */ if ( !pv_shim ) { - for_each_present_cpu ( i ) + for_each_present_cpu (i) { /* Set up cpu_to_node[]. */ srat_detect_node(i); @@ -1703,7 +1699,7 @@ void __init noreturn __start_xen(unsigned long mbi_p) do_initcalls(); - if ( opt_watchdog ) + if ( opt_watchdog ) watchdog_setup(); if ( !tboot_protect_mem_regions() ) @@ -1714,9 +1710,10 @@ void __init noreturn __start_xen(unsigned long mbi_p) if ( dom0_pvh ) { - dom0_cfg.flags |= (XEN_DOMCTL_CDF_hvm_guest | - ((hvm_hap_supported() && !opt_dom0_shadow) ? - XEN_DOMCTL_CDF_hap : 0)); + dom0_cfg.flags |= + (XEN_DOMCTL_CDF_hvm_guest | + ((hvm_hap_supported() && !opt_dom0_shadow) ? XEN_DOMCTL_CDF_hap + : 0)); dom0_cfg.arch.emulation_flags |= XEN_X86_EMU_LAPIC | XEN_X86_EMU_IOAPIC | XEN_X86_EMU_VPCI; @@ -1788,7 +1785,9 @@ void __init noreturn __start_xen(unsigned long mbi_p) */ if ( construct_dom0(dom0, mod, modules_headroom, (initrdidx > 0) && (initrdidx < mbi->mods_count) - ? mod + initrdidx : NULL, cmdline) != 0) + ? 
mod + initrdidx + : NULL, + cmdline) != 0 ) panic("Could not set up DOM0 guest OS\n"); if ( cpu_has_smap ) @@ -1820,9 +1819,10 @@ void __init noreturn __start_xen(unsigned long mbi_p) } /* Jump to the 1:1 virtual mappings of cpu0_stack. */ - asm volatile ("mov %[stk], %%rsp; jmp %c[fn]" :: - [stk] "g" (__va(__pa(get_stack_bottom()))), - [fn] "i" (reinit_bsp_stack) : "memory"); + asm volatile("mov %[stk], %%rsp; jmp %c[fn]" ::[stk] "g"( + __va(__pa(get_stack_bottom()))), + [fn] "i"(reinit_bsp_stack) + : "memory"); unreachable(); } @@ -1857,8 +1857,15 @@ int __hwdom_init xen_in_range(unsigned long mfn) paddr_t start, end; int i; - enum { region_s3, region_ro, region_rw, nr_regions }; - static struct { + enum + { + region_s3, + region_ro, + region_rw, + nr_regions + }; + static struct + { paddr_t s, e; } xen_regions[nr_regions] __hwdom_initdata; diff --git a/xen/arch/x86/shutdown.c b/xen/arch/x86/shutdown.c index 69b82542b3..43142742d5 100644 --- a/xen/arch/x86/shutdown.c +++ b/xen/arch/x86/shutdown.c @@ -27,15 +27,16 @@ #include #include -enum reboot_type { - BOOT_INVALID, - BOOT_TRIPLE = 't', - BOOT_KBD = 'k', - BOOT_ACPI = 'a', - BOOT_CF9 = 'p', - BOOT_CF9_PWR = 'P', - BOOT_EFI = 'e', - BOOT_XEN = 'x', +enum reboot_type +{ + BOOT_INVALID, + BOOT_TRIPLE = 't', + BOOT_KBD = 'k', + BOOT_ACPI = 'a', + BOOT_CF9 = 'p', + BOOT_CF9_PWR = 'P', + BOOT_EFI = 'e', + BOOT_XEN = 'x', }; static int reboot_mode; @@ -59,9 +60,9 @@ static int __init set_reboot_type(const char *str) { int rc = 0; - for ( ; ; ) + for ( ;; ) { - switch ( *str ) + switch (*str) { case 'n': /* no reboot */ opt_noreboot = 1; @@ -124,7 +125,7 @@ static void noreturn __machine_halt(void *unused) if ( reboot_type == BOOT_XEN ) xen_hypercall_shutdown(SHUTDOWN_poweroff); - for ( ; ; ) + for ( ;; ) halt(); } @@ -166,11 +167,10 @@ static int __init override_reboot(struct dmi_system_id *d) if ( reboot_type != type ) { - static const char *__initdata msg[] = - { - [BOOT_KBD] = "keyboard controller", + static const char *__initdata msg[] = { + [BOOT_KBD] = "keyboard controller", [BOOT_ACPI] = "ACPI", - [BOOT_CF9] = "PCI", + [BOOT_CF9] = "PCI", }; reboot_type = type; @@ -182,355 +182,430 @@ static int __init override_reboot(struct dmi_system_id *d) } static struct dmi_system_id __initdata reboot_dmi_table[] = { - { /* Handle problems with rebooting on Dell E520's */ + { + /* Handle problems with rebooting on Dell E520's */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "Dell E520", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"), + }, }, - { /* Handle problems with rebooting on Dell 1300's */ + { + /* Handle problems with rebooting on Dell 1300's */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "Dell PowerEdge 1300", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"), + }, }, - { /* Handle problems with rebooting on Dell 300's */ + { + /* Handle problems with rebooting on Dell 300's */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "Dell PowerEdge 300", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"), - }, + .matches = + { 
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"), + }, }, - { /* Handle problems with rebooting on Dell Optiplex 745's SFF */ + { + /* Handle problems with rebooting on Dell Optiplex 745's SFF */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "Dell OptiPlex 745", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"), + }, }, - { /* Handle problems with rebooting on Dell Optiplex 745's DFF */ + { + /* Handle problems with rebooting on Dell Optiplex 745's DFF */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "Dell OptiPlex 745", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"), - DMI_MATCH(DMI_BOARD_NAME, "0MM599"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"), + DMI_MATCH(DMI_BOARD_NAME, "0MM599"), + }, }, - { /* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */ + { + /* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "Dell OptiPlex 745", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"), - DMI_MATCH(DMI_BOARD_NAME, "0KW626"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"), + DMI_MATCH(DMI_BOARD_NAME, "0KW626"), + }, }, - { /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */ + { + /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "Dell OptiPlex 330", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"), - DMI_MATCH(DMI_BOARD_NAME, "0KP561"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"), + DMI_MATCH(DMI_BOARD_NAME, "0KP561"), + }, }, - { /* Handle problems with rebooting on Dell Optiplex 360 with 0T656F */ + { + /* Handle problems with rebooting on Dell Optiplex 360 with 0T656F */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "Dell OptiPlex 360", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 360"), - DMI_MATCH(DMI_BOARD_NAME, "0T656F"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 360"), + DMI_MATCH(DMI_BOARD_NAME, "0T656F"), + }, }, - { /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G */ + { + /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "Dell OptiPlex 760", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"), - DMI_MATCH(DMI_BOARD_NAME, "0G919G"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"), + DMI_MATCH(DMI_BOARD_NAME, "0G919G"), + }, }, - { /* Handle problems with rebooting on Dell 2400's */ + { + /* Handle problems with rebooting on Dell 2400's */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "Dell PowerEdge 2400", - .matches = { - 
DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"), + }, }, - { /* Handle problems with rebooting on Dell T5400's */ + { + /* Handle problems with rebooting on Dell T5400's */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "Dell Precision T5400", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"), + }, }, - { /* Handle problems with rebooting on Dell T7400's */ + { + /* Handle problems with rebooting on Dell T7400's */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "Dell Precision T7400", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T7400"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T7400"), + }, }, - { /* Handle problems with rebooting on HP laptops */ + { + /* Handle problems with rebooting on HP laptops */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "HP Compaq Laptop", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"), + }, }, - { /* Handle problems with rebooting on Dell XPS710 */ + { + /* Handle problems with rebooting on Dell XPS710 */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "Dell XPS710", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"), + }, }, - { /* Handle problems with rebooting on Dell DXP061 */ + { + /* Handle problems with rebooting on Dell DXP061 */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "Dell DXP061", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"), + }, }, - { /* Handle problems with rebooting on Sony VGN-Z540N */ + { + /* Handle problems with rebooting on Sony VGN-Z540N */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "Sony VGN-Z540N", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"), + }, }, - { /* Handle problems with rebooting on ASUS P4S800 */ + { + /* Handle problems with rebooting on ASUS P4S800 */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "ASUS P4S800", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), - DMI_MATCH(DMI_BOARD_NAME, "P4S800"), - }, + .matches = + { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P4S800"), + }, }, - { /* Handle reboot issue on Acer Aspire one */ + { + /* Handle reboot issue on Acer Aspire one */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_KBD, .ident = "Acer Aspire One 
A110", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"), + }, }, - { /* Handle problems with rebooting on Apple MacBook5 */ + { + /* Handle problems with rebooting on Apple MacBook5 */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Apple MacBook5", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"), + }, }, - { /* Handle problems with rebooting on Apple MacBookPro5 */ + { + /* Handle problems with rebooting on Apple MacBookPro5 */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Apple MacBookPro5", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"), + }, }, - { /* Handle problems with rebooting on Apple Macmini3,1 */ + { + /* Handle problems with rebooting on Apple Macmini3,1 */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Apple Macmini3,1", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"), + }, }, - { /* Handle problems with rebooting on the iMac9,1. */ + { + /* Handle problems with rebooting on the iMac9,1. */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Apple iMac9,1", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"), + }, }, - { /* Handle problems with rebooting on the Latitude E6320. */ + { + /* Handle problems with rebooting on the Latitude E6320. */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Dell Latitude E6320", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"), + }, }, - { /* Handle problems with rebooting on the Latitude E5420. */ + { + /* Handle problems with rebooting on the Latitude E5420. */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Dell Latitude E5420", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"), + }, }, - { /* Handle problems with rebooting on the Latitude E6220. */ + { + /* Handle problems with rebooting on the Latitude E6220. */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Dell Latitude E6220", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6220"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6220"), + }, }, - { /* Handle problems with rebooting on the Latitude E6420. */ + { + /* Handle problems with rebooting on the Latitude E6420. 
*/ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Dell Latitude E6420", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"), + }, }, - { /* Handle problems with rebooting on the OptiPlex 990. */ + { + /* Handle problems with rebooting on the OptiPlex 990. */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Dell OptiPlex 990", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"), + }, }, - { /* Handle problems with rebooting on the Precision M6600. */ + { + /* Handle problems with rebooting on the Precision M6600. */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Dell OptiPlex 990", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"), + }, }, - { /* Handle problems with rebooting on the Latitude E6520. */ + { + /* Handle problems with rebooting on the Latitude E6520. */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Dell Latitude E6520", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6520"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6520"), + }, }, - { /* Handle problems with rebooting on the OptiPlex 790. */ + { + /* Handle problems with rebooting on the OptiPlex 790. */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Dell OptiPlex 790", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 790"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 790"), + }, }, - { /* Handle problems with rebooting on the OptiPlex 990. */ + { + /* Handle problems with rebooting on the OptiPlex 990. */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Dell OptiPlex 990", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"), + }, }, - { /* Handle problems with rebooting on the OptiPlex 390. */ + { + /* Handle problems with rebooting on the OptiPlex 390. */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Dell OptiPlex 390", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 390"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 390"), + }, }, - { /* Handle problems with rebooting on Dell OptiPlex 9020. */ + { + /* Handle problems with rebooting on Dell OptiPlex 9020. */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_ACPI, .ident = "Dell OptiPlex 9020", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020"), + }, }, - { /* Handle problems with rebooting on the Latitude E6320. 
*/ + { + /* Handle problems with rebooting on the Latitude E6320. */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Dell Latitude E6320", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"), + }, }, - { /* Handle problems with rebooting on the Latitude E6420. */ + { + /* Handle problems with rebooting on the Latitude E6420. */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Dell Latitude E6420", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"), + }, }, - { /* Handle problems with rebooting on the Latitude E6520. */ + { + /* Handle problems with rebooting on the Latitude E6520. */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_CF9, .ident = "Dell Latitude E6520", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6520"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6520"), + }, }, - { /* Handle problems with rebooting on Dell PowerEdge R540. */ + { + /* Handle problems with rebooting on Dell PowerEdge R540. */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_ACPI, .ident = "Dell PowerEdge R540", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"), + }, }, - { /* Handle problems with rebooting on Dell PowerEdge R740. */ + { + /* Handle problems with rebooting on Dell PowerEdge R740. */ .callback = override_reboot, .driver_data = (void *)(long)BOOT_ACPI, .ident = "Dell PowerEdge R740", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"), - }, + .matches = + { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"), + }, }, - { } -}; + {}}; static int __init reboot_init(void) { @@ -556,7 +631,7 @@ void machine_restart(unsigned int delay_millisecs) { unsigned int i, attempt; enum reboot_type orig_reboot_type; - const struct desc_ptr no_idt = { 0 }; + const struct desc_ptr no_idt = {0}; watchdog_disable(); console_start_sync(); @@ -577,9 +652,9 @@ void machine_restart(unsigned int delay_millisecs) if ( get_apic_id() != boot_cpu_physical_apicid ) { /* Send IPI to the boot CPU (logical cpu 0). */ - on_selected_cpus(cpumask_of(0), __machine_restart, - &delay_millisecs, 0); - for ( ; ; ) + on_selected_cpus(cpumask_of(0), __machine_restart, &delay_millisecs, + 0); + for ( ;; ) halt(); } @@ -602,9 +677,9 @@ void machine_restart(unsigned int delay_millisecs) if ( reboot_type != BOOT_EFI ) *((unsigned short *)__va(0x472)) = reboot_mode; - for ( attempt = 0; ; attempt++ ) + for ( attempt = 0;; attempt++ ) { - switch ( reboot_type ) + switch (reboot_type) { case BOOT_INVALID: ASSERT_UNREACHABLE(); @@ -615,7 +690,7 @@ void machine_restart(unsigned int delay_millisecs) { kb_wait(); udelay(50); - outb(0xfe,0x64); /* pulse reset low */ + outb(0xfe, 0x64); /* pulse reset low */ udelay(50); } /* @@ -626,7 +701,8 @@ void machine_restart(unsigned int delay_millisecs) * KBD -> TRIPLE -> KBD -> TRIPLE -> KBD -> ... 
*/ reboot_type = (((attempt == 1) && (orig_reboot_type == BOOT_ACPI)) - ? BOOT_ACPI : BOOT_TRIPLE); + ? BOOT_ACPI + : BOOT_TRIPLE); break; case BOOT_EFI: reboot_type = acpi_disabled ? BOOT_KBD : BOOT_ACPI; @@ -634,7 +710,7 @@ void machine_restart(unsigned int delay_millisecs) *((unsigned short *)__va(0x472)) = reboot_mode; break; case BOOT_TRIPLE: - asm volatile ("lidt %0; int3" : : "m" (no_idt)); + asm volatile("lidt %0; int3" : : "m"(no_idt)); reboot_type = BOOT_KBD; break; case BOOT_ACPI: @@ -643,19 +719,19 @@ void machine_restart(unsigned int delay_millisecs) break; case BOOT_CF9: case BOOT_CF9_PWR: - { - u8 cf9 = inb(0xcf9) & ~0x0e; - - /* Request warm, hard, or power-cycle reset. */ - if ( reboot_type == BOOT_CF9_PWR ) - cf9 |= 0x0a; - else if ( reboot_mode == 0 ) - cf9 |= 0x02; - outb(cf9, 0xcf9); - udelay(50); - outb(cf9 | 0x04, 0xcf9); /* Actually do the reset. */ - udelay(50); - } + { + u8 cf9 = inb(0xcf9) & ~0x0e; + + /* Request warm, hard, or power-cycle reset. */ + if ( reboot_type == BOOT_CF9_PWR ) + cf9 |= 0x0a; + else if ( reboot_mode == 0 ) + cf9 |= 0x02; + outb(cf9, 0xcf9); + udelay(50); + outb(cf9 | 0x04, 0xcf9); /* Actually do the reset. */ + udelay(50); + } reboot_type = BOOT_ACPI; break; diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c index b15d4f05df..88eed5541a 100644 --- a/xen/arch/x86/smp.c +++ b/xen/arch/x86/smp.c @@ -79,12 +79,12 @@ void send_IPI_self(int vector) * The following functions deal with sending IPIs between CPUs. */ -static inline int __prepare_ICR (unsigned int shortcut, int vector) +static inline int __prepare_ICR(unsigned int shortcut, int vector) { return APIC_DM_FIXED | shortcut | vector; } -static inline int __prepare_ICR2 (unsigned int mask) +static inline int __prepare_ICR2(unsigned int mask) { return SET_xAPIC_DEST_FIELD(mask); } @@ -94,12 +94,12 @@ void apic_wait_icr_idle(void) if ( x2apic_enabled ) return; - while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY ) + while ( apic_read(APIC_ICR) & APIC_ICR_BUSY ) cpu_relax(); } static void __default_send_IPI_shortcut(unsigned int shortcut, int vector, - unsigned int dest) + unsigned int dest) { unsigned int cfg; @@ -156,7 +156,7 @@ void send_IPI_mask_flat(const cpumask_t *cpumask, int vector) * Send the IPI. The write to APIC_ICR fires this off. */ apic_write(APIC_ICR, cfg); - + local_irq_restore(flags); } @@ -167,7 +167,7 @@ void send_IPI_mask_phys(const cpumask_t *mask, int vector) local_irq_save(flags); - for_each_cpu ( query_cpu, mask ) + for_each_cpu (query_cpu, mask) { if ( !cpu_online(query_cpu) || (query_cpu == smp_processor_id()) ) continue; @@ -224,14 +224,13 @@ void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags) cpumask_test_cpu(cpu, mask) ) flags = flush_area_local(va, flags); - if ( (flags & ~FLUSH_ORDER_MASK) && - !cpumask_subset(mask, cpumask_of(cpu)) ) + if ( (flags & ~FLUSH_ORDER_MASK) && !cpumask_subset(mask, cpumask_of(cpu)) ) { spin_lock(&flush_lock); cpumask_and(&flush_cpumask, mask, &cpu_online_map); cpumask_clear_cpu(cpu, &flush_cpumask); - flush_va = va; - flush_flags = flags; + flush_va = va; + flush_flags = flags; send_IPI_mask(&flush_cpumask, INVALIDATE_TLB_VECTOR); while ( !cpumask_empty(&flush_cpumask) ) cpu_relax(); @@ -284,7 +283,7 @@ void __stop_this_cpu(void) * some BIOSes. 
*/ clts(); - asm volatile ( "fninit" ); + asm volatile("fninit"); cpumask_clear_cpu(smp_processor_id(), &cpu_online_map); } @@ -292,12 +291,12 @@ void __stop_this_cpu(void) static void stop_this_cpu(void *dummy) { __stop_this_cpu(); - for ( ; ; ) + for ( ;; ) halt(); } /* - * Stop all CPUs and turn off local APICs and the IO-APIC, so other OSs see a + * Stop all CPUs and turn off local APICs and the IO-APIC, so other OSs see a * clean IRQ state. */ void smp_send_stop(void) diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c index 7d1226d7bc..f647286cc7 100644 --- a/xen/arch/x86/smpboot.c +++ b/xen/arch/x86/smpboot.c @@ -48,7 +48,7 @@ #include #include -#define setup_trampoline() (bootsym_phys(trampoline_realmode_entry)) +#define setup_trampoline() (bootsym_phys(trampoline_realmode_entry)) unsigned long __read_mostly trampoline_phys; @@ -71,19 +71,23 @@ static cpumask_t *secondary_socket_cpumask; struct cpuinfo_x86 cpu_data[NR_CPUS]; -u32 x86_cpu_to_apicid[NR_CPUS] __read_mostly = - { [0 ... NR_CPUS-1] = BAD_APICID }; +u32 x86_cpu_to_apicid[NR_CPUS] __read_mostly = {[0 ... NR_CPUS - 1] = + BAD_APICID}; static int cpu_error; static enum cpu_state { - CPU_STATE_DYING, /* slave -> master: I am dying */ - CPU_STATE_DEAD, /* slave -> master: I am completely dead */ - CPU_STATE_INIT, /* master -> slave: Early bringup phase 1 */ - CPU_STATE_CALLOUT, /* master -> slave: Early bringup phase 2 */ - CPU_STATE_CALLIN, /* slave -> master: Completed phase 2 */ - CPU_STATE_ONLINE /* master -> slave: Go fully online now. */ + CPU_STATE_DYING, /* slave -> master: I am dying */ + CPU_STATE_DEAD, /* slave -> master: I am completely dead */ + CPU_STATE_INIT, /* master -> slave: Early bringup phase 1 */ + CPU_STATE_CALLOUT, /* master -> slave: Early bringup phase 2 */ + CPU_STATE_CALLIN, /* slave -> master: Completed phase 2 */ + CPU_STATE_ONLINE /* master -> slave: Go fully online now. 
*/ } cpu_state; -#define set_cpu_state(state) do { smp_mb(); cpu_state = (state); } while (0) +#define set_cpu_state(state) \ + do { \ + smp_mb(); \ + cpu_state = (state); \ + } while ( 0 ) void *stack_base[NR_CPUS]; @@ -137,7 +141,7 @@ static void synchronize_tsc_master(unsigned int slave) tsc_value = rdtsc_ordered(); smp_wmb(); atomic_inc(&tsc_count); - while ( atomic_read(&tsc_count) != (i<<1) ) + while ( atomic_read(&tsc_count) != (i << 1) ) cpu_relax(); } @@ -158,7 +162,7 @@ static void synchronize_tsc_slave(unsigned int slave) for ( i = 1; i <= 5; i++ ) { - while ( atomic_read(&tsc_count) != ((i<<1)-1) ) + while ( atomic_read(&tsc_count) != ((i << 1) - 1) ) cpu_relax(); smp_rmb(); /* @@ -249,7 +253,7 @@ static void set_cpu_sibling_map(unsigned int cpu) if ( c[cpu].x86_num_siblings > 1 ) { - for_each_cpu ( i, &cpu_sibling_setup_map ) + for_each_cpu (i, &cpu_sibling_setup_map) { if ( cpu == i || c[cpu].phys_proc_id != c[i].phys_proc_id ) continue; @@ -280,7 +284,7 @@ static void set_cpu_sibling_map(unsigned int cpu) return; } - for_each_cpu ( i, &cpu_sibling_setup_map ) + for_each_cpu (i, &cpu_sibling_setup_map) { if ( c[cpu].phys_proc_id == c[i].phys_proc_id ) { @@ -476,7 +480,7 @@ static int wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) for ( i = 0; i < 2; i++ ) { - Dprintk("Sending STARTUP #%d.\n", i+1); + Dprintk("Sending STARTUP #%d.\n", i + 1); apic_write(APIC_ESR, 0); apic_read(APIC_ESR); Dprintk("After apic_write.\n"); @@ -553,8 +557,7 @@ static int do_boot_cpu(int apicid, int cpu) /* So we see what's up */ if ( opt_cpu_info ) - printk("Booting processor %d/%d eip %lx\n", - cpu, apicid, start_eip); + printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip); stack_start = stack_base[cpu]; @@ -689,7 +692,7 @@ static int clone_mapping(const void *ptr, root_pgentry_t *rpt) return -EINVAL; pl3e = l4e_to_l3e(idle_pg_table[root_table_offset(linear)]) + - l3_table_offset(linear); + l3_table_offset(linear); flags = l3e_get_flags(*pl3e); ASSERT(flags & _PAGE_PRESENT); @@ -810,8 +813,8 @@ static int setup_cpu_root_pgt(unsigned int cpu) { const char *ptr; - for ( rc = 0, ptr = _stextentry; - !rc && ptr < _etextentry; ptr += PAGE_SIZE ) + for ( rc = 0, ptr = _stextentry; !rc && ptr < _etextentry; + ptr += PAGE_SIZE ) rc = clone_mapping(ptr, rpt); if ( rc ) @@ -1037,20 +1040,20 @@ static int cpu_smpboot_alloc(unsigned int cpu) rc = 0; - out: +out: if ( rc ) cpu_smpboot_free(cpu, true); return rc; } -static int cpu_smpboot_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_smpboot_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; int rc = 0; - switch ( action ) + switch (action) { case CPU_UP_PREPARE: rc = cpu_smpboot_alloc(cpu); @@ -1067,9 +1070,8 @@ static int cpu_smpboot_callback( return !rc ? NOTIFY_DONE : notifier_from_errno(rc); } -static struct notifier_block cpu_smpboot_nfb = { - .notifier_call = cpu_smpboot_callback -}; +static struct notifier_block cpu_smpboot_nfb = {.notifier_call = + cpu_smpboot_callback}; void __init smp_prepare_cpus(void) { @@ -1128,9 +1130,9 @@ void __init smp_prepare_cpus(void) init_uniprocessor: physids_clear(phys_cpu_present_map); physid_set(0, phys_cpu_present_map); - if (APIC_init_uniprocessor()) + if ( APIC_init_uniprocessor() ) printk(KERN_NOTICE "Local APIC not detected." 
- " Using dummy APIC emulation.\n"); + " Using dummy APIC emulation.\n"); return; } @@ -1179,14 +1181,13 @@ void __init smp_prepare_boot_cpu(void) get_cpu_info()->pv_cr3 = 0; } -static void -remove_siblinginfo(int cpu) +static void remove_siblinginfo(int cpu) { int sibling; cpumask_clear_cpu(cpu, socket_cpumask[cpu_to_socket(cpu)]); - for_each_cpu ( sibling, per_cpu(cpu_core_mask, cpu) ) + for_each_cpu (sibling, per_cpu(cpu_core_mask, cpu)) { cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, sibling)); /* Last thread sibling in this cpu core going down. */ @@ -1194,7 +1195,7 @@ remove_siblinginfo(int cpu) cpu_data[sibling].booted_cores--; } - for_each_cpu(sibling, per_cpu(cpu_sibling_mask, cpu)) + for_each_cpu (sibling, per_cpu(cpu_sibling_mask, cpu)) cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, sibling)); cpumask_clear(per_cpu(cpu_sibling_mask, cpu)); cpumask_clear(per_cpu(cpu_core_mask, cpu)); @@ -1247,11 +1248,10 @@ int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm) { int cpu = -1; - dprintk(XENLOG_DEBUG, "cpu_add apic_id %x acpi_id %x pxm %x\n", - apic_id, acpi_id, pxm); + dprintk(XENLOG_DEBUG, "cpu_add apic_id %x acpi_id %x pxm %x\n", apic_id, + acpi_id, pxm); - if ( (acpi_id >= MAX_MADT_ENTRIES) || - (apic_id >= MAX_APICS) || + if ( (acpi_id >= MAX_MADT_ENTRIES) || (apic_id >= MAX_APICS) || (pxm >= 256) ) return -EINVAL; @@ -1261,8 +1261,7 @@ int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm) /* Detect if the cpu has been added before */ if ( x86_acpiid_to_apicid[acpi_id] != BAD_APICID ) { - cpu = (x86_acpiid_to_apicid[acpi_id] != apic_id) - ? -EINVAL : -EEXIST; + cpu = (x86_acpiid_to_apicid[acpi_id] != apic_id) ? -EINVAL : -EEXIST; goto out; } @@ -1283,15 +1282,14 @@ int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm) if ( node == NUMA_NO_NODE ) { - dprintk(XENLOG_WARNING, - "Setup node failed for pxm %x\n", pxm); + dprintk(XENLOG_WARNING, "Setup node failed for pxm %x\n", pxm); x86_acpiid_to_apicid[acpi_id] = BAD_APICID; mp_unregister_lapic(apic_id, cpu); cpu = node; goto out; } if ( apic_id < MAX_LOCAL_APIC ) - apicid_to_node[apic_id] = node; + apicid_to_node[apic_id] = node; } /* Physically added CPUs do not have synchronised TSC. */ @@ -1304,19 +1302,19 @@ int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm) " ** New physical CPU %u may have skewed TSC and hence " "break assumed cross-CPU TSC coherency.\n" " ** Consider using boot parameter \"tsc=skewed\" " - "which forces TSC emulation where appropriate.\n", cpu); + "which forces TSC emulation where appropriate.\n", + cpu); cpumask_set_cpu(cpu, &tsc_sync_cpu_mask); } srat_detect_node(cpu); numa_add_cpu(cpu); dprintk(XENLOG_INFO, "Add CPU %x with index %x\n", apic_id, cpu); - out: +out: cpu_hotplug_done(); return cpu; } - int __cpu_up(unsigned int cpu) { int apicid, ret; @@ -1339,7 +1337,6 @@ int __cpu_up(unsigned int cpu) return 0; } - void __init smp_cpus_done(void) { if ( nmi_watchdog == NMI_LOCAL_APIC ) diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c index ad72ecd3a5..0b592fa5e6 100644 --- a/xen/arch/x86/spec_ctrl.c +++ b/xen/arch/x86/spec_ctrl.c @@ -184,7 +184,7 @@ static __init int parse_pv_l1tf(const char *s) if ( !ss ) ss = strchr(s, '\0'); - switch ( parse_bool(s, ss) ) + switch (parse_bool(s, ss)) { case 0: opt_pv_l1tf_hwdom = opt_pv_l1tf_domu = 0; @@ -226,15 +226,15 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps) /* Hardware features which pertain to speculative mitigations. 
*/ printk(" Hardware features:%s%s%s%s%s%s%s%s%s%s\n", (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS/IBPB" : "", - (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP" : "", + (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP" : "", (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "", - (_7d0 & cpufeat_mask(X86_FEATURE_SSBD)) ? " SSBD" : "", - (e8b & cpufeat_mask(X86_FEATURE_IBPB)) ? " IBPB" : "", - (caps & ARCH_CAPABILITIES_IBRS_ALL) ? " IBRS_ALL" : "", - (caps & ARCH_CAPABILITIES_RDCL_NO) ? " RDCL_NO" : "", - (caps & ARCH_CAPS_RSBA) ? " RSBA" : "", - (caps & ARCH_CAPS_SKIP_L1DFL) ? " SKIP_L1DFL": "", - (caps & ARCH_CAPS_SSB_NO) ? " SSB_NO" : ""); + (_7d0 & cpufeat_mask(X86_FEATURE_SSBD)) ? " SSBD" : "", + (e8b & cpufeat_mask(X86_FEATURE_IBPB)) ? " IBPB" : "", + (caps & ARCH_CAPABILITIES_IBRS_ALL) ? " IBRS_ALL" : "", + (caps & ARCH_CAPABILITIES_RDCL_NO) ? " RDCL_NO" : "", + (caps & ARCH_CAPS_RSBA) ? " RSBA" : "", + (caps & ARCH_CAPS_SKIP_L1DFL) ? " SKIP_L1DFL" : "", + (caps & ARCH_CAPS_SSB_NO) ? " SSB_NO" : ""); /* Compiled-in support which pertains to mitigations. */ if ( IS_ENABLED(CONFIG_INDIRECT_THUNK) || IS_ENABLED(CONFIG_SHADOW_PAGING) ) @@ -249,55 +249,60 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps) /* Settings for Xen's protection, irrespective of guests. */ printk(" Xen settings: BTI-Thunk %s, SPEC_CTRL: %s%s, Other:%s%s\n", - thunk == THUNK_NONE ? "N/A" : - thunk == THUNK_RETPOLINE ? "RETPOLINE" : - thunk == THUNK_LFENCE ? "LFENCE" : - thunk == THUNK_JMP ? "JMP" : "?", - !boot_cpu_has(X86_FEATURE_IBRSB) ? "No" : - (default_xen_spec_ctrl & SPEC_CTRL_IBRS) ? "IBRS+" : "IBRS-", - !boot_cpu_has(X86_FEATURE_SSBD) ? "" : - (default_xen_spec_ctrl & SPEC_CTRL_SSBD) ? " SSBD+" : " SSBD-", - opt_ibpb ? " IBPB" : "", - opt_l1d_flush ? " L1D_FLUSH" : ""); + thunk == THUNK_NONE + ? "N/A" + : thunk == THUNK_RETPOLINE + ? "RETPOLINE" + : thunk == THUNK_LFENCE ? "LFENCE" + : thunk == THUNK_JMP ? "JMP" : "?", + !boot_cpu_has(X86_FEATURE_IBRSB) + ? "No" + : (default_xen_spec_ctrl & SPEC_CTRL_IBRS) ? "IBRS+" : "IBRS-", + !boot_cpu_has(X86_FEATURE_SSBD) + ? "" + : (default_xen_spec_ctrl & SPEC_CTRL_SSBD) ? " SSBD+" : " SSBD-", + opt_ibpb ? " IBPB" : "", opt_l1d_flush ? " L1D_FLUSH" : ""); /* L1TF diagnostics, printed if vulnerable or PV shadowing is in use. */ if ( cpu_has_bug_l1tf || opt_pv_l1tf_hwdom || opt_pv_l1tf_domu ) printk(" L1TF: believed%s vulnerable, maxphysaddr L1D %u, CPUID %u" - ", Safe address %"PRIx64"\n", - cpu_has_bug_l1tf ? "" : " not", - l1d_maxphysaddr, paddr_bits, l1tf_safe_maddr); + ", Safe address %" PRIx64 "\n", + cpu_has_bug_l1tf ? "" : " not", l1d_maxphysaddr, paddr_bits, + l1tf_safe_maddr); - /* - * Alternatives blocks for protecting against and/or virtualising - * mitigation support for guests. - */ + /* + * Alternatives blocks for protecting against and/or virtualising + * mitigation support for guests. + */ #ifdef CONFIG_HVM printk(" Support for HVM VMs:%s%s%s%s\n", (boot_cpu_has(X86_FEATURE_SC_MSR_HVM) || - boot_cpu_has(X86_FEATURE_SC_RSB_HVM) || - opt_eager_fpu) ? "" : " None", - boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ? " MSR_SPEC_CTRL" : "", - boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ? " RSB" : "", - opt_eager_fpu ? " EAGER_FPU" : ""); + boot_cpu_has(X86_FEATURE_SC_RSB_HVM) || opt_eager_fpu) + ? "" + : " None", + boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ? " MSR_SPEC_CTRL" : "", + boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ? " RSB" : "", + opt_eager_fpu ? 
" EAGER_FPU" : ""); #endif #ifdef CONFIG_PV printk(" Support for PV VMs:%s%s%s%s\n", (boot_cpu_has(X86_FEATURE_SC_MSR_PV) || - boot_cpu_has(X86_FEATURE_SC_RSB_PV) || - opt_eager_fpu) ? "" : " None", - boot_cpu_has(X86_FEATURE_SC_MSR_PV) ? " MSR_SPEC_CTRL" : "", - boot_cpu_has(X86_FEATURE_SC_RSB_PV) ? " RSB" : "", - opt_eager_fpu ? " EAGER_FPU" : ""); + boot_cpu_has(X86_FEATURE_SC_RSB_PV) || opt_eager_fpu) + ? "" + : " None", + boot_cpu_has(X86_FEATURE_SC_MSR_PV) ? " MSR_SPEC_CTRL" : "", + boot_cpu_has(X86_FEATURE_SC_RSB_PV) ? " RSB" : "", + opt_eager_fpu ? " EAGER_FPU" : ""); printk(" XPTI (64-bit PV only): Dom0 %s, DomU %s (with%s PCID)\n", opt_xpti_hwdom ? "enabled" : "disabled", - opt_xpti_domu ? "enabled" : "disabled", + opt_xpti_domu ? "enabled" : "disabled", xpti_pcid_enabled() ? "" : "out"); printk(" PV L1TF shadowing: Dom0 %s, DomU %s\n", - opt_pv_l1tf_hwdom ? "enabled" : "disabled", - opt_pv_l1tf_domu ? "enabled" : "disabled"); + opt_pv_l1tf_hwdom ? "enabled" : "disabled", + opt_pv_l1tf_domu ? "enabled" : "disabled"); #endif } @@ -320,7 +325,7 @@ static bool __init retpoline_safe(uint64_t caps) if ( caps & ARCH_CAPS_RSBA ) return false; - switch ( boot_cpu_data.x86_model ) + switch (boot_cpu_data.x86_model) { case 0x17: /* Penryn */ case 0x1d: /* Dunnington */ @@ -352,14 +357,19 @@ static bool __init retpoline_safe(uint64_t caps) case 0x4f: /* Broadwell EP/EX */ return ucode_rev >= 0xb000021; case 0x56: /* Broadwell D */ - switch ( boot_cpu_data.x86_mask ) + switch (boot_cpu_data.x86_mask) { - case 2: return ucode_rev >= 0x15; - case 3: return ucode_rev >= 0x7000012; - case 4: return ucode_rev >= 0xf000011; - case 5: return ucode_rev >= 0xe000009; + case 2: + return ucode_rev >= 0x15; + case 3: + return ucode_rev >= 0x7000012; + case 4: + return ucode_rev >= 0xf000011; + case 5: + return ucode_rev >= 0xe000009; default: - printk("Unrecognised CPU stepping %#x - assuming not reptpoline safe\n", + printk("Unrecognised CPU stepping %#x - assuming not reptpoline " + "safe\n", boot_cpu_data.x86_mask); return false; } @@ -395,7 +405,7 @@ static bool __init should_use_eager_fpu(void) boot_cpu_data.x86 != 6 ) return false; - switch ( boot_cpu_data.x86_model ) + switch (boot_cpu_data.x86_model) { /* * Core processors since at least Nehalem are vulnerable. @@ -471,7 +481,7 @@ static __init void l1tf_calculations(uint64_t caps) if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 == 6 ) { - switch ( boot_cpu_data.x86_model ) + switch (boot_cpu_data.x86_model) { /* * Core processors since at least Penryn are vulnerable. @@ -603,8 +613,8 @@ static __init void l1tf_calculations(uint64_t caps) * information is in terms of guest physical layout. */ l1tf_safe_maddr = max(l1tf_safe_maddr, ((l1d_maxphysaddr > paddr_bits) - ? (1ul << paddr_bits) - : (3ul << (paddr_bits - 2)))); + ? (1ul << paddr_bits) + : (3ul << (paddr_bits - 2)))); } int8_t __read_mostly opt_xpti_hwdom = -1; @@ -645,7 +655,7 @@ static __init int parse_xpti(const char *s) if ( !ss ) ss = strchr(s, '\0'); - switch ( parse_bool(s, ss) ) + switch (parse_bool(s, ss)) { case 0: opt_xpti_hwdom = opt_xpti_domu = 0; @@ -690,7 +700,7 @@ void __init init_speculation_mitigations(void) if ( opt_thunk != THUNK_DEFAULT || opt_ibrs != -1 ) { thunk = opt_thunk; - ibrs = !!opt_ibrs; + ibrs = !!opt_ibrs; } else { @@ -810,7 +820,8 @@ void __init init_speculation_mitigations(void) if ( opt_eager_fpu == -1 ) opt_eager_fpu = should_use_eager_fpu(); - /* (Re)init BSP state now that default_spec_ctrl_flags has been calculated. 
*/ + /* (Re)init BSP state now that default_spec_ctrl_flags has been calculated. + */ init_shadow_spec_ctrl_state(); /* If Xen is using any MSR_SPEC_CTRL settings, adjust the idle path. */ diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c index 2d70b45909..15ad3df93b 100644 --- a/xen/arch/x86/srat.c +++ b/xen/arch/x86/srat.c @@ -7,7 +7,7 @@ * Called from acpi_numa_init while reading the SRAT and SLIT tables. * Assumes all memory regions belonging to a single proximity domain * are in one chunk. Holes between them will be included in the node. - * + * * Adapted for Xen: Ryan Harper */ @@ -28,12 +28,13 @@ static nodemask_t memory_nodes_parsed __initdata; static nodemask_t processor_nodes_parsed __initdata; static struct node nodes[MAX_NUMNODES] __initdata; -struct pxm2node { - unsigned pxm; - nodeid_t node; +struct pxm2node +{ + unsigned pxm; + nodeid_t node; }; -static struct pxm2node __read_mostly pxm2node[MAX_NUMNODES] = - { [0 ... MAX_NUMNODES - 1] = {.node = NUMA_NO_NODE} }; +static struct pxm2node __read_mostly pxm2node[MAX_NUMNODES] = { + [0 ... MAX_NUMNODES - 1] = {.node = NUMA_NO_NODE}}; static unsigned node_to_pxm(nodeid_t n); @@ -44,122 +45,126 @@ static __initdata DECLARE_BITMAP(memblk_hotplug, NR_NODE_MEMBLKS); static inline bool node_found(unsigned idx, unsigned pxm) { - return ((pxm2node[idx].pxm == pxm) && - (pxm2node[idx].node != NUMA_NO_NODE)); + return ((pxm2node[idx].pxm == pxm) && (pxm2node[idx].node != NUMA_NO_NODE)); } nodeid_t pxm_to_node(unsigned pxm) { - unsigned i; + unsigned i; - if ((pxm < ARRAY_SIZE(pxm2node)) && node_found(pxm, pxm)) - return pxm2node[pxm].node; + if ( (pxm < ARRAY_SIZE(pxm2node)) && node_found(pxm, pxm) ) + return pxm2node[pxm].node; - for (i = 0; i < ARRAY_SIZE(pxm2node); i++) - if (node_found(i, pxm)) - return pxm2node[i].node; + for ( i = 0; i < ARRAY_SIZE(pxm2node); i++ ) + if ( node_found(i, pxm) ) + return pxm2node[i].node; - return NUMA_NO_NODE; + return NUMA_NO_NODE; } nodeid_t setup_node(unsigned pxm) { - nodeid_t node; - unsigned idx; - static bool warned; - static unsigned nodes_found; - - BUILD_BUG_ON(MAX_NUMNODES >= NUMA_NO_NODE); - - if (pxm < ARRAY_SIZE(pxm2node)) { - if (node_found(pxm, pxm)) - return pxm2node[pxm].node; - - /* Try to maintain indexing of pxm2node by pxm */ - if (pxm2node[pxm].node == NUMA_NO_NODE) { - idx = pxm; - goto finish; - } - } - - for (idx = 0; idx < ARRAY_SIZE(pxm2node); idx++) - if (pxm2node[idx].node == NUMA_NO_NODE) - goto finish; - - if (!warned) { - printk(KERN_WARNING "SRAT: Too many proximity domains (%#x)\n", - pxm); - warned = true; - } - - return NUMA_NO_NODE; - - finish: - node = nodes_found++; - if (node >= MAX_NUMNODES) - return NUMA_NO_NODE; - pxm2node[idx].pxm = pxm; - pxm2node[idx].node = node; - - return node; + nodeid_t node; + unsigned idx; + static bool warned; + static unsigned nodes_found; + + BUILD_BUG_ON(MAX_NUMNODES >= NUMA_NO_NODE); + + if ( pxm < ARRAY_SIZE(pxm2node) ) + { + if ( node_found(pxm, pxm) ) + return pxm2node[pxm].node; + + /* Try to maintain indexing of pxm2node by pxm */ + if ( pxm2node[pxm].node == NUMA_NO_NODE ) + { + idx = pxm; + goto finish; + } + } + + for ( idx = 0; idx < ARRAY_SIZE(pxm2node); idx++ ) + if ( pxm2node[idx].node == NUMA_NO_NODE ) + goto finish; + + if ( !warned ) + { + printk(KERN_WARNING "SRAT: Too many proximity domains (%#x)\n", pxm); + warned = true; + } + + return NUMA_NO_NODE; + +finish: + node = nodes_found++; + if ( node >= MAX_NUMNODES ) + return NUMA_NO_NODE; + pxm2node[idx].pxm = pxm; + pxm2node[idx].node = node; + + return 
node; } int valid_numa_range(u64 start, u64 end, nodeid_t node) { - int i; + int i; - for (i = 0; i < num_node_memblks; i++) { - struct node *nd = &node_memblk_range[i]; + for ( i = 0; i < num_node_memblks; i++ ) + { + struct node *nd = &node_memblk_range[i]; - if (nd->start <= start && nd->end >= end && - memblk_nodeid[i] == node) - return 1; - } + if ( nd->start <= start && nd->end >= end && memblk_nodeid[i] == node ) + return 1; + } - return 0; + return 0; } static __init int conflicting_memblks(u64 start, u64 end) { - int i; - - for (i = 0; i < num_node_memblks; i++) { - struct node *nd = &node_memblk_range[i]; - if (nd->start == nd->end) - continue; - if (nd->end > start && nd->start < end) - return i; - if (nd->end == end && nd->start == start) - return i; - } - return -1; + int i; + + for ( i = 0; i < num_node_memblks; i++ ) + { + struct node *nd = &node_memblk_range[i]; + if ( nd->start == nd->end ) + continue; + if ( nd->end > start && nd->start < end ) + return i; + if ( nd->end == end && nd->start == start ) + return i; + } + return -1; } static __init void cutoff_node(int i, u64 start, u64 end) { - struct node *nd = &nodes[i]; - if (nd->start < start) { - nd->start = start; - if (nd->end < nd->start) - nd->start = nd->end; - } - if (nd->end > end) { - nd->end = end; - if (nd->start > nd->end) - nd->start = nd->end; - } + struct node *nd = &nodes[i]; + if ( nd->start < start ) + { + nd->start = start; + if ( nd->end < nd->start ) + nd->start = nd->end; + } + if ( nd->end > end ) + { + nd->end = end; + if ( nd->start > nd->end ) + nd->start = nd->end; + } } static __init void bad_srat(void) { - int i; - printk(KERN_ERR "SRAT: SRAT not used.\n"); - acpi_numa = -1; - for (i = 0; i < MAX_LOCAL_APIC; i++) - apicid_to_node[i] = NUMA_NO_NODE; - for (i = 0; i < ARRAY_SIZE(pxm2node); i++) - pxm2node[i].node = NUMA_NO_NODE; - mem_hotplug = 0; + int i; + printk(KERN_ERR "SRAT: SRAT not used.\n"); + acpi_numa = -1; + for ( i = 0; i < MAX_LOCAL_APIC; i++ ) + apicid_to_node[i] = NUMA_NO_NODE; + for ( i = 0; i < ARRAY_SIZE(pxm2node); i++ ) + pxm2node[i].node = NUMA_NO_NODE; + mem_hotplug = 0; } /* @@ -170,365 +175,400 @@ static __init void bad_srat(void) */ static __init int slit_valid(struct acpi_table_slit *slit) { - int i, j; - int d = slit->locality_count; - for (i = 0; i < d; i++) { - for (j = 0; j < d; j++) { - u8 val = slit->entry[d*i + j]; - if (i == j) { - if (val != 10) - return 0; - } else if (val <= 10) - return 0; - } - } - return 1; + int i, j; + int d = slit->locality_count; + for ( i = 0; i < d; i++ ) + { + for ( j = 0; j < d; j++ ) + { + u8 val = slit->entry[d * i + j]; + if ( i == j ) + { + if ( val != 10 ) + return 0; + } + else if ( val <= 10 ) + return 0; + } + } + return 1; } /* Callback for SLIT parsing */ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) { - mfn_t mfn; - - if (!slit_valid(slit)) { - printk(KERN_INFO "ACPI: SLIT table looks invalid. " - "Not used.\n"); - return; - } - mfn = alloc_boot_pages(PFN_UP(slit->header.length), 1); - acpi_slit = mfn_to_virt(mfn_x(mfn)); - memcpy(acpi_slit, slit, slit->header.length); + mfn_t mfn; + + if ( !slit_valid(slit) ) + { + printk(KERN_INFO "ACPI: SLIT table looks invalid. 
" + "Not used.\n"); + return; + } + mfn = alloc_boot_pages(PFN_UP(slit->header.length), 1); + acpi_slit = mfn_to_virt(mfn_x(mfn)); + memcpy(acpi_slit, slit, slit->header.length); } /* Callback for Proximity Domain -> x2APIC mapping */ void __init acpi_numa_x2apic_affinity_init(const struct acpi_srat_x2apic_cpu_affinity *pa) { - unsigned pxm; - nodeid_t node; - - if (srat_disabled()) - return; - if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) { - bad_srat(); - return; - } - if (!(pa->flags & ACPI_SRAT_CPU_ENABLED)) - return; - if (pa->apic_id >= MAX_LOCAL_APIC) { - printk(KERN_INFO "SRAT: APIC %08x ignored\n", pa->apic_id); - return; - } - - pxm = pa->proximity_domain; - node = setup_node(pxm); - if (node == NUMA_NO_NODE) { - bad_srat(); - return; - } - - apicid_to_node[pa->apic_id] = node; - node_set(node, processor_nodes_parsed); - acpi_numa = 1; - printk(KERN_INFO "SRAT: PXM %u -> APIC %08x -> Node %u\n", - pxm, pa->apic_id, node); + unsigned pxm; + nodeid_t node; + + if ( srat_disabled() ) + return; + if ( pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity) ) + { + bad_srat(); + return; + } + if ( !(pa->flags & ACPI_SRAT_CPU_ENABLED) ) + return; + if ( pa->apic_id >= MAX_LOCAL_APIC ) + { + printk(KERN_INFO "SRAT: APIC %08x ignored\n", pa->apic_id); + return; + } + + pxm = pa->proximity_domain; + node = setup_node(pxm); + if ( node == NUMA_NO_NODE ) + { + bad_srat(); + return; + } + + apicid_to_node[pa->apic_id] = node; + node_set(node, processor_nodes_parsed); + acpi_numa = 1; + printk(KERN_INFO "SRAT: PXM %u -> APIC %08x -> Node %u\n", pxm, pa->apic_id, + node); } /* Callback for Proximity Domain -> LAPIC mapping */ void __init acpi_numa_processor_affinity_init(const struct acpi_srat_cpu_affinity *pa) { - unsigned pxm; - nodeid_t node; - - if (srat_disabled()) - return; - if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) { - bad_srat(); - return; - } - if (!(pa->flags & ACPI_SRAT_CPU_ENABLED)) - return; - pxm = pa->proximity_domain_lo; - if (srat_rev >= 2) { - pxm |= pa->proximity_domain_hi[0] << 8; - pxm |= pa->proximity_domain_hi[1] << 16; - pxm |= pa->proximity_domain_hi[2] << 24; - } - node = setup_node(pxm); - if (node == NUMA_NO_NODE) { - bad_srat(); - return; - } - apicid_to_node[pa->apic_id] = node; - node_set(node, processor_nodes_parsed); - acpi_numa = 1; - printk(KERN_INFO "SRAT: PXM %u -> APIC %02x -> Node %u\n", - pxm, pa->apic_id, node); + unsigned pxm; + nodeid_t node; + + if ( srat_disabled() ) + return; + if ( pa->header.length != sizeof(struct acpi_srat_cpu_affinity) ) + { + bad_srat(); + return; + } + if ( !(pa->flags & ACPI_SRAT_CPU_ENABLED) ) + return; + pxm = pa->proximity_domain_lo; + if ( srat_rev >= 2 ) + { + pxm |= pa->proximity_domain_hi[0] << 8; + pxm |= pa->proximity_domain_hi[1] << 16; + pxm |= pa->proximity_domain_hi[2] << 24; + } + node = setup_node(pxm); + if ( node == NUMA_NO_NODE ) + { + bad_srat(); + return; + } + apicid_to_node[pa->apic_id] = node; + node_set(node, processor_nodes_parsed); + acpi_numa = 1; + printk(KERN_INFO "SRAT: PXM %u -> APIC %02x -> Node %u\n", pxm, pa->apic_id, + node); } /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ void __init acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma) { - u64 start, end; - unsigned pxm; - nodeid_t node; - int i; - - if (srat_disabled()) - return; - if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) { - bad_srat(); - return; - } - if (!(ma->flags & ACPI_SRAT_MEM_ENABLED)) - return; - - 
start = ma->base_address; - end = start + ma->length; - /* Supplement the heuristics in l1tf_calculations(). */ - l1tf_safe_maddr = max(l1tf_safe_maddr, ROUNDUP(end, PAGE_SIZE)); - - if (num_node_memblks >= NR_NODE_MEMBLKS) - { - dprintk(XENLOG_WARNING, + u64 start, end; + unsigned pxm; + nodeid_t node; + int i; + + if ( srat_disabled() ) + return; + if ( ma->header.length != sizeof(struct acpi_srat_mem_affinity) ) + { + bad_srat(); + return; + } + if ( !(ma->flags & ACPI_SRAT_MEM_ENABLED) ) + return; + + start = ma->base_address; + end = start + ma->length; + /* Supplement the heuristics in l1tf_calculations(). */ + l1tf_safe_maddr = max(l1tf_safe_maddr, ROUNDUP(end, PAGE_SIZE)); + + if ( num_node_memblks >= NR_NODE_MEMBLKS ) + { + dprintk(XENLOG_WARNING, "Too many numa entry, try bigger NR_NODE_MEMBLKS \n"); - bad_srat(); - return; - } - - pxm = ma->proximity_domain; - if (srat_rev < 2) - pxm &= 0xff; - node = setup_node(pxm); - if (node == NUMA_NO_NODE) { - bad_srat(); - return; - } - /* It is fine to add this area to the nodes data it will be used later*/ - i = conflicting_memblks(start, end); - if (i < 0) - /* everything fine */; - else if (memblk_nodeid[i] == node) { - bool mismatch = !(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) != - !test_bit(i, memblk_hotplug); - - printk("%sSRAT: PXM %u (%"PRIx64"-%"PRIx64") overlaps with itself (%"PRIx64"-%"PRIx64")\n", - mismatch ? KERN_ERR : KERN_WARNING, pxm, start, end, - node_memblk_range[i].start, node_memblk_range[i].end); - if (mismatch) { - bad_srat(); - return; - } - } else { - printk(KERN_ERR - "SRAT: PXM %u (%"PRIx64"-%"PRIx64") overlaps with PXM %u (%"PRIx64"-%"PRIx64")\n", - pxm, start, end, node_to_pxm(memblk_nodeid[i]), - node_memblk_range[i].start, node_memblk_range[i].end); - bad_srat(); - return; - } - if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) { - struct node *nd = &nodes[node]; - - if (!node_test_and_set(node, memory_nodes_parsed)) { - nd->start = start; - nd->end = end; - } else { - if (start < nd->start) - nd->start = start; - if (nd->end < end) - nd->end = end; - } - } - printk(KERN_INFO "SRAT: Node %u PXM %u %"PRIx64"-%"PRIx64"%s\n", - node, pxm, start, end, - ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE ? " (hotplug)" : ""); - - node_memblk_range[num_node_memblks].start = start; - node_memblk_range[num_node_memblks].end = end; - memblk_nodeid[num_node_memblks] = node; - if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) { - __set_bit(num_node_memblks, memblk_hotplug); - if (end > mem_hotplug) - mem_hotplug = end; - } - num_node_memblks++; + bad_srat(); + return; + } + + pxm = ma->proximity_domain; + if ( srat_rev < 2 ) + pxm &= 0xff; + node = setup_node(pxm); + if ( node == NUMA_NO_NODE ) + { + bad_srat(); + return; + } + /* It is fine to add this area to the nodes data it will be used later*/ + i = conflicting_memblks(start, end); + if ( i < 0 ) + /* everything fine */; + else if ( memblk_nodeid[i] == node ) + { + bool mismatch = !(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) != + !test_bit(i, memblk_hotplug); + + printk("%sSRAT: PXM %u (%" PRIx64 "-%" PRIx64 + ") overlaps with itself (%" PRIx64 "-%" PRIx64 ")\n", + mismatch ? 
KERN_ERR : KERN_WARNING, pxm, start, end, + node_memblk_range[i].start, node_memblk_range[i].end); + if ( mismatch ) + { + bad_srat(); + return; + } + } + else + { + printk(KERN_ERR "SRAT: PXM %u (%" PRIx64 "-%" PRIx64 + ") overlaps with PXM %u (%" PRIx64 "-%" PRIx64 ")\n", + pxm, start, end, node_to_pxm(memblk_nodeid[i]), + node_memblk_range[i].start, node_memblk_range[i].end); + bad_srat(); + return; + } + if ( !(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ) + { + struct node *nd = &nodes[node]; + + if ( !node_test_and_set(node, memory_nodes_parsed) ) + { + nd->start = start; + nd->end = end; + } + else + { + if ( start < nd->start ) + nd->start = start; + if ( nd->end < end ) + nd->end = end; + } + } + printk(KERN_INFO "SRAT: Node %u PXM %u %" PRIx64 "-%" PRIx64 "%s\n", node, + pxm, start, end, + ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE ? " (hotplug)" : ""); + + node_memblk_range[num_node_memblks].start = start; + node_memblk_range[num_node_memblks].end = end; + memblk_nodeid[num_node_memblks] = node; + if ( ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE ) + { + __set_bit(num_node_memblks, memblk_hotplug); + if ( end > mem_hotplug ) + mem_hotplug = end; + } + num_node_memblks++; } /* Sanity check to catch more bad SRATs (they are amazingly common). Make sure the PXMs cover all memory. */ static int __init nodes_cover_memory(void) { - int i; - - for (i = 0; i < e820.nr_map; i++) { - int j, found; - unsigned long long start, end; - - if (e820.map[i].type != E820_RAM) { - continue; - } - - start = e820.map[i].addr; - end = e820.map[i].addr + e820.map[i].size; - - do { - found = 0; - for_each_node_mask(j, memory_nodes_parsed) - if (start < nodes[j].end - && end > nodes[j].start) { - if (start >= nodes[j].start) { - start = nodes[j].end; - found = 1; - } - if (end <= nodes[j].end) { - end = nodes[j].start; - found = 1; - } - } - } while (found && start < end); - - if (start < end) { - printk(KERN_ERR "SRAT: No PXM for e820 range: " - "%016Lx - %016Lx\n", start, end); - return 0; - } - } - return 1; + int i; + + for ( i = 0; i < e820.nr_map; i++ ) + { + int j, found; + unsigned long long start, end; + + if ( e820.map[i].type != E820_RAM ) + { + continue; + } + + start = e820.map[i].addr; + end = e820.map[i].addr + e820.map[i].size; + + do { + found = 0; + for_each_node_mask(j, + memory_nodes_parsed) if ( start < nodes[j].end && + end > nodes[j].start ) + { + if ( start >= nodes[j].start ) + { + start = nodes[j].end; + found = 1; + } + if ( end <= nodes[j].end ) + { + end = nodes[j].start; + found = 1; + } + } + } while ( found && start < end ); + + if ( start < end ) + { + printk(KERN_ERR "SRAT: No PXM for e820 range: " + "%016Lx - %016Lx\n", + start, end); + return 0; + } + } + return 1; } -void __init acpi_numa_arch_fixup(void) {} +void __init acpi_numa_arch_fixup(void) +{ +} static u64 __initdata srat_region_mask; static int __init srat_parse_region(struct acpi_subtable_header *header, - const unsigned long end) + const unsigned long end) { - struct acpi_srat_mem_affinity *ma; + struct acpi_srat_mem_affinity *ma; - if (!header) - return -EINVAL; + if ( !header ) + return -EINVAL; - ma = container_of(header, struct acpi_srat_mem_affinity, header); + ma = container_of(header, struct acpi_srat_mem_affinity, header); - if (!ma->length || - !(ma->flags & ACPI_SRAT_MEM_ENABLED) || - (ma->flags & ACPI_SRAT_MEM_NON_VOLATILE)) - return 0; + if ( !ma->length || !(ma->flags & ACPI_SRAT_MEM_ENABLED) || + (ma->flags & ACPI_SRAT_MEM_NON_VOLATILE) ) + return 0; - if (numa_off) - printk(KERN_INFO "SRAT: 
%013"PRIx64"-%013"PRIx64"\n", - ma->base_address, ma->base_address + ma->length - 1); + if ( numa_off ) + printk(KERN_INFO "SRAT: %013" PRIx64 "-%013" PRIx64 "\n", + ma->base_address, ma->base_address + ma->length - 1); - srat_region_mask |= ma->base_address | - pdx_region_mask(ma->base_address, ma->length); + srat_region_mask |= + ma->base_address | pdx_region_mask(ma->base_address, ma->length); - return 0; + return 0; } void __init srat_parse_regions(u64 addr) { - u64 mask; - unsigned int i; + u64 mask; + unsigned int i; - if (acpi_disabled || acpi_numa < 0 || - acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) - return; + if ( acpi_disabled || acpi_numa < 0 || + acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat) ) + return; - srat_region_mask = pdx_init_mask(addr); - acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, - srat_parse_region, 0); + srat_region_mask = pdx_init_mask(addr); + acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, srat_parse_region, 0); - for (mask = srat_region_mask, i = 0; mask && i < e820.nr_map; i++) { - if (e820.map[i].type != E820_RAM) - continue; + for ( mask = srat_region_mask, i = 0; mask && i < e820.nr_map; i++ ) + { + if ( e820.map[i].type != E820_RAM ) + continue; - if (~mask & pdx_region_mask(e820.map[i].addr, e820.map[i].size)) - mask = 0; - } + if ( ~mask & pdx_region_mask(e820.map[i].addr, e820.map[i].size) ) + mask = 0; + } - pfn_pdx_hole_setup(mask >> PAGE_SHIFT); + pfn_pdx_hole_setup(mask >> PAGE_SHIFT); } /* Use the information discovered above to actually set up the nodes. */ int __init acpi_scan_nodes(u64 start, u64 end) { - int i; - nodemask_t all_nodes_parsed; - - /* First clean up the node list */ - for (i = 0; i < MAX_NUMNODES; i++) - cutoff_node(i, start, end); - - if (acpi_numa <= 0) - return -1; - - if (!nodes_cover_memory()) { - bad_srat(); - return -1; - } - - memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks, - memblk_nodeid); - - if (memnode_shift < 0) { - printk(KERN_ERR - "SRAT: No NUMA node hash function found. Contact maintainer\n"); - bad_srat(); - return -1; - } - - nodes_or(all_nodes_parsed, memory_nodes_parsed, processor_nodes_parsed); - - /* Finally register nodes */ - for_each_node_mask(i, all_nodes_parsed) - { - u64 size = nodes[i].end - nodes[i].start; - if ( size == 0 ) - printk(KERN_WARNING "SRAT: Node %u has no memory. " - "BIOS Bug or mis-configured hardware?\n", i); - - setup_node_bootmem(i, nodes[i].start, nodes[i].end); - } - for (i = 0; i < nr_cpu_ids; i++) { - if (cpu_to_node[i] == NUMA_NO_NODE) - continue; - if (!node_isset(cpu_to_node[i], processor_nodes_parsed)) - numa_set_node(i, NUMA_NO_NODE); - } - numa_init_array(); - return 0; + int i; + nodemask_t all_nodes_parsed; + + /* First clean up the node list */ + for ( i = 0; i < MAX_NUMNODES; i++ ) + cutoff_node(i, start, end); + + if ( acpi_numa <= 0 ) + return -1; + + if ( !nodes_cover_memory() ) + { + bad_srat(); + return -1; + } + + memnode_shift = + compute_hash_shift(node_memblk_range, num_node_memblks, memblk_nodeid); + + if ( memnode_shift < 0 ) + { + printk(KERN_ERR + "SRAT: No NUMA node hash function found. Contact maintainer\n"); + bad_srat(); + return -1; + } + + nodes_or(all_nodes_parsed, memory_nodes_parsed, processor_nodes_parsed); + + /* Finally register nodes */ + for_each_node_mask(i, all_nodes_parsed) + { + u64 size = nodes[i].end - nodes[i].start; + if ( size == 0 ) + printk(KERN_WARNING "SRAT: Node %u has no memory. 
" + "BIOS Bug or mis-configured hardware?\n", + i); + + setup_node_bootmem(i, nodes[i].start, nodes[i].end); + } + for ( i = 0; i < nr_cpu_ids; i++ ) + { + if ( cpu_to_node[i] == NUMA_NO_NODE ) + continue; + if ( !node_isset(cpu_to_node[i], processor_nodes_parsed) ) + numa_set_node(i, NUMA_NO_NODE); + } + numa_init_array(); + return 0; } static unsigned node_to_pxm(nodeid_t n) { - unsigned i; - - if ((n < ARRAY_SIZE(pxm2node)) && (pxm2node[n].node == n)) - return pxm2node[n].pxm; - for (i = 0; i < ARRAY_SIZE(pxm2node); i++) - if (pxm2node[i].node == n) - return pxm2node[i].pxm; - return 0; + unsigned i; + + if ( (n < ARRAY_SIZE(pxm2node)) && (pxm2node[n].node == n) ) + return pxm2node[n].pxm; + for ( i = 0; i < ARRAY_SIZE(pxm2node); i++ ) + if ( pxm2node[i].node == n ) + return pxm2node[i].pxm; + return 0; } u8 __node_distance(nodeid_t a, nodeid_t b) { - unsigned index; - u8 slit_val; - - if (!acpi_slit) - return a == b ? 10 : 20; - index = acpi_slit->locality_count * node_to_pxm(a); - slit_val = acpi_slit->entry[index + node_to_pxm(b)]; - - /* ACPI defines 0xff as an unreachable node and 0-9 are undefined */ - if ((slit_val == 0xff) || (slit_val <= 9)) - return NUMA_NO_DISTANCE; - else - return slit_val; + unsigned index; + u8 slit_val; + + if ( !acpi_slit ) + return a == b ? 10 : 20; + index = acpi_slit->locality_count * node_to_pxm(a); + slit_val = acpi_slit->entry[index + node_to_pxm(b)]; + + /* ACPI defines 0xff as an unreachable node and 0-9 are undefined */ + if ( (slit_val == 0xff) || (slit_val <= 9) ) + return NUMA_NO_DISTANCE; + else + return slit_val; } EXPORT_SYMBOL(__node_distance); diff --git a/xen/arch/x86/string.c b/xen/arch/x86/string.c index e2f84638c4..dc43ff7931 100644 --- a/xen/arch/x86/string.c +++ b/xen/arch/x86/string.c @@ -11,13 +11,13 @@ void *(memcpy)(void *dest, const void *src, size_t n) { long d0, d1, d2; - asm volatile ( - " rep ; movs"__OS" ; " - " mov %k4,%k3 ; " - " rep ; movsb " - : "=&c" (d0), "=&D" (d1), "=&S" (d2) - : "0" (n/BYTES_PER_LONG), "r" (n%BYTES_PER_LONG), "1" (dest), "2" (src) - : "memory" ); + asm volatile(" rep ; movs" __OS " ; " + " mov %k4,%k3 ; " + " rep ; movsb " + : "=&c"(d0), "=&D"(d1), "=&S"(d2) + : "0"(n / BYTES_PER_LONG), "r"(n % BYTES_PER_LONG), "1"(dest), + "2"(src) + : "memory"); return dest; } @@ -26,11 +26,10 @@ void *(memset)(void *s, int c, size_t n) { long d0, d1; - asm volatile ( - "rep stosb" - : "=&c" (d0), "=&D" (d1) - : "a" (c), "1" (s), "0" (n) - : "memory"); + asm volatile("rep stosb" + : "=&c"(d0), "=&D"(d1) + : "a"(c), "1"(s), "0"(n) + : "memory"); return s; } @@ -45,13 +44,13 @@ void *(memmove)(void *dest, const void *src, size_t n) if ( dest < src ) return memcpy(dest, src, n); - asm volatile ( - " std ; " - " rep movsb ; " - " cld " - : "=&c" (d0), "=&S" (d1), "=&D" (d2) - : "0" (n), "1" (n-1+(const char *)src), "2" (n-1+(char *)dest) - : "memory"); + asm volatile(" std ; " + " rep movsb ; " + " cld " + : "=&c"(d0), "=&S"(d1), "=&D"(d2) + : "0"(n), "1"(n - 1 + (const char *)src), + "2"(n - 1 + (char *)dest) + : "memory"); return dest; } diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c index 1916a3de1b..162f7c3772 100644 --- a/xen/arch/x86/sysctl.c +++ b/xen/arch/x86/sysctl.c @@ -1,8 +1,8 @@ /****************************************************************************** * Arch-specific sysctl.c - * + * * System management operations. For use by node control stack. 
- * + * * Copyright (c) 2002-2006, K Fraser */ @@ -34,33 +34,40 @@ #include const struct cpu_policy system_policies[] = { - [ XEN_SYSCTL_cpu_policy_raw ] = { - &raw_cpuid_policy, - &raw_msr_policy, - }, - [ XEN_SYSCTL_cpu_policy_host ] = { - &host_cpuid_policy, - &host_msr_policy, - }, - [ XEN_SYSCTL_cpu_policy_pv_max ] = { - &pv_max_cpuid_policy, - &pv_max_msr_policy, - }, - [ XEN_SYSCTL_cpu_policy_hvm_max ] = { - &hvm_max_cpuid_policy, - &hvm_max_msr_policy, - }, - [ XEN_SYSCTL_cpu_policy_pv_default ] = { - &pv_max_cpuid_policy, - &pv_max_msr_policy, - }, - [ XEN_SYSCTL_cpu_policy_hvm_default ] = { - &hvm_max_cpuid_policy, - &hvm_max_msr_policy, - }, + [XEN_SYSCTL_cpu_policy_raw] = + { + &raw_cpuid_policy, + &raw_msr_policy, + }, + [XEN_SYSCTL_cpu_policy_host] = + { + &host_cpuid_policy, + &host_msr_policy, + }, + [XEN_SYSCTL_cpu_policy_pv_max] = + { + &pv_max_cpuid_policy, + &pv_max_msr_policy, + }, + [XEN_SYSCTL_cpu_policy_hvm_max] = + { + &hvm_max_cpuid_policy, + &hvm_max_msr_policy, + }, + [XEN_SYSCTL_cpu_policy_pv_default] = + { + &pv_max_cpuid_policy, + &pv_max_msr_policy, + }, + [XEN_SYSCTL_cpu_policy_hvm_default] = + { + &hvm_max_cpuid_policy, + &hvm_max_msr_policy, + }, }; -struct l3_cache_info { +struct l3_cache_info +{ int ret; unsigned long size; }; @@ -87,8 +94,7 @@ long cpu_up_helper(void *data) ret = cpu_up(cpu); } - if ( !ret && !opt_smt && - cpu_data[cpu].compute_unit_id == INVALID_CUID && + if ( !ret && !opt_smt && cpu_data[cpu].compute_unit_id == INVALID_CUID && cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) > 1 ) { ret = cpu_down_helper(data); @@ -126,33 +132,32 @@ void arch_do_physinfo(struct xen_sysctl_physinfo *pi) pi->capabilities |= XEN_SYSCTL_PHYSCAP_directio; } -long arch_do_sysctl( - struct xen_sysctl *sysctl, XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) +long arch_do_sysctl(struct xen_sysctl *sysctl, + XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) { long ret = 0; - switch ( sysctl->cmd ) + switch (sysctl->cmd) { - case XEN_SYSCTL_cpu_hotplug: { unsigned int cpu = sysctl->u.cpu_hotplug.cpu; - switch ( sysctl->u.cpu_hotplug.op ) + switch (sysctl->u.cpu_hotplug.op) { case XEN_SYSCTL_CPU_HOTPLUG_ONLINE: ret = xsm_resource_plug_core(XSM_HOOK); if ( ret ) break; - ret = continue_hypercall_on_cpu( - 0, cpu_up_helper, (void *)(unsigned long)cpu); + ret = continue_hypercall_on_cpu(0, cpu_up_helper, + (void *)(unsigned long)cpu); break; case XEN_SYSCTL_CPU_HOTPLUG_OFFLINE: ret = xsm_resource_unplug_core(XSM_HOOK); if ( ret ) break; - ret = continue_hypercall_on_cpu( - 0, cpu_down_helper, (void *)(unsigned long)cpu); + ret = continue_hypercall_on_cpu(0, cpu_down_helper, + (void *)(unsigned long)cpu); break; default: ret = -EINVAL; @@ -168,7 +173,7 @@ long arch_do_sysctl( if ( sysctl->u.psr_cmt_op.flags != 0 ) return -EINVAL; - switch ( sysctl->u.psr_cmt_op.cmd ) + switch (sysctl->u.psr_cmt_op.cmd) { case XEN_SYSCTL_PSR_CMT_enabled: sysctl->u.psr_cmt_op.u.data = @@ -217,56 +222,50 @@ long arch_do_sysctl( case XEN_SYSCTL_psr_alloc: { - uint32_t data[PSR_INFO_ARRAY_SIZE] = { }; + uint32_t data[PSR_INFO_ARRAY_SIZE] = {}; - switch ( sysctl->u.psr_alloc.cmd ) + switch (sysctl->u.psr_alloc.cmd) { case XEN_SYSCTL_PSR_get_l3_info: - ret = psr_get_info(sysctl->u.psr_alloc.target, - PSR_TYPE_L3_CBM, data, ARRAY_SIZE(data)); + ret = psr_get_info(sysctl->u.psr_alloc.target, PSR_TYPE_L3_CBM, + data, ARRAY_SIZE(data)); if ( ret ) break; - sysctl->u.psr_alloc.u.cat_info.cos_max = - data[PSR_INFO_IDX_COS_MAX]; + sysctl->u.psr_alloc.u.cat_info.cos_max = data[PSR_INFO_IDX_COS_MAX]; 
sysctl->u.psr_alloc.u.cat_info.cbm_len = - data[PSR_INFO_IDX_CAT_CBM_LEN]; - sysctl->u.psr_alloc.u.cat_info.flags = - data[PSR_INFO_IDX_CAT_FLAGS]; + data[PSR_INFO_IDX_CAT_CBM_LEN]; + sysctl->u.psr_alloc.u.cat_info.flags = data[PSR_INFO_IDX_CAT_FLAGS]; if ( __copy_field_to_guest(u_sysctl, sysctl, u.psr_alloc) ) ret = -EFAULT; break; case XEN_SYSCTL_PSR_get_l2_info: - ret = psr_get_info(sysctl->u.psr_alloc.target, - PSR_TYPE_L2_CBM, data, ARRAY_SIZE(data)); + ret = psr_get_info(sysctl->u.psr_alloc.target, PSR_TYPE_L2_CBM, + data, ARRAY_SIZE(data)); if ( ret ) break; - sysctl->u.psr_alloc.u.cat_info.cos_max = - data[PSR_INFO_IDX_COS_MAX]; + sysctl->u.psr_alloc.u.cat_info.cos_max = data[PSR_INFO_IDX_COS_MAX]; sysctl->u.psr_alloc.u.cat_info.cbm_len = - data[PSR_INFO_IDX_CAT_CBM_LEN]; - sysctl->u.psr_alloc.u.cat_info.flags = - data[PSR_INFO_IDX_CAT_FLAGS]; + data[PSR_INFO_IDX_CAT_CBM_LEN]; + sysctl->u.psr_alloc.u.cat_info.flags = data[PSR_INFO_IDX_CAT_FLAGS]; if ( __copy_field_to_guest(u_sysctl, sysctl, u.psr_alloc) ) ret = -EFAULT; break; case XEN_SYSCTL_PSR_get_mba_info: - ret = psr_get_info(sysctl->u.psr_alloc.target, - PSR_TYPE_MBA_THRTL, data, ARRAY_SIZE(data)); + ret = psr_get_info(sysctl->u.psr_alloc.target, PSR_TYPE_MBA_THRTL, + data, ARRAY_SIZE(data)); if ( ret ) break; - sysctl->u.psr_alloc.u.mba_info.cos_max = - data[PSR_INFO_IDX_COS_MAX]; + sysctl->u.psr_alloc.u.mba_info.cos_max = data[PSR_INFO_IDX_COS_MAX]; sysctl->u.psr_alloc.u.mba_info.thrtl_max = - data[PSR_INFO_IDX_MBA_THRTL_MAX]; - sysctl->u.psr_alloc.u.mba_info.flags = - data[PSR_INFO_IDX_MBA_FLAGS]; + data[PSR_INFO_IDX_MBA_THRTL_MAX]; + sysctl->u.psr_alloc.u.mba_info.flags = data[PSR_INFO_IDX_MBA_FLAGS]; if ( __copy_field_to_guest(u_sysctl, sysctl, u.psr_alloc) ) ret = -EFAULT; @@ -281,17 +280,18 @@ long arch_do_sysctl( case XEN_SYSCTL_get_cpu_levelling_caps: sysctl->u.cpu_levelling_caps.caps = levelling_caps; - if ( __copy_field_to_guest(u_sysctl, sysctl, u.cpu_levelling_caps.caps) ) + if ( __copy_field_to_guest(u_sysctl, sysctl, + u.cpu_levelling_caps.caps) ) ret = -EFAULT; break; case XEN_SYSCTL_get_cpu_featureset: { static const struct cpuid_policy *const policy_table[] = { - [XEN_SYSCTL_cpu_featureset_raw] = &raw_cpuid_policy, + [XEN_SYSCTL_cpu_featureset_raw] = &raw_cpuid_policy, [XEN_SYSCTL_cpu_featureset_host] = &host_cpuid_policy, - [XEN_SYSCTL_cpu_featureset_pv] = &pv_max_cpuid_policy, - [XEN_SYSCTL_cpu_featureset_hvm] = &hvm_max_cpuid_policy, + [XEN_SYSCTL_cpu_featureset_pv] = &pv_max_cpuid_policy, + [XEN_SYSCTL_cpu_featureset_hvm] = &hvm_max_cpuid_policy, }; const struct cpuid_policy *p = NULL; uint32_t featureset[FSCAPINTS]; @@ -322,8 +322,8 @@ long arch_do_sysctl( cpuid_policy_to_featureset(p, featureset); /* Copy the requested featureset into place. */ - if ( !ret && copy_to_guest(sysctl->u.cpu_featureset.features, - featureset, nr) ) + if ( !ret && + copy_to_guest(sysctl->u.cpu_featureset.features, featureset, nr) ) ret = -EFAULT; /* Inform the caller of how many features we wrote. */ @@ -350,21 +350,18 @@ long arch_do_sysctl( ret = -EINVAL; break; } - policy = &system_policies[ - array_index_nospec(sysctl->u.cpu_policy.index, - ARRAY_SIZE(system_policies))]; + policy = &system_policies[array_index_nospec( + sysctl->u.cpu_policy.index, ARRAY_SIZE(system_policies))]; /* Process the CPUID leaves. 
*/ if ( guest_handle_is_null(sysctl->u.cpu_policy.cpuid_policy) ) sysctl->u.cpu_policy.nr_leaves = CPUID_MAX_SERIALISED_LEAVES; else if ( (ret = x86_cpuid_copy_to_buffer( - policy->cpuid, - sysctl->u.cpu_policy.cpuid_policy, + policy->cpuid, sysctl->u.cpu_policy.cpuid_policy, &sysctl->u.cpu_policy.nr_leaves)) ) break; - if ( __copy_field_to_guest(u_sysctl, sysctl, - u.cpu_policy.nr_leaves) ) + if ( __copy_field_to_guest(u_sysctl, sysctl, u.cpu_policy.nr_leaves) ) { ret = -EFAULT; break; @@ -374,13 +371,11 @@ long arch_do_sysctl( if ( guest_handle_is_null(sysctl->u.cpu_policy.msr_policy) ) sysctl->u.cpu_policy.nr_msrs = MSR_MAX_SERIALISED_ENTRIES; else if ( (ret = x86_msr_copy_to_buffer( - policy->msr, - sysctl->u.cpu_policy.msr_policy, + policy->msr, sysctl->u.cpu_policy.msr_policy, &sysctl->u.cpu_policy.nr_msrs)) ) break; - if ( __copy_field_to_guest(u_sysctl, sysctl, - u.cpu_policy.nr_msrs) ) + if ( __copy_field_to_guest(u_sysctl, sysctl, u.cpu_policy.nr_msrs) ) ret = -EFAULT; break; diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c index f3fdee4d39..93e1bd44d2 100644 --- a/xen/arch/x86/tboot.c +++ b/xen/arch/x86/tboot.c @@ -35,38 +35,39 @@ static uint64_t __initdata sinit_base, __initdata sinit_size; * TXT configuration registers (offsets from TXT_{PUB, PRIV}_CONFIG_REGS_BASE) */ -#define TXT_PUB_CONFIG_REGS_BASE 0xfed30000 -#define TXT_PRIV_CONFIG_REGS_BASE 0xfed20000 +#define TXT_PUB_CONFIG_REGS_BASE 0xfed30000 +#define TXT_PRIV_CONFIG_REGS_BASE 0xfed20000 /* # pages for each config regs space - used by fixmap */ -#define NR_TXT_CONFIG_PAGES ((TXT_PUB_CONFIG_REGS_BASE - \ - TXT_PRIV_CONFIG_REGS_BASE) >> PAGE_SHIFT) +#define NR_TXT_CONFIG_PAGES \ + ((TXT_PUB_CONFIG_REGS_BASE - TXT_PRIV_CONFIG_REGS_BASE) >> PAGE_SHIFT) /* offsets from pub/priv config space */ -#define TXTCR_SINIT_BASE 0x0270 -#define TXTCR_SINIT_SIZE 0x0278 -#define TXTCR_HEAP_BASE 0x0300 -#define TXTCR_HEAP_SIZE 0x0308 - -#define SHA1_SIZE 20 -typedef uint8_t sha1_hash_t[SHA1_SIZE]; - -typedef struct __packed { - uint32_t version; /* currently 6 */ - sha1_hash_t bios_acm_id; - uint32_t edx_senter_flags; - uint64_t mseg_valid; - sha1_hash_t sinit_hash; - sha1_hash_t mle_hash; - sha1_hash_t stm_hash; - sha1_hash_t lcp_policy_hash; - uint32_t lcp_policy_control; - uint32_t rlp_wakeup_addr; - uint32_t reserved; - uint32_t num_mdrs; - uint32_t mdrs_off; - uint32_t num_vtd_dmars; - uint32_t vtd_dmars_off; +#define TXTCR_SINIT_BASE 0x0270 +#define TXTCR_SINIT_SIZE 0x0278 +#define TXTCR_HEAP_BASE 0x0300 +#define TXTCR_HEAP_SIZE 0x0308 + +#define SHA1_SIZE 20 +typedef uint8_t sha1_hash_t[SHA1_SIZE]; + +typedef struct __packed +{ + uint32_t version; /* currently 6 */ + sha1_hash_t bios_acm_id; + uint32_t edx_senter_flags; + uint64_t mseg_valid; + sha1_hash_t sinit_hash; + sha1_hash_t mle_hash; + sha1_hash_t stm_hash; + sha1_hash_t lcp_policy_hash; + uint32_t lcp_policy_control; + uint32_t rlp_wakeup_addr; + uint32_t reserved; + uint32_t num_mdrs; + uint32_t mdrs_off; + uint32_t num_vtd_dmars; + uint32_t vtd_dmars_off; } sinit_mle_data_t; static void __init tboot_copy_memory(unsigned char *va, uint32_t size, @@ -142,11 +143,12 @@ void __init tboot_probe(void) /* definitions from xen/drivers/passthrough/vtd/iommu.h * used to walk through vtd page tables */ #define LEVEL_STRIDE (9) -#define PTE_NUM (1<arch.s3_integrity ) continue; printk("MACing Domain %u\n", d->domain_id); spin_lock(&d->page_alloc_lock); - page_list_for_each(page, &d->page_list) + page_list_for_each (page, &d->page_list) { void *pg = 
__map_domain_page(page); vmac_update(pg, PAGE_SIZE, &ctx); @@ -282,10 +284,9 @@ static void tboot_gen_xenheap_integrity(const uint8_t key[TB_KEY_SIZE], continue; if ( is_xen_fixed_mfn(mfn) ) continue; /* skip Xen */ - if ( (mfn >= PFN_DOWN(g_tboot_shared->tboot_base - 3 * PAGE_SIZE)) - && (mfn < PFN_UP(g_tboot_shared->tboot_base - + g_tboot_shared->tboot_size - + 3 * PAGE_SIZE)) ) + if ( (mfn >= PFN_DOWN(g_tboot_shared->tboot_base - 3 * PAGE_SIZE)) && + (mfn < PFN_UP(g_tboot_shared->tboot_base + + g_tboot_shared->tboot_size + 3 * PAGE_SIZE)) ) continue; /* skip tboot and its page tables */ if ( is_page_in_use(page) && is_xen_heap_page(page) ) @@ -293,7 +294,8 @@ static void tboot_gen_xenheap_integrity(const uint8_t key[TB_KEY_SIZE], void *pg; if ( mfn_in_guarded_stack(mfn) ) - continue; /* skip guard stack, see memguard_guard_stack() in mm.c */ + continue; /* skip guard stack, see memguard_guard_stack() in + mm.c */ pg = mfn_to_virt(mfn); vmac_update((uint8_t *)pg, PAGE_SIZE, &ctx); @@ -309,24 +311,26 @@ static void tboot_gen_frametable_integrity(const uint8_t key[TB_KEY_SIZE], vmac_t *mac) { unsigned int sidx, eidx, nidx; - unsigned int max_idx = (max_pdx + PDX_GROUP_COUNT - 1)/PDX_GROUP_COUNT; + unsigned int max_idx = (max_pdx + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT; uint8_t nonce[16] = {}; vmac_ctx_t ctx; vmac_set_key((uint8_t *)key, &ctx); - for ( sidx = 0; ; sidx = nidx ) + for ( sidx = 0;; sidx = nidx ) { eidx = find_next_zero_bit(pdx_group_valid, max_idx, sidx); nidx = find_next_bit(pdx_group_valid, max_idx, eidx); if ( nidx >= max_idx ) break; vmac_update((uint8_t *)pdx_to_page(sidx * PDX_GROUP_COUNT), - pdx_to_page(eidx * PDX_GROUP_COUNT) - - pdx_to_page(sidx * PDX_GROUP_COUNT), &ctx); + pdx_to_page(eidx * PDX_GROUP_COUNT) - + pdx_to_page(sidx * PDX_GROUP_COUNT), + &ctx); } vmac_update((uint8_t *)pdx_to_page(sidx * PDX_GROUP_COUNT), - pdx_to_page(max_pdx - 1) + 1 - - pdx_to_page(sidx * PDX_GROUP_COUNT), &ctx); + pdx_to_page(max_pdx - 1) + 1 - + pdx_to_page(sidx * PDX_GROUP_COUNT), + &ctx); *mac = vmac(NULL, 0, nonce, NULL, &ctx); @@ -351,7 +355,7 @@ void tboot_shutdown(uint32_t shutdown_type) __PAGE_HYPERVISOR); if ( err != 0 ) { - printk("error (%#x) mapping tboot pages (mfns) @ %"PRI_mfn", %#x\n", + printk("error (%#x) mapping tboot pages (mfns) @ %" PRI_mfn ", %#x\n", err, mfn_x(map_base), map_size); return; } @@ -370,16 +374,17 @@ void tboot_shutdown(uint32_t shutdown_type) g_tboot_shared->num_mac_regions = 3; /* S3 resume code (and other real mode trampoline code) */ g_tboot_shared->mac_regions[0].start = bootsym_phys(trampoline_start); - g_tboot_shared->mac_regions[0].size = bootsym_phys(trampoline_end) - - bootsym_phys(trampoline_start); + g_tboot_shared->mac_regions[0].size = + bootsym_phys(trampoline_end) - bootsym_phys(trampoline_start); /* hypervisor .text + .rodata */ g_tboot_shared->mac_regions[1].start = (uint64_t)__pa(&_stext); - g_tboot_shared->mac_regions[1].size = __pa(&__2M_rodata_end) - - __pa(&_stext); + g_tboot_shared->mac_regions[1].size = + __pa(&__2M_rodata_end) - __pa(&_stext); /* hypervisor .data + .bss */ - g_tboot_shared->mac_regions[2].start = (uint64_t)__pa(&__2M_rwdata_start); - g_tboot_shared->mac_regions[2].size = __pa(&__2M_rwdata_end) - - __pa(&__2M_rwdata_start); + g_tboot_shared->mac_regions[2].start = + (uint64_t)__pa(&__2M_rwdata_start); + g_tboot_shared->mac_regions[2].size = + __pa(&__2M_rwdata_end) - __pa(&__2M_rwdata_start); /* * MAC domains and other Xen memory @@ -398,7 +403,7 @@ void tboot_shutdown(uint32_t shutdown_type) if ( 
idle_vcpu[0] != INVALID_VCPU ) write_ptbase(idle_vcpu[0]); - ((void(*)(void))(unsigned long)g_tboot_shared->shutdown_entry)(); + ((void (*)(void))(unsigned long)g_tboot_shared->shutdown_entry)(); BUG(); /* should not reach here */ } @@ -419,24 +424,24 @@ int __init tboot_protect_mem_regions(void) if ( txt_heap_base == 0 ) return 0; rc = e820_change_range_type(&e820, txt_heap_base, - txt_heap_base + txt_heap_size, - E820_RESERVED, E820_UNUSABLE); + txt_heap_base + txt_heap_size, E820_RESERVED, + E820_UNUSABLE); if ( !rc ) return 0; /* SINIT */ if ( sinit_base == 0 ) return 0; - rc = e820_change_range_type(&e820, sinit_base, - sinit_base + sinit_size, + rc = e820_change_range_type(&e820, sinit_base, sinit_base + sinit_size, E820_RESERVED, E820_UNUSABLE); if ( !rc ) return 0; /* TXT Private Space */ rc = e820_change_range_type(&e820, TXT_PRIV_CONFIG_REGS_BASE, - TXT_PRIV_CONFIG_REGS_BASE + NR_TXT_CONFIG_PAGES * PAGE_SIZE, - E820_RESERVED, E820_UNUSABLE); + TXT_PRIV_CONFIG_REGS_BASE + + NR_TXT_CONFIG_PAGES * PAGE_SIZE, + E820_RESERVED, E820_UNUSABLE); if ( !rc ) return 0; @@ -523,15 +528,21 @@ void tboot_s3_error(int error) BUG_ON(!error || !tboot_in_measured_env()); - switch ( error ) + switch (error) { - case -1: what = "Xen heap"; break; - case -2: what = "frame table"; break; - case -3: what = "domains"; break; + case -1: + what = "Xen heap"; + break; + case -2: + what = "frame table"; + break; + case -3: + what = "domains"; + break; } - printk("MAC for %s before S3 is: 0x%08"PRIx64"\n", what, orig_mac); - printk("MAC for %s after S3 is: 0x%08"PRIx64"\n", what, resume_mac); + printk("MAC for %s before S3 is: 0x%08" PRIx64 "\n", what, orig_mac); + printk("MAC for %s after S3 is: 0x%08" PRIx64 "\n", what, resume_mac); panic("Memory integrity was lost on resume (%d)\n", error); } diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c index 9a6ea8ffcb..98d02d51ec 100644 --- a/xen/arch/x86/time.c +++ b/xen/arch/x86/time.c @@ -1,10 +1,10 @@ /****************************************************************************** * arch/x86/time.c - * + * * Per-CPU time calibration and management. - * + * * Copyright (c) 2002-2005, K A Fraser - * + * * Portions from Linux are: * Copyright (c) 1991, 1992, 1995 Linus Torvalds */ @@ -42,22 +42,25 @@ static char __initdata opt_clocksource[10]; string_param("clocksource", opt_clocksource); -unsigned long __read_mostly cpu_khz; /* CPU clock frequency in kHz. */ +unsigned long __read_mostly cpu_khz; /* CPU clock frequency in kHz. */ DEFINE_SPINLOCK(rtc_lock); unsigned long pit0_ticks; -struct cpu_time_stamp { +struct cpu_time_stamp +{ u64 local_tsc; s_time_t local_stime; s_time_t master_stime; }; -struct cpu_time { +struct cpu_time +{ struct cpu_time_stamp stamp; struct time_scale tsc_scale; }; -struct platform_timesource { +struct platform_timesource +{ char *id; char *name; u64 frequency; @@ -77,8 +80,8 @@ static struct timer calibration_timer; * We simulate a 32-bit platform timer from the 16-bit PIT ch2 counter. * Otherwise overflow happens too quickly (~50ms) for us to guarantee that * softirq handling will happen in time. 
- * - * The pit_lock protects the 16- and 32-bit stamp fields as well as the + * + * The pit_lock protects the 16- and 32-bit stamp fields as well as the */ static DEFINE_SPINLOCK(pit_lock); static u16 pit_stamp16; @@ -99,10 +102,9 @@ static inline u32 div_frac(u32 dividend, u32 divisor) { u32 quotient, remainder; ASSERT(dividend < divisor); - asm ( - "divl %4" - : "=a" (quotient), "=d" (remainder) - : "0" (0), "1" (dividend), "r" (divisor) ); + asm("divl %4" + : "=a"(quotient), "=d"(remainder) + : "0"(0), "1"(dividend), "r"(divisor)); return quotient; } @@ -113,10 +115,9 @@ static inline u32 div_frac(u32 dividend, u32 divisor) static inline u32 mul_frac(u32 multiplicand, u32 multiplier) { u32 product_int, product_frac; - asm ( - "mul %3" - : "=a" (product_frac), "=d" (product_int) - : "0" (multiplicand), "r" (multiplier) ); + asm("mul %3" + : "=a"(product_frac), "=d"(product_int) + : "0"(multiplicand), "r"(multiplier)); return product_int; } @@ -133,10 +134,9 @@ u64 scale_delta(u64 delta, const struct time_scale *scale) else delta <<= scale->shift; - asm ( - "mulq %2 ; shrd $32,%1,%0" - : "=a" (product), "=d" (delta) - : "rm" (delta), "0" ((u64)scale->mul_frac) ); + asm("mulq %2 ; shrd $32,%1,%0" + : "=a"(product), "=d"(delta) + : "rm"(delta), "0"((u64)scale->mul_frac)); return product; } @@ -158,10 +158,9 @@ static inline struct time_scale scale_reciprocal(struct time_scale scale) reciprocal.shift++; } - asm ( - "divl %4" - : "=a" (reciprocal.mul_frac), "=d" (dividend) - : "0" (0), "1" (dividend), "r" (scale.mul_frac) ); + asm("divl %4" + : "=a"(reciprocal.mul_frac), "=d"(dividend) + : "0"(0), "1"(dividend), "r"(scale.mul_frac)); return reciprocal; } @@ -216,7 +215,7 @@ static void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs) spin_lock_irq(&pit_lock); outb(0x80, PIT_MODE); - count = inb(PIT_CH2); + count = inb(PIT_CH2); count |= inb(PIT_CH2) << 8; pit_stamp32 += (u16)(pit_stamp16 - count); @@ -226,18 +225,16 @@ static void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs) } } -static struct irqaction __read_mostly irq0 = { - timer_interrupt, "timer", NULL -}; +static struct irqaction __read_mostly irq0 = {timer_interrupt, "timer", NULL}; #define CLOCK_TICK_RATE 1193182 /* system crystal frequency (Hz) */ -#define CALIBRATE_FRAC 20 /* calibrate over 50ms */ +#define CALIBRATE_FRAC 20 /* calibrate over 50ms */ #define CALIBRATE_VALUE(freq) (((freq) + CALIBRATE_FRAC / 2) / CALIBRATE_FRAC) static void preinit_pit(void) { /* Set PIT channel 0 to HZ Hz. 
*/ -#define LATCH (((CLOCK_TICK_RATE)+(HZ/2))/HZ) +#define LATCH (((CLOCK_TICK_RATE) + (HZ / 2)) / HZ) outb_p(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */ outb_p(LATCH & 0xff, PIT_CH0); /* LSB */ outb(LATCH >> 8, PIT_CH0); /* MSB */ @@ -252,7 +249,7 @@ void set_time_scale(struct time_scale *ts, u64 ticks_per_sec) ASSERT(tps64 != 0); - while ( tps64 > (MILLISECS(1000)*2) ) + while ( tps64 > (MILLISECS(1000) * 2) ) { tps64 >>= 1; shift--; @@ -266,7 +263,7 @@ void set_time_scale(struct time_scale *ts, u64 ticks_per_sec) } ts->mul_frac = div_frac(MILLISECS(1000), tps32); - ts->shift = shift; + ts->shift = shift; } static char *freq_string(u64 freq) @@ -292,7 +289,7 @@ static u64 read_pit_count(void) spin_lock_irqsave(&pit_lock, flags); outb(0x80, PIT_MODE); - count16 = inb(PIT_CH2); + count16 = inb(PIT_CH2); count16 |= inb(PIT_CH2) << 8; count32 = pit_stamp32 + (u16)(pit_stamp16 - count16); @@ -347,8 +344,7 @@ static void resume_pit(struct platform_timesource *pts) outb(0, PIT_CH2); /* MSB of count */ } -static struct platform_timesource __initdata plt_pit = -{ +static struct platform_timesource __initdata plt_pit = { .id = "pit", .name = "PIT", .frequency = CLOCK_TICK_RATE, @@ -394,15 +390,13 @@ static void resume_hpet(struct platform_timesource *pts) hpet_resume(NULL); } -static struct platform_timesource __initdata plt_hpet = -{ - .id = "hpet", - .name = "HPET", - .read_counter = read_hpet_count, - .counter_bits = 32, - .init = init_hpet, - .resume = resume_hpet -}; +static struct platform_timesource __initdata plt_hpet = {.id = "hpet", + .name = "HPET", + .read_counter = + read_hpet_count, + .counter_bits = 32, + .init = init_hpet, + .resume = resume_hpet}; /************************************************************ * PLATFORM TIMER 3: ACPI PM TIMER @@ -445,15 +439,13 @@ static s64 __init init_pmtimer(struct platform_timesource *pts) return (rdtsc_ordered() - start) * CALIBRATE_FRAC; } -static struct platform_timesource __initdata plt_pmtimer = -{ +static struct platform_timesource __initdata plt_pmtimer = { .id = "acpi", .name = "ACPI PM Timer", .frequency = ACPI_PM_FREQUENCY, .read_counter = read_pmtimer_count, .counter_bits = 24, - .init = init_pmtimer -}; + .init = init_pmtimer}; static struct time_scale __read_mostly pmt_scale; static struct time_scale __read_mostly pmt_scale_r; @@ -515,8 +507,7 @@ static u64 read_tsc(void) return rdtsc_ordered(); } -static struct platform_timesource __initdata plt_tsc = -{ +static struct platform_timesource __initdata plt_tsc = { .id = "tsc", .name = "TSC", .read_counter = read_tsc, @@ -566,7 +557,7 @@ static always_inline uint64_t read_cycle(const struct vcpu_time_info *info, { uint64_t delta = tsc - info->tsc_timestamp; struct time_scale ts = { - .shift = info->tsc_shift, + .shift = info->tsc_shift, .mul_frac = info->tsc_to_system_mul, }; uint64_t offset = scale_delta(delta, &ts); @@ -604,8 +595,7 @@ static uint64_t read_xen_timer(void) return ret; } -static struct platform_timesource __initdata plt_xen_timer = -{ +static struct platform_timesource __initdata plt_xen_timer = { .id = "xen", .name = "XEN PV CLOCK", .read_counter = read_xen_timer, @@ -622,7 +612,7 @@ static struct platform_timesource __initdata plt_xen_timer = static struct platform_timesource __read_mostly plt_src; /* hardware-width mask */ static u64 __read_mostly plt_mask; - /* ns between calls to plt_overflow() */ +/* ns between calls to plt_overflow() */ static u64 __read_mostly plt_overflow_period; /* scale: platform counter -> nanosecs */ static struct time_scale 
__read_mostly plt_scale; @@ -631,8 +621,8 @@ static struct time_scale __read_mostly plt_scale; static DEFINE_SPINLOCK(platform_timer_lock); static s_time_t stime_platform_stamp; /* System time at below platform time */ static u64 platform_timer_stamp; /* Platform time at above system time */ -static u64 plt_stamp64; /* 64-bit platform counter stamp */ -static u64 plt_stamp; /* hardware-width platform counter stamp */ +static u64 plt_stamp64; /* 64-bit platform counter stamp */ +static u64 plt_stamp; /* hardware-width platform counter stamp */ static struct timer plt_overflow_timer; static s_time_t __read_platform_stime(u64 platform_time) @@ -670,7 +660,8 @@ static void plt_overflow(void *unused) if ( !test_and_set_bool(warned_once) ) printk("Platform timer appears to have unexpectedly wrapped " - "%u%s times.\n", i, (i == 10) ? " or more" : ""); + "%u%s times.\n", + i, (i == 10) ? " or more" : ""); } spin_unlock_irq(&platform_timer_lock); @@ -751,8 +742,8 @@ static s64 __init try_platform_timer(struct platform_timesource *pts) set_time_scale(&plt_scale, pts->frequency); - plt_overflow_period = scale_delta( - 1ull << (pts->counter_bits - 1), &plt_scale); + plt_overflow_period = + scale_delta(1ull << (pts->counter_bits - 1), &plt_scale); plt_src = *pts; return rc; @@ -760,12 +751,11 @@ static s64 __init try_platform_timer(struct platform_timesource *pts) static u64 __init init_platform_timer(void) { - static struct platform_timesource * __initdata plt_timers[] = { + static struct platform_timesource *__initdata plt_timers[] = { #ifdef CONFIG_XEN_GUEST &plt_xen_timer, #endif - &plt_hpet, &plt_pmtimer, &plt_pit - }; + &plt_hpet, &plt_pmtimer, &plt_pit}; struct platform_timesource *pts = NULL; unsigned int i; @@ -803,8 +793,7 @@ static u64 __init init_platform_timer(void) if ( rc <= 0 ) panic("Unable to find usable platform timer\n"); - printk("Platform timer is %s %s\n", - freq_string(pts->frequency), pts->name); + printk("Platform timer is %s %s\n", freq_string(pts->frequency), pts->name); return rc; } @@ -852,38 +841,41 @@ void cstate_restore_tsc(void) * machines were long is 32-bit! (However, as time_t is signed, we * will already get problems at other places on 2038-01-19 03:14:08) */ -unsigned long -mktime (unsigned int year, unsigned int mon, - unsigned int day, unsigned int hour, - unsigned int min, unsigned int sec) +unsigned long mktime(unsigned int year, unsigned int mon, unsigned int day, + unsigned int hour, unsigned int min, unsigned int sec) { /* 1..12 -> 11,12,1..10: put Feb last since it has a leap day. 
*/ - if ( 0 >= (int) (mon -= 2) ) + if ( 0 >= (int)(mon -= 2) ) { mon += 12; year -= 1; } - return ((((unsigned long)(year/4 - year/100 + year/400 + 367*mon/12 + day)+ - year*365 - 719499 - )*24 + hour /* now have hours */ - )*60 + min /* now have minutes */ - )*60 + sec; /* finally seconds */ + return ((((unsigned long)(year / 4 - year / 100 + year / 400 + + 367 * mon / 12 + day) + + year * 365 - 719499) * + 24 + + hour /* now have hours */ + ) * 60 + + min /* now have minutes */ + ) * 60 + + sec; /* finally seconds */ } -struct rtc_time { +struct rtc_time +{ unsigned int year, mon, day, hour, min, sec; }; static void __get_cmos_time(struct rtc_time *rtc) { - rtc->sec = CMOS_READ(RTC_SECONDS); - rtc->min = CMOS_READ(RTC_MINUTES); + rtc->sec = CMOS_READ(RTC_SECONDS); + rtc->min = CMOS_READ(RTC_MINUTES); rtc->hour = CMOS_READ(RTC_HOURS); - rtc->day = CMOS_READ(RTC_DAY_OF_MONTH); - rtc->mon = CMOS_READ(RTC_MONTH); + rtc->day = CMOS_READ(RTC_DAY_OF_MONTH); + rtc->mon = CMOS_READ(RTC_MONTH); rtc->year = CMOS_READ(RTC_YEAR); - + if ( RTC_ALWAYS_BCD || !(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) ) { BCD_TO_BIN(rtc->sec); @@ -919,7 +911,7 @@ static unsigned long get_cmos_time(void) panic("System with no CMOS RTC advertised must be booted from EFI" " (or with command line option \"cmos-rtc-probe\")\n"); - for ( ; ; ) + for ( ;; ) { s_time_t start, t1, t2; @@ -929,24 +921,20 @@ static unsigned long get_cmos_time(void) start = NOW(); do { /* may take up to 1 second... */ t1 = NOW() - start; - } while ( !(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) && - t1 <= SECONDS(1) ); + } while ( !(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) && t1 <= SECONDS(1) ); start = NOW(); do { /* must try at least 2.228 ms */ t2 = NOW() - start; - } while ( (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) && - t2 < MILLISECS(3) ); + } while ( (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) && t2 < MILLISECS(3) ); __get_cmos_time(&rtc); spin_unlock_irqrestore(&rtc_lock, flags); - if ( likely(!cmos_rtc_probe) || - t1 > SECONDS(1) || t2 >= MILLISECS(3) || - rtc.sec >= 60 || rtc.min >= 60 || rtc.hour >= 24 || - !rtc.day || rtc.day > 31 || - !rtc.mon || rtc.mon > 12 ) + if ( likely(!cmos_rtc_probe) || t1 > SECONDS(1) || t2 >= MILLISECS(3) || + rtc.sec >= 60 || rtc.min >= 60 || rtc.hour >= 24 || !rtc.day || + rtc.day > 31 || !rtc.mon || rtc.mon > 12 ) break; if ( seconds < 60 ) @@ -980,7 +968,7 @@ static unsigned long get_wallclock_time(void) wc_version = sh_info->wc_version & ~1; smp_rmb(); - wc_sec = sh_info->wc_sec; + wc_sec = sh_info->wc_sec; smp_rmb(); } while ( wc_version != sh_info->wc_version ); @@ -1054,26 +1042,26 @@ static void __update_vcpu_system_time(struct vcpu *v, int force) tsc_stamp = gtime_to_gtsc(d, stime); _u.tsc_to_system_mul = d->arch.vtsc_to_ns.mul_frac; - _u.tsc_shift = d->arch.vtsc_to_ns.shift; + _u.tsc_shift = d->arch.vtsc_to_ns.shift; } else { if ( is_hvm_domain(d) && hvm_tsc_scaling_supported ) { - tsc_stamp = hvm_scale_tsc(d, t->stamp.local_tsc); + tsc_stamp = hvm_scale_tsc(d, t->stamp.local_tsc); _u.tsc_to_system_mul = d->arch.vtsc_to_ns.mul_frac; - _u.tsc_shift = d->arch.vtsc_to_ns.shift; + _u.tsc_shift = d->arch.vtsc_to_ns.shift; } else { - tsc_stamp = t->stamp.local_tsc; + tsc_stamp = t->stamp.local_tsc; _u.tsc_to_system_mul = t->tsc_scale.mul_frac; - _u.tsc_shift = t->tsc_scale.shift; + _u.tsc_shift = t->tsc_scale.shift; } } _u.tsc_timestamp = tsc_stamp; - _u.system_time = t->stamp.local_stime; + _u.system_time = t->stamp.local_stime; /* * It's expected that domains cope with this bit changing on every @@ -1105,11 +1093,10 @@ 
static void __update_vcpu_system_time(struct vcpu *v, int force) v->arch.pv.pending_system_time = _u; } -bool update_secondary_system_time(struct vcpu *v, - struct vcpu_time_info *u) +bool update_secondary_system_time(struct vcpu *v, struct vcpu_time_info *u) { XEN_GUEST_HANDLE(vcpu_time_info_t) user_u = v->arch.time_info_guest; - struct guest_memory_policy policy = { .nested_guest_mode = false }; + struct guest_memory_policy policy = {.nested_guest_mode = false}; if ( guest_handle_is_null(user_u) ) return true; @@ -1151,7 +1138,7 @@ static void update_domain_rtc(void) rcu_read_lock(&domlist_read_lock); - for_each_domain ( d ) + for_each_domain (d) if ( is_hvm_domain(d) ) rtc_update_clock(d); @@ -1175,7 +1162,8 @@ int cpu_frequency_change(u64 freq) if ( freq < 1000000u ) { printk(XENLOG_WARNING "Rejecting CPU frequency change " - "to %"PRIu64" Hz\n", freq); + "to %" PRIu64 " Hz\n", + freq); return -EINVAL; } @@ -1264,7 +1252,7 @@ static void local_time_calibration(void) curr.local_stime = curr.master_stime; stime_elapsed64 = curr.master_stime - prev.master_stime; - tsc_elapsed64 = curr.local_tsc - prev.local_tsc; + tsc_elapsed64 = curr.local_tsc - prev.local_tsc; /* * Weirdness can happen if we lose sync with the platform timer. @@ -1296,7 +1284,7 @@ static void local_time_calibration(void) ((s32)stime_elapsed64 < 0) ) { stime_elapsed64 >>= 1; - tsc_elapsed64 >>= 1; + tsc_elapsed64 >>= 1; } /* stime_master_diff now fits in a 32-bit word. */ @@ -1333,13 +1321,13 @@ static void local_time_calibration(void) /* Record new timestamp information, atomically w.r.t. interrupts. */ local_irq_disable(); t->tsc_scale.mul_frac = calibration_mul_frac; - t->tsc_scale.shift = tsc_shift; - t->stamp = curr; + t->tsc_scale.shift = tsc_shift; + t->stamp = curr; local_irq_enable(); update_vcpu_system_time(current); - out: +out: if ( smp_processor_id() == 0 ) { set_timer(&calibration_timer, NOW() + EPOCH); @@ -1369,7 +1357,7 @@ static void check_tsc_warp(unsigned long tsc_khz, unsigned long *max_warp) end = start + tsc_khz * 20ULL; now = start; - for ( i = 0; ; i++ ) + for ( i = 0;; i++ ) { /* * We take the global lock, measure TSC, save the @@ -1383,8 +1371,8 @@ static void check_tsc_warp(unsigned long tsc_khz, unsigned long *max_warp) spin_unlock(&sync_lock); /* - * Be nice every now and then (and also check whether measurement is - * done [we also insert a 10 million loops safety exit, so we dont + * Be nice every now and then (and also check whether measurement is + * done [we also insert a 10 million loops safety exit, so we dont * lock up in case the TSC readout is totally broken]): */ if ( unlikely(!(i & 7)) ) @@ -1396,7 +1384,7 @@ static void check_tsc_warp(unsigned long tsc_khz, unsigned long *max_warp) } /* - * Outside the critical section we can now see whether we saw a + * Outside the critical section we can now see whether we saw a * time-warp of the TSC going backwards: */ if ( unlikely(prev > now) ) @@ -1447,7 +1435,8 @@ static void tsc_check_reliability(void) * Master CPU snapshots the platform timer. * All CPUS snapshot their local TSC and extrapolation of system time. 
*/ -struct calibration_rendezvous { +struct calibration_rendezvous +{ cpumask_t cpu_calibration_map; atomic_t semaphore; s_time_t master_stime; @@ -1459,8 +1448,8 @@ time_calibration_rendezvous_tail(const struct calibration_rendezvous *r) { struct cpu_time_stamp *c = &this_cpu(cpu_calibration); - c->local_tsc = rdtsc_ordered(); - c->local_stime = get_s_time_fixed(c->local_tsc); + c->local_tsc = rdtsc_ordered(); + c->local_stime = get_s_time_fixed(c->local_tsc); c->master_stime = r->master_stime; raise_softirq(TIME_CALIBRATE_SOFTIRQ); @@ -1494,7 +1483,7 @@ static void time_calibration_tsc_rendezvous(void *_r) if ( i == 0 ) write_tsc(r->master_tsc_stamp); - while ( atomic_read(&r->semaphore) != (2*total_cpus - 1) ) + while ( atomic_read(&r->semaphore) != (2 * total_cpus - 1) ) cpu_relax(); atomic_set(&r->semaphore, 0); } @@ -1550,8 +1539,8 @@ static void time_calibration_nop_rendezvous(void *rv) const struct calibration_rendezvous *r = rv; struct cpu_time_stamp *c = &this_cpu(cpu_calibration); - c->local_tsc = r->master_tsc_stamp; - c->local_stime = r->master_stime; + c->local_tsc = r->master_tsc_stamp; + c->local_stime = r->master_stime; c->master_stime = r->master_stime; raise_softirq(TIME_CALIBRATE_SOFTIRQ); @@ -1562,9 +1551,7 @@ static void (*time_calibration_rendezvous_fn)(void *) = static void time_calibration(void *unused) { - struct calibration_rendezvous r = { - .semaphore = ATOMIC_INIT(0) - }; + struct calibration_rendezvous r = {.semaphore = ATOMIC_INIT(0)}; if ( clocksource_is_tsc() ) { @@ -1576,9 +1563,8 @@ static void time_calibration(void *unused) cpumask_copy(&r.cpu_calibration_map, &cpu_online_map); /* @wait=1 because we must wait for all cpus before freeing @r. */ - on_selected_cpus(&r.cpu_calibration_map, - time_calibration_rendezvous_fn, - &r, 1); + on_selected_cpus(&r.cpu_calibration_map, time_calibration_rendezvous_fn, &r, + 1); } static struct cpu_time_stamp ap_bringup_ref; @@ -1648,8 +1634,8 @@ void init_percpu_time(void) if ( !warned ) { warned = true; - printk(XENLOG_WARNING - "Differing TSC ADJUST values within socket(s) - fixing all\n"); + printk(XENLOG_WARNING "Differing TSC ADJUST values within " + "socket(s) - fixing all\n"); } wrmsrl(MSR_IA32_TSC_ADJUST, tsc_adjust[socket]); } @@ -1673,16 +1659,16 @@ void init_percpu_time(void) else now += ap_bringup_ref.local_stime - ap_bringup_ref.master_stime; } - t->stamp.local_tsc = tsc; + t->stamp.local_tsc = tsc; t->stamp.local_stime = now; } /* - * On certain older Intel CPUs writing the TSC MSR clears the upper 32 bits. + * On certain older Intel CPUs writing the TSC MSR clears the upper 32 bits. * Obviously we must not use write_tsc() on such CPUs. * - * Additionally, AMD specifies that being able to write the TSC MSR is not an - * architectural feature (but, other than their manual says, also cannot be + * Additionally, AMD specifies that being able to write the TSC MSR is not an + * architectural feature (but, other than their manual says, also cannot be * determined from CPUID bits). */ static void __init tsc_check_writability(void) @@ -1853,7 +1839,6 @@ int __init init_xen_time(void) return 0; } - /* Early init function. 
*/ void __init early_time_init(void) { @@ -1867,7 +1852,8 @@ void __init early_time_init(void) if ( tmp ) { printk(XENLOG_WARNING - "TSC ADJUST set to %lx on boot CPU - clearing\n", tmp); + "TSC ADJUST set to %lx on boot CPU - clearing\n", + tmp); wrmsrl(MSR_IA32_TSC_ADJUST, 0); boot_tsc_stamp -= tmp; } @@ -1882,14 +1868,14 @@ void __init early_time_init(void) do_div(tmp, 1000); cpu_khz = (unsigned long)tmp; - printk("Detected %lu.%03lu MHz processor.\n", - cpu_khz / 1000, cpu_khz % 1000); + printk("Detected %lu.%03lu MHz processor.\n", cpu_khz / 1000, + cpu_khz % 1000); setup_irq(0, 0, &irq0); } /* keep pit enabled for pit_broadcast working while cpuidle enabled */ -static int _disable_pit_irq(void(*hpet_broadcast_setup)(void)) +static int _disable_pit_irq(void (*hpet_broadcast_setup)(void)) { int ret = 1; @@ -1897,7 +1883,7 @@ static int _disable_pit_irq(void(*hpet_broadcast_setup)(void)) return -1; /* - * If we do not rely on PIT CH0 then we can use HPET for one-shot timer + * If we do not rely on PIT CH0 then we can use HPET for one-shot timer * emulation when entering deep C states. * XXX dom0 may rely on RTC interrupt delivery, so only enable * hpet_broadcast if FSB mode available or if force_hpet_broadcast. @@ -2010,7 +1996,7 @@ int hwdom_pit_access(struct ioreq *ioreq) if ( using_pit ) return 0; - switch ( ioreq->addr ) + switch (ioreq->addr) { case PIT_CH2: if ( ioreq->dir == IOREQ_READ ) @@ -2022,17 +2008,17 @@ int hwdom_pit_access(struct ioreq *ioreq) case PIT_MODE: if ( ioreq->dir == IOREQ_READ ) return 0; /* urk! */ - switch ( ioreq->data & 0xc0 ) + switch (ioreq->data & 0xc0) { - case 0xc0: /* Read Back */ - if ( ioreq->data & 0x08 ) /* Select Channel 2? */ + case 0xc0: /* Read Back */ + if ( ioreq->data & 0x08 ) /* Select Channel 2? */ outb(ioreq->data & 0xf8, PIT_MODE); if ( !(ioreq->data & 0x06) ) /* Select Channel 0/1? */ - return 1; /* no - we're done */ + return 1; /* no - we're done */ /* Filter Channel 2 and reserved bit 0. 
*/ ioreq->data &= ~0x09; return 0; /* emulate ch0/1 readback */ - case 0x80: /* Select Counter 2 */ + case 0x80: /* Select Counter 2 */ outb(ioreq->data, PIT_MODE); return 1; } @@ -2136,17 +2122,16 @@ int host_tsc_is_safe(void) * called to collect tsc-related data only for save file or live * migrate; called after last rdtsc is done on this incarnation */ -void tsc_get_info(struct domain *d, uint32_t *tsc_mode, - uint64_t *elapsed_nsec, uint32_t *gtsc_khz, - uint32_t *incarnation) +void tsc_get_info(struct domain *d, uint32_t *tsc_mode, uint64_t *elapsed_nsec, + uint32_t *gtsc_khz, uint32_t *incarnation) { - bool enable_tsc_scaling = is_hvm_domain(d) && - hvm_tsc_scaling_supported && !d->arch.vtsc; + bool enable_tsc_scaling = + is_hvm_domain(d) && hvm_tsc_scaling_supported && !d->arch.vtsc; *incarnation = d->arch.incarnation; *tsc_mode = d->arch.tsc_mode; - switch ( *tsc_mode ) + switch (*tsc_mode) { uint64_t tsc; @@ -2156,7 +2141,7 @@ void tsc_get_info(struct domain *d, uint32_t *tsc_mode, case TSC_MODE_DEFAULT: if ( d->arch.vtsc ) { - case TSC_MODE_ALWAYS_EMULATE: + case TSC_MODE_ALWAYS_EMULATE: *elapsed_nsec = get_s_time() - d->arch.vtsc_offset; *gtsc_khz = d->arch.tsc_khz; break; @@ -2179,8 +2164,7 @@ void tsc_get_info(struct domain *d, uint32_t *tsc_mode, * only the last "sticks" and all are completed before the guest executes * an rdtsc instruction */ -int tsc_set_info(struct domain *d, - uint32_t tsc_mode, uint64_t elapsed_nsec, +int tsc_set_info(struct domain *d, uint32_t tsc_mode, uint64_t elapsed_nsec, uint32_t gtsc_khz, uint32_t incarnation) { ASSERT(!is_system_domain(d)); @@ -2191,7 +2175,7 @@ int tsc_set_info(struct domain *d, return 0; } - switch ( tsc_mode ) + switch (tsc_mode) { case TSC_MODE_DEFAULT: case TSC_MODE_ALWAYS_EMULATE: @@ -2211,7 +2195,7 @@ int tsc_set_info(struct domain *d, (is_hvm_domain(d) && hvm_get_tsc_scaling_ratio(d->arch.tsc_khz))) ) { - case TSC_MODE_NEVER_EMULATE: + case TSC_MODE_NEVER_EMULATE: d->arch.vtsc = 0; break; } @@ -2264,33 +2248,36 @@ static void dump_softtsc(unsigned char key) tsc_check_reliability(); if ( boot_cpu_has(X86_FEATURE_TSC_RELIABLE) ) printk("TSC marked as reliable, " - "warp = %lu (count=%lu)\n", tsc_max_warp, tsc_check_count); - else if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC ) ) + "warp = %lu (count=%lu)\n", + tsc_max_warp, tsc_check_count); + else if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) ) { printk("TSC has constant rate, "); - if (max_cstate <= 2 && tsc_max_warp == 0) + if ( max_cstate <= 2 && tsc_max_warp == 0 ) printk("no deep Cstates, passed warp test, deemed reliable, "); else printk("deep Cstates possible, so not reliable, "); printk("warp=%lu (count=%lu)\n", tsc_max_warp, tsc_check_count); - } else + } + else printk("TSC not marked as either constant or reliable, " - "warp=%lu (count=%lu)\n", tsc_max_warp, tsc_check_count); - for_each_domain ( d ) + "warp=%lu (count=%lu)\n", + tsc_max_warp, tsc_check_count); + for_each_domain (d) { if ( is_hardware_domain(d) && d->arch.tsc_mode == TSC_MODE_DEFAULT ) continue; - printk("dom%u%s: mode=%d",d->domain_id, - is_hvm_domain(d) ? "(hvm)" : "", d->arch.tsc_mode); + printk("dom%u%s: mode=%d", d->domain_id, + is_hvm_domain(d) ? 
"(hvm)" : "", d->arch.tsc_mode); if ( d->arch.vtsc_offset ) - printk(",ofs=%#"PRIx64, d->arch.vtsc_offset); + printk(",ofs=%#" PRIx64, d->arch.vtsc_offset); if ( d->arch.tsc_khz ) - printk(",khz=%"PRIu32, d->arch.tsc_khz); + printk(",khz=%" PRIu32, d->arch.tsc_khz); if ( d->arch.incarnation ) - printk(",inc=%"PRIu32, d->arch.incarnation); + printk(",inc=%" PRIu32, d->arch.incarnation); #if !defined(NDEBUG) || defined(CONFIG_PERF_COUNTERS) if ( d->arch.vtsc_kerncount | d->arch.vtsc_usercount ) - printk(",vtsc count: %"PRIu64" kernel,%"PRIu64" user", + printk(",vtsc count: %" PRIu64 " kernel,%" PRIu64 " user", d->arch.vtsc_kerncount, d->arch.vtsc_usercount); #endif printk("\n"); @@ -2298,7 +2285,7 @@ static void dump_softtsc(unsigned char key) } if ( !domcnt ) - printk("No domains have emulated TSC\n"); + printk("No domains have emulated TSC\n"); } static int __init setup_dump_softtsc(void) diff --git a/xen/arch/x86/trace.c b/xen/arch/x86/trace.c index 4a953c5b2f..d0d642f288 100644 --- a/xen/arch/x86/trace.c +++ b/xen/arch/x86/trace.c @@ -5,40 +5,37 @@ #include #include -void __trace_pv_trap(int trapnr, unsigned long eip, - int use_error_code, unsigned error_code) +void __trace_pv_trap(int trapnr, unsigned long eip, int use_error_code, + unsigned error_code) { if ( is_pv_32bit_vcpu(current) ) { - struct __packed { - unsigned eip:32, - trapnr:15, - use_error_code:1, - error_code:16; + struct __packed + { + unsigned eip : 32, trapnr : 15, use_error_code : 1, error_code : 16; } d; d.eip = eip; d.trapnr = trapnr; d.error_code = error_code; - d.use_error_code=!!use_error_code; - + d.use_error_code = !!use_error_code; + __trace_var(TRC_PV_TRAP, 1, sizeof(d), &d); } else { - struct __packed { + struct __packed + { unsigned long eip; - unsigned trapnr:15, - use_error_code:1, - error_code:16; + unsigned trapnr : 15, use_error_code : 1, error_code : 16; } d; unsigned event; d.eip = eip; d.trapnr = trapnr; d.error_code = error_code; - d.use_error_code=!!use_error_code; - + d.use_error_code = !!use_error_code; + event = TRC_PV_TRAP; event |= TRC_64_FLAG; __trace_var(event, 1, sizeof(d), &d); @@ -51,19 +48,21 @@ void __trace_pv_page_fault(unsigned long addr, unsigned error_code) if ( is_pv_32bit_vcpu(current) ) { - struct __packed { + struct __packed + { u32 eip, addr, error_code; } d; d.eip = eip; d.addr = addr; d.error_code = error_code; - + __trace_var(TRC_PV_PAGE_FAULT, 1, sizeof(d), &d); } else { - struct __packed { + struct __packed + { unsigned long eip, addr; u32 error_code; } d; @@ -92,25 +91,26 @@ void __trace_trap_one_addr(unsigned event, unsigned long va) } } -void __trace_trap_two_addr(unsigned event, unsigned long va1, - unsigned long va2) +void __trace_trap_two_addr(unsigned event, unsigned long va1, unsigned long va2) { if ( is_pv_32bit_vcpu(current) ) { - struct __packed { + struct __packed + { u32 va1, va2; } d; - d.va1=va1; - d.va2=va2; + d.va1 = va1; + d.va2 = va2; __trace_var(event, 1, sizeof(d), &d); } else { - struct __packed { + struct __packed + { unsigned long va1, va2; } d; - d.va1=va1; - d.va2=va2; + d.va1 = va1; + d.va2 = va2; event |= TRC_64_FLAG; __trace_var(event, 1, sizeof(d), &d); } @@ -130,7 +130,8 @@ void __trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte) if ( is_pv_32bit_vcpu(current) ) { - struct __packed { + struct __packed + { l1_pgentry_t pte; u32 addr, eip; } d; @@ -142,7 +143,8 @@ void __trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte) } else { - struct { + struct + { l1_pgentry_t pte; unsigned long addr, eip; } d; @@ -154,6 +156,6 @@ void 
__trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte) event = TRC_PV_PTWR_EMULATION; event |= TRC_64_FLAG; - __trace_var(event, 1/*tsc*/, sizeof(d), &d); + __trace_var(event, 1 /*tsc*/, sizeof(d), &d); } } diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c index 05ddc39bfe..dbb79ba66d 100644 --- a/xen/arch/x86/traps.c +++ b/xen/arch/x86/traps.c @@ -100,14 +100,14 @@ DEFINE_PER_CPU_READ_MOSTLY(seg_desc_t *, gdt_table); DEFINE_PER_CPU_READ_MOSTLY(seg_desc_t *, compat_gdt_table); /* Master table, used by CPU0. */ -idt_entry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE) - idt_table[IDT_ENTRIES]; +idt_entry_t __section(".bss.page_aligned") + __aligned(PAGE_SIZE) idt_table[IDT_ENTRIES]; /* Pointer to the IDT of every CPU. */ idt_entry_t *idt_tables[NR_CPUS] __read_mostly; -bool (*ioemul_handle_quirk)( - u8 opcode, char *io_emul_stub, struct cpu_user_regs *regs); +bool (*ioemul_handle_quirk)(u8 opcode, char *io_emul_stub, + struct cpu_user_regs *regs); static int debug_stack_lines = 20; integer_param("debug_stack_lines", debug_stack_lines); @@ -124,29 +124,29 @@ unsigned int __read_mostly ler_msr; static void do_trap(struct cpu_user_regs *regs); static void do_reserved_trap(struct cpu_user_regs *regs); -void (* const exception_table[TRAP_nr])(struct cpu_user_regs *regs) = { - [TRAP_divide_error] = do_trap, - [TRAP_debug] = do_debug, - [TRAP_nmi] = (void *)do_nmi, - [TRAP_int3] = do_int3, - [TRAP_overflow] = do_trap, - [TRAP_bounds] = do_trap, - [TRAP_invalid_op] = do_invalid_op, - [TRAP_no_device] = do_device_not_available, - [TRAP_double_fault] = do_reserved_trap, - [TRAP_copro_seg] = do_reserved_trap, - [TRAP_invalid_tss] = do_trap, - [TRAP_no_segment] = do_trap, - [TRAP_stack_error] = do_trap, - [TRAP_gp_fault] = do_general_protection, - [TRAP_page_fault] = do_page_fault, - [TRAP_spurious_int] = do_reserved_trap, - [TRAP_copro_error] = do_trap, - [TRAP_alignment_check] = do_trap, - [TRAP_machine_check] = (void *)do_machine_check, - [TRAP_simd_error] = do_trap, - [TRAP_virtualisation ... - (ARRAY_SIZE(exception_table) - 1)] = do_reserved_trap, +void (*const exception_table[TRAP_nr])(struct cpu_user_regs *regs) = { + [TRAP_divide_error] = do_trap, + [TRAP_debug] = do_debug, + [TRAP_nmi] = (void *)do_nmi, + [TRAP_int3] = do_int3, + [TRAP_overflow] = do_trap, + [TRAP_bounds] = do_trap, + [TRAP_invalid_op] = do_invalid_op, + [TRAP_no_device] = do_device_not_available, + [TRAP_double_fault] = do_reserved_trap, + [TRAP_copro_seg] = do_reserved_trap, + [TRAP_invalid_tss] = do_trap, + [TRAP_no_segment] = do_trap, + [TRAP_stack_error] = do_trap, + [TRAP_gp_fault] = do_general_protection, + [TRAP_page_fault] = do_page_fault, + [TRAP_spurious_int] = do_reserved_trap, + [TRAP_copro_error] = do_trap, + [TRAP_alignment_check] = do_trap, + [TRAP_machine_check] = (void *)do_machine_check, + [TRAP_simd_error] = do_trap, + [TRAP_virtualisation...(ARRAY_SIZE(exception_table) - + 1)] = do_reserved_trap, }; void show_code(const struct cpu_user_regs *regs) @@ -163,31 +163,25 @@ void show_code(const struct cpu_user_regs *regs) * Copy forward from regs->rip. In the case of a fault, %ecx contains the * number of bytes remaining to copy. 
*/ - asm volatile ("1: rep movsb; 2:" - _ASM_EXTABLE(1b, 2b) - : "=&c" (missing_after), - "=&D" (tmp), "=&S" (tmp) - : "0" (ARRAY_SIZE(insns_after)), - "1" (insns_after), - "2" (regs->rip)); + asm volatile("1: rep movsb; 2:" _ASM_EXTABLE(1b, 2b) + : "=&c"(missing_after), "=&D"(tmp), "=&S"(tmp) + : "0"(ARRAY_SIZE(insns_after)), "1"(insns_after), + "2"(regs->rip)); /* * Copy backwards from regs->rip - 1. In the case of a fault, %ecx * contains the number of bytes remaining to copy. */ - asm volatile ("std;" - "1: rep movsb;" - "2: cld;" - _ASM_EXTABLE(1b, 2b) - : "=&c" (missing_before), - "=&D" (tmp), "=&S" (tmp) - : "0" (ARRAY_SIZE(insns_before)), - "1" (insns_before + ARRAY_SIZE(insns_before) - 1), - "2" (regs->rip - 1)); + asm volatile("std;" + "1: rep movsb;" + "2: cld;" _ASM_EXTABLE(1b, 2b) + : "=&c"(missing_before), "=&D"(tmp), "=&S"(tmp) + : "0"(ARRAY_SIZE(insns_before)), + "1"(insns_before + ARRAY_SIZE(insns_before) - 1), + "2"(regs->rip - 1)); clac(); - printk("Xen code around <%p> (%ps)%s:\n", - _p(regs->rip), _p(regs->rip), + printk("Xen code around <%p> (%ps)%s:\n", _p(regs->rip), _p(regs->rip), (missing_before || missing_after) ? " [fault on access]" : ""); /* Print bytes from insns_before[]. */ @@ -239,7 +233,7 @@ static void compat_show_guest_stack(struct vcpu *v, ASSERT(guest_kernel_mode(v, regs)); mfn = read_cr3() >> PAGE_SHIFT; - for_each_vcpu( v->domain, vcpu ) + for_each_vcpu (v->domain, vcpu) if ( pagetable_get_pfn(vcpu->arch.guest_table) == mfn ) break; if ( !vcpu ) @@ -298,7 +292,7 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs) } stack = (unsigned long *)regs->rsp; - printk("Guest stack trace from "__OP"sp=%p:\n ", stack); + printk("Guest stack trace from " __OP "sp=%p:\n ", stack); if ( !access_ok(stack, sizeof(*stack)) ) { @@ -324,7 +318,7 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs) } } - for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ ) + for ( i = 0; i < (debug_stack_lines * stack_words_per_line); i++ ) { if ( (((long)stack - 1) ^ ((long)(stack + 1) - 1)) & mask ) break; @@ -379,18 +373,18 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs) */ unsigned long get_stack_trace_bottom(unsigned long sp) { - switch ( get_stack_page(sp) ) + switch (get_stack_page(sp)) { case 0 ... 3: - return ROUNDUP(sp, PAGE_SIZE) - - offsetof(struct cpu_user_regs, es) - sizeof(unsigned long); + return ROUNDUP(sp, PAGE_SIZE) - offsetof(struct cpu_user_regs, es) - + sizeof(unsigned long); #ifndef MEMORY_GUARD case 4 ... 5: #endif case 6 ... 7: - return ROUNDUP(sp, STACK_SIZE) - - sizeof(struct cpu_info) - sizeof(unsigned long); + return ROUNDUP(sp, STACK_SIZE) - sizeof(struct cpu_info) - + sizeof(unsigned long); default: return sp - sizeof(unsigned long); @@ -399,7 +393,7 @@ unsigned long get_stack_trace_bottom(unsigned long sp) unsigned long get_stack_dump_bottom(unsigned long sp) { - switch ( get_stack_page(sp) ) + switch (get_stack_page(sp)) { case 0 ... 3: return ROUNDUP(sp, PAGE_SIZE) - sizeof(unsigned long); @@ -448,7 +442,7 @@ static void _show_trace(unsigned long sp, unsigned long bp) /* The initial frame pointer. */ next = bp; - for ( ; ; ) + for ( ;; ) { /* Valid frame pointer? 
*/ if ( (next < low) || (next >= high) ) @@ -461,17 +455,17 @@ static void _show_trace(unsigned long sp, unsigned long bp) if ( (next < low) || (next >= high) ) break; frame = (unsigned long *)next; - next = frame[0]; - addr = frame[(offsetof(struct cpu_user_regs, rip) - - offsetof(struct cpu_user_regs, rbp)) - / BYTES_PER_LONG]; + next = frame[0]; + addr = frame[(offsetof(struct cpu_user_regs, rip) - + offsetof(struct cpu_user_regs, rbp)) / + BYTES_PER_LONG]; } else { /* Ordinary stack frame. */ frame = (unsigned long *)next; - next = frame[0]; - addr = frame[1]; + next = frame[0]; + addr = frame[1]; } printk(" [<%p>] %pS\n", _p(addr), _p(addr)); @@ -492,8 +486,7 @@ static void show_trace(const struct cpu_user_regs *regs) * If RIP looks sensible, or the top of the stack doesn't, print RIP at * the top of the stack trace. */ - if ( is_active_kernel_text(regs->rip) || - !is_active_kernel_text(*sp) ) + if ( is_active_kernel_text(regs->rip) || !is_active_kernel_text(*sp) ) printk(" [<%p>] %pS\n", _p(regs->rip), _p(regs->rip)); /* * Else RIP looks bad but the top of the stack looks good. Perhaps we @@ -520,12 +513,13 @@ void show_stack(const struct cpu_user_regs *regs) if ( guest_mode(regs) ) return show_guest_stack(current, regs); - printk("Xen stack trace from "__OP"sp=%p:\n ", stack); + printk("Xen stack trace from " __OP "sp=%p:\n ", stack); stack_bottom = _p(get_stack_dump_bottom(regs->rsp)); - for ( i = 0; i < (debug_stack_lines*stack_words_per_line) && - (stack <= stack_bottom); i++ ) + for ( i = 0; i < (debug_stack_lines * stack_words_per_line) && + (stack <= stack_bottom); + i++ ) { if ( (i != 0) && ((i % stack_words_per_line) == 0) ) printk("\n "); @@ -553,18 +547,17 @@ void show_stack_overflow(unsigned int cpu, const struct cpu_user_regs *regs) #ifdef MEMORY_GUARD esp_bottom = (esp | (STACK_SIZE - 1)) + 1; - esp_top = esp_bottom - PRIMARY_STACK_SIZE; + esp_top = esp_bottom - PRIMARY_STACK_SIZE; - printk("Valid stack range: %p-%p, sp=%p, tss.rsp0=%p\n", - (void *)esp_top, (void *)esp_bottom, (void *)esp, + printk("Valid stack range: %p-%p, sp=%p, tss.rsp0=%p\n", (void *)esp_top, + (void *)esp_bottom, (void *)esp, (void *)per_cpu(init_tss, cpu).rsp0); /* * Trigger overflow trace if %esp is anywhere within the guard page, or * with fewer than 512 bytes remaining on the primary stack. */ - if ( (esp > (esp_top + 512)) || - (esp < (esp_top - PAGE_SIZE)) ) + if ( (esp > (esp_top + 512)) || (esp < (esp_top - PAGE_SIZE)) ) { printk("No stack overflow detected. 
Skipping stack trace.\n"); return; @@ -573,8 +566,8 @@ void show_stack_overflow(unsigned int cpu, const struct cpu_user_regs *regs) if ( esp < esp_top ) esp = esp_top; - printk("Xen stack overflow (dumping trace %p-%p):\n", - (void *)esp, (void *)esp_bottom); + printk("Xen stack overflow (dumping trace %p-%p):\n", (void *)esp, + (void *)esp_bottom); _show_trace(esp, regs->rbp); @@ -598,8 +591,8 @@ void vcpu_show_execution_state(struct vcpu *v) { unsigned long flags; - printk("*** Dumping Dom%d vcpu#%d state: ***\n", - v->domain->domain_id, v->vcpu_id); + printk("*** Dumping Dom%d vcpu#%d state: ***\n", v->domain->domain_id, + v->vcpu_id); if ( v == current ) { @@ -642,14 +635,27 @@ static int nmi_show_execution_state(const struct cpu_user_regs *regs, int cpu) const char *trapstr(unsigned int trapnr) { - static const char * const strings[] = { - "divide error", "debug", "nmi", "bkpt", "overflow", "bounds", - "invalid opcode", "device not available", "double fault", - "coprocessor segment", "invalid tss", "segment not found", - "stack error", "general protection fault", "page fault", - "spurious interrupt", "coprocessor error", "alignment check", - "machine check", "simd error", "virtualisation exception" - }; + static const char *const strings[] = {"divide error", + "debug", + "nmi", + "bkpt", + "overflow", + "bounds", + "invalid opcode", + "device not available", + "double fault", + "coprocessor segment", + "invalid tss", + "segment not found", + "stack error", + "general protection fault", + "page fault", + "spurious interrupt", + "coprocessor error", + "alignment check", + "machine check", + "simd error", + "virtualisation exception"}; return trapnr < ARRAY_SIZE(strings) ? strings[trapnr] : "???"; } @@ -743,22 +749,22 @@ static void do_trap(struct cpu_user_regs *regs) if ( guest_mode(regs) ) { - pv_inject_hw_exception(trapnr, - (TRAP_HAVE_EC & (1u << trapnr)) - ? regs->error_code : X86_EVENT_NO_EC); + pv_inject_hw_exception(trapnr, (TRAP_HAVE_EC & (1u << trapnr)) + ? regs->error_code + : X86_EVENT_NO_EC); return; } if ( likely((fixup = search_exception_table(regs)) != 0) ) { - dprintk(XENLOG_ERR, "Trap %u: %p [%ps] -> %p\n", - trapnr, _p(regs->rip), _p(regs->rip), _p(fixup)); + dprintk(XENLOG_ERR, "Trap %u: %p [%ps] -> %p\n", trapnr, _p(regs->rip), + _p(regs->rip), _p(fixup)); this_cpu(last_extable_addr) = regs->rip; regs->rip = fixup; return; } - hardware_trap: +hardware_trap: if ( debugger_trap_fatal(trapnr, regs) ) return; @@ -774,7 +780,7 @@ int guest_rdmsr_xen(const struct vcpu *v, uint32_t idx, uint64_t *val) /* Optionally shift out of the way of Viridian architectural MSRs. */ uint32_t base = is_viridian_domain(d) ? 0x40000200 : 0x40000000; - switch ( idx - base ) + switch (idx - base) { case 0: /* Write hypercall page MSR. Read as zero. */ *val = 0; @@ -790,7 +796,7 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val) /* Optionally shift out of the way of Viridian architectural MSRs. */ uint32_t base = is_viridian_domain(d) ? 0x40000200 : 0x40000000; - switch ( idx - base ) + switch (idx - base) { case 0: /* Write hypercall page */ { @@ -822,8 +828,8 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val) } gdprintk(XENLOG_WARNING, - "Bad GMFN %lx (MFN %#"PRI_mfn") to MSR %08x\n", - gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN), base); + "Bad GMFN %lx (MFN %#" PRI_mfn ") to MSR %08x\n", gmfn, + mfn_x(page ? 
page_to_mfn(page) : INVALID_MFN), base); return X86EMUL_EXCEPTION; } @@ -846,7 +852,7 @@ void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf, const struct domain *d = v->domain; const struct cpuid_policy *p = d->arch.cpuid; uint32_t base = is_viridian_domain(d) ? 0x40000100 : 0x40000000; - uint32_t idx = leaf - base; + uint32_t idx = leaf - base; unsigned int limit = is_viridian_domain(d) ? p->hv2_limit : p->hv_limit; if ( limit == 0 ) @@ -859,7 +865,7 @@ void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf, if ( idx > limit ) return; - switch ( idx ) + switch (idx) { case 0: res->a = base + limit; /* Largest leaf */ @@ -873,19 +879,18 @@ void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf, break; case 2: - res->a = 1; /* Number of hypercall-transfer pages */ - /* MSR base address */ + res->a = 1; /* Number of hypercall-transfer pages */ + /* MSR base address */ res->b = is_viridian_domain(d) ? 0x40000200 : 0x40000000; if ( is_pv_domain(d) ) /* Features */ res->c |= XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD; break; case 3: /* Time leaf. */ - switch ( subleaf ) + switch (subleaf) { case 0: /* features */ - res->a = ((d->arch.vtsc << 0) | - (!!host_tsc_is_safe() << 1) | + res->a = ((d->arch.vtsc << 0) | (!!host_tsc_is_safe() << 1) | (!!boot_cpu_has(X86_FEATURE_RDTSCP) << 2)); res->b = d->arch.tsc_mode; res->c = d->arch.tsc_khz; @@ -927,8 +932,7 @@ void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf, * and wrmsr in the guest will run without VMEXITs (see * vmx_vlapic_msr_changed()). */ - if ( cpu_has_vmx_virtualize_x2apic_mode && - cpu_has_vmx_apic_reg_virt && + if ( cpu_has_vmx_virtualize_x2apic_mode && cpu_has_vmx_apic_reg_virt && cpu_has_vmx_virtual_intr_delivery ) res->a |= XEN_HVM_CPUID_X2APIC_VIRT; @@ -1004,7 +1008,7 @@ void do_invalid_op(struct cpu_user_regs *regs) } } - found: +found: if ( !bug ) goto die; eip += sizeof(bug_insn); @@ -1029,7 +1033,7 @@ void do_invalid_op(struct cpu_user_regs *regs) } lineno = bug_line(bug); - switch ( id ) + switch (id) { case BUGFRAME_warn: printk("Xen WARN at %s%s:%d\n", prefix, filename, lineno); @@ -1052,18 +1056,18 @@ void do_invalid_op(struct cpu_user_regs *regs) if ( !is_kernel(predicate) && !is_patch(predicate) ) predicate = ""; - printk("Assertion '%s' failed at %s%s:%d\n", - predicate, prefix, filename, lineno); + printk("Assertion '%s' failed at %s%s:%d\n", predicate, prefix, + filename, lineno); if ( debugger_trap_fatal(TRAP_invalid_op, regs) ) return; show_execution_state(regs); - panic("Assertion '%s' failed at %s%s:%d\n", - predicate, prefix, filename, lineno); + panic("Assertion '%s' failed at %s%s:%d\n", predicate, prefix, filename, + lineno); } - die: +die: if ( (fixup = search_exception_table(regs)) != 0 ) { this_cpu(last_extable_addr) = regs->rip; @@ -1090,8 +1094,8 @@ void do_int3(struct cpu_user_regs *regs) if ( (fixup = search_exception_table(regs)) != 0 ) { this_cpu(last_extable_addr) = regs->rip; - dprintk(XENLOG_DEBUG, "Trap %u: %p [%ps] -> %p\n", - TRAP_int3, _p(regs->rip), _p(regs->rip), _p(fixup)); + dprintk(XENLOG_DEBUG, "Trap %u: %p [%ps] -> %p\n", TRAP_int3, + _p(regs->rip), _p(regs->rip), _p(fixup)); regs->rip = fixup; return; } @@ -1109,8 +1113,8 @@ void do_int3(struct cpu_user_regs *regs) static void reserved_bit_page_fault(unsigned long addr, struct cpu_user_regs *regs) { - printk("%pv: reserved bit in page table (ec=%04X)\n", - current, regs->error_code); + printk("%pv: reserved bit in page table (ec=%04X)\n", current, + regs->error_code); show_page_walk(addr); 
show_execution_state(regs); } @@ -1132,8 +1136,8 @@ static int handle_ldt_mapping_fault(unsigned int offset, if ( likely(pv_map_ldt_shadow_page(offset)) ) { if ( guest_mode(regs) ) - trace_trap_two_addr(TRC_PV_GDT_LDT_MAPPING_FAULT, - regs->rip, offset); + trace_trap_two_addr(TRC_PV_GDT_LDT_MAPPING_FAULT, regs->rip, + offset); } else { @@ -1162,8 +1166,8 @@ static int handle_gdt_ldt_mapping_fault(unsigned long offset, { struct vcpu *curr = current; /* Which vcpu's area did we fault in, and is it in the ldt sub-area? */ - unsigned int is_ldt_area = (offset >> (GDT_LDT_VCPU_VA_SHIFT-1)) & 1; - unsigned int vcpu_area = (offset >> GDT_LDT_VCPU_VA_SHIFT); + unsigned int is_ldt_area = (offset >> (GDT_LDT_VCPU_VA_SHIFT - 1)) & 1; + unsigned int vcpu_area = (offset >> GDT_LDT_VCPU_VA_SHIFT); /* * If the fault is in another vcpu's area, it cannot be due to @@ -1175,7 +1179,7 @@ static int handle_gdt_ldt_mapping_fault(unsigned long offset, return 0; /* Byte offset within the gdt/ldt sub-area. */ - offset &= (1UL << (GDT_LDT_VCPU_VA_SHIFT-1)) - 1UL; + offset &= (1UL << (GDT_LDT_VCPU_VA_SHIFT - 1)) - 1UL; if ( likely(is_ldt_area) ) return handle_ldt_mapping_fault(offset, regs); @@ -1191,7 +1195,8 @@ static int handle_gdt_ldt_mapping_fault(unsigned long offset, #define IN_HYPERVISOR_RANGE(va) \ (((va) >= HYPERVISOR_VIRT_START) && ((va) < HYPERVISOR_VIRT_END)) -enum pf_type { +enum pf_type +{ real_fault, smep_fault, smap_fault, @@ -1221,7 +1226,7 @@ static enum pf_type __page_fault_type(unsigned long addr, if ( error_code & PFEC_reserved_bit ) return real_fault; - required_flags = _PAGE_PRESENT; + required_flags = _PAGE_PRESENT; if ( error_code & PFEC_write_access ) required_flags |= _PAGE_RW; if ( error_code & PFEC_user_mode ) @@ -1244,7 +1249,7 @@ static enum pf_type __page_fault_type(unsigned long addr, return real_fault; page_user &= l4e_get_flags(l4e); - l3t = map_domain_page(_mfn(mfn)); + l3t = map_domain_page(_mfn(mfn)); l3e = l3e_read_atomic(&l3t[l3_table_offset(addr)]); mfn = l3e_get_pfn(l3e); unmap_domain_page(l3t); @@ -1284,7 +1289,8 @@ leaf: * Disallow supervisor execution from user-accessible mappings */ if ( (cr4 & X86_CR4_SMEP) && - ((error_code & (PFEC_insn_fetch|PFEC_user_mode)) == PFEC_insn_fetch) ) + ((error_code & (PFEC_insn_fetch | PFEC_user_mode)) == + PFEC_insn_fetch) ) return smep_fault; /* @@ -1324,7 +1330,7 @@ static enum pf_type spurious_page_fault(unsigned long addr, static int fixup_page_fault(unsigned long addr, struct cpu_user_regs *regs) { - struct vcpu *v = current; + struct vcpu *v = current; struct domain *d = v->domain; /* No fixups in interrupt context or when interrupts are disabled. 
*/ @@ -1332,7 +1338,7 @@ static int fixup_page_fault(unsigned long addr, struct cpu_user_regs *regs) return 0; if ( !(regs->error_code & PFEC_page_present) && - (pagefault_by_memadd(addr, regs)) ) + (pagefault_by_memadd(addr, regs)) ) return handle_memadd_fault(addr, regs); if ( unlikely(IN_HYPERVISOR_RANGE(addr)) ) @@ -1340,8 +1346,8 @@ static int fixup_page_fault(unsigned long addr, struct cpu_user_regs *regs) #ifdef CONFIG_PV if ( !(regs->error_code & (PFEC_user_mode | PFEC_reserved_bit)) && (addr >= GDT_LDT_VIRT_START) && (addr < GDT_LDT_VIRT_END) ) - return handle_gdt_ldt_mapping_fault( - addr - GDT_LDT_VIRT_START, regs); + return handle_gdt_ldt_mapping_fault(addr - GDT_LDT_VIRT_START, + regs); #endif return 0; } @@ -1352,14 +1358,14 @@ static int fixup_page_fault(unsigned long addr, struct cpu_user_regs *regs) { bool ptwr, mmio_ro; - ptwr = VM_ASSIST(d, writable_pagetables) && - /* Do not check if access-protection fault since the page may - legitimately be not present in shadow page tables */ - (paging_mode_enabled(d) || - (regs->error_code & PFEC_page_present)); + ptwr = + VM_ASSIST(d, writable_pagetables) && + /* Do not check if access-protection fault since the page may + legitimately be not present in shadow page tables */ + (paging_mode_enabled(d) || (regs->error_code & PFEC_page_present)); - mmio_ro = is_hardware_domain(d) && - (regs->error_code & PFEC_page_present); + mmio_ro = + is_hardware_domain(d) && (regs->error_code & PFEC_page_present); if ( (ptwr || mmio_ro) && pv_ro_page_fault(addr, regs) ) return EXCRET_fault_fixed; @@ -1447,10 +1453,10 @@ void do_page_fault(struct cpu_user_regs *regs) if ( unlikely(current->domain->arch.suppress_spurious_page_faults) ) { pf_type = spurious_page_fault(addr, regs); - if ( (pf_type == smep_fault) || (pf_type == smap_fault)) + if ( (pf_type == smep_fault) || (pf_type == smap_fault) ) { - printk(XENLOG_G_ERR "%pv fatal SM%cP violation\n", - current, (pf_type == smep_fault) ? 'E' : 'A'); + printk(XENLOG_G_ERR "%pv fatal SM%cP violation\n", current, + (pf_type == smep_fault) ? 'E' : 'A'); domain_crash(current->domain); } @@ -1483,7 +1489,7 @@ void __init do_early_page_fault(struct cpu_user_regs *regs) { prev_eip = regs->rip; prev_cr2 = cr2; - stuck = 0; + stuck = 0; return; } @@ -1554,8 +1560,7 @@ void do_general_protection(struct cpu_user_regs *regs) } /* Emulate some simple privileged and I/O instructions. */ - if ( (regs->error_code == 0) && - pv_emulate_privileged_op(regs) ) + if ( (regs->error_code == 0) && pv_emulate_privileged_op(regs) ) { trace_trap_one_addr(TRC_PV_EMULATE_PRIVOP, regs->rip); return; @@ -1566,18 +1571,18 @@ void do_general_protection(struct cpu_user_regs *regs) return; #endif - gp_in_kernel: +gp_in_kernel: if ( likely((fixup = search_exception_table(regs)) != 0) ) { - dprintk(XENLOG_INFO, "GPF (%04x): %p [%ps] -> %p\n", - regs->error_code, _p(regs->rip), _p(regs->rip), _p(fixup)); + dprintk(XENLOG_INFO, "GPF (%04x): %p [%ps] -> %p\n", regs->error_code, + _p(regs->rip), _p(regs->rip), _p(fixup)); this_cpu(last_extable_addr) = regs->rip; regs->rip = fixup; return; } - hardware_gp: +hardware_gp: if ( debugger_trap_fatal(TRAP_gp_fault, regs) ) return; @@ -1637,9 +1642,10 @@ static void nmi_hwdom_report(unsigned int reason_idx) static void pci_serr_error(const struct cpu_user_regs *regs) { - outb((inb(0x61) & 0x0f) | 0x04, 0x61); /* clear-and-disable the PCI SERR error line. */ + outb((inb(0x61) & 0x0f) | 0x04, + 0x61); /* clear-and-disable the PCI SERR error line. 
*/ - switch ( opt_nmi[0] ) + switch (opt_nmi[0]) { case 'd': /* 'dom0' */ nmi_hwdom_report(_XEN_NMIREASON_pci_serr); @@ -1649,7 +1655,7 @@ static void pci_serr_error(const struct cpu_user_regs *regs) from NMI context -- raise a softirq instead. */ raise_softirq(PCI_SERR_SOFTIRQ); break; - default: /* 'fatal' */ + default: /* 'fatal' */ console_force_unlock(); printk("\n\nNMI - PCI system error (SERR)\n"); fatal_trap(regs, 0); @@ -1658,13 +1664,13 @@ static void pci_serr_error(const struct cpu_user_regs *regs) static void io_check_error(const struct cpu_user_regs *regs) { - switch ( opt_nmi[0] ) + switch (opt_nmi[0]) { case 'd': /* 'dom0' */ nmi_hwdom_report(_XEN_NMIREASON_io_error); case 'i': /* 'ignore' */ break; - default: /* 'fatal' */ + default: /* 'fatal' */ console_force_unlock(); printk("\n\nNMI - I/O ERROR\n"); fatal_trap(regs, 0); @@ -1678,13 +1684,13 @@ static void io_check_error(const struct cpu_user_regs *regs) static void unknown_nmi_error(const struct cpu_user_regs *regs, unsigned char reason) { - switch ( opt_nmi[0] ) + switch (opt_nmi[0]) { case 'd': /* 'dom0' */ nmi_hwdom_report(_XEN_NMIREASON_unknown); case 'i': /* 'ignore' */ break; - default: /* 'fatal' */ + default: /* 'fatal' */ console_force_unlock(); printk("Uhhuh. NMI received for unknown reason %02x.\n", reason); printk("Do you have a strange power saving mode enabled?\n"); @@ -1760,8 +1766,8 @@ void do_device_not_available(struct cpu_user_regs *regs) { unsigned long fixup = search_exception_table(regs); - gprintk(XENLOG_ERR, "#NM: %p [%ps] -> %p\n", - _p(regs->rip), _p(regs->rip), _p(fixup)); + gprintk(XENLOG_ERR, "#NM: %p [%ps] -> %p\n", _p(regs->rip), + _p(regs->rip), _p(fixup)); /* * We shouldn't be able to reach here, but for release builds have * the recovery logic in place nevertheless. 
@@ -1896,8 +1902,8 @@ void do_debug(struct cpu_user_regs *regs) */ gprintk(XENLOG_WARNING, "Hit #DB in Xen context: %04x:%p [%ps], stk %04x:%p, dr6 %lx\n", - regs->cs, _p(regs->rip), _p(regs->rip), - regs->ss, _p(regs->rsp), dr6); + regs->cs, _p(regs->rip), _p(regs->rip), regs->ss, _p(regs->rsp), + dr6); return; } @@ -1909,8 +1915,8 @@ void do_debug(struct cpu_user_regs *regs) pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC); } -static void __init noinline __set_intr_gate(unsigned int n, - uint32_t dpl, void *addr) +static void __init noinline __set_intr_gate(unsigned int n, uint32_t dpl, + void *addr) { _set_gate(&idt_table[n], SYS_DESC_irq_gate, dpl, addr); } @@ -1928,34 +1934,33 @@ static void __init set_intr_gate(unsigned int n, void *addr) void load_TR(void) { struct tss_struct *tss = &this_cpu(init_tss); - struct desc_ptr old_gdt, tss_gdt = { - .base = (long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY), - .limit = LAST_RESERVED_GDT_BYTE - }; - - _set_tssldt_desc( - this_cpu(gdt_table) + TSS_ENTRY - FIRST_RESERVED_GDT_ENTRY, - (unsigned long)tss, - offsetof(struct tss_struct, __cacheline_filler) - 1, - SYS_DESC_tss_avail); + struct desc_ptr old_gdt, + tss_gdt = {.base = + (long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY), + .limit = LAST_RESERVED_GDT_BYTE}; + + _set_tssldt_desc(this_cpu(gdt_table) + TSS_ENTRY - FIRST_RESERVED_GDT_ENTRY, + (unsigned long)tss, + offsetof(struct tss_struct, __cacheline_filler) - 1, + SYS_DESC_tss_avail); _set_tssldt_desc( this_cpu(compat_gdt_table) + TSS_ENTRY - FIRST_RESERVED_GDT_ENTRY, - (unsigned long)tss, - offsetof(struct tss_struct, __cacheline_filler) - 1, + (unsigned long)tss, offsetof(struct tss_struct, __cacheline_filler) - 1, SYS_DESC_tss_busy); /* Switch to non-compat GDT (which has B bit clear) to execute LTR. */ - asm volatile ( - "sgdt %0; lgdt %2; ltr %w1; lgdt %0" - : "=m" (old_gdt) : "rm" (TSS_ENTRY << 3), "m" (tss_gdt) : "memory" ); + asm volatile("sgdt %0; lgdt %2; ltr %w1; lgdt %0" + : "=m"(old_gdt) + : "rm"(TSS_ENTRY << 3), "m"(tss_gdt) + : "memory"); } static unsigned int calc_ler_msr(void) { - switch ( boot_cpu_data.x86_vendor ) + switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: - switch ( boot_cpu_data.x86 ) + switch (boot_cpu_data.x86) { case 6: return MSR_IA32_LASTINTFROMIP; @@ -1966,7 +1971,7 @@ static unsigned int calc_ler_msr(void) break; case X86_VENDOR_AMD: - switch ( boot_cpu_data.x86 ) + switch (boot_cpu_data.x86) { case 6: case 0xf ... 0x17: @@ -2002,24 +2007,24 @@ void __init init_idt_traps(void) * saved. The page-fault handler also needs interrupts disabled until %cr2 * has been read and saved on the stack. 
*/ - set_intr_gate(TRAP_divide_error,÷_error); - set_intr_gate(TRAP_debug,&debug); - set_intr_gate(TRAP_nmi,&nmi); - set_swint_gate(TRAP_int3,&int3); /* usable from all privileges */ - set_swint_gate(TRAP_overflow,&overflow); /* usable from all privileges */ - set_intr_gate(TRAP_bounds,&bounds); - set_intr_gate(TRAP_invalid_op,&invalid_op); - set_intr_gate(TRAP_no_device,&device_not_available); - set_intr_gate(TRAP_double_fault,&double_fault); - set_intr_gate(TRAP_invalid_tss,&invalid_TSS); - set_intr_gate(TRAP_no_segment,&segment_not_present); - set_intr_gate(TRAP_stack_error,&stack_segment); - set_intr_gate(TRAP_gp_fault,&general_protection); - set_intr_gate(TRAP_page_fault,&early_page_fault); - set_intr_gate(TRAP_copro_error,&coprocessor_error); - set_intr_gate(TRAP_alignment_check,&alignment_check); - set_intr_gate(TRAP_machine_check,&machine_check); - set_intr_gate(TRAP_simd_error,&simd_coprocessor_error); + set_intr_gate(TRAP_divide_error, ÷_error); + set_intr_gate(TRAP_debug, &debug); + set_intr_gate(TRAP_nmi, &nmi); + set_swint_gate(TRAP_int3, &int3); /* usable from all privileges */ + set_swint_gate(TRAP_overflow, &overflow); /* usable from all privileges */ + set_intr_gate(TRAP_bounds, &bounds); + set_intr_gate(TRAP_invalid_op, &invalid_op); + set_intr_gate(TRAP_no_device, &device_not_available); + set_intr_gate(TRAP_double_fault, &double_fault); + set_intr_gate(TRAP_invalid_tss, &invalid_TSS); + set_intr_gate(TRAP_no_segment, &segment_not_present); + set_intr_gate(TRAP_stack_error, &stack_segment); + set_intr_gate(TRAP_gp_fault, &general_protection); + set_intr_gate(TRAP_page_fault, &early_page_fault); + set_intr_gate(TRAP_copro_error, &coprocessor_error); + set_intr_gate(TRAP_alignment_check, &alignment_check); + set_intr_gate(TRAP_machine_check, &machine_check); + set_intr_gate(TRAP_simd_error, &simd_coprocessor_error); /* Specify dedicated interrupt stacks for NMI, #DF, and #MC. */ enable_each_ist(idt_table); @@ -2107,12 +2112,12 @@ void asm_domain_crash_synchronous(unsigned long addr) if ( addr == 0 ) addr = this_cpu(last_extable_addr); - printk("domain_crash_sync called from entry.S: fault at %p %pS\n", - _p(addr), _p(addr)); + printk("domain_crash_sync called from entry.S: fault at %p %pS\n", _p(addr), + _p(addr)); __domain_crash(current->domain); - for ( ; ; ) + for ( ;; ) do_softirq(); } diff --git a/xen/arch/x86/usercopy.c b/xen/arch/x86/usercopy.c index 1d1ef075c9..e4d0bf382c 100644 --- a/xen/arch/x86/usercopy.c +++ b/xen/arch/x86/usercopy.c @@ -1,4 +1,4 @@ -/* +/* * User address space access functions. * * Copyright 1997 Andi Kleen @@ -15,35 +15,54 @@ unsigned __copy_to_user_ll(void __user *to, const void *from, unsigned n) unsigned dummy; stac(); - asm volatile ( - " cmp $"STR(2*BYTES_PER_LONG-1)", %[cnt]\n" - " jbe 1f\n" - " mov %k[to], %[cnt]\n" - " neg %[cnt]\n" - " and $"STR(BYTES_PER_LONG-1)", %[cnt]\n" - " sub %[cnt], %[aux]\n" - "4: rep movsb\n" /* make 'to' address aligned */ - " mov %[aux], %[cnt]\n" - " shr $"STR(LONG_BYTEORDER)", %[cnt]\n" - " and $"STR(BYTES_PER_LONG-1)", %[aux]\n" - " .align 2,0x90\n" - "0: rep movs"__OS"\n" /* as many words as possible... 
*/ - " mov %[aux],%[cnt]\n" - "1: rep movsb\n" /* ...remainder copied as bytes */ - "2:\n" - ".section .fixup,\"ax\"\n" - "5: add %[aux], %[cnt]\n" - " jmp 2b\n" - "3: lea (%q[aux], %q[cnt], "STR(BYTES_PER_LONG)"), %[cnt]\n" - " jmp 2b\n" - ".previous\n" - _ASM_EXTABLE(4b, 5b) - _ASM_EXTABLE(0b, 3b) - _ASM_EXTABLE(1b, 2b) - : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from), - [aux] "=&r" (dummy) - : "[aux]" (n) - : "memory" ); + asm volatile( + " cmp $" STR( + 2 * BYTES_PER_LONG - + 1) ", %[cnt]\n" + " jbe 1f\n" + " mov %k[to], %[cnt]\n" + " neg %[cnt]\n" + " and $" STR( + BYTES_PER_LONG - + 1) ", %[cnt]\n" + " sub %[cnt], %[aux]\n" + "4: rep movsb\n" /* make 'to' address aligned */ + " mov %[aux], %[cnt]\n" + " shr $" STR( + LONG_BYTEORDER) ", %[cnt]\n" + " and $" STR( + BYTES_PER_LONG - + 1) ", %[aux]\n" + " .align 2,0x90\n" + "0: rep movs" __OS + "\n" /* as many words as + possible... */ + " mov %[aux],%[cnt]\n" + "1: rep movsb\n" /* ...remainder + copied as + bytes */ + "2:\n" + ".section .fixup,\"ax\"\n" + "5: add %[aux], %[cnt]\n" + " jmp 2b\n" + "3: lea (%q[aux], " + "%q[cnt], " STR( + BYTES_PER_LONG) "), " + "%[cnt]\n" + " jmp " + "2b\n" + ".previous" + "\n" _ASM_EXTABLE( + 4b, 5b) + _ASM_EXTABLE( + 0b, + 3b) + _ASM_EXTABLE( + 1b, + 2b) + : [cnt] "+c"(n), [to] "+D"(to), [from] "+S"(from), [aux] "=&r"(dummy) + : "[aux]"(n) + : "memory"); clac(); return n; @@ -54,41 +73,72 @@ unsigned __copy_from_user_ll(void *to, const void __user *from, unsigned n) unsigned dummy; stac(); - asm volatile ( - " cmp $"STR(2*BYTES_PER_LONG-1)", %[cnt]\n" - " jbe 1f\n" - " mov %k[to], %[cnt]\n" - " neg %[cnt]\n" - " and $"STR(BYTES_PER_LONG-1)", %[cnt]\n" - " sub %[cnt], %[aux]\n" - "4: rep movsb\n" /* make 'to' address aligned */ - " mov %[aux],%[cnt]\n" - " shr $"STR(LONG_BYTEORDER)", %[cnt]\n" - " and $"STR(BYTES_PER_LONG-1)", %[aux]\n" - " .align 2,0x90\n" - "0: rep movs"__OS"\n" /* as many words as possible... */ - " mov %[aux], %[cnt]\n" - "1: rep movsb\n" /* ...remainder copied as bytes */ - "2:\n" - ".section .fixup,\"ax\"\n" - "5: add %[aux], %[cnt]\n" - " jmp 6f\n" - "3: lea (%q[aux], %q[cnt], "STR(BYTES_PER_LONG)"), %[cnt]\n" - "6: mov %[cnt], %k[from]\n" - " xchg %%eax, %[aux]\n" - " xor %%eax, %%eax\n" - " rep stosb\n" - " xchg %[aux], %%eax\n" - " mov %k[from], %[cnt]\n" - " jmp 2b\n" - ".previous\n" - _ASM_EXTABLE(4b, 5b) - _ASM_EXTABLE(0b, 3b) - _ASM_EXTABLE(1b, 6b) - : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from), - [aux] "=&r" (dummy) - : "[aux]" (n) - : "memory" ); + asm volatile( + " cmp $" STR( + 2 * BYTES_PER_LONG - + 1) ", %[cnt]\n" + " jbe 1f\n" + " mov %k[to], %[cnt]\n" + " neg %[cnt]\n" + " and $" STR( + BYTES_PER_LONG - + 1) ", %[cnt]\n" + " sub %[cnt], %[aux]\n" + "4: rep movsb\n" /* make 'to' address aligned */ + " mov %[aux],%[cnt]\n" + " shr $" STR( + LONG_BYTEORDER) ", %[cnt]\n" + " and $" STR( + BYTES_PER_LONG - + 1) ", %[aux]\n" + " .align 2,0x90\n" + "0: rep movs" __OS + "\n" /* as many words as + possible... 
*/ + " mov %[aux], %[cnt]\n" + "1: rep movsb\n" /* ...remainder + copied as + bytes */ + "2:\n" + ".section .fixup,\"ax\"\n" + "5: add %[aux], %[cnt]\n" + " jmp 6f\n" + "3: lea (%q[aux], " + "%q[cnt], " STR( + BYTES_PER_LONG) "), " + "%[cnt]\n" + "6: mov " + "%[cnt], " + "%k[from]" + "\n" + " xchg " + "%%eax, " + "%[aux]\n" + " xor " + "%%eax, " + "%%eax\n" + " rep " + "stosb\n" + " xchg " + "%[aux], " + "%%eax\n" + " mov " + "%k[from]," + " %[cnt]\n" + " jmp " + "2b\n" + ".previous" + "\n" _ASM_EXTABLE( + 4b, 5b) + _ASM_EXTABLE( + 0b, + 3b) + _ASM_EXTABLE( + 1b, + 6b) + : [cnt] "+c"(n), [to] "+D"(to), [from] "+S"(from), [aux] "=&r"(dummy) + : "[aux]"(n) + : "memory"); clac(); return n; @@ -129,20 +179,19 @@ unsigned clear_user(void __user *to, unsigned n) if ( access_ok(to, n) ) { stac(); - asm volatile ( - "0: rep stos"__OS"\n" - " mov %[bytes], %[cnt]\n" - "1: rep stosb\n" - "2:\n" - ".section .fixup,\"ax\"\n" - "3: lea (%q[bytes], %q[longs], "STR(BYTES_PER_LONG)"), %[cnt]\n" - " jmp 2b\n" - ".previous\n" - _ASM_EXTABLE(0b,3b) - _ASM_EXTABLE(1b,2b) - : [cnt] "=&c" (n), [to] "+D" (to) - : [bytes] "r" (n & (BYTES_PER_LONG - 1)), - [longs] "0" (n / BYTES_PER_LONG), "a" (0) ); + asm volatile("0: rep stos" __OS "\n" + " mov %[bytes], %[cnt]\n" + "1: rep stosb\n" + "2:\n" + ".section .fixup,\"ax\"\n" + "3: lea (%q[bytes], %q[longs], " STR( + BYTES_PER_LONG) "), %[cnt]\n" + " jmp 2b\n" + ".previous\n" _ASM_EXTABLE(0b, 3b) + _ASM_EXTABLE(1b, 2b) + : [cnt] "=&c"(n), [to] "+D"(to) + : [bytes] "r"(n & (BYTES_PER_LONG - 1)), + [longs] "0"(n / BYTES_PER_LONG), "a"(0)); clac(); } diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c index 51c3493b1d..9752ba895e 100644 --- a/xen/arch/x86/vm_event.c +++ b/xen/arch/x86/vm_event.c @@ -27,7 +27,7 @@ int vm_event_init_domain(struct domain *d) { struct vcpu *v; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { if ( v->arch.vm_event ) continue; @@ -49,7 +49,7 @@ void vm_event_cleanup_domain(struct domain *d) { struct vcpu *v; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { xfree(v->arch.vm_event); v->arch.vm_event = NULL; @@ -86,13 +86,13 @@ void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp) w = &v->arch.vm_event->write_data; - switch ( rsp->reason ) + switch (rsp->reason) { case VM_EVENT_REASON_MOV_TO_MSR: w->do_write.msr = 0; break; case VM_EVENT_REASON_WRITE_CTRLREG: - switch ( rsp->u.write_ctrlreg.index ) + switch (rsp->u.write_ctrlreg.index) { case VM_EVENT_X86_CR0: w->do_write.cr0 = 0; @@ -135,7 +135,7 @@ static void vm_event_pack_segment_register(enum x86_segment segment, hvm_get_segment_register(current, segment, &seg); - switch ( segment ) + switch (segment) { case x86_seg_ss: reg->ss_base = seg.base; @@ -206,8 +206,8 @@ void vm_event_fill_regs(vm_event_request_t *req) req->data.regs.x86.rsi = regs->rsi; req->data.regs.x86.rdi = regs->rdi; - req->data.regs.x86.r8 = regs->r8; - req->data.regs.x86.r9 = regs->r9; + req->data.regs.x86.r8 = regs->r8; + req->data.regs.x86.r9 = regs->r9; req->data.regs.x86.r10 = regs->r10; req->data.regs.x86.r11 = regs->r11; req->data.regs.x86.r12 = regs->r12; @@ -216,7 +216,7 @@ void vm_event_fill_regs(vm_event_request_t *req) req->data.regs.x86.r15 = regs->r15; req->data.regs.x86.rflags = regs->rflags; - req->data.regs.x86.rip = regs->rip; + req->data.regs.x86.rip = regs->rip; req->data.regs.x86.dr7 = curr->arch.dr7; req->data.regs.x86.cr0 = curr->arch.hvm.guest_cr[0]; @@ -252,7 +252,7 @@ void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp) return; } - switch ( 
rsp->reason ) + switch (rsp->reason) { case VM_EVENT_REASON_MEM_ACCESS: /* diff --git a/xen/arch/x86/x86_64/acpi_mmcfg.c b/xen/arch/x86/x86_64/acpi_mmcfg.c index 650140eec5..750cd536b5 100644 --- a/xen/arch/x86/x86_64/acpi_mmcfg.c +++ b/xen/arch/x86/x86_64/acpi_mmcfg.c @@ -51,18 +51,17 @@ static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg, { int year; - if (cfg->address < 0xFFFFFFFF) + if ( cfg->address < 0xFFFFFFFF ) return 0; - if (!strncmp(mcfg->header.oem_id, "SGI", 3)) + if ( !strncmp(mcfg->header.oem_id, "SGI", 3) ) return 0; - if (mcfg->header.revision >= 1 && - dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && - year >= 2010) - return 0; + if ( mcfg->header.revision >= 1 && + dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2010 ) + return 0; - printk(KERN_ERR "MCFG region for %04x:%02x-%02x at %#"PRIx64 + printk(KERN_ERR "MCFG region for %04x:%02x-%02x at %#" PRIx64 " (above 4GB) ignored\n", cfg->pci_segment, cfg->start_bus_number, cfg->end_bus_number, cfg->address); @@ -74,7 +73,7 @@ int __init acpi_parse_mcfg(struct acpi_table_header *header) struct acpi_table_mcfg *mcfg; unsigned long i; - if (!header) + if ( !header ) return -EINVAL; mcfg = (struct acpi_table_mcfg *)header; @@ -82,20 +81,22 @@ int __init acpi_parse_mcfg(struct acpi_table_header *header) /* how many config structures do we have */ pci_mmcfg_config_num = 0; i = header->length - sizeof(struct acpi_table_mcfg); - while (i >= sizeof(struct acpi_mcfg_allocation)) { + while ( i >= sizeof(struct acpi_mcfg_allocation) ) + { ++pci_mmcfg_config_num; i -= sizeof(struct acpi_mcfg_allocation); }; - if (pci_mmcfg_config_num == 0) { + if ( pci_mmcfg_config_num == 0 ) + { printk(KERN_ERR PREFIX "MMCONFIG has no entries\n"); return -ENODEV; } - pci_mmcfg_config = xmalloc_array(struct acpi_mcfg_allocation, - pci_mmcfg_config_num); - if (!pci_mmcfg_config) { - printk(KERN_WARNING PREFIX - "No memory for MCFG config tables\n"); + pci_mmcfg_config = + xmalloc_array(struct acpi_mcfg_allocation, pci_mmcfg_config_num); + if ( !pci_mmcfg_config ) + { + printk(KERN_WARNING PREFIX "No memory for MCFG config tables\n"); pci_mmcfg_config_num = 0; return -ENOMEM; } @@ -103,8 +104,10 @@ int __init acpi_parse_mcfg(struct acpi_table_header *header) memcpy(pci_mmcfg_config, &mcfg[1], pci_mmcfg_config_num * sizeof(*pci_mmcfg_config)); - for (i = 0; i < pci_mmcfg_config_num; ++i) { - if (acpi_mcfg_check_entry(mcfg, &pci_mmcfg_config[i])) { + for ( i = 0; i < pci_mmcfg_config_num; ++i ) + { + if ( acpi_mcfg_check_entry(mcfg, &pci_mmcfg_config[i]) ) + { xfree(pci_mmcfg_config); pci_mmcfg_config_num = 0; return -ENODEV; diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c index 052228cdda..96d6356f6e 100644 --- a/xen/arch/x86/x86_64/asm-offsets.c +++ b/xen/arch/x86/x86_64/asm-offsets.c @@ -14,13 +14,12 @@ #include #include -#define DEFINE(_sym, _val) \ - asm volatile ("\n.ascii\"==>#define " #_sym " %0 /* " #_val " */<==\"" \ - : : "i" (_val) ) -#define BLANK() \ - asm volatile ( "\n.ascii\"==><==\"" : : ) -#define OFFSET(_sym, _str, _mem) \ - DEFINE(_sym, offsetof(_str, _mem)); +#define DEFINE(_sym, _val) \ + asm volatile("\n.ascii\"==>#define " #_sym " %0 /* " #_val " */<==\"" \ + : \ + : "i"(_val)) +#define BLANK() asm volatile("\n.ascii\"==><==\"" : :) +#define OFFSET(_sym, _str, _mem) DEFINE(_sym, offsetof(_str, _mem)); void __dummy__(void) { @@ -59,12 +58,12 @@ void __dummy__(void) OFFSET(VCPU_syscall_addr, struct vcpu, arch.pv.syscall_callback_eip); OFFSET(VCPU_syscall32_addr, struct 
vcpu, arch.pv.syscall32_callback_eip); OFFSET(VCPU_syscall32_sel, struct vcpu, arch.pv.syscall32_callback_cs); - OFFSET(VCPU_syscall32_disables_events, - struct vcpu, arch.pv.syscall32_disables_events); + OFFSET(VCPU_syscall32_disables_events, struct vcpu, + arch.pv.syscall32_disables_events); OFFSET(VCPU_sysenter_addr, struct vcpu, arch.pv.sysenter_callback_eip); OFFSET(VCPU_sysenter_sel, struct vcpu, arch.pv.sysenter_callback_cs); - OFFSET(VCPU_sysenter_disables_events, - struct vcpu, arch.pv.sysenter_disables_events); + OFFSET(VCPU_sysenter_disables_events, struct vcpu, + arch.pv.sysenter_disables_events); OFFSET(VCPU_trap_ctxt, struct vcpu, arch.pv.trap_ctxt); OFFSET(VCPU_kernel_sp, struct vcpu, arch.pv.kernel_sp); OFFSET(VCPU_kernel_ss, struct vcpu, arch.pv.kernel_ss); @@ -79,7 +78,7 @@ void __dummy__(void) OFFSET(VCPU_async_exception_mask, struct vcpu, async_exception_mask); DEFINE(VCPU_TRAP_NMI, VCPU_TRAP_NMI); DEFINE(VCPU_TRAP_MCE, VCPU_TRAP_MCE); - DEFINE(_VGCF_syscall_disables_events, _VGCF_syscall_disables_events); + DEFINE(_VGCF_syscall_disables_events, _VGCF_syscall_disables_events); BLANK(); OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm.svm.vmcb_pa); @@ -95,7 +94,8 @@ void __dummy__(void) OFFSET(VCPU_nhvm_guestmode, struct vcpu, arch.hvm.nvcpu.nv_guestmode); OFFSET(VCPU_nhvm_p2m, struct vcpu, arch.hvm.nvcpu.nv_p2m); - OFFSET(VCPU_nsvm_hap_enabled, struct vcpu, arch.hvm.nvcpu.u.nsvm.ns_hap_enabled); + OFFSET(VCPU_nsvm_hap_enabled, struct vcpu, + arch.hvm.nvcpu.u.nsvm.ns_hap_enabled); BLANK(); OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv); @@ -105,8 +105,10 @@ void __dummy__(void) OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask); BLANK(); - OFFSET(COMPAT_VCPUINFO_upcall_pending, struct compat_vcpu_info, evtchn_upcall_pending); - OFFSET(COMPAT_VCPUINFO_upcall_mask, struct compat_vcpu_info, evtchn_upcall_mask); + OFFSET(COMPAT_VCPUINFO_upcall_pending, struct compat_vcpu_info, + evtchn_upcall_pending); + OFFSET(COMPAT_VCPUINFO_upcall_mask, struct compat_vcpu_info, + evtchn_upcall_mask); BLANK(); OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs); diff --git a/xen/arch/x86/x86_64/compat.c b/xen/arch/x86/x86_64/compat.c index edc3115902..58e80fb01b 100644 --- a/xen/arch/x86/x86_64/compat.c +++ b/xen/arch/x86/x86_64/compat.c @@ -8,10 +8,10 @@ asm(".file \"" __FILE__ "\""); #include #include -#define physdev_op compat_physdev_op -#define physdev_op_t physdev_op_compat_t -#define do_physdev_op compat_physdev_op -#define do_physdev_op_compat(x) compat_physdev_op_compat(_##x) +#define physdev_op compat_physdev_op +#define physdev_op_t physdev_op_compat_t +#define do_physdev_op compat_physdev_op +#define do_physdev_op_compat(x) compat_physdev_op_compat(_##x) #define COMPAT #define _XEN_GUEST_HANDLE(t) XEN_GUEST_HANDLE(t) diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c index 32410ed273..ef9a5b2b19 100644 --- a/xen/arch/x86/x86_64/compat/mm.c +++ b/xen/arch/x86/x86_64/compat/mm.c @@ -17,7 +17,7 @@ int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) unsigned int i; int rc = 0; - switch ( cmd ) + switch (cmd) { case XENMEM_set_memory_map: { @@ -28,7 +28,7 @@ int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) return -EFAULT; #define XLAT_memory_map_HNDL_buffer(_d_, _s_) \ - guest_from_compat_handle((_d_)->buffer, (_s_)->buffer) + guest_from_compat_handle((_d_)->buffer, (_s_)->buffer) XLAT_foreign_memory_map(nat, &cmp); #undef XLAT_memory_map_HNDL_buffer 
@@ -47,7 +47,7 @@ int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) return -EFAULT; #define XLAT_memory_map_HNDL_buffer(_d_, _s_) \ - guest_from_compat_handle((_d_)->buffer, (_s_)->buffer) + guest_from_compat_handle((_d_)->buffer, (_s_)->buffer) XLAT_memory_map(nat, &cmp); #undef XLAT_memory_map_HNDL_buffer @@ -99,9 +99,8 @@ int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) struct domain *d = current->domain; struct compat_machphys_mapping mapping = { .v_start = MACH2PHYS_COMPAT_VIRT_START(d), - .v_end = MACH2PHYS_COMPAT_VIRT_END, - .max_mfn = MACH2PHYS_COMPAT_NR_ENTRIES(d) - 1 - }; + .v_end = MACH2PHYS_COMPAT_VIRT_END, + .max_mfn = MACH2PHYS_COMPAT_NR_ENTRIES(d) - 1}; if ( copy_to_guest(arg, &mapping, 1) ) rc = -EFAULT; @@ -166,15 +165,14 @@ int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) #ifdef CONFIG_PV DEFINE_XEN_GUEST_HANDLE(mmuext_op_compat_t); -int compat_mmuext_op(XEN_GUEST_HANDLE_PARAM(void) arg, - unsigned int count, +int compat_mmuext_op(XEN_GUEST_HANDLE_PARAM(void) arg, unsigned int count, XEN_GUEST_HANDLE_PARAM(uint) pdone, unsigned int foreigndom) { unsigned int i, preempt_mask; int rc = 0; - XEN_GUEST_HANDLE_PARAM(mmuext_op_compat_t) cmp_uops = - guest_handle_cast(arg, mmuext_op_compat_t); + XEN_GUEST_HANDLE_PARAM(mmuext_op_compat_t) + cmp_uops = guest_handle_cast(arg, mmuext_op_compat_t); XEN_GUEST_HANDLE_PARAM(mmuext_op_t) nat_ops; if ( unlikely(count == MMU_UPDATE_PREEMPTED) && @@ -210,7 +208,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE_PARAM(void) arg, break; } - switch ( cmp_op.cmd ) + switch (cmp_op.cmd) { case MMUEXT_PIN_L1_TABLE: case MMUEXT_PIN_L2_TABLE: @@ -239,7 +237,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE_PARAM(void) arg, if ( rc ) break; - switch ( cmp_op.cmd ) + switch (cmp_op.cmd) { case MMUEXT_SET_LDT: arg2 = XLAT_mmuext_op_arg2_nr_ents; @@ -257,7 +255,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE_PARAM(void) arg, } #define XLAT_mmuext_op_HNDL_arg2_vcpumask(_d_, _s_) \ - guest_from_compat_handle((_d_)->arg2.vcpumask, (_s_)->arg2.vcpumask) + guest_from_compat_handle((_d_)->arg2.vcpumask, (_s_)->arg2.vcpumask) XLAT_mmuext_op(nat_op, &cmp_op); #undef XLAT_mmuext_op_HNDL_arg2_vcpumask @@ -278,8 +276,8 @@ int compat_mmuext_op(XEN_GUEST_HANDLE_PARAM(void) arg, struct cpu_user_regs *regs = guest_cpu_user_regs(); struct mc_state *mcs = ¤t->mc_state; unsigned int arg1 = !(mcs->flags & MCSF_in_multicall) - ? regs->ecx - : mcs->call.args[1]; + ? 
regs->ecx + : mcs->call.args[1]; unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED; BUG_ON(left == arg1 && left != i); diff --git a/xen/arch/x86/x86_64/cpu_idle.c b/xen/arch/x86/x86_64/cpu_idle.c index f991fd900a..c6079cb6dd 100644 --- a/xen/arch/x86/x86_64/cpu_idle.c +++ b/xen/arch/x86/x86_64/cpu_idle.c @@ -42,7 +42,7 @@ void *xlat_malloc(unsigned long *xlat_page_current, size_t size) if ( unlikely(size > xlat_page_left_size(*xlat_page_current)) ) return NULL; - ret = (void *) *xlat_page_current; + ret = (void *)*xlat_page_current; *xlat_page_current += size; return ret; @@ -51,15 +51,16 @@ void *xlat_malloc(unsigned long *xlat_page_current, size_t size) static int copy_from_compat_state(xen_processor_cx_t *xen_state, compat_processor_cx_t *state) { -#define XLAT_processor_cx_HNDL_dp(_d_, _s_) do { \ - XEN_GUEST_HANDLE(compat_processor_csd_t) dps; \ - XEN_GUEST_HANDLE_PARAM(xen_processor_csd_t) dps_param; \ - if ( unlikely(!compat_handle_okay((_s_)->dp, (_s_)->dpcnt)) ) \ - return -EFAULT; \ - guest_from_compat_handle(dps, (_s_)->dp); \ - dps_param = guest_handle_cast(dps, xen_processor_csd_t); \ - (_d_)->dp = guest_handle_from_param(dps_param, xen_processor_csd_t); \ -} while (0) +#define XLAT_processor_cx_HNDL_dp(_d_, _s_) \ + do { \ + XEN_GUEST_HANDLE(compat_processor_csd_t) dps; \ + XEN_GUEST_HANDLE_PARAM(xen_processor_csd_t) dps_param; \ + if ( unlikely(!compat_handle_okay((_s_)->dp, (_s_)->dpcnt)) ) \ + return -EFAULT; \ + guest_from_compat_handle(dps, (_s_)->dp); \ + dps_param = guest_handle_cast(dps, xen_processor_csd_t); \ + (_d_)->dp = guest_handle_from_param(dps_param, xen_processor_csd_t); \ + } while ( 0 ) XLAT_processor_cx(xen_state, state); #undef XLAT_processor_cx_HNDL_dp @@ -73,40 +74,42 @@ long compat_set_cx_pminfo(uint32_t cpu, struct compat_processor_power *power) xlat_malloc_init(xlat_page_current); - xen_power = xlat_malloc_array(xlat_page_current, - struct xen_processor_power, 1); + xen_power = + xlat_malloc_array(xlat_page_current, struct xen_processor_power, 1); if ( unlikely(xen_power == NULL) ) - return -EFAULT; + return -EFAULT; -#define XLAT_processor_power_HNDL_states(_d_, _s_) do { \ - xen_processor_cx_t *xen_states = NULL; \ -\ - if ( likely((_s_)->count > 0) ) \ - { \ - XEN_GUEST_HANDLE(compat_processor_cx_t) states; \ - compat_processor_cx_t state; \ - int i; \ -\ - xen_states = xlat_malloc_array(xlat_page_current, \ - xen_processor_cx_t, (_s_)->count); \ - if ( unlikely(xen_states == NULL) ) \ - return -EFAULT; \ -\ - if ( unlikely(!compat_handle_okay((_s_)->states, (_s_)->count)) ) \ - return -EFAULT; \ - guest_from_compat_handle(states, (_s_)->states); \ -\ - for ( i = 0; i < _s_->count; i++ ) \ - { \ - if ( unlikely(copy_from_guest_offset(&state, states, i, 1)) ) \ - return -EFAULT; \ - if ( unlikely(copy_from_compat_state(&xen_states[i], &state)) ) \ - return -EFAULT; \ - } \ - } \ -\ - set_xen_guest_handle((_d_)->states, xen_states); \ -} while (0) +#define XLAT_processor_power_HNDL_states(_d_, _s_) \ + do { \ + xen_processor_cx_t *xen_states = NULL; \ + \ + if ( likely((_s_)->count > 0) ) \ + { \ + XEN_GUEST_HANDLE(compat_processor_cx_t) states; \ + compat_processor_cx_t state; \ + int i; \ + \ + xen_states = xlat_malloc_array(xlat_page_current, \ + xen_processor_cx_t, (_s_)->count); \ + if ( unlikely(xen_states == NULL) ) \ + return -EFAULT; \ + \ + if ( unlikely(!compat_handle_okay((_s_)->states, (_s_)->count)) ) \ + return -EFAULT; \ + guest_from_compat_handle(states, (_s_)->states); \ + \ + for ( i = 0; i < _s_->count; i++ ) \ + { \ + if ( 
unlikely(copy_from_guest_offset(&state, states, i, 1)) ) \ + return -EFAULT; \ + if ( unlikely( \ + copy_from_compat_state(&xen_states[i], &state)) ) \ + return -EFAULT; \ + } \ + } \ + \ + set_xen_guest_handle((_d_)->states, xen_states); \ + } while ( 0 ) XLAT_processor_power(xen_power, power); #undef XLAT_processor_power_HNDL_states diff --git a/xen/arch/x86/x86_64/cpufreq.c b/xen/arch/x86/x86_64/cpufreq.c index e018b5e198..f18d7e6722 100644 --- a/xen/arch/x86/x86_64/cpufreq.c +++ b/xen/arch/x86/x86_64/cpufreq.c @@ -28,8 +28,8 @@ DEFINE_XEN_GUEST_HANDLE(compat_processor_px_t); -int -compat_set_px_pminfo(uint32_t cpu, struct compat_processor_performance *perf) +int compat_set_px_pminfo(uint32_t cpu, + struct compat_processor_performance *perf) { struct xen_processor_performance *xen_perf; unsigned long xlat_page_current; @@ -37,19 +37,21 @@ compat_set_px_pminfo(uint32_t cpu, struct compat_processor_performance *perf) xlat_malloc_init(xlat_page_current); xen_perf = xlat_malloc_array(xlat_page_current, - struct xen_processor_performance, 1); + struct xen_processor_performance, 1); if ( unlikely(xen_perf == NULL) ) - return -EFAULT; + return -EFAULT; -#define XLAT_processor_performance_HNDL_states(_d_, _s_) do { \ - XEN_GUEST_HANDLE(compat_processor_px_t) states; \ - XEN_GUEST_HANDLE_PARAM(xen_processor_px_t) states_t; \ - if ( unlikely(!compat_handle_okay((_s_)->states, (_s_)->state_count)) ) \ - return -EFAULT; \ - guest_from_compat_handle(states, (_s_)->states); \ - states_t = guest_handle_cast(states, xen_processor_px_t); \ - (_d_)->states = guest_handle_from_param(states_t, xen_processor_px_t); \ -} while (0) +#define XLAT_processor_performance_HNDL_states(_d_, _s_) \ + do { \ + XEN_GUEST_HANDLE(compat_processor_px_t) states; \ + XEN_GUEST_HANDLE_PARAM(xen_processor_px_t) states_t; \ + if ( unlikely( \ + !compat_handle_okay((_s_)->states, (_s_)->state_count)) ) \ + return -EFAULT; \ + guest_from_compat_handle(states, (_s_)->states); \ + states_t = guest_handle_cast(states, xen_processor_px_t); \ + (_d_)->states = guest_handle_from_param(states_t, xen_processor_px_t); \ + } while ( 0 ) XLAT_processor_performance(xen_perf, perf); #undef XLAT_processor_performance_HNDL_states diff --git a/xen/arch/x86/x86_64/domain.c b/xen/arch/x86/x86_64/domain.c index 9734cd39ef..fb349c30a4 100644 --- a/xen/arch/x86/x86_64/domain.c +++ b/xen/arch/x86/x86_64/domain.c @@ -12,20 +12,19 @@ CHECK_vcpu_get_physid; #undef xen_vcpu_get_physid -int -arch_compat_vcpu_op( - int cmd, struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg) +int arch_compat_vcpu_op(int cmd, struct vcpu *v, + XEN_GUEST_HANDLE_PARAM(void) arg) { int rc = -ENOSYS; - switch ( cmd ) + switch (cmd) { case VCPUOP_register_runstate_memory_area: { union { struct compat_vcpu_register_runstate_memory_area compat; struct vcpu_register_runstate_memory_area native; - } area = { }; + } area = {}; rc = -EFAULT; if ( copy_from_guest(&area.compat.addr.v, arg, 1) ) diff --git a/xen/arch/x86/x86_64/gdbstub.c b/xen/arch/x86/x86_64/gdbstub.c index 2626519c89..082991260d 100644 --- a/xen/arch/x86/x86_64/gdbstub.c +++ b/xen/arch/x86/x86_64/gdbstub.c @@ -1,18 +1,18 @@ /* * x86_64 -specific gdb stub routines - * + * * Copyright (C) 2007 Dan Doucette ddoucette@teradici.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
- * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; If not, see . */ @@ -20,10 +20,10 @@ #include #define GDB_REG64(r) gdb_write_to_packet_hex(r, sizeof(u64), ctx) -#define GDB_REG32(r) gdb_write_to_packet_hex(r, sizeof(u32), ctx) +#define GDB_REG32(r) gdb_write_to_packet_hex(r, sizeof(u32), ctx) -void -gdb_arch_read_reg_array(struct cpu_user_regs *regs, struct gdb_context *ctx) +void gdb_arch_read_reg_array(struct cpu_user_regs *regs, + struct gdb_context *ctx) { GDB_REG64(regs->rax); GDB_REG64(regs->rbx); @@ -56,86 +56,179 @@ gdb_arch_read_reg_array(struct cpu_user_regs *regs, struct gdb_context *ctx) gdb_send_packet(ctx); } -void -gdb_arch_write_reg_array(struct cpu_user_regs *regs, const char* buf, - struct gdb_context *ctx) +void gdb_arch_write_reg_array(struct cpu_user_regs *regs, const char *buf, + struct gdb_context *ctx) { gdb_send_reply("", ctx); } -void -gdb_arch_read_reg(unsigned long regnum, struct cpu_user_regs *regs, - struct gdb_context *ctx) +void gdb_arch_read_reg(unsigned long regnum, struct cpu_user_regs *regs, + struct gdb_context *ctx) { switch (regnum) { - case 0: GDB_REG64(regs->rax); break; - case 1: GDB_REG64(regs->rbx); break; - case 2: GDB_REG64(regs->rcx); break; - case 3: GDB_REG64(regs->rdx); break; - case 4: GDB_REG64(regs->rsi); break; - case 5: GDB_REG64(regs->rdi); break; - case 6: GDB_REG64(regs->rbp); break; - case 7: GDB_REG64(regs->rsp); break; + case 0: + GDB_REG64(regs->rax); + break; + case 1: + GDB_REG64(regs->rbx); + break; + case 2: + GDB_REG64(regs->rcx); + break; + case 3: + GDB_REG64(regs->rdx); + break; + case 4: + GDB_REG64(regs->rsi); + break; + case 5: + GDB_REG64(regs->rdi); + break; + case 6: + GDB_REG64(regs->rbp); + break; + case 7: + GDB_REG64(regs->rsp); + break; - case 8: GDB_REG64(regs->r8); break; - case 9: GDB_REG64(regs->r9); break; - case 10: GDB_REG64(regs->r10); break; - case 11: GDB_REG64(regs->r11); break; - case 12: GDB_REG64(regs->r12); break; - case 13: GDB_REG64(regs->r13); break; - case 14: GDB_REG64(regs->r14); break; - case 15: GDB_REG64(regs->r15); break; + case 8: + GDB_REG64(regs->r8); + break; + case 9: + GDB_REG64(regs->r9); + break; + case 10: + GDB_REG64(regs->r10); + break; + case 11: + GDB_REG64(regs->r11); + break; + case 12: + GDB_REG64(regs->r12); + break; + case 13: + GDB_REG64(regs->r13); + break; + case 14: + GDB_REG64(regs->r14); + break; + case 15: + GDB_REG64(regs->r15); + break; - case 16: GDB_REG64(regs->rip); break; - case 17: GDB_REG32(regs->rflags); break; - case 18: GDB_REG32(regs->cs); break; - case 19: GDB_REG32(regs->ss); break; - case 20: GDB_REG32(regs->ds); break; - case 21: GDB_REG32(regs->es); break; - case 22: GDB_REG32(regs->fs); break; - case 23: GDB_REG32(regs->gs); break; - default: - GDB_REG64(0xbaadf00ddeadbeef); - break; + case 16: + GDB_REG64(regs->rip); + break; + case 17: + GDB_REG32(regs->rflags); + break; + case 18: + GDB_REG32(regs->cs); + break; + case 19: + GDB_REG32(regs->ss); + break; + case 20: + GDB_REG32(regs->ds); + break; + case 21: + GDB_REG32(regs->es); + break; + case 22: + GDB_REG32(regs->fs); + break; + case 23: + GDB_REG32(regs->gs); + break; + default: + GDB_REG64(0xbaadf00ddeadbeef); + break; } gdb_send_packet(ctx); } -void -gdb_arch_write_reg(unsigned long 
regnum, unsigned long val, - struct cpu_user_regs *regs, struct gdb_context *ctx) +void gdb_arch_write_reg(unsigned long regnum, unsigned long val, + struct cpu_user_regs *regs, struct gdb_context *ctx) { switch (regnum) { - case 0: regs->rax = val; break; - case 1: regs->rbx = val; break; - case 2: regs->rcx = val; break; - case 3: regs->rdx = val; break; - case 4: regs->rsi = val; break; - case 5: regs->rdi = val; break; - case 6: regs->rbp = val; break; - case 7: regs->rsp = val; break; + case 0: + regs->rax = val; + break; + case 1: + regs->rbx = val; + break; + case 2: + regs->rcx = val; + break; + case 3: + regs->rdx = val; + break; + case 4: + regs->rsi = val; + break; + case 5: + regs->rdi = val; + break; + case 6: + regs->rbp = val; + break; + case 7: + regs->rsp = val; + break; - case 8: regs->r8 = val; break; - case 9: regs->r9 = val; break; - case 10: regs->r10 = val; break; - case 11: regs->r11 = val; break; - case 12: regs->r12 = val; break; - case 13: regs->r13 = val; break; - case 14: regs->r14 = val; break; - case 15: regs->r15 = val; break; + case 8: + regs->r8 = val; + break; + case 9: + regs->r9 = val; + break; + case 10: + regs->r10 = val; + break; + case 11: + regs->r11 = val; + break; + case 12: + regs->r12 = val; + break; + case 13: + regs->r13 = val; + break; + case 14: + regs->r14 = val; + break; + case 15: + regs->r15 = val; + break; - case 16: regs->rip = val; break; - case 17: regs->rflags = (u32)val; break; - case 18: regs->cs = (u16)val; break; - case 19: regs->ss = (u16)val; break; - case 20: regs->ds = (u16)val; break; - case 21: regs->es = (u16)val; break; - case 22: regs->fs = (u16)val; break; - case 23: regs->gs = (u16)val; break; - default: - break; + case 16: + regs->rip = val; + break; + case 17: + regs->rflags = (u32)val; + break; + case 18: + regs->cs = (u16)val; + break; + case 19: + regs->ss = (u16)val; + break; + case 20: + regs->ds = (u16)val; + break; + case 21: + regs->es = (u16)val; + break; + case 22: + regs->fs = (u16)val; + break; + case 23: + regs->gs = (u16)val; + break; + default: + break; } gdb_send_reply("OK", ctx); } diff --git a/xen/arch/x86/x86_64/machine_kexec.c b/xen/arch/x86/x86_64/machine_kexec.c index f4a005cd0c..efa772fe9a 100644 --- a/xen/arch/x86/x86_64/machine_kexec.c +++ b/xen/arch/x86/x86_64/machine_kexec.c @@ -13,9 +13,9 @@ int machine_kexec_get_xen(xen_kexec_range_t *range) { - range->start = virt_to_maddr(_start); - range->size = virt_to_maddr(_end) - (unsigned long)range->start; - return 0; + range->start = virt_to_maddr(_start); + range->size = virt_to_maddr(_end) - (unsigned long)range->start; + return 0; } /* diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c index d8f558bc3a..8b7da06166 100644 --- a/xen/arch/x86/x86_64/mm.c +++ b/xen/arch/x86/x86_64/mm.c @@ -1,18 +1,18 @@ /****************************************************************************** * arch/x86/x86_64/mm.c - * - * Modifications to Linux original are copyright (c) 2004, K A Fraser tr This - * program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) + * + * Modifications to Linux original are copyright (c) 2004, K A Fraser tr This + * program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) * any later 
version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. - * - * You should have received a copy of the GNU General Public License along + * + * You should have received a copy of the GNU General Public License along * with this program; If not, see . */ @@ -92,7 +92,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr) if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(_mfn(mfn)) ) return NULL; - ret: +ret: return map_domain_page(_mfn(mfn)) + (addr & ~PAGE_MASK); } @@ -115,17 +115,17 @@ static mfn_t alloc_hotadd_mfn(struct mem_hotadd_info *info) { mfn_t mfn; - ASSERT((info->cur + ( 1UL << PAGETABLE_ORDER) < info->epfn) && - info->cur >= info->spfn); + ASSERT((info->cur + (1UL << PAGETABLE_ORDER) < info->epfn) && + info->cur >= info->spfn); mfn = _mfn(info->cur); info->cur += (1UL << PAGETABLE_ORDER); return mfn; } -#define M2P_NO_MAPPED 0 -#define M2P_2M_MAPPED 1 -#define M2P_1G_MAPPED 2 +#define M2P_NO_MAPPED 0 +#define M2P_2M_MAPPED 1 +#define M2P_1G_MAPPED 2 static int m2p_mapped(unsigned long spfn) { unsigned long va; @@ -135,20 +135,20 @@ static int m2p_mapped(unsigned long spfn) va = RO_MPT_VIRT_START + spfn * sizeof(*machine_to_phys_mapping); l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset(va)]); - switch ( l3e_get_flags(l3_ro_mpt[l3_table_offset(va)]) & - (_PAGE_PRESENT |_PAGE_PSE)) + switch (l3e_get_flags(l3_ro_mpt[l3_table_offset(va)]) & + (_PAGE_PRESENT | _PAGE_PSE)) { - case _PAGE_PSE|_PAGE_PRESENT: - return M2P_1G_MAPPED; - /* Check for next level */ - case _PAGE_PRESENT: - break; - default: - return M2P_NO_MAPPED; + case _PAGE_PSE | _PAGE_PRESENT: + return M2P_1G_MAPPED; + /* Check for next level */ + case _PAGE_PRESENT: + break; + default: + return M2P_NO_MAPPED; } l2_ro_mpt = l3e_to_l2e(l3_ro_mpt[l3_table_offset(va)]); - if (l2e_get_flags(l2_ro_mpt[l2_table_offset(va)]) & _PAGE_PRESENT) + if ( l2e_get_flags(l2_ro_mpt[l2_table_offset(va)]) & _PAGE_PRESENT ) return M2P_2M_MAPPED; return M2P_NO_MAPPED; @@ -162,13 +162,11 @@ static int share_hotadd_m2p_table(struct mem_hotadd_info *info) l2_pgentry_t l2e; /* M2P table is mappable read-only by privileged domains. 
*/ - for ( v = RDWR_MPT_VIRT_START; - v != RDWR_MPT_VIRT_END; + for ( v = RDWR_MPT_VIRT_START; v != RDWR_MPT_VIRT_END; v += n << PAGE_SHIFT ) { n = L2_PAGETABLE_ENTRIES * L1_PAGETABLE_ENTRIES; - l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[ - l3_table_offset(v)]; + l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[l3_table_offset(v)]; if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ) continue; if ( !(l3e_get_flags(l3e) & _PAGE_PSE) ) @@ -191,12 +189,10 @@ static int share_hotadd_m2p_table(struct mem_hotadd_info *info) } } - for ( v = RDWR_COMPAT_MPT_VIRT_START; - v != RDWR_COMPAT_MPT_VIRT_END; + for ( v = RDWR_COMPAT_MPT_VIRT_START; v != RDWR_COMPAT_MPT_VIRT_END; v += 1 << L2_PAGETABLE_SHIFT ) { - l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[ - l3_table_offset(v)]; + l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[l3_table_offset(v)]; if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ) continue; l2e = l3e_to_l2e(l3e)[l2_table_offset(v)]; @@ -229,25 +225,28 @@ static void destroy_compat_m2p_mapping(struct mem_hotadd_info *info) if ( emap > ((RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) >> 2) ) emap = (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) >> 2; - l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset(HIRO_COMPAT_MPT_VIRT_START)]); + l3_ro_mpt = + l4e_to_l3e(idle_pg_table[l4_table_offset(HIRO_COMPAT_MPT_VIRT_START)]); - ASSERT(l3e_get_flags(l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)]) & _PAGE_PRESENT); + ASSERT( + l3e_get_flags(l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)]) & + _PAGE_PRESENT); - l2_ro_mpt = l3e_to_l2e(l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)]); + l2_ro_mpt = + l3e_to_l2e(l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)]); for ( i = smap; i < emap; ) { va = HIRO_COMPAT_MPT_VIRT_START + - i * sizeof(*compat_machine_to_phys_mapping); - rwva = RDWR_COMPAT_MPT_VIRT_START + i * sizeof(*compat_machine_to_phys_mapping); + rwva = RDWR_COMPAT_MPT_VIRT_START + + i * sizeof(*compat_machine_to_phys_mapping); if ( l2e_get_flags(l2_ro_mpt[l2_table_offset(va)]) & _PAGE_PRESENT ) { pt_pfn = l2e_get_pfn(l2_ro_mpt[l2_table_offset(va)]); if ( hotadd_mem_valid(pt_pfn, info) ) { - destroy_xen_mappings(rwva, rwva + - (1UL << L2_PAGETABLE_SHIFT)); + destroy_xen_mappings(rwva, rwva + (1UL << L2_PAGETABLE_SHIFT)); l2e_write(&l2_ro_mpt[l2_table_offset(va)], l2e_empty()); } } @@ -269,7 +268,7 @@ static void destroy_m2p_mapping(struct mem_hotadd_info *info) /* * No need to clean m2p structure existing before the hotplug */ - for (i = smap; i < emap;) + for ( i = smap; i < emap; ) { unsigned long pt_pfn; l2_pgentry_t *l2_ro_mpt; @@ -278,19 +277,19 @@ static void destroy_m2p_mapping(struct mem_hotadd_info *info) rwva = RDWR_MPT_VIRT_START + i * sizeof(*machine_to_phys_mapping); /* 1G mapping should not be created by mem hotadd */ - if (!(l3e_get_flags(l3_ro_mpt[l3_table_offset(va)]) & _PAGE_PRESENT) || - (l3e_get_flags(l3_ro_mpt[l3_table_offset(va)]) & _PAGE_PSE)) + if ( !(l3e_get_flags(l3_ro_mpt[l3_table_offset(va)]) & _PAGE_PRESENT) || + (l3e_get_flags(l3_ro_mpt[l3_table_offset(va)]) & _PAGE_PSE) ) { - i = ( i & ~((1UL << (L3_PAGETABLE_SHIFT - 3)) - 1)) + - (1UL << (L3_PAGETABLE_SHIFT - 3) ); + i = (i & ~((1UL << (L3_PAGETABLE_SHIFT - 3)) - 1)) + + (1UL << (L3_PAGETABLE_SHIFT - 3)); continue; } l2_ro_mpt = l3e_to_l2e(l3_ro_mpt[l3_table_offset(va)]); - if (!(l2e_get_flags(l2_ro_mpt[l2_table_offset(va)]) & _PAGE_PRESENT)) + if ( !(l2e_get_flags(l2_ro_mpt[l2_table_offset(va)]) & _PAGE_PRESENT) ) { - i = ( i & ~((1UL << 
(L2_PAGETABLE_SHIFT - 3)) - 1)) + - (1UL << (L2_PAGETABLE_SHIFT - 3)) ; + i = (i & ~((1UL << (L2_PAGETABLE_SHIFT - 3)) - 1)) + + (1UL << (L2_PAGETABLE_SHIFT - 3)); continue; } @@ -302,8 +301,8 @@ static void destroy_m2p_mapping(struct mem_hotadd_info *info) l2_ro_mpt = l3e_to_l2e(l3_ro_mpt[l3_table_offset(va)]); l2e_write(&l2_ro_mpt[l2_table_offset(va)], l2e_empty()); } - i = ( i & ~((1UL << (L2_PAGETABLE_SHIFT - 3)) - 1)) + - (1UL << (L2_PAGETABLE_SHIFT - 3)); + i = (i & ~((1UL << (L2_PAGETABLE_SHIFT - 3)) - 1)) + + (1UL << (L2_PAGETABLE_SHIFT - 3)); } destroy_compat_m2p_mapping(info); @@ -327,20 +326,21 @@ static int setup_compat_m2p_table(struct mem_hotadd_info *info) l2_pgentry_t *l2_ro_mpt = NULL; int err = 0; - smap = info->spfn & (~((1UL << (L2_PAGETABLE_SHIFT - 2)) -1)); + smap = info->spfn & (~((1UL << (L2_PAGETABLE_SHIFT - 2)) - 1)); /* * Notice: For hot-added memory, only range below m2p_compat_vstart * will be filled up (assuming memory is discontinous when booting). */ - if ((smap > ((RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) >> 2)) ) + if ( (smap > + ((RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) >> 2)) ) return 0; if ( epfn > ((RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) >> 2) ) epfn = (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) >> 2; - emap = ( (epfn + ((1UL << (L2_PAGETABLE_SHIFT - 2)) - 1 )) & - ~((1UL << (L2_PAGETABLE_SHIFT - 2)) - 1) ); + emap = ((epfn + ((1UL << (L2_PAGETABLE_SHIFT - 2)) - 1)) & + ~((1UL << (L2_PAGETABLE_SHIFT - 2)) - 1)); va = HIRO_COMPAT_MPT_VIRT_START + smap * sizeof(*compat_machine_to_phys_mapping); @@ -351,23 +351,24 @@ static int setup_compat_m2p_table(struct mem_hotadd_info *info) l2_ro_mpt = l3e_to_l2e(l3_ro_mpt[l3_table_offset(va)]); #define MFN(x) (((x) << L2_PAGETABLE_SHIFT) / sizeof(unsigned int)) -#define CNT ((sizeof(*frame_table) & -sizeof(*frame_table)) / \ - sizeof(*compat_machine_to_phys_mapping)) - BUILD_BUG_ON((sizeof(*frame_table) & -sizeof(*frame_table)) % \ +#define CNT \ + ((sizeof(*frame_table) & -sizeof(*frame_table)) / \ + sizeof(*compat_machine_to_phys_mapping)) + BUILD_BUG_ON((sizeof(*frame_table) & -sizeof(*frame_table)) % sizeof(*compat_machine_to_phys_mapping)); for ( i = smap; i < emap; i += (1UL << (L2_PAGETABLE_SHIFT - 2)) ) { va = HIRO_COMPAT_MPT_VIRT_START + - i * sizeof(*compat_machine_to_phys_mapping); + i * sizeof(*compat_machine_to_phys_mapping); rwva = RDWR_COMPAT_MPT_VIRT_START + - i * sizeof(*compat_machine_to_phys_mapping); + i * sizeof(*compat_machine_to_phys_mapping); - if (l2e_get_flags(l2_ro_mpt[l2_table_offset(va)]) & _PAGE_PRESENT) + if ( l2e_get_flags(l2_ro_mpt[l2_table_offset(va)]) & _PAGE_PRESENT ) continue; - for ( n = 0; n < CNT; ++n) + for ( n = 0; n < CNT; ++n ) if ( mfn_valid(_mfn(i + n * PDX_GROUP_COUNT)) ) break; if ( n == CNT ) @@ -382,7 +383,7 @@ static int setup_compat_m2p_table(struct mem_hotadd_info *info) memset((void *)rwva, 0xFF, 1UL << L2_PAGETABLE_SHIFT); /* NB. Cannot be GLOBAL as the ptes get copied into per-VM space. 
*/ l2e_write(&l2_ro_mpt[l2_table_offset(va)], - l2e_from_mfn(mfn, _PAGE_PSE|_PAGE_PRESENT)); + l2e_from_mfn(mfn, _PAGE_PSE | _PAGE_PRESENT)); } #undef CNT #undef MFN @@ -401,30 +402,31 @@ static int setup_m2p_table(struct mem_hotadd_info *info) l3_pgentry_t *l3_ro_mpt = NULL; int ret = 0; - ASSERT(l4e_get_flags(idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)]) - & _PAGE_PRESENT); + ASSERT(l4e_get_flags(idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)]) & + _PAGE_PRESENT); l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)]); - smap = (info->spfn & (~((1UL << (L2_PAGETABLE_SHIFT - 3)) -1))); - emap = ((info->epfn + ((1UL << (L2_PAGETABLE_SHIFT - 3)) - 1 )) & - ~((1UL << (L2_PAGETABLE_SHIFT - 3)) -1)); + smap = (info->spfn & (~((1UL << (L2_PAGETABLE_SHIFT - 3)) - 1))); + emap = ((info->epfn + ((1UL << (L2_PAGETABLE_SHIFT - 3)) - 1)) & + ~((1UL << (L2_PAGETABLE_SHIFT - 3)) - 1)); va = RO_MPT_VIRT_START + smap * sizeof(*machine_to_phys_mapping); #define MFN(x) (((x) << L2_PAGETABLE_SHIFT) / sizeof(unsigned long)) -#define CNT ((sizeof(*frame_table) & -sizeof(*frame_table)) / \ - sizeof(*machine_to_phys_mapping)) +#define CNT \ + ((sizeof(*frame_table) & -sizeof(*frame_table)) / \ + sizeof(*machine_to_phys_mapping)) - BUILD_BUG_ON((sizeof(*frame_table) & -sizeof(*frame_table)) % \ + BUILD_BUG_ON((sizeof(*frame_table) & -sizeof(*frame_table)) % sizeof(*machine_to_phys_mapping)); i = smap; while ( i < emap ) { - switch ( m2p_mapped(i) ) + switch (m2p_mapped(i)) { case M2P_1G_MAPPED: - i = ( i & ~((1UL << (L3_PAGETABLE_SHIFT - 3)) - 1)) + + i = (i & ~((1UL << (L3_PAGETABLE_SHIFT - 3)) - 1)) + (1UL << (L3_PAGETABLE_SHIFT - 3)); continue; case M2P_2M_MAPPED: @@ -437,7 +439,7 @@ static int setup_m2p_table(struct mem_hotadd_info *info) va = RO_MPT_VIRT_START + i * sizeof(*machine_to_phys_mapping); - for ( n = 0; n < CNT; ++n) + for ( n = 0; n < CNT; ++n ) if ( mfn_valid(_mfn(i + n * PDX_GROUP_COUNT)) ) break; if ( n < CNT ) @@ -445,21 +447,19 @@ static int setup_m2p_table(struct mem_hotadd_info *info) mfn_t mfn = alloc_hotadd_mfn(info); ret = map_pages_to_xen( - RDWR_MPT_VIRT_START + i * sizeof(unsigned long), - mfn, 1UL << PAGETABLE_ORDER, - PAGE_HYPERVISOR); + RDWR_MPT_VIRT_START + i * sizeof(unsigned long), mfn, + 1UL << PAGETABLE_ORDER, PAGE_HYPERVISOR); if ( ret ) goto error; /* Fill with INVALID_M2P_ENTRY. */ memset((void *)(RDWR_MPT_VIRT_START + i * sizeof(unsigned long)), 0xFF, 1UL << L2_PAGETABLE_SHIFT); - ASSERT(!(l3e_get_flags(l3_ro_mpt[l3_table_offset(va)]) & - _PAGE_PSE)); - if ( l3e_get_flags(l3_ro_mpt[l3_table_offset(va)]) & - _PAGE_PRESENT ) + ASSERT( + !(l3e_get_flags(l3_ro_mpt[l3_table_offset(va)]) & _PAGE_PSE)); + if ( l3e_get_flags(l3_ro_mpt[l3_table_offset(va)]) & _PAGE_PRESENT ) l2_ro_mpt = l3e_to_l2e(l3_ro_mpt[l3_table_offset(va)]) + - l2_table_offset(va); + l2_table_offset(va); else { l2_ro_mpt = alloc_xen_pagetable(); @@ -478,11 +478,12 @@ static int setup_m2p_table(struct mem_hotadd_info *info) /* NB. Cannot be GLOBAL: guest user mode should not see it. 
*/ l2e_write(l2_ro_mpt, l2e_from_mfn(mfn, - /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT)); + /*_PAGE_GLOBAL|*/ _PAGE_PSE | + _PAGE_USER | _PAGE_PRESENT)); } if ( !((unsigned long)l2_ro_mpt & ~PAGE_MASK) ) l2_ro_mpt = NULL; - i += ( 1UL << (L2_PAGETABLE_SHIFT - 3)); + i += (1UL << (L2_PAGETABLE_SHIFT - 3)); } #undef CNT #undef MFN @@ -509,7 +510,7 @@ void __init paging_init(void) va += (1UL << L4_PAGETABLE_SHIFT) ) { if ( !(l4e_get_flags(idle_pg_table[l4_table_offset(va)]) & - _PAGE_PRESENT) ) + _PAGE_PRESENT) ) { l3_pgentry_t *pl3t = alloc_xen_pagetable(); @@ -525,36 +526,37 @@ void __init paging_init(void) if ( (l3_ro_mpt = alloc_xen_pagetable()) == NULL ) goto nomem; clear_page(l3_ro_mpt); - l4e_write(&idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)], - l4e_from_paddr(__pa(l3_ro_mpt), __PAGE_HYPERVISOR_RO | _PAGE_USER)); + l4e_write( + &idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)], + l4e_from_paddr(__pa(l3_ro_mpt), __PAGE_HYPERVISOR_RO | _PAGE_USER)); /* * Allocate and map the machine-to-phys table. * This also ensures L3 is present for fixmaps. */ - mpt_size = (max_page * BYTES_PER_LONG) + (1UL << L2_PAGETABLE_SHIFT) - 1; + mpt_size = (max_page * BYTES_PER_LONG) + (1UL << L2_PAGETABLE_SHIFT) - 1; mpt_size &= ~((1UL << L2_PAGETABLE_SHIFT) - 1UL); #define MFN(x) (((x) << L2_PAGETABLE_SHIFT) / sizeof(unsigned long)) -#define CNT ((sizeof(*frame_table) & -sizeof(*frame_table)) / \ - sizeof(*machine_to_phys_mapping)) - BUILD_BUG_ON((sizeof(*frame_table) & ~sizeof(*frame_table)) % \ +#define CNT \ + ((sizeof(*frame_table) & -sizeof(*frame_table)) / \ + sizeof(*machine_to_phys_mapping)) + BUILD_BUG_ON((sizeof(*frame_table) & ~sizeof(*frame_table)) % sizeof(*machine_to_phys_mapping)); for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ ) { BUILD_BUG_ON(RO_MPT_VIRT_START & ((1UL << L3_PAGETABLE_SHIFT) - 1)); va = RO_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT); - memflags = MEMF_node(phys_to_nid(i << - (L2_PAGETABLE_SHIFT - 3 + PAGE_SHIFT))); + memflags = + MEMF_node(phys_to_nid(i << (L2_PAGETABLE_SHIFT - 3 + PAGE_SHIFT))); - if ( cpu_has_page1gb && - !((unsigned long)l2_ro_mpt & ~PAGE_MASK) && + if ( cpu_has_page1gb && !((unsigned long)l2_ro_mpt & ~PAGE_MASK) && (mpt_size >> L3_PAGETABLE_SHIFT) > (i >> PAGETABLE_ORDER) ) { unsigned int k, holes; - for ( holes = k = 0; k < 1 << PAGETABLE_ORDER; ++k) + for ( holes = k = 0; k < 1 << PAGETABLE_ORDER; ++k ) { - for ( n = 0; n < CNT; ++n) + for ( n = 0; n < CNT; ++n ) if ( mfn_valid(_mfn(MFN(i + k) + n * PDX_GROUP_COUNT)) ) break; if ( n == CNT ) @@ -569,25 +571,26 @@ void __init paging_init(void) (l1_pg = alloc_domheap_pages(NULL, 2 * PAGETABLE_ORDER, memflags)) != NULL ) { - map_pages_to_xen( - RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT), - page_to_mfn(l1_pg), - 1UL << (2 * PAGETABLE_ORDER), - PAGE_HYPERVISOR); - memset((void *)(RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)), - 0x77, 1UL << L3_PAGETABLE_SHIFT); + map_pages_to_xen(RDWR_MPT_VIRT_START + + (i << L2_PAGETABLE_SHIFT), + page_to_mfn(l1_pg), + 1UL << (2 * PAGETABLE_ORDER), PAGE_HYPERVISOR); + memset( + (void *)(RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)), + 0x77, 1UL << L3_PAGETABLE_SHIFT); ASSERT(!l2_table_offset(va)); /* NB. Cannot be GLOBAL: guest user mode should not see it. 
*/ l3e_write(&l3_ro_mpt[l3_table_offset(va)], - l3e_from_page(l1_pg, - /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT)); + l3e_from_page(l1_pg, + /*_PAGE_GLOBAL|*/ _PAGE_PSE | + _PAGE_USER | _PAGE_PRESENT)); i += (1UL << PAGETABLE_ORDER) - 1; continue; } } - for ( n = 0; n < CNT; ++n) + for ( n = 0; n < CNT; ++n ) if ( mfn_valid(_mfn(MFN(i) + n * PDX_GROUP_COUNT)) ) break; if ( n == CNT ) @@ -597,11 +600,9 @@ void __init paging_init(void) goto nomem; else { - map_pages_to_xen( - RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT), - page_to_mfn(l1_pg), - 1UL << PAGETABLE_ORDER, - PAGE_HYPERVISOR); + map_pages_to_xen(RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT), + page_to_mfn(l1_pg), 1UL << PAGETABLE_ORDER, + PAGE_HYPERVISOR); /* Fill with INVALID_M2P_ENTRY. */ memset((void *)(RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)), 0xFF, 1UL << L2_PAGETABLE_SHIFT); @@ -618,8 +619,9 @@ void __init paging_init(void) } /* NB. Cannot be GLOBAL: guest user mode should not see it. */ if ( l1_pg ) - l2e_write(l2_ro_mpt, l2e_from_page( - l1_pg, /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT)); + l2e_write(l2_ro_mpt, + l2e_from_page(l1_pg, /*_PAGE_GLOBAL|*/ _PAGE_PSE | + _PAGE_USER | _PAGE_PRESENT)); l2_ro_mpt++; } #undef CNT @@ -628,8 +630,8 @@ void __init paging_init(void) /* Create user-accessible L2 directory to map the MPT for compat guests. */ BUILD_BUG_ON(l4_table_offset(RDWR_MPT_VIRT_START) != l4_table_offset(HIRO_COMPAT_MPT_VIRT_START)); - l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset( - HIRO_COMPAT_MPT_VIRT_START)]); + l3_ro_mpt = + l4e_to_l3e(idle_pg_table[l4_table_offset(HIRO_COMPAT_MPT_VIRT_START)]); if ( (l2_ro_mpt = alloc_xen_pagetable()) == NULL ) goto nomem; compat_idle_pg_table_l2 = l2_ro_mpt; @@ -645,33 +647,30 @@ void __init paging_init(void) if ( (m2p_compat_vstart + mpt_size) < MACH2PHYS_COMPAT_VIRT_END ) m2p_compat_vstart = MACH2PHYS_COMPAT_VIRT_END - mpt_size; #define MFN(x) (((x) << L2_PAGETABLE_SHIFT) / sizeof(unsigned int)) -#define CNT ((sizeof(*frame_table) & -sizeof(*frame_table)) / \ - sizeof(*compat_machine_to_phys_mapping)) - BUILD_BUG_ON((sizeof(*frame_table) & ~sizeof(*frame_table)) % \ +#define CNT \ + ((sizeof(*frame_table) & -sizeof(*frame_table)) / \ + sizeof(*compat_machine_to_phys_mapping)) + BUILD_BUG_ON((sizeof(*frame_table) & ~sizeof(*frame_table)) % sizeof(*compat_machine_to_phys_mapping)); for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++, l2_ro_mpt++ ) { - memflags = MEMF_node(phys_to_nid(i << - (L2_PAGETABLE_SHIFT - 2 + PAGE_SHIFT))); - for ( n = 0; n < CNT; ++n) + memflags = + MEMF_node(phys_to_nid(i << (L2_PAGETABLE_SHIFT - 2 + PAGE_SHIFT))); + for ( n = 0; n < CNT; ++n ) if ( mfn_valid(_mfn(MFN(i) + n * PDX_GROUP_COUNT)) ) break; if ( n == CNT ) continue; - if ( (l1_pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, - memflags)) == NULL ) + if ( (l1_pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, memflags)) == + NULL ) goto nomem; - map_pages_to_xen( - RDWR_COMPAT_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT), - page_to_mfn(l1_pg), - 1UL << PAGETABLE_ORDER, - PAGE_HYPERVISOR); - memset((void *)(RDWR_COMPAT_MPT_VIRT_START + - (i << L2_PAGETABLE_SHIFT)), - 0x55, - 1UL << L2_PAGETABLE_SHIFT); + map_pages_to_xen(RDWR_COMPAT_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT), + page_to_mfn(l1_pg), 1UL << PAGETABLE_ORDER, + PAGE_HYPERVISOR); + memset((void *)(RDWR_COMPAT_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)), + 0x55, 1UL << L2_PAGETABLE_SHIFT); /* NB. Cannot be GLOBAL as the ptes get copied into per-VM space. 
*/ - l2e_write(l2_ro_mpt, l2e_from_page(l1_pg, _PAGE_PSE|_PAGE_PRESENT)); + l2e_write(l2_ro_mpt, l2e_from_page(l1_pg, _PAGE_PSE | _PAGE_PRESENT)); } #undef CNT #undef MFN @@ -683,7 +682,7 @@ void __init paging_init(void) l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR_RW)); return; - nomem: +nomem: panic("Not enough memory for m2p table\n"); } @@ -704,8 +703,8 @@ void __init zap_low_mappings(void) int setup_compat_arg_xlat(struct vcpu *v) { return create_perdomain_mapping(v->domain, ARG_XLAT_START(v), - PFN_UP(COMPAT_ARG_XLAT_SIZE), - NULL, NIL(struct page_info *)); + PFN_UP(COMPAT_ARG_XLAT_SIZE), NULL, + NIL(struct page_info *)); } void free_compat_arg_xlat(struct vcpu *v) @@ -730,15 +729,15 @@ static void cleanup_frame_table(struct mem_hotadd_info *info) /* Intialize all page */ memset((void *)sva, -1, eva - sva); - while (sva < eva) + while ( sva < eva ) { - l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(sva)])[ - l3_table_offset(sva)]; + l3e = l4e_to_l3e( + idle_pg_table[l4_table_offset(sva)])[l3_table_offset(sva)]; if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || (l3e_get_flags(l3e) & _PAGE_PSE) ) { sva = (sva & ~((1UL << L3_PAGETABLE_SHIFT) - 1)) + - (1UL << L3_PAGETABLE_SHIFT); + (1UL << L3_PAGETABLE_SHIFT); continue; } @@ -746,22 +745,22 @@ static void cleanup_frame_table(struct mem_hotadd_info *info) ASSERT(l2e_get_flags(l2e) & _PAGE_PRESENT); if ( (l2e_get_flags(l2e) & (_PAGE_PRESENT | _PAGE_PSE)) == - (_PAGE_PSE | _PAGE_PRESENT) ) + (_PAGE_PSE | _PAGE_PRESENT) ) { - if (hotadd_mem_valid(l2e_get_pfn(l2e), info)) - destroy_xen_mappings(sva & ~((1UL << L2_PAGETABLE_SHIFT) - 1), - ((sva & ~((1UL << L2_PAGETABLE_SHIFT) -1 )) + - (1UL << L2_PAGETABLE_SHIFT) - 1)); + if ( hotadd_mem_valid(l2e_get_pfn(l2e), info) ) + destroy_xen_mappings( + sva & ~((1UL << L2_PAGETABLE_SHIFT) - 1), + ((sva & ~((1UL << L2_PAGETABLE_SHIFT) - 1)) + + (1UL << L2_PAGETABLE_SHIFT) - 1)); - sva = (sva & ~((1UL << L2_PAGETABLE_SHIFT) -1 )) + + sva = (sva & ~((1UL << L2_PAGETABLE_SHIFT) - 1)) + (1UL << L2_PAGETABLE_SHIFT); continue; } ASSERT(l1e_get_flags(l2e_to_l1e(l2e)[l1_table_offset(sva)]) & - _PAGE_PRESENT); - sva = (sva & ~((1UL << PAGE_SHIFT) - 1)) + - (1UL << PAGE_SHIFT); + _PAGE_PRESENT); + sva = (sva & ~((1UL << PAGE_SHIFT) - 1)) + (1UL << PAGE_SHIFT); } /* Brute-Force flush all TLB */ @@ -779,11 +778,10 @@ static int setup_frametable_chunk(void *start, void *end, ASSERT(!(s & ((1 << L2_PAGETABLE_SHIFT) - 1))); ASSERT(!(e & ((1 << L2_PAGETABLE_SHIFT) - 1))); - for ( ; s < e; s += (1UL << L2_PAGETABLE_SHIFT)) + for ( ; s < e; s += (1UL << L2_PAGETABLE_SHIFT) ) { mfn = alloc_hotadd_mfn(info); - err = map_pages_to_xen(s, mfn, 1UL << PAGETABLE_ORDER, - PAGE_HYPERVISOR); + err = map_pages_to_xen(s, mfn, 1UL << PAGETABLE_ORDER, PAGE_HYPERVISOR); if ( err ) return err; } @@ -801,10 +799,10 @@ static int extend_frame_table(struct mem_hotadd_info *info) epfn = _mfn(info->epfn); eidx = (mfn_to_pdx(epfn) + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT; - nidx = cidx = mfn_to_pdx(spfn)/PDX_GROUP_COUNT; + nidx = cidx = mfn_to_pdx(spfn) / PDX_GROUP_COUNT; - ASSERT( mfn_to_pdx(epfn) <= (DIRECTMAP_SIZE >> PAGE_SHIFT) && - mfn_to_pdx(epfn) <= FRAMETABLE_NR ); + ASSERT(mfn_to_pdx(epfn) <= (DIRECTMAP_SIZE >> PAGE_SHIFT) && + mfn_to_pdx(epfn) <= FRAMETABLE_NR); if ( test_bit(cidx, pdx_group_valid) ) cidx = find_next_zero_bit(pdx_group_valid, eidx, cidx); @@ -819,9 +817,8 @@ static int extend_frame_table(struct mem_hotadd_info *info) nidx = find_next_bit(pdx_group_valid, eidx, cidx); if ( nidx >= eidx ) nidx = eidx; - err = 
setup_frametable_chunk(pdx_to_page(cidx * PDX_GROUP_COUNT ), - pdx_to_page(nidx * PDX_GROUP_COUNT), - info); + err = setup_frametable_chunk(pdx_to_page(cidx * PDX_GROUP_COUNT), + pdx_to_page(nidx * PDX_GROUP_COUNT), info); if ( err ) return err; @@ -840,15 +837,13 @@ void __init subarch_init_memory(void) l2_pgentry_t l2e; BUILD_BUG_ON(RDWR_MPT_VIRT_START & ((1UL << L3_PAGETABLE_SHIFT) - 1)); - BUILD_BUG_ON(RDWR_MPT_VIRT_END & ((1UL << L3_PAGETABLE_SHIFT) - 1)); + BUILD_BUG_ON(RDWR_MPT_VIRT_END & ((1UL << L3_PAGETABLE_SHIFT) - 1)); /* M2P table is mappable read-only by privileged domains. */ - for ( v = RDWR_MPT_VIRT_START; - v != RDWR_MPT_VIRT_END; + for ( v = RDWR_MPT_VIRT_START; v != RDWR_MPT_VIRT_END; v += n << PAGE_SHIFT ) { n = L2_PAGETABLE_ENTRIES * L1_PAGETABLE_ENTRIES; - l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[ - l3_table_offset(v)]; + l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[l3_table_offset(v)]; if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ) continue; if ( !(l3e_get_flags(l3e) & _PAGE_PSE) ) @@ -869,12 +864,10 @@ void __init subarch_init_memory(void) mfn_to_page(_mfn(m2p_start_mfn + i)), SHARE_ro); } - for ( v = RDWR_COMPAT_MPT_VIRT_START; - v != RDWR_COMPAT_MPT_VIRT_END; + for ( v = RDWR_COMPAT_MPT_VIRT_START; v != RDWR_COMPAT_MPT_VIRT_END; v += 1 << L2_PAGETABLE_SHIFT ) { - l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[ - l3_table_offset(v)]; + l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[l3_table_offset(v)]; if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ) continue; l2e = l3e_to_l2e(l3e)[l2_table_offset(v)]; @@ -914,21 +907,21 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) unsigned int i; long rc = 0; - switch ( cmd ) + switch (cmd) { case XENMEM_machphys_mfn_list: if ( copy_from_guest(&xmml, arg, 1) ) return -EFAULT; BUILD_BUG_ON(RDWR_MPT_VIRT_START & ((1UL << L3_PAGETABLE_SHIFT) - 1)); - BUILD_BUG_ON(RDWR_MPT_VIRT_END & ((1UL << L3_PAGETABLE_SHIFT) - 1)); + BUILD_BUG_ON(RDWR_MPT_VIRT_END & ((1UL << L3_PAGETABLE_SHIFT) - 1)); for ( i = 0, v = RDWR_MPT_VIRT_START, last_mfn = 0; (i != xmml.max_extents) && (v < (unsigned long)(machine_to_phys_mapping + max_page)); i++, v += 1UL << L2_PAGETABLE_SHIFT ) { - l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[ - l3_table_offset(v)]; + l3e = l4e_to_l3e( + idle_pg_table[l4_table_offset(v)])[l3_table_offset(v)]; if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ) mfn = last_mfn; else if ( !(l3e_get_flags(l3e) & _PAGE_PSE) ) @@ -941,8 +934,8 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) } else { - mfn = l3e_get_pfn(l3e) - + (l2_table_offset(v) << PAGETABLE_ORDER); + mfn = + l3e_get_pfn(l3e) + (l2_table_offset(v) << PAGETABLE_ORDER); } ASSERT(mfn); if ( copy_to_guest_offset(xmml.extent_start, i, &mfn, 1) ) @@ -1020,7 +1013,7 @@ long do_set_segment_base(unsigned int which, unsigned long base) if ( is_pv_32bit_vcpu(v) ) return -ENOSYS; /* x86/64 only. 
*/ - switch ( which ) + switch (which) { case SEGBASE_FS: if ( is_canonical_address(base) ) @@ -1053,16 +1046,14 @@ long do_set_segment_base(unsigned int which, unsigned long base) break; case SEGBASE_GS_USER_SEL: - __asm__ __volatile__ ( - " swapgs \n" - "1: movl %k0,%%gs \n" - " "safe_swapgs" \n" - ".section .fixup,\"ax\" \n" - "2: xorl %k0,%k0 \n" - " jmp 1b \n" - ".previous \n" - _ASM_EXTABLE(1b, 2b) - : "+r" (base) ); + __asm__ __volatile__(" swapgs \n" + "1: movl %k0,%%gs \n" + " " safe_swapgs " \n" + ".section .fixup,\"ax\" \n" + "2: xorl %k0,%k0 \n" + " jmp 1b \n" + ".previous \n" _ASM_EXTABLE(1b, 2b) + : "+r"(base)); break; default: @@ -1073,7 +1064,6 @@ long do_set_segment_base(unsigned int which, unsigned long base) return ret; } - /* Returns TRUE if given descriptor is valid for GDT or LDT. */ int check_descriptor(const struct domain *dom, seg_desc_t *d) { @@ -1082,7 +1072,7 @@ int check_descriptor(const struct domain *dom, seg_desc_t *d) unsigned int dpl; /* A not-present descriptor will always fault, so is safe. */ - if ( !(b & _SEGMENT_P) ) + if ( !(b & _SEGMENT_P) ) return 1; /* Check and fix up the DPL. */ @@ -1152,12 +1142,12 @@ int check_descriptor(const struct domain *dom, seg_desc_t *d) /* Reserved bits must be zero. */ if ( b & (is_pv_32bit_domain(dom) ? 0xe0 : 0xff) ) goto bad; - - good: + +good: d->a = a; d->b = b; return 1; - bad: +bad: return 0; } @@ -1175,14 +1165,14 @@ int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs) struct domain *d = current->domain; l4_pgentry_t *pl4e = NULL; l4_pgentry_t l4e; - l3_pgentry_t *pl3e = NULL; + l3_pgentry_t *pl3e = NULL; l3_pgentry_t l3e; l2_pgentry_t *pl2e = NULL; l2_pgentry_t l2e, idle_l2e; unsigned long mfn, idle_index; int ret = 0; - if (!is_pv_32bit_domain(d)) + if ( !is_pv_32bit_domain(d) ) return 0; if ( (addr < HYPERVISOR_COMPAT_VIRT_START(d)) || @@ -1195,7 +1185,7 @@ int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs) l4e = pl4e[0]; - if (!(l4e_get_flags(l4e) & _PAGE_PRESENT)) + if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) ) goto unmap; mfn = l4e_get_pfn(l4e); @@ -1212,19 +1202,18 @@ int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs) l2e = pl2e[l2_table_offset(addr)]; - if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT)) + if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ) goto unmap; - idle_index = (l2_table_offset(addr) - - COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d))/ - sizeof(l2_pgentry_t); + idle_index = + (l2_table_offset(addr) - COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)) / + sizeof(l2_pgentry_t); idle_l2e = compat_idle_pg_table_l2[idle_index]; - if (!(l2e_get_flags(idle_l2e) & _PAGE_PRESENT)) + if ( !(l2e_get_flags(idle_l2e) & _PAGE_PRESENT) ) goto unmap; - memcpy(&pl2e[l2_table_offset(addr)], - &compat_idle_pg_table_l2[idle_index], - sizeof(l2_pgentry_t)); + memcpy(&pl2e[l2_table_offset(addr)], &compat_idle_pg_table_l2[idle_index], + sizeof(l2_pgentry_t)); ret = EXCRET_fault_fixed; @@ -1247,7 +1236,8 @@ void domain_set_alloc_bitsize(struct domain *d) return; d->arch.physaddr_bitsize = /* 2^n entries can be contained in guest's p2m mapping space */ - fls(MACH2PHYS_COMPAT_NR_ENTRIES(d)) - 1 + fls(MACH2PHYS_COMPAT_NR_ENTRIES(d)) - + 1 /* 2^n pages -> 2^(n+PAGE_SHIFT) bits */ + PAGE_SHIFT; } @@ -1268,7 +1258,7 @@ static int transfer_pages_to_heap(struct mem_hotadd_info *info) * Mark the allocated page before put free pages to buddy allocator * to avoid merge in free_heap_pages */ - for (i = info->spfn; i < info->cur; i++) + for ( i = info->spfn; i < info->cur; i++ ) { pg = 
mfn_to_page(_mfn(i)); pg->count_info = PGC_state_inuse; @@ -1286,7 +1276,7 @@ static int mem_hotadd_check(unsigned long spfn, unsigned long epfn) if ( (spfn >= epfn) ) return 0; - if (pfn_to_pdx(epfn) > FRAMETABLE_NR) + if ( pfn_to_pdx(epfn) > FRAMETABLE_NR ) return 0; if ( (spfn | epfn) & ((1UL << PAGETABLE_ORDER) - 1) ) @@ -1296,10 +1286,10 @@ static int mem_hotadd_check(unsigned long spfn, unsigned long epfn) return 0; /* Make sure the new range is not present now */ - sidx = ((pfn_to_pdx(spfn) + PDX_GROUP_COUNT - 1) & ~(PDX_GROUP_COUNT - 1)) - / PDX_GROUP_COUNT; + sidx = ((pfn_to_pdx(spfn) + PDX_GROUP_COUNT - 1) & ~(PDX_GROUP_COUNT - 1)) / + PDX_GROUP_COUNT; eidx = (pfn_to_pdx(epfn - 1) & ~(PDX_GROUP_COUNT - 1)) / PDX_GROUP_COUNT; - if (sidx >= eidx) + if ( sidx >= eidx ) return 0; s = find_next_zero_bit(pdx_group_valid, eidx, sidx); @@ -1312,26 +1302,26 @@ static int mem_hotadd_check(unsigned long spfn, unsigned long epfn) /* Caculate at most required m2p/compat m2p/frametable pages */ s = (spfn & ~((1UL << (L2_PAGETABLE_SHIFT - 3)) - 1)); e = (epfn + (1UL << (L2_PAGETABLE_SHIFT - 3)) - 1) & - ~((1UL << (L2_PAGETABLE_SHIFT - 3)) - 1); + ~((1UL << (L2_PAGETABLE_SHIFT - 3)) - 1); length = (e - s) * sizeof(unsigned long); s = (spfn & ~((1UL << (L2_PAGETABLE_SHIFT - 2)) - 1)); e = (epfn + (1UL << (L2_PAGETABLE_SHIFT - 2)) - 1) & - ~((1UL << (L2_PAGETABLE_SHIFT - 2)) - 1); + ~((1UL << (L2_PAGETABLE_SHIFT - 2)) - 1); e = min_t(unsigned long, e, - (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) >> 2); + (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) >> 2); if ( e > s ) - length += (e -s) * sizeof(unsigned int); + length += (e - s) * sizeof(unsigned int); s = pfn_to_pdx(spfn) & ~(PDX_GROUP_COUNT - 1); - e = ( pfn_to_pdx(epfn) + (PDX_GROUP_COUNT - 1) ) & ~(PDX_GROUP_COUNT - 1); + e = (pfn_to_pdx(epfn) + (PDX_GROUP_COUNT - 1)) & ~(PDX_GROUP_COUNT - 1); length += (e - s) * sizeof(struct page_info); - if ((length >> PAGE_SHIFT) > (epfn - spfn)) + if ( (length >> PAGE_SHIFT) > (epfn - spfn) ) return 0; return 1; @@ -1378,8 +1368,8 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm) { if ( i < spfn ) i = spfn; - ret = map_pages_to_xen((unsigned long)mfn_to_virt(i), _mfn(i), - epfn - i, __PAGE_HYPERVISOR_RW); + ret = map_pages_to_xen((unsigned long)mfn_to_virt(i), _mfn(i), epfn - i, + __PAGE_HYPERVISOR_RW); if ( ret ) goto destroy_directmap; } @@ -1390,17 +1380,16 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm) if ( !orig_online ) { - dprintk(XENLOG_WARNING, "node %x pxm %x is not online\n",node, pxm); + dprintk(XENLOG_WARNING, "node %x pxm %x is not online\n", node, pxm); NODE_DATA(node)->node_start_pfn = spfn; - NODE_DATA(node)->node_spanned_pages = - epfn - node_start_pfn(node); + NODE_DATA(node)->node_spanned_pages = epfn - node_start_pfn(node); node_set_online(node); } else { - if (node_start_pfn(node) > spfn) + if ( node_start_pfn(node) > spfn ) NODE_DATA(node)->node_start_pfn = spfn; - if (node_end_pfn(node) < epfn) + if ( node_end_pfn(node) < epfn ) NODE_DATA(node)->node_spanned_pages = epfn - node_start_pfn(node); } @@ -1409,11 +1398,11 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm) info.cur = spfn; ret = extend_frame_table(&info); - if (ret) + if ( ret ) goto destroy_frametable; /* Set max_page as setup_m2p_table will use it*/ - if (max_page < epfn) + if ( max_page < epfn ) { max_page = epfn; max_pdx = pfn_to_pdx(max_page - 1) + 1; @@ -1431,8 +1420,7 @@ int memory_add(unsigned long spfn, 
unsigned long epfn, unsigned int pxm) * shared or being kept in sync then newly added memory needs to be * mapped here. */ - if ( has_iommu_pt(hardware_domain) && - !iommu_use_hap_pt(hardware_domain) && + if ( has_iommu_pt(hardware_domain) && !iommu_use_hap_pt(hardware_domain) && !need_iommu_pt_sync(hardware_domain) ) { for ( i = spfn; i < epfn; i++ ) @@ -1442,7 +1430,7 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm) break; if ( i != epfn ) { - while (i-- > old_max) + while ( i-- > old_max ) /* If statement to satisfy __must_check. */ if ( iommu_legacy_unmap(hardware_domain, _dfn(i), PAGE_ORDER_4K) ) @@ -1469,7 +1457,7 @@ destroy_frametable: node_set_offline(node); NODE_DATA(node)->node_start_pfn = old_node_start; NODE_DATA(node)->node_spanned_pages = old_node_span; - destroy_directmap: +destroy_directmap: destroy_xen_mappings((unsigned long)mfn_to_virt(spfn), (unsigned long)mfn_to_virt(epfn)); diff --git a/xen/arch/x86/x86_64/mmconf-fam10h.c b/xen/arch/x86/x86_64/mmconf-fam10h.c index ed0acb9968..9ea3dfd40a 100644 --- a/xen/arch/x86/x86_64/mmconf-fam10h.c +++ b/xen/arch/x86/x86_64/mmconf-fam10h.c @@ -16,191 +16,204 @@ #include "mmconfig.h" -struct pci_hostbridge_probe { - u32 bus; - u32 slot; - u32 vendor; - u32 device; +struct pci_hostbridge_probe +{ + u32 bus; + u32 slot; + u32 vendor; + u32 device; }; static u64 fam10h_pci_mmconf_base; static struct pci_hostbridge_probe pci_probes[] = { - { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 }, - { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 }, + {0, 0x18, PCI_VENDOR_ID_AMD, 0x1200}, + {0xff, 0, PCI_VENDOR_ID_AMD, 0x1200}, }; #define UNIT (1ULL << FAM10H_MMIO_CONF_BASE_SHIFT) #define MASK (~(UNIT - 1)) #define SIZE (UNIT << 8) /* need to avoid (0xfd<<32) and (0xfe<<32), ht used space */ -#define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32) -#define BASE_VALID(b) ((b) + SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40)) +#define FAM10H_PCI_MMCONF_BASE (0xfcULL << 32) +#define BASE_VALID(b) ((b) + SIZE <= (0xfdULL << 32) || (b) >= (1ULL << 40)) static void __init get_fam10h_pci_mmconf_base(void) { - unsigned int i, j, bus, slot, hi_mmio_num; - u32 address; - u64 val, tom2, start, end; - struct range { - u64 start, end; - } range[8]; - - for (i = 0; i < ARRAY_SIZE(pci_probes); i++) { - u32 id; - u16 device; - u16 vendor; - - bus = pci_probes[i].bus; - slot = pci_probes[i].slot; - id = pci_conf_read32(0, bus, slot, 0, PCI_VENDOR_ID); - - vendor = id & 0xffff; - device = (id>>16) & 0xffff; - if (pci_probes[i].vendor == vendor && - pci_probes[i].device == device) - break; - } - - if (i >= ARRAY_SIZE(pci_probes)) - return; - - /* SYS_CFG */ - address = MSR_K8_SYSCFG; - rdmsrl(address, val); - - /* TOP_MEM2 is not enabled? 
*/ - if (!(val & (1<<21))) { - tom2 = 1ULL << 32; - } else { - /* TOP_MEM2 */ - address = MSR_K8_TOP_MEM2; - rdmsrl(address, val); - tom2 = max(val & 0xffffff800000ULL, 1ULL << 32); - } - - /* - * need to check if the range is in the high mmio range that is - * above 4G - */ - for (hi_mmio_num = i = 0; i < 8; i++) { - val = pci_conf_read32(0, bus, slot, 1, 0x80 + (i << 3)); - if (!(val & 3)) - continue; - - start = (val & 0xffffff00) << 8; /* 39:16 on 31:8*/ - val = pci_conf_read32(0, bus, slot, 1, 0x84 + (i << 3)); - end = ((val & 0xffffff00) << 8) | 0xffff; /* 39:16 on 31:8*/ - - if (end < tom2) - continue; - - for (j = hi_mmio_num; j; --j) { - if (range[j - 1].start < start) - break; - range[j] = range[j - 1]; - } - range[j].start = start; - range[j].end = end; - hi_mmio_num++; - } - - start = FAM10H_PCI_MMCONF_BASE; - if (start <= tom2) - start = (tom2 + 2 * UNIT - 1) & MASK; - - if (!hi_mmio_num) - goto out; - - if (range[hi_mmio_num - 1].end < start) - goto out; - if (range[0].start > start + SIZE) - goto out; - - /* need to find one window */ - start = (range[0].start & MASK) - UNIT; - if (start > tom2 && BASE_VALID(start)) - goto out; - start = (range[hi_mmio_num - 1].end + UNIT) & MASK; - if (BASE_VALID(start)) - goto out; - /* need to find window between ranges */ - for (i = 1; i < hi_mmio_num; i++) { - start = (range[i - 1].end + UNIT) & MASK; - end = range[i].start & MASK; - if (end >= start + SIZE && BASE_VALID(start)) - goto out; - } - return; + unsigned int i, j, bus, slot, hi_mmio_num; + u32 address; + u64 val, tom2, start, end; + struct range + { + u64 start, end; + } range[8]; + + for ( i = 0; i < ARRAY_SIZE(pci_probes); i++ ) + { + u32 id; + u16 device; + u16 vendor; + + bus = pci_probes[i].bus; + slot = pci_probes[i].slot; + id = pci_conf_read32(0, bus, slot, 0, PCI_VENDOR_ID); + + vendor = id & 0xffff; + device = (id >> 16) & 0xffff; + if ( pci_probes[i].vendor == vendor && pci_probes[i].device == device ) + break; + } + + if ( i >= ARRAY_SIZE(pci_probes) ) + return; + + /* SYS_CFG */ + address = MSR_K8_SYSCFG; + rdmsrl(address, val); + + /* TOP_MEM2 is not enabled? 
*/ + if ( !(val & (1 << 21)) ) + { + tom2 = 1ULL << 32; + } + else + { + /* TOP_MEM2 */ + address = MSR_K8_TOP_MEM2; + rdmsrl(address, val); + tom2 = max(val & 0xffffff800000ULL, 1ULL << 32); + } + + /* + * need to check if the range is in the high mmio range that is + * above 4G + */ + for ( hi_mmio_num = i = 0; i < 8; i++ ) + { + val = pci_conf_read32(0, bus, slot, 1, 0x80 + (i << 3)); + if ( !(val & 3) ) + continue; + + start = (val & 0xffffff00) << 8; /* 39:16 on 31:8*/ + val = pci_conf_read32(0, bus, slot, 1, 0x84 + (i << 3)); + end = ((val & 0xffffff00) << 8) | 0xffff; /* 39:16 on 31:8*/ + + if ( end < tom2 ) + continue; + + for ( j = hi_mmio_num; j; --j ) + { + if ( range[j - 1].start < start ) + break; + range[j] = range[j - 1]; + } + range[j].start = start; + range[j].end = end; + hi_mmio_num++; + } + + start = FAM10H_PCI_MMCONF_BASE; + if ( start <= tom2 ) + start = (tom2 + 2 * UNIT - 1) & MASK; + + if ( !hi_mmio_num ) + goto out; + + if ( range[hi_mmio_num - 1].end < start ) + goto out; + if ( range[0].start > start + SIZE ) + goto out; + + /* need to find one window */ + start = (range[0].start & MASK) - UNIT; + if ( start > tom2 && BASE_VALID(start) ) + goto out; + start = (range[hi_mmio_num - 1].end + UNIT) & MASK; + if ( BASE_VALID(start) ) + goto out; + /* need to find window between ranges */ + for ( i = 1; i < hi_mmio_num; i++ ) + { + start = (range[i - 1].end + UNIT) & MASK; + end = range[i].start & MASK; + if ( end >= start + SIZE && BASE_VALID(start) ) + goto out; + } + return; out: - if (e820_add_range(&e820, start, start + SIZE, E820_RESERVED)) - fam10h_pci_mmconf_base = start; + if ( e820_add_range(&e820, start, start + SIZE, E820_RESERVED) ) + fam10h_pci_mmconf_base = start; } void fam10h_check_enable_mmcfg(void) { - u64 val; - bool_t print = opt_cpu_info; - - if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF)) - return; - - rdmsrl(MSR_FAM10H_MMIO_CONF_BASE, val); - - /* try to make sure that AP's setting is identical to BSP setting */ - if (val & FAM10H_MMIO_CONF_ENABLE) { - u64 base = val & MASK; - - if (!fam10h_pci_mmconf_base) { - fam10h_pci_mmconf_base = base; - return; - } - if (fam10h_pci_mmconf_base == base) - return; - } - - /* - * if it is not enabled, try to enable it and assume only one segment - * with 256 buses - */ - /* only try to get setting from BSP */ - if (!fam10h_pci_mmconf_base) { - get_fam10h_pci_mmconf_base(); - print = 1; - } - if (!fam10h_pci_mmconf_base) { - pci_probe &= ~PCI_CHECK_ENABLE_AMD_MMCONF; - return; - } - - if (print) - printk(KERN_INFO "Enable MMCONFIG on AMD Fam10h at %"PRIx64"\n", - fam10h_pci_mmconf_base); - val &= ~((FAM10H_MMIO_CONF_BASE_MASK<> 1) & 3) { + switch ((pciexbar >> 1) & 3) + { case 0: mask = 0xf0000000U; - len = 0x10000000U; + len = 0x10000000U; break; case 1: mask = 0xf8000000U; - len = 0x08000000U; + len = 0x08000000U; break; case 2: mask = 0xfc000000U; - len = 0x04000000U; + len = 0x04000000U; break; default: pci_mmcfg_config_num = 0; @@ -116,16 +118,17 @@ static const char __init *pci_mmcfg_intel_945(void) /* Errata #2, things break when not aligned on a 256Mb boundary */ /* Can only happen in 64M/128M mode */ - if ((pciexbar & mask) & 0x0fffffffU) + if ( (pciexbar & mask) & 0x0fffffffU ) pci_mmcfg_config_num = 0; /* Don't hit the APIC registers and their friends */ - if ((pciexbar & mask) >= 0xf0000000U) + if ( (pciexbar & mask) >= 0xf0000000U ) pci_mmcfg_config_num = 0; - if (pci_mmcfg_config_num) { + if ( pci_mmcfg_config_num ) + { pci_mmcfg_config = xzalloc(struct acpi_mcfg_allocation); - if 
(!pci_mmcfg_config) + if ( !pci_mmcfg_config ) return NULL; pci_mmcfg_config[0].address = pciexbar & mask; pci_mmcfg_config[0].pci_segment = 0; @@ -143,42 +146,44 @@ static const char __init *pci_mmcfg_amd_fam10h(void) int i; unsigned segnbits = 0, busnbits; - if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF)) + if ( !(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF) ) return NULL; address = MSR_FAM10H_MMIO_CONF_BASE; - if (rdmsr_safe(address, msr_content)) + if ( rdmsr_safe(address, msr_content) ) return NULL; /* mmconfig is not enable */ - if (!(msr_content & FAM10H_MMIO_CONF_ENABLE)) + if ( !(msr_content & FAM10H_MMIO_CONF_ENABLE) ) return NULL; base = msr_content & - (FAM10H_MMIO_CONF_BASE_MASK<> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) & - FAM10H_MMIO_CONF_BUSRANGE_MASK; + FAM10H_MMIO_CONF_BUSRANGE_MASK; /* * only handle bus 0 ? * need to skip it */ - if (!busnbits) + if ( !busnbits ) return NULL; - if (busnbits > 8) { + if ( busnbits > 8 ) + { segnbits = busnbits - 8; busnbits = 8; } pci_mmcfg_config_num = (1 << segnbits); - pci_mmcfg_config = xmalloc_array(struct acpi_mcfg_allocation, - pci_mmcfg_config_num); - if (!pci_mmcfg_config) + pci_mmcfg_config = + xmalloc_array(struct acpi_mcfg_allocation, pci_mmcfg_config_num); + if ( !pci_mmcfg_config ) return NULL; - for (i = 0; i < (1 << segnbits); i++) { + for ( i = 0; i < (1 << segnbits); i++ ) + { pci_mmcfg_config[i].address = base + ((unsigned long)i << 28); pci_mmcfg_config[i].pci_segment = i; pci_mmcfg_config[i].start_bus_number = 0; @@ -194,22 +199,23 @@ static const char __init *pci_mmcfg_nvidia_mcp55(void) static bool_t __initdata mcp55_checked; int bus, i; - static const u32 extcfg_regnum = 0x90; + static const u32 extcfg_regnum = 0x90; static const u32 extcfg_enable_mask = 1u << 31; - static const u32 extcfg_start_mask = 0xffu << 16; + static const u32 extcfg_start_mask = 0xffu << 16; static const int extcfg_start_shift = 16; - static const u32 extcfg_size_mask = 3u << 28; - static const int extcfg_size_shift = 28; - static const int extcfg_sizebus[] = {0xff, 0x7f, 0x3f, 0x1f}; + static const u32 extcfg_size_mask = 3u << 28; + static const int extcfg_size_shift = 28; + static const int extcfg_sizebus[] = {0xff, 0x7f, 0x3f, 0x1f}; static const u32 extcfg_base_mask[] = {0x7ff8, 0x7ffc, 0x7ffe, 0x7fff}; static const int extcfg_base_lshift = 25; /* check if amd fam10h already took over */ - if (!acpi_disabled || pci_mmcfg_config_num || mcp55_checked) + if ( !acpi_disabled || pci_mmcfg_config_num || mcp55_checked ) return NULL; mcp55_checked = 1; - for (i = bus = 0; bus < 256; bus++) { + for ( i = bus = 0; bus < 256; bus++ ) + { u32 l, extcfg; u16 vendor, device; @@ -217,23 +223,24 @@ static const char __init *pci_mmcfg_nvidia_mcp55(void) vendor = l & 0xffff; device = (l >> 16) & 0xffff; - if (PCI_VENDOR_ID_NVIDIA != vendor || 0x0369 != device) + if ( PCI_VENDOR_ID_NVIDIA != vendor || 0x0369 != device ) continue; extcfg = pci_conf_read32(0, bus, 0, 0, extcfg_regnum); - if (extcfg & extcfg_enable_mask) + if ( extcfg & extcfg_enable_mask ) i++; } - if (!i) + if ( !i ) return NULL; pci_mmcfg_config_num = i; - pci_mmcfg_config = xmalloc_array(struct acpi_mcfg_allocation, - pci_mmcfg_config_num); + pci_mmcfg_config = + xmalloc_array(struct acpi_mcfg_allocation, pci_mmcfg_config_num); - for (i = bus = 0; bus < 256; bus++) { + for ( i = bus = 0; bus < 256; bus++ ) + { u64 base; u32 l, extcfg; u16 vendor, device; @@ -243,15 +250,15 @@ static const char __init *pci_mmcfg_nvidia_mcp55(void) vendor = l & 0xffff; device = (l >> 16) & 0xffff; - if 
(PCI_VENDOR_ID_NVIDIA != vendor || 0x0369 != device) + if ( PCI_VENDOR_ID_NVIDIA != vendor || 0x0369 != device ) continue; extcfg = pci_conf_read32(0, bus, 0, 0, extcfg_regnum); - if (!(extcfg & extcfg_enable_mask)) + if ( !(extcfg & extcfg_enable_mask) ) continue; - if (i >= pci_mmcfg_config_num) + if ( i >= pci_mmcfg_config_num ) break; size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift; @@ -266,7 +273,7 @@ static const char __init *pci_mmcfg_nvidia_mcp55(void) i++; } - if (bus == 256) + if ( bus == 256 ) return "nVidia MCP55"; pci_mmcfg_config_num = 0; @@ -276,7 +283,8 @@ static const char __init *pci_mmcfg_nvidia_mcp55(void) return NULL; } -struct pci_mmcfg_hostbridge_probe { +struct pci_mmcfg_hostbridge_probe +{ u32 bus; u32 devfn; u32 vendor; @@ -285,16 +293,13 @@ struct pci_mmcfg_hostbridge_probe { }; static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = { - { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_E7520_MCH, pci_mmcfg_e7520 }, - { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_82945G_HB, pci_mmcfg_intel_945 }, - { 0, PCI_DEVFN(0x18, 0), PCI_VENDOR_ID_AMD, - 0x1200, pci_mmcfg_amd_fam10h }, - { 0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD, - 0x1200, pci_mmcfg_amd_fam10h }, - { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_NVIDIA, - 0x0369, pci_mmcfg_nvidia_mcp55 }, + {0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, + pci_mmcfg_e7520}, + {0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82945G_HB, + pci_mmcfg_intel_945}, + {0, PCI_DEVFN(0x18, 0), PCI_VENDOR_ID_AMD, 0x1200, pci_mmcfg_amd_fam10h}, + {0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD, 0x1200, pci_mmcfg_amd_fam10h}, + {0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_NVIDIA, 0x0369, pci_mmcfg_nvidia_mcp55}, }; static int __init pci_mmcfg_check_hostbridge(void) @@ -309,48 +314,52 @@ static int __init pci_mmcfg_check_hostbridge(void) pci_mmcfg_config = NULL; name = NULL; - for (i = 0; !name && i < ARRAY_SIZE(pci_mmcfg_probes); i++) { - bus = pci_mmcfg_probes[i].bus; + for ( i = 0; !name && i < ARRAY_SIZE(pci_mmcfg_probes); i++ ) + { + bus = pci_mmcfg_probes[i].bus; devfn = pci_mmcfg_probes[i].devfn; l = pci_conf_read32(0, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), 0); vendor = l & 0xffff; device = (l >> 16) & 0xffff; - if (pci_mmcfg_probes[i].vendor == vendor && - pci_mmcfg_probes[i].device == device) + if ( pci_mmcfg_probes[i].vendor == vendor && + pci_mmcfg_probes[i].device == device ) name = pci_mmcfg_probes[i].probe(); } - if (name) { - printk(KERN_INFO "PCI: Found %s %s MMCONFIG support.\n", - name, pci_mmcfg_config_num ? "with" : "without"); + if ( name ) + { + printk(KERN_INFO "PCI: Found %s %s MMCONFIG support.\n", name, + pci_mmcfg_config_num ? 
"with" : "without"); } return name != NULL; } -static int __init is_mmconf_reserved( - u64 addr, u64 size, int i, - typeof(pci_mmcfg_config[0]) *cfg) +static int __init is_mmconf_reserved(u64 addr, u64 size, int i, + typeof(pci_mmcfg_config[0]) *cfg) { u64 old_size = size; int valid = 0; - while (!e820_all_mapped(addr, addr + size - 1, E820_RESERVED)) { + while ( !e820_all_mapped(addr, addr + size - 1, E820_RESERVED) ) + { size >>= 1; - if (size < (16UL<<20)) + if ( size < (16UL << 20) ) break; } - if (size >= (16UL<<20) || size == old_size) { + if ( size >= (16UL << 20) || size == old_size ) + { printk(KERN_NOTICE "PCI: MCFG area at %lx reserved in E820\n", addr); valid = 1; - if (old_size != size) { + if ( old_size != size ) + { /* update end_bus_number */ - cfg->end_bus_number = cfg->start_bus_number + ((size>>20) - 1); + cfg->end_bus_number = cfg->start_bus_number + ((size >> 20) - 1); printk(KERN_NOTICE "PCI: updated MCFG configuration %d: base %lx " - "segment %hu buses %u - %u\n", + "segment %hu buses %u - %u\n", i, (unsigned long)cfg->address, cfg->pci_segment, (unsigned int)cfg->start_bus_number, (unsigned int)cfg->end_bus_number); @@ -366,12 +375,12 @@ static bool_t __init pci_mmcfg_reject_broken(void) int i; bool_t valid = 1; - if ((pci_mmcfg_config_num == 0) || - (pci_mmcfg_config == NULL) || - (pci_mmcfg_config[0].address == 0)) + if ( (pci_mmcfg_config_num == 0) || (pci_mmcfg_config == NULL) || + (pci_mmcfg_config[0].address == 0) ) return 0; - for (i = 0; i < pci_mmcfg_config_num; i++) { + for ( i = 0; i < pci_mmcfg_config_num; i++ ) + { u64 addr, size; cfg = &pci_mmcfg_config[i]; @@ -381,13 +390,14 @@ static bool_t __init pci_mmcfg_reject_broken(void) size = cfg->end_bus_number + 1 - cfg->start_bus_number; size <<= 20; printk(KERN_NOTICE "PCI: MCFG configuration %d: base %lx " - "segment %04x buses %02x - %02x\n", + "segment %04x buses %02x - %02x\n", i, (unsigned long)cfg->address, cfg->pci_segment, (unsigned int)cfg->start_bus_number, (unsigned int)cfg->end_bus_number); - if (!is_mmconf_reserved(addr, size, i, cfg) || - pci_mmcfg_arch_enable(i)) { + if ( !is_mmconf_reserved(addr, size, i, cfg) || + pci_mmcfg_arch_enable(i) ) + { pci_mmcfg_arch_disable(i); valid = 0; } @@ -403,32 +413,34 @@ void __init acpi_mmcfg_init(void) pci_segments_init(); /* MMCONFIG disabled */ - if ((pci_probe & PCI_PROBE_MMCONF) == 0) + if ( (pci_probe & PCI_PROBE_MMCONF) == 0 ) return; /* MMCONFIG already enabled */ - if (!(pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF)) + if ( !(pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF) ) return; - if (pci_mmcfg_check_hostbridge()) { + if ( pci_mmcfg_check_hostbridge() ) + { unsigned int i; pci_mmcfg_arch_init(); - for (i = 0; i < pci_mmcfg_config_num; ++i) - if (pci_mmcfg_arch_enable(i)) + for ( i = 0; i < pci_mmcfg_config_num; ++i ) + if ( pci_mmcfg_arch_enable(i) ) valid = 0; - } else { + } + else + { acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg); pci_mmcfg_arch_init(); valid = pci_mmcfg_reject_broken(); } - if ((pci_mmcfg_config_num == 0) || - (pci_mmcfg_config == NULL) || - (pci_mmcfg_config[0].address == 0)) + if ( (pci_mmcfg_config_num == 0) || (pci_mmcfg_config == NULL) || + (pci_mmcfg_config[0].address == 0) ) return; - if (valid) + if ( valid ) pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; } @@ -438,16 +450,19 @@ int pci_mmcfg_reserved(uint64_t address, unsigned int segment, { unsigned int i; - if (flags & ~XEN_PCI_MMCFG_RESERVED) + if ( flags & ~XEN_PCI_MMCFG_RESERVED ) return -EINVAL; - for (i = 0; i < pci_mmcfg_config_num; 
++i) { + for ( i = 0; i < pci_mmcfg_config_num; ++i ) + { const typeof(pci_mmcfg_config[0]) *cfg = &pci_mmcfg_config[i]; - if (cfg->pci_segment == segment && - cfg->start_bus_number == start_bus && - cfg->end_bus_number == end_bus) { - if (cfg->address != address) { + if ( cfg->pci_segment == segment && + cfg->start_bus_number == start_bus && + cfg->end_bus_number == end_bus ) + { + if ( cfg->address != address ) + { printk(KERN_WARNING "Base address presented for segment %04x bus %02x-%02x" " (%08" PRIx64 ") does not match previously obtained" @@ -455,7 +470,7 @@ int pci_mmcfg_reserved(uint64_t address, unsigned int segment, segment, start_bus, end_bus, address, cfg->address); return -EIO; } - if (flags & XEN_PCI_MMCFG_RESERVED) + if ( flags & XEN_PCI_MMCFG_RESERVED ) return pci_mmcfg_arch_enable(i); pci_mmcfg_arch_disable(i); return 0; diff --git a/xen/arch/x86/x86_64/mmconfig_64.c b/xen/arch/x86/x86_64/mmconfig_64.c index 2b3085931e..7c0f36b5f8 100644 --- a/xen/arch/x86/x86_64/mmconfig_64.c +++ b/xen/arch/x86/x86_64/mmconfig_64.c @@ -19,7 +19,8 @@ #include "mmconfig.h" /* Static virtual mapping of the MMCONFIG aperture */ -struct mmcfg_virt { +struct mmcfg_virt +{ struct acpi_mcfg_allocation *cfg; char __iomem *virt; }; @@ -31,11 +32,12 @@ static char __iomem *get_virt(unsigned int seg, unsigned int *bus) struct acpi_mcfg_allocation *cfg; int cfg_num; - for (cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++) { + for ( cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++ ) + { cfg = pci_mmcfg_virt[cfg_num].cfg; - if (cfg->pci_segment == seg && - (cfg->start_bus_number <= *bus) && - (cfg->end_bus_number >= *bus)) { + if ( cfg->pci_segment == seg && (cfg->start_bus_number <= *bus) && + (cfg->end_bus_number >= *bus) ) + { *bus -= cfg->start_bus_number; return pci_mmcfg_virt[cfg_num].virt; } @@ -45,32 +47,36 @@ static char __iomem *get_virt(unsigned int seg, unsigned int *bus) return NULL; } -static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn) +static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, + unsigned int devfn) { char __iomem *addr; addr = get_virt(seg, &bus); - if (!addr) + if ( !addr ) return NULL; - return addr + ((bus << 20) | (devfn << 12)); + return addr + ((bus << 20) | (devfn << 12)); } -int pci_mmcfg_read(unsigned int seg, unsigned int bus, - unsigned int devfn, int reg, int len, u32 *value) +int pci_mmcfg_read(unsigned int seg, unsigned int bus, unsigned int devfn, + int reg, int len, u32 *value) { char __iomem *addr; /* Why do we have this when nobody checks it. How about a BUG()!? -AK */ - if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) { -err: *value = -1; + if ( unlikely((bus > 255) || (devfn > 255) || (reg > 4095)) ) + { + err: + *value = -1; return -EINVAL; } addr = pci_dev_base(seg, bus, devfn); - if (!addr) + if ( !addr ) goto err; - switch (len) { + switch (len) + { case 1: *value = mmio_config_readb(addr + reg); break; @@ -85,20 +91,21 @@ err: *value = -1; return 0; } -int pci_mmcfg_write(unsigned int seg, unsigned int bus, - unsigned int devfn, int reg, int len, u32 value) +int pci_mmcfg_write(unsigned int seg, unsigned int bus, unsigned int devfn, + int reg, int len, u32 value) { char __iomem *addr; /* Why do we have this when nobody checks it. How about a BUG()!? 
-AK */ - if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) + if ( unlikely((bus > 255) || (devfn > 255) || (reg > 4095)) ) return -EINVAL; addr = pci_dev_base(seg, bus, devfn); - if (!addr) + if ( !addr ) return -EINVAL; - switch (len) { + switch (len) + { case 1: mmio_config_writeb(addr + reg, value); break; @@ -121,16 +128,16 @@ static void __iomem *mcfg_ioremap(const struct acpi_mcfg_allocation *cfg, virt = PCI_MCFG_VIRT_START + (idx << mmcfg_pci_segment_shift) + (cfg->start_bus_number << 20); size = (cfg->end_bus_number - cfg->start_bus_number + 1) << 20; - if (virt + size < virt || virt + size > PCI_MCFG_VIRT_END) + if ( virt + size < virt || virt + size > PCI_MCFG_VIRT_END ) return NULL; - if (map_pages_to_xen(virt, - mfn_add(maddr_to_mfn(cfg->address), - (cfg->start_bus_number << (20 - PAGE_SHIFT))), - PFN_DOWN(size), prot)) + if ( map_pages_to_xen(virt, + mfn_add(maddr_to_mfn(cfg->address), + (cfg->start_bus_number << (20 - PAGE_SHIFT))), + PFN_DOWN(size), prot) ) return NULL; - return (void __iomem *) virt; + return (void __iomem *)virt; } int pci_mmcfg_arch_enable(unsigned int idx) @@ -138,10 +145,11 @@ int pci_mmcfg_arch_enable(unsigned int idx) const typeof(pci_mmcfg_config[0]) *cfg = pci_mmcfg_virt[idx].cfg; unsigned long start_mfn, end_mfn; - if (pci_mmcfg_virt[idx].virt) + if ( pci_mmcfg_virt[idx].virt ) return 0; pci_mmcfg_virt[idx].virt = mcfg_ioremap(cfg, idx, PAGE_HYPERVISOR_UC); - if (!pci_mmcfg_virt[idx].virt) { + if ( !pci_mmcfg_virt[idx].virt ) + { printk(KERN_ERR "PCI: Cannot map MCFG aperture for segment %04x\n", cfg->pci_segment); return -ENOMEM; @@ -175,19 +183,20 @@ void pci_mmcfg_arch_disable(unsigned int idx) cfg->pci_segment, cfg->start_bus_number, cfg->end_bus_number); } -bool_t pci_mmcfg_decode(unsigned long mfn, unsigned int *seg, - unsigned int *bdf) +bool_t pci_mmcfg_decode(unsigned long mfn, unsigned int *seg, unsigned int *bdf) { unsigned int idx; - for (idx = 0; idx < pci_mmcfg_config_num; ++idx) { + for ( idx = 0; idx < pci_mmcfg_config_num; ++idx ) + { const struct acpi_mcfg_allocation *cfg = pci_mmcfg_virt[idx].cfg; - if (pci_mmcfg_virt[idx].virt && - mfn >= PFN_DOWN(cfg->address) + PCI_BDF(cfg->start_bus_number, - 0, 0) && - mfn <= PFN_DOWN(cfg->address) + PCI_BDF(cfg->end_bus_number, - ~0, ~0)) { + if ( pci_mmcfg_virt[idx].virt && + mfn >= PFN_DOWN(cfg->address) + + PCI_BDF(cfg->start_bus_number, 0, 0) && + mfn <= + PFN_DOWN(cfg->address) + PCI_BDF(cfg->end_bus_number, ~0, ~0) ) + { *seg = cfg->pci_segment; *bdf = mfn - PFN_DOWN(cfg->address); return 1; @@ -203,27 +212,29 @@ bool_t pci_ro_mmcfg_decode(unsigned long mfn, unsigned int *seg, const unsigned long *ro_map; return pci_mmcfg_decode(mfn, seg, bdf) && - ((ro_map = pci_get_ro_map(*seg)) == NULL || - !test_bit(*bdf, ro_map)); + ((ro_map = pci_get_ro_map(*seg)) == NULL || !test_bit(*bdf, ro_map)); } int __init pci_mmcfg_arch_init(void) { int i; - if (pci_mmcfg_virt) + if ( pci_mmcfg_virt ) return 0; pci_mmcfg_virt = xzalloc_array(struct mmcfg_virt, pci_mmcfg_config_num); - if (pci_mmcfg_virt == NULL) { - printk(KERN_ERR "PCI: Can not allocate memory for mmconfig structures\n"); + if ( pci_mmcfg_virt == NULL ) + { + printk(KERN_ERR + "PCI: Can not allocate memory for mmconfig structures\n"); pci_mmcfg_config_num = 0; return 0; } - for (i = 0; i < pci_mmcfg_config_num; ++i) { + for ( i = 0; i < pci_mmcfg_config_num; ++i ) + { pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i]; - while (pci_mmcfg_config[i].end_bus_number >> mmcfg_pci_segment_shift) + while ( 
pci_mmcfg_config[i].end_bus_number >> mmcfg_pci_segment_shift ) ++mmcfg_pci_segment_shift; } mmcfg_pci_segment_shift += 20; diff --git a/xen/arch/x86/x86_64/pci.c b/xen/arch/x86/x86_64/pci.c index 6e3f5cf203..6912a314c5 100644 --- a/xen/arch/x86/x86_64/pci.c +++ b/xen/arch/x86/x86_64/pci.c @@ -1,6 +1,6 @@ /****************************************************************************** * pci.c - * + * * Architecture-dependent PCI access functions. */ @@ -11,9 +11,8 @@ #define PCI_CONF_ADDRESS(bus, dev, func, reg) \ (0x80000000 | (bus << 16) | (dev << 11) | (func << 8) | (reg & ~3)) -uint8_t pci_conf_read8( - unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func, - unsigned int reg) +uint8_t pci_conf_read8(unsigned int seg, unsigned int bus, unsigned int dev, + unsigned int func, unsigned int reg) { u32 value; @@ -29,9 +28,8 @@ uint8_t pci_conf_read8( } } -uint16_t pci_conf_read16( - unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func, - unsigned int reg) +uint16_t pci_conf_read16(unsigned int seg, unsigned int bus, unsigned int dev, + unsigned int func, unsigned int reg) { u32 value; @@ -47,9 +45,8 @@ uint16_t pci_conf_read16( } } -uint32_t pci_conf_read32( - unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func, - unsigned int reg) +uint32_t pci_conf_read32(unsigned int seg, unsigned int bus, unsigned int dev, + unsigned int func, unsigned int reg) { u32 value; @@ -65,9 +62,8 @@ uint32_t pci_conf_read32( } } -void pci_conf_write8( - unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func, - unsigned int reg, uint8_t data) +void pci_conf_write8(unsigned int seg, unsigned int bus, unsigned int dev, + unsigned int func, unsigned int reg, uint8_t data) { if ( seg || reg > 255 ) pci_mmcfg_write(seg, bus, PCI_DEVFN(dev, func), reg, 1, data); @@ -78,9 +74,8 @@ void pci_conf_write8( } } -void pci_conf_write16( - unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func, - unsigned int reg, uint16_t data) +void pci_conf_write16(unsigned int seg, unsigned int bus, unsigned int dev, + unsigned int func, unsigned int reg, uint16_t data) { if ( seg || reg > 255 ) pci_mmcfg_write(seg, bus, PCI_DEVFN(dev, func), reg, 2, data); @@ -91,9 +86,8 @@ void pci_conf_write16( } } -void pci_conf_write32( - unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func, - unsigned int reg, uint32_t data) +void pci_conf_write32(unsigned int seg, unsigned int bus, unsigned int dev, + unsigned int func, unsigned int reg, uint32_t data) { if ( seg || reg > 255 ) pci_mmcfg_write(seg, bus, PCI_DEVFN(dev, func), reg, 4, data); diff --git a/xen/arch/x86/x86_64/physdev.c b/xen/arch/x86/x86_64/physdev.c index c5a00ea93f..34002937a7 100644 --- a/xen/arch/x86/x86_64/physdev.c +++ b/xen/arch/x86/x86_64/physdev.c @@ -13,18 +13,18 @@ asm(".file \"" __FILE__ "\""); #define do_physdev_op compat_physdev_op -#define physdev_apic compat_physdev_apic -#define physdev_apic_t physdev_apic_compat_t +#define physdev_apic compat_physdev_apic +#define physdev_apic_t physdev_apic_compat_t #define xen_physdev_eoi physdev_eoi CHECK_physdev_eoi; #undef xen_physdev_eoi -#define physdev_pirq_eoi_gmfn compat_physdev_pirq_eoi_gmfn -#define physdev_pirq_eoi_gmfn_t physdev_pirq_eoi_gmfn_compat_t +#define physdev_pirq_eoi_gmfn compat_physdev_pirq_eoi_gmfn +#define physdev_pirq_eoi_gmfn_t physdev_pirq_eoi_gmfn_compat_t -#define physdev_set_iobitmap compat_physdev_set_iobitmap -#define physdev_set_iobitmap_t physdev_set_iobitmap_compat_t +#define 
physdev_set_iobitmap compat_physdev_set_iobitmap +#define physdev_set_iobitmap_t physdev_set_iobitmap_compat_t #define xen_physdev_set_iopl physdev_set_iopl CHECK_physdev_set_iopl; @@ -38,7 +38,7 @@ CHECK_physdev_irq; CHECK_physdev_irq_status_query; #undef xen_physdev_irq_status_query -#define physdev_map_pirq_t physdev_map_pirq_compat_t +#define physdev_map_pirq_t physdev_map_pirq_compat_t #define xen_physdev_unmap_pirq physdev_unmap_pirq CHECK_physdev_unmap_pirq; @@ -73,13 +73,13 @@ CHECK_physdev_pci_device_add #undef xen_physdev_pci_device_add #define xen_physdev_pci_device physdev_pci_device -CHECK_physdev_pci_device + CHECK_physdev_pci_device #undef xen_physdev_pci_device #define COMPAT #undef guest_handle_okay -#define guest_handle_okay compat_handle_okay -typedef int ret_t; +#define guest_handle_okay compat_handle_okay + typedef int ret_t; #include "../physdev.c" diff --git a/xen/arch/x86/x86_64/platform_hypercall.c b/xen/arch/x86/x86_64/platform_hypercall.c index 8fa2543a2d..56cf1c0a09 100644 --- a/xen/arch/x86/x86_64/platform_hypercall.c +++ b/xen/arch/x86/x86_64/platform_hypercall.c @@ -8,18 +8,18 @@ asm(".file \"" __FILE__ "\""); #include DEFINE_XEN_GUEST_HANDLE(compat_platform_op_t); -#define xen_platform_op compat_platform_op -#define xen_platform_op_t compat_platform_op_t -#define do_platform_op(x) compat_platform_op(_##x) +#define xen_platform_op compat_platform_op +#define xen_platform_op_t compat_platform_op_t +#define do_platform_op(x) compat_platform_op(_##x) -#define efi_get_info efi_compat_get_info +#define efi_get_info efi_compat_get_info #define efi_runtime_call(x) efi_compat_runtime_call(x) #define xen_processor_performance compat_processor_performance -#define set_px_pminfo compat_set_px_pminfo +#define set_px_pminfo compat_set_px_pminfo #define xen_processor_power compat_processor_power -#define set_cx_pminfo compat_set_cx_pminfo +#define set_cx_pminfo compat_set_cx_pminfo #define xen_pf_pcpuinfo xenpf_pcpuinfo CHECK_pf_pcpuinfo; diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c index bf7870ef6d..d8d3cdd002 100644 --- a/xen/arch/x86/x86_64/traps.c +++ b/xen/arch/x86/x86_64/traps.c @@ -24,7 +24,6 @@ #include #include - static void print_xen_info(void) { char taint_str[TAINT_STRING_MAX_LEN]; @@ -34,7 +33,12 @@ static void print_xen_info(void) debug_build() ? 'y' : 'n', print_tainted(taint_str)); } -enum context { CTXT_hypervisor, CTXT_pv_guest, CTXT_hvm_guest }; +enum context +{ + CTXT_hypervisor, + CTXT_pv_guest, + CTXT_hvm_guest +}; /* (ab)use crs[5..7] for fs/gs bases. 
*/ static void read_registers(struct cpu_user_regs *regs, unsigned long crs[8]) @@ -52,15 +56,14 @@ static void read_registers(struct cpu_user_regs *regs, unsigned long crs[8]) crs[7] = rdgsshadow(); } -static void _show_registers( - const struct cpu_user_regs *regs, unsigned long crs[8], - enum context context, const struct vcpu *v) +static void _show_registers(const struct cpu_user_regs *regs, + unsigned long crs[8], enum context context, + const struct vcpu *v) { - static const char *const context_names[] = { - [CTXT_hypervisor] = "hypervisor", - [CTXT_pv_guest] = "pv guest", - [CTXT_hvm_guest] = "hvm guest" - }; + static const char *const context_names[] = {[CTXT_hypervisor] = + "hypervisor", + [CTXT_pv_guest] = "pv guest", + [CTXT_hvm_guest] = "hvm guest"}; printk("RIP: %04x:[<%016lx>]", regs->cs, regs->rip); if ( context == CTXT_hypervisor ) @@ -72,25 +75,23 @@ static void _show_registers( if ( v && !is_idle_vcpu(v) ) printk(" (%pv)", v); - printk("\nrax: %016lx rbx: %016lx rcx: %016lx\n", - regs->rax, regs->rbx, regs->rcx); - printk("rdx: %016lx rsi: %016lx rdi: %016lx\n", - regs->rdx, regs->rsi, regs->rdi); - printk("rbp: %016lx rsp: %016lx r8: %016lx\n", - regs->rbp, regs->rsp, regs->r8); - printk("r9: %016lx r10: %016lx r11: %016lx\n", - regs->r9, regs->r10, regs->r11); - printk("r12: %016lx r13: %016lx r14: %016lx\n", - regs->r12, regs->r13, regs->r14); - printk("r15: %016lx cr0: %016lx cr4: %016lx\n", - regs->r15, crs[0], crs[4]); + printk("\nrax: %016lx rbx: %016lx rcx: %016lx\n", regs->rax, regs->rbx, + regs->rcx); + printk("rdx: %016lx rsi: %016lx rdi: %016lx\n", regs->rdx, regs->rsi, + regs->rdi); + printk("rbp: %016lx rsp: %016lx r8: %016lx\n", regs->rbp, regs->rsp, + regs->r8); + printk("r9: %016lx r10: %016lx r11: %016lx\n", regs->r9, regs->r10, + regs->r11); + printk("r12: %016lx r13: %016lx r14: %016lx\n", regs->r12, regs->r13, + regs->r14); + printk("r15: %016lx cr0: %016lx cr4: %016lx\n", regs->r15, crs[0], + crs[4]); printk("cr3: %016lx cr2: %016lx\n", crs[3], crs[2]); - printk("fsb: %016lx gsb: %016lx gss: %016lx\n", - crs[5], crs[6], crs[7]); + printk("fsb: %016lx gsb: %016lx gss: %016lx\n", crs[5], crs[6], crs[7]); printk("ds: %04x es: %04x fs: %04x gs: %04x " "ss: %04x cs: %04x\n", - regs->ds, regs->es, regs->fs, - regs->gs, regs->ss, regs->cs); + regs->ds, regs->es, regs->fs, regs->gs, regs->ss, regs->cs); } void show_registers(const struct cpu_user_regs *regs) @@ -152,10 +153,10 @@ void show_registers(const struct cpu_user_regs *regs) rdmsrl(ler_msr + 1, to); /* Upper bits may store metadata. Re-canonicalise for printing. */ - printk("ler: from %016"PRIx64" [%ps]\n", - from, _p(canonicalise_addr(from))); - printk(" to %016"PRIx64" [%ps]\n", - to, _p(canonicalise_addr(to))); + printk("ler: from %016" PRIx64 " [%ps]\n", from, + _p(canonicalise_addr(from))); + printk(" to %016" PRIx64 " [%ps]\n", to, + _p(canonicalise_addr(to))); } } @@ -171,9 +172,8 @@ void vcpu_show_registers(const struct vcpu *v) crs[0] = v->arch.pv.ctrlreg[0]; crs[2] = arch_get_cr2(v); - crs[3] = pagetable_get_paddr(kernel ? - v->arch.guest_table : - v->arch.guest_table_user); + crs[3] = pagetable_get_paddr(kernel ? v->arch.guest_table + : v->arch.guest_table_user); crs[4] = v->arch.pv.ctrlreg[4]; crs[5] = v->arch.pv.fs_base; crs[6 + !kernel] = v->arch.pv.gs_base_kernel; @@ -198,50 +198,51 @@ void show_page_walk(unsigned long addr) l4e = l4t[l4_table_offset(addr)]; unmap_domain_page(l4t); mfn = l4e_get_pfn(l4e); - pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ? 
- get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; - printk(" L4[0x%03lx] = %"PRIpte" %016lx\n", - l4_table_offset(addr), l4e_get_intpte(l4e), pfn); - if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) || - !mfn_valid(_mfn(mfn)) ) + pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid + ? get_gpfn_from_mfn(mfn) + : INVALID_M2P_ENTRY; + printk(" L4[0x%03lx] = %" PRIpte " %016lx\n", l4_table_offset(addr), + l4e_get_intpte(l4e), pfn); + if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) || !mfn_valid(_mfn(mfn)) ) return; l3t = map_domain_page(_mfn(mfn)); l3e = l3t[l3_table_offset(addr)]; unmap_domain_page(l3t); mfn = l3e_get_pfn(l3e); - pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ? - get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; - printk(" L3[0x%03lx] = %"PRIpte" %016lx%s\n", - l3_table_offset(addr), l3e_get_intpte(l3e), pfn, + pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid + ? get_gpfn_from_mfn(mfn) + : INVALID_M2P_ENTRY; + printk(" L3[0x%03lx] = %" PRIpte " %016lx%s\n", l3_table_offset(addr), + l3e_get_intpte(l3e), pfn, (l3e_get_flags(l3e) & _PAGE_PSE) ? " (PSE)" : ""); if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || - (l3e_get_flags(l3e) & _PAGE_PSE) || - !mfn_valid(_mfn(mfn)) ) + (l3e_get_flags(l3e) & _PAGE_PSE) || !mfn_valid(_mfn(mfn)) ) return; l2t = map_domain_page(_mfn(mfn)); l2e = l2t[l2_table_offset(addr)]; unmap_domain_page(l2t); mfn = l2e_get_pfn(l2e); - pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ? - get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; - printk(" L2[0x%03lx] = %"PRIpte" %016lx%s\n", - l2_table_offset(addr), l2e_get_intpte(l2e), pfn, + pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid + ? get_gpfn_from_mfn(mfn) + : INVALID_M2P_ENTRY; + printk(" L2[0x%03lx] = %" PRIpte " %016lx%s\n", l2_table_offset(addr), + l2e_get_intpte(l2e), pfn, (l2e_get_flags(l2e) & _PAGE_PSE) ? " (PSE)" : ""); if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || - (l2e_get_flags(l2e) & _PAGE_PSE) || - !mfn_valid(_mfn(mfn)) ) + (l2e_get_flags(l2e) & _PAGE_PSE) || !mfn_valid(_mfn(mfn)) ) return; l1t = map_domain_page(_mfn(mfn)); l1e = l1t[l1_table_offset(addr)]; unmap_domain_page(l1t); mfn = l1e_get_pfn(l1e); - pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ? - get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; - printk(" L1[0x%03lx] = %"PRIpte" %016lx\n", - l1_table_offset(addr), l1e_get_intpte(l1e), pfn); + pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid + ? get_gpfn_from_mfn(mfn) + : INVALID_M2P_ENTRY; + printk(" L1[0x%03lx] = %" PRIpte " %016lx\n", l1_table_offset(addr), + l1e_get_intpte(l1e), pfn); } void do_double_fault(struct cpu_user_regs *regs) @@ -251,7 +252,7 @@ void do_double_fault(struct cpu_user_regs *regs) console_force_unlock(); - asm ( "lsll %1, %0" : "=r" (cpu) : "rm" (PER_CPU_GDT_ENTRY << 3) ); + asm("lsll %1, %0" : "=r"(cpu) : "rm"(PER_CPU_GDT_ENTRY << 3)); /* Find information saved during fault and dump it to the console. */ printk("*** DOUBLE FAULT ***\n"); @@ -267,9 +268,10 @@ void do_double_fault(struct cpu_user_regs *regs) panic("DOUBLE FAULT -- system shutdown\n"); } -static unsigned int write_stub_trampoline( - unsigned char *stub, unsigned long stub_va, - unsigned long stack_bottom, unsigned long target_va) +static unsigned int write_stub_trampoline(unsigned char *stub, + unsigned long stub_va, + unsigned long stack_bottom, + unsigned long target_va) { /* movabsq %rax, stack_bottom - 8 */ stub[0] = 0x48; @@ -329,9 +331,8 @@ void subarch_percpu_traps_init(void) * start of the stubs. 
*/ wrmsrl(MSR_LSTAR, stub_va); - offset = write_stub_trampoline(stub_page + (stub_va & ~PAGE_MASK), - stub_va, stack_bottom, - (unsigned long)lstar_enter); + offset = write_stub_trampoline(stub_page + (stub_va & ~PAGE_MASK), stub_va, + stack_bottom, (unsigned long)lstar_enter); stub_va += offset; if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL || @@ -341,15 +342,14 @@ void subarch_percpu_traps_init(void) wrmsrl(MSR_IA32_SYSENTER_ESP, stack_bottom); wrmsrl(MSR_IA32_SYSENTER_EIP, IS_ENABLED(CONFIG_PV) ? (unsigned long)sysenter_entry : 0); - wrmsr(MSR_IA32_SYSENTER_CS, - IS_ENABLED(CONFIG_PV) ? __HYPERVISOR_CS : 0, 0); + wrmsr(MSR_IA32_SYSENTER_CS, IS_ENABLED(CONFIG_PV) ? __HYPERVISOR_CS : 0, + 0); } /* Trampoline for SYSCALL entry from compatibility mode. */ wrmsrl(MSR_CSTAR, stub_va); - offset += write_stub_trampoline(stub_page + (stub_va & ~PAGE_MASK), - stub_va, stack_bottom, - (unsigned long)cstar_enter); + offset += write_stub_trampoline(stub_page + (stub_va & ~PAGE_MASK), stub_va, + stack_bottom, (unsigned long)cstar_enter); /* Don't consume more than half of the stub space here. */ ASSERT(offset <= STUB_BUF_SIZE / 2); diff --git a/xen/arch/x86/x86_emulate.c b/xen/arch/x86/x86_emulate.c index b1dfc9f261..de9c508ce0 100644 --- a/xen/arch/x86/x86_emulate.c +++ b/xen/arch/x86/x86_emulate.c @@ -1,10 +1,10 @@ /****************************************************************************** * x86_emulate.c - * + * * Wrapper for generic x86 instruction decoder and emulator. - * + * * Copyright (c) 2008, Citrix Systems, Inc. - * + * * Authors: * Keir Fraser */ @@ -22,32 +22,35 @@ #undef cpuid #undef wbinvd -#define r(name) r ## name +#define r(name) r##name #define cpu_has_amd_erratum(nr) \ - cpu_has_amd_erratum(¤t_cpu_data, AMD_ERRATUM_##nr) - -#define get_stub(stb) ({ \ - BUILD_BUG_ON(STUB_BUF_SIZE / 2 < MAX_INST_LEN + 1); \ - ASSERT(!(stb).ptr); \ - (stb).addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2; \ - memset(((stb).ptr = map_domain_page(_mfn(this_cpu(stubs.mfn)))) + \ - ((stb).addr & ~PAGE_MASK), 0xcc, STUB_BUF_SIZE / 2); \ -}) -#define put_stub(stb) ({ \ - if ( (stb).ptr ) \ - { \ - unmap_domain_page((stb).ptr); \ - (stb).ptr = NULL; \ - } \ -}) + cpu_has_amd_erratum(¤t_cpu_data, AMD_ERRATUM_##nr) + +#define get_stub(stb) \ + ({ \ + BUILD_BUG_ON(STUB_BUF_SIZE / 2 < MAX_INST_LEN + 1); \ + ASSERT(!(stb).ptr); \ + (stb).addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2; \ + memset(((stb).ptr = map_domain_page(_mfn(this_cpu(stubs.mfn)))) + \ + ((stb).addr & ~PAGE_MASK), \ + 0xcc, STUB_BUF_SIZE / 2); \ + }) +#define put_stub(stb) \ + ({ \ + if ( (stb).ptr ) \ + { \ + unmap_domain_page((stb).ptr); \ + (stb).ptr = NULL; \ + } \ + }) #include "x86_emulate/x86_emulate.c" int x86emul_read_xcr(unsigned int reg, uint64_t *val, struct x86_emulate_ctxt *ctxt) { - switch ( reg ) + switch (reg) { case 0: *val = current->arch.xcr0; @@ -71,7 +74,7 @@ int x86emul_read_xcr(unsigned int reg, uint64_t *val, int x86emul_write_xcr(unsigned int reg, uint64_t val, struct x86_emulate_ctxt *ctxt) { - switch ( reg ) + switch (reg) { case 0: break; @@ -99,7 +102,7 @@ int x86emul_read_dr(unsigned int reg, unsigned long *val, /* HVM support requires a bit more plumbing before it will work. */ ASSERT(is_pv_vcpu(curr)); - switch ( reg ) + switch (reg) { case 0 ... 3: *val = curr->arch.dr[reg]; @@ -142,7 +145,7 @@ int x86emul_write_dr(unsigned int reg, unsigned long val, /* HVM support requires a bit more plumbing before it will work. 
*/ ASSERT(is_pv_vcpu(curr)); - switch ( set_debugreg(curr, reg, val) ) + switch (set_debugreg(curr, reg, val)) { case 0: return X86EMUL_OKAY; diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c index e3b7e8c396..99eaba312d 100644 --- a/xen/arch/x86/x86_emulate/x86_emulate.c +++ b/xen/arch/x86/x86_emulate/x86_emulate.c @@ -21,170 +21,172 @@ */ /* Operand sizes: 8-bit operands or specified/overridden size. */ -#define ByteOp (1<<0) /* 8-bit operands. */ +#define ByteOp (1 << 0) /* 8-bit operands. */ /* Destination operand type. */ -#define DstNone (0<<1) /* No destination operand. */ -#define DstImplicit (0<<1) /* Destination operand is implicit in the opcode. */ -#define DstBitBase (1<<1) /* Memory operand, bit string. */ -#define DstReg (2<<1) /* Register operand. */ -#define DstEax DstReg /* Register EAX (aka DstReg with no ModRM) */ -#define DstMem (3<<1) /* Memory operand. */ -#define DstMask (3<<1) +#define DstNone (0 << 1) /* No destination operand. */ +#define DstImplicit \ + (0 << 1) /* Destination operand is implicit in the opcode. */ +#define DstBitBase (1 << 1) /* Memory operand, bit string. */ +#define DstReg (2 << 1) /* Register operand. */ +#define DstEax DstReg /* Register EAX (aka DstReg with no ModRM) */ +#define DstMem (3 << 1) /* Memory operand. */ +#define DstMask (3 << 1) /* Source operand type. */ -#define SrcNone (0<<3) /* No source operand. */ -#define SrcImplicit (0<<3) /* Source operand is implicit in the opcode. */ -#define SrcReg (1<<3) /* Register operand. */ -#define SrcEax SrcReg /* Register EAX (aka SrcReg with no ModRM) */ -#define SrcMem (2<<3) /* Memory operand. */ -#define SrcMem16 (3<<3) /* Memory operand (16-bit). */ -#define SrcImm (4<<3) /* Immediate operand. */ -#define SrcImmByte (5<<3) /* 8-bit sign-extended immediate operand. */ -#define SrcImm16 (6<<3) /* 16-bit zero-extended immediate operand. */ -#define SrcMask (7<<3) +#define SrcNone (0 << 3) /* No source operand. */ +#define SrcImplicit (0 << 3) /* Source operand is implicit in the opcode. */ +#define SrcReg (1 << 3) /* Register operand. */ +#define SrcEax SrcReg /* Register EAX (aka SrcReg with no ModRM) */ +#define SrcMem (2 << 3) /* Memory operand. */ +#define SrcMem16 (3 << 3) /* Memory operand (16-bit). */ +#define SrcImm (4 << 3) /* Immediate operand. */ +#define SrcImmByte (5 << 3) /* 8-bit sign-extended immediate operand. */ +#define SrcImm16 (6 << 3) /* 16-bit zero-extended immediate operand. */ +#define SrcMask (7 << 3) /* Generic ModRM decode. */ -#define ModRM (1<<6) +#define ModRM (1 << 6) /* vSIB addressing mode (0f38 extension opcodes only), aliasing ModRM. */ -#define vSIB (1<<6) +#define vSIB (1 << 6) /* Destination is only written; never read. */ -#define Mov (1<<7) +#define Mov (1 << 7) /* VEX/EVEX (SIMD only): 2nd source operand unused (must be all ones) */ -#define TwoOp Mov +#define TwoOp Mov /* All operands are implicit in the opcode. 
*/ -#define ImplicitOps (DstImplicit|SrcImplicit) +#define ImplicitOps (DstImplicit | SrcImplicit) typedef uint8_t opcode_desc_t; static const opcode_desc_t opcode_table[256] = { /* 0x00 - 0x07 */ - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, - ByteOp|DstEax|SrcImm, DstEax|SrcImm, ImplicitOps|Mov, ImplicitOps|Mov, + ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, + ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, + ByteOp | DstEax | SrcImm, DstEax | SrcImm, ImplicitOps | Mov, + ImplicitOps | Mov, /* 0x08 - 0x0F */ - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, - ByteOp|DstEax|SrcImm, DstEax|SrcImm, ImplicitOps|Mov, 0, + ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, + ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, + ByteOp | DstEax | SrcImm, DstEax | SrcImm, ImplicitOps | Mov, 0, /* 0x10 - 0x17 */ - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, - ByteOp|DstEax|SrcImm, DstEax|SrcImm, ImplicitOps|Mov, ImplicitOps|Mov, + ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, + ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, + ByteOp | DstEax | SrcImm, DstEax | SrcImm, ImplicitOps | Mov, + ImplicitOps | Mov, /* 0x18 - 0x1F */ - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, - ByteOp|DstEax|SrcImm, DstEax|SrcImm, ImplicitOps|Mov, ImplicitOps|Mov, + ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, + ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, + ByteOp | DstEax | SrcImm, DstEax | SrcImm, ImplicitOps | Mov, + ImplicitOps | Mov, /* 0x20 - 0x27 */ - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, - ByteOp|DstEax|SrcImm, DstEax|SrcImm, 0, ImplicitOps, + ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, + ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, + ByteOp | DstEax | SrcImm, DstEax | SrcImm, 0, ImplicitOps, /* 0x28 - 0x2F */ - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, - ByteOp|DstEax|SrcImm, DstEax|SrcImm, 0, ImplicitOps, + ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, + ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, + ByteOp | DstEax | SrcImm, DstEax | SrcImm, 0, ImplicitOps, /* 0x30 - 0x37 */ - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, - ByteOp|DstEax|SrcImm, DstEax|SrcImm, 0, ImplicitOps, + ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, + ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, + ByteOp | DstEax | SrcImm, DstEax | SrcImm, 0, ImplicitOps, /* 0x38 - 0x3F */ - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, - ByteOp|DstEax|SrcImm, DstEax|SrcImm, 0, ImplicitOps, + ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, + ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, + ByteOp | DstEax | SrcImm, DstEax | SrcImm, 0, ImplicitOps, /* 0x40 - 0x4F */ - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, + 
ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, + ImplicitOps, /* 0x50 - 0x5F */ - ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, - ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, - ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, - ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, + ImplicitOps | Mov, ImplicitOps | Mov, ImplicitOps | Mov, ImplicitOps | Mov, + ImplicitOps | Mov, ImplicitOps | Mov, ImplicitOps | Mov, ImplicitOps | Mov, + ImplicitOps | Mov, ImplicitOps | Mov, ImplicitOps | Mov, ImplicitOps | Mov, + ImplicitOps | Mov, ImplicitOps | Mov, ImplicitOps | Mov, ImplicitOps | Mov, /* 0x60 - 0x67 */ - ImplicitOps, ImplicitOps, DstReg|SrcMem|ModRM, DstReg|SrcNone|ModRM|Mov, - 0, 0, 0, 0, + ImplicitOps, ImplicitOps, DstReg | SrcMem | ModRM, + DstReg | SrcNone | ModRM | Mov, 0, 0, 0, 0, /* 0x68 - 0x6F */ - DstImplicit|SrcImm|Mov, DstReg|SrcImm|ModRM|Mov, - DstImplicit|SrcImmByte|Mov, DstReg|SrcImmByte|ModRM|Mov, - ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, + DstImplicit | SrcImm | Mov, DstReg | SrcImm | ModRM | Mov, + DstImplicit | SrcImmByte | Mov, DstReg | SrcImmByte | ModRM | Mov, + ImplicitOps | Mov, ImplicitOps | Mov, ImplicitOps | Mov, ImplicitOps | Mov, /* 0x70 - 0x77 */ - DstImplicit|SrcImmByte, DstImplicit|SrcImmByte, - DstImplicit|SrcImmByte, DstImplicit|SrcImmByte, - DstImplicit|SrcImmByte, DstImplicit|SrcImmByte, - DstImplicit|SrcImmByte, DstImplicit|SrcImmByte, + DstImplicit | SrcImmByte, DstImplicit | SrcImmByte, + DstImplicit | SrcImmByte, DstImplicit | SrcImmByte, + DstImplicit | SrcImmByte, DstImplicit | SrcImmByte, + DstImplicit | SrcImmByte, DstImplicit | SrcImmByte, /* 0x78 - 0x7F */ - DstImplicit|SrcImmByte, DstImplicit|SrcImmByte, - DstImplicit|SrcImmByte, DstImplicit|SrcImmByte, - DstImplicit|SrcImmByte, DstImplicit|SrcImmByte, - DstImplicit|SrcImmByte, DstImplicit|SrcImmByte, + DstImplicit | SrcImmByte, DstImplicit | SrcImmByte, + DstImplicit | SrcImmByte, DstImplicit | SrcImmByte, + DstImplicit | SrcImmByte, DstImplicit | SrcImmByte, + DstImplicit | SrcImmByte, DstImplicit | SrcImmByte, /* 0x80 - 0x87 */ - ByteOp|DstMem|SrcImm|ModRM, DstMem|SrcImm|ModRM, - ByteOp|DstMem|SrcImm|ModRM, DstMem|SrcImmByte|ModRM, - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, + ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM, + ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM, + ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, + ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, /* 0x88 - 0x8F */ - ByteOp|DstMem|SrcReg|ModRM|Mov, DstMem|SrcReg|ModRM|Mov, - ByteOp|DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, - DstMem|SrcReg|ModRM|Mov, DstReg|SrcNone|ModRM, - DstReg|SrcMem16|ModRM|Mov, DstMem|SrcNone|ModRM|Mov, + ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov, + ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, + DstMem | SrcReg | ModRM | Mov, DstReg | SrcNone | ModRM, + DstReg | SrcMem16 | ModRM | Mov, DstMem | SrcNone | ModRM | Mov, /* 0x90 - 0x97 */ - DstImplicit|SrcEax, DstImplicit|SrcEax, - DstImplicit|SrcEax, DstImplicit|SrcEax, - DstImplicit|SrcEax, DstImplicit|SrcEax, - DstImplicit|SrcEax, DstImplicit|SrcEax, + DstImplicit | SrcEax, DstImplicit | SrcEax, DstImplicit | SrcEax, + DstImplicit | SrcEax, DstImplicit | SrcEax, DstImplicit | SrcEax, + DstImplicit | SrcEax, DstImplicit | SrcEax, /* 0x98 - 0x9F */ - 
ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, - ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps, ImplicitOps, + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps | Mov, + ImplicitOps | Mov, ImplicitOps, ImplicitOps, /* 0xA0 - 0xA7 */ - ByteOp|DstEax|SrcMem|Mov, DstEax|SrcMem|Mov, - ByteOp|DstMem|SrcEax|Mov, DstMem|SrcEax|Mov, - ByteOp|ImplicitOps|Mov, ImplicitOps|Mov, - ByteOp|ImplicitOps, ImplicitOps, + ByteOp | DstEax | SrcMem | Mov, DstEax | SrcMem | Mov, + ByteOp | DstMem | SrcEax | Mov, DstMem | SrcEax | Mov, + ByteOp | ImplicitOps | Mov, ImplicitOps | Mov, ByteOp | ImplicitOps, + ImplicitOps, /* 0xA8 - 0xAF */ - ByteOp|DstEax|SrcImm, DstEax|SrcImm, - ByteOp|DstImplicit|SrcEax|Mov, DstImplicit|SrcEax|Mov, - ByteOp|DstEax|SrcImplicit|Mov, DstEax|SrcImplicit|Mov, - ByteOp|DstImplicit|SrcEax, DstImplicit|SrcEax, + ByteOp | DstEax | SrcImm, DstEax | SrcImm, + ByteOp | DstImplicit | SrcEax | Mov, DstImplicit | SrcEax | Mov, + ByteOp | DstEax | SrcImplicit | Mov, DstEax | SrcImplicit | Mov, + ByteOp | DstImplicit | SrcEax, DstImplicit | SrcEax, /* 0xB0 - 0xB7 */ - ByteOp|DstReg|SrcImm|Mov, ByteOp|DstReg|SrcImm|Mov, - ByteOp|DstReg|SrcImm|Mov, ByteOp|DstReg|SrcImm|Mov, - ByteOp|DstReg|SrcImm|Mov, ByteOp|DstReg|SrcImm|Mov, - ByteOp|DstReg|SrcImm|Mov, ByteOp|DstReg|SrcImm|Mov, + ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov, + ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov, + ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov, + ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov, /* 0xB8 - 0xBF */ - DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, - DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, + DstReg | SrcImm | Mov, DstReg | SrcImm | Mov, DstReg | SrcImm | Mov, + DstReg | SrcImm | Mov, DstReg | SrcImm | Mov, DstReg | SrcImm | Mov, + DstReg | SrcImm | Mov, DstReg | SrcImm | Mov, /* 0xC0 - 0xC7 */ - ByteOp|DstMem|SrcImm|ModRM, DstMem|SrcImmByte|ModRM, - DstImplicit|SrcImm16, ImplicitOps, - DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, - ByteOp|DstMem|SrcImm|ModRM|Mov, DstMem|SrcImm|ModRM|Mov, + ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM, + DstImplicit | SrcImm16, ImplicitOps, DstReg | SrcMem | ModRM | Mov, + DstReg | SrcMem | ModRM | Mov, ByteOp | DstMem | SrcImm | ModRM | Mov, + DstMem | SrcImm | ModRM | Mov, /* 0xC8 - 0xCF */ - DstImplicit|SrcImm16, ImplicitOps, DstImplicit|SrcImm16, ImplicitOps, - ImplicitOps, DstImplicit|SrcImmByte, ImplicitOps, ImplicitOps, + DstImplicit | SrcImm16, ImplicitOps, DstImplicit | SrcImm16, ImplicitOps, + ImplicitOps, DstImplicit | SrcImmByte, ImplicitOps, ImplicitOps, /* 0xD0 - 0xD7 */ - ByteOp|DstMem|SrcImplicit|ModRM, DstMem|SrcImplicit|ModRM, - ByteOp|DstMem|SrcImplicit|ModRM, DstMem|SrcImplicit|ModRM, - DstImplicit|SrcImmByte, DstImplicit|SrcImmByte, ImplicitOps, ImplicitOps, + ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM, + ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM, + DstImplicit | SrcImmByte, DstImplicit | SrcImmByte, ImplicitOps, + ImplicitOps, /* 0xD8 - 0xDF */ - ImplicitOps|ModRM, ImplicitOps|ModRM|Mov, - ImplicitOps|ModRM, ImplicitOps|ModRM|Mov, - ImplicitOps|ModRM, ImplicitOps|ModRM|Mov, - DstImplicit|SrcMem16|ModRM, ImplicitOps|ModRM|Mov, + ImplicitOps | ModRM, ImplicitOps | ModRM | Mov, ImplicitOps | ModRM, + ImplicitOps | ModRM | Mov, ImplicitOps | ModRM, ImplicitOps | ModRM | Mov, + DstImplicit | SrcMem16 | ModRM, ImplicitOps | ModRM | Mov, /* 
0xE0 - 0xE7 */ - DstImplicit|SrcImmByte, DstImplicit|SrcImmByte, - DstImplicit|SrcImmByte, DstImplicit|SrcImmByte, - DstEax|SrcImmByte, DstEax|SrcImmByte, - DstImplicit|SrcImmByte, DstImplicit|SrcImmByte, + DstImplicit | SrcImmByte, DstImplicit | SrcImmByte, + DstImplicit | SrcImmByte, DstImplicit | SrcImmByte, DstEax | SrcImmByte, + DstEax | SrcImmByte, DstImplicit | SrcImmByte, DstImplicit | SrcImmByte, /* 0xE8 - 0xEF */ - DstImplicit|SrcImm|Mov, DstImplicit|SrcImm, - ImplicitOps, DstImplicit|SrcImmByte, - DstEax|SrcImplicit, DstEax|SrcImplicit, ImplicitOps, ImplicitOps, + DstImplicit | SrcImm | Mov, DstImplicit | SrcImm, ImplicitOps, + DstImplicit | SrcImmByte, DstEax | SrcImplicit, DstEax | SrcImplicit, + ImplicitOps, ImplicitOps, /* 0xF0 - 0xF7 */ - 0, ImplicitOps, 0, 0, - ImplicitOps, ImplicitOps, ByteOp|ModRM, ModRM, + 0, ImplicitOps, 0, 0, ImplicitOps, ImplicitOps, ByteOp | ModRM, ModRM, /* 0xF8 - 0xFF */ - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, - ImplicitOps, ImplicitOps, ByteOp|DstMem|SrcNone|ModRM, DstMem|SrcNone|ModRM -}; + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, + ImplicitOps, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM}; -enum simd_opsize { +enum simd_opsize +{ simd_none, /* @@ -249,7 +251,8 @@ enum simd_opsize { }; typedef uint8_t simd_opsize_t; -enum disp8scale { +enum disp8scale +{ /* Values 0 ... 4 are explicit sizes. */ d8s_bw = 5, d8s_dq, @@ -266,124 +269,124 @@ enum disp8scale { }; typedef uint8_t disp8scale_t; -static const struct twobyte_table { +static const struct twobyte_table +{ opcode_desc_t desc; - simd_opsize_t size:4; - disp8scale_t d8s:4; + simd_opsize_t size : 4; + disp8scale_t d8s : 4; } twobyte_table[256] = { - [0x00] = { ModRM }, - [0x01] = { ImplicitOps|ModRM }, - [0x02] = { DstReg|SrcMem16|ModRM }, - [0x03] = { DstReg|SrcMem16|ModRM }, - [0x05] = { ImplicitOps }, - [0x06] = { ImplicitOps }, - [0x07] = { ImplicitOps }, - [0x08] = { ImplicitOps }, - [0x09] = { ImplicitOps }, - [0x0b] = { ImplicitOps }, - [0x0d] = { ImplicitOps|ModRM }, - [0x0e] = { ImplicitOps }, - [0x0f] = { ModRM|SrcImmByte }, - [0x10] = { DstImplicit|SrcMem|ModRM|Mov, simd_any_fp, d8s_vl }, - [0x11] = { DstMem|SrcImplicit|ModRM|Mov, simd_any_fp, d8s_vl }, - [0x12] = { DstImplicit|SrcMem|ModRM|Mov, simd_other }, - [0x13] = { DstMem|SrcImplicit|ModRM|Mov, simd_other }, - [0x14 ... 0x15] = { DstImplicit|SrcMem|ModRM, simd_packed_fp, d8s_vl }, - [0x16] = { DstImplicit|SrcMem|ModRM|Mov, simd_other }, - [0x17] = { DstMem|SrcImplicit|ModRM|Mov, simd_other }, - [0x18 ... 0x1f] = { ImplicitOps|ModRM }, - [0x20 ... 0x21] = { DstMem|SrcImplicit|ModRM }, - [0x22 ... 0x23] = { DstImplicit|SrcMem|ModRM }, - [0x28] = { DstImplicit|SrcMem|ModRM|Mov, simd_packed_fp, d8s_vl }, - [0x29] = { DstMem|SrcImplicit|ModRM|Mov, simd_packed_fp, d8s_vl }, - [0x2a] = { DstImplicit|SrcMem|ModRM|Mov, simd_other }, - [0x2b] = { DstMem|SrcImplicit|ModRM|Mov, simd_any_fp, d8s_vl }, - [0x2c ... 0x2d] = { DstImplicit|SrcMem|ModRM|Mov, simd_other }, - [0x2e ... 0x2f] = { ImplicitOps|ModRM|TwoOp, simd_none, d8s_dq }, - [0x30 ... 0x35] = { ImplicitOps }, - [0x37] = { ImplicitOps }, - [0x38] = { DstReg|SrcMem|ModRM }, - [0x3a] = { DstReg|SrcImmByte|ModRM }, - [0x40 ... 0x4f] = { DstReg|SrcMem|ModRM|Mov }, - [0x50] = { DstReg|SrcImplicit|ModRM|Mov }, - [0x51] = { DstImplicit|SrcMem|ModRM|TwoOp, simd_any_fp, d8s_vl }, - [0x52 ... 0x53] = { DstImplicit|SrcMem|ModRM|TwoOp, simd_single_fp }, - [0x54 ... 0x57] = { DstImplicit|SrcMem|ModRM, simd_packed_fp, d8s_vl }, - [0x58 ... 
0x59] = { DstImplicit|SrcMem|ModRM, simd_any_fp, d8s_vl }, - [0x5a ... 0x5b] = { DstImplicit|SrcMem|ModRM|Mov, simd_other }, - [0x5c ... 0x5f] = { DstImplicit|SrcMem|ModRM, simd_any_fp, d8s_vl }, - [0x60 ... 0x62] = { DstImplicit|SrcMem|ModRM, simd_other }, - [0x63 ... 0x67] = { DstImplicit|SrcMem|ModRM, simd_packed_int, d8s_vl }, - [0x68 ... 0x6a] = { DstImplicit|SrcMem|ModRM, simd_other }, - [0x6b ... 0x6d] = { DstImplicit|SrcMem|ModRM, simd_packed_int }, - [0x6e] = { DstImplicit|SrcMem|ModRM|Mov, simd_none, d8s_dq64 }, - [0x6f] = { DstImplicit|SrcMem|ModRM|Mov, simd_packed_int, d8s_vl }, - [0x70] = { SrcImmByte|ModRM|TwoOp, simd_other }, - [0x71 ... 0x73] = { DstImplicit|SrcImmByte|ModRM, simd_none, d8s_vl }, - [0x74 ... 0x76] = { DstImplicit|SrcMem|ModRM, simd_packed_int, d8s_vl }, - [0x77] = { DstImplicit|SrcNone }, - [0x78] = { ImplicitOps|ModRM }, - [0x79] = { DstReg|SrcMem|ModRM, simd_packed_int }, - [0x7c ... 0x7d] = { DstImplicit|SrcMem|ModRM, simd_other }, - [0x7e] = { DstMem|SrcImplicit|ModRM|Mov, simd_none, d8s_dq64 }, - [0x7f] = { DstMem|SrcImplicit|ModRM|Mov, simd_packed_int, d8s_vl }, - [0x80 ... 0x8f] = { DstImplicit|SrcImm }, - [0x90 ... 0x9f] = { ByteOp|DstMem|SrcNone|ModRM|Mov }, - [0xa0 ... 0xa1] = { ImplicitOps|Mov }, - [0xa2] = { ImplicitOps }, - [0xa3] = { DstBitBase|SrcReg|ModRM }, - [0xa4] = { DstMem|SrcImmByte|ModRM }, - [0xa5] = { DstMem|SrcReg|ModRM }, - [0xa6 ... 0xa7] = { ModRM }, - [0xa8 ... 0xa9] = { ImplicitOps|Mov }, - [0xaa] = { ImplicitOps }, - [0xab] = { DstBitBase|SrcReg|ModRM }, - [0xac] = { DstMem|SrcImmByte|ModRM }, - [0xad] = { DstMem|SrcReg|ModRM }, - [0xae] = { ImplicitOps|ModRM }, - [0xaf] = { DstReg|SrcMem|ModRM }, - [0xb0] = { ByteOp|DstMem|SrcReg|ModRM }, - [0xb1] = { DstMem|SrcReg|ModRM }, - [0xb2] = { DstReg|SrcMem|ModRM|Mov }, - [0xb3] = { DstBitBase|SrcReg|ModRM }, - [0xb4 ... 0xb5] = { DstReg|SrcMem|ModRM|Mov }, - [0xb6] = { ByteOp|DstReg|SrcMem|ModRM|Mov }, - [0xb7] = { DstReg|SrcMem16|ModRM|Mov }, - [0xb8] = { DstReg|SrcMem|ModRM }, - [0xb9] = { ModRM }, - [0xba] = { DstBitBase|SrcImmByte|ModRM }, - [0xbb] = { DstBitBase|SrcReg|ModRM }, - [0xbc ... 0xbd] = { DstReg|SrcMem|ModRM }, - [0xbe] = { ByteOp|DstReg|SrcMem|ModRM|Mov }, - [0xbf] = { DstReg|SrcMem16|ModRM|Mov }, - [0xc0] = { ByteOp|DstMem|SrcReg|ModRM }, - [0xc1] = { DstMem|SrcReg|ModRM }, - [0xc2] = { DstImplicit|SrcImmByte|ModRM, simd_any_fp, d8s_vl }, - [0xc3] = { DstMem|SrcReg|ModRM|Mov }, - [0xc4] = { DstReg|SrcImmByte|ModRM, simd_packed_int }, - [0xc5] = { DstReg|SrcImmByte|ModRM|Mov }, - [0xc6] = { DstImplicit|SrcImmByte|ModRM, simd_packed_fp, d8s_vl }, - [0xc7] = { ImplicitOps|ModRM }, - [0xc8 ... 0xcf] = { ImplicitOps }, - [0xd0] = { DstImplicit|SrcMem|ModRM, simd_other }, - [0xd1 ... 0xd3] = { DstImplicit|SrcMem|ModRM, simd_128, 4 }, - [0xd4 ... 0xd5] = { DstImplicit|SrcMem|ModRM, simd_packed_int, d8s_vl }, - [0xd6] = { DstMem|SrcImplicit|ModRM|Mov, simd_other, 3 }, - [0xd7] = { DstReg|SrcImplicit|ModRM|Mov }, - [0xd8 ... 0xdf] = { DstImplicit|SrcMem|ModRM, simd_packed_int, d8s_vl }, - [0xe0] = { DstImplicit|SrcMem|ModRM, simd_packed_int, d8s_vl }, - [0xe1 ... 0xe2] = { DstImplicit|SrcMem|ModRM, simd_128, 4 }, - [0xe3 ... 0xe5] = { DstImplicit|SrcMem|ModRM, simd_packed_int, d8s_vl }, - [0xe6] = { DstImplicit|SrcMem|ModRM|Mov, simd_other }, - [0xe7] = { DstMem|SrcImplicit|ModRM|Mov, simd_packed_int, d8s_vl }, - [0xe8 ... 0xef] = { DstImplicit|SrcMem|ModRM, simd_packed_int, d8s_vl }, - [0xf0] = { DstImplicit|SrcMem|ModRM|Mov, simd_other }, - [0xf1 ... 
0xf3] = { DstImplicit|SrcMem|ModRM, simd_128, 4 }, - [0xf4 ... 0xf6] = { DstImplicit|SrcMem|ModRM, simd_packed_int, d8s_vl }, - [0xf7] = { DstMem|SrcMem|ModRM|Mov, simd_packed_int }, - [0xf8 ... 0xfe] = { DstImplicit|SrcMem|ModRM, simd_packed_int, d8s_vl }, - [0xff] = { ModRM } -}; + [0x00] = {ModRM}, + [0x01] = {ImplicitOps | ModRM}, + [0x02] = {DstReg | SrcMem16 | ModRM}, + [0x03] = {DstReg | SrcMem16 | ModRM}, + [0x05] = {ImplicitOps}, + [0x06] = {ImplicitOps}, + [0x07] = {ImplicitOps}, + [0x08] = {ImplicitOps}, + [0x09] = {ImplicitOps}, + [0x0b] = {ImplicitOps}, + [0x0d] = {ImplicitOps | ModRM}, + [0x0e] = {ImplicitOps}, + [0x0f] = {ModRM | SrcImmByte}, + [0x10] = {DstImplicit | SrcMem | ModRM | Mov, simd_any_fp, d8s_vl}, + [0x11] = {DstMem | SrcImplicit | ModRM | Mov, simd_any_fp, d8s_vl}, + [0x12] = {DstImplicit | SrcMem | ModRM | Mov, simd_other}, + [0x13] = {DstMem | SrcImplicit | ModRM | Mov, simd_other}, + [0x14 ... 0x15] = {DstImplicit | SrcMem | ModRM, simd_packed_fp, d8s_vl}, + [0x16] = {DstImplicit | SrcMem | ModRM | Mov, simd_other}, + [0x17] = {DstMem | SrcImplicit | ModRM | Mov, simd_other}, + [0x18 ... 0x1f] = {ImplicitOps | ModRM}, + [0x20 ... 0x21] = {DstMem | SrcImplicit | ModRM}, + [0x22 ... 0x23] = {DstImplicit | SrcMem | ModRM}, + [0x28] = {DstImplicit | SrcMem | ModRM | Mov, simd_packed_fp, d8s_vl}, + [0x29] = {DstMem | SrcImplicit | ModRM | Mov, simd_packed_fp, d8s_vl}, + [0x2a] = {DstImplicit | SrcMem | ModRM | Mov, simd_other}, + [0x2b] = {DstMem | SrcImplicit | ModRM | Mov, simd_any_fp, d8s_vl}, + [0x2c ... 0x2d] = {DstImplicit | SrcMem | ModRM | Mov, simd_other}, + [0x2e ... 0x2f] = {ImplicitOps | ModRM | TwoOp, simd_none, d8s_dq}, + [0x30 ... 0x35] = {ImplicitOps}, + [0x37] = {ImplicitOps}, + [0x38] = {DstReg | SrcMem | ModRM}, + [0x3a] = {DstReg | SrcImmByte | ModRM}, + [0x40 ... 0x4f] = {DstReg | SrcMem | ModRM | Mov}, + [0x50] = {DstReg | SrcImplicit | ModRM | Mov}, + [0x51] = {DstImplicit | SrcMem | ModRM | TwoOp, simd_any_fp, d8s_vl}, + [0x52 ... 0x53] = {DstImplicit | SrcMem | ModRM | TwoOp, simd_single_fp}, + [0x54 ... 0x57] = {DstImplicit | SrcMem | ModRM, simd_packed_fp, d8s_vl}, + [0x58 ... 0x59] = {DstImplicit | SrcMem | ModRM, simd_any_fp, d8s_vl}, + [0x5a ... 0x5b] = {DstImplicit | SrcMem | ModRM | Mov, simd_other}, + [0x5c ... 0x5f] = {DstImplicit | SrcMem | ModRM, simd_any_fp, d8s_vl}, + [0x60 ... 0x62] = {DstImplicit | SrcMem | ModRM, simd_other}, + [0x63 ... 0x67] = {DstImplicit | SrcMem | ModRM, simd_packed_int, d8s_vl}, + [0x68 ... 0x6a] = {DstImplicit | SrcMem | ModRM, simd_other}, + [0x6b ... 0x6d] = {DstImplicit | SrcMem | ModRM, simd_packed_int}, + [0x6e] = {DstImplicit | SrcMem | ModRM | Mov, simd_none, d8s_dq64}, + [0x6f] = {DstImplicit | SrcMem | ModRM | Mov, simd_packed_int, d8s_vl}, + [0x70] = {SrcImmByte | ModRM | TwoOp, simd_other}, + [0x71 ... 0x73] = {DstImplicit | SrcImmByte | ModRM, simd_none, d8s_vl}, + [0x74 ... 0x76] = {DstImplicit | SrcMem | ModRM, simd_packed_int, d8s_vl}, + [0x77] = {DstImplicit | SrcNone}, + [0x78] = {ImplicitOps | ModRM}, + [0x79] = {DstReg | SrcMem | ModRM, simd_packed_int}, + [0x7c ... 0x7d] = {DstImplicit | SrcMem | ModRM, simd_other}, + [0x7e] = {DstMem | SrcImplicit | ModRM | Mov, simd_none, d8s_dq64}, + [0x7f] = {DstMem | SrcImplicit | ModRM | Mov, simd_packed_int, d8s_vl}, + [0x80 ... 0x8f] = {DstImplicit | SrcImm}, + [0x90 ... 0x9f] = {ByteOp | DstMem | SrcNone | ModRM | Mov}, + [0xa0 ... 
0xa1] = {ImplicitOps | Mov}, + [0xa2] = {ImplicitOps}, + [0xa3] = {DstBitBase | SrcReg | ModRM}, + [0xa4] = {DstMem | SrcImmByte | ModRM}, + [0xa5] = {DstMem | SrcReg | ModRM}, + [0xa6 ... 0xa7] = {ModRM}, + [0xa8 ... 0xa9] = {ImplicitOps | Mov}, + [0xaa] = {ImplicitOps}, + [0xab] = {DstBitBase | SrcReg | ModRM}, + [0xac] = {DstMem | SrcImmByte | ModRM}, + [0xad] = {DstMem | SrcReg | ModRM}, + [0xae] = {ImplicitOps | ModRM}, + [0xaf] = {DstReg | SrcMem | ModRM}, + [0xb0] = {ByteOp | DstMem | SrcReg | ModRM}, + [0xb1] = {DstMem | SrcReg | ModRM}, + [0xb2] = {DstReg | SrcMem | ModRM | Mov}, + [0xb3] = {DstBitBase | SrcReg | ModRM}, + [0xb4 ... 0xb5] = {DstReg | SrcMem | ModRM | Mov}, + [0xb6] = {ByteOp | DstReg | SrcMem | ModRM | Mov}, + [0xb7] = {DstReg | SrcMem16 | ModRM | Mov}, + [0xb8] = {DstReg | SrcMem | ModRM}, + [0xb9] = {ModRM}, + [0xba] = {DstBitBase | SrcImmByte | ModRM}, + [0xbb] = {DstBitBase | SrcReg | ModRM}, + [0xbc ... 0xbd] = {DstReg | SrcMem | ModRM}, + [0xbe] = {ByteOp | DstReg | SrcMem | ModRM | Mov}, + [0xbf] = {DstReg | SrcMem16 | ModRM | Mov}, + [0xc0] = {ByteOp | DstMem | SrcReg | ModRM}, + [0xc1] = {DstMem | SrcReg | ModRM}, + [0xc2] = {DstImplicit | SrcImmByte | ModRM, simd_any_fp, d8s_vl}, + [0xc3] = {DstMem | SrcReg | ModRM | Mov}, + [0xc4] = {DstReg | SrcImmByte | ModRM, simd_packed_int}, + [0xc5] = {DstReg | SrcImmByte | ModRM | Mov}, + [0xc6] = {DstImplicit | SrcImmByte | ModRM, simd_packed_fp, d8s_vl}, + [0xc7] = {ImplicitOps | ModRM}, + [0xc8 ... 0xcf] = {ImplicitOps}, + [0xd0] = {DstImplicit | SrcMem | ModRM, simd_other}, + [0xd1 ... 0xd3] = {DstImplicit | SrcMem | ModRM, simd_128, 4}, + [0xd4 ... 0xd5] = {DstImplicit | SrcMem | ModRM, simd_packed_int, d8s_vl}, + [0xd6] = {DstMem | SrcImplicit | ModRM | Mov, simd_other, 3}, + [0xd7] = {DstReg | SrcImplicit | ModRM | Mov}, + [0xd8 ... 0xdf] = {DstImplicit | SrcMem | ModRM, simd_packed_int, d8s_vl}, + [0xe0] = {DstImplicit | SrcMem | ModRM, simd_packed_int, d8s_vl}, + [0xe1 ... 0xe2] = {DstImplicit | SrcMem | ModRM, simd_128, 4}, + [0xe3 ... 0xe5] = {DstImplicit | SrcMem | ModRM, simd_packed_int, d8s_vl}, + [0xe6] = {DstImplicit | SrcMem | ModRM | Mov, simd_other}, + [0xe7] = {DstMem | SrcImplicit | ModRM | Mov, simd_packed_int, d8s_vl}, + [0xe8 ... 0xef] = {DstImplicit | SrcMem | ModRM, simd_packed_int, d8s_vl}, + [0xf0] = {DstImplicit | SrcMem | ModRM | Mov, simd_other}, + [0xf1 ... 0xf3] = {DstImplicit | SrcMem | ModRM, simd_128, 4}, + [0xf4 ... 0xf6] = {DstImplicit | SrcMem | ModRM, simd_packed_int, d8s_vl}, + [0xf7] = {DstMem | SrcMem | ModRM | Mov, simd_packed_int}, + [0xf8 ... 
0xfe] = {DstImplicit | SrcMem | ModRM, simd_packed_int, d8s_vl}, + [0xff] = {ModRM}}; /* * The next two tables are indexed by high opcode extension byte (the one @@ -393,30 +396,21 @@ static const struct twobyte_table { static const uint16_t _3dnow_table[16] = { [0x0] = (1 << 0xd) /* pi2fd */, [0x1] = (1 << 0xd) /* pf2id */, - [0x9] = (1 << 0x0) /* pfcmpge */ | - (1 << 0x4) /* pfmin */ | - (1 << 0x6) /* pfrcp */ | - (1 << 0x7) /* pfrsqrt */ | - (1 << 0xa) /* pfsub */ | - (1 << 0xe) /* pfadd */, - [0xa] = (1 << 0x0) /* pfcmpgt */ | - (1 << 0x4) /* pfmax */ | - (1 << 0x6) /* pfrcpit1 */ | - (1 << 0x7) /* pfrsqit1 */ | - (1 << 0xa) /* pfsubr */ | - (1 << 0xe) /* pfacc */, - [0xb] = (1 << 0x0) /* pfcmpeq */ | - (1 << 0x4) /* pfmul */ | - (1 << 0x6) /* pfrcpit2 */ | - (1 << 0x7) /* pmulhrw */ | + [0x9] = (1 << 0x0) /* pfcmpge */ | (1 << 0x4) /* pfmin */ | + (1 << 0x6) /* pfrcp */ | (1 << 0x7) /* pfrsqrt */ | + (1 << 0xa) /* pfsub */ | (1 << 0xe) /* pfadd */, + [0xa] = (1 << 0x0) /* pfcmpgt */ | (1 << 0x4) /* pfmax */ | + (1 << 0x6) /* pfrcpit1 */ | (1 << 0x7) /* pfrsqit1 */ | + (1 << 0xa) /* pfsubr */ | (1 << 0xe) /* pfacc */, + [0xb] = (1 << 0x0) /* pfcmpeq */ | (1 << 0x4) /* pfmul */ | + (1 << 0x6) /* pfrcpit2 */ | (1 << 0x7) /* pmulhrw */ | (1 << 0xf) /* pavgusb */, }; static const uint16_t _3dnow_ext_table[16] = { [0x0] = (1 << 0xc) /* pi2fw */, [0x1] = (1 << 0xc) /* pf2iw */, - [0x8] = (1 << 0xa) /* pfnacc */ | - (1 << 0xe) /* pfpnacc */, + [0x8] = (1 << 0xa) /* pfnacc */ | (1 << 0xe) /* pfpnacc */, [0xb] = (1 << 0xb) /* pswapd */, }; @@ -425,167 +419,171 @@ static const uint16_t _3dnow_ext_table[16] = { * (one of which possibly also allowing to be a memory one). The named * operand counts do not include any immediate operands. */ -static const struct ext0f38_table { - uint8_t simd_size:5; - uint8_t to_mem:1; - uint8_t two_op:1; - uint8_t vsib:1; - disp8scale_t d8s:4; +static const struct ext0f38_table +{ + uint8_t simd_size : 5; + uint8_t to_mem : 1; + uint8_t two_op : 1; + uint8_t vsib : 1; + disp8scale_t d8s : 4; } ext0f38_table[256] = { - [0x00 ... 0x0b] = { .simd_size = simd_packed_int }, - [0x0c ... 0x0f] = { .simd_size = simd_packed_fp }, - [0x10 ... 0x12] = { .simd_size = simd_packed_int, .d8s = d8s_vl }, - [0x13] = { .simd_size = simd_other, .two_op = 1 }, - [0x14 ... 0x16] = { .simd_size = simd_packed_fp, .d8s = d8s_vl }, - [0x17] = { .simd_size = simd_packed_int, .two_op = 1 }, - [0x18] = { .simd_size = simd_scalar_opc, .two_op = 1, .d8s = 2 }, - [0x19] = { .simd_size = simd_scalar_opc, .two_op = 1, .d8s = 3 }, - [0x1a] = { .simd_size = simd_128, .two_op = 1, .d8s = 4 }, - [0x1b] = { .simd_size = simd_256, .two_op = 1, .d8s = d8s_vl_by_2 }, - [0x1c ... 0x1e] = { .simd_size = simd_packed_int, .two_op = 1 }, - [0x20 ... 0x25] = { .simd_size = simd_other, .two_op = 1 }, - [0x26 ... 0x29] = { .simd_size = simd_packed_int, .d8s = d8s_vl }, - [0x2a] = { .simd_size = simd_packed_int, .two_op = 1, .d8s = d8s_vl }, - [0x2b] = { .simd_size = simd_packed_int }, - [0x2c ... 0x2d] = { .simd_size = simd_packed_fp }, - [0x2e ... 0x2f] = { .simd_size = simd_packed_fp, .to_mem = 1 }, - [0x30 ... 0x35] = { .simd_size = simd_other, .two_op = 1 }, - [0x36 ... 0x3f] = { .simd_size = simd_packed_int, .d8s = d8s_vl }, - [0x40] = { .simd_size = simd_packed_int, .d8s = d8s_vl }, - [0x41] = { .simd_size = simd_packed_int, .two_op = 1 }, - [0x45 ... 0x47] = { .simd_size = simd_packed_int, .d8s = d8s_vl }, - [0x58 ... 
0x59] = { .simd_size = simd_other, .two_op = 1 }, - [0x5a] = { .simd_size = simd_128, .two_op = 1 }, - [0x78 ... 0x79] = { .simd_size = simd_other, .two_op = 1 }, - [0x8c] = { .simd_size = simd_packed_int }, - [0x8e] = { .simd_size = simd_packed_int, .to_mem = 1 }, - [0x90 ... 0x93] = { .simd_size = simd_other, .vsib = 1 }, - [0x96 ... 0x98] = { .simd_size = simd_packed_fp, .d8s = d8s_vl }, - [0x99] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq }, - [0x9a] = { .simd_size = simd_packed_fp, .d8s = d8s_vl }, - [0x9b] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq }, - [0x9c] = { .simd_size = simd_packed_fp, .d8s = d8s_vl }, - [0x9d] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq }, - [0x9e] = { .simd_size = simd_packed_fp, .d8s = d8s_vl }, - [0x9f] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq }, - [0xa6 ... 0xa8] = { .simd_size = simd_packed_fp, .d8s = d8s_vl }, - [0xa9] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq }, - [0xaa] = { .simd_size = simd_packed_fp, .d8s = d8s_vl }, - [0xab] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq }, - [0xac] = { .simd_size = simd_packed_fp, .d8s = d8s_vl }, - [0xad] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq }, - [0xae] = { .simd_size = simd_packed_fp, .d8s = d8s_vl }, - [0xaf] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq }, - [0xb6 ... 0xb8] = { .simd_size = simd_packed_fp, .d8s = d8s_vl }, - [0xb9] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq }, - [0xba] = { .simd_size = simd_packed_fp, .d8s = d8s_vl }, - [0xbb] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq }, - [0xbc] = { .simd_size = simd_packed_fp, .d8s = d8s_vl }, - [0xbd] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq }, - [0xbe] = { .simd_size = simd_packed_fp, .d8s = d8s_vl }, - [0xbf] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq }, - [0xc8 ... 0xcd] = { .simd_size = simd_other }, - [0xdb] = { .simd_size = simd_packed_int, .two_op = 1 }, - [0xdc ... 0xdf] = { .simd_size = simd_packed_int }, - [0xf0] = { .two_op = 1 }, - [0xf1] = { .to_mem = 1, .two_op = 1 }, + [0x00 ... 0x0b] = {.simd_size = simd_packed_int}, + [0x0c ... 0x0f] = {.simd_size = simd_packed_fp}, + [0x10 ... 0x12] = {.simd_size = simd_packed_int, .d8s = d8s_vl}, + [0x13] = {.simd_size = simd_other, .two_op = 1}, + [0x14 ... 0x16] = {.simd_size = simd_packed_fp, .d8s = d8s_vl}, + [0x17] = {.simd_size = simd_packed_int, .two_op = 1}, + [0x18] = {.simd_size = simd_scalar_opc, .two_op = 1, .d8s = 2}, + [0x19] = {.simd_size = simd_scalar_opc, .two_op = 1, .d8s = 3}, + [0x1a] = {.simd_size = simd_128, .two_op = 1, .d8s = 4}, + [0x1b] = {.simd_size = simd_256, .two_op = 1, .d8s = d8s_vl_by_2}, + [0x1c ... 0x1e] = {.simd_size = simd_packed_int, .two_op = 1}, + [0x20 ... 0x25] = {.simd_size = simd_other, .two_op = 1}, + [0x26 ... 0x29] = {.simd_size = simd_packed_int, .d8s = d8s_vl}, + [0x2a] = {.simd_size = simd_packed_int, .two_op = 1, .d8s = d8s_vl}, + [0x2b] = {.simd_size = simd_packed_int}, + [0x2c ... 0x2d] = {.simd_size = simd_packed_fp}, + [0x2e ... 0x2f] = {.simd_size = simd_packed_fp, .to_mem = 1}, + [0x30 ... 0x35] = {.simd_size = simd_other, .two_op = 1}, + [0x36 ... 0x3f] = {.simd_size = simd_packed_int, .d8s = d8s_vl}, + [0x40] = {.simd_size = simd_packed_int, .d8s = d8s_vl}, + [0x41] = {.simd_size = simd_packed_int, .two_op = 1}, + [0x45 ... 0x47] = {.simd_size = simd_packed_int, .d8s = d8s_vl}, + [0x58 ... 0x59] = {.simd_size = simd_other, .two_op = 1}, + [0x5a] = {.simd_size = simd_128, .two_op = 1}, + [0x78 ... 
0x79] = {.simd_size = simd_other, .two_op = 1}, + [0x8c] = {.simd_size = simd_packed_int}, + [0x8e] = {.simd_size = simd_packed_int, .to_mem = 1}, + [0x90 ... 0x93] = {.simd_size = simd_other, .vsib = 1}, + [0x96 ... 0x98] = {.simd_size = simd_packed_fp, .d8s = d8s_vl}, + [0x99] = {.simd_size = simd_scalar_vexw, .d8s = d8s_dq}, + [0x9a] = {.simd_size = simd_packed_fp, .d8s = d8s_vl}, + [0x9b] = {.simd_size = simd_scalar_vexw, .d8s = d8s_dq}, + [0x9c] = {.simd_size = simd_packed_fp, .d8s = d8s_vl}, + [0x9d] = {.simd_size = simd_scalar_vexw, .d8s = d8s_dq}, + [0x9e] = {.simd_size = simd_packed_fp, .d8s = d8s_vl}, + [0x9f] = {.simd_size = simd_scalar_vexw, .d8s = d8s_dq}, + [0xa6 ... 0xa8] = {.simd_size = simd_packed_fp, .d8s = d8s_vl}, + [0xa9] = {.simd_size = simd_scalar_vexw, .d8s = d8s_dq}, + [0xaa] = {.simd_size = simd_packed_fp, .d8s = d8s_vl}, + [0xab] = {.simd_size = simd_scalar_vexw, .d8s = d8s_dq}, + [0xac] = {.simd_size = simd_packed_fp, .d8s = d8s_vl}, + [0xad] = {.simd_size = simd_scalar_vexw, .d8s = d8s_dq}, + [0xae] = {.simd_size = simd_packed_fp, .d8s = d8s_vl}, + [0xaf] = {.simd_size = simd_scalar_vexw, .d8s = d8s_dq}, + [0xb6 ... 0xb8] = {.simd_size = simd_packed_fp, .d8s = d8s_vl}, + [0xb9] = {.simd_size = simd_scalar_vexw, .d8s = d8s_dq}, + [0xba] = {.simd_size = simd_packed_fp, .d8s = d8s_vl}, + [0xbb] = {.simd_size = simd_scalar_vexw, .d8s = d8s_dq}, + [0xbc] = {.simd_size = simd_packed_fp, .d8s = d8s_vl}, + [0xbd] = {.simd_size = simd_scalar_vexw, .d8s = d8s_dq}, + [0xbe] = {.simd_size = simd_packed_fp, .d8s = d8s_vl}, + [0xbf] = {.simd_size = simd_scalar_vexw, .d8s = d8s_dq}, + [0xc8 ... 0xcd] = {.simd_size = simd_other}, + [0xdb] = {.simd_size = simd_packed_int, .two_op = 1}, + [0xdc ... 0xdf] = {.simd_size = simd_packed_int}, + [0xf0] = {.two_op = 1}, + [0xf1] = {.to_mem = 1, .two_op = 1}, [0xf2 ... 0xf3] = {}, [0xf5 ... 0xf7] = {}, }; /* Shift values between src and dst sizes of pmov{s,z}x{b,w,d}{w,d,q}. */ -static const uint8_t pmov_convert_delta[] = { 1, 2, 3, 1, 2, 1 }; - -static const struct ext0f3a_table { - uint8_t simd_size:5; - uint8_t to_mem:1; - uint8_t two_op:1; - uint8_t four_op:1; - disp8scale_t d8s:4; +static const uint8_t pmov_convert_delta[] = {1, 2, 3, 1, 2, 1}; + +static const struct ext0f3a_table +{ + uint8_t simd_size : 5; + uint8_t to_mem : 1; + uint8_t two_op : 1; + uint8_t four_op : 1; + disp8scale_t d8s : 4; } ext0f3a_table[256] = { - [0x00] = { .simd_size = simd_packed_int, .two_op = 1 }, - [0x01] = { .simd_size = simd_packed_fp, .two_op = 1 }, - [0x02] = { .simd_size = simd_packed_int }, - [0x04 ... 0x05] = { .simd_size = simd_packed_fp, .two_op = 1 }, - [0x06] = { .simd_size = simd_packed_fp }, - [0x08 ... 0x09] = { .simd_size = simd_packed_fp, .two_op = 1 }, - [0x0a ... 0x0b] = { .simd_size = simd_scalar_opc }, - [0x0c ... 0x0d] = { .simd_size = simd_packed_fp }, - [0x0e ... 0x0f] = { .simd_size = simd_packed_int }, - [0x14 ... 0x17] = { .simd_size = simd_none, .to_mem = 1, .two_op = 1 }, - [0x18] = { .simd_size = simd_128 }, - [0x19] = { .simd_size = simd_128, .to_mem = 1, .two_op = 1 }, - [0x1d] = { .simd_size = simd_other, .to_mem = 1, .two_op = 1 }, - [0x1e ... 0x1f] = { .simd_size = simd_packed_int, .d8s = d8s_vl }, - [0x20] = { .simd_size = simd_none }, - [0x21] = { .simd_size = simd_other }, - [0x22] = { .simd_size = simd_none }, - [0x25] = { .simd_size = simd_packed_int, .d8s = d8s_vl }, - [0x30 ... 
0x33] = { .simd_size = simd_other, .two_op = 1 }, - [0x38] = { .simd_size = simd_128 }, - [0x39] = { .simd_size = simd_128, .to_mem = 1, .two_op = 1 }, - [0x3e ... 0x3f] = { .simd_size = simd_packed_int, .d8s = d8s_vl }, - [0x40 ... 0x41] = { .simd_size = simd_packed_fp }, - [0x42] = { .simd_size = simd_packed_int }, - [0x44] = { .simd_size = simd_packed_int }, - [0x46] = { .simd_size = simd_packed_int }, - [0x48 ... 0x49] = { .simd_size = simd_packed_fp, .four_op = 1 }, - [0x4a ... 0x4b] = { .simd_size = simd_packed_fp, .four_op = 1 }, - [0x4c] = { .simd_size = simd_packed_int, .four_op = 1 }, - [0x5c ... 0x5f] = { .simd_size = simd_packed_fp, .four_op = 1 }, - [0x60 ... 0x63] = { .simd_size = simd_packed_int, .two_op = 1 }, - [0x68 ... 0x69] = { .simd_size = simd_packed_fp, .four_op = 1 }, - [0x6a ... 0x6b] = { .simd_size = simd_scalar_opc, .four_op = 1 }, - [0x6c ... 0x6d] = { .simd_size = simd_packed_fp, .four_op = 1 }, - [0x6e ... 0x6f] = { .simd_size = simd_scalar_opc, .four_op = 1 }, - [0x78 ... 0x79] = { .simd_size = simd_packed_fp, .four_op = 1 }, - [0x7a ... 0x7b] = { .simd_size = simd_scalar_opc, .four_op = 1 }, - [0x7c ... 0x7d] = { .simd_size = simd_packed_fp, .four_op = 1 }, - [0x7e ... 0x7f] = { .simd_size = simd_scalar_opc, .four_op = 1 }, - [0xcc] = { .simd_size = simd_other }, - [0xdf] = { .simd_size = simd_packed_int, .two_op = 1 }, + [0x00] = {.simd_size = simd_packed_int, .two_op = 1}, + [0x01] = {.simd_size = simd_packed_fp, .two_op = 1}, + [0x02] = {.simd_size = simd_packed_int}, + [0x04 ... 0x05] = {.simd_size = simd_packed_fp, .two_op = 1}, + [0x06] = {.simd_size = simd_packed_fp}, + [0x08 ... 0x09] = {.simd_size = simd_packed_fp, .two_op = 1}, + [0x0a ... 0x0b] = {.simd_size = simd_scalar_opc}, + [0x0c ... 0x0d] = {.simd_size = simd_packed_fp}, + [0x0e ... 0x0f] = {.simd_size = simd_packed_int}, + [0x14 ... 0x17] = {.simd_size = simd_none, .to_mem = 1, .two_op = 1}, + [0x18] = {.simd_size = simd_128}, + [0x19] = {.simd_size = simd_128, .to_mem = 1, .two_op = 1}, + [0x1d] = {.simd_size = simd_other, .to_mem = 1, .two_op = 1}, + [0x1e ... 0x1f] = {.simd_size = simd_packed_int, .d8s = d8s_vl}, + [0x20] = {.simd_size = simd_none}, + [0x21] = {.simd_size = simd_other}, + [0x22] = {.simd_size = simd_none}, + [0x25] = {.simd_size = simd_packed_int, .d8s = d8s_vl}, + [0x30 ... 0x33] = {.simd_size = simd_other, .two_op = 1}, + [0x38] = {.simd_size = simd_128}, + [0x39] = {.simd_size = simd_128, .to_mem = 1, .two_op = 1}, + [0x3e ... 0x3f] = {.simd_size = simd_packed_int, .d8s = d8s_vl}, + [0x40 ... 0x41] = {.simd_size = simd_packed_fp}, + [0x42] = {.simd_size = simd_packed_int}, + [0x44] = {.simd_size = simd_packed_int}, + [0x46] = {.simd_size = simd_packed_int}, + [0x48 ... 0x49] = {.simd_size = simd_packed_fp, .four_op = 1}, + [0x4a ... 0x4b] = {.simd_size = simd_packed_fp, .four_op = 1}, + [0x4c] = {.simd_size = simd_packed_int, .four_op = 1}, + [0x5c ... 0x5f] = {.simd_size = simd_packed_fp, .four_op = 1}, + [0x60 ... 0x63] = {.simd_size = simd_packed_int, .two_op = 1}, + [0x68 ... 0x69] = {.simd_size = simd_packed_fp, .four_op = 1}, + [0x6a ... 0x6b] = {.simd_size = simd_scalar_opc, .four_op = 1}, + [0x6c ... 0x6d] = {.simd_size = simd_packed_fp, .four_op = 1}, + [0x6e ... 0x6f] = {.simd_size = simd_scalar_opc, .four_op = 1}, + [0x78 ... 0x79] = {.simd_size = simd_packed_fp, .four_op = 1}, + [0x7a ... 0x7b] = {.simd_size = simd_scalar_opc, .four_op = 1}, + [0x7c ... 0x7d] = {.simd_size = simd_packed_fp, .four_op = 1}, + [0x7e ... 
0x7f] = {.simd_size = simd_scalar_opc, .four_op = 1}, + [0xcc] = {.simd_size = simd_other}, + [0xdf] = {.simd_size = simd_packed_int, .two_op = 1}, [0xf0] = {}, }; static const opcode_desc_t xop_table[] = { - DstReg|SrcImmByte|ModRM, - DstReg|SrcMem|ModRM, - DstReg|SrcImm|ModRM, + DstReg | SrcImmByte | ModRM, + DstReg | SrcMem | ModRM, + DstReg | SrcImm | ModRM, }; -static const struct ext8f08_table { - uint8_t simd_size:5; - uint8_t two_op:1; - uint8_t four_op:1; +static const struct ext8f08_table +{ + uint8_t simd_size : 5; + uint8_t two_op : 1; + uint8_t four_op : 1; } ext8f08_table[256] = { - [0xa2] = { .simd_size = simd_packed_int, .four_op = 1 }, - [0x85 ... 0x87] = { .simd_size = simd_packed_int, .four_op = 1 }, - [0x8e ... 0x8f] = { .simd_size = simd_packed_int, .four_op = 1 }, - [0x95 ... 0x97] = { .simd_size = simd_packed_int, .four_op = 1 }, - [0x9e ... 0x9f] = { .simd_size = simd_packed_int, .four_op = 1 }, - [0xa3] = { .simd_size = simd_packed_int, .four_op = 1 }, - [0xa6] = { .simd_size = simd_packed_int, .four_op = 1 }, - [0xb6] = { .simd_size = simd_packed_int, .four_op = 1 }, - [0xc0 ... 0xc3] = { .simd_size = simd_packed_int, .two_op = 1 }, - [0xcc ... 0xcf] = { .simd_size = simd_packed_int }, - [0xec ... 0xef] = { .simd_size = simd_packed_int }, + [0xa2] = {.simd_size = simd_packed_int, .four_op = 1}, + [0x85 ... 0x87] = {.simd_size = simd_packed_int, .four_op = 1}, + [0x8e ... 0x8f] = {.simd_size = simd_packed_int, .four_op = 1}, + [0x95 ... 0x97] = {.simd_size = simd_packed_int, .four_op = 1}, + [0x9e ... 0x9f] = {.simd_size = simd_packed_int, .four_op = 1}, + [0xa3] = {.simd_size = simd_packed_int, .four_op = 1}, + [0xa6] = {.simd_size = simd_packed_int, .four_op = 1}, + [0xb6] = {.simd_size = simd_packed_int, .four_op = 1}, + [0xc0 ... 0xc3] = {.simd_size = simd_packed_int, .two_op = 1}, + [0xcc ... 0xcf] = {.simd_size = simd_packed_int}, + [0xec ... 0xef] = {.simd_size = simd_packed_int}, }; -static const struct ext8f09_table { - uint8_t simd_size:5; - uint8_t two_op:1; +static const struct ext8f09_table +{ + uint8_t simd_size : 5; + uint8_t two_op : 1; } ext8f09_table[256] = { - [0x01 ... 0x02] = { .two_op = 1 }, - [0x80 ... 0x81] = { .simd_size = simd_packed_fp, .two_op = 1 }, - [0x82 ... 0x83] = { .simd_size = simd_scalar_opc, .two_op = 1 }, - [0x90 ... 0x9b] = { .simd_size = simd_packed_int }, - [0xc1 ... 0xc3] = { .simd_size = simd_packed_int, .two_op = 1 }, - [0xc6 ... 0xc7] = { .simd_size = simd_packed_int, .two_op = 1 }, - [0xcb] = { .simd_size = simd_packed_int, .two_op = 1 }, - [0xd1 ... 0xd3] = { .simd_size = simd_packed_int, .two_op = 1 }, - [0xd6 ... 0xd7] = { .simd_size = simd_packed_int, .two_op = 1 }, - [0xdb] = { .simd_size = simd_packed_int, .two_op = 1 }, - [0xe1 ... 0xe3] = { .simd_size = simd_packed_int, .two_op = 1 }, + [0x01 ... 0x02] = {.two_op = 1}, + [0x80 ... 0x81] = {.simd_size = simd_packed_fp, .two_op = 1}, + [0x82 ... 0x83] = {.simd_size = simd_scalar_opc, .two_op = 1}, + [0x90 ... 0x9b] = {.simd_size = simd_packed_int}, + [0xc1 ... 0xc3] = {.simd_size = simd_packed_int, .two_op = 1}, + [0xc6 ... 0xc7] = {.simd_size = simd_packed_int, .two_op = 1}, + [0xcb] = {.simd_size = simd_packed_int, .two_op = 1}, + [0xd1 ... 0xd3] = {.simd_size = simd_packed_int, .two_op = 1}, + [0xd6 ... 0xd7] = {.simd_size = simd_packed_int, .two_op = 1}, + [0xdb] = {.simd_size = simd_packed_int, .two_op = 1}, + [0xe1 ... 
0xe3] = {.simd_size = simd_packed_int, .two_op = 1}, }; #define REX_PREFIX 0x40 @@ -596,13 +594,15 @@ static const struct ext8f09_table { #define vex_none 0 -enum vex_opcx { +enum vex_opcx +{ vex_0f = vex_none + 1, vex_0f38, vex_0f3a, }; -enum vex_pfx { +enum vex_pfx +{ vex_66 = vex_none + 1, vex_f3, vex_f2 @@ -611,105 +611,119 @@ enum vex_pfx { #define VEX_PREFIX_DOUBLE_MASK 0x1 #define VEX_PREFIX_SCALAR_MASK 0x2 -static const uint8_t sse_prefix[] = { 0x66, 0xf3, 0xf2 }; +static const uint8_t sse_prefix[] = {0x66, 0xf3, 0xf2}; union vex { uint8_t raw[2]; - struct { /* SDM names */ - uint8_t opcx:5; /* mmmmm */ - uint8_t b:1; /* B */ - uint8_t x:1; /* X */ - uint8_t r:1; /* R */ - uint8_t pfx:2; /* pp */ - uint8_t l:1; /* L */ - uint8_t reg:4; /* vvvv */ - uint8_t w:1; /* W */ + struct + { /* SDM names */ + uint8_t opcx : 5; /* mmmmm */ + uint8_t b : 1; /* B */ + uint8_t x : 1; /* X */ + uint8_t r : 1; /* R */ + uint8_t pfx : 2; /* pp */ + uint8_t l : 1; /* L */ + uint8_t reg : 4; /* vvvv */ + uint8_t w : 1; /* W */ }; }; #ifdef __x86_64__ -# define PFX2 REX_PREFIX +#define PFX2 REX_PREFIX #else -# define PFX2 0x3e +#define PFX2 0x3e #endif #define PFX_BYTES 3 -#define init_prefixes(stub) ({ \ - uint8_t *buf_ = get_stub(stub); \ - buf_[0] = 0x3e; \ - buf_[1] = PFX2; \ - buf_[2] = 0x0f; \ - buf_ + 3; \ -}) - -#define copy_VEX(ptr, vex) ({ \ - if ( !mode_64bit() ) \ - (vex).reg |= 8; \ - (ptr)[0 - PFX_BYTES] = ext < ext_8f08 ? 0xc4 : 0x8f; \ - (ptr)[1 - PFX_BYTES] = (vex).raw[0]; \ - (ptr)[2 - PFX_BYTES] = (vex).raw[1]; \ - container_of((ptr) + 1 - PFX_BYTES, typeof(vex), raw[0]); \ -}) - -#define copy_REX_VEX(ptr, rex, vex) do { \ - if ( (vex).opcx != vex_none ) \ - copy_VEX(ptr, vex); \ - else \ - { \ - if ( (vex).pfx ) \ - (ptr)[0 - PFX_BYTES] = sse_prefix[(vex).pfx - 1]; \ - /* \ - * "rex" is always zero for other than 64-bit mode, so OR-ing it \ - * into any prefix (and not just REX_PREFIX) is safe on 32-bit \ - * (test harness) builds. \ - */ \ - (ptr)[1 - PFX_BYTES] |= rex; \ - } \ -} while (0) +#define init_prefixes(stub) \ + ({ \ + uint8_t *buf_ = get_stub(stub); \ + buf_[0] = 0x3e; \ + buf_[1] = PFX2; \ + buf_[2] = 0x0f; \ + buf_ + 3; \ + }) + +#define copy_VEX(ptr, vex) \ + ({ \ + if ( !mode_64bit() ) \ + (vex).reg |= 8; \ + (ptr)[0 - PFX_BYTES] = ext < ext_8f08 ? 0xc4 : 0x8f; \ + (ptr)[1 - PFX_BYTES] = (vex).raw[0]; \ + (ptr)[2 - PFX_BYTES] = (vex).raw[1]; \ + container_of((ptr) + 1 - PFX_BYTES, typeof(vex), raw[0]); \ + }) + +#define copy_REX_VEX(ptr, rex, vex) \ + do { \ + if ( (vex).opcx != vex_none ) \ + copy_VEX(ptr, vex); \ + else \ + { \ + if ( (vex).pfx ) \ + (ptr)[0 - PFX_BYTES] = sse_prefix[(vex).pfx - 1]; \ + /* \ + * "rex" is always zero for other than 64-bit mode, so OR-ing it \ + * into any prefix (and not just REX_PREFIX) is safe on 32-bit \ + * (test harness) builds. 
\ + */ \ + (ptr)[1 - PFX_BYTES] |= rex; \ + } \ + } while ( 0 ) union evex { uint8_t raw[3]; - struct { /* SDM names */ - uint8_t opcx:2; /* mm */ - uint8_t mbz:2; - uint8_t R:1; /* R' */ - uint8_t b:1; /* B */ - uint8_t x:1; /* X */ - uint8_t r:1; /* R */ - uint8_t pfx:2; /* pp */ - uint8_t mbs:1; - uint8_t reg:4; /* vvvv */ - uint8_t w:1; /* W */ - uint8_t opmsk:3; /* aaa */ - uint8_t RX:1; /* V' */ - uint8_t brs:1; /* b */ - uint8_t lr:2; /* L'L */ - uint8_t z:1; /* z */ + struct + { /* SDM names */ + uint8_t opcx : 2; /* mm */ + uint8_t mbz : 2; + uint8_t R : 1; /* R' */ + uint8_t b : 1; /* B */ + uint8_t x : 1; /* X */ + uint8_t r : 1; /* R */ + uint8_t pfx : 2; /* pp */ + uint8_t mbs : 1; + uint8_t reg : 4; /* vvvv */ + uint8_t w : 1; /* W */ + uint8_t opmsk : 3; /* aaa */ + uint8_t RX : 1; /* V' */ + uint8_t brs : 1; /* b */ + uint8_t lr : 2; /* L'L */ + uint8_t z : 1; /* z */ }; }; #define EVEX_PFX_BYTES 4 -#define init_evex(stub) ({ \ - uint8_t *buf_ = get_stub(stub); \ - buf_[0] = 0x62; \ - buf_ + EVEX_PFX_BYTES; \ -}) - -#define copy_EVEX(ptr, evex) ({ \ - if ( !mode_64bit() ) \ - (evex).reg |= 8; \ - (ptr)[1 - EVEX_PFX_BYTES] = (evex).raw[0]; \ - (ptr)[2 - EVEX_PFX_BYTES] = (evex).raw[1]; \ - (ptr)[3 - EVEX_PFX_BYTES] = (evex).raw[2]; \ - container_of((ptr) + 1 - EVEX_PFX_BYTES, typeof(evex), raw[0]); \ -}) - -#define rep_prefix() (vex.pfx >= vex_f3) -#define repe_prefix() (vex.pfx == vex_f3) +#define init_evex(stub) \ + ({ \ + uint8_t *buf_ = get_stub(stub); \ + buf_[0] = 0x62; \ + buf_ + EVEX_PFX_BYTES; \ + }) + +#define copy_EVEX(ptr, evex) \ + ({ \ + if ( !mode_64bit() ) \ + (evex).reg |= 8; \ + (ptr)[1 - EVEX_PFX_BYTES] = (evex).raw[0]; \ + (ptr)[2 - EVEX_PFX_BYTES] = (evex).raw[1]; \ + (ptr)[3 - EVEX_PFX_BYTES] = (evex).raw[2]; \ + container_of((ptr) + 1 - EVEX_PFX_BYTES, typeof(evex), raw[0]); \ + }) + +#define rep_prefix() (vex.pfx >= vex_f3) +#define repe_prefix() (vex.pfx == vex_f3) #define repne_prefix() (vex.pfx == vex_f2) /* Type, address-of, and value of an instruction's operand. */ -struct operand { - enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type; +struct operand +{ + enum + { + OP_REG, + OP_MEM, + OP_IMM, + OP_NONE + } type; unsigned int bytes; /* Operand value. */ @@ -722,18 +736,21 @@ struct operand { unsigned long *reg; /* OP_MEM: Segment and offset. 
*/ - struct { + struct + { enum x86_segment seg; - unsigned long off; + unsigned long off; } mem; }; -struct x86_emulate_state { +struct x86_emulate_state +{ unsigned int op_bytes, ad_bytes; - enum { + enum + { ext_none = vex_none, - ext_0f = vex_0f, + ext_0f = vex_0f, ext_0f38 = vex_0f38, ext_0f3a = vex_0f3a, /* @@ -744,7 +761,8 @@ struct x86_emulate_state { ext_8f09, ext_8f0a, } ext; - enum { + enum + { rmw_NONE, rmw_adc, rmw_add, @@ -813,9 +831,9 @@ struct x86_emulate_state { typedef union { uint64_t mmx; - uint64_t __attribute__ ((aligned(16))) xmm[2]; - uint64_t __attribute__ ((aligned(32))) ymm[4]; - uint64_t __attribute__ ((aligned(64))) zmm[8]; + uint64_t __attribute__((aligned(16))) xmm[2]; + uint64_t __attribute__((aligned(32))) ymm[4]; + uint64_t __attribute__((aligned(64))) zmm[8]; } mmval_t; /* @@ -826,30 +844,30 @@ typedef union { #define DECLARE_ALIGNED(type, var) \ long __##var[(sizeof(type) + __alignof(type)) / __alignof(long) - 1]; \ type *const var##p = \ - (void *)(((long)__##var + __alignof(type) - __alignof(__##var)) \ - & -__alignof(type)) + (void *)(((long)__##var + __alignof(type) - __alignof(__##var)) & \ + -__alignof(type)) #ifdef __GCC_ASM_FLAG_OUTPUTS__ -# define ASM_FLAG_OUT(yes, no) yes +#define ASM_FLAG_OUT(yes, no) yes #else -# define ASM_FLAG_OUT(yes, no) no +#define ASM_FLAG_OUT(yes, no) no #endif /* Floating point status word definitions. */ -#define FSW_ES (1U << 7) +#define FSW_ES (1U << 7) /* MXCSR bit definitions. */ -#define MXCSR_MM (1U << 17) +#define MXCSR_MM (1U << 17) /* Exception definitions. */ -#define EXC_DE 0 -#define EXC_DB 1 -#define EXC_BP 3 -#define EXC_OF 4 -#define EXC_BR 5 -#define EXC_UD 6 -#define EXC_NM 7 -#define EXC_DF 8 +#define EXC_DE 0 +#define EXC_DB 1 +#define EXC_BP 3 +#define EXC_OF 4 +#define EXC_BR 5 +#define EXC_UD 6 +#define EXC_NM 7 +#define EXC_DF 8 #define EXC_TS 10 #define EXC_NP 11 #define EXC_SS 12 @@ -859,14 +877,14 @@ typedef union { #define EXC_AC 17 #define EXC_XM 19 -#define EXC_HAS_EC \ - ((1u << EXC_DF) | (1u << EXC_TS) | (1u << EXC_NP) | \ - (1u << EXC_SS) | (1u << EXC_GP) | (1u << EXC_PF) | (1u << EXC_AC)) +#define EXC_HAS_EC \ + ((1u << EXC_DF) | (1u << EXC_TS) | (1u << EXC_NP) | (1u << EXC_SS) | \ + (1u << EXC_GP) | (1u << EXC_PF) | (1u << EXC_AC)) /* Segment selector error code bits. */ #define ECODE_EXT (1 << 0) #define ECODE_IDT (1 << 1) -#define ECODE_TI (1 << 2) +#define ECODE_TI (1 << 2) /* * Instruction emulation: @@ -876,12 +894,12 @@ typedef union { */ #if defined(__x86_64__) -#define _LO32 "k" /* force 32-bit operand */ -#define _STK "%%rsp" /* stack pointer */ +#define _LO32 "k" /* force 32-bit operand */ +#define _STK "%%rsp" /* stack pointer */ #define _BYTES_PER_LONG "8" #elif defined(__i386__) -#define _LO32 "" /* force 32-bit operand */ -#define _STK "%%esp" /* stack pointer */ +#define _LO32 "" /* force 32-bit operand */ +#define _STK "%%esp" /* stack pointer */ #define _BYTES_PER_LONG "4" #endif @@ -889,264 +907,263 @@ typedef union { * These EFLAGS bits are restored from saved value during emulation, and * any changes are written back to the saved value after emulation. */ -#define EFLAGS_MASK (X86_EFLAGS_OF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \ - X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF) +#define EFLAGS_MASK \ + (X86_EFLAGS_OF | X86_EFLAGS_SF | X86_EFLAGS_ZF | X86_EFLAGS_AF | \ + X86_EFLAGS_PF | X86_EFLAGS_CF) /* * These EFLAGS bits are modifiable (by POPF and IRET), possibly subject * to further CPL and IOPL constraints. 
*/ -#define EFLAGS_MODIFIABLE (X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_RF | \ - X86_EFLAGS_NT | X86_EFLAGS_IOPL | X86_EFLAGS_DF | \ - X86_EFLAGS_IF | X86_EFLAGS_TF | EFLAGS_MASK) +#define EFLAGS_MODIFIABLE \ + (X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_RF | X86_EFLAGS_NT | \ + X86_EFLAGS_IOPL | X86_EFLAGS_DF | X86_EFLAGS_IF | X86_EFLAGS_TF | \ + EFLAGS_MASK) /* Before executing instruction: restore necessary bits in EFLAGS. */ -#define _PRE_EFLAGS(_sav, _msk, _tmp) \ -/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \ -"movl %"_LO32 _sav",%"_LO32 _tmp"; " \ -"push %"_tmp"; " \ -"push %"_tmp"; " \ -"movl %"_msk",%"_LO32 _tmp"; " \ -"andl %"_LO32 _tmp",("_STK"); " \ -"pushf; " \ -"notl %"_LO32 _tmp"; " \ -"andl %"_LO32 _tmp",("_STK"); " \ -"andl %"_LO32 _tmp",2*"_BYTES_PER_LONG"("_STK"); " \ -"pop %"_tmp"; " \ -"orl %"_LO32 _tmp",("_STK"); " \ -"popf; " \ -"pop %"_tmp"; " \ -"movl %"_LO32 _tmp",%"_LO32 _sav"; " +#define _PRE_EFLAGS(_sav, _msk, _tmp) \ + /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \ + "movl %" _LO32 _sav ",%" _LO32 _tmp "; " \ + "push %" _tmp "; " \ + "push %" _tmp "; " \ + "movl %" _msk ",%" _LO32 _tmp "; " \ + "andl %" _LO32 _tmp ",(" _STK "); " \ + "pushf; " \ + "notl %" _LO32 _tmp "; " \ + "andl %" _LO32 _tmp ",(" _STK "); " \ + "andl %" _LO32 _tmp ",2*" _BYTES_PER_LONG "(" _STK "); " \ + "pop %" _tmp "; " \ + "orl %" _LO32 _tmp ",(" _STK "); " \ + "popf; " \ + "pop %" _tmp "; " \ + "movl %" _LO32 _tmp ",%" _LO32 _sav "; " /* After executing instruction: write-back necessary bits in EFLAGS. */ -#define _POST_EFLAGS(_sav, _msk, _tmp) \ -/* _sav |= EFLAGS & _msk; */ \ -"pushf; " \ -"pop %"_tmp"; " \ -"andl %"_msk",%"_LO32 _tmp"; " \ -"orl %"_LO32 _tmp",%"_LO32 _sav"; " +#define _POST_EFLAGS(_sav, _msk, _tmp) \ + /* _sav |= EFLAGS & _msk; */ \ + "pushf; " \ + "pop %" _tmp "; " \ + "andl %" _msk ",%" _LO32 _tmp "; " \ + "orl %" _LO32 _tmp ",%" _LO32 _sav "; " /* Raw emulation: instruction has two explicit operands. */ -#define __emulate_2op_nobyte(_op, src, dst, sz, eflags, wsx,wsy,wdx,wdy, \ - lsx,lsy,ldx,ldy, qsx,qsy,qdx,qdy, extra...) \ -do{ unsigned long _tmp; \ - switch ( sz ) \ - { \ - case 2: \ - asm volatile ( \ - _PRE_EFLAGS("0","4","2") \ - _op"w %"wsx"3,%"wdx"1; " \ - _POST_EFLAGS("0","4","2") \ - : "+g" (eflags), "+" wdy (*(dst)), "=&r" (_tmp) \ - : wsy (src), "i" (EFLAGS_MASK), ## extra ); \ - break; \ - case 4: \ - asm volatile ( \ - _PRE_EFLAGS("0","4","2") \ - _op"l %"lsx"3,%"ldx"1; " \ - _POST_EFLAGS("0","4","2") \ - : "+g" (eflags), "+" ldy (*(dst)), "=&r" (_tmp) \ - : lsy (src), "i" (EFLAGS_MASK), ## extra ); \ - break; \ - case 8: \ - __emulate_2op_8byte(_op, src, dst, eflags, qsx, qsy, qdx, qdy, \ - ## extra); \ - break; \ - } \ -} while (0) -#define __emulate_2op(_op, src, dst, sz, eflags, _bx, by, wx, wy, \ - lx, ly, qx, qy, extra...) \ -do{ unsigned long _tmp; \ - switch ( sz ) \ - { \ - case 1: \ - asm volatile ( \ - _PRE_EFLAGS("0","4","2") \ - _op"b %"_bx"3,%1; " \ - _POST_EFLAGS("0","4","2") \ - : "+g" (eflags), "+m" (*(dst)), "=&r" (_tmp) \ - : by (src), "i" (EFLAGS_MASK), ##extra ); \ - break; \ - default: \ - __emulate_2op_nobyte(_op, src, dst, sz, eflags, wx, wy, "", "m", \ - lx, ly, "", "m", qx, qy, "", "m", ##extra); \ - break; \ - } \ -} while (0) +#define __emulate_2op_nobyte(_op, src, dst, sz, eflags, wsx, wsy, wdx, wdy, \ + lsx, lsy, ldx, ldy, qsx, qsy, qdx, qdy, extra...) 
\ + do { \ + unsigned long _tmp; \ + switch (sz) \ + { \ + case 2: \ + asm volatile(_PRE_EFLAGS("0", "4", "2") _op \ + "w %" wsx "3,%" wdx "1; " _POST_EFLAGS("0", "4", "2") \ + : "+g"(eflags), "+" wdy(*(dst)), "=&r"(_tmp) \ + : wsy(src), "i"(EFLAGS_MASK), ##extra); \ + break; \ + case 4: \ + asm volatile(_PRE_EFLAGS("0", "4", "2") _op \ + "l %" lsx "3,%" ldx "1; " _POST_EFLAGS("0", "4", "2") \ + : "+g"(eflags), "+" ldy(*(dst)), "=&r"(_tmp) \ + : lsy(src), "i"(EFLAGS_MASK), ##extra); \ + break; \ + case 8: \ + __emulate_2op_8byte(_op, src, dst, eflags, qsx, qsy, qdx, qdy, \ + ##extra); \ + break; \ + } \ + } while ( 0 ) +#define __emulate_2op(_op, src, dst, sz, eflags, _bx, by, wx, wy, lx, ly, qx, \ + qy, extra...) \ + do { \ + unsigned long _tmp; \ + switch (sz) \ + { \ + case 1: \ + asm volatile(_PRE_EFLAGS("0", "4", "2") _op \ + "b %" _bx "3,%1; " _POST_EFLAGS("0", "4", "2") \ + : "+g"(eflags), "+m"(*(dst)), "=&r"(_tmp) \ + : by(src), "i"(EFLAGS_MASK), ##extra); \ + break; \ + default: \ + __emulate_2op_nobyte(_op, src, dst, sz, eflags, wx, wy, "", "m", \ + lx, ly, "", "m", qx, qy, "", "m", ##extra); \ + break; \ + } \ + } while ( 0 ) /* Source operand is byte-sized and may be restricted to just %cl. */ -#define _emulate_2op_SrcB(op, src, dst, sz, eflags) \ - __emulate_2op(op, src, dst, sz, eflags, \ - "b", "c", "b", "c", "b", "c", "b", "c") -#define emulate_2op_SrcB(op, src, dst, eflags) \ +#define _emulate_2op_SrcB(op, src, dst, sz, eflags) \ + __emulate_2op(op, src, dst, sz, eflags, "b", "c", "b", "c", "b", "c", "b", \ + "c") +#define emulate_2op_SrcB(op, src, dst, eflags) \ _emulate_2op_SrcB(op, (src).val, &(dst).val, (dst).bytes, eflags) /* Source operand is byte, word, long or quad sized. */ -#define _emulate_2op_SrcV(op, src, dst, sz, eflags, extra...) \ - __emulate_2op(op, src, dst, sz, eflags, \ - "b", "q", "w", "r", _LO32, "r", "", "r", ##extra) -#define emulate_2op_SrcV(_op, _src, _dst, _eflags) \ +#define _emulate_2op_SrcV(op, src, dst, sz, eflags, extra...) \ + __emulate_2op(op, src, dst, sz, eflags, "b", "q", "w", "r", _LO32, "r", \ + "", "r", ##extra) +#define emulate_2op_SrcV(_op, _src, _dst, _eflags) \ _emulate_2op_SrcV(_op, (_src).val, &(_dst).val, (_dst).bytes, _eflags) /* Source operand is word, long or quad sized. */ -#define _emulate_2op_SrcV_nobyte(op, src, dst, sz, eflags, extra...) \ - __emulate_2op_nobyte(op, src, dst, sz, eflags, "w", "r", "", "m", \ - _LO32, "r", "", "m", "", "r", "", "m", ##extra) -#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \ - _emulate_2op_SrcV_nobyte(_op, (_src).val, &(_dst).val, (_dst).bytes, \ +#define _emulate_2op_SrcV_nobyte(op, src, dst, sz, eflags, extra...) \ + __emulate_2op_nobyte(op, src, dst, sz, eflags, "w", "r", "", "m", _LO32, \ + "r", "", "m", "", "r", "", "m", ##extra) +#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \ + _emulate_2op_SrcV_nobyte(_op, (_src).val, &(_dst).val, (_dst).bytes, \ _eflags) /* Operands are word, long or quad sized and source may be in memory. */ -#define emulate_2op_SrcV_srcmem(_op, _src, _dst, _eflags) \ - __emulate_2op_nobyte(_op, (_src).val, &(_dst).val, (_dst).bytes, \ - _eflags, "", "m", "w", "r", \ - "", "m", _LO32, "r", "", "m", "", "r") +#define emulate_2op_SrcV_srcmem(_op, _src, _dst, _eflags) \ + __emulate_2op_nobyte(_op, (_src).val, &(_dst).val, (_dst).bytes, _eflags, \ + "", "m", "w", "r", "", "m", _LO32, "r", "", "m", "", \ + "r") /* Instruction has only one explicit operand (no source operand). */ -#define _emulate_1op(_op, dst, sz, eflags, extra...) 
\ -do{ unsigned long _tmp; \ - switch ( sz ) \ - { \ - case 1: \ - asm volatile ( \ - _PRE_EFLAGS("0","3","2") \ - _op"b %1; " \ - _POST_EFLAGS("0","3","2") \ - : "+g" (eflags), "+m" (*(dst)), "=&r" (_tmp) \ - : "i" (EFLAGS_MASK), ##extra ); \ - break; \ - case 2: \ - asm volatile ( \ - _PRE_EFLAGS("0","3","2") \ - _op"w %1; " \ - _POST_EFLAGS("0","3","2") \ - : "+g" (eflags), "+m" (*(dst)), "=&r" (_tmp) \ - : "i" (EFLAGS_MASK), ##extra ); \ - break; \ - case 4: \ - asm volatile ( \ - _PRE_EFLAGS("0","3","2") \ - _op"l %1; " \ - _POST_EFLAGS("0","3","2") \ - : "+g" (eflags), "+m" (*(dst)), "=&r" (_tmp) \ - : "i" (EFLAGS_MASK), ##extra ); \ - break; \ - case 8: \ - __emulate_1op_8byte(_op, dst, eflags, ##extra); \ - break; \ - } \ -} while (0) -#define emulate_1op(op, dst, eflags) \ +#define _emulate_1op(_op, dst, sz, eflags, extra...) \ + do { \ + unsigned long _tmp; \ + switch (sz) \ + { \ + case 1: \ + asm volatile(_PRE_EFLAGS("0", "3", "2") _op \ + "b %1; " _POST_EFLAGS("0", "3", "2") \ + : "+g"(eflags), "+m"(*(dst)), "=&r"(_tmp) \ + : "i"(EFLAGS_MASK), ##extra); \ + break; \ + case 2: \ + asm volatile(_PRE_EFLAGS("0", "3", "2") _op \ + "w %1; " _POST_EFLAGS("0", "3", "2") \ + : "+g"(eflags), "+m"(*(dst)), "=&r"(_tmp) \ + : "i"(EFLAGS_MASK), ##extra); \ + break; \ + case 4: \ + asm volatile(_PRE_EFLAGS("0", "3", "2") _op \ + "l %1; " _POST_EFLAGS("0", "3", "2") \ + : "+g"(eflags), "+m"(*(dst)), "=&r"(_tmp) \ + : "i"(EFLAGS_MASK), ##extra); \ + break; \ + case 8: \ + __emulate_1op_8byte(_op, dst, eflags, ##extra); \ + break; \ + } \ + } while ( 0 ) +#define emulate_1op(op, dst, eflags) \ _emulate_1op(op, &(dst).val, (dst).bytes, eflags) /* Emulate an instruction with quadword operands (x86/64 only). */ #if defined(__x86_64__) -#define __emulate_2op_8byte(_op, src, dst, eflags, \ - qsx, qsy, qdx, qdy, extra...) \ -do{ asm volatile ( \ - _PRE_EFLAGS("0","4","2") \ - _op"q %"qsx"3,%"qdx"1; " \ - _POST_EFLAGS("0","4","2") \ - : "+g" (eflags), "+" qdy (*(dst)), "=&r" (_tmp) \ - : qsy (src), "i" (EFLAGS_MASK), ##extra ); \ -} while (0) -#define __emulate_1op_8byte(_op, dst, eflags, extra...) \ -do{ asm volatile ( \ - _PRE_EFLAGS("0","3","2") \ - _op"q %1; " \ - _POST_EFLAGS("0","3","2") \ - : "+g" (eflags), "+m" (*(dst)), "=&r" (_tmp) \ - : "i" (EFLAGS_MASK), ##extra ); \ -} while (0) +#define __emulate_2op_8byte(_op, src, dst, eflags, qsx, qsy, qdx, qdy, \ + extra...) \ + do { \ + asm volatile(_PRE_EFLAGS("0", "4", "2") _op \ + "q %" qsx "3,%" qdx "1; " _POST_EFLAGS("0", "4", "2") \ + : "+g"(eflags), "+" qdy(*(dst)), "=&r"(_tmp) \ + : qsy(src), "i"(EFLAGS_MASK), ##extra); \ + } while ( 0 ) +#define __emulate_1op_8byte(_op, dst, eflags, extra...) \ + do { \ + asm volatile(_PRE_EFLAGS("0", "3", "2") _op \ + "q %1; " _POST_EFLAGS("0", "3", "2") \ + : "+g"(eflags), "+m"(*(dst)), "=&r"(_tmp) \ + : "i"(EFLAGS_MASK), ##extra); \ + } while ( 0 ) #elif defined(__i386__) #define __emulate_2op_8byte(op, src, dst, eflags, qsx, qsy, qdx, qdy, extra...) #define __emulate_1op_8byte(op, dst, eflags, extra...) #endif /* __i386__ */ #define fail_if(p) \ -do { \ - rc = (p) ? X86EMUL_UNHANDLEABLE : X86EMUL_OKAY; \ - if ( rc ) goto done; \ -} while (0) - -#define EXPECT(p) \ -do { \ - if ( unlikely(!(p)) ) \ - { \ - ASSERT_UNREACHABLE(); \ - goto unhandleable; \ - } \ -} while (0) + do { \ + rc = (p) ? 
X86EMUL_UNHANDLEABLE : X86EMUL_OKAY; \ + if ( rc ) \ + goto done; \ + } while ( 0 ) + +#define EXPECT(p) \ + do { \ + if ( unlikely(!(p)) ) \ + { \ + ASSERT_UNREACHABLE(); \ + goto unhandleable; \ + } \ + } while ( 0 ) static inline int mkec(uint8_t e, int32_t ec, ...) { return (e < 32 && ((1u << e) & EXC_HAS_EC)) ? ec : X86_EVENT_NO_EC; } -#define generate_exception_if(p, e, ec...) \ -({ if ( (p) ) { \ - x86_emul_hw_exception(e, mkec(e, ##ec, 0), ctxt); \ - rc = X86EMUL_EXCEPTION; \ - goto done; \ - } \ -}) +#define generate_exception_if(p, e, ec...) \ + ({ \ + if ( (p) ) \ + { \ + x86_emul_hw_exception(e, mkec(e, ##ec, 0), ctxt); \ + rc = X86EMUL_EXCEPTION; \ + goto done; \ + } \ + }) #define generate_exception(e, ec...) generate_exception_if(true, e, ##ec) #ifdef __XEN__ -# define invoke_stub(pre, post, constraints...) do { \ - stub_exn.info = (union stub_exception_token) { .raw = ~0 }; \ - stub_exn.line = __LINE__; /* Utility outweighs livepatching cost */ \ - asm volatile ( pre "\n\tINDIRECT_CALL %[stub]\n\t" post "\n" \ - ".Lret%=:\n\t" \ - ".pushsection .fixup,\"ax\"\n" \ - ".Lfix%=:\n\t" \ - "pop %[exn]\n\t" \ - "jmp .Lret%=\n\t" \ - ".popsection\n\t" \ - _ASM_EXTABLE(.Lret%=, .Lfix%=) \ - : [exn] "+g" (stub_exn.info), constraints, \ - [stub] "r" (stub.func), \ - "m" (*(uint8_t(*)[MAX_INST_LEN + 1])stub.ptr) ); \ - if ( unlikely(~stub_exn.info.raw) ) \ - goto emulation_stub_failure; \ -} while (0) +#define invoke_stub(pre, post, constraints...) \ + do { \ + stub_exn.info = (union stub_exception_token){.raw = ~0}; \ + stub_exn.line = __LINE__; /* Utility outweighs livepatching cost */ \ + asm volatile(pre "\n\tINDIRECT_CALL %[stub]\n\t" post "\n" \ + ".Lret%=:\n\t" \ + ".pushsection .fixup,\"ax\"\n" \ + ".Lfix%=:\n\t" \ + "pop %[exn]\n\t" \ + "jmp .Lret%=\n\t" \ + ".popsection\n\t" _ASM_EXTABLE(.Lret %=, .Lfix %=) \ + : [exn] "+g"(stub_exn.info), constraints, \ + [stub] "r"(stub.func), \ + "m"(*(uint8_t(*)[MAX_INST_LEN + 1]) stub.ptr)); \ + if ( unlikely(~stub_exn.info.raw) ) \ + goto emulation_stub_failure; \ + } while ( 0 ) #else -# define invoke_stub(pre, post, constraints...) \ - asm volatile ( pre "\n\tcall *%[stub]\n\t" post \ - : constraints, [stub] "rm" (stub.func), \ - "m" (*(typeof(stub.buf) *)stub.addr) ) +#define invoke_stub(pre, post, constraints...) \ + asm volatile(pre "\n\tcall *%[stub]\n\t" post \ + : constraints, [stub] "rm"(stub.func), \ + "m"(*(typeof(stub.buf) *)stub.addr)) #endif -#define emulate_stub(dst, src...) do { \ - unsigned long tmp; \ - invoke_stub(_PRE_EFLAGS("[efl]", "[msk]", "[tmp]"), \ - _POST_EFLAGS("[efl]", "[msk]", "[tmp]"), \ - dst, [tmp] "=&r" (tmp), [efl] "+g" (_regs.eflags) \ - : [msk] "i" (EFLAGS_MASK), ## src); \ -} while (0) +#define emulate_stub(dst, src...) \ + do { \ + unsigned long tmp; \ + invoke_stub(_PRE_EFLAGS("[efl]", "[msk]", "[tmp]"), \ + _POST_EFLAGS("[efl]", "[msk]", "[tmp]"), \ + dst, [tmp] "=&r"(tmp), [efl] "+g"(_regs.eflags) \ + : [msk] "i"(EFLAGS_MASK), ##src); \ + } while ( 0 ) /* Fetch next part of the instruction being emulated. 
*/ -#define insn_fetch_bytes(_size) \ -({ unsigned long _x = 0, _ip = state->ip; \ - state->ip += (_size); /* real hardware doesn't truncate */ \ - generate_exception_if((uint8_t)(state->ip - \ - ctxt->regs->r(ip)) > MAX_INST_LEN, \ - EXC_GP, 0); \ - rc = ops->insn_fetch(x86_seg_cs, _ip, &_x, (_size), ctxt); \ - if ( rc ) goto done; \ - _x; \ -}) +#define insn_fetch_bytes(_size) \ + ({ \ + unsigned long _x = 0, _ip = state->ip; \ + state->ip += (_size); /* real hardware doesn't truncate */ \ + generate_exception_if((uint8_t)(state->ip - ctxt->regs->r(ip)) > \ + MAX_INST_LEN, \ + EXC_GP, 0); \ + rc = ops->insn_fetch(x86_seg_cs, _ip, &_x, (_size), ctxt); \ + if ( rc ) \ + goto done; \ + _x; \ + }) #define insn_fetch_type(_type) ((_type)insn_fetch_bytes(sizeof(_type))) -#define truncate_word(ea, byte_width) \ -({ unsigned long __ea = (ea); \ - unsigned int _width = (byte_width); \ - ((_width == sizeof(unsigned long)) ? __ea : \ - (__ea & ((1UL << (_width << 3)) - 1))); \ -}) +#define truncate_word(ea, byte_width) \ + ({ \ + unsigned long __ea = (ea); \ + unsigned int _width = (byte_width); \ + ((_width == sizeof(unsigned long)) \ + ? __ea \ + : (__ea & ((1UL << (_width << 3)) - 1))); \ + }) #define truncate_ea(ea) truncate_word((ea), ad_bytes) #ifdef __x86_64__ -# define mode_64bit() (ctxt->addr_size == 64) +#define mode_64bit() (ctxt->addr_size == 64) #else -# define mode_64bit() false +#define mode_64bit() false #endif /* @@ -1155,75 +1172,81 @@ static inline int mkec(uint8_t e, int32_t ec, ...) */ static bool even_parity(uint8_t v) { - asm ( "test %1,%1" ASM_FLAG_OUT(, "; setp %0") - : ASM_FLAG_OUT("=@ccp", "=qm") (v) : "q" (v) ); + asm("test %1,%1" ASM_FLAG_OUT(, "; setp %0") + : ASM_FLAG_OUT("=@ccp", "=qm")(v) + : "q"(v)); return v; } /* Update address held in a register, based on addressing mode. */ -#define _register_address_increment(reg, inc, byte_width) \ -do { \ - int _inc = (inc); /* signed type ensures sign extension to long */ \ - unsigned int _width = (byte_width); \ - if ( _width == sizeof(unsigned long) ) \ - (reg) += _inc; \ - else if ( mode_64bit() ) \ - (reg) = ((reg) + _inc) & ((1UL << (_width << 3)) - 1); \ - else \ - (reg) = ((reg) & ~((1UL << (_width << 3)) - 1)) | \ - (((reg) + _inc) & ((1UL << (_width << 3)) - 1)); \ -} while (0) -#define register_address_adjust(reg, adj) \ - _register_address_increment(reg, \ - _regs.eflags & X86_EFLAGS_DF ? \ - -(adj) : (adj), \ - ad_bytes) - -#define sp_pre_dec(dec) ({ \ - _register_address_increment(_regs.r(sp), -(dec), ctxt->sp_size/8); \ - truncate_word(_regs.r(sp), ctxt->sp_size/8); \ -}) -#define sp_post_inc(inc) ({ \ - unsigned long sp = truncate_word(_regs.r(sp), ctxt->sp_size/8); \ - _register_address_increment(_regs.r(sp), (inc), ctxt->sp_size/8); \ - sp; \ -}) - -#define jmp_rel(rel) \ -do { \ - unsigned long ip = _regs.r(ip) + (int)(rel); \ - if ( op_bytes == 2 ) \ - ip = (uint16_t)ip; \ - else if ( !mode_64bit() ) \ - ip = (uint32_t)ip; \ - rc = ops->insn_fetch(x86_seg_cs, ip, NULL, 0, ctxt); \ - if ( rc ) goto done; \ - _regs.r(ip) = ip; \ - singlestep = _regs.eflags & X86_EFLAGS_TF; \ -} while (0) - -#define validate_far_branch(cs, ip) ({ \ - if ( sizeof(ip) <= 4 ) { \ - ASSERT(!ctxt->lma); \ - generate_exception_if((ip) > (cs)->limit, EXC_GP, 0); \ - } else \ - generate_exception_if(ctxt->lma && (cs)->l \ - ? 
!is_canonical_address(ip) \ - : (ip) > (cs)->limit, EXC_GP, 0); \ -}) - -#define commit_far_branch(cs, newip) ({ \ - validate_far_branch(cs, newip); \ - _regs.r(ip) = (newip); \ - singlestep = _regs.eflags & X86_EFLAGS_TF; \ - ops->write_segment(x86_seg_cs, cs, ctxt); \ -}) - -static int _get_fpu( - enum x86_emulate_fpu_type type, - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +#define _register_address_increment(reg, inc, byte_width) \ + do { \ + int _inc = (inc); /* signed type ensures sign extension to long */ \ + unsigned int _width = (byte_width); \ + if ( _width == sizeof(unsigned long) ) \ + (reg) += _inc; \ + else if ( mode_64bit() ) \ + (reg) = ((reg) + _inc) & ((1UL << (_width << 3)) - 1); \ + else \ + (reg) = ((reg) & ~((1UL << (_width << 3)) - 1)) | \ + (((reg) + _inc) & ((1UL << (_width << 3)) - 1)); \ + } while ( 0 ) +#define register_address_adjust(reg, adj) \ + _register_address_increment( \ + reg, _regs.eflags &X86_EFLAGS_DF ? -(adj) : (adj), ad_bytes) + +#define sp_pre_dec(dec) \ + ({ \ + _register_address_increment(_regs.r(sp), -(dec), ctxt->sp_size / 8); \ + truncate_word(_regs.r(sp), ctxt->sp_size / 8); \ + }) +#define sp_post_inc(inc) \ + ({ \ + unsigned long sp = truncate_word(_regs.r(sp), ctxt->sp_size / 8); \ + _register_address_increment(_regs.r(sp), (inc), ctxt->sp_size / 8); \ + sp; \ + }) + +#define jmp_rel(rel) \ + do { \ + unsigned long ip = _regs.r(ip) + (int)(rel); \ + if ( op_bytes == 2 ) \ + ip = (uint16_t)ip; \ + else if ( !mode_64bit() ) \ + ip = (uint32_t)ip; \ + rc = ops->insn_fetch(x86_seg_cs, ip, NULL, 0, ctxt); \ + if ( rc ) \ + goto done; \ + _regs.r(ip) = ip; \ + singlestep = _regs.eflags & X86_EFLAGS_TF; \ + } while ( 0 ) + +#define validate_far_branch(cs, ip) \ + ({ \ + if ( sizeof(ip) <= 4 ) \ + { \ + ASSERT(!ctxt->lma); \ + generate_exception_if((ip) > (cs)->limit, EXC_GP, 0); \ + } \ + else \ + generate_exception_if(ctxt->lma && (cs)->l \ + ? !is_canonical_address(ip) \ + : (ip) > (cs)->limit, \ + EXC_GP, 0); \ + }) + +#define commit_far_branch(cs, newip) \ + ({ \ + validate_far_branch(cs, newip); \ + _regs.r(ip) = (newip); \ + singlestep = _regs.eflags & X86_EFLAGS_TF; \ + ops->write_segment(x86_seg_cs, cs, ctxt); \ + }) + +static int _get_fpu(enum x86_emulate_fpu_type type, + struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { uint64_t xcr0; int rc; @@ -1238,7 +1261,7 @@ static int _get_fpu( xcr0 = 0; } - switch ( type ) + switch (type) { case X86EMUL_FPU_zmm: if ( !(xcr0 & X86_XCR0_ZMM) || !(xcr0 & X86_XCR0_HI_ZMM) || @@ -1275,9 +1298,10 @@ static int _get_fpu( rc = ops->read_cr(4, &cr4, ctxt); if ( rc != X86EMUL_OKAY ) return rc; - generate_exception_if(!(cr4 & ((type == X86EMUL_FPU_xmm) - ? X86_CR4_OSFXSR : X86_CR4_OSXSAVE)), - EXC_UD); + generate_exception_if( + !(cr4 & ((type == X86EMUL_FPU_xmm) ? 
X86_CR4_OSFXSR + : X86_CR4_OSXSAVE)), + EXC_UD); } rc = ops->read_cr(0, &cr0, ctxt); @@ -1294,27 +1318,26 @@ static int _get_fpu( generate_exception_if(type == X86EMUL_FPU_mmx, EXC_UD); generate_exception_if(type == X86EMUL_FPU_xmm, EXC_UD); } - generate_exception_if((cr0 & X86_CR0_TS) && - (type != X86EMUL_FPU_wait || (cr0 & X86_CR0_MP)), + generate_exception_if((cr0 & X86_CR0_TS) && (type != X86EMUL_FPU_wait || + (cr0 & X86_CR0_MP)), EXC_NM); } - done: +done: return rc; } -#define get_fpu(type) \ -do { \ - rc = _get_fpu(fpu_type = (type), ctxt, ops); \ - if ( rc ) goto done; \ -} while (0) - -static void put_fpu( - enum x86_emulate_fpu_type type, - bool failed_late, - const struct x86_emulate_state *state, - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +#define get_fpu(type) \ + do { \ + rc = _get_fpu(fpu_type = (type), ctxt, ops); \ + if ( rc ) \ + goto done; \ + } while ( 0 ) + +static void put_fpu(enum x86_emulate_fpu_type type, bool failed_late, + const struct x86_emulate_state *state, + struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { if ( unlikely(failed_late) && type == X86EMUL_FPU_fpu ) ops->put_fpu(ctxt, X86EMUL_FPU_fpu, NULL); @@ -1334,19 +1357,33 @@ static void put_fpu( { aux.dp = state->ea.mem.off; if ( ops->read_segment && - ops->read_segment(state->ea.mem.seg, &sreg, - ctxt) == X86EMUL_OKAY ) + ops->read_segment(state->ea.mem.seg, &sreg, ctxt) == + X86EMUL_OKAY ) aux.ds = sreg.sel; else - switch ( state->ea.mem.seg ) + switch (state->ea.mem.seg) { - case x86_seg_cs: aux.ds = ctxt->regs->cs; break; - case x86_seg_ds: aux.ds = ctxt->regs->ds; break; - case x86_seg_es: aux.ds = ctxt->regs->es; break; - case x86_seg_fs: aux.ds = ctxt->regs->fs; break; - case x86_seg_gs: aux.ds = ctxt->regs->gs; break; - case x86_seg_ss: aux.ds = ctxt->regs->ss; break; - default: ASSERT_UNREACHABLE(); break; + case x86_seg_cs: + aux.ds = ctxt->regs->cs; + break; + case x86_seg_ds: + aux.ds = ctxt->regs->ds; + break; + case x86_seg_es: + aux.ds = ctxt->regs->es; + break; + case x86_seg_fs: + aux.ds = ctxt->regs->fs; + break; + case x86_seg_gs: + aux.ds = ctxt->regs->gs; + break; + case x86_seg_ss: + aux.ds = ctxt->regs->ss; + break; + default: + ASSERT_UNREACHABLE(); + break; } aux.dval = true; } @@ -1360,62 +1397,57 @@ static inline bool fpu_check_write(void) { uint16_t fsw; - asm ( "fnstsw %0" : "=am" (fsw) ); + asm("fnstsw %0" : "=am"(fsw)); return !(fsw & FSW_ES); } -#define emulate_fpu_insn_memdst(opc, ext, arg) \ -do { \ - /* ModRM: mod=0, reg=ext, rm=0, i.e. a (%rax) operand */ \ - insn_bytes = 2; \ - memcpy(get_stub(stub), \ - ((uint8_t[]){ opc, ((ext) & 7) << 3, 0xc3 }), 3); \ - invoke_stub("", "", "+m" (arg) : "a" (&(arg))); \ - put_stub(stub); \ -} while (0) - -#define emulate_fpu_insn_memsrc(opc, ext, arg) \ -do { \ - /* ModRM: mod=0, reg=ext, rm=0, i.e. a (%rax) operand */ \ - memcpy(get_stub(stub), \ - ((uint8_t[]){ opc, ((ext) & 7) << 3, 0xc3 }), 3); \ - invoke_stub("", "", "=m" (dummy) : "m" (arg), "a" (&(arg))); \ - put_stub(stub); \ -} while (0) - -#define emulate_fpu_insn_stub(bytes...) \ -do { \ - unsigned int nr_ = sizeof((uint8_t[]){ bytes }); \ - memcpy(get_stub(stub), ((uint8_t[]){ bytes, 0xc3 }), nr_ + 1); \ - invoke_stub("", "", "=m" (dummy) : "i" (0)); \ - put_stub(stub); \ -} while (0) - -#define emulate_fpu_insn_stub_eflags(bytes...) 
\ -do { \ - unsigned int nr_ = sizeof((uint8_t[]){ bytes }); \ - unsigned long tmp_; \ - memcpy(get_stub(stub), ((uint8_t[]){ bytes, 0xc3 }), nr_ + 1); \ - invoke_stub(_PRE_EFLAGS("[eflags]", "[mask]", "[tmp]"), \ - _POST_EFLAGS("[eflags]", "[mask]", "[tmp]"), \ - [eflags] "+g" (_regs.eflags), [tmp] "=&r" (tmp_) \ - : [mask] "i" (X86_EFLAGS_ZF|X86_EFLAGS_PF|X86_EFLAGS_CF)); \ - put_stub(stub); \ -} while (0) - -static inline unsigned long get_loop_count( - const struct cpu_user_regs *regs, - int ad_bytes) +#define emulate_fpu_insn_memdst(opc, ext, arg) \ + do { \ + /* ModRM: mod=0, reg=ext, rm=0, i.e. a (%rax) operand */ \ + insn_bytes = 2; \ + memcpy(get_stub(stub), ((uint8_t[]){opc, ((ext)&7) << 3, 0xc3}), 3); \ + invoke_stub("", "", "+m"(arg) : "a"(&(arg))); \ + put_stub(stub); \ + } while ( 0 ) + +#define emulate_fpu_insn_memsrc(opc, ext, arg) \ + do { \ + /* ModRM: mod=0, reg=ext, rm=0, i.e. a (%rax) operand */ \ + memcpy(get_stub(stub), ((uint8_t[]){opc, ((ext)&7) << 3, 0xc3}), 3); \ + invoke_stub("", "", "=m"(dummy) : "m"(arg), "a"(&(arg))); \ + put_stub(stub); \ + } while ( 0 ) + +#define emulate_fpu_insn_stub(bytes...) \ + do { \ + unsigned int nr_ = sizeof((uint8_t[]){bytes}); \ + memcpy(get_stub(stub), ((uint8_t[]){bytes, 0xc3}), nr_ + 1); \ + invoke_stub("", "", "=m"(dummy) : "i"(0)); \ + put_stub(stub); \ + } while ( 0 ) + +#define emulate_fpu_insn_stub_eflags(bytes...) \ + do { \ + unsigned int nr_ = sizeof((uint8_t[]){bytes}); \ + unsigned long tmp_; \ + memcpy(get_stub(stub), ((uint8_t[]){bytes, 0xc3}), nr_ + 1); \ + invoke_stub( \ + _PRE_EFLAGS("[eflags]", "[mask]", "[tmp]"), \ + _POST_EFLAGS("[eflags]", "[mask]", "[tmp]"), \ + [eflags] "+g"(_regs.eflags), [tmp] "=&r"(tmp_) \ + : [mask] "i"(X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF)); \ + put_stub(stub); \ + } while ( 0 ) + +static inline unsigned long get_loop_count(const struct cpu_user_regs *regs, + int ad_bytes) { - return (ad_bytes > 4) ? regs->r(cx) - : (ad_bytes < 4) ? regs->cx : regs->ecx; + return (ad_bytes > 4) ? regs->r(cx) : (ad_bytes < 4) ? regs->cx : regs->ecx; } -static inline void put_loop_count( - struct cpu_user_regs *regs, - int ad_bytes, - unsigned long count) +static inline void put_loop_count(struct cpu_user_regs *regs, int ad_bytes, + unsigned long count) { if ( ad_bytes == 2 ) regs->cx = count; @@ -1423,36 +1455,37 @@ static inline void put_loop_count( regs->r(cx) = ad_bytes == 4 ? (uint32_t)count : count; } -#define get_rep_prefix(using_si, using_di) ({ \ - unsigned long max_reps = 1; \ - if ( rep_prefix() ) \ - max_reps = get_loop_count(&_regs, ad_bytes); \ - if ( max_reps == 0 ) \ - { \ - /* \ - * Skip the instruction if no repetitions are required, but \ - * zero extend involved registers first when using 32-bit \ - * addressing in 64-bit mode. \ - */ \ - if ( mode_64bit() && ad_bytes == 4 ) \ +#define get_rep_prefix(using_si, using_di) \ + ({ \ + unsigned long max_reps = 1; \ + if ( rep_prefix() ) \ + max_reps = get_loop_count(&_regs, ad_bytes); \ + if ( max_reps == 0 ) \ { \ - _regs.r(cx) = 0; \ - if ( using_si ) _regs.r(si) = _regs.esi; \ - if ( using_di ) _regs.r(di) = _regs.edi; \ + /* \ + * Skip the instruction if no repetitions are required, but \ + * zero extend involved registers first when using 32-bit \ + * addressing in 64-bit mode. 
\ + */ \ + if ( mode_64bit() && ad_bytes == 4 ) \ + { \ + _regs.r(cx) = 0; \ + if ( using_si ) \ + _regs.r(si) = _regs.esi; \ + if ( using_di ) \ + _regs.r(di) = _regs.edi; \ + } \ + goto complete_insn; \ } \ - goto complete_insn; \ - } \ - if ( max_reps > 1 && (_regs.eflags & X86_EFLAGS_TF) && \ - !is_branch_step(ctxt, ops) ) \ - max_reps = 1; \ - max_reps; \ -}) - -static void __put_rep_prefix( - struct cpu_user_regs *int_regs, - struct cpu_user_regs *ext_regs, - int ad_bytes, - unsigned long reps_completed) + if ( max_reps > 1 && (_regs.eflags & X86_EFLAGS_TF) && \ + !is_branch_step(ctxt, ops) ) \ + max_reps = 1; \ + max_reps; \ + }) + +static void __put_rep_prefix(struct cpu_user_regs *int_regs, + struct cpu_user_regs *ext_regs, int ad_bytes, + unsigned long reps_completed) { unsigned long ecx = get_loop_count(int_regs, ad_bytes); @@ -1464,39 +1497,38 @@ static void __put_rep_prefix( put_loop_count(int_regs, ad_bytes, ecx); } -#define put_rep_prefix(reps_completed) ({ \ - if ( rep_prefix() ) \ - { \ - __put_rep_prefix(&_regs, ctxt->regs, ad_bytes, reps_completed); \ - if ( unlikely(rc == X86EMUL_EXCEPTION) ) \ - goto complete_insn; \ - } \ -}) +#define put_rep_prefix(reps_completed) \ + ({ \ + if ( rep_prefix() ) \ + { \ + __put_rep_prefix(&_regs, ctxt->regs, ad_bytes, reps_completed); \ + if ( unlikely(rc == X86EMUL_EXCEPTION) ) \ + goto complete_insn; \ + } \ + }) /* Clip maximum repetitions so that the index register at most just wraps. */ -#define truncate_ea_and_reps(ea, reps, bytes_per_rep) ({ \ - unsigned long todo__, ea__ = truncate_ea(ea); \ - if ( !(_regs.eflags & X86_EFLAGS_DF) ) \ - todo__ = truncate_ea(-ea__) / (bytes_per_rep); \ - else if ( truncate_ea(ea__ + (bytes_per_rep) - 1) < ea__ ) \ - todo__ = 1; \ - else \ - todo__ = ea__ / (bytes_per_rep) + 1; \ - if ( !todo__ ) \ - (reps) = 1; \ - else if ( todo__ < (reps) ) \ - (reps) = todo__; \ - ea__; \ -}) +#define truncate_ea_and_reps(ea, reps, bytes_per_rep) \ + ({ \ + unsigned long todo__, ea__ = truncate_ea(ea); \ + if ( !(_regs.eflags & X86_EFLAGS_DF) ) \ + todo__ = truncate_ea(-ea__) / (bytes_per_rep); \ + else if ( truncate_ea(ea__ + (bytes_per_rep)-1) < ea__ ) \ + todo__ = 1; \ + else \ + todo__ = ea__ / (bytes_per_rep) + 1; \ + if ( !todo__ ) \ + (reps) = 1; \ + else if ( todo__ < (reps) ) \ + (reps) = todo__; \ + ea__; \ + }) /* Compatibility function: read guest memory, zero-extend result to a ulong. 
*/ -static int read_ulong( - enum x86_segment seg, - unsigned long offset, - unsigned long *val, - unsigned int bytes, - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +static int read_ulong(enum x86_segment seg, unsigned long offset, + unsigned long *val, unsigned int bytes, + struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { *val = 0; return ops->read(seg, offset, val, bytes, ctxt); @@ -1511,8 +1543,8 @@ static bool mul_dbl(unsigned long m[2]) { bool rc; - asm ( "mul %1" ASM_FLAG_OUT(, "; seto %2") - : "+a" (m[0]), "+d" (m[1]), ASM_FLAG_OUT("=@cco", "=qm") (rc) ); + asm("mul %1" ASM_FLAG_OUT(, "; seto %2") + : "+a"(m[0]), "+d"(m[1]), ASM_FLAG_OUT("=@cco", "=qm")(rc)); return rc; } @@ -1526,8 +1558,8 @@ static bool imul_dbl(unsigned long m[2]) { bool rc; - asm ( "imul %1" ASM_FLAG_OUT(, "; seto %2") - : "+a" (m[0]), "+d" (m[1]), ASM_FLAG_OUT("=@cco", "=qm") (rc) ); + asm("imul %1" ASM_FLAG_OUT(, "; seto %2") + : "+a"(m[0]), "+d"(m[1]), ASM_FLAG_OUT("=@cco", "=qm")(rc)); return rc; } @@ -1542,7 +1574,7 @@ static bool div_dbl(unsigned long u[2], unsigned long v) { if ( (v == 0) || (u[1] >= v) ) return 1; - asm ( "div"__OS" %2" : "+a" (u[0]), "+d" (u[1]) : "rm" (v) ); + asm("div" __OS " %2" : "+a"(u[0]), "+d"(u[1]) : "rm"(v)); return 0; } @@ -1588,13 +1620,11 @@ static bool idiv_dbl(unsigned long u[2], long v) return 0; } -static bool -test_cc( - unsigned int condition, unsigned int flags) +static bool test_cc(unsigned int condition, unsigned int flags) { int rc = 0; - switch ( (condition & 15) >> 1 ) + switch ((condition & 15) >> 1) { case 0: /* o */ rc |= (flags & X86_EFLAGS_OF); @@ -1626,10 +1656,8 @@ test_cc( return (!!rc ^ (condition & 1)); } -static int -get_cpl( - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +static int get_cpl(struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { struct segment_register reg; @@ -1643,10 +1671,8 @@ get_cpl( return reg.dpl; } -static int -_mode_iopl( - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +static int _mode_iopl(struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { int cpl = get_cpl(ctxt, ops); if ( cpl == -1 ) @@ -1654,31 +1680,33 @@ _mode_iopl( return cpl <= MASK_EXTR(ctxt->regs->eflags, X86_EFLAGS_IOPL); } -#define mode_ring0() ({ \ - int _cpl = get_cpl(ctxt, ops); \ - fail_if(_cpl < 0); \ - (_cpl == 0); \ -}) -#define mode_iopl() ({ \ - int _iopl = _mode_iopl(ctxt, ops); \ - fail_if(_iopl < 0); \ - _iopl; \ -}) -#define mode_vif() ({ \ - cr4 = 0; \ - if ( ops->read_cr && get_cpl(ctxt, ops) == 3 ) \ - { \ - rc = ops->read_cr(4, &cr4, ctxt); \ - if ( rc != X86EMUL_OKAY ) goto done; \ - } \ - !!(cr4 & (_regs.eflags & X86_EFLAGS_VM ? X86_CR4_VME : X86_CR4_PVI)); \ -}) - -static int ioport_access_check( - unsigned int first_port, - unsigned int bytes, - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +#define mode_ring0() \ + ({ \ + int _cpl = get_cpl(ctxt, ops); \ + fail_if(_cpl < 0); \ + (_cpl == 0); \ + }) +#define mode_iopl() \ + ({ \ + int _iopl = _mode_iopl(ctxt, ops); \ + fail_if(_iopl < 0); \ + _iopl; \ + }) +#define mode_vif() \ + ({ \ + cr4 = 0; \ + if ( ops->read_cr && get_cpl(ctxt, ops) == 3 ) \ + { \ + rc = ops->read_cr(4, &cr4, ctxt); \ + if ( rc != X86EMUL_OKAY ) \ + goto done; \ + } \ + !!(cr4 & (_regs.eflags & X86_EFLAGS_VM ? 
X86_CR4_VME : X86_CR4_PVI)); \ + }) + +static int ioport_access_check(unsigned int first_port, unsigned int bytes, + struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { unsigned long iobmp; struct segment_register tr; @@ -1698,7 +1726,7 @@ static int ioport_access_check( /* Ensure the TSS has an io-bitmap-offset field. */ generate_exception_if(tr.type != 0xb, EXC_GP, 0); - switch ( rc = read_ulong(x86_seg_tr, 0x66, &iobmp, 2, ctxt, ops) ) + switch (rc = read_ulong(x86_seg_tr, 0x66, &iobmp, 2, ctxt, ops)) { case X86EMUL_OKAY: break; @@ -1712,8 +1740,8 @@ static int ioport_access_check( } /* Read two bytes including byte containing first port. */ - switch ( rc = read_ulong(x86_seg_tr, iobmp + first_port / 8, - &iobmp, 2, ctxt, ops) ) + switch (rc = read_ulong(x86_seg_tr, iobmp + first_port / 8, &iobmp, 2, ctxt, + ops)) { case X86EMUL_OKAY: break; @@ -1729,14 +1757,12 @@ static int ioport_access_check( generate_exception_if(iobmp & (((1 << bytes) - 1) << (first_port & 7)), EXC_GP, 0); - done: +done: return rc; } -static bool -in_realmode( - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +static bool in_realmode(struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { unsigned long cr0; int rc; @@ -1748,10 +1774,8 @@ in_realmode( return (!rc && !(cr0 & X86_CR0_PE)); } -static bool -in_protmode( - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +static bool in_protmode(struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { return !(in_realmode(ctxt, ops) || (ctxt->regs->eflags & X86_EFLAGS_VM)); } @@ -1761,12 +1785,9 @@ in_protmode( #define EDX 2 #define EBX 3 -static bool vcpu_has( - unsigned int eax, - unsigned int reg, - unsigned int bit, - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +static bool vcpu_has(unsigned int eax, unsigned int reg, unsigned int bit, + struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { struct cpuid_leaf res; int rc = X86EMUL_OKAY; @@ -1775,76 +1796,84 @@ static bool vcpu_has( rc = ops->cpuid(eax, 0, &res, ctxt); if ( rc == X86EMUL_OKAY ) { - switch ( reg ) + switch (reg) { - case EAX: reg = res.a; break; - case EBX: reg = res.b; break; - case ECX: reg = res.c; break; - case EDX: reg = res.d; break; - default: BUG(); + case EAX: + reg = res.a; + break; + case EBX: + reg = res.b; + break; + case ECX: + reg = res.c; + break; + case EDX: + reg = res.d; + break; + default: + BUG(); } if ( !(reg & (1U << bit)) ) rc = ~X86EMUL_OKAY; } - done: +done: return rc == X86EMUL_OKAY; } -#define vcpu_has_fpu() vcpu_has( 1, EDX, 0, ctxt, ops) -#define vcpu_has_sep() vcpu_has( 1, EDX, 11, ctxt, ops) -#define vcpu_has_cx8() vcpu_has( 1, EDX, 8, ctxt, ops) -#define vcpu_has_cmov() vcpu_has( 1, EDX, 15, ctxt, ops) -#define vcpu_has_clflush() vcpu_has( 1, EDX, 19, ctxt, ops) -#define vcpu_has_mmx() vcpu_has( 1, EDX, 23, ctxt, ops) -#define vcpu_has_sse() vcpu_has( 1, EDX, 25, ctxt, ops) -#define vcpu_has_sse2() vcpu_has( 1, EDX, 26, ctxt, ops) -#define vcpu_has_sse3() vcpu_has( 1, ECX, 0, ctxt, ops) -#define vcpu_has_pclmulqdq() vcpu_has( 1, ECX, 1, ctxt, ops) -#define vcpu_has_ssse3() vcpu_has( 1, ECX, 9, ctxt, ops) -#define vcpu_has_fma() vcpu_has( 1, ECX, 12, ctxt, ops) -#define vcpu_has_cx16() vcpu_has( 1, ECX, 13, ctxt, ops) -#define vcpu_has_sse4_1() vcpu_has( 1, ECX, 19, ctxt, ops) -#define vcpu_has_sse4_2() vcpu_has( 1, ECX, 20, ctxt, ops) -#define vcpu_has_movbe() vcpu_has( 1, ECX, 22, ctxt, ops) -#define vcpu_has_popcnt() vcpu_has( 1, ECX, 23, ctxt, 
ops) -#define vcpu_has_aesni() vcpu_has( 1, ECX, 25, ctxt, ops) -#define vcpu_has_avx() vcpu_has( 1, ECX, 28, ctxt, ops) -#define vcpu_has_f16c() vcpu_has( 1, ECX, 29, ctxt, ops) -#define vcpu_has_rdrand() vcpu_has( 1, ECX, 30, ctxt, ops) -#define vcpu_has_mmxext() (vcpu_has(0x80000001, EDX, 22, ctxt, ops) || \ - vcpu_has_sse()) -#define vcpu_has_3dnow_ext() vcpu_has(0x80000001, EDX, 30, ctxt, ops) -#define vcpu_has_3dnow() vcpu_has(0x80000001, EDX, 31, ctxt, ops) -#define vcpu_has_lahf_lm() vcpu_has(0x80000001, ECX, 0, ctxt, ops) -#define vcpu_has_cr8_legacy() vcpu_has(0x80000001, ECX, 4, ctxt, ops) -#define vcpu_has_lzcnt() vcpu_has(0x80000001, ECX, 5, ctxt, ops) -#define vcpu_has_sse4a() vcpu_has(0x80000001, ECX, 6, ctxt, ops) -#define vcpu_has_misalignsse() vcpu_has(0x80000001, ECX, 7, ctxt, ops) -#define vcpu_has_xop() vcpu_has(0x80000001, ECX, 12, ctxt, ops) -#define vcpu_has_fma4() vcpu_has(0x80000001, ECX, 16, ctxt, ops) -#define vcpu_has_tbm() vcpu_has(0x80000001, ECX, 21, ctxt, ops) -#define vcpu_has_bmi1() vcpu_has( 7, EBX, 3, ctxt, ops) -#define vcpu_has_hle() vcpu_has( 7, EBX, 4, ctxt, ops) -#define vcpu_has_avx2() vcpu_has( 7, EBX, 5, ctxt, ops) -#define vcpu_has_bmi2() vcpu_has( 7, EBX, 8, ctxt, ops) -#define vcpu_has_rtm() vcpu_has( 7, EBX, 11, ctxt, ops) -#define vcpu_has_mpx() vcpu_has( 7, EBX, 14, ctxt, ops) -#define vcpu_has_avx512f() vcpu_has( 7, EBX, 16, ctxt, ops) -#define vcpu_has_avx512dq() vcpu_has( 7, EBX, 17, ctxt, ops) -#define vcpu_has_rdseed() vcpu_has( 7, EBX, 18, ctxt, ops) -#define vcpu_has_adx() vcpu_has( 7, EBX, 19, ctxt, ops) -#define vcpu_has_smap() vcpu_has( 7, EBX, 20, ctxt, ops) -#define vcpu_has_clflushopt() vcpu_has( 7, EBX, 23, ctxt, ops) -#define vcpu_has_clwb() vcpu_has( 7, EBX, 24, ctxt, ops) -#define vcpu_has_sha() vcpu_has( 7, EBX, 29, ctxt, ops) -#define vcpu_has_avx512bw() vcpu_has( 7, EBX, 30, ctxt, ops) -#define vcpu_has_avx512vl() vcpu_has( 7, EBX, 31, ctxt, ops) -#define vcpu_has_rdpid() vcpu_has( 7, ECX, 22, ctxt, ops) -#define vcpu_has_clzero() vcpu_has(0x80000008, EBX, 0, ctxt, ops) - -#define vcpu_must_have(feat) \ - generate_exception_if(!vcpu_has_##feat(), EXC_UD) +#define vcpu_has_fpu() vcpu_has(1, EDX, 0, ctxt, ops) +#define vcpu_has_sep() vcpu_has(1, EDX, 11, ctxt, ops) +#define vcpu_has_cx8() vcpu_has(1, EDX, 8, ctxt, ops) +#define vcpu_has_cmov() vcpu_has(1, EDX, 15, ctxt, ops) +#define vcpu_has_clflush() vcpu_has(1, EDX, 19, ctxt, ops) +#define vcpu_has_mmx() vcpu_has(1, EDX, 23, ctxt, ops) +#define vcpu_has_sse() vcpu_has(1, EDX, 25, ctxt, ops) +#define vcpu_has_sse2() vcpu_has(1, EDX, 26, ctxt, ops) +#define vcpu_has_sse3() vcpu_has(1, ECX, 0, ctxt, ops) +#define vcpu_has_pclmulqdq() vcpu_has(1, ECX, 1, ctxt, ops) +#define vcpu_has_ssse3() vcpu_has(1, ECX, 9, ctxt, ops) +#define vcpu_has_fma() vcpu_has(1, ECX, 12, ctxt, ops) +#define vcpu_has_cx16() vcpu_has(1, ECX, 13, ctxt, ops) +#define vcpu_has_sse4_1() vcpu_has(1, ECX, 19, ctxt, ops) +#define vcpu_has_sse4_2() vcpu_has(1, ECX, 20, ctxt, ops) +#define vcpu_has_movbe() vcpu_has(1, ECX, 22, ctxt, ops) +#define vcpu_has_popcnt() vcpu_has(1, ECX, 23, ctxt, ops) +#define vcpu_has_aesni() vcpu_has(1, ECX, 25, ctxt, ops) +#define vcpu_has_avx() vcpu_has(1, ECX, 28, ctxt, ops) +#define vcpu_has_f16c() vcpu_has(1, ECX, 29, ctxt, ops) +#define vcpu_has_rdrand() vcpu_has(1, ECX, 30, ctxt, ops) +#define vcpu_has_mmxext() \ + (vcpu_has(0x80000001, EDX, 22, ctxt, ops) || vcpu_has_sse()) +#define vcpu_has_3dnow_ext() vcpu_has(0x80000001, EDX, 30, ctxt, ops) +#define vcpu_has_3dnow() 
vcpu_has(0x80000001, EDX, 31, ctxt, ops) +#define vcpu_has_lahf_lm() vcpu_has(0x80000001, ECX, 0, ctxt, ops) +#define vcpu_has_cr8_legacy() vcpu_has(0x80000001, ECX, 4, ctxt, ops) +#define vcpu_has_lzcnt() vcpu_has(0x80000001, ECX, 5, ctxt, ops) +#define vcpu_has_sse4a() vcpu_has(0x80000001, ECX, 6, ctxt, ops) +#define vcpu_has_misalignsse() vcpu_has(0x80000001, ECX, 7, ctxt, ops) +#define vcpu_has_xop() vcpu_has(0x80000001, ECX, 12, ctxt, ops) +#define vcpu_has_fma4() vcpu_has(0x80000001, ECX, 16, ctxt, ops) +#define vcpu_has_tbm() vcpu_has(0x80000001, ECX, 21, ctxt, ops) +#define vcpu_has_bmi1() vcpu_has(7, EBX, 3, ctxt, ops) +#define vcpu_has_hle() vcpu_has(7, EBX, 4, ctxt, ops) +#define vcpu_has_avx2() vcpu_has(7, EBX, 5, ctxt, ops) +#define vcpu_has_bmi2() vcpu_has(7, EBX, 8, ctxt, ops) +#define vcpu_has_rtm() vcpu_has(7, EBX, 11, ctxt, ops) +#define vcpu_has_mpx() vcpu_has(7, EBX, 14, ctxt, ops) +#define vcpu_has_avx512f() vcpu_has(7, EBX, 16, ctxt, ops) +#define vcpu_has_avx512dq() vcpu_has(7, EBX, 17, ctxt, ops) +#define vcpu_has_rdseed() vcpu_has(7, EBX, 18, ctxt, ops) +#define vcpu_has_adx() vcpu_has(7, EBX, 19, ctxt, ops) +#define vcpu_has_smap() vcpu_has(7, EBX, 20, ctxt, ops) +#define vcpu_has_clflushopt() vcpu_has(7, EBX, 23, ctxt, ops) +#define vcpu_has_clwb() vcpu_has(7, EBX, 24, ctxt, ops) +#define vcpu_has_sha() vcpu_has(7, EBX, 29, ctxt, ops) +#define vcpu_has_avx512bw() vcpu_has(7, EBX, 30, ctxt, ops) +#define vcpu_has_avx512vl() vcpu_has(7, EBX, 31, ctxt, ops) +#define vcpu_has_rdpid() vcpu_has(7, ECX, 22, ctxt, ops) +#define vcpu_has_clzero() vcpu_has(0x80000008, EBX, 0, ctxt, ops) + +#define vcpu_must_have(feat) generate_exception_if(!vcpu_has_##feat(), EXC_UD) #ifdef __XEN__ /* @@ -1853,10 +1882,11 @@ static bool vcpu_has( * emulation code is using the same instruction class for carrying out * the actual operation. */ -#define host_and_vcpu_must_have(feat) ({ \ - generate_exception_if(!cpu_has_##feat, EXC_UD); \ - vcpu_must_have(feat); \ -}) +#define host_and_vcpu_must_have(feat) \ + ({ \ + generate_exception_if(!cpu_has_##feat, EXC_UD); \ + vcpu_must_have(feat); \ + }) #else /* * For the test harness both are fine to be used interchangeably, i.e. @@ -1873,13 +1903,10 @@ static void init_context(struct x86_emulate_ctxt *ctxt) x86_emul_reset_event(ctxt); } -static int -realmode_load_seg( - enum x86_segment seg, - uint16_t sel, - struct segment_register *sreg, - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +static int realmode_load_seg(enum x86_segment seg, uint16_t sel, + struct segment_register *sreg, + struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { int rc; @@ -1888,7 +1915,7 @@ realmode_load_seg( if ( (rc = ops->read_segment(seg, sreg, ctxt)) == X86EMUL_OKAY ) { - sreg->sel = sel; + sreg->sel = sel; sreg->base = (uint32_t)sel << 4; } @@ -1900,16 +1927,16 @@ realmode_load_seg( * - suppress any exceptions other than #PF, * - don't commit any state. */ -static int -protmode_load_seg( - enum x86_segment seg, - uint16_t sel, bool is_ret, - struct segment_register *sreg, - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +static int protmode_load_seg(enum x86_segment seg, uint16_t sel, bool is_ret, + struct segment_register *sreg, + struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { enum x86_segment sel_seg = (sel & 4) ? 
x86_seg_ldtr : x86_seg_gdtr; - struct { uint32_t a, b; } desc, desc_hi = {}; + struct + { + uint32_t a, b; + } desc, desc_hi = {}; uint8_t dpl, rpl; int cpl = get_cpl(ctxt, ops); uint32_t a_flag = 0x100; @@ -1921,11 +1948,11 @@ protmode_load_seg( /* NULL selector? */ if ( (sel & 0xfffc) == 0 ) { - switch ( seg ) + switch (seg) { case x86_seg_ss: if ( mode_64bit() && (cpl != 3) && (cpl == sel) ) - default: + default: break; /* fall through */ case x86_seg_cs: @@ -1950,7 +1977,7 @@ protmode_load_seg( if ( is_x86_system_segment(seg) && (sel & 4) ) goto raise_exn; - switch ( rc = ops->read(sel_seg, sel & 0xfff8, &desc, sizeof(desc), ctxt) ) + switch (rc = ops->read(sel_seg, sel & 0xfff8, &desc, sizeof(desc), ctxt)) { case X86EMUL_OKAY: break; @@ -1974,23 +2001,23 @@ protmode_load_seg( dpl = (desc.b >> 13) & 3; rpl = sel & 3; - switch ( seg ) + switch (seg) { case x86_seg_cs: /* Code segment? */ - if ( !(desc.b & (1u<<11)) ) + if ( !(desc.b & (1u << 11)) ) goto raise_exn; - if ( is_ret - ? /* - * Really rpl < cpl, but our sole caller doesn't handle - * privilege level changes. - */ - rpl != cpl || (desc.b & (1 << 10) ? dpl > rpl : dpl != rpl) - : desc.b & (1 << 10) - /* Conforming segment: check DPL against CPL. */ - ? dpl > cpl - /* Non-conforming segment: check RPL and DPL against CPL. */ - : rpl > cpl || dpl != cpl ) + if ( is_ret ? /* + * Really rpl < cpl, but our sole caller doesn't handle + * privilege level changes. + */ + rpl != cpl || (desc.b & (1 << 10) ? dpl > rpl : dpl != rpl) + : desc.b & (1 << 10) + /* Conforming segment: check DPL against CPL. */ + ? dpl > cpl + /* Non-conforming segment: check RPL and DPL against + CPL. */ + : rpl > cpl || dpl != cpl ) goto raise_exn; /* * 64-bit code segments (L bit set) must have D bit clear. @@ -2003,29 +2030,29 @@ protmode_load_seg( break; case x86_seg_ss: /* Writable data segment? */ - if ( (desc.b & (5u<<9)) != (1u<<9) ) + if ( (desc.b & (5u << 9)) != (1u << 9) ) goto raise_exn; if ( (dpl != cpl) || (dpl != rpl) ) goto raise_exn; break; case x86_seg_ldtr: /* LDT system segment? */ - if ( (desc.b & (15u<<8)) != (2u<<8) ) + if ( (desc.b & (15u << 8)) != (2u << 8) ) goto raise_exn; a_flag = 0; break; case x86_seg_tr: /* Available TSS system segment? */ - if ( (desc.b & (15u<<8)) != (9u<<8) ) + if ( (desc.b & (15u << 8)) != (9u << 8) ) goto raise_exn; a_flag = 0x200; /* busy flag */ break; default: /* Readable code or data segment? */ - if ( (desc.b & (5u<<9)) == (4u<<9) ) + if ( (desc.b & (5u << 9)) == (4u << 9) ) goto raise_exn; /* Non-conforming segment: check DPL against RPL and CPL. */ - if ( ((desc.b & (6u<<9)) != (6u<<9)) && + if ( ((desc.b & (6u << 9)) != (6u << 9)) && ((dpl < cpl) || (dpl < rpl)) ) goto raise_exn; break; @@ -2057,15 +2084,16 @@ protmode_load_seg( * - only their low 8-byte bytes read on Intel, * - all 16 bytes read with the high 8 bytes ignored on AMD. */ - bool wide = desc.b & 0x1000 - ? false : (desc.b & 0xf00) != 0xc00 && - ctxt->vendor != X86_VENDOR_AMD - ? mode_64bit() : ctxt->lma; + bool wide = desc.b & 0x1000 ? false + : (desc.b & 0xf00) != 0xc00 && + ctxt->vendor != X86_VENDOR_AMD + ? 
mode_64bit() + : ctxt->lma; if ( wide ) { - switch ( rc = ops->read(sel_seg, (sel & 0xfff8) + 8, - &desc_hi, sizeof(desc_hi), ctxt) ) + switch (rc = ops->read(sel_seg, (sel & 0xfff8) + 8, &desc_hi, + sizeof(desc_hi), ctxt)) { case X86EMUL_OKAY: break; @@ -2093,8 +2121,8 @@ protmode_load_seg( uint32_t new_desc_b = desc.b | a_flag; fail_if(!ops->cmpxchg); - switch ( (rc = ops->cmpxchg(sel_seg, (sel & 0xfff8) + 4, &desc.b, - &new_desc_b, sizeof(desc.b), true, ctxt)) ) + switch ((rc = ops->cmpxchg(sel_seg, (sel & 0xfff8) + 4, &desc.b, + &new_desc_b, sizeof(desc.b), true, ctxt))) { case X86EMUL_OKAY: break; @@ -2115,32 +2143,27 @@ protmode_load_seg( desc.b = new_desc_b; } - sreg->base = (((uint64_t)desc_hi.a << 32) | - ((desc.b << 0) & 0xff000000u) | - ((desc.b << 16) & 0x00ff0000u) | - ((desc.a >> 16) & 0x0000ffffu)); - sreg->attr = (((desc.b >> 8) & 0x00ffu) | - ((desc.b >> 12) & 0x0f00u)); + sreg->base = + (((uint64_t)desc_hi.a << 32) | ((desc.b << 0) & 0xff000000u) | + ((desc.b << 16) & 0x00ff0000u) | ((desc.a >> 16) & 0x0000ffffu)); + sreg->attr = (((desc.b >> 8) & 0x00ffu) | ((desc.b >> 12) & 0x0f00u)); sreg->limit = (desc.b & 0x000f0000u) | (desc.a & 0x0000ffffu); if ( sreg->g ) sreg->limit = (sreg->limit << 12) | 0xfffu; sreg->sel = sel; return X86EMUL_OKAY; - raise_exn: +raise_exn: generate_exception_if(seg != x86_seg_none, fault_type, sel & 0xfffc); rc = X86EMUL_EXCEPTION; - done: +done: return rc; } -static int -load_seg( - enum x86_segment seg, - uint16_t sel, bool is_ret, - struct segment_register *sreg, - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +static int load_seg(enum x86_segment seg, uint16_t sel, bool is_ret, + struct segment_register *sreg, + struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { struct segment_register reg; int rc; @@ -2184,18 +2207,14 @@ const uint8_t cpu_user_regs_gpr_offsets[] = { #endif }; -static void *_decode_gpr( - struct cpu_user_regs *regs, unsigned int modrm_reg, bool legacy) +static void *_decode_gpr(struct cpu_user_regs *regs, unsigned int modrm_reg, + bool legacy) { static const uint8_t byte_reg_offsets[] = { - offsetof(struct cpu_user_regs, al), - offsetof(struct cpu_user_regs, cl), - offsetof(struct cpu_user_regs, dl), - offsetof(struct cpu_user_regs, bl), - offsetof(struct cpu_user_regs, ah), - offsetof(struct cpu_user_regs, ch), - offsetof(struct cpu_user_regs, dh), - offsetof(struct cpu_user_regs, bh), + offsetof(struct cpu_user_regs, al), offsetof(struct cpu_user_regs, cl), + offsetof(struct cpu_user_regs, dl), offsetof(struct cpu_user_regs, bl), + offsetof(struct cpu_user_regs, ah), offsetof(struct cpu_user_regs, ch), + offsetof(struct cpu_user_regs, dh), offsetof(struct cpu_user_regs, bh), }; if ( !legacy ) @@ -2213,9 +2232,9 @@ static void *_decode_gpr( return (void *)regs + byte_reg_offsets[modrm_reg]; } -static unsigned long *decode_vex_gpr( - unsigned int vex_reg, struct cpu_user_regs *regs, - const struct x86_emulate_ctxt *ctxt) +static unsigned long *decode_vex_gpr(unsigned int vex_reg, + struct cpu_user_regs *regs, + const struct x86_emulate_ctxt *ctxt) { return decode_gpr(regs, ~vex_reg & (mode_64bit() ? 
0xf : 7)); } @@ -2223,7 +2242,7 @@ static unsigned long *decode_vex_gpr( static unsigned int decode_disp8scale(enum disp8scale scale, const struct x86_emulate_state *state) { - switch ( scale ) + switch (scale) { case d8s_bw: return state->evex.w; @@ -2233,7 +2252,7 @@ static unsigned int decode_disp8scale(enum disp8scale scale, return scale; if ( state->evex.brs ) { - case d8s_dq: + case d8s_dq: return 2 + state->evex.w; } break; @@ -2242,7 +2261,7 @@ static unsigned int decode_disp8scale(enum disp8scale scale, return 2 + (state->op_bytes == 8); } - switch ( state->simd_size ) + switch (state->simd_size) { case simd_any_fp: case simd_single_fp: @@ -2265,19 +2284,21 @@ static unsigned int decode_disp8scale(enum disp8scale scale, return 4 + state->evex.lr - (scale - d8s_vl); } -#define avx512_vlen_check(lig) do { \ - switch ( evex.lr ) \ - { \ - default: \ - generate_exception(EXC_UD); \ - case 2: \ - break; \ - case 0: case 1: \ - if ( !(lig) ) \ - host_and_vcpu_must_have(avx512vl); \ - break; \ - } \ -} while ( false ) +#define avx512_vlen_check(lig) \ + do { \ + switch (evex.lr) \ + { \ + default: \ + generate_exception(EXC_UD); \ + case 2: \ + break; \ + case 0: \ + case 1: \ + if ( !(lig) ) \ + host_and_vcpu_must_have(avx512vl); \ + break; \ + } \ + } while ( false ) static bool is_aligned(enum x86_segment seg, unsigned long offs, unsigned int size, struct x86_emulate_ctxt *ctxt, @@ -2309,7 +2330,8 @@ static bool is_branch_step(struct x86_emulate_ctxt *ctxt, uint64_t debugctl; return ops->read_msr && - ops->read_msr(MSR_IA32_DEBUGCTLMSR, &debugctl, ctxt) == X86EMUL_OKAY && + ops->read_msr(MSR_IA32_DEBUGCTLMSR, &debugctl, ctxt) == + X86EMUL_OKAY && (debugctl & IA32_DEBUGCTLMSR_BTF); } @@ -2319,9 +2341,8 @@ static bool umip_active(struct x86_emulate_ctxt *ctxt, unsigned long cr4; /* Intentionally not using mode_ring0() here to avoid its fail_if(). */ - return get_cpl(ctxt, ops) > 0 && - ops->read_cr && ops->read_cr(4, &cr4, ctxt) == X86EMUL_OKAY && - (cr4 & X86_CR4_UMIP); + return get_cpl(ctxt, ops) > 0 && ops->read_cr && + ops->read_cr(4, &cr4, ctxt) == X86EMUL_OKAY && (cr4 & X86_CR4_UMIP); } static void adjust_bnd(struct x86_emulate_ctxt *ctxt, @@ -2353,15 +2374,12 @@ static void adjust_bnd(struct x86_emulate_ctxt *ctxt, */ xstate_set_init(X86_XCR0_BNDREGS); } - done:; +done:; } -int x86emul_unhandleable_rw( - enum x86_segment seg, - unsigned long offset, - void *p_data, - unsigned int bytes, - struct x86_emulate_ctxt *ctxt) +int x86emul_unhandleable_rw(enum x86_segment seg, unsigned long offset, + void *p_data, unsigned int bytes, + struct x86_emulate_ctxt *ctxt) { return X86EMUL_UNHANDLEABLE; } @@ -2381,15 +2399,13 @@ int x86emul_unhandleable_rw( #define evex_encoded() (evex.mbs) #define ea (state->ea) -static int -x86_decode_onebyte( - struct x86_emulate_state *state, - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +static int x86_decode_onebyte(struct x86_emulate_state *state, + struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { int rc = X86EMUL_OKAY; - switch ( ctxt->opcode ) + switch (ctxt->opcode) { case 0x06: /* push %%es */ case 0x07: /* pop %%es */ @@ -2428,17 +2444,19 @@ x86_decode_onebyte( imm2 = insn_fetch_type(uint16_t); break; - case 0xa0: case 0xa1: /* mov mem.offs,{%al,%ax,%eax,%rax} */ - case 0xa2: case 0xa3: /* mov {%al,%ax,%eax,%rax},mem.offs */ + case 0xa0: + case 0xa1: /* mov mem.offs,{%al,%ax,%eax,%rax} */ + case 0xa2: + case 0xa3: /* mov {%al,%ax,%eax,%rax},mem.offs */ /* Source EA is not encoded via ModRM. 
*/ ea.type = OP_MEM; ea.mem.off = insn_fetch_bytes(ad_bytes); break; - case 0xb8 ... 0xbf: /* mov imm{16,32,64},r{16,32,64} */ + case 0xb8 ... 0xbf: /* mov imm{16,32,64},r{16,32,64} */ if ( op_bytes == 8 ) /* Fetch more bytes to obtain imm64. */ - imm1 = ((uint32_t)imm1 | - ((uint64_t)insn_fetch_type(uint32_t) << 32)); + imm1 = + ((uint32_t)imm1 | ((uint64_t)insn_fetch_type(uint32_t) << 32)); break; case 0xc8: /* enter imm16,imm8 */ @@ -2446,7 +2464,7 @@ x86_decode_onebyte( break; case 0xff: /* Grp5 */ - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 2: /* call (near) */ case 4: /* jmp (near) */ @@ -2462,34 +2480,33 @@ x86_decode_onebyte( break; } - done: +done: return rc; } -static int -x86_decode_twobyte( - struct x86_emulate_state *state, - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +static int x86_decode_twobyte(struct x86_emulate_state *state, + struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { int rc = X86EMUL_OKAY; - switch ( ctxt->opcode & X86EMUL_OPC_MASK ) + switch (ctxt->opcode & X86EMUL_OPC_MASK) { case 0x00: /* Grp6 */ - switch ( modrm_reg & 6 ) + switch (modrm_reg & 6) { case 0: state->desc |= DstMem | SrcImplicit | Mov; break; - case 2: case 4: + case 2: + case 4: state->desc |= SrcMem16; break; } break; case 0x78: - switch ( vex.pfx ) + switch (vex.pfx) { case vex_66: /* extrq $imm8, $imm8, xmm */ case vex_f2: /* insertq $imm8, $imm8, xmm, xmm */ @@ -2510,14 +2527,16 @@ x86_decode_twobyte( ctxt->opcode |= MASK_INSR(vex.pfx, X86EMUL_OPC_PFX_MASK); break; - case 0x20: case 0x22: /* mov to/from cr */ + case 0x20: + case 0x22: /* mov to/from cr */ if ( lock_prefix && vcpu_has_cr8_legacy() ) { modrm_reg += 8; lock_prefix = false; } /* fall through */ - case 0x21: case 0x23: /* mov to/from dr */ + case 0x21: + case 0x23: /* mov to/from dr */ ASSERT(ea.type == OP_REG); /* Early operand adjustment ensures this. */ generate_exception_if(lock_prefix, EXC_UD); op_bytes = mode_64bit() ? 8 : 4; @@ -2527,8 +2546,8 @@ x86_decode_twobyte( ctxt->opcode |= MASK_INSR(vex.pfx, X86EMUL_OPC_PFX_MASK); if ( vex.pfx == vex_f3 ) /* movq xmm/m64,xmm */ { - case X86EMUL_OPC_VEX_F3(0, 0x7e): /* vmovq xmm/m64,xmm */ - case X86EMUL_OPC_EVEX_F3(0, 0x7e): /* vmovq xmm/m64,xmm */ + case X86EMUL_OPC_VEX_F3(0, 0x7e): /* vmovq xmm/m64,xmm */ + case X86EMUL_OPC_EVEX_F3(0, 0x7e): /* vmovq xmm/m64,xmm */ state->desc = DstImplicit | SrcMem | TwoOp; state->simd_size = simd_other; /* Avoid the state->desc clobbering of TwoOp below. */ @@ -2552,7 +2571,7 @@ x86_decode_twobyte( ctxt->opcode |= MASK_INSR(vex.pfx, X86EMUL_OPC_PFX_MASK); /* fall through */ case X86EMUL_OPC_VEX(0, 0xae): - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 2: /* {,v}ldmxcsr */ state->desc = DstImplicit | SrcMem | Mov; @@ -2588,8 +2607,8 @@ x86_decode_twobyte( ctxt->opcode |= MASK_INSR(vex.pfx, X86EMUL_OPC_PFX_MASK); if ( vex.pfx == vex_f2 ) /* lddqu mem,xmm */ { - /* fall through */ - case X86EMUL_OPC_VEX_F2(0, 0xf0): /* vlddqu mem,{x,y}mm */ + /* fall through */ + case X86EMUL_OPC_VEX_F2(0, 0xf0): /* vlddqu mem,{x,y}mm */ state->desc = DstImplicit | SrcMem | TwoOp; state->simd_size = simd_other; /* Avoid the state->desc clobbering of TwoOp below. */ @@ -2603,21 +2622,18 @@ x86_decode_twobyte( * three operands. Those which do really have two operands * should have exited earlier. 
*/ - if ( state->simd_size && vex.opcx && - (vex.pfx & VEX_PREFIX_SCALAR_MASK) ) + if ( state->simd_size && vex.opcx && (vex.pfx & VEX_PREFIX_SCALAR_MASK) ) state->desc &= ~TwoOp; - done: +done: return rc; } -static int -x86_decode_0f38( - struct x86_emulate_state *state, - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +static int x86_decode_0f38(struct x86_emulate_state *state, + struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { - switch ( ctxt->opcode & X86EMUL_OPC_MASK ) + switch (ctxt->opcode & X86EMUL_OPC_MASK) { case 0x00 ... 0xef: case 0xf2 ... 0xf5: @@ -2661,21 +2677,19 @@ x86_decode_0f38( return X86EMUL_OKAY; } -static int -x86_decode_0f3a( - struct x86_emulate_state *state, - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +static int x86_decode_0f3a(struct x86_emulate_state *state, + struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { if ( !vex.opcx ) ctxt->opcode |= MASK_INSR(vex.pfx, X86EMUL_OPC_PFX_MASK); - switch ( ctxt->opcode & X86EMUL_OPC_MASK ) + switch (ctxt->opcode & X86EMUL_OPC_MASK) { - case X86EMUL_OPC_66(0, 0x14) - ... X86EMUL_OPC_66(0, 0x17): /* pextr*, extractps */ - case X86EMUL_OPC_VEX_66(0, 0x14) - ... X86EMUL_OPC_VEX_66(0, 0x17): /* vpextr*, vextractps */ + case X86EMUL_OPC_66(0, 0x14)... X86EMUL_OPC_66( + 0, 0x17): /* pextr*, extractps */ + case X86EMUL_OPC_VEX_66(0, 0x14)... X86EMUL_OPC_VEX_66( + 0, 0x17): /* vpextr*, vextractps */ case X86EMUL_OPC_VEX_F2(0, 0xf0): /* rorx */ break; @@ -2699,11 +2713,9 @@ x86_decode_0f3a( return X86EMUL_OKAY; } -static int -x86_decode( - struct x86_emulate_state *state, - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +static int x86_decode(struct x86_emulate_state *state, + struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { uint8_t b, d; unsigned int def_op_bytes, def_ad_bytes, opcode; @@ -2720,7 +2732,7 @@ x86_decode( state->regs = ctxt->regs; state->ip = ctxt->regs->r(ip); - op_bytes = def_op_bytes = ad_bytes = def_ad_bytes = ctxt->addr_size/8; + op_bytes = def_op_bytes = ad_bytes = def_ad_bytes = ctxt->addr_size / 8; if ( op_bytes == 8 ) { op_bytes = def_op_bytes = 4; @@ -2730,9 +2742,9 @@ x86_decode( } /* Prefix bytes. */ - for ( ; ; ) + for ( ;; ) { - switch ( b = insn_fetch_type(uint8_t) ) + switch (b = insn_fetch_type(uint8_t)) { case 0x66: /* operand-size override */ op_bytes = def_op_bytes ^ 6; @@ -2781,7 +2793,7 @@ x86_decode( /* Any legacy prefix after a REX prefix nullifies its effect. */ rex_prefix = 0; } - done_prefixes: +done_prefixes: /* %{e,c,s,d}s overrides are ignored in 64bit mode. */ if ( mode_64bit() && override_seg < x86_seg_fs ) @@ -2797,7 +2809,7 @@ x86_decode( /* Two-byte opcode. */ b = insn_fetch_type(uint8_t); d = twobyte_table[b].desc; - switch ( b ) + switch (b) { default: opcode = b | MASK_INSR(0x0f, X86EMUL_OPC_EXT_MASK); @@ -2825,9 +2837,9 @@ x86_decode( modrm = insn_fetch_type(uint8_t); modrm_mod = (modrm & 0xc0) >> 6; - if ( !ext && ((b & ~1) == 0xc4 || (b == 0x8f && (modrm & 0x18)) || - b == 0x62) ) - switch ( def_ad_bytes ) + if ( !ext && + ((b & ~1) == 0xc4 || (b == 0x8f && (modrm & 0x18)) || b == 0x62) ) + switch (def_ad_bytes) { default: BUG(); /* Shouldn't be possible. 
*/ @@ -2879,7 +2891,7 @@ x86_decode( op_bytes = 4; vex.b = 1; } - switch ( b ) + switch (b) { case 0x62: opcode = X86EMUL_OPC_EVEX_; @@ -2910,7 +2922,7 @@ x86_decode( if ( b != 0x8f ) { b = insn_fetch_type(uint8_t); - switch ( ext ) + switch (ext) { case vex_0f: opcode |= MASK_INSR(0x0f, X86EMUL_OPC_EXT_MASK); @@ -2965,7 +2977,7 @@ x86_decode( d &= ~ModRM; #undef ModRM /* Only its aliases are valid to use from here on. */ modrm_reg = ((rex_prefix & 4) << 1) | ((modrm & 0x38) >> 3); - modrm_rm = modrm & 0x07; + modrm_rm = modrm & 0x07; /* * Early operand adjustments. Only ones affecting further processing @@ -2973,13 +2985,13 @@ x86_decode( * normally be only addition/removal of SrcImm/SrcImm16, so their * fetching can be taken care of by the common code below. */ - switch ( ext ) + switch (ext) { case ext_none: - switch ( b ) + switch (b) { case 0xf6 ... 0xf7: /* Grp3 */ - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 0 ... 1: /* test */ d |= DstMem | SrcImm; @@ -3007,7 +3019,7 @@ x86_decode( if ( evex_encoded() ) disp8scale = decode_disp8scale(twobyte_table[b].d8s, state); - switch ( b ) + switch (b) { case 0x20: /* mov cr,reg */ case 0x21: /* mov dr,reg */ @@ -3029,8 +3041,7 @@ x86_decode( break; case ext_0f38: - d = ext0f38_table[b].to_mem ? DstMem | SrcReg - : DstReg | SrcMem; + d = ext0f38_table[b].to_mem ? DstMem | SrcReg : DstReg | SrcMem; if ( ext0f38_table[b].two_op ) d |= TwoOp; if ( ext0f38_table[b].vsib ) @@ -3080,7 +3091,7 @@ x86_decode( /* 16-bit ModR/M decode. */ generate_exception_if(d & vSIB, EXC_UD); ea.type = OP_MEM; - switch ( modrm_rm ) + switch (modrm_rm) { case 0: ea.mem.off = state->regs->bx + state->regs->si; @@ -3112,7 +3123,7 @@ x86_decode( ea.mem.off = state->regs->bx; break; } - switch ( modrm_mod ) + switch (modrm_mod) { case 0: if ( modrm_rm == 6 ) @@ -3146,16 +3157,16 @@ x86_decode( ea.mem.off += insn_fetch_type(int32_t); else if ( sib_base == 4 ) { - ea.mem.seg = x86_seg_ss; + ea.mem.seg = x86_seg_ss; ea.mem.off += state->regs->r(sp); if ( !ext && (b == 0x8f) ) /* POP computes its EA post increment. */ - ea.mem.off += ((mode_64bit() && (op_bytes == 4)) - ? 8 : op_bytes); + ea.mem.off += + ((mode_64bit() && (op_bytes == 4)) ? 8 : op_bytes); } else if ( sib_base == 5 ) { - ea.mem.seg = x86_seg_ss; + ea.mem.seg = x86_seg_ss; ea.mem.off += state->regs->r(bp); } else @@ -3169,7 +3180,7 @@ x86_decode( if ( (modrm_rm == 5) && (modrm_mod != 0) ) ea.mem.seg = x86_seg_ss; } - switch ( modrm_mod ) + switch (modrm_mod) { case 0: if ( (modrm_rm & 7) != 5 ) @@ -3196,7 +3207,7 @@ x86_decode( ea.mem.seg = override_seg; /* Fetch the immediate operand, if present. */ - switch ( d & SrcMask ) + switch (d & SrcMask) { unsigned int bytes; @@ -3205,15 +3216,21 @@ x86_decode( bytes = op_bytes != 8 ? op_bytes : 4; else { - case SrcImmByte: + case SrcImmByte: bytes = 1; } /* NB. Immediates are sign-extended as necessary. */ - switch ( bytes ) + switch (bytes) { - case 1: imm1 = insn_fetch_type(int8_t); break; - case 2: imm1 = insn_fetch_type(int16_t); break; - case 4: imm1 = insn_fetch_type(int32_t); break; + case 1: + imm1 = insn_fetch_type(int8_t); + break; + case 2: + imm1 = insn_fetch_type(int16_t); + break; + case 4: + imm1 = insn_fetch_type(int32_t); + break; } break; case SrcImm16: @@ -3224,7 +3241,7 @@ x86_decode( ctxt->opcode = opcode; state->desc = d; - switch ( ext ) + switch (ext) { case ext_none: rc = x86_decode_onebyte(state, ctxt, ops); @@ -3279,7 +3296,7 @@ x86_decode( * Simple op_bytes calculations. 
More complicated cases produce 0 * and are further handled during execute. */ - switch ( state->simd_size ) + switch (state->simd_size) { case simd_none: /* @@ -3292,7 +3309,7 @@ x86_decode( break; case simd_packed_int: - switch ( vex.pfx ) + switch (vex.pfx) { case vex_none: if ( !vex.opcx ) @@ -3315,7 +3332,7 @@ x86_decode( { op_bytes = 0; break; - case simd_packed_fp: + case simd_packed_fp: if ( vex.pfx & VEX_PREFIX_SCALAR_MASK ) { op_bytes = 0; @@ -3324,7 +3341,7 @@ x86_decode( } /* fall through */ case simd_any_fp: - switch ( vex.pfx ) + switch (vex.pfx) { default: op_bytes = 16 << evex.lr; @@ -3362,7 +3379,7 @@ x86_decode( break; } - done: +done: return rc; } @@ -3373,10 +3390,8 @@ x86_decode( /* Undo DEBUG wrapper. */ #undef x86_emulate -int -x86_emulate( - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +int x86_emulate(struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { /* Shadow copy of register state. Committed on successful emulation. */ struct cpu_user_regs _regs = *ctxt->regs; @@ -3385,17 +3400,18 @@ x86_emulate( uint8_t b, d, *opc = NULL; unsigned int first_byte = 0, elem_bytes, insn_bytes = 0; uint64_t op_mask = ~0ULL; - bool singlestep = (_regs.eflags & X86_EFLAGS_TF) && - !is_branch_step(ctxt, ops); + bool singlestep = + (_regs.eflags & X86_EFLAGS_TF) && !is_branch_step(ctxt, ops); bool sfence = false, fault_suppression = false; - struct operand src = { .reg = PTR_POISON }; - struct operand dst = { .reg = PTR_POISON }; + struct operand src = {.reg = PTR_POISON}; + struct operand dst = {.reg = PTR_POISON}; unsigned long cr4; enum x86_emulate_fpu_type fpu_type = X86EMUL_FPU_none; struct x86_emulate_stub stub = {}; DECLARE_ALIGNED(mmval_t, mmval); #ifdef __XEN__ - struct { + struct + { union stub_exception_token info; unsigned int line; } stub_exn; @@ -3405,8 +3421,7 @@ x86_emulate( init_context(ctxt); - generate_exception_if((mode_vif() && - (_regs.eflags & X86_EFLAGS_VIF) && + generate_exception_if((mode_vif() && (_regs.eflags & X86_EFLAGS_VIF) && (_regs.eflags & X86_EFLAGS_VIP)), EXC_GP, 0); @@ -3445,7 +3460,7 @@ x86_emulate( memset(mmvalp, 0xaa /* arbitrary */, sizeof(*mmvalp)); /* Decode and fetch the source operand: register, memory or immediate. 
*/ - switch ( d & SrcMask ) + switch (d & SrcMask) { case SrcNone: /* case SrcImplicit: */ src.type = OP_NONE; @@ -3461,11 +3476,17 @@ x86_emulate( else { src.reg = decode_gpr(&_regs, modrm_reg); - switch ( (src.bytes = op_bytes) ) + switch ((src.bytes = op_bytes)) { - case 2: src.val = *(uint16_t *)src.reg; break; - case 4: src.val = *(uint32_t *)src.reg; break; - case 8: src.val = *(uint64_t *)src.reg; break; + case 2: + src.val = *(uint16_t *)src.reg; + break; + case 4: + src.val = *(uint32_t *)src.reg; + break; + case 8: + src.val = *(uint64_t *)src.reg; + break; } } break; @@ -3480,16 +3501,24 @@ x86_emulate( src = ea; if ( src.type == OP_REG ) { - switch ( src.bytes ) + switch (src.bytes) { - case 1: src.val = *(uint8_t *)src.reg; break; - case 2: src.val = *(uint16_t *)src.reg; break; - case 4: src.val = *(uint32_t *)src.reg; break; - case 8: src.val = *(uint64_t *)src.reg; break; + case 1: + src.val = *(uint8_t *)src.reg; + break; + case 2: + src.val = *(uint16_t *)src.reg; + break; + case 4: + src.val = *(uint32_t *)src.reg; + break; + case 8: + src.val = *(uint64_t *)src.reg; + break; } } - else if ( (rc = read_ulong(src.mem.seg, src.mem.off, - &src.val, src.bytes, ctxt, ops)) ) + else if ( (rc = read_ulong(src.mem.seg, src.mem.off, &src.val, + src.bytes, ctxt, ops)) ) goto done; break; case SrcImm: @@ -3497,16 +3526,16 @@ x86_emulate( src.bytes = op_bytes != 8 ? op_bytes : 4; else { - case SrcImmByte: + case SrcImmByte: src.bytes = 1; } - src.type = OP_IMM; - src.val = imm1; + src.type = OP_IMM; + src.val = imm1; break; case SrcImm16: - src.type = OP_IMM; + src.type = OP_IMM; src.bytes = 2; - src.val = imm1; + src.val = imm1; break; } @@ -3524,7 +3553,7 @@ x86_emulate( insn_bytes = 5; stb[5] = 0xc3; - invoke_stub("", "", "+m" (op_mask) : "a" (&op_mask)); + invoke_stub("", "", "+m"(op_mask) : "a"(&op_mask)); insn_bytes = 0; put_stub(stub); @@ -3533,7 +3562,7 @@ x86_emulate( } /* Decode (but don't fetch) the destination operand: register or memory. */ - switch ( d & DstMask ) + switch (d & DstMask) { case DstNone: /* case DstImplicit: */ /* @@ -3541,8 +3570,8 @@ x86_emulate( * CMPXCHG{8,16}B (MOV CRn is being handled elsewhere). 
*/ generate_exception_if(lock_prefix && - (vex.opcx || ext != ext_0f || b != 0xc7 || - (modrm_reg & 7) != 1 || ea.type != OP_MEM), + (vex.opcx || ext != ext_0f || b != 0xc7 || + (modrm_reg & 7) != 1 || ea.type != OP_MEM), EXC_UD); dst.type = OP_NONE; break; @@ -3559,11 +3588,17 @@ x86_emulate( else { dst.reg = decode_gpr(&_regs, modrm_reg); - switch ( (dst.bytes = op_bytes) ) + switch ((dst.bytes = op_bytes)) { - case 2: dst.val = *(uint16_t *)dst.reg; break; - case 4: dst.val = *(uint32_t *)dst.reg; break; - case 8: dst.val = *(uint64_t *)dst.reg; break; + case 2: + dst.val = *(uint16_t *)dst.reg; + break; + case 4: + dst.val = *(uint32_t *)dst.reg; + break; + case 8: + dst.val = *(uint64_t *)dst.reg; + break; } } break; @@ -3611,12 +3646,20 @@ x86_emulate( if ( dst.type == OP_REG ) { generate_exception_if(lock_prefix, EXC_UD); - switch ( dst.bytes ) + switch (dst.bytes) { - case 1: dst.val = *(uint8_t *)dst.reg; break; - case 2: dst.val = *(uint16_t *)dst.reg; break; - case 4: dst.val = *(uint32_t *)dst.reg; break; - case 8: dst.val = *(uint64_t *)dst.reg; break; + case 1: + dst.val = *(uint8_t *)dst.reg; + break; + case 2: + dst.val = *(uint16_t *)dst.reg; + break; + case 4: + dst.val = *(uint32_t *)dst.reg; + break; + case 8: + dst.val = *(uint64_t *)dst.reg; + break; } } else if ( d & Mov ) /* optimisation - avoid slow emulated read */ @@ -3628,15 +3671,15 @@ x86_emulate( else if ( !ops->rmw ) { fail_if(lock_prefix ? !ops->cmpxchg : !ops->write); - if ( (rc = read_ulong(dst.mem.seg, dst.mem.off, - &dst.val, dst.bytes, ctxt, ops)) ) + if ( (rc = read_ulong(dst.mem.seg, dst.mem.off, &dst.val, dst.bytes, + ctxt, ops)) ) goto done; dst.orig_val = dst.val; } break; } - switch ( ctxt->opcode ) + switch (ctxt->opcode) { enum x86_segment seg; struct segment_register cs, sreg; @@ -3645,80 +3688,93 @@ x86_emulate( unsigned int i, n; unsigned long dummy; - case 0x00: case 0x01: add: /* add reg,mem */ + case 0x00: + case 0x01: + add: /* add reg,mem */ if ( ops->rmw && dst.type == OP_MEM ) state->rmw = rmw_add; else { - case 0x02 ... 0x05: /* add */ + case 0x02 ... 0x05: /* add */ emulate_2op_SrcV("add", src, dst, _regs.eflags); } break; - case 0x08: case 0x09: or: /* or reg,mem */ - if ( ops->rmw && dst.type == OP_MEM ) - state->rmw = rmw_or; + case 0x08: + case 0x09: + or: /* or reg,mem */ + if ( ops->rmw && dst.type == OP_MEM ) state->rmw = rmw_or; else { - case 0x0a ... 0x0d: /* or */ + case 0x0a ... 0x0d: /* or */ emulate_2op_SrcV("or", src, dst, _regs.eflags); } break; - case 0x10: case 0x11: adc: /* adc reg,mem */ + case 0x10: + case 0x11: + adc: /* adc reg,mem */ if ( ops->rmw && dst.type == OP_MEM ) state->rmw = rmw_adc; else { - case 0x12 ... 0x15: /* adc */ + case 0x12 ... 0x15: /* adc */ emulate_2op_SrcV("adc", src, dst, _regs.eflags); } break; - case 0x18: case 0x19: sbb: /* sbb reg,mem */ + case 0x18: + case 0x19: + sbb: /* sbb reg,mem */ if ( ops->rmw && dst.type == OP_MEM ) state->rmw = rmw_sbb; else { - case 0x1a ... 0x1d: /* sbb */ + case 0x1a ... 0x1d: /* sbb */ emulate_2op_SrcV("sbb", src, dst, _regs.eflags); } break; - case 0x20: case 0x21: and: /* and reg,mem */ - if ( ops->rmw && dst.type == OP_MEM ) - state->rmw = rmw_and; + case 0x20: + case 0x21: + and: /* and reg,mem */ + if ( ops->rmw && dst.type == OP_MEM ) state->rmw = rmw_and; else { - case 0x22 ... 0x25: /* and */ + case 0x22 ... 
0x25: /* and */ emulate_2op_SrcV("and", src, dst, _regs.eflags); } break; - case 0x28: case 0x29: sub: /* sub reg,mem */ + case 0x28: + case 0x29: + sub: /* sub reg,mem */ if ( ops->rmw && dst.type == OP_MEM ) state->rmw = rmw_sub; else { - case 0x2a ... 0x2d: /* sub */ + case 0x2a ... 0x2d: /* sub */ emulate_2op_SrcV("sub", src, dst, _regs.eflags); } break; - case 0x30: case 0x31: xor: /* xor reg,mem */ - if ( ops->rmw && dst.type == OP_MEM ) - state->rmw = rmw_xor; + case 0x30: + case 0x31: + xor: /* xor reg,mem */ + if ( ops->rmw && dst.type == OP_MEM ) state->rmw = rmw_xor; else { - case 0x32 ... 0x35: /* xor */ + case 0x32 ... 0x35: /* xor */ emulate_2op_SrcV("xor", src, dst, _regs.eflags); } break; - case 0x38: case 0x39: cmp: /* cmp reg,mem */ + case 0x38: + case 0x39: + cmp: /* cmp reg,mem */ if ( ops->rmw && dst.type == OP_MEM && - (rc = read_ulong(dst.mem.seg, dst.mem.off, &dst.val, - dst.bytes, ctxt, ops)) != X86EMUL_OKAY ) + (rc = read_ulong(dst.mem.seg, dst.mem.off, &dst.val, dst.bytes, + ctxt, ops)) != X86EMUL_OKAY ) goto done; /* fall through */ case 0x3a ... 0x3d: /* cmp */ @@ -3727,22 +3783,22 @@ x86_emulate( dst.type = OP_NONE; break; - case 0x06: /* push %%es */ - case 0x0e: /* push %%cs */ - case 0x16: /* push %%ss */ - case 0x1e: /* push %%ds */ + case 0x06: /* push %%es */ + case 0x0e: /* push %%cs */ + case 0x16: /* push %%ss */ + case 0x1e: /* push %%ds */ case X86EMUL_OPC(0x0f, 0xa0): /* push %%fs */ case X86EMUL_OPC(0x0f, 0xa8): /* push %%gs */ fail_if(ops->read_segment == NULL); - if ( (rc = ops->read_segment((b >> 3) & 7, &sreg, - ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->read_segment((b >> 3) & 7, &sreg, ctxt)) != + X86EMUL_OKAY ) goto done; src.val = sreg.sel; goto push; - case 0x07: /* pop %%es */ - case 0x17: /* pop %%ss */ - case 0x1f: /* pop %%ds */ + case 0x07: /* pop %%es */ + case 0x17: /* pop %%ss */ + case 0x1f: /* pop %%ds */ case X86EMUL_OPC(0x0f, 0xa1): /* pop %%fs */ case X86EMUL_OPC(0x0f, 0xa9): /* pop %%gs */ fail_if(ops->write_segment == NULL); @@ -3759,7 +3815,8 @@ x86_emulate( break; case 0x27: /* daa */ - case 0x2f: /* das */ { + case 0x2f: /* das */ + { uint8_t al = _regs.al; unsigned int eflags = _regs.eflags; @@ -3796,10 +3853,10 @@ x86_emulate( break; case 0x40 ... 0x4f: /* inc/dec reg */ - dst.type = OP_REG; - dst.reg = decode_gpr(&_regs, b & 7); + dst.type = OP_REG; + dst.reg = decode_gpr(&_regs, b & 7); dst.bytes = op_bytes; - dst.val = *dst.reg; + dst.val = *dst.reg; if ( b & 8 ) emulate_1op("dec", dst, _regs.eflags); else @@ -3811,13 +3868,13 @@ x86_emulate( goto push; case 0x58 ... 0x5f: /* pop reg */ - dst.type = OP_REG; - dst.reg = decode_gpr(&_regs, (b & 7) | ((rex_prefix & 1) << 3)); + dst.type = OP_REG; + dst.reg = decode_gpr(&_regs, (b & 7) | ((rex_prefix & 1) << 3)); dst.bytes = op_bytes; if ( mode_64bit() && (dst.bytes == 4) ) dst.bytes = 8; - if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes), - &dst.val, dst.bytes, ctxt, ops)) != 0 ) + if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes), &dst.val, + dst.bytes, ctxt, ops)) != 0 ) goto done; break; @@ -3829,8 +3886,8 @@ x86_emulate( void *reg = decode_gpr(&_regs, i); if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes), - reg != &_regs.esp ? reg : &ea.val, - op_bytes, ctxt)) != 0 ) + reg != &_regs.esp ? 
reg : &ea.val, op_bytes, + ctxt)) != 0 ) goto done; } break; @@ -3840,8 +3897,8 @@ x86_emulate( { void *reg = decode_gpr(&_regs, 7 - i); - if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes), - &dst.val, op_bytes, ctxt, ops)) != 0 ) + if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes), &dst.val, + op_bytes, ctxt, ops)) != 0 ) goto done; if ( reg == &_regs.r(sp) ) continue; @@ -3852,16 +3909,17 @@ x86_emulate( } break; - case 0x62: /* bound */ { + case 0x62: /* bound */ + { int lb, ub, idx; generate_exception_if(src.type != OP_MEM, EXC_UD); if ( (rc = read_ulong(src.mem.seg, truncate_ea(src.mem.off + op_bytes), &ea.val, op_bytes, ctxt, ops)) ) goto done; - ub = (op_bytes == 2) ? (int16_t)ea.val : (int32_t)ea.val; - lb = (op_bytes == 2) ? (int16_t)src.val : (int32_t)src.val; - idx = (op_bytes == 2) ? (int16_t)dst.val : (int32_t)dst.val; + ub = (op_bytes == 2) ? (int16_t)ea.val : (int32_t)ea.val; + lb = (op_bytes == 2) ? (int16_t)src.val : (int32_t)src.val; + idx = (op_bytes == 2) ? (int16_t)dst.val : (int32_t)dst.val; generate_exception_if((idx < lb) || (idx > ub), EXC_BR); dst.type = OP_NONE; break; @@ -3873,8 +3931,8 @@ x86_emulate( /* movsxd */ if ( ea.type == OP_REG ) src.val = *ea.reg; - else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off, - &src.val, 4, ctxt, ops)) ) + else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off, &src.val, 4, + ctxt, ops)) ) goto done; dst.val = (int32_t)src.val; } @@ -3887,8 +3945,8 @@ x86_emulate( dst.bytes = 2; if ( dst.type == OP_REG ) dst.val = *dst.reg; - else if ( (rc = read_ulong(dst.mem.seg, dst.mem.off, - &dst.val, 2, ctxt, ops)) ) + else if ( (rc = read_ulong(dst.mem.seg, dst.mem.off, &dst.val, 2, + ctxt, ops)) ) goto done; if ( src_rpl > (dst.val & 3) ) { @@ -3908,7 +3966,7 @@ x86_emulate( case 0x6a: /* push imm8 */ push: ASSERT(d & Mov); /* writeback needed */ - dst.type = OP_MEM; + dst.type = OP_MEM; dst.bytes = mode_64bit() && (op_bytes == 4) ? 8 : op_bytes; dst.val = src.val; dst.mem.seg = x86_seg_ss; @@ -3919,12 +3977,13 @@ x86_emulate( case 0x6b: /* imul imm8 */ if ( ea.type == OP_REG ) dst.val = *ea.reg; - else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off, - &dst.val, op_bytes, ctxt, ops)) ) + else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off, &dst.val, op_bytes, + ctxt, ops)) ) goto done; goto imul; - case 0x6c ... 0x6d: /* ins %dx,%es:%edi */ { + case 0x6c ... 0x6d: /* ins %dx,%es:%edi */ + { unsigned long nr_reps = get_rep_prefix(false, true); unsigned int port = _regs.dx; @@ -3965,7 +4024,8 @@ x86_emulate( break; } - case 0x6e ... 0x6f: /* outs %esi,%dx */ { + case 0x6e ... 
0x6f: /* outs %esi,%dx */ + { unsigned long nr_reps = get_rep_prefix(true, false); unsigned int port = _regs.dx; @@ -3979,8 +4039,8 @@ x86_emulate( rc = X86EMUL_UNHANDLEABLE; if ( nr_reps == 1 && ops->write_io ) { - rc = read_ulong(ea.mem.seg, ea.mem.off, &dst.val, dst.bytes, - ctxt, ops); + rc = read_ulong(ea.mem.seg, ea.mem.off, &dst.val, dst.bytes, ctxt, + ops); if ( rc != X86EMUL_UNHANDLEABLE ) nr_reps = 0; } @@ -3989,8 +4049,8 @@ x86_emulate( &nr_reps, ctxt); if ( nr_reps >= 1 && rc == X86EMUL_UNHANDLEABLE ) { - if ( (rc = read_ulong(ea.mem.seg, ea.mem.off, &dst.val, - dst.bytes, ctxt, ops)) != X86EMUL_OKAY ) + if ( (rc = read_ulong(ea.mem.seg, ea.mem.off, &dst.val, dst.bytes, + ctxt, ops)) != X86EMUL_OKAY ) goto done; fail_if(ops->write_io == NULL); nr_reps = 0; @@ -4014,27 +4074,40 @@ x86_emulate( adjust_bnd(ctxt, ops, vex.pfx); break; - case 0x80: case 0x81: case 0x82: case 0x83: /* Grp1 */ - switch ( modrm_reg & 7 ) + case 0x80: + case 0x81: + case 0x82: + case 0x83: /* Grp1 */ + switch (modrm_reg & 7) { - case 0: goto add; - case 1: goto or; - case 2: goto adc; - case 3: goto sbb; - case 4: goto and; - case 5: goto sub; - case 6: goto xor; - case 7: goto cmp; + case 0: + goto add; + case 1: + goto or ; + case 2: + goto adc; + case 3: + goto sbb; + case 4: + goto and; + case 5: + goto sub; + case 6: + goto xor ; + case 7: + goto cmp; } break; case 0xa8 ... 0xa9: /* test imm,%%eax */ - case 0x84 ... 0x85: test: /* test */ + case 0x84 ... 0x85: + test: /* test */ emulate_2op_SrcV("test", src, dst, _regs.eflags); dst.type = OP_NONE; break; - case 0x86 ... 0x87: xchg: /* xchg */ + case 0x86 ... 0x87: + xchg: /* xchg */ /* * The lock prefix is implied for this insn (and setting it for the * register operands case here is benign to subsequent code). @@ -4046,12 +4119,20 @@ x86_emulate( break; } /* Write back the register source. */ - switch ( dst.bytes ) + switch (dst.bytes) { - case 1: *(uint8_t *)src.reg = (uint8_t)dst.val; break; - case 2: *(uint16_t *)src.reg = (uint16_t)dst.val; break; - case 4: *src.reg = (uint32_t)dst.val; break; /* 64b reg: zero-extend */ - case 8: *src.reg = dst.val; break; + case 1: + *(uint8_t *)src.reg = (uint8_t)dst.val; + break; + case 2: + *(uint16_t *)src.reg = (uint16_t)dst.val; + break; + case 4: + *src.reg = (uint32_t)dst.val; + break; /* 64b reg: zero-extend */ + case 8: + *src.reg = dst.val; + break; } /* Arrange for write back of the memory destination. */ dst.val = src.val; @@ -4080,7 +4161,7 @@ x86_emulate( dst.val = src.val; break; - case 0x8c: /* mov Sreg,r/m */ + case 0x8c: /* mov Sreg,r/m */ seg = modrm_reg & 7; /* REX.R is ignored. */ generate_exception_if(!is_x86_user_segment(seg), EXC_UD); store_selector: @@ -4097,10 +4178,10 @@ x86_emulate( dst.val = ea.mem.off; break; - case 0x8e: /* mov r/m,Sreg */ + case 0x8e: /* mov r/m,Sreg */ seg = modrm_reg & 7; /* REX.R is ignored. */ - generate_exception_if(!is_x86_user_segment(seg) || - seg == x86_seg_cs, EXC_UD); + generate_exception_if(!is_x86_user_segment(seg) || seg == x86_seg_cs, + EXC_UD); if ( (rc = load_seg(seg, src.val, 0, NULL, ctxt, ops)) != 0 ) goto done; if ( seg == x86_seg_ss ) @@ -4113,12 +4194,12 @@ x86_emulate( /* 64-bit mode: POP defaults to a 64-bit operand. 
*/ if ( mode_64bit() && (dst.bytes == 4) ) dst.bytes = 8; - if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes), - &dst.val, dst.bytes, ctxt, ops)) != 0 ) + if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes), &dst.val, + dst.bytes, ctxt, ops)) != 0 ) goto done; break; - case 0x90: /* nop / xchg %%r8,%%rax */ + case 0x90: /* nop / xchg %%r8,%%rax */ case X86EMUL_OPC_F3(0, 0x90): /* pause / xchg %%r8,%%rax */ if ( !(rex_prefix & REX_B) ) break; /* nop / pause */ @@ -4127,26 +4208,38 @@ x86_emulate( case 0x91 ... 0x97: /* xchg reg,%%rax */ dst.type = OP_REG; dst.bytes = op_bytes; - dst.reg = decode_gpr(&_regs, (b & 7) | ((rex_prefix & 1) << 3)); - dst.val = *dst.reg; + dst.reg = decode_gpr(&_regs, (b & 7) | ((rex_prefix & 1) << 3)); + dst.val = *dst.reg; goto xchg; case 0x98: /* cbw/cwde/cdqe */ - switch ( op_bytes ) + switch (op_bytes) { - case 2: _regs.ax = (int8_t)_regs.al; break; /* cbw */ - case 4: _regs.r(ax) = (uint32_t)(int16_t)_regs.ax; break; /* cwde */ - case 8: _regs.r(ax) = (int32_t)_regs.eax; break; /* cdqe */ + case 2: + _regs.ax = (int8_t)_regs.al; + break; /* cbw */ + case 4: + _regs.r(ax) = (uint32_t)(int16_t)_regs.ax; + break; /* cwde */ + case 8: + _regs.r(ax) = (int32_t)_regs.eax; + break; /* cdqe */ } break; case 0x99: /* cwd/cdq/cqo */ - switch ( op_bytes ) + switch (op_bytes) { - case 2: _regs.dx = -((int16_t)_regs.ax < 0); break; - case 4: _regs.r(dx) = (uint32_t)-((int32_t)_regs.eax < 0); break; + case 2: + _regs.dx = -((int16_t)_regs.ax < 0); + break; + case 4: + _regs.r(dx) = (uint32_t) - ((int32_t)_regs.eax < 0); + break; #ifdef __x86_64__ - case 8: _regs.rdx = -((int64_t)_regs.rax < 0); break; + case 8: + _regs.rdx = -((int64_t)_regs.rax < 0); + break; #endif } break; @@ -4158,12 +4251,11 @@ x86_emulate( if ( (rc = ops->read_segment(x86_seg_cs, &sreg, ctxt)) || (rc = load_seg(x86_seg_cs, imm2, 0, &cs, ctxt, ops)) || - (validate_far_branch(&cs, imm1), - src.val = sreg.sel, - rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes), - &src.val, op_bytes, ctxt)) || - (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes), - &_regs.r(ip), op_bytes, ctxt)) || + (validate_far_branch(&cs, imm1), src.val = sreg.sel, + rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes), &src.val, + op_bytes, ctxt)) || + (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes), &_regs.r(ip), + op_bytes, ctxt)) || (rc = ops->write_segment(x86_seg_cs, &cs, ctxt)) ) goto done; @@ -4171,7 +4263,7 @@ x86_emulate( singlestep = _regs.eflags & X86_EFLAGS_TF; break; - case 0x9b: /* wait/fwait */ + case 0x9b: /* wait/fwait */ host_and_vcpu_must_have(fpu); get_fpu(X86EMUL_FPU_wait); emulate_fpu_insn_stub(b); @@ -4197,7 +4289,8 @@ x86_emulate( src.val = _regs.r(flags) & ~(X86_EFLAGS_VM | X86_EFLAGS_RF); goto push; - case 0x9d: /* popf */ { + case 0x9d: /* popf */ + { /* * Bits which may not be modified by this instruction. RF is handled * uniformly during instruction retirement. @@ -4216,9 +4309,10 @@ x86_emulate( goto done; } /* All IOPL != 3 POPFs fail, except in vm86 mode. */ - generate_exception_if(!(cr4 & X86_CR4_VME) && - MASK_EXTR(_regs.eflags, X86_EFLAGS_IOPL) != 3, - EXC_GP, 0); + generate_exception_if( + !(cr4 & X86_CR4_VME) && + MASK_EXTR(_regs.eflags, X86_EFLAGS_IOPL) != 3, + EXC_GP, 0); } /* * IOPL cannot be modified outside of CPL 0. IF cannot be @@ -4231,8 +4325,8 @@ x86_emulate( /* 64-bit mode: POPF defaults to a 64-bit operand. 
*/ if ( mode_64bit() && (op_bytes == 4) ) op_bytes = 8; - if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes), - &dst.val, op_bytes, ctxt, ops)) != 0 ) + if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes), &dst.val, + op_bytes, ctxt, ops)) != 0 ) goto done; if ( op_bytes == 2 ) { @@ -4245,8 +4339,8 @@ x86_emulate( generate_exception_if(dst.val & X86_EFLAGS_TF, EXC_GP, 0); if ( dst.val & X86_EFLAGS_IF ) { - generate_exception_if(_regs.eflags & X86_EFLAGS_VIP, - EXC_GP, 0); + generate_exception_if(_regs.eflags & X86_EFLAGS_VIP, EXC_GP, + 0); dst.val |= X86_EFLAGS_VIF; } else @@ -4272,7 +4366,8 @@ x86_emulate( _regs.ah = (_regs.eflags & EFLAGS_MASK) | X86_EFLAGS_MBS; break; - case 0xa4 ... 0xa5: /* movs */ { + case 0xa4 ... 0xa5: /* movs */ + { unsigned long nr_reps = get_rep_prefix(true, true); dst.bytes = (d & ByteOp) ? 1 : op_bytes; @@ -4280,12 +4375,12 @@ x86_emulate( dst.mem.off = truncate_ea_and_reps(_regs.r(di), nr_reps, dst.bytes); src.mem.off = truncate_ea_and_reps(_regs.r(si), nr_reps, dst.bytes); if ( (nr_reps == 1) || !ops->rep_movs || - ((rc = ops->rep_movs(ea.mem.seg, src.mem.off, - dst.mem.seg, dst.mem.off, dst.bytes, - &nr_reps, ctxt)) == X86EMUL_UNHANDLEABLE) ) + ((rc = ops->rep_movs(ea.mem.seg, src.mem.off, dst.mem.seg, + dst.mem.off, dst.bytes, &nr_reps, ctxt)) == + X86EMUL_UNHANDLEABLE) ) { - if ( (rc = read_ulong(ea.mem.seg, src.mem.off, - &dst.val, dst.bytes, ctxt, ops)) != 0 ) + if ( (rc = read_ulong(ea.mem.seg, src.mem.off, &dst.val, dst.bytes, + ctxt, ops)) != 0 ) goto done; dst.type = OP_MEM; nr_reps = 1; @@ -4298,15 +4393,16 @@ x86_emulate( break; } - case 0xa6 ... 0xa7: /* cmps */ { + case 0xa6 ... 0xa7: /* cmps */ + { unsigned long next_eip = _regs.r(ip); get_rep_prefix(true, true); src.bytes = dst.bytes = (d & ByteOp) ? 1 : op_bytes; - if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.r(si)), - &dst.val, dst.bytes, ctxt, ops)) || - (rc = read_ulong(x86_seg_es, truncate_ea(_regs.r(di)), - &src.val, src.bytes, ctxt, ops)) ) + if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.r(si)), &dst.val, + dst.bytes, ctxt, ops)) || + (rc = read_ulong(x86_seg_es, truncate_ea(_regs.r(di)), &src.val, + src.bytes, ctxt, ops)) ) goto done; register_address_adjust(_regs.r(si), dst.bytes); register_address_adjust(_regs.r(di), src.bytes); @@ -4319,15 +4415,15 @@ x86_emulate( break; } - case 0xaa ... 0xab: /* stos */ { + case 0xaa ... 0xab: /* stos */ + { unsigned long nr_reps = get_rep_prefix(false, true); dst.bytes = src.bytes; dst.mem.seg = x86_seg_es; dst.mem.off = truncate_ea(_regs.r(di)); if ( (nr_reps == 1) || !ops->rep_stos || - ((rc = ops->rep_stos(&src.val, - dst.mem.seg, dst.mem.off, dst.bytes, + ((rc = ops->rep_stos(&src.val, dst.mem.seg, dst.mem.off, dst.bytes, &nr_reps, ctxt)) == X86EMUL_UNHANDLEABLE) ) { dst.val = src.val; @@ -4344,19 +4440,20 @@ x86_emulate( case 0xac ... 0xad: /* lods */ get_rep_prefix(true, false); - if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.r(si)), - &dst.val, dst.bytes, ctxt, ops)) != 0 ) + if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.r(si)), &dst.val, + dst.bytes, ctxt, ops)) != 0 ) goto done; register_address_adjust(_regs.r(si), dst.bytes); put_rep_prefix(1); break; - case 0xae ... 0xaf: /* scas */ { + case 0xae ... 
0xaf: /* scas */ + { unsigned long next_eip = _regs.r(ip); get_rep_prefix(false, true); - if ( (rc = read_ulong(x86_seg_es, truncate_ea(_regs.r(di)), - &dst.val, src.bytes, ctxt, ops)) != 0 ) + if ( (rc = read_ulong(x86_seg_es, truncate_ea(_regs.r(di)), &dst.val, + src.bytes, ctxt, ops)) != 0 ) goto done; register_address_adjust(_regs.r(di), src.bytes); put_rep_prefix(1); @@ -4370,8 +4467,8 @@ x86_emulate( } case 0xb0 ... 0xb7: /* mov imm8,r8 */ - dst.reg = _decode_gpr(&_regs, (b & 7) | ((rex_prefix & 1) << 3), - !rex_prefix); + dst.reg = + _decode_gpr(&_regs, (b & 7) | ((rex_prefix & 1) << 3), !rex_prefix); dst.val = src.val; break; @@ -4380,27 +4477,28 @@ x86_emulate( dst.val = src.val; break; - case 0xc0 ... 0xc1: grp2: /* Grp2 */ + case 0xc0 ... 0xc1: + grp2: /* Grp2 */ generate_exception_if(lock_prefix, EXC_UD); - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { -#define GRP2(name, ext) \ - case ext: \ - if ( ops->rmw && dst.type == OP_MEM ) \ - state->rmw = rmw_##name; \ - else \ - emulate_2op_SrcB(#name, src, dst, _regs.eflags); \ - break +#define GRP2(name, ext) \ + case ext: \ + if ( ops->rmw && dst.type == OP_MEM ) \ + state->rmw = rmw_##name; \ + else \ + emulate_2op_SrcB(#name, src, dst, _regs.eflags); \ + break - GRP2(rol, 0); - GRP2(ror, 1); - GRP2(rcl, 2); - GRP2(rcr, 3); + GRP2(rol, 0); + GRP2(ror, 1); + GRP2(rcl, 2); + GRP2(rcr, 3); case 6: /* sal/shl alias */ - GRP2(shl, 4); - GRP2(shr, 5); - GRP2(sar, 7); + GRP2(shl, 4); + GRP2(shr, 5); + GRP2(sar, 7); #undef GRP2 } break; @@ -4416,8 +4514,8 @@ x86_emulate( adjust_bnd(ctxt, ops, vex.pfx); break; - case 0xc4: /* les */ - case 0xc5: /* lds */ + case 0xc4: /* les */ + case 0xc5: /* lds */ seg = (b & 1) * 3; /* es = 0, ds = 3 */ les: generate_exception_if(src.type != OP_MEM, EXC_UD); @@ -4435,8 +4533,8 @@ x86_emulate( dst.bytes = (mode_64bit() && (op_bytes == 4)) ? 8 : op_bytes; dst.reg = (unsigned long *)&_regs.r(bp); fail_if(!ops->write); - if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes), - &_regs.r(bp), dst.bytes, ctxt)) ) + if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes), &_regs.r(bp), + dst.bytes, ctxt)) ) goto done; dst.val = _regs.r(sp); @@ -4446,15 +4544,16 @@ x86_emulate( for ( i = 1; i < n; i++ ) { unsigned long ebp, temp_data; - ebp = truncate_word(_regs.r(bp) - i*dst.bytes, ctxt->sp_size/8); - if ( (rc = read_ulong(x86_seg_ss, ebp, - &temp_data, dst.bytes, ctxt, ops)) || + ebp = truncate_word(_regs.r(bp) - i * dst.bytes, + ctxt->sp_size / 8); + if ( (rc = read_ulong(x86_seg_ss, ebp, &temp_data, dst.bytes, + ctxt, ops)) || (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes), &temp_data, dst.bytes, ctxt)) ) goto done; } - if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes), - &dst.val, dst.bytes, ctxt)) ) + if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes), &dst.val, + dst.bytes, ctxt)) ) goto done; } @@ -4472,15 +4571,15 @@ x86_emulate( /* Second writeback, to %%ebp. 
*/ dst.type = OP_REG; dst.reg = (unsigned long *)&_regs.r(bp); - if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes), - &dst.val, dst.bytes, ctxt, ops)) ) + if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes), &dst.val, + dst.bytes, ctxt, ops)) ) goto done; break; case 0xca: /* ret imm16 (far) */ case 0xcb: /* ret (far) */ - if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes), - &dst.val, op_bytes, ctxt, ops)) || + if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes), &dst.val, + op_bytes, ctxt, ops)) || (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes + src.val), &src.val, op_bytes, ctxt, ops)) || (rc = load_seg(x86_seg_cs, src.val, 1, &cs, ctxt, ops)) || @@ -4496,7 +4595,7 @@ x86_emulate( case 0xcd: /* int imm8 */ case 0xf1: /* int1 (icebp) */ ASSERT(!ctxt->event_pending); - switch ( ctxt->opcode ) + switch (ctxt->opcode) { case 0xcc: /* int3 */ ctxt->event.vector = EXC_BP; @@ -4521,18 +4620,19 @@ x86_emulate( rc = X86EMUL_EXCEPTION; goto done; - case 0xcf: /* iret */ { + case 0xcf: /* iret */ + { unsigned long sel, eip, eflags; uint32_t mask = X86_EFLAGS_VIP | X86_EFLAGS_VIF | X86_EFLAGS_VM; fail_if(!in_realmode(ctxt, ops)); ctxt->retire.unblock_nmi = true; - if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes), - &eip, op_bytes, ctxt, ops)) || - (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes), - &sel, op_bytes, ctxt, ops)) || - (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes), - &eflags, op_bytes, ctxt, ops)) ) + if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes), &eip, op_bytes, + ctxt, ops)) || + (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes), &sel, op_bytes, + ctxt, ops)) || + (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes), &eflags, + op_bytes, ctxt, ops)) ) goto done; if ( op_bytes == 2 ) eflags = (uint16_t)eflags | (_regs.eflags & 0xffff0000u); @@ -4574,7 +4674,8 @@ x86_emulate( _regs.al = (_regs.eflags & X86_EFLAGS_CF) ? 0xff : 0x00; break; - case 0xd7: /* xlat */ { + case 0xd7: /* xlat */ + { unsigned long al; if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.r(bx) + _regs.al), @@ -4587,7 +4688,7 @@ x86_emulate( case 0xd8: /* FPU 0xd8 */ host_and_vcpu_must_have(fpu); get_fpu(X86EMUL_FPU_fpu); - switch ( modrm ) + switch (modrm) { case 0xc0 ... 0xc7: /* fadd %stN,%st */ case 0xc8 ... 0xcf: /* fmul %stN,%st */ @@ -4602,8 +4703,8 @@ x86_emulate( default: fpu_memsrc32: ASSERT(ea.type == OP_MEM); - if ( (rc = ops->read(ea.mem.seg, ea.mem.off, &src.val, - 4, ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->read(ea.mem.seg, ea.mem.off, &src.val, 4, ctxt)) != + X86EMUL_OKAY ) goto done; emulate_fpu_insn_memsrc(b, modrm_reg & 7, src.val); break; @@ -4613,46 +4714,46 @@ x86_emulate( case 0xd9: /* FPU 0xd9 */ host_and_vcpu_must_have(fpu); get_fpu(X86EMUL_FPU_fpu); - switch ( modrm ) + switch (modrm) { case 0xfb: /* fsincos */ fail_if(cpu_has_amd_erratum(573)); /* fall through */ case 0xc0 ... 0xc7: /* fld %stN */ case 0xc8 ... 0xcf: /* fxch %stN */ - case 0xd0: /* fnop */ + case 0xd0: /* fnop */ case 0xd8 ... 
0xdf: /* fstp %stN (alternative encoding) */ - case 0xe0: /* fchs */ - case 0xe1: /* fabs */ - case 0xe4: /* ftst */ - case 0xe5: /* fxam */ - case 0xe8: /* fld1 */ - case 0xe9: /* fldl2t */ - case 0xea: /* fldl2e */ - case 0xeb: /* fldpi */ - case 0xec: /* fldlg2 */ - case 0xed: /* fldln2 */ - case 0xee: /* fldz */ - case 0xf0: /* f2xm1 */ - case 0xf1: /* fyl2x */ - case 0xf2: /* fptan */ - case 0xf3: /* fpatan */ - case 0xf4: /* fxtract */ - case 0xf5: /* fprem1 */ - case 0xf6: /* fdecstp */ - case 0xf7: /* fincstp */ - case 0xf8: /* fprem */ - case 0xf9: /* fyl2xp1 */ - case 0xfa: /* fsqrt */ - case 0xfc: /* frndint */ - case 0xfd: /* fscale */ - case 0xfe: /* fsin */ - case 0xff: /* fcos */ + case 0xe0: /* fchs */ + case 0xe1: /* fabs */ + case 0xe4: /* ftst */ + case 0xe5: /* fxam */ + case 0xe8: /* fld1 */ + case 0xe9: /* fldl2t */ + case 0xea: /* fldl2e */ + case 0xeb: /* fldpi */ + case 0xec: /* fldlg2 */ + case 0xed: /* fldln2 */ + case 0xee: /* fldz */ + case 0xf0: /* f2xm1 */ + case 0xf1: /* fyl2x */ + case 0xf2: /* fptan */ + case 0xf3: /* fpatan */ + case 0xf4: /* fxtract */ + case 0xf5: /* fprem1 */ + case 0xf6: /* fdecstp */ + case 0xf7: /* fincstp */ + case 0xf8: /* fprem */ + case 0xf9: /* fyl2xp1 */ + case 0xfa: /* fsqrt */ + case 0xfc: /* frndint */ + case 0xfd: /* fscale */ + case 0xfe: /* fsin */ + case 0xff: /* fcos */ emulate_fpu_insn_stub(0xd9, modrm); break; default: generate_exception_if(ea.type != OP_MEM, EXC_UD); - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 0: /* fld m32fp */ goto fpu_memsrc32; @@ -4669,8 +4770,8 @@ x86_emulate( case 5: /* fldcw m2byte */ state->fpu_ctrl = true; fpu_memsrc16: - if ( (rc = ops->read(ea.mem.seg, ea.mem.off, &src.val, - 2, ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->read(ea.mem.seg, ea.mem.off, &src.val, 2, + ctxt)) != X86EMUL_OKAY ) goto done; emulate_fpu_insn_memsrc(b, modrm_reg & 7, src.val); break; @@ -4699,7 +4800,7 @@ x86_emulate( case 0xda: /* FPU 0xda */ host_and_vcpu_must_have(fpu); get_fpu(X86EMUL_FPU_fpu); - switch ( modrm ) + switch (modrm) { case 0xc0 ... 0xc7: /* fcmovb %stN */ case 0xc8 ... 0xcf: /* fcmove %stN */ @@ -4708,7 +4809,7 @@ x86_emulate( vcpu_must_have(cmov); emulate_fpu_insn_stub_eflags(0xda, modrm); break; - case 0xe9: /* fucompp */ + case 0xe9: /* fucompp */ emulate_fpu_insn_stub(0xda, modrm); break; default: @@ -4720,7 +4821,7 @@ x86_emulate( case 0xdb: /* FPU 0xdb */ host_and_vcpu_must_have(fpu); get_fpu(X86EMUL_FPU_fpu); - switch ( modrm ) + switch (modrm) { case 0xc0 ... 0xc7: /* fcmovnb %stN */ case 0xc8 ... 
0xcf: /* fcmovne %stN */ @@ -4736,13 +4837,13 @@ x86_emulate( case 0xe2: /* fnclex */ case 0xe3: /* fninit */ case 0xe4: /* fnsetpm - 287 only, ignored by 387 */ - /* case 0xe5: frstpm - 287 only, #UD on 387 */ + /* case 0xe5: frstpm - 287 only, #UD on 387 */ state->fpu_ctrl = true; emulate_fpu_insn_stub(0xdb, modrm); break; default: generate_exception_if(ea.type != OP_MEM, EXC_UD); - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 0: /* fild m32i */ goto fpu_memsrc32; @@ -4754,8 +4855,8 @@ x86_emulate( goto fpu_memdst32; case 5: /* fld m80fp */ fpu_memsrc80: - if ( (rc = ops->read(ea.mem.seg, ea.mem.off, mmvalp, - 10, ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->read(ea.mem.seg, ea.mem.off, mmvalp, 10, + ctxt)) != X86EMUL_OKAY ) goto done; emulate_fpu_insn_memsrc(b, modrm_reg & 7, *mmvalp); break; @@ -4764,8 +4865,8 @@ x86_emulate( fail_if(!ops->write); emulate_fpu_insn_memdst(b, modrm_reg & 7, *mmvalp); if ( fpu_check_write() && - (rc = ops->write(ea.mem.seg, ea.mem.off, mmvalp, - 10, ctxt)) != X86EMUL_OKAY ) + (rc = ops->write(ea.mem.seg, ea.mem.off, mmvalp, 10, + ctxt)) != X86EMUL_OKAY ) goto done; break; default: @@ -4777,7 +4878,7 @@ x86_emulate( case 0xdc: /* FPU 0xdc */ host_and_vcpu_must_have(fpu); get_fpu(X86EMUL_FPU_fpu); - switch ( modrm ) + switch (modrm) { case 0xc0 ... 0xc7: /* fadd %st,%stN */ case 0xc8 ... 0xcf: /* fmul %st,%stN */ @@ -4792,8 +4893,8 @@ x86_emulate( default: fpu_memsrc64: ASSERT(ea.type == OP_MEM); - if ( (rc = ops->read(ea.mem.seg, ea.mem.off, &src.val, - 8, ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->read(ea.mem.seg, ea.mem.off, &src.val, 8, ctxt)) != + X86EMUL_OKAY ) goto done; emulate_fpu_insn_memsrc(b, modrm_reg & 7, src.val); break; @@ -4803,7 +4904,7 @@ x86_emulate( case 0xdd: /* FPU 0xdd */ host_and_vcpu_must_have(fpu); get_fpu(X86EMUL_FPU_fpu); - switch ( modrm ) + switch (modrm) { case 0xc0 ... 0xc7: /* ffree %stN */ case 0xc8 ... 0xcf: /* fxch %stN (alternative encoding) */ @@ -4815,7 +4916,7 @@ x86_emulate( break; default: generate_exception_if(ea.type != OP_MEM, EXC_UD); - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 0: /* fld m64fp */; goto fpu_memsrc64; @@ -4851,12 +4952,12 @@ x86_emulate( case 0xde: /* FPU 0xde */ host_and_vcpu_must_have(fpu); get_fpu(X86EMUL_FPU_fpu); - switch ( modrm ) + switch (modrm) { case 0xc0 ... 0xc7: /* faddp %stN */ case 0xc8 ... 0xcf: /* fmulp %stN */ case 0xd0 ... 0xd7: /* fcomp %stN (alternative encoding) */ - case 0xd9: /* fcompp */ + case 0xd9: /* fcompp */ case 0xe0 ... 0xe7: /* fsubrp %stN */ case 0xe8 ... 0xef: /* fsubp %stN */ case 0xf0 ... 0xf7: /* fdivrp %stN */ @@ -4873,7 +4974,7 @@ x86_emulate( case 0xdf: /* FPU 0xdf */ host_and_vcpu_must_have(fpu); get_fpu(X86EMUL_FPU_fpu); - switch ( modrm ) + switch (modrm) { case 0xe0: /* fnstsw %ax */ @@ -4896,7 +4997,7 @@ x86_emulate( break; default: generate_exception_if(ea.type != OP_MEM, EXC_UD); - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 0: /* fild m16i */ goto fpu_memsrc16; @@ -4919,7 +5020,8 @@ x86_emulate( } break; - case 0xe0 ... 0xe2: /* loop{,z,nz} */ { + case 0xe0 ... 0xe2: /* loop{,z,nz} */ + { unsigned long count = get_loop_count(&_regs, ad_bytes); int do_jmp = !(_regs.eflags & X86_EFLAGS_ZF); /* loopnz */ @@ -4945,7 +5047,8 @@ x86_emulate( case 0xec: /* in %dx,%al */ case 0xed: /* in %dx,%eax */ case 0xee: /* out %al,%dx */ - case 0xef: /* out %eax,%dx */ { + case 0xef: /* out %eax,%dx */ + { unsigned int port = ((b < 0xe8) ? (uint8_t)src.val : _regs.dx); op_bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 
4 : op_bytes; @@ -4973,7 +5076,8 @@ x86_emulate( break; } - case 0xe8: /* call (near) */ { + case 0xe8: /* call (near) */ + { int32_t rel = src.val; op_bytes = ((op_bytes == 4) && mode_64bit()) ? 8 : op_bytes; @@ -5010,15 +5114,15 @@ x86_emulate( case 0xf6 ... 0xf7: /* Grp3 */ if ( (d & DstMask) == DstEax ) dst.reg = (unsigned long *)&_regs.r(ax); - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { unsigned long u[2], v; case 0 ... 1: /* test */ generate_exception_if(lock_prefix, EXC_UD); if ( ops->rmw && dst.type == OP_MEM && - (rc = read_ulong(dst.mem.seg, dst.mem.off, &dst.val, - dst.bytes, ctxt, ops)) != X86EMUL_OKAY ) + (rc = read_ulong(dst.mem.seg, dst.mem.off, &dst.val, dst.bytes, + ctxt, ops)) != X86EMUL_OKAY ) goto done; goto test; case 2: /* not */ @@ -5035,7 +5139,7 @@ x86_emulate( break; case 4: /* mul */ _regs.eflags &= ~(X86_EFLAGS_OF | X86_EFLAGS_CF); - switch ( dst.bytes ) + switch (dst.bytes) { case 1: dst.val = _regs.al; @@ -5074,7 +5178,7 @@ x86_emulate( dst.val = _regs.r(ax); imul: _regs.eflags &= ~(X86_EFLAGS_OF | X86_EFLAGS_CF); - switch ( dst.bytes ) + switch (dst.bytes) { case 1: dst.val = (int8_t)src.val * (int8_t)dst.val; @@ -5084,8 +5188,8 @@ x86_emulate( dst.bytes = 2; break; case 2: - dst.val = ((uint32_t)(int16_t)src.val * - (uint32_t)(int16_t)dst.val); + dst.val = + ((uint32_t)(int16_t)src.val * (uint32_t)(int16_t)dst.val); if ( (int16_t)dst.val != (int32_t)dst.val ) _regs.eflags |= X86_EFLAGS_OF | X86_EFLAGS_CF; if ( b > 0x6b ) @@ -5093,8 +5197,8 @@ x86_emulate( break; #ifdef __x86_64__ case 4: - dst.val = ((uint64_t)(int32_t)src.val * - (uint64_t)(int32_t)dst.val); + dst.val = + ((uint64_t)(int32_t)src.val * (uint64_t)(int32_t)dst.val); if ( (int32_t)dst.val != dst.val ) _regs.eflags |= X86_EFLAGS_OF | X86_EFLAGS_CF; if ( b > 0x6b ) @@ -5113,25 +5217,24 @@ x86_emulate( } break; case 6: /* div */ - switch ( src.bytes ) + switch (src.bytes) { case 1: u[0] = _regs.ax; u[1] = 0; - v = (uint8_t)src.val; + v = (uint8_t)src.val; generate_exception_if( - div_dbl(u, v) || ((uint8_t)u[0] != (uint16_t)u[0]), - EXC_DE); + div_dbl(u, v) || ((uint8_t)u[0] != (uint16_t)u[0]), EXC_DE); dst.val = (uint8_t)u[0]; _regs.ah = u[1]; break; case 2: u[0] = (_regs.edx << 16) | _regs.ax; u[1] = 0; - v = (uint16_t)src.val; - generate_exception_if( - div_dbl(u, v) || ((uint16_t)u[0] != (uint32_t)u[0]), - EXC_DE); + v = (uint16_t)src.val; + generate_exception_if(div_dbl(u, v) || + ((uint16_t)u[0] != (uint32_t)u[0]), + EXC_DE); dst.val = (uint16_t)u[0]; _regs.dx = u[1]; break; @@ -5139,44 +5242,41 @@ x86_emulate( case 4: u[0] = (_regs.rdx << 32) | _regs.eax; u[1] = 0; - v = (uint32_t)src.val; - generate_exception_if( - div_dbl(u, v) || ((uint32_t)u[0] != u[0]), - EXC_DE); - dst.val = (uint32_t)u[0]; + v = (uint32_t)src.val; + generate_exception_if(div_dbl(u, v) || ((uint32_t)u[0] != u[0]), + EXC_DE); + dst.val = (uint32_t)u[0]; _regs.rdx = (uint32_t)u[1]; break; #endif default: u[0] = _regs.r(ax); u[1] = _regs.r(dx); - v = src.val; + v = src.val; generate_exception_if(div_dbl(u, v), EXC_DE); - dst.val = u[0]; + dst.val = u[0]; _regs.r(dx) = u[1]; break; } break; case 7: /* idiv */ - switch ( src.bytes ) + switch (src.bytes) { case 1: u[0] = (int16_t)_regs.ax; u[1] = ((long)u[0] < 0) ? 
~0UL : 0UL; - v = (int8_t)src.val; + v = (int8_t)src.val; generate_exception_if( - idiv_dbl(u, v) || ((int8_t)u[0] != (int16_t)u[0]), - EXC_DE); + idiv_dbl(u, v) || ((int8_t)u[0] != (int16_t)u[0]), EXC_DE); dst.val = (int8_t)u[0]; _regs.ah = u[1]; break; case 2: u[0] = (int32_t)((_regs.edx << 16) | _regs.ax); u[1] = ((long)u[0] < 0) ? ~0UL : 0UL; - v = (int16_t)src.val; + v = (int16_t)src.val; generate_exception_if( - idiv_dbl(u, v) || ((int16_t)u[0] != (int32_t)u[0]), - EXC_DE); + idiv_dbl(u, v) || ((int16_t)u[0] != (int32_t)u[0]), EXC_DE); dst.val = (int16_t)u[0]; _regs.dx = u[1]; break; @@ -5184,20 +5284,19 @@ x86_emulate( case 4: u[0] = (_regs.rdx << 32) | _regs.eax; u[1] = ((long)u[0] < 0) ? ~0UL : 0UL; - v = (int32_t)src.val; - generate_exception_if( - idiv_dbl(u, v) || ((int32_t)u[0] != u[0]), - EXC_DE); - dst.val = (int32_t)u[0]; + v = (int32_t)src.val; + generate_exception_if(idiv_dbl(u, v) || ((int32_t)u[0] != u[0]), + EXC_DE); + dst.val = (int32_t)u[0]; _regs.rdx = (uint32_t)u[1]; break; #endif default: u[0] = _regs.r(ax); u[1] = _regs.r(dx); - v = src.val; + v = src.val; generate_exception_if(idiv_dbl(u, v), EXC_DE); - dst.val = u[0]; + dst.val = u[0]; _regs.r(dx) = u[1]; break; } @@ -5232,9 +5331,8 @@ x86_emulate( } else { - generate_exception_if((_regs.eflags & X86_EFLAGS_VIP) || - !mode_vif(), - EXC_GP, 0); + generate_exception_if( + (_regs.eflags & X86_EFLAGS_VIP) || !mode_vif(), EXC_GP, 0); if ( !(_regs.eflags & X86_EFLAGS_VIF) ) ctxt->retire.sti = true; _regs.eflags |= X86_EFLAGS_VIF; @@ -5253,7 +5351,7 @@ x86_emulate( generate_exception_if((modrm_reg & 7) >= 2, EXC_UD); /* Fallthrough. */ case 0xff: /* Grp5 */ - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 0: /* inc */ if ( ops->rmw && dst.type == OP_MEM ) @@ -5287,8 +5385,8 @@ x86_emulate( generate_exception_if(src.type != OP_MEM, EXC_UD); if ( (rc = read_ulong(src.mem.seg, - truncate_ea(src.mem.off + op_bytes), - &imm2, 2, ctxt, ops)) ) + truncate_ea(src.mem.off + op_bytes), &imm2, 2, + ctxt, ops)) ) goto done; imm1 = src.val; if ( !(modrm_reg & 4) ) @@ -5304,7 +5402,7 @@ x86_emulate( case X86EMUL_OPC(0x0f, 0x00): /* Grp6 */ seg = (modrm_reg & 1) ? x86_seg_tr : x86_seg_ldtr; generate_exception_if(!in_protmode(ctxt, ops), EXC_UD); - switch ( modrm_reg & 6 ) + switch (modrm_reg & 6) { case 0: /* sldt / str */ generate_exception_if(umip_active(ctxt, ops), EXC_GP, 0); @@ -5316,20 +5414,19 @@ x86_emulate( break; case 4: /* verr / verw */ _regs.eflags &= ~X86_EFLAGS_ZF; - switch ( rc = protmode_load_seg(x86_seg_none, src.val, false, - &sreg, ctxt, ops) ) + switch (rc = protmode_load_seg(x86_seg_none, src.val, false, &sreg, + ctxt, ops)) { case X86EMUL_OKAY: - if ( sreg.s && - ((modrm_reg & 1) ? ((sreg.type & 0xa) == 0x2) - : ((sreg.type & 0xa) != 0x8)) ) + if ( sreg.s && ((modrm_reg & 1) ? ((sreg.type & 0xa) == 0x2) + : ((sreg.type & 0xa) != 0x8)) ) _regs.eflags |= X86_EFLAGS_ZF; break; case X86EMUL_EXCEPTION: if ( ctxt->event_pending ) { ASSERT(ctxt->event.vector == EXC_PF); - default: + default: goto done; } /* Instead of the exception, ZF remains cleared. */ @@ -5349,7 +5446,7 @@ x86_emulate( seg = (modrm_reg & 1) ? 
x86_seg_idtr : x86_seg_gdtr; - switch( modrm ) + switch (modrm) { case 0xca: /* clac */ case 0xcb: /* stac */ @@ -5402,20 +5499,19 @@ x86_emulate( case 0xd6: /* xtest */ generate_exception_if(vex.pfx, EXC_UD); - generate_exception_if(!vcpu_has_rtm() && !vcpu_has_hle(), - EXC_UD); + generate_exception_if(!vcpu_has_rtm() && !vcpu_has_hle(), EXC_UD); /* Neither HLE nor RTM can be active when we get here. */ _regs.eflags |= X86_EFLAGS_ZF; break; case 0xdf: /* invlpga */ fail_if(!ops->read_msr); - if ( (rc = ops->read_msr(MSR_EFER, - &msr_val, ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->read_msr(MSR_EFER, &msr_val, ctxt)) != + X86EMUL_OKAY ) goto done; /* Finding SVME set implies vcpu_has_svm(). */ - generate_exception_if(!(msr_val & EFER_SVME) || - !in_protmode(ctxt, ops), EXC_UD); + generate_exception_if( + !(msr_val & EFER_SVME) || !in_protmode(ctxt, ops), EXC_UD); generate_exception_if(!mode_ring0(), EXC_GP, 0); generate_exception_if(_regs.ecx, EXC_UD); /* TODO: Support ASIDs. */ fail_if(ops->invlpg == NULL); @@ -5429,16 +5525,16 @@ x86_emulate( generate_exception_if(!mode_ring0(), EXC_GP, 0); fail_if(!ops->read_segment || !ops->read_msr || !ops->write_segment || !ops->write_msr); - if ( (rc = ops->read_segment(x86_seg_gs, &sreg, - ctxt)) != X86EMUL_OKAY || - (rc = ops->read_msr(MSR_SHADOW_GS_BASE, &msr_val, - ctxt)) != X86EMUL_OKAY || - (rc = ops->write_msr(MSR_SHADOW_GS_BASE, sreg.base, - ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->read_segment(x86_seg_gs, &sreg, ctxt)) != + X86EMUL_OKAY || + (rc = ops->read_msr(MSR_SHADOW_GS_BASE, &msr_val, ctxt)) != + X86EMUL_OKAY || + (rc = ops->write_msr(MSR_SHADOW_GS_BASE, sreg.base, ctxt)) != + X86EMUL_OKAY ) goto done; sreg.base = msr_val; - if ( (rc = ops->write_segment(x86_seg_gs, &sreg, - ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->write_segment(x86_seg_gs, &sreg, ctxt)) != + X86EMUL_OKAY ) { /* Best effort unwind (i.e. no error checking). */ ops->write_msr(MSR_SHADOW_GS_BASE, msr_val, ctxt); @@ -5448,8 +5544,8 @@ x86_emulate( case 0xf9: /* rdtscp */ fail_if(ops->read_msr == NULL); - if ( (rc = ops->read_msr(MSR_TSC_AUX, - &msr_val, ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->read_msr(MSR_TSC_AUX, &msr_val, ctxt)) != + X86EMUL_OKAY ) goto done; _regs.r(cx) = (uint32_t)msr_val; goto rdtsc; @@ -5460,14 +5556,14 @@ x86_emulate( vcpu_must_have(clzero); - base = ad_bytes == 8 ? _regs.r(ax) : - ad_bytes == 4 ? _regs.eax : _regs.ax; + base = ad_bytes == 8 ? _regs.r(ax) + : ad_bytes == 4 ? _regs.eax : _regs.ax; limit = 0; if ( vcpu_has_clflush() && ops->cpuid(1, 0, &cpuid_leaf, ctxt) == X86EMUL_OKAY ) limit = ((cpuid_leaf.b >> 8) & 0xff) * 8; - generate_exception_if(limit < sizeof(long) || - (limit & (limit - 1)), EXC_UD); + generate_exception_if(limit < sizeof(long) || (limit & (limit - 1)), + EXC_UD); base &= ~(limit - 1); if ( ops->rep_stos ) { @@ -5496,7 +5592,7 @@ x86_emulate( } #define _GRP7(mod, reg) \ - (((mod) << 6) | ((reg) << 3)) ... 
(((mod) << 6) | ((reg) << 3) | 7) + (((mod) << 6) | ((reg) << 3))...(((mod) << 6) | ((reg) << 3) | 7) #define GRP7_MEM(reg) _GRP7(0, reg): case _GRP7(1, reg): case _GRP7(2, reg) #define GRP7_ALL(reg) GRP7_MEM(reg): case _GRP7(3, reg) @@ -5514,8 +5610,8 @@ x86_emulate( sreg.base &= 0xffffff; op_bytes = 4; } - if ( (rc = ops->write(ea.mem.seg, ea.mem.off, &sreg.limit, - 2, ctxt)) != X86EMUL_OKAY || + if ( (rc = ops->write(ea.mem.seg, ea.mem.off, &sreg.limit, 2, + ctxt)) != X86EMUL_OKAY || (rc = ops->write(ea.mem.seg, truncate_ea(ea.mem.off + 2), &sreg.base, op_bytes, ctxt)) != X86EMUL_OKAY ) goto done; @@ -5527,8 +5623,8 @@ x86_emulate( generate_exception_if(!mode_ring0(), EXC_GP, 0); fail_if(ops->write_segment == NULL); memset(&sreg, 0, sizeof(sreg)); - if ( (rc = read_ulong(ea.mem.seg, ea.mem.off, - &limit, 2, ctxt, ops)) || + if ( (rc = read_ulong(ea.mem.seg, ea.mem.off, &limit, 2, ctxt, + ops)) || (rc = read_ulong(ea.mem.seg, truncate_ea(ea.mem.off + 2), &base, mode_64bit() ? 8 : 4, ctxt, ops)) ) goto done; @@ -5565,8 +5661,8 @@ x86_emulate( goto done; if ( ea.type == OP_REG ) cr0w = *ea.reg; - else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off, - &cr0w, 2, ctxt, ops)) ) + else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off, &cr0w, 2, ctxt, + ops)) ) goto done; /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */ cr0 = (cr0 & ~0xe) | (cr0w & 0xf); @@ -5595,13 +5691,13 @@ x86_emulate( case X86EMUL_OPC(0x0f, 0x02): /* lar */ generate_exception_if(!in_protmode(ctxt, ops), EXC_UD); _regs.eflags &= ~X86_EFLAGS_ZF; - switch ( rc = protmode_load_seg(x86_seg_none, src.val, false, &sreg, - ctxt, ops) ) + switch (rc = protmode_load_seg(x86_seg_none, src.val, false, &sreg, + ctxt, ops)) { case X86EMUL_OKAY: if ( !sreg.s ) { - switch ( sreg.type ) + switch (sreg.type) { case 0x01: /* available 16-bit TSS */ case 0x03: /* busy 16-bit TSS */ @@ -5625,7 +5721,7 @@ x86_emulate( if ( ctxt->event_pending ) { ASSERT(ctxt->event.vector == EXC_PF); - default: + default: goto done; } /* Instead of the exception, ZF remains cleared. */ @@ -5643,13 +5739,13 @@ x86_emulate( case X86EMUL_OPC(0x0f, 0x03): /* lsl */ generate_exception_if(!in_protmode(ctxt, ops), EXC_UD); _regs.eflags &= ~X86_EFLAGS_ZF; - switch ( rc = protmode_load_seg(x86_seg_none, src.val, false, &sreg, - ctxt, ops) ) + switch (rc = protmode_load_seg(x86_seg_none, src.val, false, &sreg, + ctxt, ops)) { case X86EMUL_OKAY: if ( !sreg.s ) { - switch ( sreg.type ) + switch (sreg.type) { case 0x01: /* available 16-bit TSS */ case 0x03: /* busy 16-bit TSS */ @@ -5670,7 +5766,7 @@ x86_emulate( if ( ctxt->event_pending ) { ASSERT(ctxt->event.vector == EXC_PF); - default: + default: goto done; } /* Instead of the exception, ZF remains cleared. 
*/ @@ -5698,9 +5794,9 @@ x86_emulate( cs.sel = (msr_val >> 32) & ~3; /* SELECTOR_RPL_MASK */ sreg.sel = cs.sel + 8; - cs.base = sreg.base = 0; /* flat segment */ - cs.limit = sreg.limit = ~0u; /* 4GB limit */ - sreg.attr = 0xc93; /* G+DB+P+S+Data */ + cs.base = sreg.base = 0; /* flat segment */ + cs.limit = sreg.limit = ~0u; /* 4GB limit */ + sreg.attr = 0xc93; /* G+DB+P+S+Data */ #ifdef __x86_64__ if ( ctxt->lma ) @@ -5715,8 +5811,8 @@ x86_emulate( goto done; _regs.rip = msr_val; - if ( (rc = ops->read_msr(MSR_SYSCALL_MASK, - &msr_val, ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->read_msr(MSR_SYSCALL_MASK, &msr_val, ctxt)) != + X86EMUL_OKAY ) goto done; _regs.eflags &= ~(msr_val | X86_EFLAGS_RF); } @@ -5757,7 +5853,8 @@ x86_emulate( generate_exception_if(!mode_ring0(), EXC_GP, 0); fail_if((ops->read_cr == NULL) || (ops->write_cr == NULL)); if ( (rc = ops->read_cr(0, &dst.val, ctxt)) != X86EMUL_OKAY || - (rc = ops->write_cr(0, dst.val & ~X86_CR0_TS, ctxt)) != X86EMUL_OKAY ) + (rc = ops->write_cr(0, dst.val & ~X86_CR0_TS, ctxt)) != + X86EMUL_OKAY ) goto done; break; @@ -5776,12 +5873,12 @@ x86_emulate( case X86EMUL_OPC(0x0f, 0x0d): /* GrpP (prefetch) */ case X86EMUL_OPC(0x0f, 0x18): /* Grp16 (prefetch/nop) */ - case X86EMUL_OPC(0x0f, 0x19) ... X86EMUL_OPC(0x0f, 0x1f): /* nop */ + case X86EMUL_OPC(0x0f, 0x19)... X86EMUL_OPC(0x0f, 0x1f): /* nop */ break; case X86EMUL_OPC(0x0f, 0x0e): /* femms */ host_and_vcpu_must_have(3dnow); - asm volatile ( "femms" ); + asm volatile("femms"); break; case X86EMUL_OPC(0x0f, 0x0f): /* 3DNow! */ @@ -5799,89 +5896,130 @@ x86_emulate( state->simd_size = simd_other; goto simd_0f_imm8; -#define CASE_SIMD_PACKED_INT(pfx, opc) \ - case X86EMUL_OPC(pfx, opc): \ +#define CASE_SIMD_PACKED_INT(pfx, opc) \ + case X86EMUL_OPC(pfx, opc): \ case X86EMUL_OPC_66(pfx, opc) -#define CASE_SIMD_SINGLE_FP(kind, pfx, opc) \ - case X86EMUL_OPC##kind(pfx, opc): \ +#define CASE_SIMD_SINGLE_FP(kind, pfx, opc) \ + case X86EMUL_OPC##kind(pfx, opc): \ case X86EMUL_OPC##kind##_F3(pfx, opc) -#define CASE_SIMD_DOUBLE_FP(kind, pfx, opc) \ - case X86EMUL_OPC##kind##_66(pfx, opc): \ +#define CASE_SIMD_DOUBLE_FP(kind, pfx, opc) \ + case X86EMUL_OPC##kind##_66(pfx, opc): \ case X86EMUL_OPC##kind##_F2(pfx, opc) -#define CASE_SIMD_ALL_FP(kind, pfx, opc) \ - CASE_SIMD_SINGLE_FP(kind, pfx, opc): \ - CASE_SIMD_DOUBLE_FP(kind, pfx, opc) -#define CASE_SIMD_PACKED_FP(kind, pfx, opc) \ - case X86EMUL_OPC##kind(pfx, opc): \ +#define CASE_SIMD_ALL_FP(kind, pfx, opc) \ + CASE_SIMD_SINGLE_FP(kind, pfx, opc): CASE_SIMD_DOUBLE_FP(kind, pfx, opc) +#define CASE_SIMD_PACKED_FP(kind, pfx, opc) \ + case X86EMUL_OPC##kind(pfx, opc): \ case X86EMUL_OPC##kind##_66(pfx, opc) -#define CASE_SIMD_SCALAR_FP(kind, pfx, opc) \ - case X86EMUL_OPC##kind##_F3(pfx, opc): \ +#define CASE_SIMD_SCALAR_FP(kind, pfx, opc) \ + case X86EMUL_OPC##kind##_F3(pfx, opc): \ case X86EMUL_OPC##kind##_F2(pfx, opc) - CASE_SIMD_SCALAR_FP(, 0x0f, 0x2b): /* movnts{s,d} xmm,mem */ - host_and_vcpu_must_have(sse4a); + CASE_SIMD_SCALAR_FP(, 0x0f, 0x2b) + : /* movnts{s,d} xmm,mem */ + host_and_vcpu_must_have(sse4a); /* fall through */ - CASE_SIMD_PACKED_FP(, 0x0f, 0x2b): /* movntp{s,d} xmm,m128 */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x2b): /* vmovntp{s,d} {x,y}mm,mem */ - generate_exception_if(ea.type != OP_MEM, EXC_UD); + CASE_SIMD_PACKED_FP(, 0x0f, 0x2b) + : /* movntp{s,d} xmm,m128 */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x2b) + : /* vmovntp{s,d} {x,y}mm,mem */ + generate_exception_if(ea.type != OP_MEM, EXC_UD); sfence = true; /* fall through */ - 
CASE_SIMD_ALL_FP(, 0x0f, 0x10): /* mov{up,s}{s,d} xmm/mem,xmm */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x10): /* vmovup{s,d} {x,y}mm/mem,{x,y}mm */ - CASE_SIMD_SCALAR_FP(_VEX, 0x0f, 0x10): /* vmovs{s,d} mem,xmm */ - /* vmovs{s,d} xmm,xmm,xmm */ - CASE_SIMD_ALL_FP(, 0x0f, 0x11): /* mov{up,s}{s,d} xmm,xmm/mem */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x11): /* vmovup{s,d} {x,y}mm,{x,y}mm/mem */ - CASE_SIMD_SCALAR_FP(_VEX, 0x0f, 0x11): /* vmovs{s,d} xmm,mem */ - /* vmovs{s,d} xmm,xmm,xmm */ - CASE_SIMD_PACKED_FP(, 0x0f, 0x14): /* unpcklp{s,d} xmm/m128,xmm */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x14): /* vunpcklp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_FP(, 0x0f, 0x15): /* unpckhp{s,d} xmm/m128,xmm */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x15): /* vunpckhp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_FP(, 0x0f, 0x28): /* movap{s,d} xmm/m128,xmm */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x28): /* vmovap{s,d} {x,y}mm/mem,{x,y}mm */ - CASE_SIMD_PACKED_FP(, 0x0f, 0x29): /* movap{s,d} xmm,xmm/m128 */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x29): /* vmovap{s,d} {x,y}mm,{x,y}mm/mem */ - CASE_SIMD_ALL_FP(, 0x0f, 0x51): /* sqrt{p,s}{s,d} xmm/mem,xmm */ - CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x51): /* vsqrtp{s,d} {x,y}mm/mem,{x,y}mm */ - /* vsqrts{s,d} xmm/m32,xmm,xmm */ - CASE_SIMD_SINGLE_FP(, 0x0f, 0x52): /* rsqrt{p,s}s xmm/mem,xmm */ - CASE_SIMD_SINGLE_FP(_VEX, 0x0f, 0x52): /* vrsqrtps {x,y}mm/mem,{x,y}mm */ - /* vrsqrtss xmm/m32,xmm,xmm */ - CASE_SIMD_SINGLE_FP(, 0x0f, 0x53): /* rcp{p,s}s xmm/mem,xmm */ - CASE_SIMD_SINGLE_FP(_VEX, 0x0f, 0x53): /* vrcpps {x,y}mm/mem,{x,y}mm */ - /* vrcpss xmm/m32,xmm,xmm */ - CASE_SIMD_PACKED_FP(, 0x0f, 0x54): /* andp{s,d} xmm/m128,xmm */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x54): /* vandp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_FP(, 0x0f, 0x55): /* andnp{s,d} xmm/m128,xmm */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x55): /* vandnp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_FP(, 0x0f, 0x56): /* orp{s,d} xmm/m128,xmm */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x56): /* vorp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_FP(, 0x0f, 0x57): /* xorp{s,d} xmm/m128,xmm */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x57): /* vxorp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_ALL_FP(, 0x0f, 0x58): /* add{p,s}{s,d} xmm/mem,xmm */ - CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x58): /* vadd{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_ALL_FP(, 0x0f, 0x59): /* mul{p,s}{s,d} xmm/mem,xmm */ - CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x59): /* vmul{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_ALL_FP(, 0x0f, 0x5c): /* sub{p,s}{s,d} xmm/mem,xmm */ - CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x5c): /* vsub{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_ALL_FP(, 0x0f, 0x5d): /* min{p,s}{s,d} xmm/mem,xmm */ - CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x5d): /* vmin{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_ALL_FP(, 0x0f, 0x5e): /* div{p,s}{s,d} xmm/mem,xmm */ - CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x5e): /* vdiv{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_ALL_FP(, 0x0f, 0x5f): /* max{p,s}{s,d} xmm/mem,xmm */ - CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x5f): /* vmax{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - simd_0f_fp: - if ( vex.opcx == vex_none ) + CASE_SIMD_ALL_FP(, 0x0f, 0x10) + : /* mov{up,s}{s,d} xmm/mem,xmm */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x10) + : /* vmovup{s,d} {x,y}mm/mem,{x,y}mm */ + CASE_SIMD_SCALAR_FP(_VEX, 0x0f, 0x10) + : /* vmovs{s,d} mem,xmm */ + /* vmovs{s,d} xmm,xmm,xmm */ + CASE_SIMD_ALL_FP(, 0x0f, 0x11) + : /* mov{up,s}{s,d} xmm,xmm/mem */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x11) + : 
/* vmovup{s,d} {x,y}mm,{x,y}mm/mem */ + CASE_SIMD_SCALAR_FP(_VEX, 0x0f, 0x11) + : /* vmovs{s,d} xmm,mem */ + /* vmovs{s,d} xmm,xmm,xmm */ + CASE_SIMD_PACKED_FP(, 0x0f, 0x14) + : /* unpcklp{s,d} xmm/m128,xmm */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x14) + : /* vunpcklp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_FP(, 0x0f, 0x15) + : /* unpckhp{s,d} xmm/m128,xmm */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x15) + : /* vunpckhp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_FP(, 0x0f, 0x28) + : /* movap{s,d} xmm/m128,xmm */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x28) + : /* vmovap{s,d} {x,y}mm/mem,{x,y}mm */ + CASE_SIMD_PACKED_FP(, 0x0f, 0x29) + : /* movap{s,d} xmm,xmm/m128 */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x29) + : /* vmovap{s,d} {x,y}mm,{x,y}mm/mem */ + CASE_SIMD_ALL_FP(, 0x0f, 0x51) + : /* sqrt{p,s}{s,d} xmm/mem,xmm */ + CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x51) + : /* vsqrtp{s,d} {x,y}mm/mem,{x,y}mm */ + /* vsqrts{s,d} xmm/m32,xmm,xmm */ + CASE_SIMD_SINGLE_FP(, 0x0f, 0x52) + : /* rsqrt{p,s}s xmm/mem,xmm */ + CASE_SIMD_SINGLE_FP(_VEX, 0x0f, 0x52) + : /* vrsqrtps {x,y}mm/mem,{x,y}mm */ + /* vrsqrtss xmm/m32,xmm,xmm */ + CASE_SIMD_SINGLE_FP(, 0x0f, 0x53) + : /* rcp{p,s}s xmm/mem,xmm */ + CASE_SIMD_SINGLE_FP(_VEX, 0x0f, 0x53) + : /* vrcpps {x,y}mm/mem,{x,y}mm */ + /* vrcpss xmm/m32,xmm,xmm */ + CASE_SIMD_PACKED_FP(, 0x0f, 0x54) + : /* andp{s,d} xmm/m128,xmm */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x54) + : /* vandp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_FP(, 0x0f, 0x55) + : /* andnp{s,d} xmm/m128,xmm */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x55) + : /* vandnp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_FP(, 0x0f, 0x56) + : /* orp{s,d} xmm/m128,xmm */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x56) + : /* vorp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_FP(, 0x0f, 0x57) + : /* xorp{s,d} xmm/m128,xmm */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x57) + : /* vxorp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_ALL_FP(, 0x0f, 0x58) + : /* add{p,s}{s,d} xmm/mem,xmm */ + CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x58) + : /* vadd{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_ALL_FP(, 0x0f, 0x59) + : /* mul{p,s}{s,d} xmm/mem,xmm */ + CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x59) + : /* vmul{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_ALL_FP(, 0x0f, 0x5c) + : /* sub{p,s}{s,d} xmm/mem,xmm */ + CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x5c) + : /* vsub{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_ALL_FP(, 0x0f, 0x5d) + : /* min{p,s}{s,d} xmm/mem,xmm */ + CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x5d) + : /* vmin{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_ALL_FP(, 0x0f, 0x5e) + : /* div{p,s}{s,d} xmm/mem,xmm */ + CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x5e) + : /* vdiv{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_ALL_FP(, 0x0f, 0x5f) + : /* max{p,s}{s,d} xmm/mem,xmm */ + CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x5f) + : /* vmax{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + simd_0f_fp: if ( vex.opcx == vex_none ) { if ( vex.pfx & VEX_PREFIX_DOUBLE_MASK ) { - simd_0f_sse2: + simd_0f_sse2: vcpu_must_have(sse2); } else vcpu_must_have(sse); - simd_0f_xmm: + simd_0f_xmm: get_fpu(X86EMUL_FPU_xmm); } else @@ -5889,9 +6027,9 @@ x86_emulate( /* vmovs{s,d} to/from memory have only two operands. 
*/ if ( (b & ~1) == 0x10 && ea.type == OP_MEM ) d |= TwoOp; - simd_0f_avx: + simd_0f_avx: host_and_vcpu_must_have(avx); - simd_0f_ymm: + simd_0f_ymm: get_fpu(X86EMUL_FPU_ymm); } simd_0f_common: @@ -5908,36 +6046,50 @@ x86_emulate( insn_bytes = PFX_BYTES + 2; break; - CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x2b): /* vmovntp{s,d} [xyz]mm,mem */ - generate_exception_if(ea.type != OP_MEM || evex.opmsk, EXC_UD); + CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x2b) + : /* vmovntp{s,d} [xyz]mm,mem */ + generate_exception_if(ea.type != OP_MEM || evex.opmsk, EXC_UD); sfence = true; fault_suppression = false; /* fall through */ - CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x10): /* vmovup{s,d} [xyz]mm/mem,[xyz]mm{k} */ - CASE_SIMD_SCALAR_FP(_EVEX, 0x0f, 0x10): /* vmovs{s,d} mem,xmm{k} */ - /* vmovs{s,d} xmm,xmm,xmm{k} */ - CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x11): /* vmovup{s,d} [xyz]mm,[xyz]mm/mem{k} */ - CASE_SIMD_SCALAR_FP(_EVEX, 0x0f, 0x11): /* vmovs{s,d} xmm,mem{k} */ - /* vmovs{s,d} xmm,xmm,xmm{k} */ - CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x28): /* vmovap{s,d} [xyz]mm/mem,[xyz]mm{k} */ - CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x29): /* vmovap{s,d} [xyz]mm,[xyz]mm/mem{k} */ - /* vmovs{s,d} to/from memory have only two operands. */ - if ( (b & ~1) == 0x10 && ea.type == OP_MEM ) - d |= TwoOp; + CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x10) + : /* vmovup{s,d} [xyz]mm/mem,[xyz]mm{k} */ + CASE_SIMD_SCALAR_FP(_EVEX, 0x0f, 0x10) + : /* vmovs{s,d} mem,xmm{k} */ + /* vmovs{s,d} xmm,xmm,xmm{k} */ + CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x11) + : /* vmovup{s,d} [xyz]mm,[xyz]mm/mem{k} */ + CASE_SIMD_SCALAR_FP(_EVEX, 0x0f, 0x11) + : /* vmovs{s,d} xmm,mem{k} */ + /* vmovs{s,d} xmm,xmm,xmm{k} */ + CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x28) + : /* vmovap{s,d} [xyz]mm/mem,[xyz]mm{k} */ + CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x29) + : /* vmovap{s,d} [xyz]mm,[xyz]mm/mem{k} */ + /* vmovs{s,d} to/from memory have only two operands. 
*/ + if ( (b & ~1) == 0x10 && ea.type == OP_MEM ) d |= TwoOp; generate_exception_if(evex.brs, EXC_UD); /* fall through */ - CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0x51): /* vsqrtp{s,d} [xyz]mm/mem,[xyz]mm{k} */ - /* vsqrts{s,d} xmm/m32,xmm,xmm{k} */ - CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0x58): /* vadd{p,s}{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0x59): /* vmul{p,s}{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0x5c): /* vsub{p,s}{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0x5d): /* vmin{p,s}{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0x5e): /* vdiv{p,s}{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0x5f): /* vmax{p,s}{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - generate_exception_if((evex.w != (evex.pfx & VEX_PREFIX_DOUBLE_MASK) || - (ea.type != OP_REG && evex.brs && - (evex.pfx & VEX_PREFIX_SCALAR_MASK))), - EXC_UD); + CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0x51) + : /* vsqrtp{s,d} [xyz]mm/mem,[xyz]mm{k} */ + /* vsqrts{s,d} xmm/m32,xmm,xmm{k} */ + CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0x58) + : /* vadd{p,s}{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0x59) + : /* vmul{p,s}{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0x5c) + : /* vsub{p,s}{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0x5d) + : /* vmin{p,s}{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0x5e) + : /* vdiv{p,s}{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0x5f) + : /* vmax{p,s}{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + generate_exception_if( + (evex.w != (evex.pfx & VEX_PREFIX_DOUBLE_MASK) || + (ea.type != OP_REG && evex.brs && + (evex.pfx & VEX_PREFIX_SCALAR_MASK))), + EXC_UD); host_and_vcpu_must_have(avx512f); if ( ea.type != OP_REG || !evex.brs ) avx512_vlen_check(evex.pfx & VEX_PREFIX_SCALAR_MASK); @@ -5955,64 +6107,85 @@ x86_emulate( insn_bytes = EVEX_PFX_BYTES + 2; break; - case X86EMUL_OPC_66(0x0f, 0x12): /* movlpd m64,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x12): /* vmovlpd m64,xmm,xmm */ - CASE_SIMD_PACKED_FP(, 0x0f, 0x13): /* movlp{s,d} xmm,m64 */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x13): /* vmovlp{s,d} xmm,m64 */ - case X86EMUL_OPC_66(0x0f, 0x16): /* movhpd m64,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x16): /* vmovhpd m64,xmm,xmm */ - CASE_SIMD_PACKED_FP(, 0x0f, 0x17): /* movhp{s,d} xmm,m64 */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x17): /* vmovhp{s,d} xmm,m64 */ - generate_exception_if(ea.type != OP_MEM, EXC_UD); + case X86EMUL_OPC_66(0x0f, 0x12): /* movlpd m64,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x12): /* vmovlpd m64,xmm,xmm */ + CASE_SIMD_PACKED_FP(, 0x0f, 0x13) + : /* movlp{s,d} xmm,m64 */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x13) + : /* vmovlp{s,d} xmm,m64 */ + case X86EMUL_OPC_66(0x0f, 0x16) + : /* movhpd m64,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x16) + : /* vmovhpd m64,xmm,xmm */ + CASE_SIMD_PACKED_FP(, 0x0f, 0x17) + : /* movhp{s,d} xmm,m64 */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x17) + : /* vmovhp{s,d} xmm,m64 */ + generate_exception_if(ea.type != OP_MEM, EXC_UD); /* fall through */ - case X86EMUL_OPC(0x0f, 0x12): /* movlps m64,xmm */ - /* movhlps xmm,xmm */ - case X86EMUL_OPC_VEX(0x0f, 0x12): /* vmovlps m64,xmm,xmm */ - /* vmovhlps xmm,xmm,xmm */ - case X86EMUL_OPC(0x0f, 0x16): /* movhps m64,xmm */ - /* movlhps xmm,xmm */ - case X86EMUL_OPC_VEX(0x0f, 0x16): /* vmovhps m64,xmm,xmm */ - /* vmovlhps xmm,xmm,xmm */ + case X86EMUL_OPC(0x0f, 0x12): /* movlps m64,xmm */ + /* movhlps xmm,xmm 
*/ + case X86EMUL_OPC_VEX(0x0f, 0x12): /* vmovlps m64,xmm,xmm */ + /* vmovhlps xmm,xmm,xmm */ + case X86EMUL_OPC(0x0f, 0x16): /* movhps m64,xmm */ + /* movlhps xmm,xmm */ + case X86EMUL_OPC_VEX(0x0f, 0x16): /* vmovhps m64,xmm,xmm */ + /* vmovlhps xmm,xmm,xmm */ generate_exception_if(vex.l, EXC_UD); if ( (d & DstMask) != DstMem ) d &= ~TwoOp; op_bytes = 8; goto simd_0f_fp; - case X86EMUL_OPC_F3(0x0f, 0x12): /* movsldup xmm/m128,xmm */ - case X86EMUL_OPC_VEX_F3(0x0f, 0x12): /* vmovsldup {x,y}mm/mem,{x,y}mm */ - case X86EMUL_OPC_F2(0x0f, 0x12): /* movddup xmm/m64,xmm */ - case X86EMUL_OPC_VEX_F2(0x0f, 0x12): /* vmovddup {x,y}mm/mem,{x,y}mm */ - case X86EMUL_OPC_F3(0x0f, 0x16): /* movshdup xmm/m128,xmm */ - case X86EMUL_OPC_VEX_F3(0x0f, 0x16): /* vmovshdup {x,y}mm/mem,{x,y}mm */ + case X86EMUL_OPC_F3(0x0f, 0x12): /* movsldup xmm/m128,xmm */ + case X86EMUL_OPC_VEX_F3(0x0f, 0x12): /* vmovsldup {x,y}mm/mem,{x,y}mm */ + case X86EMUL_OPC_F2(0x0f, 0x12): /* movddup xmm/m64,xmm */ + case X86EMUL_OPC_VEX_F2(0x0f, 0x12): /* vmovddup {x,y}mm/mem,{x,y}mm */ + case X86EMUL_OPC_F3(0x0f, 0x16): /* movshdup xmm/m128,xmm */ + case X86EMUL_OPC_VEX_F3(0x0f, 0x16): /* vmovshdup {x,y}mm/mem,{x,y}mm */ d |= TwoOp; - op_bytes = !(vex.pfx & VEX_PREFIX_DOUBLE_MASK) || vex.l - ? 16 << vex.l : 8; + op_bytes = + !(vex.pfx & VEX_PREFIX_DOUBLE_MASK) || vex.l ? 16 << vex.l : 8; simd_0f_sse3_avx: if ( vex.opcx != vex_none ) goto simd_0f_avx; host_and_vcpu_must_have(sse3); goto simd_0f_xmm; - CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x14): /* vunpcklp{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x15): /* vunpckhp{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - generate_exception_if(evex.w != (evex.pfx & VEX_PREFIX_DOUBLE_MASK), - EXC_UD); + CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x14) + : /* vunpcklp{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x15) + : /* vunpckhp{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + generate_exception_if( + evex.w != (evex.pfx & VEX_PREFIX_DOUBLE_MASK), EXC_UD); fault_suppression = false; /* fall through */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xdb): /* vpand{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xdf): /* vpandn{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xeb): /* vpor{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xef): /* vpxor{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x14): /* vprorv{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x15): /* vprolv{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x39): /* vpmins{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x3b): /* vpminu{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x3d): /* vpmaxs{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x3f): /* vpmaxu{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x45): /* vpsrlv{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x46): /* vpsrav{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x47): /* vpsllv{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f, 0xdb): /* vpand{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f, 0xdf): /* vpandn{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f, 0xeb): /* vpor{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f, 
0xef): /* vpxor{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0x14): /* vprorv{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0x15): /* vprolv{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0x39): /* vpmins{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0x3b): /* vpminu{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0x3d): /* vpmaxs{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0x3f): /* vpmaxu{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0x45): /* vpsrlv{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0x46): /* vpsrav{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0x47): /* vpsllv{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ avx512f_no_sae: host_and_vcpu_must_have(avx512f); generate_exception_if(ea.type != OP_MEM && evex.brs, EXC_UD); @@ -6027,8 +6200,8 @@ x86_emulate( if ( b & 2 ) { /* Write to CR/DR. */ - typeof(ops->write_cr) write = (b & 1) ? ops->write_dr - : ops->write_cr; + typeof(ops->write_cr) write = + (b & 1) ? ops->write_dr : ops->write_cr; fail_if(!write); rc = write(modrm_reg, src.val, ctxt); @@ -6045,20 +6218,24 @@ x86_emulate( goto done; break; - case X86EMUL_OPC_66(0x0f, 0x2a): /* cvtpi2pd mm/m64,xmm */ + case X86EMUL_OPC_66(0x0f, 0x2a): /* cvtpi2pd mm/m64,xmm */ if ( ea.type == OP_REG ) { - case X86EMUL_OPC(0x0f, 0x2a): /* cvtpi2ps mm/m64,xmm */ - CASE_SIMD_PACKED_FP(, 0x0f, 0x2c): /* cvttp{s,d}2pi xmm/mem,mm */ - CASE_SIMD_PACKED_FP(, 0x0f, 0x2d): /* cvtp{s,d}2pi xmm/mem,mm */ - host_and_vcpu_must_have(mmx); + case X86EMUL_OPC(0x0f, 0x2a): /* cvtpi2ps mm/m64,xmm */ + CASE_SIMD_PACKED_FP(, 0x0f, 0x2c) + : /* cvttp{s,d}2pi xmm/mem,mm */ + CASE_SIMD_PACKED_FP(, 0x0f, 0x2d) + : /* cvtp{s,d}2pi xmm/mem,mm */ + host_and_vcpu_must_have(mmx); } op_bytes = (b & 4) && (vex.pfx & VEX_PREFIX_DOUBLE_MASK) ? 
16 : 8; goto simd_0f_fp; - CASE_SIMD_SCALAR_FP(, 0x0f, 0x2a): /* cvtsi2s{s,d} r/m,xmm */ - CASE_SIMD_SCALAR_FP(_VEX, 0x0f, 0x2a): /* vcvtsi2s{s,d} r/m,xmm,xmm */ - if ( vex.opcx == vex_none ) + CASE_SIMD_SCALAR_FP(, 0x0f, 0x2a) + : /* cvtsi2s{s,d} r/m,xmm */ + CASE_SIMD_SCALAR_FP(_VEX, 0x0f, 0x2a) + : /* vcvtsi2s{s,d} r/m,xmm,xmm */ + if ( vex.opcx == vex_none ) { if ( vex.pfx & VEX_PREFIX_DOUBLE_MASK ) vcpu_must_have(sse2); @@ -6085,11 +6262,15 @@ x86_emulate( state->simd_size = simd_none; goto simd_0f_rm; - CASE_SIMD_SCALAR_FP(, 0x0f, 0x2c): /* cvtts{s,d}2si xmm/mem,reg */ - CASE_SIMD_SCALAR_FP(_VEX, 0x0f, 0x2c): /* vcvtts{s,d}2si xmm/mem,reg */ - CASE_SIMD_SCALAR_FP(, 0x0f, 0x2d): /* cvts{s,d}2si xmm/mem,reg */ - CASE_SIMD_SCALAR_FP(_VEX, 0x0f, 0x2d): /* vcvts{s,d}2si xmm/mem,reg */ - if ( vex.opcx == vex_none ) + CASE_SIMD_SCALAR_FP(, 0x0f, 0x2c) + : /* cvtts{s,d}2si xmm/mem,reg */ + CASE_SIMD_SCALAR_FP(_VEX, 0x0f, 0x2c) + : /* vcvtts{s,d}2si xmm/mem,reg */ + CASE_SIMD_SCALAR_FP(, 0x0f, 0x2d) + : /* cvts{s,d}2si xmm/mem,reg */ + CASE_SIMD_SCALAR_FP(_VEX, 0x0f, 0x2d) + : /* vcvts{s,d}2si xmm/mem,reg */ + if ( vex.opcx == vex_none ) { if ( vex.pfx & VEX_PREFIX_DOUBLE_MASK ) vcpu_must_have(sse2); @@ -6132,17 +6313,21 @@ x86_emulate( copy_REX_VEX(opc, rex_prefix, vex); ea.reg = decode_gpr(&_regs, modrm_reg); - invoke_stub("", "", "=a" (*ea.reg) : "c" (mmvalp), "m" (*mmvalp)); + invoke_stub("", "", "=a"(*ea.reg) : "c"(mmvalp), "m"(*mmvalp)); put_stub(stub); state->simd_size = simd_none; break; - CASE_SIMD_PACKED_FP(, 0x0f, 0x2e): /* ucomis{s,d} xmm/mem,xmm */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x2e): /* vucomis{s,d} xmm/mem,xmm */ - CASE_SIMD_PACKED_FP(, 0x0f, 0x2f): /* comis{s,d} xmm/mem,xmm */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x2f): /* vcomis{s,d} xmm/mem,xmm */ - if ( vex.opcx == vex_none ) + CASE_SIMD_PACKED_FP(, 0x0f, 0x2e) + : /* ucomis{s,d} xmm/mem,xmm */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x2e) + : /* vucomis{s,d} xmm/mem,xmm */ + CASE_SIMD_PACKED_FP(, 0x0f, 0x2f) + : /* comis{s,d} xmm/mem,xmm */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x2f) + : /* vcomis{s,d} xmm/mem,xmm */ + if ( vex.opcx == vex_none ) { if ( vex.pfx ) vcpu_must_have(sse2); @@ -6188,20 +6373,22 @@ x86_emulate( invoke_stub(_PRE_EFLAGS("[eflags]", "[mask]", "[tmp]"), _POST_EFLAGS("[eflags]", "[mask]", "[tmp]"), - [eflags] "+g" (_regs.eflags), - [tmp] "=&r" (dummy), "+m" (*mmvalp) - : "a" (mmvalp), [mask] "i" (EFLAGS_MASK)); + [eflags] "+g"(_regs.eflags), [tmp] "=&r"(dummy), + "+m"(*mmvalp) + : "a"(mmvalp), [mask] "i"(EFLAGS_MASK)); put_stub(stub); ASSERT(!state->simd_size); break; - CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x2e): /* vucomis{s,d} xmm/mem,xmm */ - CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x2f): /* vcomis{s,d} xmm/mem,xmm */ - generate_exception_if((evex.reg != 0xf || !evex.RX || evex.opmsk || - (ea.type != OP_REG && evex.brs) || - evex.w != evex.pfx), - EXC_UD); + CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x2e) + : /* vucomis{s,d} xmm/mem,xmm */ + CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x2f) + : /* vcomis{s,d} xmm/mem,xmm */ + generate_exception_if( + (evex.reg != 0xf || !evex.RX || evex.opmsk || + (ea.type != OP_REG && evex.brs) || evex.w != evex.pfx), + EXC_UD); host_and_vcpu_must_have(avx512f); if ( !evex.brs ) avx512_vlen_check(true); @@ -6220,7 +6407,8 @@ x86_emulate( goto done; break; - case X86EMUL_OPC(0x0f, 0x31): rdtsc: /* rdtsc */ + case X86EMUL_OPC(0x0f, 0x31): + rdtsc: /* rdtsc */ if ( !mode_ring0() ) { fail_if(ops->read_cr == NULL); @@ -6229,8 +6417,8 @@ x86_emulate( generate_exception_if(cr4 & X86_CR4_TSD, EXC_GP, 0); } 
fail_if(ops->read_msr == NULL); - if ( (rc = ops->read_msr(MSR_IA32_TSC, - &msr_val, ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->read_msr(MSR_IA32_TSC, &msr_val, ctxt)) != + X86EMUL_OKAY ) goto done; _regs.r(dx) = msr_val >> 32; _regs.r(ax) = (uint32_t)msr_val; @@ -6251,22 +6439,22 @@ x86_emulate( generate_exception_if(!in_protmode(ctxt, ops), EXC_GP, 0); fail_if(ops->read_msr == NULL); - if ( (rc = ops->read_msr(MSR_IA32_SYSENTER_CS, - &msr_val, ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->read_msr(MSR_IA32_SYSENTER_CS, &msr_val, ctxt)) != + X86EMUL_OKAY ) goto done; generate_exception_if(!(msr_val & 0xfffc), EXC_GP, 0); _regs.eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF | X86_EFLAGS_RF); - cs.sel = msr_val & ~3; /* SELECTOR_RPL_MASK */ - cs.base = 0; /* flat segment */ - cs.limit = ~0u; /* 4GB limit */ + cs.sel = msr_val & ~3; /* SELECTOR_RPL_MASK */ + cs.base = 0; /* flat segment */ + cs.limit = ~0u; /* 4GB limit */ cs.attr = ctxt->lma ? 0xa9b /* G+L+P+S+Code */ : 0xc9b; /* G+DB+P+S+Code */ sreg.sel = cs.sel + 8; - sreg.base = 0; /* flat segment */ + sreg.base = 0; /* flat segment */ sreg.limit = ~0u; /* 4GB limit */ sreg.attr = 0xc93; /* G+DB+P+S+Data */ @@ -6275,13 +6463,13 @@ x86_emulate( (rc = ops->write_segment(x86_seg_ss, &sreg, ctxt)) != 0 ) goto done; - if ( (rc = ops->read_msr(MSR_IA32_SYSENTER_EIP, - &msr_val, ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->read_msr(MSR_IA32_SYSENTER_EIP, &msr_val, ctxt)) != + X86EMUL_OKAY ) goto done; _regs.r(ip) = ctxt->lma ? msr_val : (uint32_t)msr_val; - if ( (rc = ops->read_msr(MSR_IA32_SYSENTER_ESP, - &msr_val, ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->read_msr(MSR_IA32_SYSENTER_ESP, &msr_val, ctxt)) != + X86EMUL_OKAY ) goto done; _regs.r(sp) = ctxt->lma ? msr_val : (uint32_t)msr_val; @@ -6294,25 +6482,25 @@ x86_emulate( generate_exception_if(!in_protmode(ctxt, ops), EXC_GP, 0); fail_if(ops->read_msr == NULL); - if ( (rc = ops->read_msr(MSR_IA32_SYSENTER_CS, - &msr_val, ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->read_msr(MSR_IA32_SYSENTER_CS, &msr_val, ctxt)) != + X86EMUL_OKAY ) goto done; generate_exception_if(!(msr_val & 0xfffc), EXC_GP, 0); generate_exception_if(op_bytes == 8 && - (!is_canonical_address(_regs.r(dx)) || - !is_canonical_address(_regs.r(cx))), + (!is_canonical_address(_regs.r(dx)) || + !is_canonical_address(_regs.r(cx))), EXC_GP, 0); cs.sel = (msr_val | 3) + /* SELECTOR_RPL_MASK */ (op_bytes == 8 ? 32 : 16); - cs.base = 0; /* flat segment */ - cs.limit = ~0u; /* 4GB limit */ + cs.base = 0; /* flat segment */ + cs.limit = ~0u; /* 4GB limit */ cs.attr = op_bytes == 8 ? 0xafb /* L+DB+P+DPL3+S+Code */ : 0xcfb; /* G+DB+P+DPL3+S+Code */ sreg.sel = cs.sel + 8; - sreg.base = 0; /* flat segment */ + sreg.base = 0; /* flat segment */ sreg.limit = ~0u; /* 4GB limit */ sreg.attr = 0xcf3; /* G+DB+P+DPL3+S+Data */ @@ -6327,13 +6515,13 @@ x86_emulate( singlestep = _regs.eflags & X86_EFLAGS_TF; break; - case X86EMUL_OPC(0x0f, 0x40) ... X86EMUL_OPC(0x0f, 0x4f): /* cmovcc */ + case X86EMUL_OPC(0x0f, 0x40)... 
X86EMUL_OPC(0x0f, 0x4f): /* cmovcc */ vcpu_must_have(cmov); if ( test_cc(b, _regs.eflags) ) dst.val = src.val; break; - case X86EMUL_OPC_VEX(0x0f, 0x4a): /* kadd{w,q} k,k,k */ + case X86EMUL_OPC_VEX(0x0f, 0x4a): /* kadd{w,q} k,k,k */ if ( !vex.w ) host_and_vcpu_must_have(avx512dq); /* fall through */ @@ -6357,7 +6545,8 @@ x86_emulate( opmask_common: host_and_vcpu_must_have(avx512f); generate_exception_if(!vex.r || (mode_64bit() && !(vex.reg & 8)) || - ea.type != OP_REG, EXC_UD); + ea.type != OP_REG, + EXC_UD); vex.reg |= 8; d &= ~TwoOp; @@ -6378,7 +6567,7 @@ x86_emulate( generate_exception_if(vex.l || vex.reg != 0xf, EXC_UD); goto opmask_basic; - case X86EMUL_OPC_VEX(0x0f, 0x4b): /* kunpck{w,d}{d,q} k,k,k */ + case X86EMUL_OPC_VEX(0x0f, 0x4b): /* kunpck{w,d}{d,q} k,k,k */ generate_exception_if(!vex.l, EXC_UD); host_and_vcpu_must_have(avx512bw); goto opmask_common; @@ -6387,11 +6576,15 @@ x86_emulate( generate_exception_if(!vex.l || vex.w, EXC_UD); goto opmask_common; - CASE_SIMD_PACKED_FP(, 0x0f, 0x50): /* movmskp{s,d} xmm,reg */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x50): /* vmovmskp{s,d} {x,y}mm,reg */ - CASE_SIMD_PACKED_INT(0x0f, 0xd7): /* pmovmskb {,x}mm,reg */ - case X86EMUL_OPC_VEX_66(0x0f, 0xd7): /* vpmovmskb {x,y}mm,reg */ - opc = init_prefixes(stub); + CASE_SIMD_PACKED_FP(, 0x0f, 0x50) + : /* movmskp{s,d} xmm,reg */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x50) + : /* vmovmskp{s,d} {x,y}mm,reg */ + CASE_SIMD_PACKED_INT(0x0f, 0xd7) + : /* pmovmskb {,x}mm,reg */ + case X86EMUL_OPC_VEX_66(0x0f, 0xd7) + : /* vpmovmskb {x,y}mm,reg */ + opc = init_prefixes(stub); opc[0] = b; /* Convert GPR destination to %rAX. */ rex_prefix &= ~REX_R; @@ -6435,7 +6628,7 @@ x86_emulate( } copy_REX_VEX(opc, rex_prefix, vex); - invoke_stub("", "", "=a" (dst.val) : [dummy] "i" (0)); + invoke_stub("", "", "=a"(dst.val) : [dummy] "i"(0)); put_stub(stub); @@ -6443,186 +6636,336 @@ x86_emulate( dst.bytes = 4; break; - CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x54): /* vandp{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x55): /* vandnp{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x56): /* vorp{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x57): /* vxorp{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - generate_exception_if((evex.w != (evex.pfx & VEX_PREFIX_DOUBLE_MASK) || - (ea.type != OP_MEM && evex.brs)), - EXC_UD); + CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x54) + : /* vandp{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x55) + : /* vandnp{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x56) + : /* vorp{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x57) + : /* vxorp{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + generate_exception_if( + (evex.w != (evex.pfx & VEX_PREFIX_DOUBLE_MASK) || + (ea.type != OP_MEM && evex.brs)), + EXC_UD); host_and_vcpu_must_have(avx512dq); avx512_vlen_check(false); goto simd_zmm; - CASE_SIMD_ALL_FP(, 0x0f, 0x5a): /* cvt{p,s}{s,d}2{p,s}{s,d} xmm/mem,xmm */ - CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x5a): /* vcvtp{s,d}2p{s,d} xmm/mem,xmm */ - /* vcvts{s,d}2s{s,d} xmm/mem,xmm,xmm */ - op_bytes = 4 << (((vex.pfx & VEX_PREFIX_SCALAR_MASK) ? 0 : 1 + vex.l) + - !!(vex.pfx & VEX_PREFIX_DOUBLE_MASK)); + CASE_SIMD_ALL_FP(, 0x0f, 0x5a) + : /* cvt{p,s}{s,d}2{p,s}{s,d} xmm/mem,xmm */ + CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x5a) + : /* vcvtp{s,d}2p{s,d} xmm/mem,xmm */ + /* vcvts{s,d}2s{s,d} xmm/mem,xmm,xmm */ + op_bytes = + 4 << (((vex.pfx & VEX_PREFIX_SCALAR_MASK) ? 
0 : 1 + vex.l) + + !!(vex.pfx & VEX_PREFIX_DOUBLE_MASK)); simd_0f_cvt: if ( vex.opcx == vex_none ) goto simd_0f_sse2; goto simd_0f_avx; - CASE_SIMD_PACKED_FP(, 0x0f, 0x5b): /* cvt{ps,dq}2{dq,ps} xmm/mem,xmm */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x5b): /* vcvt{ps,dq}2{dq,ps} {x,y}mm/mem,{x,y}mm */ - case X86EMUL_OPC_F3(0x0f, 0x5b): /* cvttps2dq xmm/mem,xmm */ - case X86EMUL_OPC_VEX_F3(0x0f, 0x5b): /* vcvttps2dq {x,y}mm/mem,{x,y}mm */ - d |= TwoOp; + CASE_SIMD_PACKED_FP(, 0x0f, 0x5b) + : /* cvt{ps,dq}2{dq,ps} xmm/mem,xmm */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x5b) + : /* vcvt{ps,dq}2{dq,ps} {x,y}mm/mem,{x,y}mm */ + case X86EMUL_OPC_F3(0x0f, 0x5b) + : /* cvttps2dq xmm/mem,xmm */ + case X86EMUL_OPC_VEX_F3(0x0f, 0x5b) + : /* vcvttps2dq {x,y}mm/mem,{x,y}mm */ + d |= TwoOp; op_bytes = 16 << vex.l; goto simd_0f_cvt; - CASE_SIMD_PACKED_INT(0x0f, 0x60): /* punpcklbw {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x60): /* vpunpcklbw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0x61): /* punpcklwd {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x61): /* vpunpcklwd {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0x62): /* punpckldq {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x62): /* vpunpckldq {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0x68): /* punpckhbw {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x68): /* vpunpckhbw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0x69): /* punpckhwd {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x69): /* vpunpckhwd {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0x6a): /* punpckhdq {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x6a): /* vpunpckhdq {x,y}mm/mem,{x,y}mm,{x,y}mm */ - op_bytes = vex.pfx ? 16 << vex.l : b & 8 ? 8 : 4; + CASE_SIMD_PACKED_INT(0x0f, 0x60) + : /* punpcklbw {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x60) + : /* vpunpcklbw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0x61) + : /* punpcklwd {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x61) + : /* vpunpcklwd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0x62) + : /* punpckldq {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x62) + : /* vpunpckldq {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0x68) + : /* punpckhbw {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x68) + : /* vpunpckhbw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0x69) + : /* punpckhwd {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x69) + : /* vpunpckhwd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0x6a) + : /* punpckhdq {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x6a) + : /* vpunpckhdq {x,y}mm/mem,{x,y}mm,{x,y}mm */ + op_bytes = vex.pfx ? 16 << vex.l : b & 8 ? 
8 : 4; /* fall through */ - CASE_SIMD_PACKED_INT(0x0f, 0x63): /* packssbw {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x63): /* vpackssbw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0x64): /* pcmpgtb {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x64): /* vpcmpgtb {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0x65): /* pcmpgtw {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x65): /* vpcmpgtw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0x66): /* pcmpgtd {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x66): /* vpcmpgtd {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0x67): /* packusbw {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x67): /* vpackusbw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0x6b): /* packsswd {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x6b): /* vpacksswd {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_66(0x0f, 0x6c): /* punpcklqdq xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x6c): /* vpunpcklqdq {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_66(0x0f, 0x6d): /* punpckhqdq xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x6d): /* vpunpckhqdq {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0x74): /* pcmpeqb {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x74): /* vpcmpeqb {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0x75): /* pcmpeqw {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x75): /* vpcmpeqw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0x76): /* pcmpeqd {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x76): /* vpcmpeqd {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xd1): /* psrlw {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xd1): /* vpsrlw xmm/m128,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xd2): /* psrld {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xd2): /* vpsrld xmm/m128,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xd3): /* psrlq {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xd3): /* vpsrlq xmm/m128,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_66(0x0f, 0xd4): /* paddq xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xd4): /* vpaddq {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xd5): /* pmullw {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xd5): /* vpmullw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xd8): /* psubusb {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xd8): /* vpsubusb {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xd9): /* psubusw {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xd9): /* vpsubusw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_66(0x0f, 0xda): /* pminub xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xda): /* vpminub {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xdb): /* pand {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xdb): /* vpand {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xdc): /* paddusb {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xdc): /* vpaddusb {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xdd): /* paddusw {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xdd): /* vpaddusw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_66(0x0f, 0xde): /* pmaxub xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xde): /* vpmaxub {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xdf): /* pandn {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xdf): /* vpandn 
{x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_66(0x0f, 0xe0): /* pavgb xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xe0): /* vpavgb {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xe1): /* psraw {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xe1): /* vpsraw xmm/m128,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xe2): /* psrad {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xe2): /* vpsrad xmm/m128,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_66(0x0f, 0xe3): /* pavgw xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xe3): /* vpavgw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_66(0x0f, 0xe4): /* pmulhuw xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xe4): /* vpmulhuw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xe5): /* pmulhw {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xe5): /* vpmulhw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xe8): /* psubsb {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xe8): /* vpsubsb {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xe9): /* psubsw {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xe9): /* vpsubsw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_66(0x0f, 0xea): /* pminsw xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xea): /* vpminsw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xeb): /* por {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xeb): /* vpor {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xec): /* paddsb {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xec): /* vpaddsb {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xed): /* paddsw {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xed): /* vpaddsw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_66(0x0f, 0xee): /* pmaxsw xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xee): /* vpmaxsw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xef): /* pxor {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xef): /* vpxor {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xf1): /* psllw {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xf1): /* vpsllw xmm/m128,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xf2): /* pslld {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xf2): /* vpslld xmm/m128,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xf3): /* psllq {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xf3): /* vpsllq xmm/m128,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_66(0x0f, 0xf4): /* pmuludq xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xf4): /* vpmuludq {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_66(0x0f, 0xf6): /* psadbw xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xf6): /* vpsadbw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xf8): /* psubb {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xf8): /* vpsubb {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xf9): /* psubw {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xf9): /* vpsubw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xfa): /* psubd {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xfa): /* vpsubd {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_66(0x0f, 0xfb): /* psubq xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xfb): /* vpsubq {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xfc): /* paddb {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xfc): /* vpaddb {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xfd): /* paddw {,x}mm/mem,{,x}mm 
*/ - case X86EMUL_OPC_VEX_66(0x0f, 0xfd): /* vpaddw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_INT(0x0f, 0xfe): /* paddd {,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xfe): /* vpaddd {x,y}mm/mem,{x,y}mm,{x,y}mm */ - simd_0f_int: - if ( vex.opcx != vex_none ) - { - case X86EMUL_OPC_VEX_66(0x0f38, 0x00): /* vpshufb {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x01): /* vphaddw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x02): /* vphaddd {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x03): /* vphaddsw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x04): /* vpmaddubsw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x05): /* vphsubw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x06): /* vphsubd {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x07): /* vphsubsw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x08): /* vpsignb {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x09): /* vpsignw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x0a): /* vpsignd {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x0b): /* vpmulhrsw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x1c): /* vpabsb {x,y}mm/mem,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x1d): /* vpabsw {x,y}mm/mem,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x1e): /* vpabsd {x,y}mm/mem,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x28): /* vpmuldq {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x29): /* vpcmpeqq {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x2b): /* vpackusdw {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x37): /* vpcmpgtq {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x38): /* vpminsb {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x39): /* vpminsd {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x3a): /* vpminub {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x3b): /* vpminud {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x3c): /* vpmaxsb {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x3d): /* vpmaxsd {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x3e): /* vpmaxub {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x3f): /* vpmaxud {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x40): /* vpmulld {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0x63) + : /* packssbw {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x63) + : /* vpackssbw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0x64) + : /* pcmpgtb {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x64) + : /* vpcmpgtb {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0x65) + : /* pcmpgtw {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x65) + : /* vpcmpgtw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0x66) + : /* pcmpgtd {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x66) + : /* vpcmpgtd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0x67) + : /* packusbw {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x67) + : /* vpackusbw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0x6b) + : /* packsswd {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x6b) + : /* vpacksswd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case 
X86EMUL_OPC_66(0x0f, 0x6c) + : /* punpcklqdq xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x6c) + : /* vpunpcklqdq {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_66(0x0f, 0x6d) + : /* punpckhqdq xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x6d) + : /* vpunpckhqdq {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0x74) + : /* pcmpeqb {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x74) + : /* vpcmpeqb {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0x75) + : /* pcmpeqw {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x75) + : /* vpcmpeqw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0x76) + : /* pcmpeqd {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x76) + : /* vpcmpeqd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xd1) + : /* psrlw {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xd1) + : /* vpsrlw xmm/m128,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xd2) + : /* psrld {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xd2) + : /* vpsrld xmm/m128,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xd3) + : /* psrlq {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xd3) + : /* vpsrlq xmm/m128,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_66(0x0f, 0xd4) + : /* paddq xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xd4) + : /* vpaddq {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xd5) + : /* pmullw {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xd5) + : /* vpmullw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xd8) + : /* psubusb {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xd8) + : /* vpsubusb {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xd9) + : /* psubusw {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xd9) + : /* vpsubusw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_66(0x0f, 0xda) + : /* pminub xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xda) + : /* vpminub {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xdb) + : /* pand {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xdb) + : /* vpand {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xdc) + : /* paddusb {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xdc) + : /* vpaddusb {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xdd) + : /* paddusw {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xdd) + : /* vpaddusw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_66(0x0f, 0xde) + : /* pmaxub xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xde) + : /* vpmaxub {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xdf) + : /* pandn {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xdf) + : /* vpandn {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_66(0x0f, 0xe0) + : /* pavgb xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xe0) + : /* vpavgb {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xe1) + : /* psraw {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xe1) + : /* vpsraw xmm/m128,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xe2) + : /* psrad {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xe2) + : /* vpsrad xmm/m128,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_66(0x0f, 0xe3) + : /* pavgw xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xe3) + : /* vpavgw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_66(0x0f, 0xe4) + : /* pmulhuw xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xe4) + : /* vpmulhuw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xe5) + : /* pmulhw {,x}mm/mem,{,x}mm */ + 
case X86EMUL_OPC_VEX_66(0x0f, 0xe5) + : /* vpmulhw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xe8) + : /* psubsb {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xe8) + : /* vpsubsb {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xe9) + : /* psubsw {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xe9) + : /* vpsubsw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_66(0x0f, 0xea) + : /* pminsw xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xea) + : /* vpminsw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xeb) + : /* por {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xeb) + : /* vpor {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xec) + : /* paddsb {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xec) + : /* vpaddsb {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xed) + : /* paddsw {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xed) + : /* vpaddsw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_66(0x0f, 0xee) + : /* pmaxsw xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xee) + : /* vpmaxsw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xef) + : /* pxor {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xef) + : /* vpxor {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xf1) + : /* psllw {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xf1) + : /* vpsllw xmm/m128,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xf2) + : /* pslld {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xf2) + : /* vpslld xmm/m128,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xf3) + : /* psllq {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xf3) + : /* vpsllq xmm/m128,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_66(0x0f, 0xf4) + : /* pmuludq xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xf4) + : /* vpmuludq {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_66(0x0f, 0xf6) + : /* psadbw xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xf6) + : /* vpsadbw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xf8) + : /* psubb {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xf8) + : /* vpsubb {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xf9) + : /* psubw {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xf9) + : /* vpsubw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xfa) + : /* psubd {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xfa) + : /* vpsubd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_66(0x0f, 0xfb) + : /* psubq xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xfb) + : /* vpsubq {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xfc) + : /* paddb {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xfc) + : /* vpaddb {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xfd) + : /* paddw {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xfd) + : /* vpaddw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_INT(0x0f, 0xfe) + : /* paddd {,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xfe) + : /* vpaddd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + simd_0f_int: if ( vex.opcx != vex_none ) + { + case X86EMUL_OPC_VEX_66(0x0f38, + 0x00): /* vpshufb {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x01): /* vphaddw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x02): /* vphaddd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0x03): /* vphaddsw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0x04): /* vpmaddubsw 
{x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x05): /* vphsubw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x06): /* vphsubd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0x07): /* vphsubsw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x08): /* vpsignb {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x09): /* vpsignw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x0a): /* vpsignd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0x0b): /* vpmulhrsw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, 0x1c): /* vpabsb {x,y}mm/mem,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, 0x1d): /* vpabsw {x,y}mm/mem,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, 0x1e): /* vpabsd {x,y}mm/mem,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x28): /* vpmuldq {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0x29): /* vpcmpeqq {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0x2b): /* vpackusdw {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0x37): /* vpcmpgtq {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x38): /* vpminsb {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x39): /* vpminsd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x3a): /* vpminub {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x3b): /* vpminud {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x3c): /* vpmaxsb {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x3d): /* vpmaxsd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x3e): /* vpmaxub {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x3f): /* vpmaxud {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x40): /* vpmulld {x,y}mm/mem,{x,y}mm,{x,y}mm */ if ( !vex.l ) goto simd_0f_avx; /* fall through */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x45): /* vpsrlv{d,q} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x47): /* vpsllv{d,q} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - simd_0f_avx2: + case X86EMUL_OPC_VEX_66( + 0x0f38, 0x45): /* vpsrlv{d,q} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0x47): /* vpsllv{d,q} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + simd_0f_avx2: host_and_vcpu_must_have(avx2); goto simd_0f_ymm; } @@ -6633,50 +6976,83 @@ x86_emulate( get_fpu(X86EMUL_FPU_mmx); goto simd_0f_common; - case X86EMUL_OPC_EVEX_66(0x0f, 0xf6): /* vpsadbw [xyz]mm/mem,[xyz]mm,[xyz]mm */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xf6): /* vpsadbw [xyz]mm/mem,[xyz]mm,[xyz]mm */ generate_exception_if(evex.opmsk, EXC_UD); /* fall through */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xd1): /* vpsrlw xmm/m128,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xe1): /* vpsraw xmm/m128,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xf1): /* vpsllw xmm/m128,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xf5): /* vpmaddwd [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xd1): /* vpsrlw xmm/m128,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xe1): /* vpsraw xmm/m128,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xf1): /* vpsllw xmm/m128,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f, 0xf5): /* vpmaddwd [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ fault_suppression = false; /* fall through */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xd5): /* vpmullw 
[xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xd8): /* vpsubusb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xd9): /* vpsubusw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xdc): /* vpaddusb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xdd): /* vpaddusw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xe0): /* vpavgb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xe3): /* vpavgw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xe5): /* vpmulhw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xe8): /* vpsubsb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xe9): /* vpsubsw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xec): /* vpaddsb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xed): /* vpaddsw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xf8): /* vpsubb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xf9): /* vpsubw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xfc): /* vpaddb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xfd): /* vpaddw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xd5): /* vpmullw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f, 0xd8): /* vpsubusb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f, 0xd9): /* vpsubusw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f, 0xdc): /* vpaddusb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f, 0xdd): /* vpaddusw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xe0): /* vpavgb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xe3): /* vpavgw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xe5): /* vpmulhw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xe8): /* vpsubsb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xe9): /* vpsubsw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xec): /* vpaddsb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xed): /* vpaddsw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xf8): /* vpsubb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xf9): /* vpsubw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xfc): /* vpaddb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xfd): /* vpaddw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ host_and_vcpu_must_have(avx512bw); generate_exception_if(evex.brs, EXC_UD); elem_bytes = 1 << (b & 1); goto avx512f_no_sae; - case X86EMUL_OPC_EVEX_F3(0x0f38, 0x26): /* vptestnm{b,w} [xyz]mm/mem,[xyz]mm,k{k} */ - case X86EMUL_OPC_EVEX_F3(0x0f38, 0x27): /* vptestnm{d,q} [xyz]mm/mem,[xyz]mm,k{k} */ + case X86EMUL_OPC_EVEX_F3(0x0f38, + 0x26): /* vptestnm{b,w} [xyz]mm/mem,[xyz]mm,k{k} */ + case X86EMUL_OPC_EVEX_F3(0x0f38, + 0x27): /* vptestnm{d,q} [xyz]mm/mem,[xyz]mm,k{k} */ op_bytes = 16 << evex.lr; /* fall through */ - case X86EMUL_OPC_EVEX_66(0x0f, 0x64): /* vpcmpeqb [xyz]mm/mem,[xyz]mm,k{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0x65): /* vpcmpeqw [xyz]mm/mem,[xyz]mm,k{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0x66): /* vpcmpeqd [xyz]mm/mem,[xyz]mm,k{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0x74): /* vpcmpgtb [xyz]mm/mem,[xyz]mm,k{k} */ - case 
X86EMUL_OPC_EVEX_66(0x0f, 0x75): /* vpcmpgtw [xyz]mm/mem,[xyz]mm,k{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0x76): /* vpcmpgtd [xyz]mm/mem,[xyz]mm,k{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x26): /* vptestm{b,w} [xyz]mm/mem,[xyz]mm,k{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x27): /* vptestm{d,q} [xyz]mm/mem,[xyz]mm,k{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x29): /* vpcmpeqq [xyz]mm/mem,[xyz]mm,k{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x37): /* vpcmpgtq [xyz]mm/mem,[xyz]mm,k{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0x64): /* vpcmpeqb [xyz]mm/mem,[xyz]mm,k{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0x65): /* vpcmpeqw [xyz]mm/mem,[xyz]mm,k{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0x66): /* vpcmpeqd [xyz]mm/mem,[xyz]mm,k{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0x74): /* vpcmpgtb [xyz]mm/mem,[xyz]mm,k{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0x75): /* vpcmpgtw [xyz]mm/mem,[xyz]mm,k{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0x76): /* vpcmpgtd [xyz]mm/mem,[xyz]mm,k{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x26): /* vptestm{b,w} [xyz]mm/mem,[xyz]mm,k{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x27): /* vptestm{d,q} [xyz]mm/mem,[xyz]mm,k{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x29): /* vpcmpeqq [xyz]mm/mem,[xyz]mm,k{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x37): /* vpcmpgtq [xyz]mm/mem,[xyz]mm,k{k} */ generate_exception_if(!evex.r || !evex.R || evex.z, EXC_UD); if ( b & (ext == ext_0f38 ? 1 : 2) ) { @@ -6689,17 +7065,24 @@ x86_emulate( avx512_vlen_check(false); goto simd_zmm; - case X86EMUL_OPC_EVEX_66(0x0f, 0xd4): /* vpaddq [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xf4): /* vpmuludq [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x28): /* vpmuldq [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xd4): /* vpaddq [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f, 0xf4): /* vpmuludq [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x28): /* vpmuldq [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ generate_exception_if(!evex.w, EXC_UD); goto avx512f_no_sae; - CASE_SIMD_PACKED_INT(0x0f, 0x6e): /* mov{d,q} r/m,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x6e): /* vmov{d,q} r/m,xmm */ - CASE_SIMD_PACKED_INT(0x0f, 0x7e): /* mov{d,q} {,x}mm,r/m */ - case X86EMUL_OPC_VEX_66(0x0f, 0x7e): /* vmov{d,q} xmm,r/m */ - if ( vex.opcx != vex_none ) + CASE_SIMD_PACKED_INT(0x0f, 0x6e) + : /* mov{d,q} r/m,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x6e) + : /* vmov{d,q} r/m,xmm */ + CASE_SIMD_PACKED_INT(0x0f, 0x7e) + : /* mov{d,q} {,x}mm,r/m */ + case X86EMUL_OPC_VEX_66(0x0f, 0x7e) + : /* vmov{d,q} xmm,r/m */ + if ( vex.opcx != vex_none ) { generate_exception_if(vex.l || vex.reg != 0xf, EXC_UD); host_and_vcpu_must_have(avx); @@ -6729,7 +7112,7 @@ x86_emulate( opc[2] = 0xc3; copy_REX_VEX(opc, rex_prefix, vex); - invoke_stub("", "", "+m" (src.val) : "a" (&src.val)); + invoke_stub("", "", "+m"(src.val) : "a"(&src.val)); dst.val = src.val; put_stub(stub); @@ -6738,9 +7121,9 @@ x86_emulate( case X86EMUL_OPC_EVEX_66(0x0f, 0x6e): /* vmov{d,q} r/m,xmm */ case X86EMUL_OPC_EVEX_66(0x0f, 0x7e): /* vmov{d,q} xmm,r/m */ - generate_exception_if((evex.lr || evex.opmsk || evex.brs || - evex.reg != 0xf || !evex.RX), - EXC_UD); + generate_exception_if( + (evex.lr || evex.opmsk || evex.brs || evex.reg != 0xf || !evex.RX), + EXC_UD); host_and_vcpu_must_have(avx512f); get_fpu(X86EMUL_FPU_zmm); @@ -6755,7 +7138,7 @@ x86_emulate( opc[2] = 0xc3; copy_EVEX(opc, evex); - invoke_stub("", "", "+m" (src.val) : "a" (&src.val)); + invoke_stub("", 
"", "+m"(src.val) : "a"(&src.val)); dst.val = src.val; put_stub(stub); @@ -6797,18 +7180,24 @@ x86_emulate( sfence = true; fault_suppression = false; /* fall through */ - case X86EMUL_OPC_EVEX_66(0x0f, 0x6f): /* vmovdqa{32,64} [xyz]mm/mem,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_F3(0x0f, 0x6f): /* vmovdqu{32,64} [xyz]mm/mem,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0x7f): /* vmovdqa{32,64} [xyz]mm,[xyz]mm/mem{k} */ - case X86EMUL_OPC_EVEX_F3(0x0f, 0x7f): /* vmovdqu{32,64} [xyz]mm,[xyz]mm/mem{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0x6f): /* vmovdqa{32,64} [xyz]mm/mem,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_F3(0x0f, + 0x6f): /* vmovdqu{32,64} [xyz]mm/mem,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0x7f): /* vmovdqa{32,64} [xyz]mm,[xyz]mm/mem{k} */ + case X86EMUL_OPC_EVEX_F3(0x0f, + 0x7f): /* vmovdqu{32,64} [xyz]mm,[xyz]mm/mem{k} */ vmovdqa: generate_exception_if(evex.brs, EXC_UD); d |= TwoOp; op_bytes = 16 << evex.lr; goto avx512f_no_sae; - case X86EMUL_OPC_EVEX_F2(0x0f, 0x6f): /* vmovdqu{8,16} [xyz]mm/mem,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_F2(0x0f, 0x7f): /* vmovdqu{8,16} [xyz]mm,[xyz]mm/mem{k} */ + case X86EMUL_OPC_EVEX_F2(0x0f, + 0x6f): /* vmovdqu{8,16} [xyz]mm/mem,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_F2(0x0f, + 0x7f): /* vmovdqu{8,16} [xyz]mm,[xyz]mm/mem{k} */ host_and_vcpu_must_have(avx512bw); elem_bytes = 1 << evex.w; goto vmovdqa; @@ -6817,49 +7206,65 @@ x86_emulate( generate_exception_if(vex.l, EXC_UD); d |= TwoOp; /* fall through */ - case X86EMUL_OPC_66(0x0f, 0xd6): /* movq xmm,xmm/m64 */ - case X86EMUL_OPC(0x0f, 0x6f): /* movq mm/m64,mm */ - case X86EMUL_OPC(0x0f, 0x7f): /* movq mm,mm/m64 */ + case X86EMUL_OPC_66(0x0f, 0xd6): /* movq xmm,xmm/m64 */ + case X86EMUL_OPC(0x0f, 0x6f): /* movq mm/m64,mm */ + case X86EMUL_OPC(0x0f, 0x7f): /* movq mm,mm/m64 */ op_bytes = 8; goto simd_0f_int; - CASE_SIMD_PACKED_INT(0x0f, 0x70): /* pshuf{w,d} $imm8,{,x}mm/mem,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x70): /* vpshufd $imm8,{x,y}mm/mem,{x,y}mm */ - case X86EMUL_OPC_F3(0x0f, 0x70): /* pshufhw $imm8,xmm/m128,xmm */ - case X86EMUL_OPC_VEX_F3(0x0f, 0x70): /* vpshufhw $imm8,{x,y}mm/mem,{x,y}mm */ - case X86EMUL_OPC_F2(0x0f, 0x70): /* pshuflw $imm8,xmm/m128,xmm */ - case X86EMUL_OPC_VEX_F2(0x0f, 0x70): /* vpshuflw $imm8,{x,y}mm/mem,{x,y}mm */ - d = (d & ~SrcMask) | SrcMem | TwoOp; + CASE_SIMD_PACKED_INT(0x0f, 0x70) + : /* pshuf{w,d} $imm8,{,x}mm/mem,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0x70) + : /* vpshufd $imm8,{x,y}mm/mem,{x,y}mm */ + case X86EMUL_OPC_F3(0x0f, 0x70) + : /* pshufhw $imm8,xmm/m128,xmm */ + case X86EMUL_OPC_VEX_F3(0x0f, 0x70) + : /* vpshufhw $imm8,{x,y}mm/mem,{x,y}mm */ + case X86EMUL_OPC_F2(0x0f, 0x70) + : /* pshuflw $imm8,xmm/m128,xmm */ + case X86EMUL_OPC_VEX_F2(0x0f, 0x70) + : /* vpshuflw $imm8,{x,y}mm/mem,{x,y}mm */ + d = (d & ~SrcMask) | SrcMem | TwoOp; op_bytes = vex.pfx ? 
16 << vex.l : 8; simd_0f_int_imm8: if ( vex.opcx != vex_none ) { - case X86EMUL_OPC_VEX_66(0x0f3a, 0x0e): /* vpblendw $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x0f): /* vpalignr $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x42): /* vmpsadbw $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x0e): /* vpblendw $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x0f): /* vpalignr $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x42): /* vmpsadbw $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ if ( vex.l ) { - simd_0f_imm8_avx2: + simd_0f_imm8_avx2: host_and_vcpu_must_have(avx2); } else { - case X86EMUL_OPC_VEX_66(0x0f3a, 0x08): /* vroundps $imm8,{x,y}mm/mem,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x09): /* vroundpd $imm8,{x,y}mm/mem,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x0a): /* vroundss $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x0b): /* vroundsd $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x0c): /* vblendps $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x0d): /* vblendpd $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x40): /* vdpps $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - simd_0f_imm8_avx: + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x08): /* vroundps $imm8,{x,y}mm/mem,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x09): /* vroundpd $imm8,{x,y}mm/mem,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x0a): /* vroundss $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x0b): /* vroundsd $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x0c): /* vblendps $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x0d): /* vblendpd $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x40): /* vdpps $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + simd_0f_imm8_avx: host_and_vcpu_must_have(avx); } - simd_0f_imm8_ymm: + simd_0f_imm8_ymm: get_fpu(X86EMUL_FPU_ymm); } else if ( vex.pfx ) { - simd_0f_imm8_sse2: + simd_0f_imm8_sse2: vcpu_must_have(sse2); get_fpu(X86EMUL_FPU_xmm); } @@ -6884,18 +7289,19 @@ x86_emulate( insn_bytes = PFX_BYTES + 3; break; - CASE_SIMD_PACKED_INT(0x0f, 0x71): /* Grp12 */ - case X86EMUL_OPC_VEX_66(0x0f, 0x71): - CASE_SIMD_PACKED_INT(0x0f, 0x72): /* Grp13 */ - case X86EMUL_OPC_VEX_66(0x0f, 0x72): - switch ( modrm_reg & 7 ) + CASE_SIMD_PACKED_INT(0x0f, 0x71) + : /* Grp12 */ + case X86EMUL_OPC_VEX_66(0x0f, 0x71) + : CASE_SIMD_PACKED_INT(0x0f, 0x72) + : /* Grp13 */ + case X86EMUL_OPC_VEX_66(0x0f, 0x72): switch (modrm_reg & 7) { case 2: /* psrl{w,d} $imm8,{,x}mm */ /* vpsrl{w,d} $imm8,{x,y}mm,{x,y}mm */ case 4: /* psra{w,d} $imm8,{,x}mm */ /* vpsra{w,d} $imm8,{x,y}mm,{x,y}mm */ case 6: /* psll{w,d} $imm8,{,x}mm */ - /* vpsll{w,d} $imm8,{x,y}mm,{x,y}mm */ + /* vpsll{w,d} $imm8,{x,y}mm,{x,y}mm */ break; default: goto unrecognized_insn; @@ -6931,14 +7337,14 @@ x86_emulate( opc[insn_bytes - PFX_BYTES] = 0xc3; copy_REX_VEX(opc, rex_prefix, vex); - invoke_stub("", "", [dummy_out] "=g" (dummy) : [dummy_in] "i" (0) ); + invoke_stub("", "", [dummy_out] "=g"(dummy) : [dummy_in] "i"(0)); put_stub(stub); ASSERT(!state->simd_size); break; case X86EMUL_OPC_EVEX_66(0x0f, 0x71): /* Grp12 */ - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 2: /* vpsrlw $imm8,[xyz]mm/mem,[xyz]mm{k} */ case 4: /* vpsraw $imm8,[xyz]mm/mem,[xyz]mm{k} */ @@ -6952,7 +7358,7 @@ x86_emulate( goto 
unrecognized_insn; case X86EMUL_OPC_EVEX_66(0x0f, 0x72): /* Grp13 */ - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 2: /* vpsrld $imm8,[xyz]mm/mem,[xyz]mm{k} */ case 6: /* vpslld $imm8,[xyz]mm/mem,[xyz]mm{k} */ @@ -6968,8 +7374,8 @@ x86_emulate( } goto unrecognized_insn; - case X86EMUL_OPC(0x0f, 0x73): /* Grp14 */ - switch ( modrm_reg & 7 ) + case X86EMUL_OPC(0x0f, 0x73): /* Grp14 */ + switch (modrm_reg & 7) { case 2: /* psrlq $imm8,mm */ case 6: /* psllq $imm8,mm */ @@ -6979,7 +7385,7 @@ x86_emulate( case X86EMUL_OPC_66(0x0f, 0x73): case X86EMUL_OPC_VEX_66(0x0f, 0x73): - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 2: /* psrlq $imm8,xmm */ /* vpsrlq $imm8,{x,y}mm,{x,y}mm */ @@ -6988,13 +7394,13 @@ x86_emulate( case 6: /* psllq $imm8,xmm */ /* vpsllq $imm8,{x,y}mm,{x,y}mm */ case 7: /* pslldq $imm8,xmm */ - /* vpslldq $imm8,{x,y}mm,{x,y}mm */ + /* vpslldq $imm8,{x,y}mm,{x,y}mm */ goto simd_0f_shift_imm; } goto unrecognized_insn; case X86EMUL_OPC_EVEX_66(0x0f, 0x73): /* Grp14 */ - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 2: /* vpsrlq $imm8,[xyz]mm/mem,[xyz]mm{k} */ case 6: /* vpsllq $imm8,[xyz]mm/mem,[xyz]mm{k} */ @@ -7007,8 +7413,8 @@ x86_emulate( } goto unrecognized_insn; - case X86EMUL_OPC(0x0f, 0x77): /* emms */ - case X86EMUL_OPC_VEX(0x0f, 0x77): /* vzero{all,upper} */ + case X86EMUL_OPC(0x0f, 0x77): /* emms */ + case X86EMUL_OPC_VEX(0x0f, 0x77): /* vzero{all,upper} */ if ( vex.opcx != vex_none ) { generate_exception_if(vex.reg != 0xf, EXC_UD); @@ -7025,26 +7431,26 @@ x86_emulate( if ( vex.l ) { /* vpxor %xmmN, %xmmN, %xmmN */ - asm volatile ( ".byte 0xc5,0xf9,0xef,0xc0" ); - asm volatile ( ".byte 0xc5,0xf1,0xef,0xc9" ); - asm volatile ( ".byte 0xc5,0xe9,0xef,0xd2" ); - asm volatile ( ".byte 0xc5,0xe1,0xef,0xdb" ); - asm volatile ( ".byte 0xc5,0xd9,0xef,0xe4" ); - asm volatile ( ".byte 0xc5,0xd1,0xef,0xed" ); - asm volatile ( ".byte 0xc5,0xc9,0xef,0xf6" ); - asm volatile ( ".byte 0xc5,0xc1,0xef,0xff" ); + asm volatile(".byte 0xc5,0xf9,0xef,0xc0"); + asm volatile(".byte 0xc5,0xf1,0xef,0xc9"); + asm volatile(".byte 0xc5,0xe9,0xef,0xd2"); + asm volatile(".byte 0xc5,0xe1,0xef,0xdb"); + asm volatile(".byte 0xc5,0xd9,0xef,0xe4"); + asm volatile(".byte 0xc5,0xd1,0xef,0xed"); + asm volatile(".byte 0xc5,0xc9,0xef,0xf6"); + asm volatile(".byte 0xc5,0xc1,0xef,0xff"); } else { /* vpor %xmmN, %xmmN, %xmmN */ - asm volatile ( ".byte 0xc5,0xf9,0xeb,0xc0" ); - asm volatile ( ".byte 0xc5,0xf1,0xeb,0xc9" ); - asm volatile ( ".byte 0xc5,0xe9,0xeb,0xd2" ); - asm volatile ( ".byte 0xc5,0xe1,0xeb,0xdb" ); - asm volatile ( ".byte 0xc5,0xd9,0xeb,0xe4" ); - asm volatile ( ".byte 0xc5,0xd1,0xeb,0xed" ); - asm volatile ( ".byte 0xc5,0xc9,0xeb,0xf6" ); - asm volatile ( ".byte 0xc5,0xc1,0xeb,0xff" ); + asm volatile(".byte 0xc5,0xf9,0xeb,0xc0"); + asm volatile(".byte 0xc5,0xf1,0xeb,0xc9"); + asm volatile(".byte 0xc5,0xe9,0xeb,0xd2"); + asm volatile(".byte 0xc5,0xe1,0xeb,0xdb"); + asm volatile(".byte 0xc5,0xd9,0xeb,0xe4"); + asm volatile(".byte 0xc5,0xd1,0xeb,0xed"); + asm volatile(".byte 0xc5,0xc9,0xeb,0xf6"); + asm volatile(".byte 0xc5,0xc1,0xeb,0xff"); } ASSERT(!state->simd_size); @@ -7066,8 +7472,8 @@ x86_emulate( insn_bytes = PFX_BYTES + 1; goto simd_0f_reg_only; - case X86EMUL_OPC_66(0x0f, 0x78): /* Grp17 */ - switch ( modrm_reg & 7 ) + case X86EMUL_OPC_66(0x0f, 0x78): /* Grp17 */ + switch (modrm_reg & 7) { case 0: /* extrq $imm8,$imm8,xmm */ break; @@ -7075,7 +7481,7 @@ x86_emulate( goto unrecognized_insn; } /* fall through */ - case X86EMUL_OPC_F2(0x0f, 0x78): /* 
insertq $imm8,$imm8,xmm,xmm */ + case X86EMUL_OPC_F2(0x0f, 0x78): /* insertq $imm8,$imm8,xmm,xmm */ generate_exception_if(ea.type != OP_REG, EXC_UD); host_and_vcpu_must_have(sse4a); @@ -7089,8 +7495,8 @@ x86_emulate( insn_bytes = PFX_BYTES + 4; goto simd_0f_reg_only; - case X86EMUL_OPC_66(0x0f, 0x79): /* extrq xmm,xmm */ - case X86EMUL_OPC_F2(0x0f, 0x79): /* insertq xmm,xmm */ + case X86EMUL_OPC_66(0x0f, 0x79): /* extrq xmm,xmm */ + case X86EMUL_OPC_F2(0x0f, 0x79): /* insertq xmm,xmm */ generate_exception_if(ea.type != OP_REG, EXC_UD); host_and_vcpu_must_have(sse4a); op_bytes = 8; @@ -7100,18 +7506,24 @@ x86_emulate( case X86EMUL_OPC_VEX_F2(0x0f, 0xf0): /* vlddqu mem,{x,y}mm */ generate_exception_if(ea.type != OP_MEM, EXC_UD); /* fall through */ - case X86EMUL_OPC_66(0x0f, 0x7c): /* haddpd xmm/m128,xmm */ - case X86EMUL_OPC_F2(0x0f, 0x7c): /* haddps xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x7c): /* vhaddpd {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_F2(0x0f, 0x7c): /* vhaddps {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_66(0x0f, 0x7d): /* hsubpd xmm/m128,xmm */ - case X86EMUL_OPC_F2(0x0f, 0x7d): /* hsubps xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0x7d): /* vhsubpd {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_F2(0x0f, 0x7d): /* vhsubps {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_66(0x0f, 0xd0): /* addsubpd xmm/m128,xmm */ - case X86EMUL_OPC_F2(0x0f, 0xd0): /* addsubps xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xd0): /* vaddsubpd {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_F2(0x0f, 0xd0): /* vaddsubps {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_66(0x0f, 0x7c): /* haddpd xmm/m128,xmm */ + case X86EMUL_OPC_F2(0x0f, 0x7c): /* haddps xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, + 0x7c): /* vhaddpd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_F2(0x0f, + 0x7c): /* vhaddps {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_66(0x0f, 0x7d): /* hsubpd xmm/m128,xmm */ + case X86EMUL_OPC_F2(0x0f, 0x7d): /* hsubps xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, + 0x7d): /* vhsubpd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_F2(0x0f, + 0x7d): /* vhsubps {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_66(0x0f, 0xd0): /* addsubpd xmm/m128,xmm */ + case X86EMUL_OPC_F2(0x0f, 0xd0): /* addsubps xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, + 0xd0): /* vaddsubpd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_F2(0x0f, + 0xd0): /* vaddsubps {x,y}mm/mem,{x,y}mm,{x,y}mm */ op_bytes = 16 << vex.l; goto simd_0f_sse3_avx; @@ -7121,13 +7533,13 @@ x86_emulate( op_bytes = 8; goto simd_0f_int; - case X86EMUL_OPC(0x0f, 0x80) ... X86EMUL_OPC(0x0f, 0x8f): /* jcc (near) */ + case X86EMUL_OPC(0x0f, 0x80)... X86EMUL_OPC(0x0f, 0x8f): /* jcc (near) */ if ( test_cc(b, _regs.eflags) ) jmp_rel((int32_t)src.val); adjust_bnd(ctxt, ops, vex.pfx); break; - case X86EMUL_OPC(0x0f, 0x90) ... X86EMUL_OPC(0x0f, 0x9f): /* setcc */ + case X86EMUL_OPC(0x0f, 0x90)... 
X86EMUL_OPC(0x0f, 0x9f): /* setcc */ dst.val = test_cc(b, _regs.eflags); break; @@ -7169,8 +7581,8 @@ x86_emulate( case X86EMUL_OPC_VEX(0x0f, 0x92): /* kmovw r32,k */ case X86EMUL_OPC_VEX_66(0x0f, 0x92): /* kmovb r32,k */ case X86EMUL_OPC_VEX_F2(0x0f, 0x92): /* kmov{d,q} reg,k */ - generate_exception_if(vex.l || !vex.r || vex.reg != 0xf || - ea.type != OP_REG, EXC_UD); + generate_exception_if( + vex.l || !vex.r || vex.reg != 0xf || ea.type != OP_REG, EXC_UD); host_and_vcpu_must_have(avx512f); if ( vex.pfx == vex_f2 ) @@ -7195,7 +7607,7 @@ x86_emulate( copy_VEX(opc, vex); ea.reg = decode_gpr(&_regs, modrm_rm); - invoke_stub("", "", "=m" (dummy) : "a" (*ea.reg)); + invoke_stub("", "", "=m"(dummy) : "a"(*ea.reg)); put_stub(stub); @@ -7237,22 +7649,22 @@ x86_emulate( opc[2] = 0xc3; copy_VEX(opc, vex); - invoke_stub("", "", "=a" (dst.val) : [dummy] "i" (0)); + invoke_stub("", "", "=a"(dst.val) : [dummy] "i"(0)); put_stub(stub); ASSERT(!state->simd_size); break; - case X86EMUL_OPC_VEX(0x0f, 0x99): /* ktest{w,q} k,k */ + case X86EMUL_OPC_VEX(0x0f, 0x99): /* ktest{w,q} k,k */ if ( !vex.w ) host_and_vcpu_must_have(avx512dq); /* fall through */ case X86EMUL_OPC_VEX(0x0f, 0x98): /* kortest{w,q} k,k */ case X86EMUL_OPC_VEX_66(0x0f, 0x98): /* kortest{b,d} k,k */ case X86EMUL_OPC_VEX_66(0x0f, 0x99): /* ktest{b,d} k,k */ - generate_exception_if(vex.l || !vex.r || vex.reg != 0xf || - ea.type != OP_REG, EXC_UD); + generate_exception_if( + vex.l || !vex.r || vex.reg != 0xf || ea.type != OP_REG, EXC_UD); host_and_vcpu_must_have(avx512f); if ( vex.w ) host_and_vcpu_must_have(avx512bw); @@ -7269,9 +7681,9 @@ x86_emulate( copy_VEX(opc, vex); invoke_stub(_PRE_EFLAGS("[eflags]", "[mask]", "[tmp]"), _POST_EFLAGS("[eflags]", "[mask]", "[tmp]"), - [eflags] "+g" (_regs.eflags), - "=a" (dst.val), [tmp] "=&r" (dummy) - : [mask] "i" (EFLAGS_MASK)); + [eflags] "+g"(_regs.eflags), + "=a"(dst.val), [tmp] "=&r"(dummy) + : [mask] "i"(EFLAGS_MASK)); put_stub(stub); @@ -7285,8 +7697,8 @@ x86_emulate( /* Speculatively read MSR_INTEL_MISC_FEATURES_ENABLES. */ if ( ops->read_msr && !mode_ring0() && - (rc = ops->read_msr(MSR_INTEL_MISC_FEATURES_ENABLES, - &msr_val, ctxt)) == X86EMUL_EXCEPTION ) + (rc = ops->read_msr(MSR_INTEL_MISC_FEATURES_ENABLES, &msr_val, + ctxt)) == X86EMUL_EXCEPTION ) { /* Not implemented. Squash the exception and proceed normally. */ x86_emul_reset_event(ctxt); @@ -7307,12 +7719,13 @@ x86_emulate( _regs.r(dx) = cpuid_leaf.d; break; - case X86EMUL_OPC(0x0f, 0xa3): bt: /* bt */ + case X86EMUL_OPC(0x0f, 0xa3): + bt: /* bt */ generate_exception_if(lock_prefix, EXC_UD); if ( ops->rmw && dst.type == OP_MEM && - (rc = read_ulong(dst.mem.seg, dst.mem.off, &dst.val, - dst.bytes, ctxt, ops)) != X86EMUL_OKAY ) + (rc = read_ulong(dst.mem.seg, dst.mem.off, &dst.val, dst.bytes, + ctxt, ops)) != X86EMUL_OKAY ) goto done; emulate_2op_SrcV_nobyte("bt", src, dst, _regs.eflags); @@ -7322,7 +7735,8 @@ x86_emulate( case X86EMUL_OPC(0x0f, 0xa4): /* shld imm8,r,r/m */ case X86EMUL_OPC(0x0f, 0xa5): /* shld %%cl,r,r/m */ case X86EMUL_OPC(0x0f, 0xac): /* shrd imm8,r,r/m */ - case X86EMUL_OPC(0x0f, 0xad): /* shrd %%cl,r,r/m */ { + case X86EMUL_OPC(0x0f, 0xad): /* shrd %%cl,r,r/m */ + { uint8_t shift, width = dst.bytes << 3; generate_exception_if(lock_prefix, EXC_UD); @@ -7347,12 +7761,13 @@ x86_emulate( break; dst.orig_val = dst.val; dst.val = (b & 8) ? 
- /* shrd */ - ((dst.orig_val >> shift) | - truncate_word(src.val << (width - shift), dst.bytes)) : - /* shld */ - (truncate_word(dst.orig_val << shift, dst.bytes) | - (src.val >> (width - shift))); + /* shrd */ + ((dst.orig_val >> shift) | + truncate_word(src.val << (width - shift), dst.bytes)) + : + /* shld */ + (truncate_word(dst.orig_val << shift, dst.bytes) | + (src.val >> (width - shift))); _regs.eflags &= ~(X86_EFLAGS_OF | X86_EFLAGS_SF | X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF); if ( (dst.orig_val >> ((b & 8) ? (shift - 1) : (width - shift))) & 1 ) @@ -7365,15 +7780,17 @@ x86_emulate( break; } - case X86EMUL_OPC(0x0f, 0xab): bts: /* bts */ + case X86EMUL_OPC(0x0f, 0xab): + bts: /* bts */ if ( ops->rmw && dst.type == OP_MEM ) state->rmw = rmw_bts; else emulate_2op_SrcV_nobyte("bts", src, dst, _regs.eflags); break; - case X86EMUL_OPC(0x0f, 0xae): case X86EMUL_OPC_66(0x0f, 0xae): /* Grp15 */ - switch ( modrm_reg & 7 ) + case X86EMUL_OPC(0x0f, 0xae): + case X86EMUL_OPC_66(0x0f, 0xae): /* Grp15 */ + switch (modrm_reg & 7) { case 2: /* ldmxcsr */ generate_exception_if(vex.pfx, EXC_UD); @@ -7382,7 +7799,7 @@ x86_emulate( generate_exception_if(src.type != OP_MEM, EXC_UD); get_fpu(vex.opcx ? X86EMUL_FPU_ymm : X86EMUL_FPU_xmm); generate_exception_if(src.val & ~mxcsr_mask, EXC_GP, 0); - asm volatile ( "ldmxcsr %0" :: "m" (src.val) ); + asm volatile("ldmxcsr %0" ::"m"(src.val)); break; case 3: /* stmxcsr */ @@ -7391,21 +7808,21 @@ x86_emulate( stmxcsr: generate_exception_if(dst.type != OP_MEM, EXC_UD); get_fpu(vex.opcx ? X86EMUL_FPU_ymm : X86EMUL_FPU_xmm); - asm volatile ( "stmxcsr %0" : "=m" (dst.val) ); + asm volatile("stmxcsr %0" : "=m"(dst.val)); break; case 5: /* lfence */ fail_if(modrm_mod != 3); generate_exception_if(vex.pfx, EXC_UD); vcpu_must_have(sse2); - asm volatile ( "lfence" ::: "memory" ); + asm volatile("lfence" ::: "memory"); break; case 6: if ( modrm_mod == 3 ) /* mfence */ { generate_exception_if(vex.pfx, EXC_UD); vcpu_must_have(sse2); - asm volatile ( "mfence" ::: "memory" ); + asm volatile("mfence" ::: "memory"); break; } /* else clwb */ @@ -7420,7 +7837,7 @@ x86_emulate( { generate_exception_if(vex.pfx, EXC_UD); vcpu_must_have(mmxext); - asm volatile ( "sfence" ::: "memory" ); + asm volatile("sfence" ::: "memory"); break; } /* else clflush{,opt} */ @@ -7438,7 +7855,7 @@ x86_emulate( break; case X86EMUL_OPC_VEX(0x0f, 0xae): /* Grp15 */ - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 2: /* vldmxcsr */ generate_exception_if(vex.l || vex.reg != 0xf, EXC_UD); @@ -7476,8 +7893,8 @@ x86_emulate( if ( op_bytes == 8 ) { sreg.base = *dst.reg; - generate_exception_if(!is_canonical_address(sreg.base), - EXC_GP, 0); + generate_exception_if(!is_canonical_address(sreg.base), EXC_GP, + 0); } else sreg.base = (uint32_t)*dst.reg; @@ -7491,12 +7908,13 @@ x86_emulate( emulate_2op_SrcV_srcmem("imul", src, dst, _regs.eflags); break; - case X86EMUL_OPC(0x0f, 0xb0): case X86EMUL_OPC(0x0f, 0xb1): /* cmpxchg */ + case X86EMUL_OPC(0x0f, 0xb0): + case X86EMUL_OPC(0x0f, 0xb1): /* cmpxchg */ fail_if(!ops->cmpxchg); if ( ops->rmw && dst.type == OP_MEM && - (rc = read_ulong(dst.mem.seg, dst.mem.off, &dst.val, - dst.bytes, ctxt, ops)) != X86EMUL_OKAY ) + (rc = read_ulong(dst.mem.seg, dst.mem.off, &dst.val, dst.bytes, + ctxt, ops)) != X86EMUL_OKAY ) goto done; _regs.eflags &= ~EFLAGS_MASK; @@ -7507,9 +7925,9 @@ x86_emulate( if ( dst.type == OP_MEM ) { dst.val = _regs.r(ax); - switch ( rc = ops->cmpxchg(dst.mem.seg, dst.mem.off, &dst.val, - &src.val, dst.bytes, lock_prefix, - ctxt) ) + 
switch (rc = ops->cmpxchg(dst.mem.seg, dst.mem.off, &dst.val, + &src.val, dst.bytes, lock_prefix, + ctxt)) { case X86EMUL_OKAY: dst.type = OP_NONE; @@ -7532,7 +7950,7 @@ x86_emulate( { /* Failure: write the value we saw to EAX. */ dst.type = OP_REG; - dst.reg = (unsigned long *)&_regs.r(ax); + dst.reg = (unsigned long *)&_regs.r(ax); /* cmp: %%eax - dst ==> dst and src swapped for macro invocation */ src.val = _regs.r(ax); emulate_2op_SrcV("cmp", dst, src, _regs.eflags); @@ -7546,7 +7964,8 @@ x86_emulate( seg = b & 7; goto les; - case X86EMUL_OPC(0x0f, 0xb3): btr: /* btr */ + case X86EMUL_OPC(0x0f, 0xb3): + btr: /* btr */ if ( ops->rmw && dst.type == OP_MEM ) state->rmw = rmw_btr; else @@ -7555,9 +7974,9 @@ x86_emulate( case X86EMUL_OPC(0x0f, 0xb6): /* movzx rm8,r{16,32,64} */ /* Recompute DstReg as we may have decoded AH/BH/CH/DH. */ - dst.reg = decode_gpr(&_regs, modrm_reg); + dst.reg = decode_gpr(&_regs, modrm_reg); dst.bytes = op_bytes; - dst.val = (uint8_t)src.val; + dst.val = (uint8_t)src.val; break; case X86EMUL_OPC(0x0f, 0xb7): /* movzx rm16,r{16,32,64} */ @@ -7566,24 +7985,30 @@ x86_emulate( case X86EMUL_OPC_F3(0x0f, 0xb8): /* popcnt r/m,r */ host_and_vcpu_must_have(popcnt); - asm ( "popcnt %1,%0" : "=r" (dst.val) : "rm" (src.val) ); + asm("popcnt %1,%0" : "=r"(dst.val) : "rm"(src.val)); _regs.eflags &= ~EFLAGS_MASK; if ( !dst.val ) _regs.eflags |= X86_EFLAGS_ZF; break; case X86EMUL_OPC(0x0f, 0xba): /* Grp8 */ - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { - case 4: goto bt; - case 5: goto bts; - case 6: goto btr; - case 7: goto btc; - default: generate_exception(EXC_UD); + case 4: + goto bt; + case 5: + goto bts; + case 6: + goto btr; + case 7: + goto btc; + default: + generate_exception(EXC_UD); } break; - case X86EMUL_OPC(0x0f, 0xbb): btc: /* btc */ + case X86EMUL_OPC(0x0f, 0xbb): + btc: /* btc */ if ( ops->rmw && dst.type == OP_MEM ) state->rmw = rmw_btc; else @@ -7594,9 +8019,9 @@ x86_emulate( { bool zf; - asm ( "bsf %2,%0" ASM_FLAG_OUT(, "; setz %1") - : "=r" (dst.val), ASM_FLAG_OUT("=@ccz", "=qm") (zf) - : "rm" (src.val) ); + asm("bsf %2,%0" ASM_FLAG_OUT(, "; setz %1") + : "=r"(dst.val), ASM_FLAG_OUT("=@ccz", "=qm")(zf) + : "rm"(src.val)); _regs.eflags &= ~X86_EFLAGS_ZF; if ( (vex.pfx == vex_f3) && vcpu_has_bmi1() ) { @@ -7621,9 +8046,9 @@ x86_emulate( { bool zf; - asm ( "bsr %2,%0" ASM_FLAG_OUT(, "; setz %1") - : "=r" (dst.val), ASM_FLAG_OUT("=@ccz", "=qm") (zf) - : "rm" (src.val) ); + asm("bsr %2,%0" ASM_FLAG_OUT(, "; setz %1") + : "=r"(dst.val), ASM_FLAG_OUT("=@ccz", "=qm")(zf) + : "rm"(src.val)); _regs.eflags &= ~X86_EFLAGS_ZF; if ( (vex.pfx == vex_f3) && vcpu_has_lzcnt() ) { @@ -7650,36 +8075,49 @@ x86_emulate( case X86EMUL_OPC(0x0f, 0xbe): /* movsx rm8,r{16,32,64} */ /* Recompute DstReg as we may have decoded AH/BH/CH/DH. */ - dst.reg = decode_gpr(&_regs, modrm_reg); + dst.reg = decode_gpr(&_regs, modrm_reg); dst.bytes = op_bytes; - dst.val = (int8_t)src.val; + dst.val = (int8_t)src.val; break; case X86EMUL_OPC(0x0f, 0xbf): /* movsx rm16,r{16,32,64} */ dst.val = (int16_t)src.val; break; - case X86EMUL_OPC(0x0f, 0xc0): case X86EMUL_OPC(0x0f, 0xc1): /* xadd */ + case X86EMUL_OPC(0x0f, 0xc0): + case X86EMUL_OPC(0x0f, 0xc1): /* xadd */ if ( ops->rmw && dst.type == OP_MEM ) { state->rmw = rmw_xadd; break; } /* Write back the register source. 
*/ - switch ( dst.bytes ) + switch (dst.bytes) { - case 1: *(uint8_t *)src.reg = (uint8_t)dst.val; break; - case 2: *(uint16_t *)src.reg = (uint16_t)dst.val; break; - case 4: *src.reg = (uint32_t)dst.val; break; /* 64b reg: zero-extend */ - case 8: *src.reg = dst.val; break; + case 1: + *(uint8_t *)src.reg = (uint8_t)dst.val; + break; + case 2: + *(uint16_t *)src.reg = (uint16_t)dst.val; + break; + case 4: + *src.reg = (uint32_t)dst.val; + break; /* 64b reg: zero-extend */ + case 8: + *src.reg = dst.val; + break; } goto add; - CASE_SIMD_ALL_FP(, 0x0f, 0xc2): /* cmp{p,s}{s,d} $imm8,xmm/mem,xmm */ - CASE_SIMD_ALL_FP(_VEX, 0x0f, 0xc2): /* vcmp{p,s}{s,d} $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - CASE_SIMD_PACKED_FP(, 0x0f, 0xc6): /* shufp{s,d} $imm8,xmm/mem,xmm */ - CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0xc6): /* vshufp{s,d} $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - d = (d & ~SrcMask) | SrcMem; + CASE_SIMD_ALL_FP(, 0x0f, 0xc2) + : /* cmp{p,s}{s,d} $imm8,xmm/mem,xmm */ + CASE_SIMD_ALL_FP(_VEX, 0x0f, 0xc2) + : /* vcmp{p,s}{s,d} $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + CASE_SIMD_PACKED_FP(, 0x0f, 0xc6) + : /* shufp{s,d} $imm8,xmm/mem,xmm */ + CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0xc6) + : /* vshufp{s,d} $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + d = (d & ~SrcMask) | SrcMem; if ( vex.opcx == vex_none ) { if ( vex.pfx & VEX_PREFIX_DOUBLE_MASK ) @@ -7690,12 +8128,14 @@ x86_emulate( } goto simd_0f_imm8_avx; - CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0xc2): /* vcmp{p,s}{s,d} $imm8,[xyz]mm/mem,[xyz]mm,k{k} */ - generate_exception_if((evex.w != (evex.pfx & VEX_PREFIX_DOUBLE_MASK) || - (ea.type != OP_REG && evex.brs && - (evex.pfx & VEX_PREFIX_SCALAR_MASK)) || - !evex.r || !evex.R || evex.z), - EXC_UD); + CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0xc2) + : /* vcmp{p,s}{s,d} $imm8,[xyz]mm/mem,[xyz]mm,k{k} */ + generate_exception_if( + (evex.w != (evex.pfx & VEX_PREFIX_DOUBLE_MASK) || + (ea.type != OP_REG && evex.brs && + (evex.pfx & VEX_PREFIX_SCALAR_MASK)) || + !evex.r || !evex.R || evex.z), + EXC_UD); host_and_vcpu_must_have(avx512f); if ( ea.type != OP_REG || !evex.brs ) avx512_vlen_check(evex.pfx & VEX_PREFIX_SCALAR_MASK); @@ -7723,16 +8163,20 @@ x86_emulate( sfence = true; break; - CASE_SIMD_PACKED_INT(0x0f, 0xc4): /* pinsrw $imm8,r32/m16,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xc4): /* vpinsrw $imm8,r32/m16,xmm,xmm */ - generate_exception_if(vex.l, EXC_UD); + CASE_SIMD_PACKED_INT(0x0f, 0xc4) + : /* pinsrw $imm8,r32/m16,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xc4) + : /* vpinsrw $imm8,r32/m16,xmm,xmm */ + generate_exception_if(vex.l, EXC_UD); memcpy(mmvalp, &src.val, 2); ea.type = OP_MEM; goto simd_0f_int_imm8; - CASE_SIMD_PACKED_INT(0x0f, 0xc5): /* pextrw $imm8,{,x}mm,reg */ - case X86EMUL_OPC_VEX_66(0x0f, 0xc5): /* vpextrw $imm8,xmm,reg */ - generate_exception_if(vex.l, EXC_UD); + CASE_SIMD_PACKED_INT(0x0f, 0xc5) + : /* pextrw $imm8,{,x}mm,reg */ + case X86EMUL_OPC_VEX_66(0x0f, 0xc5) + : /* vpextrw $imm8,xmm,reg */ + generate_exception_if(vex.l, EXC_UD); opc = init_prefixes(stub); opc[0] = b; /* Convert GPR destination to %rAX. 
*/ @@ -7745,12 +8189,14 @@ x86_emulate( insn_bytes = PFX_BYTES + 3; goto simd_0f_to_gpr; - CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0xc6): /* vshufp{s,d} $imm8,[xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - fault_suppression = false; + CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0xc6) + : /* vshufp{s,d} $imm8,[xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + fault_suppression = false; generate_exception_if(evex.w != (evex.pfx & VEX_PREFIX_DOUBLE_MASK), EXC_UD); /* fall through */ - case X86EMUL_OPC_EVEX_66(0x0f3a, 0x25): /* vpternlog{d,q} $imm8,[xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f3a, 0x25): /* vpternlog{d,q} $imm8,[xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ avx512f_imm8_no_sae: host_and_vcpu_must_have(avx512f); generate_exception_if(ea.type != OP_MEM && evex.brs, EXC_UD); @@ -7762,13 +8208,13 @@ x86_emulate( union { uint32_t u32[2]; uint64_t u64[2]; - } *old, *aux; + } * old, *aux; if ( ea.type == OP_REG ) { bool __maybe_unused carry; - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { default: goto unrecognized_insn; @@ -7778,21 +8224,21 @@ x86_emulate( generate_exception_if(rep_prefix(), EXC_UD); host_and_vcpu_must_have(rdrand); dst = ea; - switch ( op_bytes ) + switch (op_bytes) { case 2: - asm ( "rdrand %w0" ASM_FLAG_OUT(, "; setc %1") - : "=r" (dst.val), ASM_FLAG_OUT("=@ccc", "=qm") (carry) ); + asm("rdrand %w0" ASM_FLAG_OUT(, "; setc %1") + : "=r"(dst.val), ASM_FLAG_OUT("=@ccc", "=qm")(carry)); break; default: -# ifdef __x86_64__ - asm ( "rdrand %k0" ASM_FLAG_OUT(, "; setc %1") - : "=r" (dst.val), ASM_FLAG_OUT("=@ccc", "=qm") (carry) ); +#ifdef __x86_64__ + asm("rdrand %k0" ASM_FLAG_OUT(, "; setc %1") + : "=r"(dst.val), ASM_FLAG_OUT("=@ccc", "=qm")(carry)); break; case 8: -# endif - asm ( "rdrand %0" ASM_FLAG_OUT(, "; setc %1") - : "=r" (dst.val), ASM_FLAG_OUT("=@ccc", "=qm") (carry) ); +#endif + asm("rdrand %0" ASM_FLAG_OUT(, "; setc %1") + : "=r"(dst.val), ASM_FLAG_OUT("=@ccc", "=qm")(carry)); break; } _regs.eflags &= ~EFLAGS_MASK; @@ -7803,14 +8249,14 @@ x86_emulate( goto unimplemented_insn; #endif - case 7: /* rdseed / rdpid */ + case 7: /* rdseed / rdpid */ if ( repe_prefix() ) /* rdpid */ { generate_exception_if(ea.type != OP_REG, EXC_UD); vcpu_must_have(rdpid); fail_if(!ops->read_msr); - if ( (rc = ops->read_msr(MSR_TSC_AUX, &msr_val, - ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->read_msr(MSR_TSC_AUX, &msr_val, ctxt)) != + X86EMUL_OKAY ) goto done; dst = ea; dst.val = msr_val; @@ -7821,21 +8267,21 @@ x86_emulate( generate_exception_if(rep_prefix(), EXC_UD); host_and_vcpu_must_have(rdseed); dst = ea; - switch ( op_bytes ) + switch (op_bytes) { case 2: - asm ( "rdseed %w0" ASM_FLAG_OUT(, "; setc %1") - : "=r" (dst.val), ASM_FLAG_OUT("=@ccc", "=qm") (carry) ); + asm("rdseed %w0" ASM_FLAG_OUT(, "; setc %1") + : "=r"(dst.val), ASM_FLAG_OUT("=@ccc", "=qm")(carry)); break; default: -# ifdef __x86_64__ - asm ( "rdseed %k0" ASM_FLAG_OUT(, "; setc %1") - : "=r" (dst.val), ASM_FLAG_OUT("=@ccc", "=qm") (carry) ); +#ifdef __x86_64__ + asm("rdseed %k0" ASM_FLAG_OUT(, "; setc %1") + : "=r"(dst.val), ASM_FLAG_OUT("=@ccc", "=qm")(carry)); break; case 8: -# endif - asm ( "rdseed %0" ASM_FLAG_OUT(, "; setc %1") - : "=r" (dst.val), ASM_FLAG_OUT("=@ccc", "=qm") (carry) ); +#endif + asm("rdseed %0" ASM_FLAG_OUT(, "; setc %1") + : "=r"(dst.val), ASM_FLAG_OUT("=@ccc", "=qm")(carry)); break; } _regs.eflags &= ~EFLAGS_MASK; @@ -7853,9 +8299,8 @@ x86_emulate( if ( rex_prefix & REX_W ) { host_and_vcpu_must_have(cx16); - generate_exception_if(!is_aligned(ea.mem.seg, ea.mem.off, 16, - ctxt, ops), - EXC_GP, 0); + 
generate_exception_if( + !is_aligned(ea.mem.seg, ea.mem.off, 16, ctxt, ops), EXC_GP, 0); op_bytes = 16; } else @@ -7868,8 +8313,8 @@ x86_emulate( aux = container_of(&mmvalp->ymm[2], typeof(*aux), u64[0]); /* Get actual old value. */ - if ( (rc = ops->read(ea.mem.seg, ea.mem.off, old, op_bytes, - ctxt)) != X86EMUL_OKAY ) + if ( (rc = ops->read(ea.mem.seg, ea.mem.off, old, op_bytes, ctxt)) != + X86EMUL_OKAY ) goto done; /* Get expected value. */ @@ -7909,8 +8354,8 @@ x86_emulate( aux->u64[1] = _regs.r(cx); } - switch ( rc = ops->cmpxchg(ea.mem.seg, ea.mem.off, old, aux, - op_bytes, lock_prefix, ctxt) ) + switch (rc = ops->cmpxchg(ea.mem.seg, ea.mem.off, old, aux, + op_bytes, lock_prefix, ctxt)) { case X86EMUL_OKAY: _regs.eflags |= X86_EFLAGS_ZF; @@ -7927,10 +8372,10 @@ x86_emulate( break; } - case X86EMUL_OPC(0x0f, 0xc8) ... X86EMUL_OPC(0x0f, 0xcf): /* bswap */ + case X86EMUL_OPC(0x0f, 0xc8)... X86EMUL_OPC(0x0f, 0xcf): /* bswap */ dst.type = OP_REG; - dst.reg = decode_gpr(&_regs, (b & 7) | ((rex_prefix & 1) << 3)); - switch ( dst.bytes = op_bytes ) + dst.reg = decode_gpr(&_regs, (b & 7) | ((rex_prefix & 1) << 3)); + switch (dst.bytes = op_bytes) { default: /* case 2: */ /* Undefined behaviour. Writes zero on all tested CPUs. */ @@ -7938,82 +8383,97 @@ x86_emulate( break; case 4: #ifdef __x86_64__ - asm ( "bswap %k0" : "=r" (dst.val) : "0" (*(uint32_t *)dst.reg) ); + asm("bswap %k0" : "=r"(dst.val) : "0"(*(uint32_t *)dst.reg)); break; case 8: #endif - asm ( "bswap %0" : "=r" (dst.val) : "0" (*dst.reg) ); + asm("bswap %0" : "=r"(dst.val) : "0"(*dst.reg)); break; } break; - case X86EMUL_OPC_EVEX_66(0x0f, 0xd2): /* vpsrld xmm/m128,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xd3): /* vpsrlq xmm/m128,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xe2): /* vpsra{d,q} xmm/m128,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xf2): /* vpslld xmm/m128,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xf3): /* vpsllq xmm/m128,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xd2): /* vpsrld xmm/m128,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xd3): /* vpsrlq xmm/m128,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xe2): /* vpsra{d,q} xmm/m128,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xf2): /* vpslld xmm/m128,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xf3): /* vpsllq xmm/m128,[xyz]mm,[xyz]mm{k} */ generate_exception_if(evex.brs, EXC_UD); fault_suppression = false; if ( b == 0xe2 ) goto avx512f_no_sae; /* fall through */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xfa): /* vpsubd [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xfb): /* vpsubq [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xfe): /* vpaddd [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xfa): /* vpsubd [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xfb): /* vpsubq [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xfe): /* vpaddd [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ generate_exception_if(evex.w != (b & 1), EXC_UD); goto avx512f_no_sae; - case X86EMUL_OPC(0x0f, 0xd4): /* paddq mm/m64,mm */ - case X86EMUL_OPC(0x0f, 0xf4): /* pmuludq mm/m64,mm */ - case X86EMUL_OPC(0x0f, 0xfb): /* psubq mm/m64,mm */ + case X86EMUL_OPC(0x0f, 0xd4): /* paddq mm/m64,mm */ + case X86EMUL_OPC(0x0f, 0xf4): /* pmuludq mm/m64,mm */ + case X86EMUL_OPC(0x0f, 0xfb): /* psubq mm/m64,mm */ vcpu_must_have(sse2); goto simd_0f_mmx; - case X86EMUL_OPC_F3(0x0f, 0xd6): /* movq2dq mm,xmm */ - 
case X86EMUL_OPC_F2(0x0f, 0xd6): /* movdq2q xmm,mm */ + case X86EMUL_OPC_F3(0x0f, 0xd6): /* movq2dq mm,xmm */ + case X86EMUL_OPC_F2(0x0f, 0xd6): /* movdq2q xmm,mm */ generate_exception_if(ea.type != OP_REG, EXC_UD); op_bytes = 8; host_and_vcpu_must_have(mmx); goto simd_0f_int; - case X86EMUL_OPC(0x0f, 0xe7): /* movntq mm,m64 */ + case X86EMUL_OPC(0x0f, 0xe7): /* movntq mm,m64 */ generate_exception_if(ea.type != OP_MEM, EXC_UD); sfence = true; /* fall through */ - case X86EMUL_OPC(0x0f, 0xda): /* pminub mm/m64,mm */ - case X86EMUL_OPC(0x0f, 0xde): /* pmaxub mm/m64,mm */ - case X86EMUL_OPC(0x0f, 0xea): /* pminsw mm/m64,mm */ - case X86EMUL_OPC(0x0f, 0xee): /* pmaxsw mm/m64,mm */ - case X86EMUL_OPC(0x0f, 0xe0): /* pavgb mm/m64,mm */ - case X86EMUL_OPC(0x0f, 0xe3): /* pavgw mm/m64,mm */ - case X86EMUL_OPC(0x0f, 0xe4): /* pmulhuw mm/m64,mm */ - case X86EMUL_OPC(0x0f, 0xf6): /* psadbw mm/m64,mm */ + case X86EMUL_OPC(0x0f, 0xda): /* pminub mm/m64,mm */ + case X86EMUL_OPC(0x0f, 0xde): /* pmaxub mm/m64,mm */ + case X86EMUL_OPC(0x0f, 0xea): /* pminsw mm/m64,mm */ + case X86EMUL_OPC(0x0f, 0xee): /* pmaxsw mm/m64,mm */ + case X86EMUL_OPC(0x0f, 0xe0): /* pavgb mm/m64,mm */ + case X86EMUL_OPC(0x0f, 0xe3): /* pavgw mm/m64,mm */ + case X86EMUL_OPC(0x0f, 0xe4): /* pmulhuw mm/m64,mm */ + case X86EMUL_OPC(0x0f, 0xf6): /* psadbw mm/m64,mm */ vcpu_must_have(mmxext); goto simd_0f_mmx; - case X86EMUL_OPC_EVEX_66(0x0f, 0xda): /* vpminub [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xde): /* vpmaxub [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xe4): /* vpmulhuw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xea): /* vpminsw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f, 0xee): /* vpmaxsw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xda): /* vpminub [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xde): /* vpmaxub [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f, 0xe4): /* vpmulhuw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xea): /* vpminsw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f, + 0xee): /* vpmaxsw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ host_and_vcpu_must_have(avx512bw); generate_exception_if(evex.brs, EXC_UD); elem_bytes = b & 0x10 ? 
1 : 2; goto avx512f_no_sae; - case X86EMUL_OPC_66(0x0f, 0xe6): /* cvttpd2dq xmm/mem,xmm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xe6): /* vcvttpd2dq {x,y}mm/mem,xmm */ - case X86EMUL_OPC_F3(0x0f, 0xe6): /* cvtdq2pd xmm/mem,xmm */ - case X86EMUL_OPC_VEX_F3(0x0f, 0xe6): /* vcvtdq2pd xmm/mem,{x,y}mm */ - case X86EMUL_OPC_F2(0x0f, 0xe6): /* cvtpd2dq xmm/mem,xmm */ - case X86EMUL_OPC_VEX_F2(0x0f, 0xe6): /* vcvtpd2dq {x,y}mm/mem,xmm */ + case X86EMUL_OPC_66(0x0f, 0xe6): /* cvttpd2dq xmm/mem,xmm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xe6): /* vcvttpd2dq {x,y}mm/mem,xmm */ + case X86EMUL_OPC_F3(0x0f, 0xe6): /* cvtdq2pd xmm/mem,xmm */ + case X86EMUL_OPC_VEX_F3(0x0f, 0xe6): /* vcvtdq2pd xmm/mem,{x,y}mm */ + case X86EMUL_OPC_F2(0x0f, 0xe6): /* cvtpd2dq xmm/mem,xmm */ + case X86EMUL_OPC_VEX_F2(0x0f, 0xe6): /* vcvtpd2dq {x,y}mm/mem,xmm */ d |= TwoOp; op_bytes = 8 << (!!(vex.pfx & VEX_PREFIX_DOUBLE_MASK) + vex.l); goto simd_0f_cvt; - CASE_SIMD_PACKED_INT(0x0f, 0xf7): /* maskmov{q,dqu} {,x}mm,{,x}mm */ - case X86EMUL_OPC_VEX_66(0x0f, 0xf7): /* vmaskmovdqu xmm,xmm */ - generate_exception_if(ea.type != OP_REG, EXC_UD); + CASE_SIMD_PACKED_INT(0x0f, 0xf7) + : /* maskmov{q,dqu} {,x}mm,{,x}mm */ + case X86EMUL_OPC_VEX_66(0x0f, 0xf7) + : /* vmaskmovdqu xmm,xmm */ + generate_exception_if(ea.type != OP_REG, EXC_UD); if ( vex.opcx != vex_none ) { generate_exception_if(vex.l || vex.reg != 0xf, EXC_UD); @@ -8053,7 +8513,7 @@ x86_emulate( opc[2] = 0xc3; copy_REX_VEX(opc, rex_prefix, vex); - invoke_stub("", "", "=a" (ea.val) : [dummy] "i" (0)); + invoke_stub("", "", "=a"(ea.val) : [dummy] "i"(0)); put_stub(stub); if ( !ea.val ) @@ -8108,7 +8568,7 @@ x86_emulate( host_and_vcpu_must_have(ssse3); if ( vex.pfx ) { - simd_0f38_common: + simd_0f38_common: get_fpu(X86EMUL_FPU_xmm); } else @@ -8141,8 +8601,10 @@ x86_emulate( host_and_vcpu_must_have(avx2); } /* fall through */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x0c): /* vpermilps {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x0d): /* vpermilpd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x0c): /* vpermilps {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x0d): /* vpermilpd {x,y}mm/mem,{x,y}mm,{x,y}mm */ generate_exception_if(vex.w, EXC_UD); goto simd_0f_avx; @@ -8190,22 +8652,26 @@ x86_emulate( } copy_REX_VEX(opc, rex_prefix, vex); - emulate_stub("+m" (*mmvalp), "a" (mmvalp)); + emulate_stub("+m"(*mmvalp), "a"(mmvalp)); put_stub(stub); state->simd_size = simd_none; dst.type = OP_NONE; break; - case X86EMUL_OPC_EVEX_66(0x0f38, 0x10): /* vpsrlvw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x11): /* vpsravw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x12): /* vpsllvw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x10): /* vpsrlvw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x11): /* vpsravw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x12): /* vpsllvw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ host_and_vcpu_must_have(avx512bw); generate_exception_if(!evex.w || evex.brs, EXC_UD); elem_bytes = 2; goto avx512f_no_sae; - case X86EMUL_OPC_EVEX_66(0x0f38, 0x18): /* vbroadcastss xmm/m32,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x18): /* vbroadcastss xmm/m32,[xyz]mm{k} */ generate_exception_if(evex.w || evex.brs, EXC_UD); avx512_broadcast: /* @@ -8227,15 +8693,17 @@ x86_emulate( /* vbroadcastf64x4 m256,zmm{k} */ generate_exception_if(ea.type != OP_MEM || evex.lr != 2, EXC_UD); /* fall through */ - case 
X86EMUL_OPC_EVEX_66(0x0f38, 0x19): /* vbroadcastsd xmm/m64,{y,z}mm{k} */ - /* vbroadcastf32x2 xmm/m64,{y,z}mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x19): /* vbroadcastsd xmm/m64,{y,z}mm{k} */ + /* vbroadcastf32x2 xmm/m64,{y,z}mm{k} */ generate_exception_if(!evex.lr || evex.brs, EXC_UD); if ( !evex.w ) host_and_vcpu_must_have(avx512dq); goto avx512_broadcast; - case X86EMUL_OPC_EVEX_66(0x0f38, 0x1a): /* vbroadcastf32x4 m128,{y,z}mm{k} */ - /* vbroadcastf64x2 m128,{y,z}mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x1a): /* vbroadcastf32x4 m128,{y,z}mm{k} */ + /* vbroadcastf64x2 m128,{y,z}mm{k} */ generate_exception_if(ea.type != OP_MEM || !evex.lr || evex.brs, EXC_UD); if ( evex.w ) @@ -8305,7 +8773,7 @@ x86_emulate( case X86EMUL_OPC_VEX_66(0x0f38, 0x2a): /* vmovntdqa mem,{x,y}mm */ generate_exception_if(ea.type != OP_MEM, EXC_UD); /* Ignore the non-temporal hint for now, using movdqa instead. */ - asm volatile ( "mfence" ::: "memory" ); + asm volatile("mfence" ::: "memory"); b = 0x6f; if ( vex.opcx == vex_none ) vcpu_must_have(sse4_1); @@ -8321,7 +8789,7 @@ x86_emulate( generate_exception_if(ea.type != OP_MEM || evex.opmsk || evex.w, EXC_UD); /* Ignore the non-temporal hint for now, using vmovdqa32 instead. */ - asm volatile ( "mfence" ::: "memory" ); + asm volatile("mfence" ::: "memory"); b = 0x6f; evex.opcx = vex_0f; goto vmovdqa; @@ -8361,7 +8829,7 @@ x86_emulate( pvex->reg = 0xf; opc[2] = 0xc3; - invoke_stub("", "", "=a" (ea.val) : [dummy] "i" (0)); + invoke_stub("", "", "=a"(ea.val) : [dummy] "i"(0)); put_stub(stub); evex.opmsk = 1; /* fake */ @@ -8383,16 +8851,21 @@ x86_emulate( host_and_vcpu_must_have(sse4_2); goto simd_0f38_common; - case X86EMUL_OPC_EVEX_66(0x0f38, 0x38): /* vpminsb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x3a): /* vpminuw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x3c): /* vpmaxsb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x3e): /* vpmaxuw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x38): /* vpminsb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x3a): /* vpminuw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x3c): /* vpmaxsb [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x3e): /* vpmaxuw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ host_and_vcpu_must_have(avx512bw); generate_exception_if(evex.brs, EXC_UD); elem_bytes = b & 2 ?: 1; goto avx512f_no_sae; - case X86EMUL_OPC_EVEX_66(0x0f38, 0x40): /* vpmull{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0x40): /* vpmull{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ if ( evex.w ) host_and_vcpu_must_have(avx512dq); goto avx512f_no_sae; @@ -8421,7 +8894,8 @@ x86_emulate( case X86EMUL_OPC_VEX_66(0x0f38, 0x79): /* vpbroadcastw xmm/m16,{x,y}mm */ op_bytes = 1 << ((!(b & 0x20) * 2) + (b & 1)); /* fall through */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x46): /* vpsravd {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x46): /* vpsravd {x,y}mm/mem,{x,y}mm,{x,y}mm */ generate_exception_if(vex.w, EXC_UD); goto simd_0f_avx2; @@ -8429,17 +8903,23 @@ x86_emulate( generate_exception_if(ea.type != OP_MEM || !vex.l || vex.w, EXC_UD); goto simd_0f_avx2; - case X86EMUL_OPC_VEX_66(0x0f38, 0x8c): /* vpmaskmov{d,q} mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x8e): /* vpmaskmov{d,q} {x,y}mm,{x,y}mm,mem */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x8c): /* vpmaskmov{d,q} mem,{x,y}mm,{x,y}mm */ + case 
X86EMUL_OPC_VEX_66(0x0f38, + 0x8e): /* vpmaskmov{d,q} {x,y}mm,{x,y}mm,mem */ generate_exception_if(ea.type != OP_MEM, EXC_UD); host_and_vcpu_must_have(avx2); elem_bytes = 4 << vex.w; goto vmaskmov; - case X86EMUL_OPC_VEX_66(0x0f38, 0x90): /* vpgatherd{d,q} {x,y}mm,mem,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x91): /* vpgatherq{d,q} {x,y}mm,mem,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x92): /* vgatherdp{s,d} {x,y}mm,mem,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x93): /* vgatherqp{s,d} {x,y}mm,mem,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x90): /* vpgatherd{d,q} {x,y}mm,mem,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x91): /* vpgatherq{d,q} {x,y}mm,mem,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x92): /* vgatherdp{s,d} {x,y}mm,mem,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x93): /* vgatherqp{s,d} {x,y}mm,mem,{x,y}mm */ { unsigned int mask_reg = ~vex.reg & (mode_64bit() ? 0xf : 7); typeof(vex) *pvex; @@ -8451,8 +8931,9 @@ x86_emulate( ASSERT(ea.type == OP_MEM); generate_exception_if(modrm_reg == state->sib_index || - modrm_reg == mask_reg || - state->sib_index == mask_reg, EXC_UD); + modrm_reg == mask_reg || + state->sib_index == mask_reg, + EXC_UD); generate_exception_if(!cpu_has_avx, EXC_UD); vcpu_must_have(avx2); get_fpu(X86EMUL_FPU_ymm); @@ -8469,20 +8950,20 @@ x86_emulate( pvex->reg = 0xf; opc[2] = 0xc3; - invoke_stub("", "", "=m" (*mmvalp) : "a" (mmvalp)); + invoke_stub("", "", "=m"(*mmvalp) : "a"(mmvalp)); pvex->pfx = vex_f3; /* vmovdqu */ /* Switch to sib_index as source. */ pvex->r = !mode_64bit() || !(state->sib_index & 8); opc[1] = (state->sib_index & 7) << 3; - invoke_stub("", "", "=m" (index) : "a" (&index)); + invoke_stub("", "", "=m"(index) : "a"(&index)); /* Switch to mask_reg as source. */ pvex->r = !mode_64bit() || !(mask_reg & 8); opc[1] = (mask_reg & 7) << 3; - invoke_stub("", "", "=m" (mask) : "a" (&mask)); + invoke_stub("", "", "=m"(mask) : "a"(&mask)); put_stub(stub); /* Clear untouched parts of the destination and mask values. */ @@ -8497,10 +8978,10 @@ x86_emulate( { signed long idx = b & 1 ? index.qw[i] : index.dw[i]; - rc = ops->read(ea.mem.seg, - truncate_ea(ea.mem.off + - (idx << state->sib_scale)), - (void *)mmvalp + i * op_bytes, op_bytes, ctxt); + rc = ops->read( + ea.mem.seg, + truncate_ea(ea.mem.off + (idx << state->sib_scale)), + (void *)mmvalp + i * op_bytes, op_bytes, ctxt); if ( rc != X86EMUL_OKAY ) { /* @@ -8539,100 +9020,154 @@ x86_emulate( pvex->reg = 0xf; opc[2] = 0xc3; - invoke_stub("", "", "+m" (*mmvalp) : "a" (mmvalp)); + invoke_stub("", "", "+m"(*mmvalp) : "a"(mmvalp)); pvex->pfx = vex_f3; /* vmovdqu */ /* Switch to mask_reg as destination. 
*/ pvex->r = !mode_64bit() || !(mask_reg & 8); opc[1] = (mask_reg & 7) << 3; - invoke_stub("", "", "+m" (mask) : "a" (&mask)); + invoke_stub("", "", "+m"(mask) : "a"(&mask)); put_stub(stub); state->simd_size = simd_none; break; } - case X86EMUL_OPC_VEX_66(0x0f38, 0x96): /* vfmaddsub132p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x97): /* vfmsubadd132p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x98): /* vfmadd132p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0x96): /* vfmaddsub132p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0x97): /* vfmsubadd132p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0x98): /* vfmadd132p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ case X86EMUL_OPC_VEX_66(0x0f38, 0x99): /* vfmadd132s{s,d} xmm/mem,xmm,xmm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x9a): /* vfmsub132p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0x9a): /* vfmsub132p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ case X86EMUL_OPC_VEX_66(0x0f38, 0x9b): /* vfmsub132s{s,d} xmm/mem,xmm,xmm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x9c): /* vfnmadd132p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x9d): /* vfnmadd132s{s,d} xmm/mem,xmm,xmm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x9e): /* vfnmsub132p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0x9f): /* vfnmsub132s{s,d} xmm/mem,xmm,xmm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0xa6): /* vfmaddsub213p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0xa7): /* vfmsubadd213p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0xa8): /* vfmadd213p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0x9c): /* vfnmadd132p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x9d): /* vfnmadd132s{s,d} xmm/mem,xmm,xmm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0x9e): /* vfnmsub132p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0x9f): /* vfnmsub132s{s,d} xmm/mem,xmm,xmm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0xa6): /* vfmaddsub213p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0xa7): /* vfmsubadd213p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0xa8): /* vfmadd213p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ case X86EMUL_OPC_VEX_66(0x0f38, 0xa9): /* vfmadd213s{s,d} xmm/mem,xmm,xmm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0xaa): /* vfmsub213p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0xaa): /* vfmsub213p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ case X86EMUL_OPC_VEX_66(0x0f38, 0xab): /* vfmsub213s{s,d} xmm/mem,xmm,xmm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0xac): /* vfnmadd213p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0xad): /* vfnmadd213s{s,d} xmm/mem,xmm,xmm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0xae): /* vfnmsub213p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0xaf): /* vfnmsub213s{s,d} xmm/mem,xmm,xmm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0xb6): /* vfmaddsub231p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0xb7): /* vfmsubadd231p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0xb8): /* vfmadd231p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0xac): /* vfnmadd213p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0xad): /* vfnmadd213s{s,d} xmm/mem,xmm,xmm */ + case 
X86EMUL_OPC_VEX_66( + 0x0f38, 0xae): /* vfnmsub213p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0xaf): /* vfnmsub213s{s,d} xmm/mem,xmm,xmm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0xb6): /* vfmaddsub231p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0xb7): /* vfmsubadd231p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0xb8): /* vfmadd231p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ case X86EMUL_OPC_VEX_66(0x0f38, 0xb9): /* vfmadd231s{s,d} xmm/mem,xmm,xmm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0xba): /* vfmsub231p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0xba): /* vfmsub231p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ case X86EMUL_OPC_VEX_66(0x0f38, 0xbb): /* vfmsub231s{s,d} xmm/mem,xmm,xmm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0xbc): /* vfnmadd231p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0xbd): /* vfnmadd231s{s,d} xmm/mem,xmm,xmm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0xbe): /* vfnmsub231p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f38, 0xbf): /* vfnmsub231s{s,d} xmm/mem,xmm,xmm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0xbc): /* vfnmadd231p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0xbd): /* vfnmadd231s{s,d} xmm/mem,xmm,xmm */ + case X86EMUL_OPC_VEX_66( + 0x0f38, 0xbe): /* vfnmsub231p{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f38, + 0xbf): /* vfnmsub231s{s,d} xmm/mem,xmm,xmm */ host_and_vcpu_must_have(fma); goto simd_0f_ymm; - case X86EMUL_OPC_EVEX_66(0x0f38, 0x96): /* vfmaddsub132p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x97): /* vfmsubadd132p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x98): /* vfmadd132p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x9a): /* vfmsub132p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x9c): /* vfnmadd132p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x9e): /* vfnmsub132p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xa6): /* vfmaddsub213p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xa7): /* vfmsubadd213p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xa8): /* vfmadd213p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xaa): /* vfmsub213p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xac): /* vfnmadd213p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xae): /* vfnmsub213p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xb6): /* vfmaddsub231p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xb7): /* vfmsubadd231p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xb8): /* vfmadd231p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xba): /* vfmsub231p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xbc): /* vfnmadd231p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xbe): /* vfnmsub231p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0x96): /* vfmaddsub132p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0x97): /* vfmsubadd132p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0x98): /* vfmadd132p{s,d} 
[xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0x9a): /* vfmsub132p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0x9c): /* vfnmadd132p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0x9e): /* vfnmsub132p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0xa6): /* vfmaddsub213p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0xa7): /* vfmsubadd213p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0xa8): /* vfmadd213p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0xaa): /* vfmsub213p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0xac): /* vfnmadd213p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0xae): /* vfnmsub213p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0xb6): /* vfmaddsub231p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0xb7): /* vfmsubadd231p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0xb8): /* vfmadd231p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0xba): /* vfmsub231p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0xbc): /* vfnmadd231p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f38, 0xbe): /* vfnmsub231p{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */ host_and_vcpu_must_have(avx512f); if ( ea.type != OP_REG || !evex.brs ) avx512_vlen_check(false); goto simd_zmm; - case X86EMUL_OPC_EVEX_66(0x0f38, 0x99): /* vfmadd132s{s,d} xmm/mem,xmm,xmm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x9b): /* vfmsub132s{s,d} xmm/mem,xmm,xmm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x9d): /* vfnmadd132s{s,d} xmm/mem,xmm,xmm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0x9f): /* vfnmsub132s{s,d} xmm/mem,xmm,xmm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xa9): /* vfmadd213s{s,d} xmm/mem,xmm,xmm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xab): /* vfmsub213s{s,d} xmm/mem,xmm,xmm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xad): /* vfnmadd213s{s,d} xmm/mem,xmm,xmm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xaf): /* vfnmsub213s{s,d} xmm/mem,xmm,xmm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xb9): /* vfmadd231s{s,d} xmm/mem,xmm,xmm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xbb): /* vfmsub231s{s,d} xmm/mem,xmm,xmm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xbd): /* vfnmadd231s{s,d} xmm/mem,xmm,xmm{k} */ - case X86EMUL_OPC_EVEX_66(0x0f38, 0xbf): /* vfnmsub231s{s,d} xmm/mem,xmm,xmm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x99): /* vfmadd132s{s,d} xmm/mem,xmm,xmm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x9b): /* vfmsub132s{s,d} xmm/mem,xmm,xmm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x9d): /* vfnmadd132s{s,d} xmm/mem,xmm,xmm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0x9f): /* vfnmsub132s{s,d} xmm/mem,xmm,xmm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0xa9): /* vfmadd213s{s,d} xmm/mem,xmm,xmm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0xab): /* vfmsub213s{s,d} xmm/mem,xmm,xmm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0xad): /* vfnmadd213s{s,d} xmm/mem,xmm,xmm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0xaf): /* vfnmsub213s{s,d} xmm/mem,xmm,xmm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0xb9): /* vfmadd231s{s,d} xmm/mem,xmm,xmm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0xbb): /* vfmsub231s{s,d} xmm/mem,xmm,xmm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0xbd): /* vfnmadd231s{s,d} 
xmm/mem,xmm,xmm{k} */ + case X86EMUL_OPC_EVEX_66(0x0f38, + 0xbf): /* vfnmsub231s{s,d} xmm/mem,xmm,xmm{k} */ host_and_vcpu_must_have(avx512f); generate_exception_if(ea.type != OP_REG && evex.brs, EXC_UD); if ( !evex.brs ) avx512_vlen_check(true); goto simd_zmm; - case X86EMUL_OPC(0x0f38, 0xc8): /* sha1nexte xmm/m128,xmm */ - case X86EMUL_OPC(0x0f38, 0xc9): /* sha1msg1 xmm/m128,xmm */ - case X86EMUL_OPC(0x0f38, 0xca): /* sha1msg2 xmm/m128,xmm */ - case X86EMUL_OPC(0x0f38, 0xcb): /* sha256rnds2 XMM0,xmm/m128,xmm */ - case X86EMUL_OPC(0x0f38, 0xcc): /* sha256msg1 xmm/m128,xmm */ - case X86EMUL_OPC(0x0f38, 0xcd): /* sha256msg2 xmm/m128,xmm */ + case X86EMUL_OPC(0x0f38, 0xc8): /* sha1nexte xmm/m128,xmm */ + case X86EMUL_OPC(0x0f38, 0xc9): /* sha1msg1 xmm/m128,xmm */ + case X86EMUL_OPC(0x0f38, 0xca): /* sha1msg2 xmm/m128,xmm */ + case X86EMUL_OPC(0x0f38, 0xcb): /* sha256rnds2 XMM0,xmm/m128,xmm */ + case X86EMUL_OPC(0x0f38, 0xcc): /* sha256msg1 xmm/m128,xmm */ + case X86EMUL_OPC(0x0f38, 0xcd): /* sha256msg2 xmm/m128,xmm */ host_and_vcpu_must_have(sha); op_bytes = 16; goto simd_0f38_common; @@ -8640,20 +9175,18 @@ x86_emulate( case X86EMUL_OPC(0x0f38, 0xf0): /* movbe m,r */ case X86EMUL_OPC(0x0f38, 0xf1): /* movbe r,m */ vcpu_must_have(movbe); - switch ( op_bytes ) + switch (op_bytes) { case 2: - asm ( "xchg %h0,%b0" : "=Q" (dst.val) - : "0" (*(uint32_t *)&src.val) ); + asm("xchg %h0,%b0" : "=Q"(dst.val) : "0"(*(uint32_t *)&src.val)); break; case 4: #ifdef __x86_64__ - asm ( "bswap %k0" : "=r" (dst.val) - : "0" (*(uint32_t *)&src.val) ); + asm("bswap %k0" : "=r"(dst.val) : "0"(*(uint32_t *)&src.val)); break; case 8: #endif - asm ( "bswap %0" : "=r" (dst.val) : "0" (src.val) ); + asm("bswap %0" : "=r"(dst.val) : "0"(src.val)); break; default: ASSERT_UNREACHABLE(); @@ -8664,25 +9197,22 @@ x86_emulate( case X86EMUL_OPC_F2(0x0f38, 0xf1): /* crc32 r/m{16,32,64}, r{32,64} */ host_and_vcpu_must_have(sse4_2); dst.bytes = rex_prefix & REX_W ? 
8 : 4; - switch ( op_bytes ) + switch (op_bytes) { case 1: - asm ( "crc32b %1,%k0" : "+r" (dst.val) - : "qm" (*(uint8_t *)&src.val) ); + asm("crc32b %1,%k0" : "+r"(dst.val) : "qm"(*(uint8_t *)&src.val)); break; case 2: - asm ( "crc32w %1,%k0" : "+r" (dst.val) - : "rm" (*(uint16_t *)&src.val) ); + asm("crc32w %1,%k0" : "+r"(dst.val) : "rm"(*(uint16_t *)&src.val)); break; case 4: - asm ( "crc32l %1,%k0" : "+r" (dst.val) - : "rm" (*(uint32_t *)&src.val) ); + asm("crc32l %1,%k0" : "+r"(dst.val) : "rm"(*(uint32_t *)&src.val)); break; -# ifdef __x86_64__ +#ifdef __x86_64__ case 8: - asm ( "crc32q %1,%0" : "+r" (dst.val) : "rm" (src.val) ); + asm("crc32q %1,%0" : "+r"(dst.val) : "rm"(src.val)); break; -# endif +#endif default: ASSERT_UNREACHABLE(); } @@ -8717,7 +9247,7 @@ x86_emulate( buf[5] = 0xc3; src.reg = decode_vex_gpr(vex.reg, &_regs, ctxt); - emulate_stub([dst] "=&c" (dst.val), "[dst]" (&src.val), "a" (*src.reg)); + emulate_stub([dst] "=&c"(dst.val), "[dst]"(&src.val), "a"(*src.reg)); put_stub(stub); break; @@ -8728,7 +9258,7 @@ x86_emulate( uint8_t *buf = get_stub(stub); typeof(vex) *pvex = container_of(buf + 1, typeof(vex), raw[0]); - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 1: /* blsr r,r/m */ case 2: /* blsmsk r,r/m */ @@ -8751,7 +9281,7 @@ x86_emulate( buf[5] = 0xc3; dst.reg = decode_vex_gpr(vex.reg, &_regs, ctxt); - emulate_stub("=&a" (dst.val), "c" (&src.val)); + emulate_stub("=&a"(dst.val), "c"(&src.val)); put_stub(stub); break; @@ -8767,22 +9297,18 @@ x86_emulate( vcpu_must_have(adx); #ifdef __x86_64__ if ( op_bytes == 8 ) - asm ( "add %[aux],%[aux]\n\t" - "adc %[src],%[dst]\n\t" - ASM_FLAG_OUT(, "setc %[carry]") - : [dst] "+r" (dst.val), - [carry] ASM_FLAG_OUT("=@ccc", "=qm") (carry), - [aux] "+r" (aux) - : [src] "rm" (src.val) ); + asm("add %[aux],%[aux]\n\t" + "adc %[src],%[dst]\n\t" ASM_FLAG_OUT(, "setc %[carry]") + : [dst] "+r"(dst.val), + [carry] ASM_FLAG_OUT("=@ccc", "=qm")(carry), [aux] "+r"(aux) + : [src] "rm"(src.val)); else #endif - asm ( "add %[aux],%[aux]\n\t" - "adc %k[src],%k[dst]\n\t" - ASM_FLAG_OUT(, "setc %[carry]") - : [dst] "+r" (dst.val), - [carry] ASM_FLAG_OUT("=@ccc", "=qm") (carry), - [aux] "+r" (aux) - : [src] "rm" (src.val) ); + asm("add %[aux],%[aux]\n\t" + "adc %k[src],%k[dst]\n\t" ASM_FLAG_OUT(, "setc %[carry]") + : [dst] "+r"(dst.val), + [carry] ASM_FLAG_OUT("=@ccc", "=qm")(carry), [aux] "+r"(aux) + : [src] "rm"(src.val)); if ( carry ) _regs.eflags |= mask; else @@ -8795,11 +9321,13 @@ x86_emulate( generate_exception_if(vex.l, EXC_UD); ea.reg = decode_vex_gpr(vex.reg, &_regs, ctxt); if ( mode_64bit() && vex.w ) - asm ( "mulq %3" : "=a" (*ea.reg), "=d" (dst.val) - : "0" (src.val), "rm" (_regs.r(dx)) ); + asm("mulq %3" + : "=a"(*ea.reg), "=d"(dst.val) + : "0"(src.val), "rm"(_regs.r(dx))); else - asm ( "mull %3" : "=a" (*ea.reg), "=d" (dst.val) - : "0" ((uint32_t)src.val), "rm" (_regs.edx) ); + asm("mull %3" + : "=a"(*ea.reg), "=d"(dst.val) + : "0"((uint32_t)src.val), "rm"(_regs.edx)); break; case X86EMUL_OPC_VEX_66(0x0f3a, 0x00): /* vpermq $imm8,ymm/m256,ymm */ @@ -8807,22 +9335,29 @@ x86_emulate( generate_exception_if(!vex.l || !vex.w, EXC_UD); goto simd_0f_imm8_avx2; - case X86EMUL_OPC_VEX_66(0x0f3a, 0x38): /* vinserti128 $imm8,xmm/m128,ymm,ymm */ + case X86EMUL_OPC_VEX_66(0x0f3a, + 0x38): /* vinserti128 $imm8,xmm/m128,ymm,ymm */ case X86EMUL_OPC_VEX_66(0x0f3a, 0x39): /* vextracti128 $imm8,ymm,xmm/m128 */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x46): /* vperm2i128 $imm8,ymm/m256,ymm,ymm */ + case X86EMUL_OPC_VEX_66(0x0f3a, + 0x46): /* 
vperm2i128 $imm8,ymm/m256,ymm,ymm */ generate_exception_if(!vex.l, EXC_UD); /* fall through */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x02): /* vpblendd $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x02): /* vpblendd $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ generate_exception_if(vex.w, EXC_UD); goto simd_0f_imm8_avx2; - case X86EMUL_OPC_VEX_66(0x0f3a, 0x06): /* vperm2f128 $imm8,ymm/m256,ymm,ymm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x18): /* vinsertf128 $imm8,xmm/m128,ymm,ymm */ + case X86EMUL_OPC_VEX_66(0x0f3a, + 0x06): /* vperm2f128 $imm8,ymm/m256,ymm,ymm */ + case X86EMUL_OPC_VEX_66(0x0f3a, + 0x18): /* vinsertf128 $imm8,xmm/m128,ymm,ymm */ case X86EMUL_OPC_VEX_66(0x0f3a, 0x19): /* vextractf128 $imm8,ymm,xmm/m128 */ generate_exception_if(!vex.l, EXC_UD); /* fall through */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x04): /* vpermilps $imm8,{x,y}mm/mem,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x05): /* vpermilpd $imm8,{x,y}mm/mem,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f3a, + 0x04): /* vpermilps $imm8,{x,y}mm/mem,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f3a, + 0x05): /* vpermilpd $imm8,{x,y}mm/mem,{x,y}mm */ generate_exception_if(vex.w, EXC_UD); goto simd_0f_imm8_avx; @@ -8844,7 +9379,7 @@ x86_emulate( host_and_vcpu_must_have(ssse3); if ( vex.pfx ) { - simd_0f3a_common: + simd_0f3a_common: get_fpu(X86EMUL_FPU_xmm); } else @@ -8893,7 +9428,7 @@ x86_emulate( } copy_REX_VEX(opc, rex_prefix, vex); - invoke_stub("", "", "=m" (dst.val) : "a" (&dst.val)); + invoke_stub("", "", "=m"(dst.val) : "a"(&dst.val)); put_stub(stub); ASSERT(!state->simd_size); @@ -8940,8 +9475,8 @@ x86_emulate( copy_VEX(opc, vex); /* Latch MXCSR - we may need to restore it below. */ - invoke_stub("stmxcsr %[mxcsr]", "", - "=m" (*mmvalp), [mxcsr] "=m" (mxcsr) : "a" (mmvalp)); + invoke_stub("stmxcsr %[mxcsr]", "", "=m"(*mmvalp), [mxcsr] "=m"(mxcsr) + : "a"(mmvalp)); put_stub(stub); @@ -8950,7 +9485,7 @@ x86_emulate( rc = ops->write(ea.mem.seg, ea.mem.off, mmvalp, 8 << vex.l, ctxt); if ( rc != X86EMUL_OKAY ) { - asm volatile ( "ldmxcsr %0" :: "m" (mxcsr) ); + asm volatile("ldmxcsr %0" ::"m"(mxcsr)); goto done; } } @@ -8959,10 +9494,14 @@ x86_emulate( break; } - case X86EMUL_OPC_EVEX_66(0x0f3a, 0x1e): /* vpcmpu{d,q} $imm8,[xyz]mm/mem,[xyz]mm,k{k} */ - case X86EMUL_OPC_EVEX_66(0x0f3a, 0x1f): /* vpcmp{d,q} $imm8,[xyz]mm/mem,[xyz]mm,k{k} */ - case X86EMUL_OPC_EVEX_66(0x0f3a, 0x3e): /* vpcmpu{b,w} $imm8,[xyz]mm/mem,[xyz]mm,k{k} */ - case X86EMUL_OPC_EVEX_66(0x0f3a, 0x3f): /* vpcmp{b,w} $imm8,[xyz]mm/mem,[xyz]mm,k{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f3a, 0x1e): /* vpcmpu{d,q} $imm8,[xyz]mm/mem,[xyz]mm,k{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f3a, 0x1f): /* vpcmp{d,q} $imm8,[xyz]mm/mem,[xyz]mm,k{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f3a, 0x3e): /* vpcmpu{b,w} $imm8,[xyz]mm/mem,[xyz]mm,k{k} */ + case X86EMUL_OPC_EVEX_66( + 0x0f3a, 0x3f): /* vpcmp{b,w} $imm8,[xyz]mm/mem,[xyz]mm,k{k} */ generate_exception_if(!evex.r || !evex.R || evex.z, EXC_UD); if ( !(b & 0x20) ) goto avx512f_imm8_no_sae; @@ -9001,10 +9540,12 @@ x86_emulate( op_bytes = 4; goto simd_0f3a_common; - case X86EMUL_OPC_VEX_66(0x0f3a, 0x21): /* vinsertps $imm8,xmm/m128,xmm,xmm */ + case X86EMUL_OPC_VEX_66(0x0f3a, + 0x21): /* vinsertps $imm8,xmm/m128,xmm,xmm */ op_bytes = 4; /* fall through */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x41): /* vdppd $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66(0x0f3a, + 0x41): /* vdppd $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */ generate_exception_if(vex.l, EXC_UD); goto simd_0f_imm8_avx; @@ -9013,8 +9554,8 @@ 
x86_emulate( if ( !vex.w ) host_and_vcpu_must_have(avx512dq); opmask_shift_imm: - generate_exception_if(vex.l || !vex.r || vex.reg != 0xf || - ea.type != OP_REG, EXC_UD); + generate_exception_if( + vex.l || !vex.r || vex.reg != 0xf || ea.type != OP_REG, EXC_UD); host_and_vcpu_must_have(avx512f); get_fpu(X86EMUL_FPU_opmask); op_bytes = 1; /* Any non-zero value will do. */ @@ -9025,66 +9566,84 @@ x86_emulate( host_and_vcpu_must_have(avx512bw); goto opmask_shift_imm; - case X86EMUL_OPC_66(0x0f3a, 0x44): /* pclmulqdq $imm8,xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x44): /* vpclmulqdq $imm8,xmm/m128,xmm,xmm */ + case X86EMUL_OPC_66(0x0f3a, 0x44): /* pclmulqdq $imm8,xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f3a, + 0x44): /* vpclmulqdq $imm8,xmm/m128,xmm,xmm */ host_and_vcpu_must_have(pclmulqdq); if ( vex.opcx == vex_none ) goto simd_0f3a_common; generate_exception_if(vex.l, EXC_UD); goto simd_0f_imm8_avx; - case X86EMUL_OPC_VEX_66(0x0f3a, 0x4a): /* vblendvps {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x4b): /* vblendvpd {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x4a): /* vblendvps {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x4b): /* vblendvpd {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ generate_exception_if(vex.w, EXC_UD); goto simd_0f_imm8_avx; - case X86EMUL_OPC_VEX_66(0x0f3a, 0x48): /* vpermil2ps $imm,{x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ - /* vpermil2ps $imm,{x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x49): /* vpermil2pd $imm,{x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ - /* vpermil2pd $imm,{x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x48): /* vpermil2ps $imm,{x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ + /* vpermil2ps $imm,{x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x49): /* vpermil2pd $imm,{x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ + /* vpermil2pd $imm,{x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ host_and_vcpu_must_have(xop); goto simd_0f_imm8_ymm; - case X86EMUL_OPC_VEX_66(0x0f3a, 0x4c): /* vpblendvb {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x4c): /* vpblendvb {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ generate_exception_if(vex.w, EXC_UD); goto simd_0f_int_imm8; - case X86EMUL_OPC_VEX_66(0x0f3a, 0x5c): /* vfmaddsubps {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - /* vfmaddsubps {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x5d): /* vfmaddsubpd {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - /* vfmaddsubpd {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x5e): /* vfmsubaddps {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - /* vfmsubaddps {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x5f): /* vfmsubaddpd {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - /* vfmsubaddpd {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x68): /* vfmaddps {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - /* vfmaddps {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x69): /* vfmaddpd {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - /* vfmaddpd {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x5c): /* vfmaddsubps {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + /* vfmaddsubps {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x5d): /* vfmaddsubpd {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + /* vfmaddsubpd {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x5e): 
/* vfmsubaddps {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + /* vfmsubaddps {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x5f): /* vfmsubaddpd {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + /* vfmsubaddpd {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x68): /* vfmaddps {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + /* vfmaddps {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x69): /* vfmaddpd {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + /* vfmaddpd {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ case X86EMUL_OPC_VEX_66(0x0f3a, 0x6a): /* vfmaddss xmm,xmm/m32,xmm,xmm */ /* vfmaddss xmm/m32,xmm,xmm,xmm */ case X86EMUL_OPC_VEX_66(0x0f3a, 0x6b): /* vfmaddsd xmm,xmm/m64,xmm,xmm */ /* vfmaddsd xmm/m64,xmm,xmm,xmm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x6c): /* vfmsubps {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - /* vfmsubps {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x6d): /* vfmsubpd {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - /* vfmsubpd {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x6c): /* vfmsubps {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + /* vfmsubps {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x6d): /* vfmsubpd {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + /* vfmsubpd {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ case X86EMUL_OPC_VEX_66(0x0f3a, 0x6e): /* vfmsubss xmm,xmm/m32,xmm,xmm */ /* vfmsubss xmm/m32,xmm,xmm,xmm */ case X86EMUL_OPC_VEX_66(0x0f3a, 0x6f): /* vfmsubsd xmm,xmm/m64,xmm,xmm */ /* vfmsubsd xmm/m64,xmm,xmm,xmm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x78): /* vfnmaddps {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - /* vfnmaddps {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x79): /* vfnmaddpd {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - /* vfnmaddpd {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x78): /* vfnmaddps {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + /* vfnmaddps {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x79): /* vfnmaddpd {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + /* vfnmaddpd {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ case X86EMUL_OPC_VEX_66(0x0f3a, 0x7a): /* vfnmaddss xmm,xmm/m32,xmm,xmm */ /* vfnmaddss xmm/m32,xmm,xmm,xmm */ case X86EMUL_OPC_VEX_66(0x0f3a, 0x7b): /* vfnmaddsd xmm,xmm/m64,xmm,xmm */ /* vfnmaddsd xmm/m64,xmm,xmm,xmm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x7c): /* vfnmsubps {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - /* vfnmsubps {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0x7d): /* vfnmsubpd {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ - /* vfnmsubpd {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x7c): /* vfnmsubps {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + /* vfnmsubps {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_VEX_66( + 0x0f3a, 0x7d): /* vfnmsubpd {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + /* vfnmsubpd {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ case X86EMUL_OPC_VEX_66(0x0f3a, 0x7e): /* vfnmsubss xmm,xmm/m32,xmm,xmm */ /* vfnmsubss xmm/m32,xmm,xmm,xmm */ case X86EMUL_OPC_VEX_66(0x0f3a, 0x7f): /* vfnmsubsd xmm,xmm/m64,xmm,xmm */ @@ -9142,12 +9701,12 @@ x86_emulate( copy_REX_VEX(opc, rex_prefix, vex); #ifdef __x86_64__ if ( rex_prefix & REX_W ) - emulate_stub("=c" (dst.val), "m" (*mmvalp), "D" (mmvalp), - "a" (_regs.rax), "d" (_regs.rdx)); + emulate_stub("=c"(dst.val), "m"(*mmvalp), "D"(mmvalp), + "a"(_regs.rax), "d"(_regs.rdx)); else #endif - emulate_stub("=c" (dst.val), "m" (*mmvalp), "D" 
(mmvalp), - "a" (_regs.eax), "d" (_regs.edx)); + emulate_stub("=c"(dst.val), "m"(*mmvalp), "D"(mmvalp), + "a"(_regs.eax), "d"(_regs.edx)); state->simd_size = simd_none; if ( b & 1 ) @@ -9155,13 +9714,14 @@ x86_emulate( dst.type = OP_NONE; break; - case X86EMUL_OPC(0x0f3a, 0xcc): /* sha1rnds4 $imm8,xmm/m128,xmm */ + case X86EMUL_OPC(0x0f3a, 0xcc): /* sha1rnds4 $imm8,xmm/m128,xmm */ host_and_vcpu_must_have(sha); op_bytes = 16; goto simd_0f3a_common; - case X86EMUL_OPC_66(0x0f3a, 0xdf): /* aeskeygenassist $imm8,xmm/m128,xmm */ - case X86EMUL_OPC_VEX_66(0x0f3a, 0xdf): /* vaeskeygenassist $imm8,xmm/m128,xmm */ + case X86EMUL_OPC_66(0x0f3a, 0xdf): /* aeskeygenassist $imm8,xmm/m128,xmm */ + case X86EMUL_OPC_VEX_66(0x0f3a, + 0xdf): /* vaeskeygenassist $imm8,xmm/m128,xmm */ host_and_vcpu_must_have(aesni); if ( vex.opcx == vex_none ) goto simd_0f3a_common; @@ -9177,9 +9737,9 @@ x86_emulate( ctxt, ops)) != X86EMUL_OKAY ) goto done; if ( mode_64bit() && vex.w ) - asm ( "rorq %b1,%0" : "=g" (dst.val) : "c" (imm1), "0" (src.val) ); + asm("rorq %b1,%0" : "=g"(dst.val) : "c"(imm1), "0"(src.val)); else - asm ( "rorl %b1,%k0" : "=g" (dst.val) : "c" (imm1), "0" (src.val) ); + asm("rorl %b1,%k0" : "=g"(dst.val) : "c"(imm1), "0"(src.val)); break; case X86EMUL_OPC_XOP(08, 0x85): /* vpmacssww xmm,xmm/m128,xmm,xmm */ @@ -9212,13 +9772,14 @@ x86_emulate( /* vpperm xmm,xmm/m128,xmm,xmm */ generate_exception_if(vex.l, EXC_UD); /* fall through */ - case X86EMUL_OPC_XOP(08, 0xa2): /* vpcmov {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ - /* vpcmov {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ + case X86EMUL_OPC_XOP(08, + 0xa2): /* vpcmov {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */ + /* vpcmov {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */ host_and_vcpu_must_have(xop); goto simd_0f_imm8_ymm; case X86EMUL_OPC_XOP(09, 0x01): /* XOP Grp1 */ - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 1: /* blcfill r/m,r */ case 2: /* blsfill r/m,r */ @@ -9250,14 +9811,14 @@ x86_emulate( buf[5] = 0xc3; dst.reg = decode_vex_gpr(vex.reg, &_regs, ctxt); - emulate_stub([dst] "=&a" (dst.val), "c" (&src.val)); + emulate_stub([dst] "=&a"(dst.val), "c"(&src.val)); put_stub(stub); break; } case X86EMUL_OPC_XOP(09, 0x02): /* XOP Grp2 */ - switch ( modrm_reg & 7 ) + switch (modrm_reg & 7) { case 1: /* blcmsk r/m,r */ case 6: /* blci r/m,r */ @@ -9344,7 +9905,7 @@ x86_emulate( *(uint32_t *)(buf + 5) = imm1; buf[9] = 0xc3; - emulate_stub([dst] "=&c" (dst.val), "[dst]" (&src.val)); + emulate_stub([dst] "=&c"(dst.val), "[dst]"(&src.val)); put_stub(stub); break; @@ -9363,22 +9924,30 @@ x86_emulate( { ea.val = src.val; op_bytes = dst.bytes; - rc = ops->rmw(dst.mem.seg, dst.mem.off, dst.bytes, &_regs.eflags, - state, ctxt); + rc = ops->rmw(dst.mem.seg, dst.mem.off, dst.bytes, &_regs.eflags, state, + ctxt); if ( rc != X86EMUL_OKAY ) goto done; /* Some operations require a register to be written. 
*/ - switch ( state->rmw ) + switch (state->rmw) { case rmw_xchg: case rmw_xadd: - switch ( dst.bytes ) + switch (dst.bytes) { - case 1: *(uint8_t *)src.reg = (uint8_t)ea.val; break; - case 2: *(uint16_t *)src.reg = (uint16_t)ea.val; break; - case 4: *src.reg = (uint32_t)ea.val; break; /* 64b reg: zero-extend */ - case 8: *src.reg = ea.val; break; + case 1: + *(uint8_t *)src.reg = (uint8_t)ea.val; + break; + case 2: + *(uint16_t *)src.reg = (uint16_t)ea.val; + break; + case 4: + *src.reg = (uint32_t)ea.val; + break; /* 64b reg: zero-extend */ + case 8: + *src.reg = ea.val; + break; } break; @@ -9391,9 +9960,10 @@ x86_emulate( else if ( state->simd_size ) { generate_exception_if(!op_bytes, EXC_UD); - generate_exception_if((vex.opcx && (d & TwoOp) && - (vex.reg != 0xf || (evex_encoded() && !evex.RX))), - EXC_UD); + generate_exception_if( + (vex.opcx && (d & TwoOp) && + (vex.reg != 0xf || (evex_encoded() && !evex.RX))), + EXC_UD); if ( !opc ) BUG(); @@ -9415,24 +9985,25 @@ x86_emulate( if ( op_bytes < 16 || (vex.opcx - ? /* vmov{{a,nt}p{s,d},{,nt}dqa,ntdq} are exceptions. */ - ext == ext_0f - ? ((b | 1) != 0x29 && b != 0x2b && - ((b | 0x10) != 0x7f || vex.pfx != vex_66) && - b != 0xe7) - : (ext != ext_0f38 || b != 0x2a) - : /* movup{s,d}, {,mask}movdqu, and lddqu are exceptions. */ - ext == ext_0f && - ((b | 1) == 0x11 || - ((b | 0x10) == 0x7f && vex.pfx == vex_f3) || - b == 0xf7 || b == 0xf0)) ) + ? /* vmov{{a,nt}p{s,d},{,nt}dqa,ntdq} are exceptions. */ + ext == ext_0f + ? ((b | 1) != 0x29 && b != 0x2b && + ((b | 0x10) != 0x7f || vex.pfx != vex_66) && + b != 0xe7) + : (ext != ext_0f38 || b != 0x2a) + : /* movup{s,d}, {,mask}movdqu, and lddqu are exceptions. + */ + ext == ext_0f && + ((b | 1) == 0x11 || + ((b | 0x10) == 0x7f && vex.pfx == vex_f3) || + b == 0xf7 || b == 0xf0)) ) mxcsr = MXCSR_MM; else if ( vcpu_has_misalignsse() ) - asm ( "stmxcsr %0" : "=m" (mxcsr) ); - generate_exception_if(!(mxcsr & MXCSR_MM) && - !is_aligned(ea.mem.seg, ea.mem.off, op_bytes, - ctxt, ops), - EXC_GP, 0); + asm("stmxcsr %0" : "=m"(mxcsr)); + generate_exception_if( + !(mxcsr & MXCSR_MM) && + !is_aligned(ea.mem.seg, ea.mem.off, op_bytes, ctxt, ops), + EXC_GP, 0); EXPECT(elem_bytes > 0); if ( evex.brs ) @@ -9472,12 +10043,11 @@ x86_emulate( op_mask != full ) d = (d & ~SrcMask) | SrcMem; - switch ( d & SrcMask ) + switch (d & SrcMask) { case SrcMem: rc = ops->read(ea.mem.seg, truncate_ea(ea.mem.off + first_byte), - (void *)mmvalp + first_byte, op_bytes, - ctxt); + (void *)mmvalp + first_byte, op_bytes, ctxt); if ( rc != X86EMUL_OKAY ) goto done; /* fall through */ @@ -9494,7 +10064,7 @@ x86_emulate( if ( (d & SrcMask) == SrcMem ) d |= Mov; /* Force memory write to occur below. */ - switch ( ctxt->opcode ) + switch (ctxt->opcode) { case X86EMUL_OPC_VEX_66(0x0f38, 0x2e): /* vmaskmovps */ case X86EMUL_OPC_VEX_66(0x0f38, 0x2f): /* vmaskmovpd */ @@ -9519,38 +10089,44 @@ x86_emulate( } /* {,v}maskmov{q,dqu}, as an exception, uses rDI. */ - if ( likely((ctxt->opcode & ~(X86EMUL_OPC_PFX_MASK | - X86EMUL_OPC_ENCODING_MASK)) != + if ( likely((ctxt->opcode & + ~(X86EMUL_OPC_PFX_MASK | X86EMUL_OPC_ENCODING_MASK)) != X86EMUL_OPC(0x0f, 0xf7)) ) - invoke_stub("", "", "+m" (*mmvalp) : "a" (mmvalp)); + invoke_stub("", "", "+m"(*mmvalp) : "a"(mmvalp)); else - invoke_stub("", "", "+m" (*mmvalp) : "D" (mmvalp)); + invoke_stub("", "", "+m"(*mmvalp) : "D"(mmvalp)); put_stub(stub); } - switch ( dst.type ) + switch (dst.type) { case OP_REG: /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. 
*/ - switch ( dst.bytes ) + switch (dst.bytes) { - case 1: *(uint8_t *)dst.reg = (uint8_t)dst.val; break; - case 2: *(uint16_t *)dst.reg = (uint16_t)dst.val; break; - case 4: *dst.reg = (uint32_t)dst.val; break; /* 64b: zero-ext */ - case 8: *dst.reg = dst.val; break; + case 1: + *(uint8_t *)dst.reg = (uint8_t)dst.val; + break; + case 2: + *(uint16_t *)dst.reg = (uint16_t)dst.val; + break; + case 4: + *dst.reg = (uint32_t)dst.val; + break; /* 64b: zero-ext */ + case 8: + *dst.reg = dst.val; + break; } break; case OP_MEM: - if ( !(d & Mov) && (dst.orig_val == dst.val) && - !ctxt->force_writeback ) + if ( !(d & Mov) && (dst.orig_val == dst.val) && !ctxt->force_writeback ) /* nothing to do */; else if ( lock_prefix ) { fail_if(!ops->cmpxchg); - rc = ops->cmpxchg( - dst.mem.seg, dst.mem.off, &dst.orig_val, - &dst.val, dst.bytes, true, ctxt); + rc = ops->cmpxchg(dst.mem.seg, dst.mem.off, &dst.orig_val, &dst.val, + dst.bytes, true, ctxt); if ( rc == X86EMUL_CMPXCHG_FAILED ) rc = X86EMUL_RETRY; } @@ -9562,7 +10138,7 @@ x86_emulate( : (void *)mmvalp + first_byte, dst.bytes, ctxt); if ( sfence ) - asm volatile ( "sfence" ::: "memory" ); + asm volatile("sfence" ::: "memory"); } if ( rc != 0 ) goto done; @@ -9570,7 +10146,7 @@ x86_emulate( break; } - complete_insn: /* Commit shadow register state. */ +complete_insn: /* Commit shadow register state. */ put_fpu(fpu_type, false, state, ctxt, ops); fpu_type = X86EMUL_FPU_none; @@ -9595,14 +10171,14 @@ x86_emulate( ctxt->regs->eflags &= ~X86_EFLAGS_RF; - done: +done: put_fpu(fpu_type, insn_bytes > 0 && dst.type == OP_MEM, state, ctxt, ops); put_stub(stub); return rc; #undef state #ifdef __XEN__ - emulation_stub_failure: +emulation_stub_failure: generate_exception_if(stub_exn.info.fields.trapnr == EXC_MF, EXC_MF); if ( stub_exn.info.fields.trapnr == EXC_XM ) { @@ -9612,17 +10188,16 @@ x86_emulate( cr4 = X86_CR4_OSXMMEXCPT; generate_exception(cr4 & X86_CR4_OSXMMEXCPT ? EXC_XM : EXC_UD); } - gprintk(XENLOG_WARNING, - "exception %u (ec=%04x) in emulation stub (line %u)\n", - stub_exn.info.fields.trapnr, stub_exn.info.fields.ec, - stub_exn.line); - gprintk(XENLOG_INFO, " stub: %"__stringify(MAX_INST_LEN)"ph\n", + gprintk( + XENLOG_WARNING, "exception %u (ec=%04x) in emulation stub (line %u)\n", + stub_exn.info.fields.trapnr, stub_exn.info.fields.ec, stub_exn.line); + gprintk(XENLOG_INFO, " stub: %" __stringify(MAX_INST_LEN) "ph\n", stub.func); generate_exception_if(stub_exn.info.fields.trapnr == EXC_UD, EXC_UD); domain_crash(current->domain); #endif - unhandleable: +unhandleable: rc = X86EMUL_UNHANDLEABLE; goto done; } @@ -9639,12 +10214,8 @@ x86_emulate( #undef vex #undef ea -int x86_emul_rmw( - void *ptr, - unsigned int bytes, - uint32_t *eflags, - struct x86_emulate_state *state, - struct x86_emulate_ctxt *ctxt) +int x86_emul_rmw(void *ptr, unsigned int bytes, uint32_t *eflags, + struct x86_emulate_state *state, struct x86_emulate_ctxt *ctxt) { unsigned long *dst = ptr; @@ -9655,80 +10226,82 @@ int x86_emul_rmw( * loaded into the EFLAGS register. Hence our only choice is J{E,R}CXZ. 
*/ #ifdef __x86_64__ -# define JCXZ "jrcxz" +#define JCXZ "jrcxz" #else -# define JCXZ "jecxz" +#define JCXZ "jecxz" #endif -#define COND_LOCK(op) \ +#define COND_LOCK(op) \ JCXZ " .L" #op "%=\n\t" \ - "lock\n" \ - ".L" #op "%=:\n\t" \ - #op + "lock\n" \ + ".L" #op "%=:\n\t" #op - switch ( state->rmw ) + switch (state->rmw) { -#define UNOP(op) \ - case rmw_##op: \ +#define UNOP(op) \ + case rmw_##op: \ _emulate_1op(COND_LOCK(op), dst, bytes, *eflags, \ - "c" ((long)state->lock_prefix) ); \ + "c"((long)state->lock_prefix)); \ break -#define BINOP(op, sfx) \ - case rmw_##op: \ - _emulate_2op_SrcV##sfx(COND_LOCK(op), \ - state->ea.val, dst, bytes, *eflags, \ - "c" ((long)state->lock_prefix) ); \ +#define BINOP(op, sfx) \ + case rmw_##op: \ + _emulate_2op_SrcV##sfx(COND_LOCK(op), state->ea.val, dst, bytes, \ + *eflags, "c"((long)state->lock_prefix)); \ break -#define SHIFT(op) \ - case rmw_##op: \ - ASSERT(!state->lock_prefix); \ +#define SHIFT(op) \ + case rmw_##op: \ + ASSERT(!state->lock_prefix); \ _emulate_2op_SrcB(#op, state->ea.val, dst, bytes, *eflags); \ break - BINOP(adc, ); - BINOP(add, ); - BINOP(and, ); - BINOP(btc, _nobyte); - BINOP(bts, _nobyte); - BINOP(btr, _nobyte); - UNOP(dec); - UNOP(inc); - UNOP(neg); - BINOP(or, ); - SHIFT(rcl); - SHIFT(rcr); - SHIFT(rol); - SHIFT(ror); - SHIFT(sar); - BINOP(sbb, ); - SHIFT(shl); - SHIFT(shr); - BINOP(sub, ); - BINOP(xor, ); + BINOP(adc, ); + BINOP(add, ); + BINOP(and, ); + BINOP(btc, _nobyte); + BINOP(bts, _nobyte); + BINOP(btr, _nobyte); + UNOP(dec); + UNOP(inc); + UNOP(neg); + BINOP(or, ); + SHIFT(rcl); + SHIFT(rcr); + SHIFT(rol); + SHIFT(ror); + SHIFT(sar); + BINOP(sbb, ); + SHIFT(shl); + SHIFT(shr); + BINOP(sub, ); + BINOP(xor, ); #undef UNOP #undef BINOP #undef SHIFT case rmw_not: - switch ( state->op_bytes ) + switch (state->op_bytes) { case 1: - asm ( COND_LOCK(notb) " %0" - : "+m" (*dst) : "c" ((long)state->lock_prefix) ); + asm(COND_LOCK(notb) " %0" + : "+m"(*dst) + : "c"((long)state->lock_prefix)); break; case 2: - asm ( COND_LOCK(notw) " %0" - : "+m" (*dst) : "c" ((long)state->lock_prefix) ); + asm(COND_LOCK(notw) " %0" + : "+m"(*dst) + : "c"((long)state->lock_prefix)); break; case 4: - asm ( COND_LOCK(notl) " %0" - : "+m" (*dst) : "c" ((long)state->lock_prefix) ); + asm(COND_LOCK(notl) " %0" + : "+m"(*dst) + : "c"((long)state->lock_prefix)); break; #ifdef __x86_64__ case 8: - asm ( COND_LOCK(notq) " %0" - : "+m" (*dst) : "c" ((long)state->lock_prefix) ); + asm(COND_LOCK(notq) " %0" + : "+m"(*dst) + : "c"((long)state->lock_prefix)); break; #endif } @@ -9736,61 +10309,56 @@ int x86_emul_rmw( case rmw_shld: ASSERT(!state->lock_prefix); - _emulate_2op_SrcV_nobyte("shld", - state->ea.val, dst, bytes, *eflags, - "c" (state->ea.orig_val) ); + _emulate_2op_SrcV_nobyte("shld", state->ea.val, dst, bytes, *eflags, + "c"(state->ea.orig_val)); break; case rmw_shrd: ASSERT(!state->lock_prefix); - _emulate_2op_SrcV_nobyte("shrd", - state->ea.val, dst, bytes, *eflags, - "c" (state->ea.orig_val) ); + _emulate_2op_SrcV_nobyte("shrd", state->ea.val, dst, bytes, *eflags, + "c"(state->ea.orig_val)); break; case rmw_xadd: - switch ( state->op_bytes ) + switch (state->op_bytes) { unsigned long dummy; -#define XADD(sz, cst, mod) \ - case sz: \ - asm ( _PRE_EFLAGS("[efl]", "[msk]", "[tmp]") \ - COND_LOCK(xadd) " %"#mod"[reg], %[mem]; " \ - _POST_EFLAGS("[efl]", "[msk]", "[tmp]") \ - : [reg] "+" #cst (state->ea.val), \ - [mem] "+m" (*dst), \ - [efl] "+g" (*eflags), \ - [tmp] "=&r" (dummy) \ - : "c" ((long)state->lock_prefix), \ - [msk] "i" 
(EFLAGS_MASK) ); \ - break - XADD(1, q, b); - XADD(2, r, w); - XADD(4, r, k); +#define XADD(sz, cst, mod) \ + case sz: \ + asm(_PRE_EFLAGS("[efl]", "[msk]", "[tmp]") \ + COND_LOCK(xadd) " %" #mod "[reg], %[mem]; " _POST_EFLAGS( \ + "[efl]", "[msk]", "[tmp]") \ + : [reg] "+" #cst(state->ea.val), [mem] "+m"(*dst), \ + [efl] "+g"(*eflags), [tmp] "=&r"(dummy) \ + : "c"((long)state->lock_prefix), [msk] "i"(EFLAGS_MASK)); \ + break + XADD(1, q, b); + XADD(2, r, w); + XADD(4, r, k); #ifdef __x86_64__ - XADD(8, r, ); + XADD(8, r, ); #endif #undef XADD } break; case rmw_xchg: - switch ( state->op_bytes ) + switch (state->op_bytes) { case 1: - asm ( "xchg %b0, %b1" : "+q" (state->ea.val), "+m" (*dst) ); + asm("xchg %b0, %b1" : "+q"(state->ea.val), "+m"(*dst)); break; case 2: - asm ( "xchg %w0, %w1" : "+r" (state->ea.val), "+m" (*dst) ); + asm("xchg %w0, %w1" : "+r"(state->ea.val), "+m"(*dst)); break; case 4: #ifdef __x86_64__ - asm ( "xchg %k0, %k1" : "+r" (state->ea.val), "+m" (*dst) ); + asm("xchg %k0, %k1" : "+r"(state->ea.val), "+m"(*dst)); break; case 8: #endif - asm ( "xchg %0, %1" : "+r" (state->ea.val), "+m" (*dst) ); + asm("xchg %0, %1" : "+r"(state->ea.val), "+m"(*dst)); break; } break; @@ -9833,9 +10401,8 @@ static void __init __maybe_unused build_assertions(void) * In debug builds, wrap x86_emulate() with some assertions about its expected * behaviour. */ -int x86_emulate_wrapper( - struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) +int x86_emulate_wrapper(struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) { unsigned long orig_ip = ctxt->regs->r(ip); int rc; @@ -9877,19 +10444,16 @@ int x86_emulate_wrapper( #include -struct x86_emulate_state * -x86_decode_insn( +struct x86_emulate_state *x86_decode_insn( struct x86_emulate_ctxt *ctxt, - int (*insn_fetch)( - enum x86_segment seg, unsigned long offset, - void *p_data, unsigned int bytes, - struct x86_emulate_ctxt *ctxt)) + int (*insn_fetch)(enum x86_segment seg, unsigned long offset, void *p_data, + unsigned int bytes, struct x86_emulate_ctxt *ctxt)) { static DEFINE_PER_CPU(struct x86_emulate_state, state); struct x86_emulate_state *state = &this_cpu(state); const struct x86_emulate_ops ops = { .insn_fetch = insn_fetch, - .read = x86emul_unhandleable_rw, + .read = x86emul_unhandleable_rw, }; int rc; @@ -9932,17 +10496,15 @@ void x86_emulate_free_state(struct x86_emulate_state *state) } #endif -unsigned int -x86_insn_opsize(const struct x86_emulate_state *state) +unsigned int x86_insn_opsize(const struct x86_emulate_state *state) { check_state(state); return state->op_bytes << 3; } -int -x86_insn_modrm(const struct x86_emulate_state *state, - unsigned int *rm, unsigned int *reg) +int x86_insn_modrm(const struct x86_emulate_state *state, unsigned int *rm, + unsigned int *reg) { check_state(state); @@ -9963,9 +10525,8 @@ x86_insn_modrm(const struct x86_emulate_state *state, return state->modrm_mod; } -unsigned long -x86_insn_operand_ea(const struct x86_emulate_state *state, - enum x86_segment *seg) +unsigned long x86_insn_operand_ea(const struct x86_emulate_state *state, + enum x86_segment *seg) { *seg = state->ea.type == OP_MEM ? 
state->ea.mem.seg : x86_seg_none; @@ -9974,24 +10535,25 @@ x86_insn_operand_ea(const struct x86_emulate_state *state, return state->ea.mem.off; } -bool -x86_insn_is_mem_access(const struct x86_emulate_state *state, - const struct x86_emulate_ctxt *ctxt) +bool x86_insn_is_mem_access(const struct x86_emulate_state *state, + const struct x86_emulate_ctxt *ctxt) { if ( state->ea.type == OP_MEM ) return ctxt->opcode != 0x8d /* LEA */ && (ctxt->opcode != X86EMUL_OPC(0x0f, 0x01) || (state->modrm_reg & 7) != 7) /* INVLPG */; - switch ( ctxt->opcode ) + switch (ctxt->opcode) { case 0x6c ... 0x6f: /* INS / OUTS */ case 0xa4 ... 0xa7: /* MOVS / CMPS */ case 0xaa ... 0xaf: /* STOS / LODS / SCAS */ case 0xd7: /* XLAT */ - CASE_SIMD_PACKED_INT(0x0f, 0xf7): /* MASKMOV{Q,DQU} */ - case X86EMUL_OPC_VEX_66(0x0f, 0xf7): /* VMASKMOVDQU */ - return true; + CASE_SIMD_PACKED_INT(0x0f, 0xf7) + : /* MASKMOV{Q,DQU} */ + case X86EMUL_OPC_VEX_66(0x0f, 0xf7) + : /* VMASKMOVDQU */ + return true; case X86EMUL_OPC(0x0f, 0x01): /* Cover CLZERO. */ @@ -10001,11 +10563,10 @@ x86_insn_is_mem_access(const struct x86_emulate_state *state, return false; } -bool -x86_insn_is_mem_write(const struct x86_emulate_state *state, - const struct x86_emulate_ctxt *ctxt) +bool x86_insn_is_mem_write(const struct x86_emulate_state *state, + const struct x86_emulate_ctxt *ctxt) { - switch ( state->desc & DstMask ) + switch (state->desc & DstMask) { case DstMem: /* The SrcMem check is to cover {,V}MASKMOV{Q,DQU}. */ @@ -10024,18 +10585,21 @@ x86_insn_is_mem_write(const struct x86_emulate_state *state, return ctxt->opcode == X86EMUL_OPC(0x0f, 0x01) && (state->modrm_rm & 7) == 4 && (state->modrm_reg & 7) == 7; - switch ( ctxt->opcode ) + switch (ctxt->opcode) { - case 0x6c: case 0x6d: /* INS */ - case 0xa4: case 0xa5: /* MOVS */ - case 0xaa: case 0xab: /* STOS */ - case X86EMUL_OPC(0x0f, 0xab): /* BTS */ - case X86EMUL_OPC(0x0f, 0xb3): /* BTR */ - case X86EMUL_OPC(0x0f, 0xbb): /* BTC */ + case 0x6c: + case 0x6d: /* INS */ + case 0xa4: + case 0xa5: /* MOVS */ + case 0xaa: + case 0xab: /* STOS */ + case X86EMUL_OPC(0x0f, 0xab): /* BTS */ + case X86EMUL_OPC(0x0f, 0xb3): /* BTR */ + case X86EMUL_OPC(0x0f, 0xbb): /* BTC */ return true; case 0xd9: - switch ( state->modrm_reg & 7 ) + switch (state->modrm_reg & 7) { case 2: /* FST m32fp */ case 3: /* FSTP m32fp */ @@ -10046,7 +10610,7 @@ x86_insn_is_mem_write(const struct x86_emulate_state *state, break; case 0xdb: - switch ( state->modrm_reg & 7 ) + switch (state->modrm_reg & 7) { case 1: /* FISTTP m32i */ case 2: /* FIST m32i */ @@ -10057,7 +10621,7 @@ x86_insn_is_mem_write(const struct x86_emulate_state *state, break; case 0xdd: - switch ( state->modrm_reg & 7 ) + switch (state->modrm_reg & 7) { case 1: /* FISTTP m64i */ case 2: /* FST m64fp */ @@ -10069,7 +10633,7 @@ x86_insn_is_mem_write(const struct x86_emulate_state *state, break; case 0xdf: - switch ( state->modrm_reg & 7 ) + switch (state->modrm_reg & 7) { case 1: /* FISTTP m16i */ case 2: /* FIST m16i */ @@ -10093,11 +10657,10 @@ x86_insn_is_mem_write(const struct x86_emulate_state *state, return false; } -bool -x86_insn_is_portio(const struct x86_emulate_state *state, - const struct x86_emulate_ctxt *ctxt) +bool x86_insn_is_portio(const struct x86_emulate_state *state, + const struct x86_emulate_ctxt *ctxt) { - switch ( ctxt->opcode ) + switch (ctxt->opcode) { case 0x6c ... 0x6f: /* INS / OUTS */ case 0xe4 ... 
0xe7: /* IN / OUT imm8 */ @@ -10108,17 +10671,16 @@ x86_insn_is_portio(const struct x86_emulate_state *state, return false; } -bool -x86_insn_is_cr_access(const struct x86_emulate_state *state, - const struct x86_emulate_ctxt *ctxt) +bool x86_insn_is_cr_access(const struct x86_emulate_state *state, + const struct x86_emulate_ctxt *ctxt) { - switch ( ctxt->opcode ) + switch (ctxt->opcode) { unsigned int ext; case X86EMUL_OPC(0x0f, 0x01): - if ( x86_insn_modrm(state, NULL, &ext) >= 0 - && (ext & 5) == 4 ) /* SMSW / LMSW */ + if ( x86_insn_modrm(state, NULL, &ext) >= 0 && + (ext & 5) == 4 ) /* SMSW / LMSW */ return true; break; @@ -10131,12 +10693,12 @@ x86_insn_is_cr_access(const struct x86_emulate_state *state, return false; } -unsigned long -x86_insn_immediate(const struct x86_emulate_state *state, unsigned int nr) +unsigned long x86_insn_immediate(const struct x86_emulate_state *state, + unsigned int nr) { check_state(state); - switch ( nr ) + switch (nr) { case 0: return state->imm1; @@ -10147,9 +10709,8 @@ x86_insn_immediate(const struct x86_emulate_state *state, unsigned int nr) return 0; } -unsigned int -x86_insn_length(const struct x86_emulate_state *state, - const struct x86_emulate_ctxt *ctxt) +unsigned int x86_insn_length(const struct x86_emulate_state *state, + const struct x86_emulate_ctxt *ctxt) { check_state(state); diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c index 15edd5df96..2f359885e2 100644 --- a/xen/arch/x86/xstate.c +++ b/xen/arch/x86/xstate.c @@ -34,7 +34,7 @@ uint32_t __read_mostly mxcsr_mask = 0x0000ffbf; /* Cached xcr0 for fast read */ static DEFINE_PER_CPU(uint64_t, xcr0); -/* Because XCR0 is cached for each CPU, xsetbv() is not exposed. Users should +/* Because XCR0 is cached for each CPU, xsetbv() is not exposed. Users should * use set_xcr0() instead. 
*/ static inline bool xsetbv(u32 index, u64 xfeatures) @@ -42,15 +42,14 @@ static inline bool xsetbv(u32 index, u64 xfeatures) u32 hi = xfeatures >> 32; u32 lo = (u32)xfeatures; - asm volatile ( "1: .byte 0x0f,0x01,0xd1\n" - "3: \n" - ".section .fixup,\"ax\" \n" - "2: xor %0,%0 \n" - " jmp 3b \n" - ".previous \n" - _ASM_EXTABLE(1b, 2b) - : "+a" (lo) - : "c" (index), "d" (hi)); + asm volatile("1: .byte 0x0f,0x01,0xd1\n" + "3: \n" + ".section .fixup,\"ax\" \n" + "2: xor %0,%0 \n" + " jmp 3b \n" + ".previous \n" _ASM_EXTABLE(1b, 2b) + : "+a"(lo) + : "c"(index), "d"(hi)); return lo != 0; } @@ -113,8 +112,7 @@ static int setup_xstate_features(bool bsp) } else { - cpuid_count(XSTATE_CPUID, leaf, &eax, - &ebx, &ecx, &edx); + cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx); BUG_ON(eax != xstate_sizes[leaf]); BUG_ON(ebx != xstate_offsets[leaf]); BUG_ON(!(ecx & XSTATE_ALIGN64) != !test_bit(leaf, &xstate_align)); @@ -124,8 +122,7 @@ static int setup_xstate_features(bool bsp) return 0; } -static void setup_xstate_comp(uint16_t *comp_offsets, - const uint64_t xcomp_bv) +static void setup_xstate_comp(uint16_t *comp_offsets, const uint64_t xcomp_bv) { unsigned int i; unsigned int offset; @@ -169,7 +166,7 @@ void expand_xsave_states(struct vcpu *v, void *dest, unsigned int size) { const struct xsave_struct *xsave = v->arch.xsave_area; const void *src; - uint16_t comp_offsets[sizeof(xfeature_mask)*8]; + uint16_t comp_offsets[sizeof(xfeature_mask) * 8]; u64 xstate_bv = xsave->xsave_hdr.xstate_bv; u64 valid; @@ -193,7 +190,7 @@ void expand_xsave_states(struct vcpu *v, void *dest, unsigned int size) memcpy(dest, xsave, XSTATE_AREA_MIN_SIZE); memset(dest + XSTATE_AREA_MIN_SIZE, 0, size - XSTATE_AREA_MIN_SIZE); - ((struct xsave_struct *)dest)->xsave_hdr.xcomp_bv = 0; + ((struct xsave_struct *)dest)->xsave_hdr.xcomp_bv = 0; /* * Copy each region from the possibly compacted offset to the @@ -234,7 +231,7 @@ void compress_xsave_states(struct vcpu *v, const void *src, unsigned int size) { struct xsave_struct *xsave = v->arch.xsave_area; void *dest; - uint16_t comp_offsets[sizeof(xfeature_mask)*8]; + uint16_t comp_offsets[sizeof(xfeature_mask) * 8]; u64 xstate_bv, valid; BUG_ON(!v->arch.xcr0_accum); @@ -292,17 +289,16 @@ void xsave(struct vcpu *v, uint64_t mask) uint32_t hmask = mask >> 32; uint32_t lmask = mask; unsigned int fip_width = v->domain->arch.x87_fip_width; -#define XSAVE(pfx) \ - if ( v->arch.xcr0_accum & XSTATE_XSAVES_ONLY ) \ - asm volatile ( ".byte " pfx "0x0f,0xc7,0x2f\n" /* xsaves */ \ - : "=m" (*ptr) \ - : "a" (lmask), "d" (hmask), "D" (ptr) ); \ - else \ - alternative_io(".byte " pfx "0x0f,0xae,0x27\n", /* xsave */ \ - ".byte " pfx "0x0f,0xae,0x37\n", /* xsaveopt */ \ - X86_FEATURE_XSAVEOPT, \ - "=m" (*ptr), \ - "a" (lmask), "d" (hmask), "D" (ptr)) +#define XSAVE(pfx) \ + if ( v->arch.xcr0_accum & XSTATE_XSAVES_ONLY ) \ + asm volatile(".byte " pfx "0x0f,0xc7,0x2f\n" /* xsaves */ \ + : "=m"(*ptr) \ + : "a"(lmask), "d"(hmask), "D"(ptr)); \ + else \ + alternative_io(".byte " pfx "0x0f,0xae,0x27\n", /* xsave */ \ + ".byte " pfx "0x0f,0xae,0x37\n", /* xsaveopt */ \ + X86_FEATURE_XSAVEOPT, "=m"(*ptr), "a"(lmask), \ + "d"(hmask), "D"(ptr)) if ( fip_width == 8 || !(mask & X86_XCR0_FP) ) { @@ -348,7 +344,7 @@ void xsave(struct vcpu *v, uint64_t mask) { struct ix87_env fpu_env; - asm volatile ( "fnstenv %0" : "=m" (fpu_env) ); + asm volatile("fnstenv %0" : "=m"(fpu_env)); ptr->fpu_sse.fip.sel = fpu_env.fcs; ptr->fpu_sse.fdp.sel = fpu_env.fds; fip_width = 4; @@ -378,52 +374,53 @@ void xrstor(struct 
vcpu *v, uint64_t mask) if ( (mask & ptr->xsave_hdr.xstate_bv & X86_XCR0_FP) && !(ptr->fpu_sse.fsw & ~ptr->fpu_sse.fcw & 0x003f) && boot_cpu_data.x86_vendor == X86_VENDOR_AMD ) - asm volatile ( "fnclex\n\t" /* clear exceptions */ - "ffree %%st(7)\n\t" /* clear stack tag */ - "fildl %0" /* load to clear state */ - : : "m" (ptr->fpu_sse) ); + asm volatile("fnclex\n\t" /* clear exceptions */ + "ffree %%st(7)\n\t" /* clear stack tag */ + "fildl %0" /* load to clear state */ + : + : "m"(ptr->fpu_sse)); /* * XRSTOR can fault if passed a corrupted data block. We handle this * possibility, which may occur if the block was passed to us by control * tools or through VCPUOP_initialise, by silently adjusting state. */ - for ( prev_faults = faults = 0; ; prev_faults = faults ) + for ( prev_faults = faults = 0;; prev_faults = faults ) { - switch ( __builtin_expect(ptr->fpu_sse.x[FPU_WORD_SIZE_OFFSET], 8) ) + switch (__builtin_expect(ptr->fpu_sse.x[FPU_WORD_SIZE_OFFSET], 8)) { - BUILD_BUG_ON(sizeof(faults) != 4); /* Clang doesn't support %z in asm. */ -#define _xrstor(insn) \ - asm volatile ( "1: .byte " insn "\n" \ - "3:\n" \ - " .section .fixup,\"ax\"\n" \ - "2: incl %[faults]\n" \ - " jmp 3b\n" \ - " .previous\n" \ - _ASM_EXTABLE(1b, 2b) \ - : [mem] "+m" (*ptr), [faults] "+g" (faults) \ - : [lmask] "a" (lmask), [hmask] "d" (hmask), \ - [ptr] "D" (ptr) ) - -#define XRSTOR(pfx) \ - if ( v->arch.xcr0_accum & XSTATE_XSAVES_ONLY ) \ - { \ - if ( unlikely(!(ptr->xsave_hdr.xcomp_bv & \ - XSTATE_COMPACTION_ENABLED)) ) \ - { \ - ASSERT(!ptr->xsave_hdr.xcomp_bv); \ - ptr->xsave_hdr.xcomp_bv = ptr->xsave_hdr.xstate_bv | \ - XSTATE_COMPACTION_ENABLED; \ - } \ - _xrstor(pfx "0x0f,0xc7,0x1f"); /* xrstors */ \ - } \ - else \ - _xrstor(pfx "0x0f,0xae,0x2f") /* xrstor */ + BUILD_BUG_ON(sizeof(faults) != + 4); /* Clang doesn't support %z in asm. */ +#define _xrstor(insn) \ + asm volatile("1: .byte " insn "\n" \ + "3:\n" \ + " .section .fixup,\"ax\"\n" \ + "2: incl %[faults]\n" \ + " jmp 3b\n" \ + " .previous\n" _ASM_EXTABLE(1b, 2b) \ + : [mem] "+m"(*ptr), [faults] "+g"(faults) \ + : [lmask] "a"(lmask), [hmask] "d"(hmask), [ptr] "D"(ptr)) + +#define XRSTOR(pfx) \ + if ( v->arch.xcr0_accum & XSTATE_XSAVES_ONLY ) \ + { \ + if ( unlikely( \ + !(ptr->xsave_hdr.xcomp_bv & XSTATE_COMPACTION_ENABLED)) ) \ + { \ + ASSERT(!ptr->xsave_hdr.xcomp_bv); \ + ptr->xsave_hdr.xcomp_bv = \ + ptr->xsave_hdr.xstate_bv | XSTATE_COMPACTION_ENABLED; \ + } \ + _xrstor(pfx "0x0f,0xc7,0x1f"); /* xrstors */ \ + } \ + else \ + _xrstor(pfx "0x0f,0xae,0x2f") /* xrstor */ default: XRSTOR("0x48,"); break; - case 4: case 2: + case 4: + case 2: XRSTOR(""); break; #undef XRSTOR @@ -432,8 +429,8 @@ void xrstor(struct vcpu *v, uint64_t mask) if ( likely(faults == prev_faults) ) break; #ifndef NDEBUG - gprintk(XENLOG_WARNING, "fault#%u: mxcsr=%08x\n", - faults, ptr->fpu_sse.mxcsr); + gprintk(XENLOG_WARNING, "fault#%u: mxcsr=%08x\n", faults, + ptr->fpu_sse.mxcsr); gprintk(XENLOG_WARNING, "xs=%016lx xc=%016lx\n", ptr->xsave_hdr.xstate_bv, ptr->xsave_hdr.xcomp_bv); gprintk(XENLOG_WARNING, "r0=%016lx r1=%016lx\n", @@ -443,7 +440,7 @@ void xrstor(struct vcpu *v, uint64_t mask) gprintk(XENLOG_WARNING, "r4=%016lx r5=%016lx\n", ptr->xsave_hdr.reserved[4], ptr->xsave_hdr.reserved[5]); #endif - switch ( faults ) + switch (faults) { case 1: /* Stage 1: Reset state to be loaded. 
*/ ptr->xsave_hdr.xstate_bv &= ~mask; @@ -473,7 +470,8 @@ void xrstor(struct vcpu *v, uint64_t mask) ptr->fpu_sse.mxcsr = MXCSR_DEFAULT; ptr->xsave_hdr.xstate_bv = 0; ptr->xsave_hdr.xcomp_bv = v->arch.xcr0_accum & XSTATE_XSAVES_ONLY - ? XSTATE_COMPACTION_ENABLED : 0; + ? XSTATE_COMPACTION_ENABLED + : 0; continue; } @@ -584,8 +582,7 @@ void xstate_init(struct cpuinfo_x86 *c) u32 eax, ebx, ecx, edx; u64 feature_mask; - if ( (bsp && !use_xsave) || - boot_cpu_data.cpuid_level < XSTATE_CPUID ) + if ( (bsp && !use_xsave) || boot_cpu_data.cpuid_level < XSTATE_CPUID ) { BUG_ON(!bsp); setup_clear_cpu_cap(X86_FEATURE_XSAVE); @@ -615,10 +612,10 @@ void xstate_init(struct cpuinfo_x86 *c) * We know FP/SSE and YMM about eax, and nothing about edx at present. */ xsave_cntxt_size = _xstate_ctxt_size(feature_mask); - printk("xstate: size: %#x and states: %#"PRIx64"\n", - xsave_cntxt_size, xfeature_mask); + printk("xstate: size: %#x and states: %#" PRIx64 "\n", xsave_cntxt_size, + xfeature_mask); - asm ( "fxsave %0" : "=m" (ctxt) ); + asm("fxsave %0" : "=m"(ctxt)); if ( ctxt.mxcsr_mask ) mxcsr_mask = ctxt.mxcsr_mask; } @@ -632,14 +629,14 @@ void xstate_init(struct cpuinfo_x86 *c) cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx); /* Mask out features not currently understood by Xen. */ - eax &= (cpufeat_mask(X86_FEATURE_XSAVEOPT) | - cpufeat_mask(X86_FEATURE_XSAVEC) | - cpufeat_mask(X86_FEATURE_XGETBV1) | - cpufeat_mask(X86_FEATURE_XSAVES)); + eax &= + (cpufeat_mask(X86_FEATURE_XSAVEOPT) | cpufeat_mask(X86_FEATURE_XSAVEC) | + cpufeat_mask(X86_FEATURE_XGETBV1) | cpufeat_mask(X86_FEATURE_XSAVES)); c->x86_capability[cpufeat_word(X86_FEATURE_XSAVEOPT)] = eax; - BUG_ON(eax != boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_XSAVEOPT)]); + BUG_ON(eax != + boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_XSAVEOPT)]); if ( setup_xstate_features(bsp) && bsp ) BUG(); @@ -678,15 +675,12 @@ int validate_xstate(const struct domain *d, uint64_t xcr0, uint64_t xcr0_accum, ((uint64_t)cp->xstate.xcr0_high << 32) | cp->xstate.xcr0_low; unsigned int i; - if ( (hdr->xstate_bv & ~xcr0_accum) || - (xcr0 & ~xcr0_accum) || - (xcr0_accum & ~xcr0_max) || - !valid_xcr0(xcr0) || + if ( (hdr->xstate_bv & ~xcr0_accum) || (xcr0 & ~xcr0_accum) || + (xcr0_accum & ~xcr0_max) || !valid_xcr0(xcr0) || !valid_xcr0(xcr0_accum) ) return -EINVAL; - if ( (xcr0_accum & ~xfeature_mask) || - hdr->xcomp_bv ) + if ( (xcr0_accum & ~xfeature_mask) || hdr->xcomp_bv ) return -EOPNOTSUPP; for ( i = 0; i < ARRAY_SIZE(hdr->reserved); ++i ) @@ -749,7 +743,7 @@ int handle_xsetbv(u32 index, u64 new_bv) clts(); if ( curr->fpu_dirtied ) - asm ( "stmxcsr %0" : "=m" (curr->arch.xsave_area->fpu_sse.mxcsr) ); + asm("stmxcsr %0" : "=m"(curr->arch.xsave_area->fpu_sse.mxcsr)); else if ( xstate_all(curr) ) { /* See the comment in i387.c:vcpu_restore_fpu_eager(). 
*/ @@ -769,8 +763,8 @@ int handle_xsetbv(u32 index, u64 new_bv) uint64_t read_bndcfgu(void) { unsigned long cr0 = read_cr0(); - struct xsave_struct *xstate - = idle_vcpu[smp_processor_id()]->arch.xsave_area; + struct xsave_struct *xstate = + idle_vcpu[smp_processor_id()]->arch.xsave_area; const struct xstate_bndcsr *bndcsr; ASSERT(cpu_has_mpx); @@ -778,17 +772,17 @@ uint64_t read_bndcfgu(void) if ( cpu_has_xsavec ) { - asm ( ".byte 0x0f,0xc7,0x27\n" /* xsavec */ - : "=m" (*xstate) - : "a" (X86_XCR0_BNDCSR), "d" (0), "D" (xstate) ); + asm(".byte 0x0f,0xc7,0x27\n" /* xsavec */ + : "=m"(*xstate) + : "a"(X86_XCR0_BNDCSR), "d"(0), "D"(xstate)); bndcsr = (void *)(xstate + 1); } else { - asm ( ".byte 0x0f,0xae,0x27\n" /* xsave */ - : "=m" (*xstate) - : "a" (X86_XCR0_BNDCSR), "d" (0), "D" (xstate) ); + asm(".byte 0x0f,0xae,0x27\n" /* xsave */ + : "=m"(*xstate) + : "a"(X86_XCR0_BNDCSR), "d"(0), "D"(xstate)); bndcsr = (void *)xstate + xstate_offsets[X86_XCR0_BNDCSR_POS]; } diff --git a/xen/common/argo.c b/xen/common/argo.c index 13052b9239..b248f2e2cf 100644 --- a/xen/common/argo.c +++ b/xen/common/argo.c @@ -46,21 +46,23 @@ CHECK_argo_unregister_ring; CHECK_argo_send_addr; #endif -#define MAX_RINGS_PER_DOMAIN 128U -#define MAX_NOTIFY_COUNT 256U -#define MAX_PENDING_PER_RING 32U +#define MAX_RINGS_PER_DOMAIN 128U +#define MAX_NOTIFY_COUNT 256U +#define MAX_PENDING_PER_RING 32U /* All messages on the ring are padded to a multiple of the slot size. */ #define ROUNDUP_MESSAGE(a) ROUNDUP((a), XEN_ARGO_MSG_SLOT_SIZE) /* The maximum size of a message that may be sent on the largest Argo ring. */ -#define MAX_ARGO_MESSAGE_SIZE ((XEN_ARGO_MAX_RING_SIZE) - \ - (sizeof(struct xen_argo_ring_message_header)) - ROUNDUP_MESSAGE(1)) +#define MAX_ARGO_MESSAGE_SIZE \ + ((XEN_ARGO_MAX_RING_SIZE) - \ + (sizeof(struct xen_argo_ring_message_header)) - ROUNDUP_MESSAGE(1)) /* Number of PAGEs needed to hold a ring of a given size in bytes */ -#define NPAGES_RING(ring_len) \ - (ROUNDUP((ROUNDUP_MESSAGE(ring_len) + sizeof(xen_argo_ring_t)), PAGE_SIZE) \ - >> PAGE_SHIFT) +#define NPAGES_RING(ring_len) \ + (ROUNDUP((ROUNDUP_MESSAGE(ring_len) + sizeof(xen_argo_ring_t)), \ + PAGE_SIZE) >> \ + PAGE_SHIFT) DEFINE_XEN_GUEST_HANDLE(xen_argo_addr_t); DEFINE_XEN_GUEST_HANDLE(xen_argo_gfn_t); @@ -289,7 +291,7 @@ static DEFINE_RWLOCK(L1_global_argo_rwlock); /* L1 */ */ #define LOCKING_Read_L1 (rw_is_locked(&L1_global_argo_rwlock)) -#define LOCKING_Write_rings_L2(d) \ +#define LOCKING_Write_rings_L2(d) \ ((LOCKING_Read_L1 && rw_is_write_locked(&(d)->argo->rings_L2_rwlock)) || \ LOCKING_Write_L1) /* @@ -298,18 +300,19 @@ static DEFINE_RWLOCK(L1_global_argo_rwlock); /* L1 */ * return true if R(L1) && W(L2) is true, because of the way that rw_is_locked * behaves. This results in a slightly shorter and faster implementation. */ -#define LOCKING_Read_rings_L2(d) \ +#define LOCKING_Read_rings_L2(d) \ ((LOCKING_Read_L1 && rw_is_locked(&(d)->argo->rings_L2_rwlock)) || \ LOCKING_Write_L1) /* * Skip checking LOCKING_Write_L1 within this LOCKING_L3 definition because * LOCKING_Write_rings_L2(d) will return true for that condition. 
*/ -#define LOCKING_L3(d, r) \ - ((LOCKING_Read_L1 && rw_is_locked(&(d)->argo->rings_L2_rwlock) \ - && spin_is_locked(&(r)->L3_lock)) || LOCKING_Write_rings_L2(d)) +#define LOCKING_L3(d, r) \ + ((LOCKING_Read_L1 && rw_is_locked(&(d)->argo->rings_L2_rwlock) && \ + spin_is_locked(&(r)->L3_lock)) || \ + LOCKING_Write_rings_L2(d)) -#define LOCKING_send_L2(d) \ +#define LOCKING_send_L2(d) \ ((LOCKING_Read_L1 && spin_is_locked(&(d)->argo->send_L2_lock)) || \ LOCKING_Write_L1) @@ -317,9 +320,9 @@ static DEFINE_RWLOCK(L1_global_argo_rwlock); /* L1 */ #undef ARGO_DEBUG #ifdef ARGO_DEBUG -#define argo_dprintk(format, args...) printk("argo: " format, ## args ) +#define argo_dprintk(format, args...) printk("argo: " format, ##args) #else -#define argo_dprintk(format, ... ) ((void)0) +#define argo_dprintk(format, ...) ((void)0) #endif /* @@ -329,19 +332,18 @@ static DEFINE_RWLOCK(L1_global_argo_rwlock); /* L1 */ * ie. the key is a (domain id, argo port, partner domain id) tuple. * The algorithm approximates the string hashing function 'djb2'. */ -static unsigned int -hash_index(const struct argo_ring_id *id) +static unsigned int hash_index(const struct argo_ring_id *id) { unsigned int hash = 5381; /* prime constant from djb2 */ /* For each input: hash = hash * 33 + */ - hash = ((hash << 5) + hash) + (id->aport & 0xff); - hash = ((hash << 5) + hash) + ((id->aport >> 8) & 0xff); - hash = ((hash << 5) + hash) + ((id->aport >> 16) & 0xff); - hash = ((hash << 5) + hash) + ((id->aport >> 24) & 0xff); - hash = ((hash << 5) + hash) + (id->domain_id & 0xff); - hash = ((hash << 5) + hash) + ((id->domain_id >> 8) & 0xff); - hash = ((hash << 5) + hash) + (id->partner_id & 0xff); + hash = ((hash << 5) + hash) + (id->aport & 0xff); + hash = ((hash << 5) + hash) + ((id->aport >> 8) & 0xff); + hash = ((hash << 5) + hash) + ((id->aport >> 16) & 0xff); + hash = ((hash << 5) + hash) + ((id->aport >> 24) & 0xff); + hash = ((hash << 5) + hash) + (id->domain_id & 0xff); + hash = ((hash << 5) + hash) + ((id->domain_id >> 8) & 0xff); + hash = ((hash << 5) + hash) + (id->partner_id & 0xff); hash = ((hash << 5) + hash) + ((id->partner_id >> 8) & 0xff); /* @@ -351,8 +353,8 @@ hash_index(const struct argo_ring_id *id) return (hash ^ (hash >> 15)) & (ARGO_HASHTABLE_SIZE - 1); } -static struct argo_ring_info * -find_ring_info(const struct domain *d, const struct argo_ring_id *id) +static struct argo_ring_info *find_ring_info(const struct domain *d, + const struct argo_ring_id *id) { struct argo_ring_info *ring_info; const struct list_head *bucket; @@ -362,28 +364,27 @@ find_ring_info(const struct domain *d, const struct argo_ring_id *id) /* List is not modified here. Search and return the match if found. 
*/ bucket = &d->argo->ring_hash[hash_index(id)]; - list_for_each_entry(ring_info, bucket, node) + list_for_each_entry (ring_info, bucket, node) { const struct argo_ring_id *cmpid = &ring_info->id; - if ( cmpid->aport == id->aport && - cmpid->domain_id == id->domain_id && + if ( cmpid->aport == id->aport && cmpid->domain_id == id->domain_id && cmpid->partner_id == id->partner_id ) { - argo_dprintk("found ring_info for ring(%u:%x %u)\n", - id->domain_id, id->aport, id->partner_id); + argo_dprintk("found ring_info for ring(%u:%x %u)\n", id->domain_id, + id->aport, id->partner_id); return ring_info; } } - argo_dprintk("no ring_info for ring(%u:%x %u)\n", - id->domain_id, id->aport, id->partner_id); + argo_dprintk("no ring_info for ring(%u:%x %u)\n", id->domain_id, id->aport, + id->partner_id); return NULL; } -static struct argo_ring_info * -find_ring_info_by_match(const struct domain *d, xen_argo_port_t aport, - domid_t partner_id) +static struct argo_ring_info *find_ring_info_by_match(const struct domain *d, + xen_argo_port_t aport, + domid_t partner_id) { struct argo_ring_id id; struct argo_ring_info *ring_info; @@ -403,8 +404,8 @@ find_ring_info_by_match(const struct domain *d, xen_argo_port_t aport, return find_ring_info(d, &id); } -static struct argo_send_info * -find_send_info(const struct domain *d, const struct argo_ring_id *id) +static struct argo_send_info *find_send_info(const struct domain *d, + const struct argo_ring_id *id) { struct argo_send_info *send_info; const struct list_head *bucket; @@ -414,35 +415,32 @@ find_send_info(const struct domain *d, const struct argo_ring_id *id) /* List is not modified here. Search and return the match if found. */ bucket = &d->argo->send_hash[hash_index(id)]; - list_for_each_entry(send_info, bucket, node) + list_for_each_entry (send_info, bucket, node) { const struct argo_ring_id *cmpid = &send_info->id; - if ( cmpid->aport == id->aport && - cmpid->domain_id == id->domain_id && + if ( cmpid->aport == id->aport && cmpid->domain_id == id->domain_id && cmpid->partner_id == id->partner_id ) { - argo_dprintk("found send_info for ring(%u:%x %u)\n", - id->domain_id, id->aport, id->partner_id); + argo_dprintk("found send_info for ring(%u:%x %u)\n", id->domain_id, + id->aport, id->partner_id); return send_info; } } - argo_dprintk("no send_info for ring(%u:%x %u)\n", - id->domain_id, id->aport, id->partner_id); + argo_dprintk("no send_info for ring(%u:%x %u)\n", id->domain_id, id->aport, + id->partner_id); return NULL; } -static void -signal_domain(struct domain *d) +static void signal_domain(struct domain *d) { argo_dprintk("signalling domid:%u\n", d->domain_id); send_guest_global_virq(d, VIRQ_ARGO); } -static void -signal_domid(domid_t domain_id) +static void signal_domid(domid_t domain_id) { struct domain *d = get_domain_by_id(domain_id); @@ -453,8 +451,7 @@ signal_domid(domid_t domain_id) put_domain(d); } -static void -ring_unmap(const struct domain *d, struct argo_ring_info *ring_info) +static void ring_unmap(const struct domain *d, struct argo_ring_info *ring_info) { unsigned int i; @@ -471,7 +468,7 @@ ring_unmap(const struct domain *d, struct argo_ring_info *ring_info) continue; ASSERT(!mfn_eq(ring_info->mfns[i], INVALID_MFN)); - argo_dprintk(XENLOG_ERR "argo: unmapping page %"PRI_mfn" from %p\n", + argo_dprintk(XENLOG_ERR "argo: unmapping page %" PRI_mfn " from %p\n", mfn_x(ring_info->mfns[i]), ring_info->mfn_mapping[i]); unmap_domain_page_global(ring_info->mfn_mapping[i]); @@ -479,9 +476,9 @@ ring_unmap(const struct domain *d, struct 
argo_ring_info *ring_info) } } -static int -ring_map_page(const struct domain *d, struct argo_ring_info *ring_info, - unsigned int i, void **out_ptr) +static int ring_map_page(const struct domain *d, + struct argo_ring_info *ring_info, unsigned int i, + void **out_ptr) { ASSERT(LOCKING_L3(d, ring_info)); @@ -496,7 +493,7 @@ ring_map_page(const struct domain *d, struct argo_ring_info *ring_info, if ( i >= ring_info->nmfns ) { gprintk(XENLOG_ERR, - "argo: ring (vm%u:%x vm%u) %p attempted to map page %u of %u\n", + "argo: ring (vm%u:%x vm%u) %p attempted to map page %u of %u\n", ring_info->id.domain_id, ring_info->id.aport, ring_info->id.partner_id, ring_info, i, ring_info->nmfns); return -ENOMEM; @@ -515,13 +512,14 @@ ring_map_page(const struct domain *d, struct argo_ring_info *ring_info, ring_info->mfn_mapping[i] = map_domain_page_global(ring_info->mfns[i]); if ( !ring_info->mfn_mapping[i] ) { - gprintk(XENLOG_ERR, "argo: ring (vm%u:%x vm%u) %p attempted to map " + gprintk(XENLOG_ERR, + "argo: ring (vm%u:%x vm%u) %p attempted to map " "page %u of %u\n", ring_info->id.domain_id, ring_info->id.aport, ring_info->id.partner_id, ring_info, i, ring_info->nmfns); return -ENOMEM; } - argo_dprintk("mapping page %"PRI_mfn" to %p\n", + argo_dprintk("mapping page %" PRI_mfn " to %p\n", mfn_x(ring_info->mfns[i]), ring_info->mfn_mapping[i]); } @@ -531,9 +529,8 @@ ring_map_page(const struct domain *d, struct argo_ring_info *ring_info, return 0; } -static void -update_tx_ptr(const struct domain *d, struct argo_ring_info *ring_info, - uint32_t tx_ptr) +static void update_tx_ptr(const struct domain *d, + struct argo_ring_info *ring_info, uint32_t tx_ptr) { xen_argo_ring_t *ringp; @@ -547,11 +544,11 @@ update_tx_ptr(const struct domain *d, struct argo_ring_info *ring_info, smp_wmb(); } -static int -memcpy_to_guest_ring(const struct domain *d, struct argo_ring_info *ring_info, - unsigned int offset, - const void *src, XEN_GUEST_HANDLE(uint8) src_hnd, - unsigned int len) +static int memcpy_to_guest_ring(const struct domain *d, + struct argo_ring_info *ring_info, + unsigned int offset, const void *src, + XEN_GUEST_HANDLE(uint8) src_hnd, + unsigned int len) { unsigned int mfns_index = offset >> PAGE_SHIFT; void *dst; @@ -567,8 +564,8 @@ memcpy_to_guest_ring(const struct domain *d, struct argo_ring_info *ring_info, while ( len ) { - unsigned int head_len = (offset + len) > PAGE_SIZE ? PAGE_SIZE - offset - : len; + unsigned int head_len = + (offset + len) > PAGE_SIZE ? PAGE_SIZE - offset : len; ret = ring_map_page(d, ring_info, mfns_index, &dst); if ( ret ) @@ -599,9 +596,8 @@ memcpy_to_guest_ring(const struct domain *d, struct argo_ring_info *ring_info, * Use this with caution: rx_ptr is under guest control and may be bogus. * See get_sanitized_ring for a safer alternative. */ -static int -get_rx_ptr(const struct domain *d, struct argo_ring_info *ring_info, - uint32_t *rx_ptr) +static int get_rx_ptr(const struct domain *d, struct argo_ring_info *ring_info, + uint32_t *rx_ptr) { void *src; xen_argo_ring_t *ringp; @@ -629,9 +625,8 @@ get_rx_ptr(const struct domain *d, struct argo_ring_info *ring_info, * wrap is handled. Simplifies safe use of the rx_ptr for available * space calculation. 
*/ -static int -get_sanitized_ring(const struct domain *d, xen_argo_ring_t *ring, - struct argo_ring_info *ring_info) +static int get_sanitized_ring(const struct domain *d, xen_argo_ring_t *ring, + struct argo_ring_info *ring_info) { uint32_t rx_ptr; int ret; @@ -653,8 +648,8 @@ get_sanitized_ring(const struct domain *d, xen_argo_ring_t *ring, return 0; } -static unsigned int -ringbuf_payload_space(const struct domain *d, struct argo_ring_info *ring_info) +static unsigned int ringbuf_payload_space(const struct domain *d, + struct argo_ring_info *ring_info) { xen_argo_ring_t ring; unsigned int len; @@ -718,9 +713,8 @@ ringbuf_payload_space(const struct domain *d, struct argo_ring_info *ring_info) * potential for a negative return value to be used incorrectly * (eg. coerced into an unsigned variable resulting in a large incorrect value) */ -static int -iov_count(const xen_argo_iov_t *piov, unsigned int niov, - unsigned int *count) +static int iov_count(const xen_argo_iov_t *piov, unsigned int niov, + unsigned int *count) { unsigned int sum_iov_lens = 0; @@ -739,8 +733,8 @@ iov_count(const xen_argo_iov_t *piov, unsigned int niov, /* check each to protect sum against integer overflow */ if ( piov->iov_len > MAX_ARGO_MESSAGE_SIZE ) { - argo_dprintk("invalid iov_len: too big (%u)>%llu\n", - piov->iov_len, MAX_ARGO_MESSAGE_SIZE); + argo_dprintk("invalid iov_len: too big (%u)>%llu\n", piov->iov_len, + MAX_ARGO_MESSAGE_SIZE); return -EINVAL; } @@ -762,18 +756,18 @@ iov_count(const xen_argo_iov_t *piov, unsigned int niov, return 0; } -static int -ringbuf_insert(const struct domain *d, struct argo_ring_info *ring_info, - const struct argo_ring_id *src_id, xen_argo_iov_t *iovs, - unsigned int niov, uint32_t message_type, - unsigned long *out_len) +static int ringbuf_insert(const struct domain *d, + struct argo_ring_info *ring_info, + const struct argo_ring_id *src_id, + xen_argo_iov_t *iovs, unsigned int niov, + uint32_t message_type, unsigned long *out_len) { xen_argo_ring_t ring; - struct xen_argo_ring_message_header mh = { }; + struct xen_argo_ring_message_header mh = {}; int sp, ret; unsigned int len = 0; xen_argo_iov_t *piov; - XEN_GUEST_HANDLE(uint8) NULL_hnd = { }; + XEN_GUEST_HANDLE(uint8) NULL_hnd = {}; ASSERT(LOCKING_L3(d, ring_info)); @@ -793,8 +787,8 @@ ringbuf_insert(const struct domain *d, struct argo_ring_info *ring_info, * remaining so we can distinguish a full ring from an empty one. * iov_count has already verified: len <= MAX_ARGO_MESSAGE_SIZE. */ - if ( ring_info->len <= (sizeof(struct xen_argo_ring_message_header) + - ROUNDUP_MESSAGE(len)) ) + if ( ring_info->len <= + (sizeof(struct xen_argo_ring_message_header) + ROUNDUP_MESSAGE(len)) ) return -EMSGSIZE; ret = get_sanitized_ring(d, &ring, ring_info); @@ -818,8 +812,8 @@ ringbuf_insert(const struct domain *d, struct argo_ring_info *ring_info, * Size bounds check against currently available space in the ring. * Again: the message must not fill the ring leaving no space remaining. */ - if ( (ROUNDUP_MESSAGE(len) + - sizeof(struct xen_argo_ring_message_header)) >= sp ) + if ( (ROUNDUP_MESSAGE(len) + sizeof(struct xen_argo_ring_message_header)) >= + sp ) { argo_dprintk("EAGAIN\n"); return -EAGAIN; @@ -834,8 +828,8 @@ ringbuf_insert(const struct domain *d, struct argo_ring_info *ring_info, * For this copy to the guest ring, tx_ptr is always 16-byte aligned * and the message header is 16 bytes long. 
*/ - BUILD_BUG_ON( - sizeof(struct xen_argo_ring_message_header) != ROUNDUP_MESSAGE(1)); + BUILD_BUG_ON(sizeof(struct xen_argo_ring_message_header) != + ROUNDUP_MESSAGE(1)); /* * First data write into the destination ring: fixed size, message header. @@ -843,8 +837,8 @@ ringbuf_insert(const struct domain *d, struct argo_ring_info *ring_info, * is checked above and must be at least this size. */ ret = memcpy_to_guest_ring(d, ring_info, - ring.tx_ptr + sizeof(xen_argo_ring_t), - &mh, NULL_hnd, sizeof(mh)); + ring.tx_ptr + sizeof(xen_argo_ring_t), &mh, + NULL_hnd, sizeof(mh)); if ( ret ) { gprintk(XENLOG_ERR, @@ -878,9 +872,8 @@ ringbuf_insert(const struct domain *d, struct argo_ring_info *ring_info, if ( unlikely(!guest_handle_okay(buf_hnd, iov_len)) ) { gprintk(XENLOG_ERR, - "argo: bad iov handle [%p, %u] (vm%u:%x vm%u)\n", - buf_hnd.p, iov_len, - ring_info->id.domain_id, ring_info->id.aport, + "argo: bad iov handle [%p, %u] (vm%u:%x vm%u)\n", buf_hnd.p, + iov_len, ring_info->id.domain_id, ring_info->id.aport, ring_info->id.partner_id); return -EFAULT; @@ -906,9 +899,8 @@ ringbuf_insert(const struct domain *d, struct argo_ring_info *ring_info, { gprintk(XENLOG_ERR, "argo: failed to copy {%p, %d} (vm%u:%x vm%u)\n", - buf_hnd.p, sp, - ring_info->id.domain_id, ring_info->id.aport, - ring_info->id.partner_id); + buf_hnd.p, sp, ring_info->id.domain_id, + ring_info->id.aport, ring_info->id.partner_id); return ret; } @@ -944,14 +936,14 @@ ringbuf_insert(const struct domain *d, struct argo_ring_info *ring_info, * so this write cannot overrun. */ ret = memcpy_to_guest_ring(d, ring_info, - ring.tx_ptr + sizeof(xen_argo_ring_t), - NULL, buf_hnd, iov_len); + ring.tx_ptr + sizeof(xen_argo_ring_t), NULL, + buf_hnd, iov_len); if ( ret ) { gprintk(XENLOG_ERR, - "argo: failed to copy [%p, %u] (vm%u:%x vm%u)\n", - buf_hnd.p, iov_len, ring_info->id.domain_id, - ring_info->id.aport, ring_info->id.partner_id); + "argo: failed to copy [%p, %u] (vm%u:%x vm%u)\n", buf_hnd.p, + iov_len, ring_info->id.domain_id, ring_info->id.aport, + ring_info->id.partner_id); return ret; } @@ -988,8 +980,8 @@ ringbuf_insert(const struct domain *d, struct argo_ring_info *ring_info, return ret; } -static void -wildcard_pending_list_remove(domid_t domain_id, struct pending_ent *ent) +static void wildcard_pending_list_remove(domid_t domain_id, + struct pending_ent *ent) { struct domain *d = get_domain_by_id(domain_id); @@ -1007,8 +999,8 @@ wildcard_pending_list_remove(domid_t domain_id, struct pending_ent *ent) put_domain(d); } -static void -wildcard_pending_list_insert(domid_t domain_id, struct pending_ent *ent) +static void wildcard_pending_list_insert(domid_t domain_id, + struct pending_ent *ent) { struct domain *d = get_domain_by_id(domain_id); @@ -1026,8 +1018,8 @@ wildcard_pending_list_insert(domid_t domain_id, struct pending_ent *ent) put_domain(d); } -static void -pending_remove_all(const struct domain *d, struct argo_ring_info *ring_info) +static void pending_remove_all(const struct domain *d, + struct argo_ring_info *ring_info) { struct pending_ent *ent; @@ -1046,16 +1038,15 @@ pending_remove_all(const struct domain *d, struct argo_ring_info *ring_info) ring_info->npending = 0; } -static void -pending_notify(struct list_head *to_notify) +static void pending_notify(struct list_head *to_notify) { struct pending_ent *ent; ASSERT(LOCKING_Read_L1); /* Sending signals for all ents in this list, draining until it is empty. 
*/ - while ( (ent = list_first_entry_or_null(to_notify, struct pending_ent, - node)) ) + while ( + (ent = list_first_entry_or_null(to_notify, struct pending_ent, node)) ) { list_del(&ent->node); signal_domid(ent->domain_id); @@ -1063,9 +1054,10 @@ pending_notify(struct list_head *to_notify) } } -static void -pending_find(const struct domain *d, struct argo_ring_info *ring_info, - unsigned int payload_space, struct list_head *to_notify) +static void pending_find(const struct domain *d, + struct argo_ring_info *ring_info, + unsigned int payload_space, + struct list_head *to_notify) { struct pending_ent *ent, *next; @@ -1104,9 +1096,9 @@ pending_find(const struct domain *d, struct argo_ring_info *ring_info, spin_unlock(&ring_info->L3_lock); } -static int -pending_queue(const struct domain *d, struct argo_ring_info *ring_info, - domid_t src_id, unsigned int len) +static int pending_queue(const struct domain *d, + struct argo_ring_info *ring_info, domid_t src_id, + unsigned int len) { struct pending_ent *ent; @@ -1131,16 +1123,16 @@ pending_queue(const struct domain *d, struct argo_ring_info *ring_info, return 0; } -static int -pending_requeue(const struct domain *d, struct argo_ring_info *ring_info, - domid_t src_id, unsigned int len) +static int pending_requeue(const struct domain *d, + struct argo_ring_info *ring_info, domid_t src_id, + unsigned int len) { struct pending_ent *ent; ASSERT(LOCKING_L3(d, ring_info)); /* List structure is not modified here. Update len in a match if found. */ - list_for_each_entry(ent, &ring_info->pending, node) + list_for_each_entry (ent, &ring_info->pending, node) { if ( ent->domain_id == src_id ) { @@ -1164,9 +1156,8 @@ pending_requeue(const struct domain *d, struct argo_ring_info *ring_info, return pending_queue(d, ring_info, src_id, len); } -static void -pending_cancel(const struct domain *d, struct argo_ring_info *ring_info, - domid_t src_id) +static void pending_cancel(const struct domain *d, + struct argo_ring_info *ring_info, domid_t src_id) { struct pending_ent *ent, *next; @@ -1187,8 +1178,7 @@ pending_cancel(const struct domain *d, struct argo_ring_info *ring_info, } } -static void -wildcard_rings_pending_remove(struct domain *d) +static void wildcard_rings_pending_remove(struct domain *d) { struct pending_ent *ent; @@ -1211,8 +1201,8 @@ wildcard_rings_pending_remove(struct domain *d) } } -static void -ring_remove_mfns(const struct domain *d, struct argo_ring_info *ring_info) +static void ring_remove_mfns(const struct domain *d, + struct argo_ring_info *ring_info) { unsigned int i; @@ -1238,8 +1228,8 @@ ring_remove_mfns(const struct domain *d, struct argo_ring_info *ring_info) XFREE(ring_info->mfn_mapping); } -static void -ring_remove_info(const struct domain *d, struct argo_ring_info *ring_info) +static void ring_remove_info(const struct domain *d, + struct argo_ring_info *ring_info) { ASSERT(LOCKING_Write_rings_L2(d)); @@ -1249,8 +1239,7 @@ ring_remove_info(const struct domain *d, struct argo_ring_info *ring_info) xfree(ring_info); } -static void -domain_rings_remove_all(struct domain *d) +static void domain_rings_remove_all(struct domain *d) { unsigned int i; @@ -1261,9 +1250,8 @@ domain_rings_remove_all(struct domain *d) struct argo_ring_info *ring_info; struct list_head *bucket = &d->argo->ring_hash[i]; - while ( (ring_info = list_first_entry_or_null(bucket, - struct argo_ring_info, - node)) ) + while ( (ring_info = list_first_entry_or_null( + bucket, struct argo_ring_info, node)) ) ring_remove_info(d, ring_info); } d->argo->ring_count = 0; @@ 
-1274,8 +1262,7 @@ domain_rings_remove_all(struct domain *d) * (ie. it is the single domain that can send to those rings.) * This will also cancel any pending notifications about those rings. */ -static void -partner_rings_remove(struct domain *src_d) +static void partner_rings_remove(struct domain *src_d) { unsigned int i; @@ -1287,9 +1274,8 @@ partner_rings_remove(struct domain *src_d) struct list_head *bucket = &src_d->argo->send_hash[i]; /* Remove all ents from the send list. Take each off their ring list. */ - while ( (send_info = list_first_entry_or_null(bucket, - struct argo_send_info, - node)) ) + while ( (send_info = list_first_entry_or_null( + bucket, struct argo_send_info, node)) ) { struct domain *dst_d = get_domain_by_id(send_info->id.domain_id); @@ -1318,9 +1304,9 @@ partner_rings_remove(struct domain *src_d) } } -static int -fill_ring_data(const struct domain *currd, - XEN_GUEST_HANDLE(xen_argo_ring_data_ent_t) data_ent_hnd) +static int fill_ring_data(const struct domain *currd, + XEN_GUEST_HANDLE(xen_argo_ring_data_ent_t) + data_ent_hnd) { xen_argo_ring_data_ent_t ent; struct domain *dst_d; @@ -1355,8 +1341,8 @@ fill_ring_data(const struct domain *currd, read_lock(&dst_d->argo->rings_L2_rwlock); - ring_info = find_ring_info_by_match(dst_d, ent.ring.aport, - currd->domain_id); + ring_info = + find_ring_info_by_match(dst_d, ent.ring.aport, currd->domain_id); if ( ring_info ) { unsigned int space_avail; @@ -1366,8 +1352,8 @@ fill_ring_data(const struct domain *currd, spin_lock(&ring_info->L3_lock); ent.max_message_size = ring_info->len - - sizeof(struct xen_argo_ring_message_header) - - ROUNDUP_MESSAGE(1); + sizeof(struct xen_argo_ring_message_header) - + ROUNDUP_MESSAGE(1); if ( ring_info->id.partner_id == XEN_ARGO_DOMID_ANY ) ent.flags |= XEN_ARGO_RING_SHARED; @@ -1407,11 +1393,10 @@ fill_ring_data(const struct domain *currd, if ( space_avail == ent.max_message_size ) ent.flags |= XEN_ARGO_RING_EMPTY; - } read_unlock(&dst_d->argo->rings_L2_rwlock); - out: +out: if ( dst_d ) put_domain(dst_d); @@ -1422,8 +1407,7 @@ fill_ring_data(const struct domain *currd, return ret; } -static int -find_ring_mfn(struct domain *d, gfn_t gfn, mfn_t *mfn) +static int find_ring_mfn(struct domain *d, gfn_t gfn, mfn_t *mfn) { struct page_info *page; p2m_type_t p2mt; @@ -1449,11 +1433,10 @@ find_ring_mfn(struct domain *d, gfn_t gfn, mfn_t *mfn) return ret; } -static int -find_ring_mfns(struct domain *d, struct argo_ring_info *ring_info, - const unsigned int npage, - XEN_GUEST_HANDLE_PARAM(xen_argo_gfn_t) gfn_hnd, - const unsigned int len) +static int find_ring_mfns(struct domain *d, struct argo_ring_info *ring_info, + const unsigned int npage, + XEN_GUEST_HANDLE_PARAM(xen_argo_gfn_t) gfn_hnd, + const unsigned int len) { unsigned int i; int ret = 0; @@ -1465,10 +1448,11 @@ find_ring_mfns(struct domain *d, struct argo_ring_info *ring_info, if ( ring_info->mfns ) { /* Ring already existed: drop the previous mapping. 
*/ - gprintk(XENLOG_INFO, "argo: vm%u re-register existing ring " + gprintk(XENLOG_INFO, + "argo: vm%u re-register existing ring " "(vm%u:%x vm%u) clears mapping\n", - d->domain_id, ring_info->id.domain_id, - ring_info->id.aport, ring_info->id.partner_id); + d->domain_id, ring_info->id.domain_id, ring_info->id.aport, + ring_info->id.partner_id); ring_remove_mfns(d, ring_info); ASSERT(!ring_info->mfns); @@ -1503,7 +1487,8 @@ find_ring_mfns(struct domain *d, struct argo_ring_info *ring_info, ret = find_ring_mfn(d, _gfn(argo_gfn), &mfn); if ( ret ) { - gprintk(XENLOG_ERR, "argo: vm%u: invalid gfn %"PRI_gfn" " + gprintk(XENLOG_ERR, + "argo: vm%u: invalid gfn %" PRI_gfn " " "r:(vm%u:%x vm%u) %p %u/%u\n", d->domain_id, gfn_x(_gfn(argo_gfn)), ring_info->id.domain_id, ring_info->id.aport, @@ -1513,8 +1498,8 @@ find_ring_mfns(struct domain *d, struct argo_ring_info *ring_info, ring_info->mfns[i] = mfn; - argo_dprintk("%u: %"PRI_gfn" -> %"PRI_mfn"\n", - i, gfn_x(_gfn(argo_gfn)), mfn_x(ring_info->mfns[i])); + argo_dprintk("%u: %" PRI_gfn " -> %" PRI_mfn "\n", i, + gfn_x(_gfn(argo_gfn)), mfn_x(ring_info->mfns[i])); } ring_info->nmfns = i; @@ -1525,19 +1510,20 @@ find_ring_mfns(struct domain *d, struct argo_ring_info *ring_info, { ASSERT(ring_info->nmfns == NPAGES_RING(len)); - gprintk(XENLOG_DEBUG, "argo: vm%u ring (vm%u:%x vm%u) %p " + gprintk(XENLOG_DEBUG, + "argo: vm%u ring (vm%u:%x vm%u) %p " "mfn_mapping %p len %u nmfns %u\n", - d->domain_id, ring_info->id.domain_id, - ring_info->id.aport, ring_info->id.partner_id, ring_info, - ring_info->mfn_mapping, ring_info->len, ring_info->nmfns); + d->domain_id, ring_info->id.domain_id, ring_info->id.aport, + ring_info->id.partner_id, ring_info, ring_info->mfn_mapping, + ring_info->len, ring_info->nmfns); } return ret; } -static long -unregister_ring(struct domain *currd, - XEN_GUEST_HANDLE_PARAM(xen_argo_unregister_ring_t) unreg_hnd) +static long unregister_ring(struct domain *currd, + XEN_GUEST_HANDLE_PARAM(xen_argo_unregister_ring_t) + unreg_hnd) { xen_argo_unregister_ring_t unreg; struct argo_ring_id ring_id; @@ -1594,7 +1580,7 @@ unregister_ring(struct domain *currd, spin_unlock(&dst_d->argo->send_L2_lock); - out: +out: write_unlock(&currd->argo->rings_L2_rwlock); read_unlock(&L1_global_argo_rwlock); @@ -1614,11 +1600,11 @@ unregister_ring(struct domain *currd, return 0; } -static long -register_ring(struct domain *currd, - XEN_GUEST_HANDLE_PARAM(xen_argo_register_ring_t) reg_hnd, - XEN_GUEST_HANDLE_PARAM(xen_argo_gfn_t) gfn_hnd, - unsigned int npage, unsigned int flags) +static long register_ring(struct domain *currd, + XEN_GUEST_HANDLE_PARAM(xen_argo_register_ring_t) + reg_hnd, + XEN_GUEST_HANDLE_PARAM(xen_argo_gfn_t) gfn_hnd, + unsigned int npage, unsigned int flags) { xen_argo_register_ring_t reg; struct argo_ring_id ring_id; @@ -1650,12 +1636,11 @@ register_ring(struct domain *currd, * message is present. * The above determines the minimum acceptable ring size. 
*/ - if ( (reg.len < (sizeof(struct xen_argo_ring_message_header) - + ROUNDUP_MESSAGE(1) + ROUNDUP_MESSAGE(1))) || + if ( (reg.len < (sizeof(struct xen_argo_ring_message_header) + + ROUNDUP_MESSAGE(1) + ROUNDUP_MESSAGE(1))) || (reg.len > XEN_ARGO_MAX_RING_SIZE) || (reg.len != ROUNDUP_MESSAGE(reg.len)) || - (NPAGES_RING(reg.len) != npage) || - (reg.pad != 0) ) + (NPAGES_RING(reg.len) != npage) || (reg.pad != 0) ) return -EINVAL; ring_id.partner_id = reg.partner_id; @@ -1664,8 +1649,8 @@ register_ring(struct domain *currd, if ( reg.partner_id == XEN_ARGO_DOMID_ANY ) { - ret = opt_argo_mac_permissive ? xsm_argo_register_any_source(currd) : - -EPERM; + ret = opt_argo_mac_permissive ? xsm_argo_register_any_source(currd) + : -EPERM; if ( ret ) return ret; } @@ -1751,7 +1736,8 @@ register_ring(struct domain *currd, */ if ( flags & XEN_ARGO_REGISTER_FLAG_FAIL_EXIST ) { - gprintk(XENLOG_ERR, "argo: vm%u disallowed reregistration of " + gprintk(XENLOG_ERR, + "argo: vm%u disallowed reregistration of " "existing ring (vm%u:%x vm%u)\n", currd->domain_id, ring_id.domain_id, ring_id.aport, ring_id.partner_id); @@ -1767,7 +1753,8 @@ register_ring(struct domain *currd, * Simple blunt solution: disallow ring resize for now. * TODO: investigate enabling ring resize. */ - gprintk(XENLOG_ERR, "argo: vm%u attempted to change ring size " + gprintk(XENLOG_ERR, + "argo: vm%u attempted to change ring size " "(vm%u:%x vm%u)\n", currd->domain_id, ring_id.domain_id, ring_id.aport, ring_id.partner_id); @@ -1846,13 +1833,13 @@ register_ring(struct domain *currd, spin_unlock(&dst_d->argo->send_L2_lock); } - out_unlock2: +out_unlock2: write_unlock(&currd->argo->rings_L2_rwlock); - out_unlock: +out_unlock: read_unlock(&L1_global_argo_rwlock); - out: +out: if ( dst_d ) put_domain(dst_d); @@ -1864,9 +1851,9 @@ register_ring(struct domain *currd, return ret; } -static void -notify_ring(const struct domain *d, struct argo_ring_info *ring_info, - struct list_head *to_notify) +static void notify_ring(const struct domain *d, + struct argo_ring_info *ring_info, + struct list_head *to_notify) { unsigned int space; @@ -1885,8 +1872,7 @@ notify_ring(const struct domain *d, struct argo_ring_info *ring_info, pending_find(d, ring_info, space, to_notify); } -static void -notify_check_pending(struct domain *d) +static void notify_check_pending(struct domain *d) { unsigned int i; LIST_HEAD(to_notify); @@ -1911,9 +1897,8 @@ notify_check_pending(struct domain *d) pending_notify(&to_notify); } -static long -notify(struct domain *currd, - XEN_GUEST_HANDLE_PARAM(xen_argo_ring_data_t) ring_data_hnd) +static long notify(struct domain *currd, + XEN_GUEST_HANDLE_PARAM(xen_argo_ring_data_t) ring_data_hnd) { XEN_GUEST_HANDLE(xen_argo_ring_data_ent_t) ent_hnd; xen_argo_ring_data_t ring_data; @@ -1947,8 +1932,8 @@ notify(struct domain *currd, goto out; } - ent_hnd = guest_handle_for_field(ring_data_hnd, - xen_argo_ring_data_ent_t, data[0]); + ent_hnd = guest_handle_for_field(ring_data_hnd, xen_argo_ring_data_ent_t, + data[0]); if ( unlikely(!guest_handle_okay(ent_hnd, ring_data.nent)) ) { ret = -EFAULT; @@ -1961,16 +1946,15 @@ notify(struct domain *currd, guest_handle_add_offset(ent_hnd, 1); } - out: +out: read_unlock(&L1_global_argo_rwlock); return ret; } -static long -sendv(struct domain *src_d, xen_argo_addr_t *src_addr, - const xen_argo_addr_t *dst_addr, xen_argo_iov_t *iovs, unsigned int niov, - uint32_t message_type) +static long sendv(struct domain *src_d, xen_argo_addr_t *src_addr, + const xen_argo_addr_t *dst_addr, xen_argo_iov_t *iovs, + 
unsigned int niov, uint32_t message_type) { struct domain *dst_d = NULL; struct argo_ring_id src_id; @@ -1987,7 +1971,7 @@ sendv(struct domain *src_d, xen_argo_addr_t *src_addr, return -EINVAL; if ( src_addr->domain_id == XEN_ARGO_DOMID_ANY ) - src_addr->domain_id = src_d->domain_id; + src_addr->domain_id = src_d->domain_id; /* No domain is currently authorized to send on behalf of another */ if ( unlikely(src_addr->domain_id != src_d->domain_id) ) @@ -2004,8 +1988,8 @@ sendv(struct domain *src_d, xen_argo_addr_t *src_addr, ret = xsm_argo_send(src_d, dst_d); if ( ret ) { - gprintk(XENLOG_ERR, "argo: XSM REJECTED %i -> %i\n", - src_d->domain_id, dst_d->domain_id); + gprintk(XENLOG_ERR, "argo: XSM REJECTED %i -> %i\n", src_d->domain_id, + dst_d->domain_id); put_domain(dst_d); @@ -2029,8 +2013,8 @@ sendv(struct domain *src_d, xen_argo_addr_t *src_addr, read_lock(&dst_d->argo->rings_L2_rwlock); - ring_info = find_ring_info_by_match(dst_d, dst_addr->aport, - src_id.domain_id); + ring_info = + find_ring_info_by_match(dst_d, dst_addr->aport, src_id.domain_id); if ( !ring_info ) { gprintk(XENLOG_ERR, @@ -2058,7 +2042,7 @@ sendv(struct domain *src_d, xen_argo_addr_t *src_addr, read_unlock(&dst_d->argo->rings_L2_rwlock); - out_unlock: +out_unlock: read_unlock(&L1_global_argo_rwlock); if ( ret >= 0 ) @@ -2067,20 +2051,19 @@ sendv(struct domain *src_d, xen_argo_addr_t *src_addr, if ( dst_d ) put_domain(dst_d); - return ( ret < 0 ) ? ret : len; + return (ret < 0) ? ret : len; } -long -do_argo_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg1, - XEN_GUEST_HANDLE_PARAM(void) arg2, unsigned long raw_arg3, - unsigned long raw_arg4) +long do_argo_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg1, + XEN_GUEST_HANDLE_PARAM(void) arg2, unsigned long raw_arg3, + unsigned long raw_arg4) { struct domain *currd = current->domain; long rc; unsigned int arg3 = raw_arg3, arg4 = raw_arg4; - argo_dprintk("->do_argo_op(%u,%p,%p,%lu,0x%lx)\n", cmd, - (void *)arg1.p, (void *)arg2.p, raw_arg3, raw_arg4); + argo_dprintk("->do_argo_op(%u,%p,%p,%lu,0x%lx)\n", cmd, (void *)arg1.p, + (void *)arg2.p, raw_arg3, raw_arg4); /* Reject numeric hypercall args outside 32-bit range */ if ( (arg3 != raw_arg3) || (arg4 != raw_arg4) ) @@ -2093,14 +2076,14 @@ do_argo_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg1, if ( rc ) return rc; - switch ( cmd ) + switch (cmd) { case XEN_ARGO_OP_register_ring: { - XEN_GUEST_HANDLE_PARAM(xen_argo_register_ring_t) reg_hnd = - guest_handle_cast(arg1, xen_argo_register_ring_t); - XEN_GUEST_HANDLE_PARAM(xen_argo_gfn_t) gfn_hnd = - guest_handle_cast(arg2, xen_argo_gfn_t); + XEN_GUEST_HANDLE_PARAM(xen_argo_register_ring_t) + reg_hnd = guest_handle_cast(arg1, xen_argo_register_ring_t); + XEN_GUEST_HANDLE_PARAM(xen_argo_gfn_t) + gfn_hnd = guest_handle_cast(arg2, xen_argo_gfn_t); /* arg3: npage, arg4: flags */ BUILD_BUG_ON(!IS_ALIGNED(XEN_ARGO_MAX_RING_SIZE, PAGE_SIZE)); @@ -2124,8 +2107,8 @@ do_argo_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg1, case XEN_ARGO_OP_unregister_ring: { - XEN_GUEST_HANDLE_PARAM(xen_argo_unregister_ring_t) unreg_hnd = - guest_handle_cast(arg1, xen_argo_unregister_ring_t); + XEN_GUEST_HANDLE_PARAM(xen_argo_unregister_ring_t) + unreg_hnd = guest_handle_cast(arg1, xen_argo_unregister_ring_t); if ( unlikely((!guest_handle_is_null(arg2)) || arg3 || arg4) ) { @@ -2143,10 +2126,10 @@ do_argo_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg1, xen_argo_iov_t iovs[XEN_ARGO_MAXIOV]; unsigned int niov; - XEN_GUEST_HANDLE_PARAM(xen_argo_send_addr_t) send_addr_hnd = 
- guest_handle_cast(arg1, xen_argo_send_addr_t); - XEN_GUEST_HANDLE_PARAM(xen_argo_iov_t) iovs_hnd = - guest_handle_cast(arg2, xen_argo_iov_t); + XEN_GUEST_HANDLE_PARAM(xen_argo_send_addr_t) + send_addr_hnd = guest_handle_cast(arg1, xen_argo_send_addr_t); + XEN_GUEST_HANDLE_PARAM(xen_argo_iov_t) + iovs_hnd = guest_handle_cast(arg2, xen_argo_iov_t); /* arg3 is niov */ /* arg4 is message_type. Must be a 32-bit value. */ @@ -2184,8 +2167,8 @@ do_argo_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg1, case XEN_ARGO_OP_notify: { - XEN_GUEST_HANDLE_PARAM(xen_argo_ring_data_t) ring_data_hnd = - guest_handle_cast(arg1, xen_argo_ring_data_t); + XEN_GUEST_HANDLE_PARAM(xen_argo_ring_data_t) + ring_data_hnd = guest_handle_cast(arg1, xen_argo_ring_data_t); if ( unlikely((!guest_handle_is_null(arg2)) || arg3 || arg4) ) { @@ -2208,10 +2191,9 @@ do_argo_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg1, } #ifdef CONFIG_COMPAT -long -compat_argo_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg1, - XEN_GUEST_HANDLE_PARAM(void) arg2, unsigned long arg3, - unsigned long arg4) +long compat_argo_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg1, + XEN_GUEST_HANDLE_PARAM(void) arg2, unsigned long arg3, + unsigned long arg4) { struct domain *currd = current->domain; long rc; @@ -2235,8 +2217,8 @@ compat_argo_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg1, if ( rc ) return rc; - argo_dprintk("->compat_argo_op(%u,%p,%p,%lu,0x%lx)\n", cmd, - (void *)arg1.p, (void *)arg2.p, arg3, arg4); + argo_dprintk("->compat_argo_op(%u,%p,%p,%lu,0x%lx)\n", cmd, (void *)arg1.p, + (void *)arg2.p, arg3, arg4); send_addr_hnd = guest_handle_cast(arg1, xen_argo_send_addr_t); /* arg2: iovs, arg3: niov, arg4: message_type */ @@ -2267,15 +2249,14 @@ compat_argo_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg1, } rc = sendv(currd, &send_addr.src, &send_addr.dst, iovs, niov, arg4); - out: +out: argo_dprintk("<-compat_argo_op(%u)=%ld\n", cmd, rc); return rc; } #endif -static void -argo_domain_init(struct argo_domain *argo) +static void argo_domain_init(struct argo_domain *argo) { unsigned int i; @@ -2291,8 +2272,7 @@ argo_domain_init(struct argo_domain *argo) INIT_LIST_HEAD(&argo->wildcard_pend_list); } -int -argo_init(struct domain *d) +int argo_init(struct domain *d) { struct argo_domain *argo; @@ -2319,8 +2299,7 @@ argo_init(struct domain *d) return 0; } -void -argo_destroy(struct domain *d) +void argo_destroy(struct domain *d) { BUG_ON(!d->is_dying); @@ -2339,8 +2318,7 @@ argo_destroy(struct domain *d) write_unlock(&L1_global_argo_rwlock); } -void -argo_soft_reset(struct domain *d) +void argo_soft_reset(struct domain *d) { write_lock(&L1_global_argo_rwlock); diff --git a/xen/common/bitmap.c b/xen/common/bitmap.c index 34de387880..cf6b7f31f3 100644 --- a/xen/common/bitmap.c +++ b/xen/common/bitmap.c @@ -46,66 +46,66 @@ */ static void clamp_last_byte(uint8_t *bp, unsigned int nbits) { - unsigned int remainder = nbits % 8; + unsigned int remainder = nbits % 8; - if (remainder) - bp[nbits/8] &= (1U << remainder) - 1; + if ( remainder ) + bp[nbits / 8] &= (1U << remainder) - 1; } int __bitmap_empty(const unsigned long *bitmap, int bits) { - int k, lim = bits/BITS_PER_LONG; - for (k = 0; k < lim; ++k) - if (bitmap[k]) - return 0; + int k, lim = bits / BITS_PER_LONG; + for ( k = 0; k < lim; ++k ) + if ( bitmap[k] ) + return 0; - if (bits % BITS_PER_LONG) - if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) - return 0; + if ( bits % BITS_PER_LONG ) + if ( bitmap[k] & BITMAP_LAST_WORD_MASK(bits) ) + return 0; - return 
1; + return 1; } EXPORT_SYMBOL(__bitmap_empty); int __bitmap_full(const unsigned long *bitmap, int bits) { - int k, lim = bits/BITS_PER_LONG; - for (k = 0; k < lim; ++k) - if (~bitmap[k]) - return 0; + int k, lim = bits / BITS_PER_LONG; + for ( k = 0; k < lim; ++k ) + if ( ~bitmap[k] ) + return 0; - if (bits % BITS_PER_LONG) - if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) - return 0; + if ( bits % BITS_PER_LONG ) + if ( ~bitmap[k] & BITMAP_LAST_WORD_MASK(bits) ) + return 0; - return 1; + return 1; } EXPORT_SYMBOL(__bitmap_full); -int __bitmap_equal(const unsigned long *bitmap1, - const unsigned long *bitmap2, int bits) +int __bitmap_equal(const unsigned long *bitmap1, const unsigned long *bitmap2, + int bits) { - int k, lim = bits/BITS_PER_LONG; - for (k = 0; k < lim; ++k) - if (bitmap1[k] != bitmap2[k]) - return 0; + int k, lim = bits / BITS_PER_LONG; + for ( k = 0; k < lim; ++k ) + if ( bitmap1[k] != bitmap2[k] ) + return 0; - if (bits % BITS_PER_LONG) - if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) - return 0; + if ( bits % BITS_PER_LONG ) + if ( (bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits) ) + return 0; - return 1; + return 1; } EXPORT_SYMBOL(__bitmap_equal); void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits) { - int k, lim = bits/BITS_PER_LONG; - for (k = 0; k < lim; ++k) - dst[k] = ~src[k]; + int k, lim = bits / BITS_PER_LONG; + for ( k = 0; k < lim; ++k ) + dst[k] = ~src[k]; - if (bits % BITS_PER_LONG) - dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits); + if ( bits % BITS_PER_LONG ) + dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits); } EXPORT_SYMBOL(__bitmap_complement); @@ -120,41 +120,41 @@ EXPORT_SYMBOL(__bitmap_complement); * direction. Zeros are fed into the vacated MS positions and the * LS bits shifted off the bottom are lost. */ -void __bitmap_shift_right(unsigned long *dst, - const unsigned long *src, int shift, int bits) +void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, + int shift, int bits) { - int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG; - int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; - unsigned long mask = (1UL << left) - 1; - for (k = 0; off + k < lim; ++k) { - unsigned long upper, lower; - - /* - * If shift is not word aligned, take lower rem bits of - * word above and make them the top rem bits of result. - */ - if (!rem || off + k + 1 >= lim) - upper = 0; - else { - upper = src[off + k + 1]; - if (off + k + 1 == lim - 1 && left) - upper &= mask; - } - lower = src[off + k]; - if (left && off + k == lim - 1) - lower &= mask; - dst[k] = rem - ? (upper << (BITS_PER_LONG - rem)) | (lower >> rem) - : lower; - if (left && k == lim - 1) - dst[k] &= mask; - } - if (off) - memset(&dst[lim - off], 0, off*sizeof(unsigned long)); + int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG; + int off = shift / BITS_PER_LONG, rem = shift % BITS_PER_LONG; + unsigned long mask = (1UL << left) - 1; + for ( k = 0; off + k < lim; ++k ) + { + unsigned long upper, lower; + + /* + * If shift is not word aligned, take lower rem bits of + * word above and make them the top rem bits of result. + */ + if ( !rem || off + k + 1 >= lim ) + upper = 0; + else + { + upper = src[off + k + 1]; + if ( off + k + 1 == lim - 1 && left ) + upper &= mask; + } + lower = src[off + k]; + if ( left && off + k == lim - 1 ) + lower &= mask; + dst[k] = + rem ? 
(upper << (BITS_PER_LONG - rem)) | (lower >> rem) : lower; + if ( left && k == lim - 1 ) + dst[k] &= mask; + } + if ( off ) + memset(&dst[lim - off], 0, off * sizeof(unsigned long)); } EXPORT_SYMBOL(__bitmap_shift_right); - /* * __bitmap_shift_left - logical left shift of the bits in a bitmap * @dst - destination bitmap @@ -167,140 +167,139 @@ EXPORT_SYMBOL(__bitmap_shift_right); * and those MS bits shifted off the top are lost. */ -void __bitmap_shift_left(unsigned long *dst, - const unsigned long *src, int shift, int bits) +void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, + int shift, int bits) { - int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG; - int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; - for (k = lim - off - 1; k >= 0; --k) { - unsigned long upper, lower; - - /* - * If shift is not word aligned, take upper rem bits of - * word below and make them the bottom rem bits of result. - */ - if (rem && k > 0) - lower = src[k - 1]; - else - lower = 0; - upper = src[k]; - if (left && k == lim - 1) - upper &= (1UL << left) - 1; - dst[k + off] = rem ? (lower >> (BITS_PER_LONG - rem)) - | (upper << rem) - : upper; - if (left && k + off == lim - 1) - dst[k + off] &= (1UL << left) - 1; - } - if (off) - memset(dst, 0, off*sizeof(unsigned long)); + int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG; + int off = shift / BITS_PER_LONG, rem = shift % BITS_PER_LONG; + for ( k = lim - off - 1; k >= 0; --k ) + { + unsigned long upper, lower; + + /* + * If shift is not word aligned, take upper rem bits of + * word below and make them the bottom rem bits of result. + */ + if ( rem && k > 0 ) + lower = src[k - 1]; + else + lower = 0; + upper = src[k]; + if ( left && k == lim - 1 ) + upper &= (1UL << left) - 1; + dst[k + off] = + rem ? 
(lower >> (BITS_PER_LONG - rem)) | (upper << rem) : upper; + if ( left && k + off == lim - 1 ) + dst[k + off] &= (1UL << left) - 1; + } + if ( off ) + memset(dst, 0, off * sizeof(unsigned long)); } EXPORT_SYMBOL(__bitmap_shift_left); void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, int bits) + const unsigned long *bitmap2, int bits) { - int k; - int nr = BITS_TO_LONGS(bits); + int k; + int nr = BITS_TO_LONGS(bits); - for (k = 0; k < nr; k++) - dst[k] = bitmap1[k] & bitmap2[k]; + for ( k = 0; k < nr; k++ ) + dst[k] = bitmap1[k] & bitmap2[k]; } EXPORT_SYMBOL(__bitmap_and); void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, int bits) + const unsigned long *bitmap2, int bits) { - int k; - int nr = BITS_TO_LONGS(bits); + int k; + int nr = BITS_TO_LONGS(bits); - for (k = 0; k < nr; k++) - dst[k] = bitmap1[k] | bitmap2[k]; + for ( k = 0; k < nr; k++ ) + dst[k] = bitmap1[k] | bitmap2[k]; } EXPORT_SYMBOL(__bitmap_or); void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, int bits) + const unsigned long *bitmap2, int bits) { - int k; - int nr = BITS_TO_LONGS(bits); + int k; + int nr = BITS_TO_LONGS(bits); - for (k = 0; k < nr; k++) - dst[k] = bitmap1[k] ^ bitmap2[k]; + for ( k = 0; k < nr; k++ ) + dst[k] = bitmap1[k] ^ bitmap2[k]; } EXPORT_SYMBOL(__bitmap_xor); void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, int bits) + const unsigned long *bitmap2, int bits) { - int k; - int nr = BITS_TO_LONGS(bits); + int k; + int nr = BITS_TO_LONGS(bits); - for (k = 0; k < nr; k++) - dst[k] = bitmap1[k] & ~bitmap2[k]; + for ( k = 0; k < nr; k++ ) + dst[k] = bitmap1[k] & ~bitmap2[k]; } EXPORT_SYMBOL(__bitmap_andnot); int __bitmap_intersects(const unsigned long *bitmap1, - const unsigned long *bitmap2, int bits) + const unsigned long *bitmap2, int bits) { - int k, lim = bits/BITS_PER_LONG; - for (k = 0; k < lim; ++k) - if (bitmap1[k] & bitmap2[k]) - return 1; - - if (bits % BITS_PER_LONG) - if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) - return 1; - return 0; + int k, lim = bits / BITS_PER_LONG; + for ( k = 0; k < lim; ++k ) + if ( bitmap1[k] & bitmap2[k] ) + return 1; + + if ( bits % BITS_PER_LONG ) + if ( (bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits) ) + return 1; + return 0; } EXPORT_SYMBOL(__bitmap_intersects); -int __bitmap_subset(const unsigned long *bitmap1, - const unsigned long *bitmap2, int bits) +int __bitmap_subset(const unsigned long *bitmap1, const unsigned long *bitmap2, + int bits) { - int k, lim = bits/BITS_PER_LONG; - for (k = 0; k < lim; ++k) - if (bitmap1[k] & ~bitmap2[k]) - return 0; - - if (bits % BITS_PER_LONG) - if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) - return 0; - return 1; + int k, lim = bits / BITS_PER_LONG; + for ( k = 0; k < lim; ++k ) + if ( bitmap1[k] & ~bitmap2[k] ) + return 0; + + if ( bits % BITS_PER_LONG ) + if ( (bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits) ) + return 0; + return 1; } EXPORT_SYMBOL(__bitmap_subset); #if BITS_PER_LONG == 32 int __bitmap_weight(const unsigned long *bitmap, int bits) { - int k, w = 0, lim = bits/BITS_PER_LONG; + int k, w = 0, lim = bits / BITS_PER_LONG; - for (k = 0; k < lim; k++) - w += hweight32(bitmap[k]); + for ( k = 0; k < lim; k++ ) + w += hweight32(bitmap[k]); - if (bits % BITS_PER_LONG) - w += hweight32(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); + if ( bits % BITS_PER_LONG ) + w += 
hweight32(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); - return w; + return w; } #else int __bitmap_weight(const unsigned long *bitmap, int bits) { - int k, w = 0, lim = bits/BITS_PER_LONG; + int k, w = 0, lim = bits / BITS_PER_LONG; - for (k = 0; k < lim; k++) - w += hweight64(bitmap[k]); + for ( k = 0; k < lim; k++ ) + w += hweight64(bitmap[k]); - if (bits % BITS_PER_LONG) - w += hweight64(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); + if ( bits % BITS_PER_LONG ) + w += hweight64(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); - return w; + return w; } #endif EXPORT_SYMBOL(__bitmap_weight); - /** * bitmap_find_free_region - find a contiguous aligned mem region * @bitmap: an array of unsigned longs corresponding to the bitmap @@ -318,28 +317,30 @@ EXPORT_SYMBOL(__bitmap_weight); */ int bitmap_find_free_region(unsigned long *bitmap, int bits, int order) { - unsigned long mask; - int pages = 1 << order; - int i; - - if(pages > BITS_PER_LONG) - return -EINVAL; - - /* make a mask of the order */ - mask = (1ul << (pages - 1)); - mask += mask - 1; - - /* run up the bitmap pages bits at a time */ - for (i = 0; i < bits; i += pages) { - int index = i/BITS_PER_LONG; - int offset = i - (index * BITS_PER_LONG); - if((bitmap[index] & (mask << offset)) == 0) { - /* set region in bimap */ - bitmap[index] |= (mask << offset); - return i; - } - } - return -ENOMEM; + unsigned long mask; + int pages = 1 << order; + int i; + + if ( pages > BITS_PER_LONG ) + return -EINVAL; + + /* make a mask of the order */ + mask = (1ul << (pages - 1)); + mask += mask - 1; + + /* run up the bitmap pages bits at a time */ + for ( i = 0; i < bits; i += pages ) + { + int index = i / BITS_PER_LONG; + int offset = i - (index * BITS_PER_LONG); + if ( (bitmap[index] & (mask << offset)) == 0 ) + { + /* set region in bimap */ + bitmap[index] |= (mask << offset); + return i; + } + } + return -ENOMEM; } EXPORT_SYMBOL(bitmap_find_free_region); @@ -354,32 +355,32 @@ EXPORT_SYMBOL(bitmap_find_free_region); */ void bitmap_release_region(unsigned long *bitmap, int pos, int order) { - int pages = 1 << order; - unsigned long mask = (1ul << (pages - 1)); - int index = pos/BITS_PER_LONG; - int offset = pos - (index * BITS_PER_LONG); - mask += mask - 1; - bitmap[index] &= ~(mask << offset); + int pages = 1 << order; + unsigned long mask = (1ul << (pages - 1)); + int index = pos / BITS_PER_LONG; + int offset = pos - (index * BITS_PER_LONG); + mask += mask - 1; + bitmap[index] &= ~(mask << offset); } EXPORT_SYMBOL(bitmap_release_region); int bitmap_allocate_region(unsigned long *bitmap, int pos, int order) { - int pages = 1 << order; - unsigned long mask = (1ul << (pages - 1)); - int index = pos/BITS_PER_LONG; - int offset = pos - (index * BITS_PER_LONG); - - /* We don't do regions of pages > BITS_PER_LONG. The - * algorithm would be a simple look for multiple zeros in the - * array, but there's no driver today that needs this. If you - * trip this BUG(), you get to code it... */ - BUG_ON(pages > BITS_PER_LONG); - mask += mask - 1; - if (bitmap[index] & (mask << offset)) - return -EBUSY; - bitmap[index] |= (mask << offset); - return 0; + int pages = 1 << order; + unsigned long mask = (1ul << (pages - 1)); + int index = pos / BITS_PER_LONG; + int offset = pos - (index * BITS_PER_LONG); + + /* We don't do regions of pages > BITS_PER_LONG. The + * algorithm would be a simple look for multiple zeros in the + * array, but there's no driver today that needs this. If you + * trip this BUG(), you get to code it... 
*/ + BUG_ON(pages > BITS_PER_LONG); + mask += mask - 1; + if ( bitmap[index] & (mask << offset) ) + return -EBUSY; + bitmap[index] |= (mask << offset); + return 0; } EXPORT_SYMBOL(bitmap_allocate_region); @@ -387,49 +388,53 @@ EXPORT_SYMBOL(bitmap_allocate_region); void bitmap_long_to_byte(uint8_t *bp, const unsigned long *lp, int nbits) { - unsigned long l; - int i, j, b; - - for (i = 0, b = 0; nbits > 0; i++, b += sizeof(l)) { - l = lp[i]; - for (j = 0; (j < sizeof(l)) && (nbits > 0); j++) { - bp[b+j] = l; - l >>= 8; - nbits -= 8; - } - } - clamp_last_byte(bp, nbits); + unsigned long l; + int i, j, b; + + for ( i = 0, b = 0; nbits > 0; i++, b += sizeof(l) ) + { + l = lp[i]; + for ( j = 0; (j < sizeof(l)) && (nbits > 0); j++ ) + { + bp[b + j] = l; + l >>= 8; + nbits -= 8; + } + } + clamp_last_byte(bp, nbits); } void bitmap_byte_to_long(unsigned long *lp, const uint8_t *bp, int nbits) { - unsigned long l; - int i, j, b; - - for (i = 0, b = 0; nbits > 0; i++, b += sizeof(l)) { - l = 0; - for (j = 0; (j < sizeof(l)) && (nbits > 0); j++) { - l |= (unsigned long)bp[b+j] << (j*8); - nbits -= 8; - } - lp[i] = l; - } + unsigned long l; + int i, j, b; + + for ( i = 0, b = 0; nbits > 0; i++, b += sizeof(l) ) + { + l = 0; + for ( j = 0; (j < sizeof(l)) && (nbits > 0); j++ ) + { + l |= (unsigned long)bp[b + j] << (j * 8); + nbits -= 8; + } + lp[i] = l; + } } #elif defined(__LITTLE_ENDIAN) void bitmap_long_to_byte(uint8_t *bp, const unsigned long *lp, int nbits) { - memcpy(bp, lp, (nbits+7)/8); - clamp_last_byte(bp, nbits); + memcpy(bp, lp, (nbits + 7) / 8); + clamp_last_byte(bp, nbits); } void bitmap_byte_to_long(unsigned long *lp, const uint8_t *bp, int nbits) { - /* We may need to pad the final longword with zeroes. */ - if (nbits & (BITS_PER_LONG-1)) - lp[BITS_TO_LONGS(nbits)-1] = 0; - memcpy(lp, bp, (nbits+7)/8); + /* We may need to pad the final longword with zeroes. */ + if ( nbits & (BITS_PER_LONG - 1) ) + lp[BITS_TO_LONGS(nbits) - 1] = 0; + memcpy(lp, bp, (nbits + 7) / 8); } #endif diff --git a/xen/common/bsearch.c b/xen/common/bsearch.c index 7090930aab..33f8e57ad8 100644 --- a/xen/common/bsearch.c +++ b/xen/common/bsearch.c @@ -30,22 +30,23 @@ * the same comparison function for both sort() and bsearch(). */ void *bsearch(const void *key, const void *base, size_t num, size_t size, - int (*cmp)(const void *key, const void *elt)) + int (*cmp)(const void *key, const void *elt)) { - size_t start = 0, end = num; - int result; + size_t start = 0, end = num; + int result; - while (start < end) { - size_t mid = start + (end - start) / 2; + while ( start < end ) + { + size_t mid = start + (end - start) / 2; - result = cmp(key, base + mid * size); - if (result < 0) - end = mid; - else if (result > 0) - start = mid + 1; - else - return (void *)base + mid * size; - } + result = cmp(key, base + mid * size); + if ( result < 0 ) + end = mid; + else if ( result > 0 ) + start = mid + 1; + else + return (void *)base + mid * size; + } - return NULL; + return NULL; } diff --git a/xen/common/bunzip2.c b/xen/common/bunzip2.c index 6d6e8b19fd..8c378b768e 100644 --- a/xen/common/bunzip2.c +++ b/xen/common/bunzip2.c @@ -1,47 +1,47 @@ /* vi: set sw = 4 ts = 4: */ /* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net). - Based on bzip2 decompression code by Julian R Seward (jseward@acm.org), - which also acknowledges contributions by Mike Burrows, David Wheeler, - Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten, - Robert Sedgewick, and Jon L. Bentley. 
+ Based on bzip2 decompression code by Julian R Seward (jseward@acm.org), + which also acknowledges contributions by Mike Burrows, David Wheeler, + Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten, + Robert Sedgewick, and Jon L. Bentley. - This code is licensed under the LGPLv2: - LGPL (http://www.gnu.org/copyleft/lgpl.html + This code is licensed under the LGPLv2: + LGPL (http://www.gnu.org/copyleft/lgpl.html */ /* - Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org). + Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org). - More efficient reading of Huffman codes, a streamlined read_bunzip() - function, and various other tweaks. In (limited) tests, approximately - 20% faster than bzcat on x86 and about 10% faster on arm. + More efficient reading of Huffman codes, a streamlined read_bunzip() + function, and various other tweaks. In (limited) tests, approximately + 20% faster than bzcat on x86 and about 10% faster on arm. - Note that about 2/3 of the time is spent in read_unzip() reversing - the Burrows-Wheeler transformation. Much of that time is delay - resulting from cache misses. + Note that about 2/3 of the time is spent in read_unzip() reversing + the Burrows-Wheeler transformation. Much of that time is delay + resulting from cache misses. - I would ask that anyone benefiting from this work, especially those - using it in commercial products, consider making a donation to my local - non-profit hospice organization in the name of the woman I loved, who - passed away Feb. 12, 2003. + I would ask that anyone benefiting from this work, especially those + using it in commercial products, consider making a donation to my local + non-profit hospice organization in the name of the woman I loved, who + passed away Feb. 12, 2003. - In memory of Toni W. Hagan + In memory of Toni W. Hagan - Hospice of Acadiana, Inc. - 2600 Johnston St., Suite 200 - Lafayette, LA 70503-3240 + Hospice of Acadiana, Inc. 
+ 2600 Johnston St., Suite 200 + Lafayette, LA 70503-3240 - Phone (337) 232-1234 or 1-800-738-2226 - Fax (337) 232-1297 + Phone (337) 232-1234 or 1-800-738-2226 + Fax (337) 232-1297 - http://www.hospiceacadiana.com/ + http://www.hospiceacadiana.com/ - Manuel + Manuel */ /* - Made it fit for running in Linux Kernel by Alain Knaff (alain@knaff.lu) + Made it fit for running in Linux Kernel by Alain Knaff (alain@knaff.lu) */ #include "decompress.h" @@ -51,455 +51,479 @@ #endif /* Constants for Huffman coding */ -#define MAX_GROUPS 6 -#define GROUP_SIZE 50 /* 64 would have been more efficient */ -#define MAX_HUFCODE_BITS 20 /* Longest Huffman code allowed */ -#define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */ -#define SYMBOL_RUNA 0 -#define SYMBOL_RUNB 1 +#define MAX_GROUPS 6 +#define GROUP_SIZE 50 /* 64 would have been more efficient */ +#define MAX_HUFCODE_BITS 20 /* Longest Huffman code allowed */ +#define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */ +#define SYMBOL_RUNA 0 +#define SYMBOL_RUNB 1 /* Status return values */ -#define RETVAL_OK 0 -#define RETVAL_LAST_BLOCK (-1) -#define RETVAL_NOT_BZIP_DATA (-2) -#define RETVAL_UNEXPECTED_INPUT_EOF (-3) -#define RETVAL_UNEXPECTED_OUTPUT_EOF (-4) -#define RETVAL_DATA_ERROR (-5) -#define RETVAL_OUT_OF_MEMORY (-6) -#define RETVAL_OBSOLETE_INPUT (-7) +#define RETVAL_OK 0 +#define RETVAL_LAST_BLOCK (-1) +#define RETVAL_NOT_BZIP_DATA (-2) +#define RETVAL_UNEXPECTED_INPUT_EOF (-3) +#define RETVAL_UNEXPECTED_OUTPUT_EOF (-4) +#define RETVAL_DATA_ERROR (-5) +#define RETVAL_OUT_OF_MEMORY (-6) +#define RETVAL_OBSOLETE_INPUT (-7) /* Other housekeeping constants */ -#define BZIP2_IOBUF_SIZE 4096 +#define BZIP2_IOBUF_SIZE 4096 /* This is what we know about each Huffman coding group */ -struct group_data { - /* We have an extra slot at the end of limit[] for a sentinal value. */ - int limit[MAX_HUFCODE_BITS+1]; - int base[MAX_HUFCODE_BITS]; - int permute[MAX_SYMBOLS]; - int minLen, maxLen; +struct group_data +{ + /* We have an extra slot at the end of limit[] for a sentinal value. */ + int limit[MAX_HUFCODE_BITS + 1]; + int base[MAX_HUFCODE_BITS]; + int permute[MAX_SYMBOLS]; + int minLen, maxLen; }; /* Structure holding all the housekeeping data, including IO buffers and memory that persists between calls to bunzip */ -struct bunzip_data { - /* State for interrupting output loop */ - int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent; - /* I/O tracking data (file handles, buffers, positions, etc.) */ - int (*fill)(void*, unsigned int); - int inbufCount, inbufPos /*, outbufPos*/; - unsigned char *inbuf /*,*outbuf*/; - unsigned int inbufBitCount, inbufBits; - /* The CRC values stored in the block header and calculated from the - data */ - unsigned int crc32Table[256], headerCRC, totalCRC, writeCRC; - /* Intermediate buffer and its size (in bytes) */ - unsigned int *dbuf, dbufSize; - /* These things are a bit too big to go on the stack */ - unsigned char selectors[32768]; /* nSelectors = 15 bits */ - struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */ - int io_error; /* non-zero if we have IO error */ +struct bunzip_data +{ + /* State for interrupting output loop */ + int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent; + /* I/O tracking data (file handles, buffers, positions, etc.) 
*/ + int (*fill)(void *, unsigned int); + int inbufCount, inbufPos /*, outbufPos*/; + unsigned char *inbuf /*,*outbuf*/; + unsigned int inbufBitCount, inbufBits; + /* The CRC values stored in the block header and calculated from the + data */ + unsigned int crc32Table[256], headerCRC, totalCRC, writeCRC; + /* Intermediate buffer and its size (in bytes) */ + unsigned int *dbuf, dbufSize; + /* These things are a bit too big to go on the stack */ + unsigned char selectors[32768]; /* nSelectors = 15 bits */ + struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */ + int io_error; /* non-zero if we have IO error */ }; - /* Return the next nnn bits of input. All reads from the compressed input are done through this function. All reads are big endian */ static unsigned int INIT get_bits(struct bunzip_data *bd, char bits_wanted) { - unsigned int bits = 0; - - /* If we need to get more data from the byte buffer, do so. - (Loop getting one byte at a time to enforce endianness and avoid - unaligned access.) */ - while (bd->inbufBitCount < bits_wanted) { - /* If we need to read more data from file into byte buffer, do - so */ - if (bd->inbufPos == bd->inbufCount) { - if (bd->io_error) - return 0; - bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE); - if (bd->inbufCount <= 0) { - bd->io_error = RETVAL_UNEXPECTED_INPUT_EOF; - return 0; - } - bd->inbufPos = 0; - } - /* Avoid 32-bit overflow (dump bit buffer to top of output) */ - if (bd->inbufBitCount >= 24) { - bits = bd->inbufBits&((1 << bd->inbufBitCount)-1); - bits_wanted -= bd->inbufBitCount; - bits <<= bits_wanted; - bd->inbufBitCount = 0; - } - /* Grab next 8 bits of input from buffer. */ - bd->inbufBits = (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++]; - bd->inbufBitCount += 8; - } - /* Calculate result */ - bd->inbufBitCount -= bits_wanted; - bits |= (bd->inbufBits >> bd->inbufBitCount)&((1 << bits_wanted)-1); - - return bits; + unsigned int bits = 0; + + /* If we need to get more data from the byte buffer, do so. + (Loop getting one byte at a time to enforce endianness and avoid + unaligned access.) */ + while ( bd->inbufBitCount < bits_wanted ) + { + /* If we need to read more data from file into byte buffer, do + so */ + if ( bd->inbufPos == bd->inbufCount ) + { + if ( bd->io_error ) + return 0; + bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE); + if ( bd->inbufCount <= 0 ) + { + bd->io_error = RETVAL_UNEXPECTED_INPUT_EOF; + return 0; + } + bd->inbufPos = 0; + } + /* Avoid 32-bit overflow (dump bit buffer to top of output) */ + if ( bd->inbufBitCount >= 24 ) + { + bits = bd->inbufBits & ((1 << bd->inbufBitCount) - 1); + bits_wanted -= bd->inbufBitCount; + bits <<= bits_wanted; + bd->inbufBitCount = 0; + } + /* Grab next 8 bits of input from buffer. */ + bd->inbufBits = (bd->inbufBits << 8) | bd->inbuf[bd->inbufPos++]; + bd->inbufBitCount += 8; + } + /* Calculate result */ + bd->inbufBitCount -= bits_wanted; + bits |= (bd->inbufBits >> bd->inbufBitCount) & ((1 << bits_wanted) - 1); + + return bits; } /* Unpacks the next block and sets up for the inverse burrows-wheeler step. 
*/ static int INIT get_next_block(struct bunzip_data *bd) { - struct group_data *hufGroup = NULL; - int *base = NULL; - int *limit = NULL; - int dbufCount, nextSym, dbufSize, groupCount, selector, - i, j, k, t, runPos, symCount, symTotal, nSelectors, - byteCount[256]; - unsigned char uc, symToByte[256], mtfSymbol[256], *selectors; - unsigned int *dbuf, origPtr; - - dbuf = bd->dbuf; - dbufSize = bd->dbufSize; - selectors = bd->selectors; - - /* Read in header signature and CRC, then validate signature. - (last block signature means CRC is for whole file, return now) */ - i = get_bits(bd, 24); - j = get_bits(bd, 24); - bd->headerCRC = get_bits(bd, 32); - if ((i == 0x177245) && (j == 0x385090)) - return RETVAL_LAST_BLOCK; - if ((i != 0x314159) || (j != 0x265359)) - return RETVAL_NOT_BZIP_DATA; - /* We can add support for blockRandomised if anybody complains. - There was some code for this in busybox 1.0.0-pre3, but nobody ever - noticed that it didn't actually work. */ - if (get_bits(bd, 1)) - return RETVAL_OBSOLETE_INPUT; - origPtr = get_bits(bd, 24); - if (origPtr >= dbufSize) - return RETVAL_DATA_ERROR; - /* mapping table: if some byte values are never used (encoding things - like ascii text), the compression code removes the gaps to have fewer - symbols to deal with, and writes a sparse bitfield indicating which - values were present. We make a translation table to convert the - symbols back to the corresponding bytes. */ - t = get_bits(bd, 16); - symTotal = 0; - for (i = 0; i < 16; i++) { - if (t&(1 << (15-i))) { - k = get_bits(bd, 16); - for (j = 0; j < 16; j++) - if (k&(1 << (15-j))) - symToByte[symTotal++] = (16*i)+j; - } - } - /* How many different Huffman coding groups does this block use? */ - groupCount = get_bits(bd, 3); - if (groupCount < 2 || groupCount > MAX_GROUPS) - return RETVAL_DATA_ERROR; - /* nSelectors: Every GROUP_SIZE many symbols we select a new - Huffman coding group. Read in the group selector list, - which is stored as MTF encoded bit runs. (MTF = Move To - Front, as each value is used it's moved to the start of the - list.) */ - nSelectors = get_bits(bd, 15); - if (!nSelectors) - return RETVAL_DATA_ERROR; - for (i = 0; i < groupCount; i++) - mtfSymbol[i] = i; - for (i = 0; i < nSelectors; i++) { - /* Get next value */ - for (j = 0; get_bits(bd, 1); j++) - if (j >= groupCount) - return RETVAL_DATA_ERROR; - /* Decode MTF to get the next selector */ - uc = mtfSymbol[j]; - for (; j; j--) - mtfSymbol[j] = mtfSymbol[j-1]; - mtfSymbol[0] = selectors[i] = uc; - } - /* Read the Huffman coding tables for each group, which code - for symTotal literal symbols, plus two run symbols (RUNA, - RUNB) */ - symCount = symTotal+2; - for (j = 0; j < groupCount; j++) { - unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1]; - int minLen, maxLen, pp; - /* Read Huffman code lengths for each symbol. They're - stored in a way similar to mtf; record a starting - value for the first symbol, and an offset from the - previous value for everys symbol after that. - (Subtracting 1 before the loop and then adding it - back at the end is an optimization that makes the - test inside the loop simpler: symbol length 0 - becomes negative, so an unsigned inequality catches - it.) */ - t = get_bits(bd, 5)-1; - for (i = 0; i < symCount; i++) { - for (;;) { - if (((unsigned)t) > (MAX_HUFCODE_BITS-1)) - return RETVAL_DATA_ERROR; - - /* If first bit is 0, stop. Else - second bit indicates whether to - increment or decrement the value. 
- Optimization: grab 2 bits and unget - the second if the first was 0. */ - - k = get_bits(bd, 2); - if (k < 2) { - bd->inbufBitCount++; - break; - } - /* Add one if second bit 1, else - * subtract 1. Avoids if/else */ - t += (((k+1)&2)-1); - } - /* Correct for the initial -1, to get the - * final symbol length */ - length[i] = t+1; - } - /* Find largest and smallest lengths in this group */ - minLen = maxLen = length[0]; - - for (i = 1; i < symCount; i++) { - if (length[i] > maxLen) - maxLen = length[i]; - else if (length[i] < minLen) - minLen = length[i]; - } - - /* Calculate permute[], base[], and limit[] tables from - * length[]. - * - * permute[] is the lookup table for converting - * Huffman coded symbols into decoded symbols. base[] - * is the amount to subtract from the value of a - * Huffman symbol of a given length when using - * permute[]. - * - * limit[] indicates the largest numerical value a - * symbol with a given number of bits can have. This - * is how the Huffman codes can vary in length: each - * code with a value > limit[length] needs another - * bit. - */ - hufGroup = bd->groups+j; - hufGroup->minLen = minLen; - hufGroup->maxLen = maxLen; - /* Note that minLen can't be smaller than 1, so we - adjust the base and limit array pointers so we're - not always wasting the first entry. We do this - again when using them (during symbol decoding).*/ - base = hufGroup->base-1; - limit = hufGroup->limit-1; - /* Calculate permute[]. Concurently, initialize - * temp[] and limit[]. */ - pp = 0; - for (i = minLen; i <= maxLen; i++) { - temp[i] = limit[i] = 0; - for (t = 0; t < symCount; t++) - if (length[t] == i) - hufGroup->permute[pp++] = t; - } - /* Count symbols coded for at each bit length */ - for (i = 0; i < symCount; i++) - temp[length[i]]++; - /* Calculate limit[] (the largest symbol-coding value - *at each bit length, which is (previous limit << - *1)+symbols at this level), and base[] (number of - *symbols to ignore at each bit length, which is limit - *minus the cumulative count of symbols coded for - *already). */ - pp = t = 0; - for (i = minLen; i < maxLen; i++) { - pp += temp[i]; - /* We read the largest possible symbol size - and then unget bits after determining how - many we need, and those extra bits could be - set to anything. (They're noise from - future symbols.) At each level we're - really only interested in the first few - bits, so here we set all the trailing - to-be-ignored bits to 1 so they don't - affect the value > limit[length] - comparison. */ - limit[i] = (pp << (maxLen - i)) - 1; - pp <<= 1; - base[i+1] = pp-(t += temp[i]); - } - limit[maxLen+1] = INT_MAX; /* Sentinal value for - * reading next sym. */ - limit[maxLen] = pp+temp[maxLen]-1; - base[minLen] = 0; - } - /* We've finished reading and digesting the block header. Now - read this block's Huffman coded symbols from the file and - undo the Huffman coding and run length encoding, saving the - result into dbuf[dbufCount++] = uc */ - - /* Initialize symbol occurrence counters and symbol Move To - * Front table */ - for (i = 0; i < 256; i++) { - byteCount[i] = 0; - mtfSymbol[i] = (unsigned char)i; - } - /* Loop through compressed symbols. */ - runPos = dbufCount = symCount = selector = 0; - for (;;) { - /* Determine which Huffman coding group to use. 
*/ - if (!(symCount--)) { - symCount = GROUP_SIZE-1; - if (selector >= nSelectors) - return RETVAL_DATA_ERROR; - hufGroup = bd->groups+selectors[selector++]; - base = hufGroup->base-1; - limit = hufGroup->limit-1; - } - /* Read next Huffman-coded symbol. */ - /* Note: It is far cheaper to read maxLen bits and - back up than it is to read minLen bits and then an - additional bit at a time, testing as we go. - Because there is a trailing last block (with file - CRC), there is no danger of the overread causing an - unexpected EOF for a valid compressed file. As a - further optimization, we do the read inline - (falling back to a call to get_bits if the buffer - runs dry). The following (up to got_huff_bits:) is - equivalent to j = get_bits(bd, hufGroup->maxLen); - */ - while (bd->inbufBitCount < hufGroup->maxLen) { - if (bd->inbufPos == bd->inbufCount) { - j = get_bits(bd, hufGroup->maxLen); - goto got_huff_bits; - } - bd->inbufBits = - (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++]; - bd->inbufBitCount += 8; - }; - bd->inbufBitCount -= hufGroup->maxLen; - j = (bd->inbufBits >> bd->inbufBitCount)& - ((1 << hufGroup->maxLen)-1); -got_huff_bits: - /* Figure how how many bits are in next symbol and - * unget extras */ - i = hufGroup->minLen; - while (j > limit[i]) - ++i; - bd->inbufBitCount += (hufGroup->maxLen - i); - /* Huffman decode value to get nextSym (with bounds checking) */ - if ((i > hufGroup->maxLen) - || (((unsigned)(j = (j>>(hufGroup->maxLen-i))-base[i])) - >= MAX_SYMBOLS)) - return RETVAL_DATA_ERROR; - nextSym = hufGroup->permute[j]; - /* We have now decoded the symbol, which indicates - either a new literal byte, or a repeated run of the - most recent literal byte. First, check if nextSym - indicates a repeated run, and if so loop collecting - how many times to repeat the last literal. */ - if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */ - /* If this is the start of a new run, zero out - * counter */ - if (!runPos) { - runPos = 1; - t = 0; - } - /* Neat trick that saves 1 symbol: instead of - or-ing 0 or 1 at each bit position, add 1 - or 2 instead. For example, 1011 is 1 << 0 - + 1 << 1 + 2 << 2. 1010 is 2 << 0 + 2 << 1 - + 1 << 2. You can make any bit pattern - that way using 1 less symbol than the basic - or 0/1 method (except all bits 0, which - would use no symbols, but a run of length 0 - doesn't mean anything in this context). - Thus space is saved. */ - t += (runPos << nextSym); - /* +runPos if RUNA; +2*runPos if RUNB */ - - runPos <<= 1; - continue; - } - /* When we hit the first non-run symbol after a run, - we now know how many times to repeat the last - literal, so append that many copies to our buffer - of decoded symbols (dbuf) now. (The last literal - used is the one at the head of the mtfSymbol - array.) */ - if (runPos) { - runPos = 0; - if (dbufCount+t >= dbufSize) - return RETVAL_DATA_ERROR; - - uc = symToByte[mtfSymbol[0]]; - byteCount[uc] += t; - while (t--) - dbuf[dbufCount++] = uc; - } - /* Is this the terminating symbol? */ - if (nextSym > symTotal) - break; - /* At this point, nextSym indicates a new literal - character. Subtract one to get the position in the - MTF array at which this literal is currently to be - found. (Note that the result can't be -1 or 0, - because 0 and 1 are RUNA and RUNB. But another - instance of the first symbol in the mtf array, - position 0, would have been handled as part of a - run above. Therefore 1 unused mtf position minus 2 - non-literal nextSym values equals -1.) 
*/ - if (dbufCount >= dbufSize) - return RETVAL_DATA_ERROR; - i = nextSym - 1; - uc = mtfSymbol[i]; - /* Adjust the MTF array. Since we typically expect to - *move only a small number of symbols, and are bound - *by 256 in any case, using memmove here would - *typically be bigger and slower due to function call - *overhead and other assorted setup costs. */ - do { - mtfSymbol[i] = mtfSymbol[i-1]; - } while (--i); - mtfSymbol[0] = uc; - uc = symToByte[uc]; - /* We have our literal byte. Save it into dbuf. */ - byteCount[uc]++; - dbuf[dbufCount++] = (unsigned int)uc; - } - /* At this point, we've read all the Huffman-coded symbols - (and repeated runs) for this block from the input stream, - and decoded them into the intermediate buffer. There are - dbufCount many decoded bytes in dbuf[]. Now undo the - Burrows-Wheeler transform on dbuf. See - http://dogma.net/markn/articles/bwt/bwt.htm - */ - /* Turn byteCount into cumulative occurrence counts of 0 to n-1. */ - j = 0; - for (i = 0; i < 256; i++) { - k = j+byteCount[i]; - byteCount[i] = j; - j = k; - } - /* Figure out what order dbuf would be in if we sorted it. */ - for (i = 0; i < dbufCount; i++) { - uc = (unsigned char)(dbuf[i] & 0xff); - dbuf[byteCount[uc]] |= (i << 8); - byteCount[uc]++; - } - /* Decode first byte by hand to initialize "previous" byte. - Note that it doesn't get output, and if the first three - characters are identical it doesn't qualify as a run (hence - writeRunCountdown = 5). */ - if (dbufCount) { - if (origPtr >= dbufCount) - return RETVAL_DATA_ERROR; - bd->writePos = dbuf[origPtr]; - bd->writeCurrent = (unsigned char)(bd->writePos&0xff); - bd->writePos >>= 8; - bd->writeRunCountdown = 5; - } - bd->writeCount = dbufCount; - - return RETVAL_OK; + struct group_data *hufGroup = NULL; + int *base = NULL; + int *limit = NULL; + int dbufCount, nextSym, dbufSize, groupCount, selector, i, j, k, t, runPos, + symCount, symTotal, nSelectors, byteCount[256]; + unsigned char uc, symToByte[256], mtfSymbol[256], *selectors; + unsigned int *dbuf, origPtr; + + dbuf = bd->dbuf; + dbufSize = bd->dbufSize; + selectors = bd->selectors; + + /* Read in header signature and CRC, then validate signature. + (last block signature means CRC is for whole file, return now) */ + i = get_bits(bd, 24); + j = get_bits(bd, 24); + bd->headerCRC = get_bits(bd, 32); + if ( (i == 0x177245) && (j == 0x385090) ) + return RETVAL_LAST_BLOCK; + if ( (i != 0x314159) || (j != 0x265359) ) + return RETVAL_NOT_BZIP_DATA; + /* We can add support for blockRandomised if anybody complains. + There was some code for this in busybox 1.0.0-pre3, but nobody ever + noticed that it didn't actually work. */ + if ( get_bits(bd, 1) ) + return RETVAL_OBSOLETE_INPUT; + origPtr = get_bits(bd, 24); + if ( origPtr >= dbufSize ) + return RETVAL_DATA_ERROR; + /* mapping table: if some byte values are never used (encoding things + like ascii text), the compression code removes the gaps to have fewer + symbols to deal with, and writes a sparse bitfield indicating which + values were present. We make a translation table to convert the + symbols back to the corresponding bytes. */ + t = get_bits(bd, 16); + symTotal = 0; + for ( i = 0; i < 16; i++ ) + { + if ( t & (1 << (15 - i)) ) + { + k = get_bits(bd, 16); + for ( j = 0; j < 16; j++ ) + if ( k & (1 << (15 - j)) ) + symToByte[symTotal++] = (16 * i) + j; + } + } + /* How many different Huffman coding groups does this block use? 
*/ + groupCount = get_bits(bd, 3); + if ( groupCount < 2 || groupCount > MAX_GROUPS ) + return RETVAL_DATA_ERROR; + /* nSelectors: Every GROUP_SIZE many symbols we select a new + Huffman coding group. Read in the group selector list, + which is stored as MTF encoded bit runs. (MTF = Move To + Front, as each value is used it's moved to the start of the + list.) */ + nSelectors = get_bits(bd, 15); + if ( !nSelectors ) + return RETVAL_DATA_ERROR; + for ( i = 0; i < groupCount; i++ ) + mtfSymbol[i] = i; + for ( i = 0; i < nSelectors; i++ ) + { + /* Get next value */ + for ( j = 0; get_bits(bd, 1); j++ ) + if ( j >= groupCount ) + return RETVAL_DATA_ERROR; + /* Decode MTF to get the next selector */ + uc = mtfSymbol[j]; + for ( ; j; j-- ) + mtfSymbol[j] = mtfSymbol[j - 1]; + mtfSymbol[0] = selectors[i] = uc; + } + /* Read the Huffman coding tables for each group, which code + for symTotal literal symbols, plus two run symbols (RUNA, + RUNB) */ + symCount = symTotal + 2; + for ( j = 0; j < groupCount; j++ ) + { + unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS + 1]; + int minLen, maxLen, pp; + /* Read Huffman code lengths for each symbol. They're + stored in a way similar to mtf; record a starting + value for the first symbol, and an offset from the + previous value for everys symbol after that. + (Subtracting 1 before the loop and then adding it + back at the end is an optimization that makes the + test inside the loop simpler: symbol length 0 + becomes negative, so an unsigned inequality catches + it.) */ + t = get_bits(bd, 5) - 1; + for ( i = 0; i < symCount; i++ ) + { + for ( ;; ) + { + if ( ((unsigned)t) > (MAX_HUFCODE_BITS - 1) ) + return RETVAL_DATA_ERROR; + + /* If first bit is 0, stop. Else + second bit indicates whether to + increment or decrement the value. + Optimization: grab 2 bits and unget + the second if the first was 0. */ + + k = get_bits(bd, 2); + if ( k < 2 ) + { + bd->inbufBitCount++; + break; + } + /* Add one if second bit 1, else + * subtract 1. Avoids if/else */ + t += (((k + 1) & 2) - 1); + } + /* Correct for the initial -1, to get the + * final symbol length */ + length[i] = t + 1; + } + /* Find largest and smallest lengths in this group */ + minLen = maxLen = length[0]; + + for ( i = 1; i < symCount; i++ ) + { + if ( length[i] > maxLen ) + maxLen = length[i]; + else if ( length[i] < minLen ) + minLen = length[i]; + } + + /* Calculate permute[], base[], and limit[] tables from + * length[]. + * + * permute[] is the lookup table for converting + * Huffman coded symbols into decoded symbols. base[] + * is the amount to subtract from the value of a + * Huffman symbol of a given length when using + * permute[]. + * + * limit[] indicates the largest numerical value a + * symbol with a given number of bits can have. This + * is how the Huffman codes can vary in length: each + * code with a value > limit[length] needs another + * bit. + */ + hufGroup = bd->groups + j; + hufGroup->minLen = minLen; + hufGroup->maxLen = maxLen; + /* Note that minLen can't be smaller than 1, so we + adjust the base and limit array pointers so we're + not always wasting the first entry. We do this + again when using them (during symbol decoding).*/ + base = hufGroup->base - 1; + limit = hufGroup->limit - 1; + /* Calculate permute[]. Concurently, initialize + * temp[] and limit[]. 
*/ + pp = 0; + for ( i = minLen; i <= maxLen; i++ ) + { + temp[i] = limit[i] = 0; + for ( t = 0; t < symCount; t++ ) + if ( length[t] == i ) + hufGroup->permute[pp++] = t; + } + /* Count symbols coded for at each bit length */ + for ( i = 0; i < symCount; i++ ) + temp[length[i]]++; + /* Calculate limit[] (the largest symbol-coding value + *at each bit length, which is (previous limit << + *1)+symbols at this level), and base[] (number of + *symbols to ignore at each bit length, which is limit + *minus the cumulative count of symbols coded for + *already). */ + pp = t = 0; + for ( i = minLen; i < maxLen; i++ ) + { + pp += temp[i]; + /* We read the largest possible symbol size + and then unget bits after determining how + many we need, and those extra bits could be + set to anything. (They're noise from + future symbols.) At each level we're + really only interested in the first few + bits, so here we set all the trailing + to-be-ignored bits to 1 so they don't + affect the value > limit[length] + comparison. */ + limit[i] = (pp << (maxLen - i)) - 1; + pp <<= 1; + base[i + 1] = pp - (t += temp[i]); + } + limit[maxLen + 1] = INT_MAX; /* Sentinal value for + * reading next sym. */ + limit[maxLen] = pp + temp[maxLen] - 1; + base[minLen] = 0; + } + /* We've finished reading and digesting the block header. Now + read this block's Huffman coded symbols from the file and + undo the Huffman coding and run length encoding, saving the + result into dbuf[dbufCount++] = uc */ + + /* Initialize symbol occurrence counters and symbol Move To + * Front table */ + for ( i = 0; i < 256; i++ ) + { + byteCount[i] = 0; + mtfSymbol[i] = (unsigned char)i; + } + /* Loop through compressed symbols. */ + runPos = dbufCount = symCount = selector = 0; + for ( ;; ) + { + /* Determine which Huffman coding group to use. */ + if ( !(symCount--) ) + { + symCount = GROUP_SIZE - 1; + if ( selector >= nSelectors ) + return RETVAL_DATA_ERROR; + hufGroup = bd->groups + selectors[selector++]; + base = hufGroup->base - 1; + limit = hufGroup->limit - 1; + } + /* Read next Huffman-coded symbol. */ + /* Note: It is far cheaper to read maxLen bits and + back up than it is to read minLen bits and then an + additional bit at a time, testing as we go. + Because there is a trailing last block (with file + CRC), there is no danger of the overread causing an + unexpected EOF for a valid compressed file. As a + further optimization, we do the read inline + (falling back to a call to get_bits if the buffer + runs dry). 
The following (up to got_huff_bits:) is + equivalent to j = get_bits(bd, hufGroup->maxLen); + */ + while ( bd->inbufBitCount < hufGroup->maxLen ) + { + if ( bd->inbufPos == bd->inbufCount ) + { + j = get_bits(bd, hufGroup->maxLen); + goto got_huff_bits; + } + bd->inbufBits = (bd->inbufBits << 8) | bd->inbuf[bd->inbufPos++]; + bd->inbufBitCount += 8; + }; + bd->inbufBitCount -= hufGroup->maxLen; + j = (bd->inbufBits >> bd->inbufBitCount) & + ((1 << hufGroup->maxLen) - 1); + got_huff_bits: + /* Figure how how many bits are in next symbol and + * unget extras */ + i = hufGroup->minLen; + while ( j > limit[i] ) + ++i; + bd->inbufBitCount += (hufGroup->maxLen - i); + /* Huffman decode value to get nextSym (with bounds checking) */ + if ( (i > hufGroup->maxLen) || + (((unsigned)(j = (j >> (hufGroup->maxLen - i)) - base[i])) >= + MAX_SYMBOLS) ) + return RETVAL_DATA_ERROR; + nextSym = hufGroup->permute[j]; + /* We have now decoded the symbol, which indicates + either a new literal byte, or a repeated run of the + most recent literal byte. First, check if nextSym + indicates a repeated run, and if so loop collecting + how many times to repeat the last literal. */ + if ( ((unsigned)nextSym) <= SYMBOL_RUNB ) + { /* RUNA or RUNB */ + /* If this is the start of a new run, zero out + * counter */ + if ( !runPos ) + { + runPos = 1; + t = 0; + } + /* Neat trick that saves 1 symbol: instead of + or-ing 0 or 1 at each bit position, add 1 + or 2 instead. For example, 1011 is 1 << 0 + + 1 << 1 + 2 << 2. 1010 is 2 << 0 + 2 << 1 + + 1 << 2. You can make any bit pattern + that way using 1 less symbol than the basic + or 0/1 method (except all bits 0, which + would use no symbols, but a run of length 0 + doesn't mean anything in this context). + Thus space is saved. */ + t += (runPos << nextSym); + /* +runPos if RUNA; +2*runPos if RUNB */ + + runPos <<= 1; + continue; + } + /* When we hit the first non-run symbol after a run, + we now know how many times to repeat the last + literal, so append that many copies to our buffer + of decoded symbols (dbuf) now. (The last literal + used is the one at the head of the mtfSymbol + array.) */ + if ( runPos ) + { + runPos = 0; + if ( dbufCount + t >= dbufSize ) + return RETVAL_DATA_ERROR; + + uc = symToByte[mtfSymbol[0]]; + byteCount[uc] += t; + while ( t-- ) + dbuf[dbufCount++] = uc; + } + /* Is this the terminating symbol? */ + if ( nextSym > symTotal ) + break; + /* At this point, nextSym indicates a new literal + character. Subtract one to get the position in the + MTF array at which this literal is currently to be + found. (Note that the result can't be -1 or 0, + because 0 and 1 are RUNA and RUNB. But another + instance of the first symbol in the mtf array, + position 0, would have been handled as part of a + run above. Therefore 1 unused mtf position minus 2 + non-literal nextSym values equals -1.) */ + if ( dbufCount >= dbufSize ) + return RETVAL_DATA_ERROR; + i = nextSym - 1; + uc = mtfSymbol[i]; + /* Adjust the MTF array. Since we typically expect to + *move only a small number of symbols, and are bound + *by 256 in any case, using memmove here would + *typically be bigger and slower due to function call + *overhead and other assorted setup costs. */ + do { + mtfSymbol[i] = mtfSymbol[i - 1]; + } while ( --i ); + mtfSymbol[0] = uc; + uc = symToByte[uc]; + /* We have our literal byte. Save it into dbuf. 
*/ + byteCount[uc]++; + dbuf[dbufCount++] = (unsigned int)uc; + } + /* At this point, we've read all the Huffman-coded symbols + (and repeated runs) for this block from the input stream, + and decoded them into the intermediate buffer. There are + dbufCount many decoded bytes in dbuf[]. Now undo the + Burrows-Wheeler transform on dbuf. See + http://dogma.net/markn/articles/bwt/bwt.htm + */ + /* Turn byteCount into cumulative occurrence counts of 0 to n-1. */ + j = 0; + for ( i = 0; i < 256; i++ ) + { + k = j + byteCount[i]; + byteCount[i] = j; + j = k; + } + /* Figure out what order dbuf would be in if we sorted it. */ + for ( i = 0; i < dbufCount; i++ ) + { + uc = (unsigned char)(dbuf[i] & 0xff); + dbuf[byteCount[uc]] |= (i << 8); + byteCount[uc]++; + } + /* Decode first byte by hand to initialize "previous" byte. + Note that it doesn't get output, and if the first three + characters are identical it doesn't qualify as a run (hence + writeRunCountdown = 5). */ + if ( dbufCount ) + { + if ( origPtr >= dbufCount ) + return RETVAL_DATA_ERROR; + bd->writePos = dbuf[origPtr]; + bd->writeCurrent = (unsigned char)(bd->writePos & 0xff); + bd->writePos >>= 8; + bd->writeRunCountdown = 5; + } + bd->writeCount = dbufCount; + + return RETVAL_OK; } /* Undo burrows-wheeler transform on intermediate buffer to produce output. @@ -509,225 +533,241 @@ got_huff_bits: are ignored, data is written to out_fd and return is RETVAL_OK or error. */ -static int INIT read_bunzip(struct bunzip_data *bd, unsigned char *outbuf, int len) +static int INIT read_bunzip(struct bunzip_data *bd, unsigned char *outbuf, + int len) { - const unsigned int *dbuf; - int pos, xcurrent, previous, gotcount; - - /* If last read was short due to end of file, return last block now */ - if (bd->writeCount < 0) - return bd->writeCount; - - gotcount = 0; - dbuf = bd->dbuf; - pos = bd->writePos; - xcurrent = bd->writeCurrent; - - /* We will always have pending decoded data to write into the output - buffer unless this is the very first call (in which case we haven't - Huffman-decoded a block into the intermediate buffer yet). */ - - if (bd->writeCopies) { - /* Inside the loop, writeCopies means extra copies (beyond 1) */ - --bd->writeCopies; - /* Loop outputting bytes */ - for (;;) { - /* If the output buffer is full, snapshot - * state and return */ - if (gotcount >= len) { - bd->writePos = pos; - bd->writeCurrent = xcurrent; - bd->writeCopies++; - return len; - } - /* Write next byte into output buffer, updating CRC */ - outbuf[gotcount++] = xcurrent; - bd->writeCRC = (((bd->writeCRC) << 8) - ^bd->crc32Table[((bd->writeCRC) >> 24) - ^xcurrent]); - /* Loop now if we're outputting multiple - * copies of this byte */ - if (bd->writeCopies) { - --bd->writeCopies; - continue; - } -decode_next_byte: - if (!bd->writeCount--) - break; - /* Follow sequence vector to undo - * Burrows-Wheeler transform */ - previous = xcurrent; - pos = dbuf[pos]; - xcurrent = pos&0xff; - pos >>= 8; - /* After 3 consecutive copies of the same - byte, the 4th is a repeat count. 
We count - down from 4 instead *of counting up because - testing for non-zero is faster */ - if (--bd->writeRunCountdown) { - if (xcurrent != previous) - bd->writeRunCountdown = 4; - } else { - /* We have a repeated run, this byte - * indicates the count */ - bd->writeCopies = xcurrent; - xcurrent = previous; - bd->writeRunCountdown = 5; - /* Sometimes there are just 3 bytes - * (run length 0) */ - if (!bd->writeCopies) - goto decode_next_byte; - /* Subtract the 1 copy we'd output - * anyway to get extras */ - --bd->writeCopies; - } - } - /* Decompression of this block completed successfully */ - bd->writeCRC = ~bd->writeCRC; - bd->totalCRC = ((bd->totalCRC << 1) | - (bd->totalCRC >> 31)) ^ bd->writeCRC; - /* If this block had a CRC error, force file level CRC error. */ - if (bd->writeCRC != bd->headerCRC) { - bd->totalCRC = bd->headerCRC+1; - return RETVAL_LAST_BLOCK; - } - } - - /* Refill the intermediate buffer by Huffman-decoding next - * block of input */ - /* (previous is just a convenient unused temp variable here) */ - previous = get_next_block(bd); - if (previous) { - bd->writeCount = previous; - return (previous != RETVAL_LAST_BLOCK) ? previous : gotcount; - } - bd->writeCRC = 0xffffffffUL; - pos = bd->writePos; - xcurrent = bd->writeCurrent; - goto decode_next_byte; + const unsigned int *dbuf; + int pos, xcurrent, previous, gotcount; + + /* If last read was short due to end of file, return last block now */ + if ( bd->writeCount < 0 ) + return bd->writeCount; + + gotcount = 0; + dbuf = bd->dbuf; + pos = bd->writePos; + xcurrent = bd->writeCurrent; + + /* We will always have pending decoded data to write into the output + buffer unless this is the very first call (in which case we haven't + Huffman-decoded a block into the intermediate buffer yet). */ + + if ( bd->writeCopies ) + { + /* Inside the loop, writeCopies means extra copies (beyond 1) */ + --bd->writeCopies; + /* Loop outputting bytes */ + for ( ;; ) + { + /* If the output buffer is full, snapshot + * state and return */ + if ( gotcount >= len ) + { + bd->writePos = pos; + bd->writeCurrent = xcurrent; + bd->writeCopies++; + return len; + } + /* Write next byte into output buffer, updating CRC */ + outbuf[gotcount++] = xcurrent; + bd->writeCRC = (((bd->writeCRC) << 8) ^ + bd->crc32Table[((bd->writeCRC) >> 24) ^ xcurrent]); + /* Loop now if we're outputting multiple + * copies of this byte */ + if ( bd->writeCopies ) + { + --bd->writeCopies; + continue; + } + decode_next_byte: + if ( !bd->writeCount-- ) + break; + /* Follow sequence vector to undo + * Burrows-Wheeler transform */ + previous = xcurrent; + pos = dbuf[pos]; + xcurrent = pos & 0xff; + pos >>= 8; + /* After 3 consecutive copies of the same + byte, the 4th is a repeat count. We count + down from 4 instead *of counting up because + testing for non-zero is faster */ + if ( --bd->writeRunCountdown ) + { + if ( xcurrent != previous ) + bd->writeRunCountdown = 4; + } + else + { + /* We have a repeated run, this byte + * indicates the count */ + bd->writeCopies = xcurrent; + xcurrent = previous; + bd->writeRunCountdown = 5; + /* Sometimes there are just 3 bytes + * (run length 0) */ + if ( !bd->writeCopies ) + goto decode_next_byte; + /* Subtract the 1 copy we'd output + * anyway to get extras */ + --bd->writeCopies; + } + } + /* Decompression of this block completed successfully */ + bd->writeCRC = ~bd->writeCRC; + bd->totalCRC = + ((bd->totalCRC << 1) | (bd->totalCRC >> 31)) ^ bd->writeCRC; + /* If this block had a CRC error, force file level CRC error. 
*/ + if ( bd->writeCRC != bd->headerCRC ) + { + bd->totalCRC = bd->headerCRC + 1; + return RETVAL_LAST_BLOCK; + } + } + + /* Refill the intermediate buffer by Huffman-decoding next + * block of input */ + /* (previous is just a convenient unused temp variable here) */ + previous = get_next_block(bd); + if ( previous ) + { + bd->writeCount = previous; + return (previous != RETVAL_LAST_BLOCK) ? previous : gotcount; + } + bd->writeCRC = 0xffffffffUL; + pos = bd->writePos; + xcurrent = bd->writeCurrent; + goto decode_next_byte; } static int INIT nofill(void *buf, unsigned int len) { - return -1; + return -1; } /* Allocate the structure, read file header. If in_fd ==-1, inbuf must contain a complete bunzip file (len bytes long). If in_fd!=-1, inbuf and len are ignored, and data is read from file handle into temporary buffer. */ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len, - int (*fill)(void*, unsigned int)) + int (*fill)(void *, unsigned int)) { - struct bunzip_data *bd; - unsigned int i, j, c; - const unsigned int BZh0 = - (((unsigned int)'B') << 24)+(((unsigned int)'Z') << 16) - +(((unsigned int)'h') << 8)+(unsigned int)'0'; - - /* Figure out how much data to allocate */ - i = sizeof(struct bunzip_data); - - /* Allocate bunzip_data. Most fields initialize to zero. */ - bd = *bdp = malloc(i); - if (!bd) - return RETVAL_OUT_OF_MEMORY; - memset(bd, 0, sizeof(struct bunzip_data)); - /* Setup input buffer */ - bd->inbuf = inbuf; - bd->inbufCount = len; - if (fill != NULL) - bd->fill = fill; - else - bd->fill = nofill; - - /* Init the CRC32 table (big endian) */ - for (i = 0; i < 256; i++) { - c = i << 24; - for (j = 8; j; j--) - c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1); - bd->crc32Table[i] = c; - } - - /* Ensure that file starts with "BZh['1'-'9']." */ - i = get_bits(bd, 32); - if (((unsigned int)(i-BZh0-1)) >= 9) - return RETVAL_NOT_BZIP_DATA; - - /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of - uncompressed data. Allocate intermediate buffer for block. */ - bd->dbufSize = 100000*(i-BZh0); - - bd->dbuf = large_malloc(bd->dbufSize * sizeof(int)); - if (!bd->dbuf) - return RETVAL_OUT_OF_MEMORY; - return RETVAL_OK; + struct bunzip_data *bd; + unsigned int i, j, c; + const unsigned int BZh0 = (((unsigned int)'B') << 24) + + (((unsigned int)'Z') << 16) + + (((unsigned int)'h') << 8) + (unsigned int)'0'; + + /* Figure out how much data to allocate */ + i = sizeof(struct bunzip_data); + + /* Allocate bunzip_data. Most fields initialize to zero. */ + bd = *bdp = malloc(i); + if ( !bd ) + return RETVAL_OUT_OF_MEMORY; + memset(bd, 0, sizeof(struct bunzip_data)); + /* Setup input buffer */ + bd->inbuf = inbuf; + bd->inbufCount = len; + if ( fill != NULL ) + bd->fill = fill; + else + bd->fill = nofill; + + /* Init the CRC32 table (big endian) */ + for ( i = 0; i < 256; i++ ) + { + c = i << 24; + for ( j = 8; j; j-- ) + c = c & 0x80000000 ? (c << 1) ^ 0x04c11db7 : (c << 1); + bd->crc32Table[i] = c; + } + + /* Ensure that file starts with "BZh['1'-'9']." */ + i = get_bits(bd, 32); + if ( ((unsigned int)(i - BZh0 - 1)) >= 9 ) + return RETVAL_NOT_BZIP_DATA; + + /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of + uncompressed data. Allocate intermediate buffer for block. */ + bd->dbufSize = 100000 * (i - BZh0); + + bd->dbuf = large_malloc(bd->dbufSize * sizeof(int)); + if ( !bd->dbuf ) + return RETVAL_OUT_OF_MEMORY; + return RETVAL_OK; } /* Example usage: decompress src_fd to dst_fd. 
(Stops at end of bzip2 data, not end of file.) */ STATIC int INIT bunzip2(unsigned char *buf, unsigned int len, - int(*fill)(void*, unsigned int), - int(*flush)(void*, unsigned int), - unsigned char *outbuf, - unsigned int *pos, - void(*error)(const char *x)) + int (*fill)(void *, unsigned int), + int (*flush)(void *, unsigned int), + unsigned char *outbuf, unsigned int *pos, + void (*error)(const char *x)) { - struct bunzip_data *bd; - int i = -1; - unsigned char *inbuf; - - if (flush) - outbuf = malloc(BZIP2_IOBUF_SIZE); - - if (!outbuf) { - error("Could not allocate output buffer"); - return RETVAL_OUT_OF_MEMORY; - } - if (buf) - inbuf = buf; - else - inbuf = malloc(BZIP2_IOBUF_SIZE); - if (!inbuf) { - error("Could not allocate input buffer"); - i = RETVAL_OUT_OF_MEMORY; - goto exit_0; - } - i = start_bunzip(&bd, inbuf, len, fill); - if (!i) { - for (;;) { - i = read_bunzip(bd, outbuf, BZIP2_IOBUF_SIZE); - if (i <= 0) - break; - if (!flush) - outbuf += i; - else - if (i != flush(outbuf, i)) { - i = RETVAL_UNEXPECTED_OUTPUT_EOF; - break; - } - } - } - /* Check CRC and release memory */ - if (i == RETVAL_LAST_BLOCK) { - if (bd->headerCRC != bd->totalCRC) - error("Data integrity error when decompressing."); - else - i = RETVAL_OK; - } else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) { - error("Compressed file ends unexpectedly"); - } - if (!bd) - goto exit_1; - if (bd->dbuf) - large_free(bd->dbuf); - if (pos) - *pos = bd->inbufPos; - free(bd); + struct bunzip_data *bd; + int i = -1; + unsigned char *inbuf; + + if ( flush ) + outbuf = malloc(BZIP2_IOBUF_SIZE); + + if ( !outbuf ) + { + error("Could not allocate output buffer"); + return RETVAL_OUT_OF_MEMORY; + } + if ( buf ) + inbuf = buf; + else + inbuf = malloc(BZIP2_IOBUF_SIZE); + if ( !inbuf ) + { + error("Could not allocate input buffer"); + i = RETVAL_OUT_OF_MEMORY; + goto exit_0; + } + i = start_bunzip(&bd, inbuf, len, fill); + if ( !i ) + { + for ( ;; ) + { + i = read_bunzip(bd, outbuf, BZIP2_IOBUF_SIZE); + if ( i <= 0 ) + break; + if ( !flush ) + outbuf += i; + else if ( i != flush(outbuf, i) ) + { + i = RETVAL_UNEXPECTED_OUTPUT_EOF; + break; + } + } + } + /* Check CRC and release memory */ + if ( i == RETVAL_LAST_BLOCK ) + { + if ( bd->headerCRC != bd->totalCRC ) + error("Data integrity error when decompressing."); + else + i = RETVAL_OK; + } + else if ( i == RETVAL_UNEXPECTED_OUTPUT_EOF ) + { + error("Compressed file ends unexpectedly"); + } + if ( !bd ) + goto exit_1; + if ( bd->dbuf ) + large_free(bd->dbuf); + if ( pos ) + *pos = bd->inbufPos; + free(bd); exit_1: - if (!buf) - free(inbuf); + if ( !buf ) + free(inbuf); exit_0: - if (flush) - free(outbuf); - return i; + if ( flush ) + free(outbuf); + return i; } diff --git a/xen/common/compat/domain.c b/xen/common/compat/domain.c index 88bfdc836d..11ad5e8d73 100644 --- a/xen/common/compat/domain.c +++ b/xen/common/compat/domain.c @@ -33,7 +33,8 @@ CHECK_vcpu_hvm_context; #undef xen_vcpu_hvm_x86_32 #undef xen_vcpu_hvm_context -int compat_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg) +int compat_vcpu_op(int cmd, unsigned int vcpuid, + XEN_GUEST_HANDLE_PARAM(void) arg) { struct domain *d = current->domain; struct vcpu *v; @@ -42,7 +43,7 @@ int compat_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) ar if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL ) return -ENOENT; - switch ( cmd ) + switch (cmd) { case VCPUOP_initialise: { @@ -57,7 +58,8 @@ int compat_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) ar return 
-EFAULT; domain_lock(d); - rc = v->is_initialised ? -EEXIST : arch_set_info_hvm_guest(v, &ctxt); + rc = + v->is_initialised ? -EEXIST : arch_set_info_hvm_guest(v, &ctxt); domain_unlock(d); } else @@ -81,8 +83,8 @@ int compat_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) ar } if ( rc == -ERESTART ) - rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iuh", - cmd, vcpuid, arg); + rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iuh", cmd, + vcpuid, arg); break; } diff --git a/xen/common/compat/grant_table.c b/xen/common/compat/grant_table.c index ff1d678f01..b244b82046 100644 --- a/xen/common/compat/grant_table.c +++ b/xen/common/compat/grant_table.c @@ -67,54 +67,54 @@ int compat_grant_table_op(unsigned int cmd, cmd_op = cmd & GNTTABOP_CMD_MASK; if ( cmd_op != GNTTABOP_cache_flush ) cmd_op = cmd; - switch ( cmd_op ) + switch (cmd_op) { -#define CASE(name) \ - case GNTTABOP_##name: \ - if ( unlikely(!guest_handle_okay(guest_handle_cast(cmp_uop, \ - gnttab_##name##_compat_t), \ - count)) ) \ - rc = -EFAULT; \ +#define CASE(name) \ + case GNTTABOP_##name: \ + if ( unlikely(!guest_handle_okay( \ + guest_handle_cast(cmp_uop, gnttab_##name##_compat_t), \ + count)) ) \ + rc = -EFAULT; \ break #ifndef CHECK_gnttab_map_grant_ref - CASE(map_grant_ref); + CASE(map_grant_ref); #endif #ifndef CHECK_gnttab_unmap_grant_ref - CASE(unmap_grant_ref); + CASE(unmap_grant_ref); #endif #ifndef CHECK_gnttab_unmap_and_replace - CASE(unmap_and_replace); + CASE(unmap_and_replace); #endif #ifndef CHECK_gnttab_setup_table - CASE(setup_table); + CASE(setup_table); #endif #ifndef CHECK_gnttab_transfer - CASE(transfer); + CASE(transfer); #endif #ifndef CHECK_gnttab_copy - CASE(copy); + CASE(copy); #endif #ifndef CHECK_gnttab_dump_table - CASE(dump_table); + CASE(dump_table); #endif #ifndef CHECK_gnttab_get_status_frames - CASE(get_status_frames); + CASE(get_status_frames); #endif #ifndef CHECK_gnttab_swap_grant_ref - CASE(swap_grant_ref); + CASE(swap_grant_ref); #endif #ifndef CHECK_gnttab_cache_flush - CASE(cache_flush); + CASE(cache_flush); #endif #undef CASE @@ -143,14 +143,15 @@ int compat_grant_table_op(unsigned int cmd, } cmp; set_xen_guest_handle(nat.uop, COMPAT_ARG_XLAT_VIRT_BASE); - switch ( cmd_op ) + switch (cmd_op) { case GNTTABOP_setup_table: if ( unlikely(count > 1) ) rc = -EINVAL; else if ( unlikely(__copy_from_guest(&cmp.setup, cmp_uop, 1)) ) rc = -EFAULT; - else if ( unlikely(!compat_handle_okay(cmp.setup.frame_list, cmp.setup.nr_frames)) ) + else if ( unlikely(!compat_handle_okay(cmp.setup.frame_list, + cmp.setup.nr_frames)) ) rc = -EFAULT; else { @@ -159,30 +160,29 @@ int compat_grant_table_op(unsigned int cmd, sizeof(*nat.setup->frame_list.p); #define XLAT_gnttab_setup_table_HNDL_frame_list(_d_, _s_) \ - set_xen_guest_handle((_d_)->frame_list, (unsigned long *)(nat.setup + 1)) + set_xen_guest_handle((_d_)->frame_list, (unsigned long *)(nat.setup + 1)) XLAT_gnttab_setup_table(nat.setup, &cmp.setup); #undef XLAT_gnttab_setup_table_HNDL_frame_list - rc = gnttab_setup_table(guest_handle_cast(nat.uop, - gnttab_setup_table_t), - 1, max_frame_list_size_in_page); + rc = gnttab_setup_table( + guest_handle_cast(nat.uop, gnttab_setup_table_t), 1, + max_frame_list_size_in_page); } ASSERT(rc <= 0); if ( rc == 0 ) { -#define XLAT_gnttab_setup_table_HNDL_frame_list(_d_, _s_) \ - do \ - { \ - if ( (_s_)->status == GNTST_okay ) \ - { \ - for ( i = 0; i < (_s_)->nr_frames; ++i ) \ - { \ - unsigned int frame = (_s_)->frame_list.p[i]; \ - if ( 
__copy_to_compat_offset((_d_)->frame_list, \ - i, &frame, 1) ) \ - (_s_)->status = GNTST_bad_virt_addr; \ - } \ - } \ - } while (0) +#define XLAT_gnttab_setup_table_HNDL_frame_list(_d_, _s_) \ + do { \ + if ( (_s_)->status == GNTST_okay ) \ + { \ + for ( i = 0; i < (_s_)->nr_frames; ++i ) \ + { \ + unsigned int frame = (_s_)->frame_list.p[i]; \ + if ( __copy_to_compat_offset((_d_)->frame_list, i, &frame, \ + 1) ) \ + (_s_)->status = GNTST_bad_virt_addr; \ + } \ + } \ + } while ( 0 ) XLAT_gnttab_setup_table(&cmp.setup, nat.setup); #undef XLAT_gnttab_setup_table_HNDL_frame_list if ( unlikely(__copy_to_guest(cmp_uop, &cmp.setup, 1)) ) @@ -193,9 +193,12 @@ int compat_grant_table_op(unsigned int cmd, break; case GNTTABOP_transfer: - for ( n = 0; n < COMPAT_ARG_XLAT_SIZE / sizeof(*nat.xfer) && i < count && rc == 0; ++i, ++n ) + for ( n = 0; n < COMPAT_ARG_XLAT_SIZE / sizeof(*nat.xfer) && + i < count && rc == 0; + ++i, ++n ) { - if ( unlikely(__copy_from_guest_offset(&cmp.xfer, cmp_uop, i, 1)) ) + if ( unlikely( + __copy_from_guest_offset(&cmp.xfer, cmp_uop, i, 1)) ) rc = -EFAULT; else { @@ -203,7 +206,8 @@ int compat_grant_table_op(unsigned int cmd, } } if ( rc == 0 ) - rc = gnttab_transfer(guest_handle_cast(nat.uop, gnttab_transfer_t), n); + rc = gnttab_transfer( + guest_handle_cast(nat.uop, gnttab_transfer_t), n); if ( rc > 0 ) { ASSERT(rc < n); @@ -227,9 +231,12 @@ int compat_grant_table_op(unsigned int cmd, break; case GNTTABOP_copy: - for ( n = 0; n < COMPAT_ARG_XLAT_SIZE / sizeof(*nat.copy) && i < count && rc == 0; ++i, ++n ) + for ( n = 0; n < COMPAT_ARG_XLAT_SIZE / sizeof(*nat.copy) && + i < count && rc == 0; + ++i, ++n ) { - if ( unlikely(__copy_from_guest_offset(&cmp.copy, cmp_uop, i, 1)) ) + if ( unlikely( + __copy_from_guest_offset(&cmp.copy, cmp_uop, i, 1)) ) rc = -EFAULT; else { @@ -271,11 +278,12 @@ int compat_grant_table_op(unsigned int cmd, } break; - case GNTTABOP_get_status_frames: { + case GNTTABOP_get_status_frames: + { unsigned int max_frame_list_size_in_pages = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.get_status)) / sizeof(*nat.get_status->frame_list.p); - if ( count != 1) + if ( count != 1 ) { rc = -EINVAL; break; @@ -289,29 +297,28 @@ int compat_grant_table_op(unsigned int cmd, } #define XLAT_gnttab_get_status_frames_HNDL_frame_list(_d_, _s_) \ - set_xen_guest_handle((_d_)->frame_list, (uint64_t *)(nat.get_status + 1)) + set_xen_guest_handle((_d_)->frame_list, (uint64_t *)(nat.get_status + 1)) XLAT_gnttab_get_status_frames(nat.get_status, &cmp.get_status); #undef XLAT_gnttab_get_status_frames_HNDL_frame_list rc = gnttab_get_status_frames( - guest_handle_cast(nat.uop, gnttab_get_status_frames_t), - count, max_frame_list_size_in_pages); + guest_handle_cast(nat.uop, gnttab_get_status_frames_t), count, + max_frame_list_size_in_pages); if ( rc >= 0 ) { -#define XLAT_gnttab_get_status_frames_HNDL_frame_list(_d_, _s_) \ - do \ - { \ - if ( (_s_)->status == GNTST_okay ) \ - { \ - for ( i = 0; i < (_s_)->nr_frames; ++i ) \ - { \ - uint64_t frame = (_s_)->frame_list.p[i]; \ - if ( __copy_to_compat_offset((_d_)->frame_list, \ - i, &frame, 1) ) \ - (_s_)->status = GNTST_bad_virt_addr; \ - } \ - } \ - } while (0) +#define XLAT_gnttab_get_status_frames_HNDL_frame_list(_d_, _s_) \ + do { \ + if ( (_s_)->status == GNTST_okay ) \ + { \ + for ( i = 0; i < (_s_)->nr_frames; ++i ) \ + { \ + uint64_t frame = (_s_)->frame_list.p[i]; \ + if ( __copy_to_compat_offset((_d_)->frame_list, i, &frame, \ + 1) ) \ + (_s_)->status = GNTST_bad_virt_addr; \ + } \ + } \ + } while ( 0 ) 
XLAT_gnttab_get_status_frames(&cmp.get_status, nat.get_status); #undef XLAT_gnttab_get_status_frames_HNDL_frame_list if ( unlikely(__copy_to_guest(cmp_uop, &cmp.get_status, 1)) ) @@ -332,8 +339,8 @@ int compat_grant_table_op(unsigned int cmd, { ASSERT(i < count); ASSERT(!guest_handle_is_null(cnt_uop)); - rc = hypercall_create_continuation(__HYPERVISOR_grant_table_op, - "ihi", cmd, cnt_uop, count - i); + rc = hypercall_create_continuation(__HYPERVISOR_grant_table_op, "ihi", + cmd, cnt_uop, count - i); } return rc; diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c index 13fd64ddf5..3ec105e6e4 100644 --- a/xen/common/compat/memory.c +++ b/xen/common/compat/memory.c @@ -18,13 +18,14 @@ CHECK_TYPE(domid); CHECK_vmemrange; #ifdef CONFIG_HAS_PASSTHROUGH -struct get_reserved_device_memory { +struct get_reserved_device_memory +{ struct compat_reserved_device_memory_map map; unsigned int used_entries; }; -static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr, - u32 id, void *ctxt) +static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr, u32 id, + void *ctxt) { struct get_reserved_device_memory *grdm = ctxt; u32 sbdf = PCI_SBDF3(grdm->map.dev.pci.seg, grdm->map.dev.pci.bus, @@ -35,15 +36,14 @@ static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr, if ( grdm->used_entries < grdm->map.nr_entries ) { - struct compat_reserved_device_memory rdm = { - .start_pfn = start, .nr_pages = nr - }; + struct compat_reserved_device_memory rdm = {.start_pfn = start, + .nr_pages = nr}; if ( rdm.start_pfn != start || rdm.nr_pages != nr ) return -ERANGE; - if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries, - &rdm, 1) ) + if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries, &rdm, + 1) ) return -EFAULT; } @@ -59,8 +59,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) long rc; unsigned int start_extent = cmd >> MEMOP_EXTENT_SHIFT; - do - { + do { unsigned int i, end_extent = 0; union { XEN_GUEST_HANDLE_PARAM(void) hnd; @@ -85,7 +84,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) set_xen_guest_handle(nat.hnd, COMPAT_ARG_XLAT_VIRT_BASE); split = 0; - switch ( op ) + switch (op) { xen_pfn_t *space; @@ -100,42 +99,44 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) return start_extent; if ( !compat_handle_is_null(cmp.rsrv.extent_start) && - !compat_handle_okay(cmp.rsrv.extent_start, cmp.rsrv.nr_extents) ) + !compat_handle_okay(cmp.rsrv.extent_start, + cmp.rsrv.nr_extents) ) return start_extent; - end_extent = start_extent + (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.rsrv)) / - sizeof(*space); + end_extent = + start_extent + + (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.rsrv)) / sizeof(*space); if ( end_extent > cmp.rsrv.nr_extents ) end_extent = cmp.rsrv.nr_extents; space = (xen_pfn_t *)(nat.rsrv + 1); -#define XLAT_memory_reservation_HNDL_extent_start(_d_, _s_) \ - do \ - { \ - if ( !compat_handle_is_null((_s_)->extent_start) ) \ - { \ - set_xen_guest_handle((_d_)->extent_start, space - start_extent); \ - if ( op != XENMEM_increase_reservation ) \ - { \ - for ( i = start_extent; i < end_extent; ++i ) \ - { \ - compat_pfn_t pfn; \ - if ( __copy_from_compat_offset(&pfn, (_s_)->extent_start, i, 1) ) \ - { \ - end_extent = i; \ - split = -1; \ - break; \ - } \ - *space++ = pfn; \ - } \ - } \ - } \ - else \ - { \ - set_xen_guest_handle((_d_)->extent_start, NULL); \ - end_extent = cmp.rsrv.nr_extents; \ - } \ - } while (0) +#define 
XLAT_memory_reservation_HNDL_extent_start(_d_, _s_) \ + do { \ + if ( !compat_handle_is_null((_s_)->extent_start) ) \ + { \ + set_xen_guest_handle((_d_)->extent_start, space - start_extent); \ + if ( op != XENMEM_increase_reservation ) \ + { \ + for ( i = start_extent; i < end_extent; ++i ) \ + { \ + compat_pfn_t pfn; \ + if ( __copy_from_compat_offset(&pfn, (_s_)->extent_start, \ + i, 1) ) \ + { \ + end_extent = i; \ + split = -1; \ + break; \ + } \ + *space++ = pfn; \ + } \ + } \ + } \ + else \ + { \ + set_xen_guest_handle((_d_)->extent_start, NULL); \ + end_extent = cmp.rsrv.nr_extents; \ + } \ + } while ( 0 ) XLAT_memory_reservation(nat.rsrv, &cmp.rsrv); #undef XLAT_memory_reservation_HNDL_extent_start @@ -157,10 +158,12 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) order_delta = cmp.xchg.out.extent_order - cmp.xchg.in.extent_order; /* Various sanity checks. */ if ( (cmp.xchg.nr_exchanged > cmp.xchg.in.nr_extents) || - (order_delta > 0 && (cmp.xchg.nr_exchanged & ((1U << order_delta) - 1))) || + (order_delta > 0 && + (cmp.xchg.nr_exchanged & ((1U << order_delta) - 1))) || /* Sizes of input and output lists do not overflow an int? */ ((~0U >> cmp.xchg.in.extent_order) < cmp.xchg.in.nr_extents) || - ((~0U >> cmp.xchg.out.extent_order) < cmp.xchg.out.nr_extents) || + ((~0U >> cmp.xchg.out.extent_order) < + cmp.xchg.out.nr_extents) || /* Sizes of input and output lists match? */ ((cmp.xchg.in.nr_extents << cmp.xchg.in.extent_order) != (cmp.xchg.out.nr_extents << cmp.xchg.out.extent_order)) ) @@ -174,11 +177,11 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) start_extent = cmp.xchg.nr_exchanged; end_extent = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.xchg)) / - (((1U << ABS(order_delta)) + 1) * - sizeof(*space)); + (((1U << ABS(order_delta)) + 1) * sizeof(*space)); if ( end_extent == 0 ) { - printk("Cannot translate compatibility mode XENMEM_exchange extents (%u,%u)\n", + printk("Cannot translate compatibility mode XENMEM_exchange " + "extents (%u,%u)\n", cmp.xchg.in.extent_order, cmp.xchg.out.extent_order); return -E2BIG; } @@ -190,30 +193,30 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) space = (xen_pfn_t *)(nat.xchg + 1); /* Code below depends upon .in preceding .out. 
*/ - BUILD_BUG_ON(offsetof(xen_memory_exchange_t, in) > offsetof(xen_memory_exchange_t, out)); -#define XLAT_memory_reservation_HNDL_extent_start(_d_, _s_) \ - do \ - { \ - set_xen_guest_handle((_d_)->extent_start, space - start_extent); \ - for ( i = start_extent; i < end_extent; ++i ) \ - { \ - compat_pfn_t pfn; \ - if ( __copy_from_compat_offset(&pfn, (_s_)->extent_start, i, 1) ) \ - return -EFAULT; \ - *space++ = pfn; \ - } \ - if ( order_delta > 0 ) \ - { \ - start_extent >>= order_delta; \ - end_extent >>= order_delta; \ - } \ - else \ - { \ - start_extent <<= -order_delta; \ - end_extent <<= -order_delta; \ - } \ - order_delta = -order_delta; \ - } while (0) + BUILD_BUG_ON(offsetof(xen_memory_exchange_t, in) > + offsetof(xen_memory_exchange_t, out)); +#define XLAT_memory_reservation_HNDL_extent_start(_d_, _s_) \ + do { \ + set_xen_guest_handle((_d_)->extent_start, space - start_extent); \ + for ( i = start_extent; i < end_extent; ++i ) \ + { \ + compat_pfn_t pfn; \ + if ( __copy_from_compat_offset(&pfn, (_s_)->extent_start, i, 1) ) \ + return -EFAULT; \ + *space++ = pfn; \ + } \ + if ( order_delta > 0 ) \ + { \ + start_extent >>= order_delta; \ + end_extent >>= order_delta; \ + } \ + else \ + { \ + start_extent <<= -order_delta; \ + end_extent <<= -order_delta; \ + } \ + order_delta = -order_delta; \ + } while ( 0 ) XLAT_memory_exchange(nat.xchg, &cmp.xchg); #undef XLAT_memory_reservation_HNDL_extent_start @@ -238,7 +241,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) break; case XENMEM_add_to_physmap: - BUILD_BUG_ON((typeof(cmp.atp.size))-1 > + BUILD_BUG_ON((typeof(cmp.atp.size)) - 1 > (UINT_MAX >> MEMOP_EXTENT_SHIFT)); if ( copy_from_guest(&cmp.atp, compat, 1) ) @@ -250,8 +253,9 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) case XENMEM_add_to_physmap_batch: { - unsigned int limit = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.atpb)) - / (sizeof(nat.atpb->idxs.p) + sizeof(nat.atpb->gpfns.p)); + unsigned int limit = + (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.atpb)) / + (sizeof(nat.atpb->idxs.p) + sizeof(nat.atpb->gpfns.p)); /* Use an intermediate variable to suppress warnings on old gcc: */ unsigned int size; xen_ulong_t *idxs = (void *)(nat.atpb + 1); @@ -292,11 +296,11 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) } #define XLAT_add_to_physmap_batch_HNDL_idxs(_d_, _s_) \ - set_xen_guest_handle((_d_)->idxs, idxs) + set_xen_guest_handle((_d_)->idxs, idxs) #define XLAT_add_to_physmap_batch_HNDL_gpfns(_d_, _s_) \ - set_xen_guest_handle((_d_)->gpfns, gpfns) + set_xen_guest_handle((_d_)->gpfns, gpfns) #define XLAT_add_to_physmap_batch_HNDL_errs(_d_, _s_) \ - guest_from_compat_handle((_d_)->errs, (_s_)->errs) + guest_from_compat_handle((_d_)->errs, (_s_)->errs) XLAT_add_to_physmap_batch(nat.atpb, &cmp.atpb); @@ -328,17 +332,17 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) case XENMEM_access_op: if ( copy_from_guest(&cmp.mao, compat, 1) ) return -EFAULT; - -#define XLAT_mem_access_op_HNDL_pfn_list(_d_, _s_) \ - guest_from_compat_handle((_d_)->pfn_list, (_s_)->pfn_list) -#define XLAT_mem_access_op_HNDL_access_list(_d_, _s_) \ - guest_from_compat_handle((_d_)->access_list, (_s_)->access_list) - + +#define XLAT_mem_access_op_HNDL_pfn_list(_d_, _s_) \ + guest_from_compat_handle((_d_)->pfn_list, (_s_)->pfn_list) +#define XLAT_mem_access_op_HNDL_access_list(_d_, _s_) \ + guest_from_compat_handle((_d_)->access_list, (_s_)->access_list) + XLAT_mem_access_op(nat.mao, &cmp.mao); - + 
#undef XLAT_mem_access_op_HNDL_pfn_list #undef XLAT_mem_access_op_HNDL_access_list - + break; case XENMEM_get_vnumainfo: @@ -353,12 +357,12 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) if ( copy_from_guest(&cmp.vnuma, compat, 1) ) return -EFAULT; -#define XLAT_vnuma_topology_info_HNDL_vdistance_h(_d_, _s_) \ - guest_from_compat_handle((_d_)->vdistance.h, (_s_)->vdistance.h) -#define XLAT_vnuma_topology_info_HNDL_vcpu_to_vnode_h(_d_, _s_) \ - guest_from_compat_handle((_d_)->vcpu_to_vnode.h, (_s_)->vcpu_to_vnode.h) -#define XLAT_vnuma_topology_info_HNDL_vmemrange_h(_d_, _s_) \ - guest_from_compat_handle((_d_)->vmemrange.h, (_s_)->vmemrange.h) +#define XLAT_vnuma_topology_info_HNDL_vdistance_h(_d_, _s_) \ + guest_from_compat_handle((_d_)->vdistance.h, (_s_)->vdistance.h) +#define XLAT_vnuma_topology_info_HNDL_vcpu_to_vnode_h(_d_, _s_) \ + guest_from_compat_handle((_d_)->vcpu_to_vnode.h, (_s_)->vcpu_to_vnode.h) +#define XLAT_vnuma_topology_info_HNDL_vmemrange_h(_d_, _s_) \ + guest_from_compat_handle((_d_)->vmemrange.h, (_s_)->vmemrange.h) XLAT_vnuma_topology_info(nat.vnuma, &cmp.vnuma); @@ -412,7 +416,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) * frame addresses. */ max_nr_frames = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.mar)) / - sizeof(*xen_frame_list); + sizeof(*xen_frame_list); if ( cmp.mar.nr_frames > max_nr_frames ) return -E2BIG; @@ -431,8 +435,8 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) { compat_pfn_t frame; - if ( __copy_from_compat_offset( - &frame, cmp.mar.frame_list, i, 1) ) + if ( __copy_from_compat_offset(&frame, cmp.mar.frame_list, + i, 1) ) return -EFAULT; xen_frame_list[i] = frame; @@ -440,7 +444,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) } #define XLAT_mem_acquire_resource_HNDL_frame_list(_d_, _s_) \ - set_xen_guest_handle((_d_)->frame_list, xen_frame_list) + set_xen_guest_handle((_d_)->frame_list, xen_frame_list) XLAT_mem_acquire_resource(nat.mar, &cmp.mar); @@ -474,7 +478,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) split = -1; } - switch ( op ) + switch (op) { case XENMEM_increase_reservation: case XENMEM_decrease_reservation: @@ -552,9 +556,9 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) } cmp.xchg.nr_exchanged = nat.xchg->nr_exchanged; - if ( __copy_field_to_guest(guest_handle_cast(compat, - compat_memory_exchange_t), - &cmp.xchg, nr_exchanged) ) + if ( __copy_field_to_guest( + guest_handle_cast(compat, compat_memory_exchange_t), + &cmp.xchg, nr_exchanged) ) rc = -EFAULT; if ( rc < 0 ) @@ -643,8 +647,8 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) cmd = op | (start_extent << MEMOP_EXTENT_SHIFT); if ( split > 0 && hypercall_preempt_check() ) - return hypercall_create_continuation( - __HYPERVISOR_memory_op, "ih", cmd, compat); + return hypercall_create_continuation(__HYPERVISOR_memory_op, "ih", + cmd, compat); } while ( split > 0 ); if ( unlikely(rc > INT_MAX) ) diff --git a/xen/common/compat/multicall.c b/xen/common/compat/multicall.c index 43d2d8152d..4a8d0ae279 100644 --- a/xen/common/compat/multicall.c +++ b/xen/common/compat/multicall.c @@ -15,16 +15,16 @@ typedef int ret_t; static inline void xlat_multicall_entry(struct mc_state *mcs) { int i; - for (i=0; i<6; i++) + for ( i = 0; i < 6; i++ ) mcs->compat_call.args[i] = mcs->call.args[i]; } DEFINE_XEN_GUEST_HANDLE(multicall_entry_compat_t); -#define multicall_entry 
compat_multicall_entry -#define multicall_entry_t multicall_entry_compat_t -#define do_multicall_call compat_multicall_call -#define call compat_call -#define do_multicall(l, n) compat_multicall(_##l, n) +#define multicall_entry compat_multicall_entry +#define multicall_entry_t multicall_entry_compat_t +#define do_multicall_call compat_multicall_call +#define call compat_call +#define do_multicall(l, n) compat_multicall(_##l, n) #define _XEN_GUEST_HANDLE(t) XEN_GUEST_HANDLE(t) #define _XEN_GUEST_HANDLE_PARAM(t) XEN_GUEST_HANDLE(t) diff --git a/xen/common/core_parking.c b/xen/common/core_parking.c index 803c6c7911..4aa4d1afd7 100644 --- a/xen/common/core_parking.c +++ b/xen/common/core_parking.c @@ -29,12 +29,13 @@ static unsigned int core_parking_power(unsigned int event); static unsigned int core_parking_performance(unsigned int event); static uint32_t cur_idle_nums; -static unsigned int core_parking_cpunum[NR_CPUS] = {[0 ... NR_CPUS-1] = -1}; +static unsigned int core_parking_cpunum[NR_CPUS] = {[0 ... NR_CPUS - 1] = -1}; -static struct core_parking_policy { +static struct core_parking_policy +{ char name[30]; unsigned int (*next)(unsigned int event); -} *core_parking_policy; +} * core_parking_policy; static enum core_parking_controller { POWER_FIRST, @@ -58,7 +59,7 @@ static unsigned int core_parking_performance(unsigned int event) { unsigned int cpu = -1; - switch ( event ) + switch (event) { case CORE_PARKING_INCREMENT: { @@ -68,7 +69,7 @@ static unsigned int core_parking_performance(unsigned int event) cpumask_clear(&core_candidate_map); cpumask_clear(&sibling_candidate_map); - for_each_cpu(cpu, &cpu_online_map) + for_each_cpu (cpu, &cpu_online_map) { if ( cpu == 0 ) continue; @@ -83,7 +84,7 @@ static unsigned int core_parking_performance(unsigned int event) __cpumask_set_cpu(cpu, &core_candidate_map); } - for_each_cpu(cpu, &core_candidate_map) + for_each_cpu (cpu, &core_candidate_map) { sibling_tmp = cpumask_weight(per_cpu(cpu_sibling_mask, cpu)); if ( sibling_weight < sibling_tmp ) @@ -101,7 +102,7 @@ static unsigned int core_parking_performance(unsigned int event) case CORE_PARKING_DECREMENT: { - cpu = core_parking_cpunum[cur_idle_nums -1]; + cpu = core_parking_cpunum[cur_idle_nums - 1]; } break; @@ -116,7 +117,7 @@ static unsigned int core_parking_power(unsigned int event) { unsigned int cpu = -1; - switch ( event ) + switch (event) { case CORE_PARKING_INCREMENT: { @@ -126,7 +127,7 @@ static unsigned int core_parking_power(unsigned int event) cpumask_clear(&core_candidate_map); cpumask_clear(&sibling_candidate_map); - for_each_cpu(cpu, &cpu_online_map) + for_each_cpu (cpu, &cpu_online_map) { if ( cpu == 0 ) continue; @@ -141,7 +142,7 @@ static unsigned int core_parking_power(unsigned int event) __cpumask_set_cpu(cpu, &core_candidate_map); } - for_each_cpu(cpu, &core_candidate_map) + for_each_cpu (cpu, &core_candidate_map) { sibling_tmp = cpumask_weight(per_cpu(cpu_sibling_mask, cpu)); if ( sibling_weight > sibling_tmp ) @@ -159,7 +160,7 @@ static unsigned int core_parking_power(unsigned int event) case CORE_PARKING_DECREMENT: { - cpu = core_parking_cpunum[cur_idle_nums -1]; + cpu = core_parking_cpunum[cur_idle_nums - 1]; } break; diff --git a/xen/common/coverage/coverage.c b/xen/common/coverage/coverage.c index bd90f28663..8c2609fc3a 100644 --- a/xen/common/coverage/coverage.c +++ b/xen/common/coverage/coverage.c @@ -29,7 +29,7 @@ int sysctl_cov_op(struct xen_sysctl_coverage_op *op) { int ret; - switch ( op->cmd ) + switch (op->cmd) { case XEN_SYSCTL_COVERAGE_get_size: op->size = 
cov_ops.get_size(); diff --git a/xen/common/coverage/gcc_3_4.c b/xen/common/coverage/gcc_3_4.c index 3631f4bc25..1dee4dbd5e 100644 --- a/xen/common/coverage/gcc_3_4.c +++ b/xen/common/coverage/gcc_3_4.c @@ -17,7 +17,6 @@ * Wei Liu */ - #include #include "gcov.h" @@ -78,14 +77,14 @@ struct gcov_ctr_info */ struct gcov_info { - unsigned int version; - struct gcov_info *next; - unsigned int stamp; - const char *filename; - unsigned int n_functions; + unsigned int version; + struct gcov_info *next; + unsigned int stamp; + const char *filename; + unsigned int n_functions; const struct gcov_fn_info *functions; - unsigned int ctr_mask; - struct gcov_ctr_info counts[0]; + unsigned int ctr_mask; + struct gcov_ctr_info counts[0]; }; /** @@ -108,7 +107,8 @@ struct gcov_info * * See gcc source gcc/gcov-io.h for more information on data organization. */ -struct type_info { +struct type_info +{ int ctr_type; unsigned int offset; }; @@ -123,7 +123,8 @@ struct type_info { * @num_types: number of counter types * @type_info: helper array to get values-array offset for current function */ -struct gcov_iterator { +struct gcov_iterator +{ const struct gcov_info *info; int record; @@ -136,16 +137,16 @@ struct gcov_iterator { }; /* Mapping of logical record number to actual file content. */ -#define RECORD_FILE_MAGIC 0 -#define RECORD_GCOV_VERSION 1 -#define RECORD_TIME_STAMP 2 -#define RECORD_FUNCTION_TAG 3 -#define RECORD_FUNCTON_TAG_LEN 4 -#define RECORD_FUNCTION_IDENT 5 -#define RECORD_FUNCTION_CHECK 6 -#define RECORD_COUNT_TAG 7 -#define RECORD_COUNT_LEN 8 -#define RECORD_COUNT 9 +#define RECORD_FILE_MAGIC 0 +#define RECORD_GCOV_VERSION 1 +#define RECORD_TIME_STAMP 2 +#define RECORD_FUNCTION_TAG 3 +#define RECORD_FUNCTON_TAG_LEN 4 +#define RECORD_FUNCTION_IDENT 5 +#define RECORD_FUNCTION_CHECK 6 +#define RECORD_COUNT_TAG 7 +#define RECORD_COUNT_LEN 8 +#define RECORD_COUNT 9 static int counter_active(const struct gcov_info *info, unsigned int type) { @@ -197,8 +198,8 @@ static size_t get_fn_size(const struct gcov_info *info) { size_t size; - size = sizeof(struct gcov_fn_info) + num_counter_active(info) * - sizeof(unsigned int); + size = sizeof(struct gcov_fn_info) + + num_counter_active(info) * sizeof(unsigned int); if ( __alignof__(struct gcov_fn_info) > sizeof(unsigned int) ) size = ROUNDUP(size, __alignof__(struct gcov_fn_info)); return size; @@ -207,8 +208,8 @@ static size_t get_fn_size(const struct gcov_info *info) static struct gcov_fn_info *get_fn_info(const struct gcov_info *info, unsigned int fn) { - return (struct gcov_fn_info *) - ((char *) info->functions + fn * get_fn_size(info)); + return (struct gcov_fn_info *)((char *)info->functions + + fn * get_fn_size(info)); } static struct gcov_fn_info *get_func(struct gcov_iterator *iter) @@ -229,7 +230,7 @@ static struct type_info *get_type(struct gcov_iterator *iter) */ static int gcov_iter_next(struct gcov_iterator *iter) { - switch ( iter->record ) + switch (iter->record) { case RECORD_FILE_MAGIC: case RECORD_GCOV_VERSION: @@ -287,12 +288,11 @@ static int gcov_iter_next(struct gcov_iterator *iter) * * Return number of bytes written into buffer. 
*/ -static size_t gcov_iter_write(struct gcov_iterator *iter, char *buf, - size_t pos) +static size_t gcov_iter_write(struct gcov_iterator *iter, char *buf, size_t pos) { size_t ret = 0; - switch ( iter->record ) + switch (iter->record) { case RECORD_FILE_MAGIC: ret = gcov_store_uint32(buf, pos, GCOV_DATA_MAGIC); @@ -320,12 +320,14 @@ static size_t gcov_iter_write(struct gcov_iterator *iter, char *buf, GCOV_TAG_FOR_COUNTER(get_type(iter)->ctr_type)); break; case RECORD_COUNT_LEN: - ret = gcov_store_uint32(buf, pos, - get_func(iter)->n_ctrs[iter->type] * 2); + ret = + gcov_store_uint32(buf, pos, get_func(iter)->n_ctrs[iter->type] * 2); break; case RECORD_COUNT: - ret = gcov_store_uint64(buf, pos, iter->info->counts[iter->type]. - values[iter->count + get_type(iter)->offset]); + ret = gcov_store_uint64( + buf, pos, + iter->info->counts[iter->type] + .values[iter->count + get_type(iter)->offset]); break; } @@ -335,7 +337,7 @@ static size_t gcov_iter_write(struct gcov_iterator *iter, char *buf, /* If buffer is NULL, no data is written. */ size_t gcov_info_to_gcda(char *buffer, const struct gcov_info *info) { - struct gcov_iterator iter = { .info = info }; + struct gcov_iterator iter = {.info = info}; unsigned int i; size_t pos = 0; diff --git a/xen/common/coverage/gcc_4_7.c b/xen/common/coverage/gcc_4_7.c index 25b4a8bcdc..db6c237b27 100644 --- a/xen/common/coverage/gcc_4_7.c +++ b/xen/common/coverage/gcc_4_7.c @@ -23,13 +23,13 @@ * source files. */ #ifndef GCOV_COUNTERS -# if !(GCC_VERSION >= 40700 && GCC_VERSION < 40900) -# error "Wrong version of GCC used to compile gcov" -# endif +#if !(GCC_VERSION >= 40700 && GCC_VERSION < 40900) +#error "Wrong version of GCC used to compile gcov" +#endif #define GCOV_COUNTERS 8 #endif -#define GCOV_TAG_FUNCTION_LENGTH 3 +#define GCOV_TAG_FUNCTION_LENGTH 3 static struct gcov_info *gcov_info_head; @@ -41,7 +41,8 @@ static struct gcov_info *gcov_info_head; * This data is generated by gcc during compilation and doesn't change * at run-time with the exception of the values array. */ -struct gcov_ctr_info { +struct gcov_ctr_info +{ unsigned int num; gcov_type *values; }; @@ -63,7 +64,8 @@ struct gcov_ctr_info { * comdat functions was selected -- it points to the gcov_info object * of the object file containing the selected comdat function. */ -struct gcov_fn_info { +struct gcov_fn_info +{ const struct gcov_info *key; unsigned int ident; unsigned int lineno_checksum; @@ -84,7 +86,8 @@ struct gcov_fn_info { * This data is generated by gcc during compilation and doesn't change * at run-time with the exception of the next pointer. */ -struct gcov_info { +struct gcov_info +{ unsigned int version; struct gcov_info *next; unsigned int stamp; @@ -138,7 +141,6 @@ const char *gcov_info_filename(const struct gcov_info *info) return info->filename; } - /** * gcov_info_to_gcda - convert profiling data set to gcda file format * @buffer: the buffer to store file data or %NULL if no data should be stored @@ -175,12 +177,11 @@ size_t gcov_info_to_gcda(char *buffer, const struct gcov_info *info) for ( ct_idx = 0; ct_idx < GCOV_COUNTERS; ct_idx++ ) { - if (! counter_active(info, ct_idx) ) + if ( !counter_active(info, ct_idx) ) continue; /* Counter record. 
*/ - pos += gcov_store_uint32(buffer, pos, - GCOV_TAG_FOR_COUNTER(ct_idx)); + pos += gcov_store_uint32(buffer, pos, GCOV_TAG_FOR_COUNTER(ct_idx)); pos += gcov_store_uint32(buffer, pos, ci_ptr->num * 2); for ( cv_idx = 0; cv_idx < ci_ptr->num; cv_idx++ ) diff --git a/xen/common/coverage/gcov.c b/xen/common/coverage/gcov.c index 3cc98728bf..a0075a3110 100644 --- a/xen/common/coverage/gcov.c +++ b/xen/common/coverage/gcov.c @@ -114,10 +114,9 @@ static int gcov_info_dump_payload(const struct gcov_info *info, *off += buf_size; ret = 0; - out: +out: xfree(buf); return ret; - } static uint32_t gcov_get_size(void) @@ -163,7 +162,7 @@ static int gcov_dump_one_record(const struct gcov_info *info, payload_size = gcov_info_payload_size(info); /* Payload size */ - if ( copy_to_guest_offset(buffer, *off, (char*)&payload_size, + if ( copy_to_guest_offset(buffer, *off, (char *)&payload_size, sizeof(uint32_t)) ) return -EFAULT; *off += sizeof(uint32_t); @@ -206,7 +205,7 @@ static int gcov_dump_all(XEN_GUEST_HANDLE_PARAM(char) buffer, *buffer_size = off; ret = 0; - out: +out: return ret; } diff --git a/xen/common/coverage/llvm.c b/xen/common/coverage/llvm.c index 766c07fd5d..cf3145eaaf 100644 --- a/xen/common/coverage/llvm.c +++ b/xen/common/coverage/llvm.c @@ -35,9 +35,10 @@ #endif #if BITS_PER_LONG == 64 -#define LLVM_PROFILE_MAGIC (((uint64_t)255 << 56) | ((uint64_t)'l' << 48) | \ - ((uint64_t)'p' << 40) | ((uint64_t)'r' << 32) | ((uint64_t)'o' << 24) | \ - ((uint64_t)'f' << 16) | ((uint64_t)'r' << 8) | ((uint64_t)129)) +#define LLVM_PROFILE_MAGIC \ + (((uint64_t)255 << 56) | ((uint64_t)'l' << 48) | ((uint64_t)'p' << 40) | \ + ((uint64_t)'r' << 32) | ((uint64_t)'o' << 24) | ((uint64_t)'f' << 16) | \ + ((uint64_t)'r' << 8) | ((uint64_t)129)) #else #define LLVM_PROFILE_MAGIC (((uint64_t)255 << 56) | ((uint64_t)'l' << 48) | \ ((uint64_t)'p' << 40) | ((uint64_t)'r' << 32) | ((uint64_t)'o' << 24) | \ @@ -45,13 +46,14 @@ #endif #if __clang_major__ >= 4 || (__clang_major__ == 3 && __clang_minor__ >= 9) -#define LLVM_PROFILE_VERSION 4 -#define LLVM_PROFILE_NUM_KINDS 2 +#define LLVM_PROFILE_VERSION 4 +#define LLVM_PROFILE_NUM_KINDS 2 #else #error "clang version not supported with coverage" #endif -struct llvm_profile_data { +struct llvm_profile_data +{ uint64_t name_ref; uint64_t function_hash; void *counter; @@ -61,7 +63,8 @@ struct llvm_profile_data { uint16_t nr_value_sites[LLVM_PROFILE_NUM_KINDS]; }; -struct llvm_profile_header { +struct llvm_profile_header +{ uint64_t magic; uint64_t version; uint64_t data_size; @@ -76,7 +79,7 @@ struct llvm_profile_header { * Since Xen uses the llvm code coverage support without the run time library * __llvm_profile_runtime must be defined according to the docs at: * - * https://clang.llvm.org/docs/SourceBasedCodeCoverage.html + * https://clang.llvm.org/docs/SourceBasedCodeCoverage.html */ int __llvm_profile_runtime; @@ -87,12 +90,12 @@ extern const char __stop___llvm_prf_names[]; extern uint64_t __start___llvm_prf_cnts[]; extern uint64_t __stop___llvm_prf_cnts[]; -#define START_DATA ((const void *)__start___llvm_prf_data) -#define END_DATA ((const void *)__stop___llvm_prf_data) -#define START_NAMES ((const void *)__start___llvm_prf_names) -#define END_NAMES ((const void *)__stop___llvm_prf_names) -#define START_COUNTERS ((void *)__start___llvm_prf_cnts) -#define END_COUNTERS ((void *)__stop___llvm_prf_cnts) +#define START_DATA ((const void *)__start___llvm_prf_data) +#define END_DATA ((const void *)__stop___llvm_prf_data) +#define START_NAMES ((const void 
*)__start___llvm_prf_names) +#define END_NAMES ((const void *)__stop___llvm_prf_names) +#define START_COUNTERS ((void *)__start___llvm_prf_cnts) +#define END_COUNTERS ((void *)__stop___llvm_prf_cnts) static void reset_counters(void) { @@ -102,7 +105,8 @@ static void reset_counters(void) static uint32_t get_size(void) { return ROUNDUP(sizeof(struct llvm_profile_header) + END_DATA - START_DATA + - END_COUNTERS - START_COUNTERS + END_NAMES - START_NAMES, 8); + END_COUNTERS - START_COUNTERS + END_NAMES - START_NAMES, + 8); } static int dump(XEN_GUEST_HANDLE_PARAM(char) buffer, uint32_t *buf_size) @@ -119,13 +123,13 @@ static int dump(XEN_GUEST_HANDLE_PARAM(char) buffer, uint32_t *buf_size) }; unsigned int off = 0; -#define APPEND_TO_BUFFER(src, size) \ -({ \ - if ( off + (size) > *buf_size ) \ - return -ENOMEM; \ - copy_to_guest_offset(buffer, off, (const char *)src, size); \ - off += (size); \ -}) +#define APPEND_TO_BUFFER(src, size) \ + ({ \ + if ( off + (size) > *buf_size ) \ + return -ENOMEM; \ + copy_to_guest_offset(buffer, off, (const char *)src, size); \ + off += (size); \ + }) APPEND_TO_BUFFER(&header, sizeof(header)); APPEND_TO_BUFFER(START_DATA, END_DATA - START_DATA); APPEND_TO_BUFFER(START_COUNTERS, END_COUNTERS - START_COUNTERS); diff --git a/xen/common/cpu.c b/xen/common/cpu.c index 653a56b840..3dbfb9df60 100644 --- a/xen/common/cpu.c +++ b/xen/common/cpu.c @@ -7,8 +7,8 @@ unsigned int __read_mostly nr_cpu_ids = NR_CPUS; #ifndef nr_cpumask_bits -unsigned int __read_mostly nr_cpumask_bits - = BITS_TO_LONGS(NR_CPUS) * BITS_PER_LONG; +unsigned int __read_mostly nr_cpumask_bits = + BITS_TO_LONGS(NR_CPUS) * BITS_PER_LONG; #endif /* @@ -20,18 +20,19 @@ unsigned int __read_mostly nr_cpumask_bits */ /* cpu_bit_bitmap[0] is empty - so we can back into it */ -#define MASK_DECLARE_1(x) [x+1][0] = 1UL << (x) -#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1) -#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2) -#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4) +#define MASK_DECLARE_1(x) [x + 1][0] = 1UL << (x) +#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x + 1) +#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x + 2) +#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x + 4) -const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = { +const unsigned long cpu_bit_bitmap[BITS_PER_LONG + 1][BITS_TO_LONGS(NR_CPUS)] = + { - MASK_DECLARE_8(0), MASK_DECLARE_8(8), - MASK_DECLARE_8(16), MASK_DECLARE_8(24), + MASK_DECLARE_8(0), MASK_DECLARE_8(8), + MASK_DECLARE_8(16), MASK_DECLARE_8(24), #if BITS_PER_LONG > 32 - MASK_DECLARE_8(32), MASK_DECLARE_8(40), - MASK_DECLARE_8(48), MASK_DECLARE_8(56), + MASK_DECLARE_8(32), MASK_DECLARE_8(40), + MASK_DECLARE_8(48), MASK_DECLARE_8(56), #endif }; @@ -118,7 +119,7 @@ int cpu_down(unsigned int cpu) cpu_hotplug_done(); return 0; - fail: +fail: notifier_rc = notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, hcpu, &nb); BUG_ON(notifier_rc != NOTIFY_DONE); cpu_hotplug_done(); @@ -159,7 +160,7 @@ int cpu_up(unsigned int cpu) cpu_hotplug_done(); return 0; - fail: +fail: notifier_rc = notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu, &nb); BUG_ON(notifier_rc != NOTIFY_DONE); cpu_hotplug_done(); @@ -169,8 +170,7 @@ int cpu_up(unsigned int cpu) void notify_cpu_starting(unsigned int cpu) { void *hcpu = (void *)(long)cpu; - int notifier_rc = notifier_call_chain( - &cpu_chain, CPU_STARTING, hcpu, NULL); + int notifier_rc = notifier_call_chain(&cpu_chain, CPU_STARTING, hcpu, 
NULL); BUG_ON(notifier_rc != NOTIFY_DONE); } @@ -186,7 +186,7 @@ int disable_nonboot_cpus(void) printk("Disabling non-boot CPUs ...\n"); - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) { if ( cpu == 0 ) continue; @@ -211,7 +211,7 @@ void enable_nonboot_cpus(void) printk("Enabling non-boot CPUs ...\n"); - for_each_cpu ( cpu, &frozen_cpus ) + for_each_cpu (cpu, &frozen_cpus) { if ( (error = cpu_up(cpu)) ) { diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c index e89bb67e71..6d304eba2b 100644 --- a/xen/common/cpupool.c +++ b/xen/common/cpupool.c @@ -1,6 +1,6 @@ /****************************************************************************** * cpupool.c - * + * * Generic cpupool-handling functions. * * Cpupools are a feature to have configurable scheduling domains. Each @@ -20,13 +20,13 @@ #include #include -#define for_each_cpupool(ptr) \ - for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next)) +#define for_each_cpupool(ptr) \ + for ( (ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next) ) -struct cpupool *cpupool0; /* Initial cpupool with Dom0 */ -cpumask_t cpupool_free_cpus; /* cpus not in any cpupool */ +struct cpupool *cpupool0; /* Initial cpupool with Dom0 */ +cpumask_t cpupool_free_cpus; /* cpus not in any cpupool */ -static struct cpupool *cpupool_list; /* linked list, sorted by poolid */ +static struct cpupool *cpupool_list; /* linked list, sorted by poolid */ static int cpupool_moving_cpu = -1; static struct cpupool *cpupool_cpu_moving = NULL; @@ -79,9 +79,7 @@ static struct cpupool *__cpupool_find_by_id(int id, int exact) ASSERT(spin_is_locked(&cpupool_lock)); - for_each_cpupool(q) - if ( (*q)->cpupool_id >= id ) - break; + for_each_cpupool(q) if ( (*q)->cpupool_id >= id ) break; return (!exact || (*q == NULL) || ((*q)->cpupool_id == id)) ? 
*q : NULL; } @@ -128,8 +126,8 @@ void cpupool_put(struct cpupool *pool) * - poolid already used * - unknown scheduler */ -static struct cpupool *cpupool_create( - int poolid, unsigned int sched_id, int *perr) +static struct cpupool *cpupool_create(int poolid, unsigned int sched_id, + int *perr) { struct cpupool *c; struct cpupool **q; @@ -203,9 +201,7 @@ static int cpupool_destroy(struct cpupool *c) struct cpupool **q; spin_lock(&cpupool_lock); - for_each_cpupool(q) - if ( *q == c ) - break; + for_each_cpupool(q) if ( *q == c ) break; if ( *q != c ) { spin_unlock(&cpupool_lock); @@ -273,7 +269,7 @@ static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu) return ret; cpumask_clear_cpu(cpu, &cpupool_free_cpus); - if (cpupool_moving_cpu == cpu) + if ( cpupool_moving_cpu == cpu ) { cpupool_moving_cpu = -1; cpupool_put(cpupool_cpu_moving); @@ -365,8 +361,8 @@ static int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu) int ret; struct domain *d; - cpupool_dprintk("cpupool_unassign_cpu(pool=%d,cpu=%d)\n", - c->cpupool_id, cpu); + cpupool_dprintk("cpupool_unassign_cpu(pool=%d,cpu=%d)\n", c->cpupool_id, + cpu); spin_lock(&cpupool_lock); ret = -EADDRNOTAVAIL; @@ -496,7 +492,7 @@ static int cpupool_cpu_add(unsigned int cpu) for_each_cpupool(c) { - if ( cpumask_test_cpu(cpu, (*c)->cpu_suspended ) ) + if ( cpumask_test_cpu(cpu, (*c)->cpu_suspended) ) { ret = cpupool_assign_cpu_locked(*c, cpu); if ( ret ) @@ -525,7 +521,7 @@ static int cpupool_cpu_add(unsigned int cpu) per_cpu(cpupool, cpu) = NULL; ret = cpupool_assign_cpu_locked(cpupool0, cpu); } - out: +out: spin_unlock(&cpupool_lock); return ret; @@ -550,7 +546,7 @@ static int cpupool_cpu_remove(unsigned int cpu) for_each_cpupool(c) { - if ( cpumask_test_cpu(cpu, (*c)->cpu_valid ) ) + if ( cpumask_test_cpu(cpu, (*c)->cpu_valid) ) { cpumask_set_cpu(cpu, (*c)->cpu_suspended); cpumask_clear_cpu(cpu, (*c)->cpu_valid); @@ -595,15 +591,15 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op) int ret; struct cpupool *c; - switch ( op->op ) + switch (op->op) { - case XEN_SYSCTL_CPUPOOL_OP_CREATE: { int poolid; - poolid = (op->cpupool_id == XEN_SYSCTL_CPUPOOL_PAR_ANY) ? - CPUPOOLID_NONE: op->cpupool_id; + poolid = (op->cpupool_id == XEN_SYSCTL_CPUPOOL_PAR_ANY) + ? 
CPUPOOLID_NONE + : op->cpupool_id; c = cpupool_create(poolid, op->sched_id, &ret); if ( c != NULL ) { @@ -643,8 +639,8 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op) unsigned cpu; cpu = op->cpu; - cpupool_dprintk("cpupool_assign_cpu(pool=%d,cpu=%d)\n", - op->cpupool_id, cpu); + cpupool_dprintk("cpupool_assign_cpu(pool=%d,cpu=%d)\n", op->cpupool_id, + cpu); spin_lock(&cpupool_lock); if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY ) cpu = cpumask_first(&cpupool_free_cpus); @@ -701,8 +697,8 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op) rcu_unlock_domain(d); break; } - cpupool_dprintk("cpupool move_domain(dom=%d)->pool=%d\n", - d->domain_id, op->cpupool_id); + cpupool_dprintk("cpupool move_domain(dom=%d)->pool=%d\n", d->domain_id, + op->cpupool_id); ret = -ENOENT; spin_lock(&cpupool_lock); @@ -719,8 +715,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op) case XEN_SYSCTL_CPUPOOL_OP_FREEINFO: { - ret = cpumask_to_xenctl_bitmap( - &op->cpumap, &cpupool_free_cpus); + ret = cpumask_to_xenctl_bitmap(&op->cpumap, &cpupool_free_cpus); } break; @@ -734,22 +729,22 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op) void dump_runq(unsigned char key) { - unsigned long flags; - s_time_t now = NOW(); + unsigned long flags; + s_time_t now = NOW(); struct cpupool **c; spin_lock(&cpupool_lock); local_irq_save(flags); printk("sched_smt_power_savings: %s\n", - sched_smt_power_savings? "enabled":"disabled"); - printk("NOW=%"PRI_stime"\n", now); + sched_smt_power_savings ? "enabled" : "disabled"); + printk("NOW=%" PRI_stime "\n", now); printk("Online Cpus: %*pbl\n", nr_cpu_ids, cpumask_bits(&cpu_online_map)); if ( !cpumask_empty(&cpupool_free_cpus) ) { - printk("Free Cpus: %*pbl\n", - nr_cpu_ids, cpumask_bits(&cpupool_free_cpus)); + printk("Free Cpus: %*pbl\n", nr_cpu_ids, + cpumask_bits(&cpupool_free_cpus)); schedule_dump(NULL); } @@ -764,13 +759,13 @@ void dump_runq(unsigned char key) spin_unlock(&cpupool_lock); } -static int cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; int rc = 0; - switch ( action ) + switch (action) { case CPU_DOWN_FAILED: case CPU_ONLINE: @@ -786,9 +781,7 @@ static int cpu_callback( return !rc ? NOTIFY_DONE : notifier_from_errno(rc); } -static struct notifier_block cpu_nfb = { - .notifier_call = cpu_callback -}; +static struct notifier_block cpu_nfb = {.notifier_call = cpu_callback}; static int __init cpupool_presmp_init(void) { diff --git a/xen/common/decompress.c b/xen/common/decompress.c index 9d6e0c4ab0..5be7373baa 100644 --- a/xen/common/decompress.c +++ b/xen/common/decompress.c @@ -29,7 +29,7 @@ int __init decompress(void *inbuf, unsigned int len, void *outbuf) return unlzo(inbuf, len, NULL, NULL, outbuf, NULL, error); if ( len >= 2 && !memcmp(inbuf, "\x02\x21", 2) ) - return unlz4(inbuf, len, NULL, NULL, outbuf, NULL, error); + return unlz4(inbuf, len, NULL, NULL, outbuf, NULL, error); return 1; } diff --git a/xen/common/device_tree.c b/xen/common/device_tree.c index 65862b58bb..c512363dfc 100644 --- a/xen/common/device_tree.c +++ b/xen/common/device_tree.c @@ -43,7 +43,8 @@ const struct dt_device_node *dt_interrupt_controller; * The structure represents one alias property of 'aliases' node as * an entry in aliases_lookup list. 
*/ -struct dt_alias_prop { +struct dt_alias_prop +{ struct list_head link; const char *alias; struct dt_device_node *np; @@ -62,7 +63,9 @@ static void dt_dump_addr(const char *s, const __be32 *addr, int na) dt_dprintk("\n"); } #else -static void dt_dump_addr(const char *s, const __be32 *addr, int na) { } +static void dt_dump_addr(const char *s, const __be32 *addr, int na) +{ +} #endif #define DT_BAD_ADDR ((u64)-1) @@ -78,8 +81,8 @@ struct dt_bus const char *name; const char *addresses; bool_t (*match)(const struct dt_device_node *node); - void (*count_cells)(const struct dt_device_node *child, - int *addrc, int *sizec); + void (*count_cells)(const struct dt_device_node *child, int *addrc, + int *sizec); u64 (*map)(__be32 *addr, const __be32 *range, int na, int ns, int pna); int (*translate)(__be32 *addr, u64 offset, int na); unsigned int (*get_flags)(const __be32 *addr); @@ -105,8 +108,8 @@ void dt_set_cell(__be32 **cellp, int size, u64 val) (*cellp) += cells; } -void dt_set_range(__be32 **cellp, const struct dt_device_node *np, - u64 address, u64 size) +void dt_set_range(__be32 **cellp, const struct dt_device_node *np, u64 address, + u64 size) { dt_set_cell(cellp, dt_n_addr_cells(np), address); dt_set_cell(cellp, dt_n_size_cells(np), size); @@ -153,16 +156,16 @@ const struct dt_property *dt_find_property(const struct dt_device_node *np, return pp; } -const void *dt_get_property(const struct dt_device_node *np, - const char *name, u32 *lenp) +const void *dt_get_property(const struct dt_device_node *np, const char *name, + u32 *lenp) { const struct dt_property *pp = dt_find_property(np, name, lenp); return pp ? pp->value : NULL; } -bool_t dt_property_read_u32(const struct dt_device_node *np, - const char *name, u32 *out_value) +bool_t dt_property_read_u32(const struct dt_device_node *np, const char *name, + u32 *out_value) { u32 len; const __be32 *val; @@ -176,9 +179,8 @@ bool_t dt_property_read_u32(const struct dt_device_node *np, return 1; } - -bool_t dt_property_read_u64(const struct dt_device_node *np, - const char *name, u64 *out_value) +bool_t dt_property_read_u64(const struct dt_device_node *np, const char *name, + u64 *out_value) { u32 len; const __be32 *val; @@ -211,7 +213,7 @@ int dt_property_read_string(const struct dt_device_node *np, bool_t dt_device_is_compatible(const struct dt_device_node *device, const char *compat) { - const char* cp; + const char *cp; u32 cplen, l; cp = dt_get_property(device, "compatible", &cplen); @@ -249,7 +251,7 @@ struct dt_device_node *dt_find_node_by_name(struct dt_device_node *from, struct dt_device_node *dt; dt = from ? from->allnext : dt_host; - dt_for_each_device_node(dt, np) + dt_for_each_device_node (dt, np) if ( np->name && (dt_node_cmp(np->name, name) == 0) ) break; @@ -263,7 +265,7 @@ struct dt_device_node *dt_find_node_by_type(struct dt_device_node *from, struct dt_device_node *dt; dt = from ? 
from->allnext : dt_host; - dt_for_each_device_node(dt, np) + dt_for_each_device_node (dt, np) if ( np->type && (dt_node_cmp(np->type, type) == 0) ) break; @@ -274,7 +276,7 @@ struct dt_device_node *dt_find_node_by_path(const char *path) { struct dt_device_node *np; - dt_for_each_device_node(dt_host, np) + dt_for_each_device_node (dt_host, np) if ( np->full_name && (dt_node_cmp(np->full_name, path) == 0) ) break; @@ -301,7 +303,7 @@ struct dt_device_node *dt_find_node_by_alias(const char *alias) { const struct dt_alias_prop *app; - list_for_each_entry( app, &aliases_lookup, link ) + list_for_each_entry (app, &aliases_lookup, link) { if ( !strcmp(app->alias, alias) ) return app->np; @@ -317,8 +319,8 @@ dt_match_node(const struct dt_device_match *matches, if ( !matches ) return NULL; - while ( matches->path || matches->type || - matches->compatible || matches->not_available || matches->prop) + while ( matches->path || matches->type || matches->compatible || + matches->not_available || matches->prop ) { bool_t match = 1; @@ -353,19 +355,17 @@ const struct dt_device_node *dt_get_parent(const struct dt_device_node *node) return node->parent; } -struct dt_device_node * -dt_find_compatible_node(struct dt_device_node *from, - const char *type, - const char *compatible) +struct dt_device_node *dt_find_compatible_node(struct dt_device_node *from, + const char *type, + const char *compatible) { struct dt_device_node *np; struct dt_device_node *dt; dt = from ? from->allnext : dt_host; - dt_for_each_device_node(dt, np) + dt_for_each_device_node (dt, np) { - if ( type - && !(np->type && (dt_node_cmp(np->type, type) == 0)) ) + if ( type && !(np->type && (dt_node_cmp(np->type, type) == 0)) ) continue; if ( dt_device_is_compatible(np, compatible) ) break; @@ -382,7 +382,7 @@ dt_find_matching_node(struct dt_device_node *from, struct dt_device_node *dt; dt = from ? from->allnext : dt_host; - dt_for_each_device_node(dt, np) + dt_for_each_device_node (dt, np) { if ( dt_match_node(matches, np) ) return np; @@ -449,21 +449,21 @@ int dt_child_n_size_cells(const struct dt_device_node *parent) * These are defined in Linux where much of this code comes from, but * are currently unused outside this file in the context of Xen. 
*/ -#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ - -#define IORESOURCE_TYPE_BITS 0x00001f00 /* Resource type */ -#define IORESOURCE_IO 0x00000100 /* PCI/ISA I/O ports */ -#define IORESOURCE_MEM 0x00000200 -#define IORESOURCE_REG 0x00000300 /* Register offsets */ -#define IORESOURCE_IRQ 0x00000400 -#define IORESOURCE_DMA 0x00000800 -#define IORESOURCE_BUS 0x00001000 - -#define IORESOURCE_PREFETCH 0x00002000 /* No side effects */ -#define IORESOURCE_READONLY 0x00004000 -#define IORESOURCE_CACHEABLE 0x00008000 -#define IORESOURCE_RANGELENGTH 0x00010000 -#define IORESOURCE_SHADOWABLE 0x00020000 +#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ + +#define IORESOURCE_TYPE_BITS 0x00001f00 /* Resource type */ +#define IORESOURCE_IO 0x00000100 /* PCI/ISA I/O ports */ +#define IORESOURCE_MEM 0x00000200 +#define IORESOURCE_REG 0x00000300 /* Register offsets */ +#define IORESOURCE_IRQ 0x00000400 +#define IORESOURCE_DMA 0x00000800 +#define IORESOURCE_BUS 0x00001000 + +#define IORESOURCE_PREFETCH 0x00002000 /* No side effects */ +#define IORESOURCE_READONLY 0x00004000 +#define IORESOURCE_CACHEABLE 0x00008000 +#define IORESOURCE_RANGELENGTH 0x00010000 +#define IORESOURCE_SHADOWABLE 0x00020000 /* * Default translator (generic bus) @@ -481,7 +481,7 @@ static bool_t dt_bus_default_match(const struct dt_device_node *node) } static void dt_bus_default_count_cells(const struct dt_device_node *dev, - int *addrc, int *sizec) + int *addrc, int *sizec) { if ( addrc ) *addrc = dt_n_addr_cells(dev); @@ -489,8 +489,8 @@ static void dt_bus_default_count_cells(const struct dt_device_node *dev, *sizec = dt_n_size_cells(dev); } -static u64 dt_bus_default_map(__be32 *addr, const __be32 *range, - int na, int ns, int pna) +static u64 dt_bus_default_map(__be32 *addr, const __be32 *range, int na, int ns, + int pna) { u64 cp, s, da; @@ -543,15 +543,15 @@ static bool_t dt_bus_pci_match(const struct dt_device_node *np) * powermacs "ht" is hypertransport */ return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") || - !strcmp(np->type, "vci") || !strcmp(np->type, "ht"); + !strcmp(np->type, "vci") || !strcmp(np->type, "ht"); } -static void dt_bus_pci_count_cells(const struct dt_device_node *np, - int *addrc, int *sizec) +static void dt_bus_pci_count_cells(const struct dt_device_node *np, int *addrc, + int *sizec) { - if (addrc) + if ( addrc ) *addrc = 3; - if (sizec) + if ( sizec ) *sizec = 2; } @@ -560,7 +560,8 @@ static unsigned int dt_bus_pci_get_flags(const __be32 *addr) unsigned int flags = 0; u32 w = be32_to_cpup(addr); - switch((w >> 24) & 0x03) { + switch ((w >> 24) & 0x03) + { case 0x01: flags |= IORESOURCE_IO; break; @@ -569,13 +570,13 @@ static unsigned int dt_bus_pci_get_flags(const __be32 *addr) flags |= IORESOURCE_MEM; break; } - if (w & 0x40000000) + if ( w & 0x40000000 ) flags |= IORESOURCE_PREFETCH; return flags; } static u64 dt_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns, - int pna) + int pna) { u64 cp, s, da; unsigned int af, rf; @@ -584,19 +585,19 @@ static u64 dt_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns, rf = dt_bus_pci_get_flags(range); /* Check address type match */ - if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO)) + if ( (af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO) ) return DT_BAD_ADDR; /* Read address values, skipping high cell */ cp = dt_read_number(range + 1, na - 1); - s = dt_read_number(range + na + pna, ns); + s = dt_read_number(range + na + pna, ns); da = dt_read_number(addr + 1, na - 1); dt_dprintk("DT: PCI map, cp=%llx, s=%llx, 
da=%llx\n", (unsigned long long)cp, (unsigned long long)s, (unsigned long long)da); - if (da < cp || da >= (cp + s)) + if ( da < cp || da >= (cp + s) ) return DT_BAD_ADDR; return da - cp; } @@ -609,8 +610,7 @@ static int dt_bus_pci_translate(__be32 *addr, u64 offset, int na) /* * Array of bus specific translators */ -static const struct dt_bus dt_busses[] = -{ +static const struct dt_bus dt_busses[] = { /* PCI */ { .name = "pci", @@ -689,10 +689,9 @@ static const __be32 *dt_get_address(const struct dt_device_node *dev, } static int dt_translate_one(const struct dt_device_node *parent, - const struct dt_bus *bus, - const struct dt_bus *pbus, - __be32 *addr, int na, int ns, - int pna, const char *rprop) + const struct dt_bus *bus, const struct dt_bus *pbus, + __be32 *addr, int na, int ns, int pna, + const char *rprop) { const __be32 *ranges; unsigned int rlen; @@ -773,13 +772,13 @@ static u64 __dt_translate_address(const struct dt_device_node *dev, if ( !DT_CHECK_COUNTS(na, ns) ) { printk(XENLOG_ERR "dt_parse: Bad cell count for device %s\n", - dev->full_name); + dev->full_name); goto bail; } memcpy(addr, in_addr, na * 4); - dt_dprintk("DT: bus is %s (na=%d, ns=%d) on %s\n", - bus->name, na, ns, parent->full_name); + dt_dprintk("DT: bus is %s (na=%d, ns=%d) on %s\n", bus->name, na, ns, + parent->full_name); dt_dump_addr("DT: translating address:", addr, na); /* Translate */ @@ -812,8 +811,8 @@ static u64 __dt_translate_address(const struct dt_device_node *dev, break; } - dt_dprintk("DT: parent bus is %s (na=%d, ns=%d) on %s\n", - pbus->name, pna, pns, parent->full_name); + dt_dprintk("DT: parent bus is %s (na=%d, ns=%d) on %s\n", pbus->name, + pna, pns, parent->full_name); /* Apply bus translation */ if ( dt_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop) ) @@ -853,11 +852,9 @@ int dt_device_get_address(const struct dt_device_node *dev, unsigned int index, return 0; } - int dt_for_each_range(const struct dt_device_node *dev, - int (*cb)(const struct dt_device_node *, - u64 addr, u64 length, - void *), + int (*cb)(const struct dt_device_node *, u64 addr, + u64 length, void *), void *data) { const struct dt_device_node *parent = NULL; @@ -889,7 +886,7 @@ int dt_for_each_range(const struct dt_device_node *dev, if ( !DT_CHECK_COUNTS(na, ns) ) { printk(XENLOG_ERR "dt_parse: Bad cell count for device %s\n", - dev->full_name); + dev->full_name); return -EINVAL; } @@ -912,10 +909,8 @@ int dt_for_each_range(const struct dt_device_node *dev, rlen /= 4; rone = na + pna + ns; - dt_dprintk("%s: dev=%s, bus=%s, parent=%s, rlen=%d, rone=%d\n", - __func__, - dt_node_name(dev), bus->name, - dt_node_name(parent), rlen, rone); + dt_dprintk("%s: dev=%s, bus=%s, parent=%s, rlen=%d, rone=%d\n", __func__, + dt_node_name(dev), bus->name, dt_node_name(parent), rlen, rone); for ( ; rlen >= rone; rlen -= rone, ranges += rone ) { @@ -933,7 +928,6 @@ int dt_for_each_range(const struct dt_device_node *dev, dt_dprintk(" -> callback failed=%d\n", ret); return ret; } - } return 0; @@ -949,7 +943,7 @@ static struct dt_device_node *dt_find_node_by_phandle(dt_phandle handle) { struct dt_device_node *np; - dt_for_each_device_node(dt_host, np) + dt_for_each_device_node (dt_host, np) if ( np->phandle == handle ) break; @@ -969,8 +963,7 @@ dt_irq_find_parent(const struct dt_device_node *child) const struct dt_device_node *p; const __be32 *parp; - do - { + do { parp = dt_get_property(child, "interrupt-parent", NULL); if ( parp == NULL ) p = dt_get_parent(child); @@ -1048,8 +1041,7 @@ unsigned int dt_number_of_address(const 
struct dt_device_node *dev) int dt_for_each_irq_map(const struct dt_device_node *dev, int (*cb)(const struct dt_device_node *, - const struct dt_irq *, - void *), + const struct dt_irq *, void *), void *data) { const struct dt_device_node *ipar, *tnode, *old = NULL; @@ -1061,8 +1053,8 @@ int dt_for_each_irq_map(const struct dt_device_node *dev, struct dt_raw_irq dt_raw_irq; struct dt_irq dt_irq; - dt_dprintk("%s: par=%s cb=%p data=%p\n", __func__, - dev->full_name, cb, data); + dt_dprintk("%s: par=%s cb=%p data=%p\n", __func__, dev->full_name, cb, + data); ipar = dev; @@ -1153,8 +1145,7 @@ int dt_for_each_irq_map(const struct dt_device_node *dev, tmp = dt_get_property(ipar, "#address-cells", NULL); paddrsize = (tmp == NULL) ? 0 : be32_to_cpu(*tmp); - dt_dprintk(" -> pintsize=%d, paddrsize=%d\n", - pintsize, paddrsize); + dt_dprintk(" -> pintsize=%d, paddrsize=%d\n", pintsize, paddrsize); if ( pintsize > DT_MAX_IRQ_SPEC ) { @@ -1234,8 +1225,7 @@ fail: */ static int dt_irq_map_raw(const struct dt_device_node *parent, const __be32 *intspec, u32 ointsize, - const __be32 *addr, - struct dt_raw_irq *oirq) + const __be32 *addr, struct dt_raw_irq *oirq) { const struct dt_device_node *ipar, *tnode, *old = NULL, *newpar = NULL; const __be32 *tmp, *imap, *imask; @@ -1243,9 +1233,10 @@ static int dt_irq_map_raw(const struct dt_device_node *parent, u32 imaplen; int match, i; - dt_dprintk("dt_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n", - parent->full_name, be32_to_cpup(intspec), - be32_to_cpup(intspec + 1), ointsize); + dt_dprintk( + "dt_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n", + parent->full_name, be32_to_cpup(intspec), be32_to_cpup(intspec + 1), + ointsize); ipar = parent; @@ -1349,7 +1340,7 @@ static int dt_irq_map_raw(const struct dt_device_node *parent, for ( ; i < (addrsize + intsize) && match; ++i ) { __be32 mask = imask ? imask[i] : cpu_to_be32(0xffffffffu); - match = ((intspec[i-addrsize] ^ imap[i]) & mask) == 0; + match = ((intspec[i - addrsize] ^ imap[i]) & mask) == 0; } imap += addrsize + intsize; imaplen -= addrsize + intsize; @@ -1381,8 +1372,8 @@ static int dt_irq_map_raw(const struct dt_device_node *parent, tmp = dt_get_property(newpar, "#address-cells", NULL); newaddrsize = (tmp == NULL) ? 
0 : be32_to_cpu(*tmp); - dt_dprintk(" -> newintsize=%d, newaddrsize=%d\n", - newintsize, newaddrsize); + dt_dprintk(" -> newintsize=%d, newaddrsize=%d\n", newintsize, + newaddrsize); /* Check for malformed properties */ if ( imaplen < (newaddrsize + newintsize) ) @@ -1413,16 +1404,15 @@ fail: } int dt_device_get_raw_irq(const struct dt_device_node *device, - unsigned int index, - struct dt_raw_irq *out_irq) + unsigned int index, struct dt_raw_irq *out_irq) { const struct dt_device_node *p; const __be32 *intspec, *tmp, *addr; u32 intsize, intlen; int res = -EINVAL; - dt_dprintk("dt_device_get_raw_irq: dev=%s, index=%u\n", - device->full_name, index); + dt_dprintk("dt_device_get_raw_irq: dev=%s, index=%u\n", device->full_name, + index); /* Get the interrupts property */ intspec = dt_get_property(device, "interrupts", &intlen); @@ -1453,16 +1443,14 @@ int dt_device_get_raw_irq(const struct dt_device_node *device, goto out; /* Get new specifier and map it */ - res = dt_irq_map_raw(p, intspec + index * intsize, intsize, - addr, out_irq); + res = dt_irq_map_raw(p, intspec + index * intsize, intsize, addr, out_irq); if ( res ) goto out; out: return res; } -int dt_irq_translate(const struct dt_raw_irq *raw, - struct dt_irq *out_irq) +int dt_irq_translate(const struct dt_raw_irq *raw, struct dt_irq *out_irq) { ASSERT(dt_irq_xlate != NULL); ASSERT(dt_interrupt_controller != NULL); @@ -1474,8 +1462,8 @@ int dt_irq_translate(const struct dt_raw_irq *raw, if ( raw->controller != dt_interrupt_controller ) return -EINVAL; - return dt_irq_xlate(raw->specifier, raw->size, - &out_irq->irq, &out_irq->type); + return dt_irq_xlate(raw->specifier, raw->size, &out_irq->irq, + &out_irq->type); } int dt_device_get_irq(const struct dt_device_node *device, unsigned int index, @@ -1513,13 +1501,12 @@ bool_t dt_device_is_available(const struct dt_device_node *device) bool_t dt_device_for_passthrough(const struct dt_device_node *device) { return (dt_find_property(device, "xen,passthrough", NULL) != NULL); - } static int __dt_parse_phandle_with_args(const struct dt_device_node *np, const char *list_name, - const char *cells_name, - int cell_count, int index, + const char *cells_name, int cell_count, + int index, struct dt_phandle_args *out_args) { const __be32 *list, *list_end; @@ -1571,8 +1558,8 @@ static int __dt_parse_phandle_with_args(const struct dt_device_node *np, { if ( !dt_property_read_u32(node, cells_name, &count) ) { - printk("%s: could not get %s for %s\n", - np->full_name, cells_name, node->full_name); + printk("%s: could not get %s for %s\n", np->full_name, + cells_name, node->full_name); goto err; } } @@ -1600,7 +1587,7 @@ static int __dt_parse_phandle_with_args(const struct dt_device_node *np, rc = -ENOENT; if ( cur_index == index ) { - if (!phandle) + if ( !phandle ) goto err; if ( out_args ) @@ -1608,7 +1595,7 @@ static int __dt_parse_phandle_with_args(const struct dt_device_node *np, int i; WARN_ON(count > MAX_PHANDLE_ARGS); - if (count > MAX_PHANDLE_ARGS) + if ( count > MAX_PHANDLE_ARGS ) count = MAX_PHANDLE_ARGS; out_args->np = node; out_args->args_count = count; @@ -1641,31 +1628,27 @@ struct dt_device_node *dt_parse_phandle(const struct dt_device_node *np, { struct dt_phandle_args args; - if (index < 0) + if ( index < 0 ) return NULL; - if (__dt_parse_phandle_with_args(np, phandle_name, NULL, 0, - index, &args)) + if ( __dt_parse_phandle_with_args(np, phandle_name, NULL, 0, index, &args) ) return NULL; return args.np; } - int dt_parse_phandle_with_args(const struct dt_device_node *np, - const 
char *list_name, - const char *cells_name, int index, - struct dt_phandle_args *out_args) + const char *list_name, const char *cells_name, + int index, struct dt_phandle_args *out_args) { if ( index < 0 ) return -EINVAL; - return __dt_parse_phandle_with_args(np, list_name, cells_name, 0, - index, out_args); + return __dt_parse_phandle_with_args(np, list_name, cells_name, 0, index, + out_args); } int dt_count_phandle_with_args(const struct dt_device_node *np, - const char *list_name, - const char *cells_name) + const char *list_name, const char *cells_name) { return __dt_parse_phandle_with_args(np, list_name, cells_name, 0, -1, NULL); } @@ -1679,12 +1662,10 @@ int dt_count_phandle_with_args(const struct dt_device_node *np, * @allnextpp: pointer to ->allnext from last allocated device_node * @fpsize: Size of the node path up at the current depth. */ -static unsigned long __init unflatten_dt_node(const void *fdt, - unsigned long mem, - unsigned long *p, - struct dt_device_node *dad, - struct dt_device_node ***allnextpp, - unsigned long fpsize) +static unsigned long __init +unflatten_dt_node(const void *fdt, unsigned long mem, unsigned long *p, + struct dt_device_node *dad, + struct dt_device_node ***allnextpp, unsigned long fpsize) { struct dt_device_node *np; struct dt_property *pp, **prev_pp = NULL; @@ -1755,9 +1736,8 @@ static unsigned long __init unflatten_dt_node(const void *fdt, #ifdef DEBUG_DT if ( (strlen(fn) + l + 1) != allocl ) { - dt_dprintk("%s: p: %d, l: %d, a: %d\n", - pathp, (int)strlen(fn), - l, allocl); + dt_dprintk("%s: p: %d, l: %d, a: %d\n", pathp, + (int)strlen(fn), l, allocl); } #endif fn += strlen(fn); @@ -1824,7 +1804,7 @@ static unsigned long __init unflatten_dt_node(const void *fdt, (strcmp(pname, "linux,phandle") == 0) ) { if ( np->phandle == 0 ) - np->phandle = be32_to_cpup((__be32*)*p); + np->phandle = be32_to_cpup((__be32 *)*p); } /* And we process the "ibm,phandle" property * used in pSeries dynamic device tree @@ -1888,7 +1868,7 @@ static unsigned long __init unflatten_dt_node(const void *fdt, if ( allnextpp ) { *prev_pp = NULL; - np->name = (np->name) ? 
: dt_get_property(np, "name", NULL); + np->name = (np->name) ?: dt_get_property(np, "name", NULL); np->type = dt_get_property(np, "device_type", NULL); if ( !np->name ) @@ -1945,7 +1925,7 @@ static void __init __unflatten_device_tree(const void *fdt, dt_dprintk(" size is %#lx allocating...\n", size); /* Allocate memory for the expanded device tree */ - mem = (unsigned long)_xmalloc (size + 4, __alignof__(struct dt_device_node)); + mem = (unsigned long)_xmalloc(size + 4, __alignof__(struct dt_device_node)); ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef); @@ -1956,25 +1936,24 @@ static void __init __unflatten_device_tree(const void *fdt, unflatten_dt_node(fdt, mem, &start, NULL, &allnextp, 0); if ( be32_to_cpup((__be32 *)start) != FDT_END ) printk(XENLOG_WARNING "Weird tag at end of tree: %08x\n", - *((u32 *)start)); + *((u32 *)start)); if ( be32_to_cpu(((__be32 *)mem)[size / 4]) != 0xdeadbeef ) printk(XENLOG_WARNING "End of tree marker overwritten: %08x\n", - be32_to_cpu(((__be32 *)mem)[size / 4])); + be32_to_cpu(((__be32 *)mem)[size / 4])); *allnextp = NULL; dt_dprintk(" <- unflatten_device_tree()\n"); } -static void dt_alias_add(struct dt_alias_prop *ap, - struct dt_device_node *np, +static void dt_alias_add(struct dt_alias_prop *ap, struct dt_device_node *np, int id, const char *stem, int stem_len) { ap->np = np; ap->id = id; strlcpy(ap->stem, stem, stem_len + 1); list_add_tail(&ap->link, &aliases_lookup); - dt_dprintk("adding DT alias:%s: stem=%s id=%d node=%s\n", - ap->alias, ap->stem, ap->id, dt_node_full_name(np)); + dt_dprintk("adding DT alias:%s: stem=%s id=%d node=%s\n", ap->alias, + ap->stem, ap->id, dt_node_full_name(np)); } /** @@ -1993,7 +1972,7 @@ static void __init dt_alias_scan(void) if ( !aliases ) return; - dt_for_each_property_node( aliases, pp ) + dt_for_each_property_node(aliases, pp) { const char *start = pp->name; const char *end = start + strlen(start); @@ -2002,8 +1981,7 @@ static void __init dt_alias_scan(void) int id, len; /* Skip those we do not want to proceed */ - if ( !strcmp(pp->name, "name") || - !strcmp(pp->name, "phandle") || + if ( !strcmp(pp->name, "name") || !strcmp(pp->name, "phandle") || !strcmp(pp->name, "linux,phandle") ) continue; @@ -2013,7 +1991,7 @@ static void __init dt_alias_scan(void) /* walk the alias backwards to extract the id and work out * the 'stem' string */ - while ( isdigit(*(end-1)) && end > start ) + while ( isdigit(*(end - 1)) && end > start ) end--; len = end - start; @@ -2028,7 +2006,7 @@ static void __init dt_alias_scan(void) } } -struct dt_device_node * __init +struct dt_device_node *__init dt_find_interrupt_controller(const struct dt_device_match *matches) { struct dt_device_node *np = NULL; diff --git a/xen/common/domain.c b/xen/common/domain.c index d3a1941299..56b96c5f4f 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -1,6 +1,6 @@ /****************************************************************************** * domain.c - * + * * Generic domain-handling functions. 
*/ @@ -62,7 +62,7 @@ DEFINE_SPINLOCK(domlist_update_lock); DEFINE_RCU_READ_LOCK(domlist_read_lock); #define DOMAIN_HASH_SIZE 256 -#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1)) +#define DOMAIN_HASH(_id) ((int)(_id) & (DOMAIN_HASH_SIZE - 1)) static struct domain *domain_hash[DOMAIN_HASH_SIZE]; struct domain *domain_list; @@ -86,7 +86,7 @@ static void __domain_finalise_shutdown(struct domain *d) if ( d->is_shut_down ) return; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) if ( !v->paused_for_shutdown ) return; @@ -120,8 +120,8 @@ static void vcpu_info_reset(struct vcpu *v) struct domain *d = v->domain; v->vcpu_info = ((v->vcpu_id < XEN_LEGACY_MAX_VCPUS) - ? (vcpu_info_t *)&shared_info(d, vcpu_info[v->vcpu_id]) - : &dummy_vcpu_info); + ? (vcpu_info_t *)&shared_info(d, vcpu_info[v->vcpu_id]) + : &dummy_vcpu_info); v->vcpu_info_mfn = INVALID_MFN; } @@ -135,8 +135,8 @@ static void vcpu_destroy(struct vcpu *v) free_vcpu_struct(v); } -struct vcpu *vcpu_create( - struct domain *d, unsigned int vcpu_id, unsigned int cpu_id) +struct vcpu *vcpu_create(struct domain *d, unsigned int vcpu_id, + unsigned int cpu_id) { struct vcpu *v; @@ -196,11 +196,11 @@ struct vcpu *vcpu_create( return v; - fail_sched: +fail_sched: sched_destroy_vcpu(v); - fail_wq: +fail_wq: destroy_waitqueue_vcpu(v); - fail: +fail: vcpu_destroy(v); return NULL; @@ -294,11 +294,9 @@ static void _domain_destroy(struct domain *d) static int sanitise_domain_config(struct xen_domctl_createdomain *config) { - if ( config->flags & ~(XEN_DOMCTL_CDF_hvm_guest | - XEN_DOMCTL_CDF_hap | + if ( config->flags & ~(XEN_DOMCTL_CDF_hvm_guest | XEN_DOMCTL_CDF_hap | XEN_DOMCTL_CDF_s3_integrity | - XEN_DOMCTL_CDF_oos_off | - XEN_DOMCTL_CDF_xs_domain) ) + XEN_DOMCTL_CDF_oos_off | XEN_DOMCTL_CDF_xs_domain) ) { dprintk(XENLOG_INFO, "Unknown CDF flags %#x\n", config->flags); return -EINVAL; @@ -318,8 +316,13 @@ struct domain *domain_create(domid_t domid, bool is_priv) { struct domain *d, **pd, *old_hwdom = NULL; - enum { INIT_watchdog = 1u<<1, - INIT_evtchn = 1u<<3, INIT_gnttab = 1u<<4, INIT_arch = 1u<<5 }; + enum + { + INIT_watchdog = 1u << 1, + INIT_evtchn = 1u << 3, + INIT_gnttab = 1u << 4, + INIT_arch = 1u << 5 + }; int err, init_status = 0; if ( config && (err = sanitise_domain_config(config)) ) @@ -351,7 +354,8 @@ struct domain *domain_create(domid_t domid, /* Sort out our idea of is_{pv,hvm}_domain(). All system domains are PV. */ d->guest_type = ((config && (config->flags & XEN_DOMCTL_CDF_hvm_guest)) - ? guest_type_hvm : guest_type_pv); + ? guest_type_hvm + : guest_type_pv); TRACE_1D(TRC_DOM0_DOM_ADD, d->domain_id); @@ -429,8 +433,9 @@ struct domain *domain_create(domid_t domid, d->disable_migrate = 1; } - d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex); - d->irq_caps = rangeset_new(d, "Interrupts", 0); + d->iomem_caps = + rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex); + d->irq_caps = rangeset_new(d, "Interrupts", 0); if ( !d->iomem_caps || !d->irq_caps ) goto fail; @@ -485,7 +490,7 @@ struct domain *domain_create(domid_t domid, return d; - fail: +fail: ASSERT(err < 0); /* Sanity check paths leading here. */ err = err ?: -EILSEQ; /* Release build safety. 
*/ @@ -519,7 +524,6 @@ struct domain *domain_create(domid_t domid, return ERR_PTR(err); } - void domain_update_node_affinity(struct domain *d) { cpumask_var_t dom_cpumask, dom_cpumask_soft; @@ -558,7 +562,7 @@ void domain_update_node_affinity(struct domain *d) * and the full mask of where it would prefer to run (the union of * the soft affinity of all its various vcpus). Let's build them. */ - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { cpumask_or(dom_cpumask, dom_cpumask, v->cpu_hard_affinity); cpumask_or(dom_cpumask_soft, dom_cpumask_soft, @@ -574,11 +578,11 @@ void domain_update_node_affinity(struct domain *d) * If not empty, the intersection of hard, soft and online is the * narrowest set we want. If empty, we fall back to hard&online. */ - dom_affinity = cpumask_empty(dom_cpumask_soft) ? - dom_cpumask : dom_cpumask_soft; + dom_affinity = + cpumask_empty(dom_cpumask_soft) ? dom_cpumask : dom_cpumask_soft; nodes_clear(d->node_affinity); - for_each_cpu ( cpu, dom_affinity ) + for_each_cpu (cpu, dom_affinity) node_set(cpu_to_node(cpu), d->node_affinity); } @@ -588,7 +592,6 @@ void domain_update_node_affinity(struct domain *d) free_cpumask_var(dom_cpumask); } - int domain_set_node_affinity(struct domain *d, const nodemask_t *affinity) { /* Being affine with no nodes is just wrong */ @@ -618,15 +621,13 @@ out: return 0; } - struct domain *get_domain_by_id(domid_t dom) { struct domain *d; rcu_read_lock(&domlist_read_lock); - for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]); - d != NULL; + for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]); d != NULL; d = rcu_dereference(d->next_in_hashbucket) ) { if ( d->domain_id == dom ) @@ -642,15 +643,13 @@ struct domain *get_domain_by_id(domid_t dom) return d; } - struct domain *rcu_lock_domain_by_id(domid_t dom) { struct domain *d = NULL; rcu_read_lock(&domlist_read_lock); - for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]); - d != NULL; + for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]); d != NULL; d = rcu_dereference(d->next_in_hashbucket) ) { if ( d->domain_id == dom ) @@ -710,7 +709,7 @@ int domain_kill(struct domain *d) return -EINVAL; /* Protected by d->domain_lock. */ - switch ( d->is_dying ) + switch (d->is_dying) { case DOMDYING_alive: domain_unlock(d); @@ -738,13 +737,13 @@ int domain_kill(struct domain *d) break; if ( cpupool_move_domain(d, cpupool0) ) return -ERESTART; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { unmap_runstate_area(v); unmap_vcpu_info(v); } d->is_dying = DOMDYING_dead; - /* Mem event cleanup has to go here because the rings + /* Mem event cleanup has to go here because the rings * have to be put before we call put_domain. 
*/ vm_event_cleanup(d); put_domain(d); @@ -757,7 +756,6 @@ int domain_kill(struct domain *d) return rc; } - void __domain_crash(struct domain *d) { if ( d->is_shutting_down ) @@ -766,8 +764,8 @@ void __domain_crash(struct domain *d) } else if ( d == current->domain ) { - printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n", - d->domain_id, current->vcpu_id, smp_processor_id()); + printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n", d->domain_id, + current->vcpu_id, smp_processor_id()); show_execution_state(guest_cpu_user_regs()); } else @@ -779,7 +777,6 @@ void __domain_crash(struct domain *d) domain_shutdown(d, SHUTDOWN_crash); } - int domain_shutdown(struct domain *d, u8 reason) { struct vcpu *v; @@ -808,7 +805,7 @@ int domain_shutdown(struct domain *d, u8 reason) smp_mb(); /* set shutdown status /then/ check for per-cpu deferrals */ - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { if ( reason == SHUTDOWN_crash ) v->defer_shutdown = 0; @@ -842,7 +839,7 @@ void domain_resume(struct domain *d) d->is_shutting_down = d->is_shut_down = 0; d->shutdown_code = SHUTDOWN_CODE_INVALID; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { if ( v->paused_for_shutdown ) vcpu_unpause(v); @@ -969,11 +966,11 @@ void domain_destroy(struct domain *d) /* Delete from task list and task hashtable. */ spin_lock(&domlist_update_lock); pd = &domain_list; - while ( *pd != d ) + while ( *pd != d ) pd = &(*pd)->next_in_list; rcu_assign_pointer(*pd, d->next_in_list); pd = &domain_hash[DOMAIN_HASH(d->domain_id)]; - while ( *pd != d ) + while ( *pd != d ) pd = &(*pd)->next_in_hashbucket; rcu_assign_pointer(*pd, d->next_in_hashbucket); spin_unlock(&domlist_update_lock); @@ -1005,8 +1002,7 @@ int vcpu_pause_by_systemcontroller(struct vcpu *v) { int old, new, prev = v->controller_pause_count; - do - { + do { old = prev; new = old + 1; @@ -1025,8 +1021,7 @@ int vcpu_unpause_by_systemcontroller(struct vcpu *v) { int old, new, prev = v->controller_pause_count; - do - { + do { old = prev; new = old - 1; @@ -1041,14 +1036,13 @@ int vcpu_unpause_by_systemcontroller(struct vcpu *v) return 0; } -static void do_domain_pause(struct domain *d, - void (*sleep_fn)(struct vcpu *v)) +static void do_domain_pause(struct domain *d, void (*sleep_fn)(struct vcpu *v)) { struct vcpu *v; atomic_inc(&d->pause_count); - for_each_vcpu( d, v ) + for_each_vcpu (d, v) sleep_fn(v); arch_domain_pause(d); @@ -1072,7 +1066,7 @@ void domain_unpause(struct domain *d) arch_domain_unpause(d); if ( atomic_dec_and_test(&d->pause_count) ) - for_each_vcpu( d, v ) + for_each_vcpu (d, v) vcpu_wake(v); } @@ -1081,8 +1075,7 @@ int __domain_pause_by_systemcontroller(struct domain *d, { int old, new, prev = d->controller_pause_count; - do - { + do { old = prev; new = old + 1; @@ -1105,8 +1098,7 @@ int domain_unpause_by_systemcontroller(struct domain *d) { int old, new, prev = d->controller_pause_count; - do - { + do { old = prev; new = old - 1; @@ -1147,7 +1139,7 @@ int domain_pause_except_self(struct domain *d) /* Avoid racing with other vcpus which may want to be pausing us */ if ( !spin_trylock(&d->hypercall_deadlock_mutex) ) return -ERESTART; - for_each_vcpu( d, v ) + for_each_vcpu (d, v) if ( likely(v != curr) ) vcpu_pause(v); spin_unlock(&d->hypercall_deadlock_mutex); @@ -1164,7 +1156,7 @@ void domain_unpause_except_self(struct domain *d) if ( curr->domain == d ) { - for_each_vcpu( d, v ) + for_each_vcpu (d, v) if ( likely(v != curr) ) vcpu_unpause(v); } @@ -1178,7 +1170,7 @@ int domain_soft_reset(struct domain *d) int rc; spin_lock(&d->shutdown_lock); - for_each_vcpu 
( d, v ) + for_each_vcpu (d, v) if ( !v->paused_for_shutdown ) { spin_unlock(&d->shutdown_lock); @@ -1194,7 +1186,7 @@ int domain_soft_reset(struct domain *d) argo_soft_reset(d); - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { unmap_runstate_area(v); unmap_vcpu_info(v); @@ -1228,8 +1220,8 @@ int vcpu_reset(struct vcpu *v) v->poll_evtchn = 0; v->fpu_initialised = 0; - v->fpu_dirtied = 0; - v->is_initialised = 0; + v->fpu_dirtied = 0; + v->is_initialised = 0; #ifdef VCPU_TRAP_LAST v->async_exception_mask = 0; memset(v->async_exception_state, 0, sizeof(v->async_exception_state)); @@ -1238,7 +1230,7 @@ int vcpu_reset(struct vcpu *v) clear_bit(_VPF_blocked, &v->pause_flags); clear_bit(_VPF_in_reset, &v->pause_flags); - out_unlock: +out_unlock: domain_unlock(v->domain); vcpu_unpause(v); @@ -1329,8 +1321,7 @@ void unmap_vcpu_info(struct vcpu *v) if ( mfn_eq(mfn, INVALID_MFN) ) return; - unmap_domain_page_global((void *) - ((unsigned long)v->vcpu_info & PAGE_MASK)); + unmap_domain_page_global((void *)((unsigned long)v->vcpu_info & PAGE_MASK)); vcpu_info_reset(v); /* NB: Clobbers v->vcpu_info_mfn */ @@ -1346,7 +1337,7 @@ int map_runstate_area(struct vcpu *v, #ifdef CONFIG_COMPAT has_32bit_shinfo((v)->domain) ? sizeof(*v->compat_runstate_guest) : #endif - sizeof(*v->runstate_guest); + sizeof(*v->runstate_guest); if ( v->runstate_guest || v->runstate_nr ) { @@ -1404,8 +1395,8 @@ int map_runstate_area(struct vcpu *v, return 0; - release: - for ( i = 0; i < v->runstate_nr; i++) +release: + for ( i = 0; i < v->runstate_nr; i++ ) put_page_and_type(mfn_to_page(v->runstate_mfn[i])); v->runstate_nr = 0; @@ -1460,7 +1451,7 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg) if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL ) return -ENOENT; - switch ( cmd ) + switch (cmd) { case VCPUOP_initialise: if ( v->vcpu_info == &dummy_vcpu_info ) @@ -1468,8 +1459,8 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg) rc = arch_initialise_vcpu(v, arg); if ( rc == -ERESTART ) - rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iuh", - cmd, vcpuid, arg); + rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iuh", cmd, + vcpuid, arg); break; @@ -1495,11 +1486,11 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg) break; case VCPUOP_down: - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) if ( v->vcpu_id != vcpuid && !test_bit(_VPF_down, &v->pause_flags) ) { - rc = 1; - break; + rc = 1; + break; } if ( !rc ) /* Last vcpu going down? */ @@ -1517,7 +1508,7 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg) else #endif if ( !test_and_set_bit(_VPF_down, &v->pause_flags) ) - vcpu_sleep_nosync(v); + vcpu_sleep_nosync(v); break; @@ -1645,7 +1636,7 @@ long vm_assist(struct domain *p, unsigned int cmd, unsigned int type, if ( type >= BITS_PER_LONG || !test_bit(type, &valid) ) return -EINVAL; - switch ( cmd ) + switch (cmd) { case VMASST_CMD_enable: set_bit(type, &p->vm_assist); @@ -1688,7 +1679,8 @@ void free_pirq_struct(void *ptr) call_rcu(&pirq->rcu_head, _free_pirq_struct); } -struct migrate_info { +struct migrate_info +{ long (*func)(void *data); void *data; struct vcpu *vcpu; @@ -1707,8 +1699,8 @@ static void continue_hypercall_tasklet_handler(unsigned long _info) vcpu_sleep_sync(v); this_cpu(continue_info) = info; - return_reg(v) = (info->cpu == smp_processor_id()) - ? info->func(info->data) : -EINVAL; + return_reg(v) = + (info->cpu == smp_processor_id()) ? 
info->func(info->data) : -EINVAL; this_cpu(continue_info) = NULL; if ( info->nest-- == 0 ) @@ -1719,8 +1711,8 @@ static void continue_hypercall_tasklet_handler(unsigned long _info) } } -int continue_hypercall_on_cpu( - unsigned int cpu, long (*func)(void *data), void *data) +int continue_hypercall_on_cpu(unsigned int cpu, long (*func)(void *data), + void *data) { struct migrate_info *info; @@ -1739,12 +1731,9 @@ int continue_hypercall_on_cpu( info->vcpu = curr; info->nest = 0; - tasklet_kill( - &curr->continue_hypercall_tasklet); - tasklet_init( - &curr->continue_hypercall_tasklet, - continue_hypercall_tasklet_handler, - (unsigned long)info); + tasklet_kill(&curr->continue_hypercall_tasklet); + tasklet_init(&curr->continue_hypercall_tasklet, + continue_hypercall_tasklet_handler, (unsigned long)info); get_knownalive_domain(curr->domain); vcpu_pause_nosync(curr); @@ -1757,7 +1746,7 @@ int continue_hypercall_on_cpu( info->func = func; info->data = data; - info->cpu = cpu; + info->cpu = cpu; tasklet_schedule_on_cpu(&info->vcpu->continue_hypercall_tasklet, cpu); diff --git a/xen/common/domctl.c b/xen/common/domctl.c index d08b6274e2..05b5a7eaff 100644 --- a/xen/common/domctl.c +++ b/xen/common/domctl.c @@ -48,7 +48,7 @@ static int bitmap_to_xenctl_bitmap(struct xenctl_bitmap *xenctl_bitmap, return -ENOMEM; guest_bytes = (xenctl_bitmap->nr_bits + 7) / 8; - copy_bytes = min_t(unsigned int, guest_bytes, (nbits + 7) / 8); + copy_bytes = min_t(unsigned int, guest_bytes, (nbits + 7) / 8); bitmap_long_to_byte(bytemap, bitmap, nbits); @@ -77,14 +77,14 @@ static int xenctl_bitmap_to_bitmap(unsigned long *bitmap, return -ENOMEM; guest_bytes = (xenctl_bitmap->nr_bits + 7) / 8; - copy_bytes = min_t(unsigned int, guest_bytes, (nbits + 7) / 8); + copy_bytes = min_t(unsigned int, guest_bytes, (nbits + 7) / 8); if ( copy_bytes != 0 ) { if ( copy_from_guest(bytemap, xenctl_bitmap->bitmap, copy_bytes) ) err = -EFAULT; if ( (xenctl_bitmap->nr_bits & 7) && (guest_bytes == copy_bytes) ) - bytemap[guest_bytes-1] &= ~(0xff << (xenctl_bitmap->nr_bits & 7)); + bytemap[guest_bytes - 1] &= ~(0xff << (xenctl_bitmap->nr_bits & 7)); } if ( !err ) @@ -107,7 +107,8 @@ int xenctl_bitmap_to_cpumask(cpumask_var_t *cpumask, { int err = 0; - if ( alloc_cpumask_var(cpumask) ) { + if ( alloc_cpumask_var(cpumask) ) + { err = xenctl_bitmap_to_bitmap(cpumask_bits(*cpumask), xenctl_cpumap, nr_cpu_ids); /* In case of error, cleanup is up to us, as the caller won't care! */ @@ -164,7 +165,7 @@ void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info) * - domain is marked as blocked only if all its vcpus are blocked * - domain is marked as running if any of its vcpus is running */ - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { vcpu_runstate_get(v, &runstate); cpu_time += runstate.time[RUNSTATE_running]; @@ -182,14 +183,14 @@ void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info) info->cpu_time = cpu_time; info->flags = (info->nr_online_vcpus ? flags : 0) | - ((d->is_dying == DOMDYING_dead) ? XEN_DOMINF_dying : 0) | - (d->is_shut_down ? XEN_DOMINF_shutdown : 0) | - (d->controller_pause_count > 0 ? XEN_DOMINF_paused : 0) | - (d->debugger_attached ? XEN_DOMINF_debugged : 0) | - (d->is_xenstore ? XEN_DOMINF_xs_domain : 0) | - d->shutdown_code << XEN_DOMINF_shutdownshift; - - switch ( d->guest_type ) + ((d->is_dying == DOMDYING_dead) ? XEN_DOMINF_dying : 0) | + (d->is_shut_down ? XEN_DOMINF_shutdown : 0) | + (d->controller_pause_count > 0 ? XEN_DOMINF_paused : 0) | + (d->debugger_attached ? 
XEN_DOMINF_debugged : 0) | + (d->is_xenstore ? XEN_DOMINF_xs_domain : 0) | + d->shutdown_code << XEN_DOMINF_shutdownshift; + + switch (d->guest_type) { case guest_type_hvm: info->flags |= XEN_DOMINF_hvm_guest; @@ -200,11 +201,11 @@ void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info) xsm_security_domaininfo(d, info); - info->tot_pages = d->tot_pages; - info->max_pages = d->max_pages; + info->tot_pages = d->tot_pages; + info->max_pages = d->max_pages; info->outstanding_pages = d->outstanding_pages; - info->shr_pages = atomic_read(&d->shr_pages); - info->paged_pages = atomic_read(&d->paged_pages); + info->shr_pages = atomic_read(&d->shr_pages); + info->paged_pages = atomic_read(&d->paged_pages); info->shared_info_frame = mfn_to_gmfn(d, virt_to_mfn(d->shared_info)); BUG_ON(SHARED_M2P(info->shared_info_frame)); @@ -242,8 +243,8 @@ void domctl_lock_release(void) spin_unlock(&current->domain->hypercall_deadlock_mutex); } -static inline -int vcpuaffinity_params_invalid(const struct xen_domctl_vcpuaffinity *vcpuaff) +static inline int +vcpuaffinity_params_invalid(const struct xen_domctl_vcpuaffinity *vcpuaff) { return vcpuaff->flags == 0 || ((vcpuaff->flags & XEN_VCPUAFFINITY_HARD) && @@ -275,7 +276,6 @@ static struct vnuma_info *vnuma_alloc(unsigned int nr_vnodes, unsigned int nr_ranges, unsigned int nr_vcpus) { - struct vnuma_info *vnuma; /* @@ -294,7 +294,7 @@ static struct vnuma_info *vnuma_alloc(unsigned int nr_vnodes, if ( !vnuma ) return ERR_PTR(-ENOMEM); - vnuma->vdistance = xmalloc_array(unsigned int, nr_vnodes * nr_vnodes); + vnuma->vdistance = xmalloc_array(unsigned int, nr_vnodes *nr_vnodes); vnuma->vcpu_to_vnode = xmalloc_array(unsigned int, nr_vcpus); vnuma->vnode_to_pnode = xmalloc_array(nodeid_t, nr_vnodes); vnuma->vmemrange = xmalloc_array(xen_vmemrange_t, nr_ranges); @@ -371,7 +371,7 @@ static struct vnuma_info *vnuma_init(const struct xen_domctl_vnuma *uinfo, return info; - vnuma_fail: +vnuma_fail: vnuma_destroy(info); return ERR_PTR(ret); } @@ -389,13 +389,13 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION ) return -EACCES; - switch ( op->cmd ) + switch (op->cmd) { case XEN_DOMCTL_test_assign_device: if ( op->domain == DOMID_INVALID ) { - case XEN_DOMCTL_createdomain: - case XEN_DOMCTL_gdbsx_guestmemio: + case XEN_DOMCTL_createdomain: + case XEN_DOMCTL_gdbsx_guestmemio: d = NULL; break; } @@ -414,16 +414,15 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) { if ( d ) rcu_unlock_domain(d); - return hypercall_create_continuation( - __HYPERVISOR_domctl, "h", u_domctl); + return hypercall_create_continuation(__HYPERVISOR_domctl, "h", + u_domctl); } - switch ( op->cmd ) + switch (op->cmd) { - case XEN_DOMCTL_setvcpucontext: { - vcpu_guest_context_u c = { .nat = NULL }; + vcpu_guest_context_u c = {.nat = NULL}; unsigned int vcpu = op->u.vcpucontext.vcpu; struct vcpu *v; @@ -436,14 +435,14 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) { ret = vcpu_reset(v); if ( ret == -ERESTART ) - ret = hypercall_create_continuation( - __HYPERVISOR_domctl, "h", u_domctl); + ret = hypercall_create_continuation(__HYPERVISOR_domctl, "h", + u_domctl); break; } #ifdef CONFIG_COMPAT - BUILD_BUG_ON(sizeof(struct vcpu_guest_context) - < sizeof(struct compat_vcpu_guest_context)); + BUILD_BUG_ON(sizeof(struct vcpu_guest_context) < + sizeof(struct compat_vcpu_guest_context)); #endif ret = -ENOMEM; if ( (c.nat = alloc_vcpu_guest_context()) == NULL ) @@ -453,9 +452,8 @@ long
do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) if ( !is_pv_32bit_domain(d) ) ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1); else - ret = copy_from_guest(c.cmp, - guest_handle_cast(op->u.vcpucontext.ctxt, - void), 1); + ret = copy_from_guest( + c.cmp, guest_handle_cast(op->u.vcpucontext.ctxt, void), 1); #else ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1); #endif @@ -468,8 +466,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) domain_unpause(d); if ( ret == -ERESTART ) - ret = hypercall_create_continuation( - __HYPERVISOR_domctl, "h", u_domctl); + ret = hypercall_create_continuation(__HYPERVISOR_domctl, "h", + u_domctl); } free_vcpu_guest_context(c.nat); @@ -495,7 +493,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) case XEN_DOMCTL_createdomain: { - domid_t dom; + domid_t dom; static domid_t rover = 0; dom = op->domain; @@ -558,9 +556,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) if ( d->vcpu[i] != NULL ) continue; - cpu = (i == 0) ? - cpumask_any(online) : - cpumask_cycle(d->vcpu[i-1]->processor, online); + cpu = (i == 0) ? cpumask_any(online) + : cpumask_cycle(d->vcpu[i - 1]->processor, online); if ( vcpu_create(d, i, cpu) == NULL ) goto maxvcpu_out; @@ -589,8 +586,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) ret = domain_kill(d); domain_unlock(d); if ( ret == -ERESTART ) - ret = hypercall_create_continuation( - __HYPERVISOR_domctl, "h", u_domctl); + ret = hypercall_create_continuation(__HYPERVISOR_domctl, "h", + u_domctl); goto domctl_out_unlock_domonly; case XEN_DOMCTL_setnodeaffinity: @@ -663,9 +660,9 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) */ if ( vcpuaff->flags & XEN_VCPUAFFINITY_HARD ) { - ret = xenctl_bitmap_to_bitmap(cpumask_bits(new_affinity), - &vcpuaff->cpumap_hard, - nr_cpu_ids); + ret = + xenctl_bitmap_to_bitmap(cpumask_bits(new_affinity), + &vcpuaff->cpumap_hard, nr_cpu_ids); if ( !ret ) ret = vcpu_set_hard_affinity(v, new_affinity); if ( ret ) @@ -681,10 +678,10 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) } if ( vcpuaff->flags & XEN_VCPUAFFINITY_SOFT ) { - ret = xenctl_bitmap_to_bitmap(cpumask_bits(new_affinity), - &vcpuaff->cpumap_soft, - nr_cpu_ids); - if ( !ret) + ret = + xenctl_bitmap_to_bitmap(cpumask_bits(new_affinity), + &vcpuaff->cpumap_soft, nr_cpu_ids); + if ( !ret ) ret = vcpu_set_soft_affinity(v, new_affinity); if ( ret ) { @@ -709,7 +706,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) new_affinity); } - setvcpuaffinity_out: + setvcpuaffinity_out: free_cpumask_var(new_affinity); free_cpumask_var(old_affinity); } @@ -743,7 +740,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) rcu_read_lock(&domlist_read_lock); dom = op->domain; - for_each_domain ( d ) + for_each_domain (d) if ( d->domain_id >= dom ) break; } @@ -773,8 +770,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) case XEN_DOMCTL_getvcpucontext: { - vcpu_guest_context_u c = { .nat = NULL }; - struct vcpu *v; + vcpu_guest_context_u c = {.nat = NULL}; + struct vcpu *v; ret = -EINVAL; if ( op->u.vcpucontext.vcpu >= d->max_vcpus || @@ -787,8 +784,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) goto getvcpucontext_out; #ifdef CONFIG_COMPAT - BUILD_BUG_ON(sizeof(struct vcpu_guest_context) - < sizeof(struct compat_vcpu_guest_context)); + BUILD_BUG_ON(sizeof(struct vcpu_guest_context) < + sizeof(struct compat_vcpu_guest_context)); #endif ret = -ENOMEM; if ( (c.nat = xzalloc(struct 
vcpu_guest_context)) == NULL ) @@ -805,8 +802,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) if ( !is_pv_32bit_domain(d) ) ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1); else - ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt, - void), c.cmp, 1); + ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt, void), + c.cmp, 1); #else ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1); #endif @@ -822,7 +819,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) case XEN_DOMCTL_getvcpuinfo: { - struct vcpu *v; + struct vcpu *v; struct vcpu_runstate_info runstate; ret = -EINVAL; @@ -835,11 +832,11 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) vcpu_runstate_get(v, &runstate); - op->u.getvcpuinfo.online = !(v->pause_flags & VPF_down); - op->u.getvcpuinfo.blocked = !!(v->pause_flags & VPF_blocked); - op->u.getvcpuinfo.running = v->is_running; + op->u.getvcpuinfo.online = !(v->pause_flags & VPF_down); + op->u.getvcpuinfo.blocked = !!(v->pause_flags & VPF_blocked); + op->u.getvcpuinfo.running = v->is_running; op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running]; - op->u.getvcpuinfo.cpu = v->processor; + op->u.getvcpuinfo.cpu = v->processor; ret = 0; copyback = 1; break; @@ -855,7 +852,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) * that the domain will now be allowed to "ratchet" down to new_max. In * the meantime, while tot > max, all new allocations are disallowed. */ - d->max_pages = min(new_max, (uint64_t)(typeof(d->max_pages))-1); + d->max_pages = min(new_max, (uint64_t)(typeof(d->max_pages)) - 1); spin_unlock(&d->page_alloc_lock); break; } @@ -906,8 +903,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */ break; - if ( !iomem_access_permitted(current->domain, - mfn, mfn + nr_mfns - 1) || + if ( !iomem_access_permitted(current->domain, mfn, mfn + nr_mfns - 1) || xsm_iomem_permission(XSM_HOOK, d, mfn, mfn + nr_mfns - 1, allow) ) ret = -EPERM; else if ( allow ) @@ -957,9 +953,10 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) ret = map_mmio_regions(d, _gfn(gfn), nr_mfns, _mfn(mfn)); if ( ret < 0 ) - printk(XENLOG_G_WARNING - "memory_map:fail: dom%d gfn=%lx mfn=%lx nr=%lx ret:%ld\n", - d->domain_id, gfn, mfn, nr_mfns, ret); + printk( + XENLOG_G_WARNING + "memory_map:fail: dom%d gfn=%lx mfn=%lx nr=%lx ret:%ld\n", + d->domain_id, gfn, mfn, nr_mfns, ret); } else { @@ -969,8 +966,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) ret = unmap_mmio_regions(d, _gfn(gfn), nr_mfns, _mfn(mfn)); if ( ret < 0 && is_hardware_domain(current->domain) ) - printk(XENLOG_ERR - "memory_map: error %ld removing dom%d access to [%lx,%lx]\n", + printk(XENLOG_ERR "memory_map: error %ld removing dom%d access " + "to [%lx,%lx]\n", ret, d->domain_id, mfn, mfn_end); } /* Do this unconditionally to cover errors on above failure paths. 
*/ @@ -1030,7 +1027,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) { domain_pause(d); arch_p2m_set_access_required(d, - op->u.access_required.access_required); + op->u.access_required.access_required); domain_unpause(d); } break; @@ -1073,7 +1070,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) domctl_lock_release(); - domctl_out_unlock_domonly: +domctl_out_unlock_domonly: if ( d ) rcu_unlock_domain(d); diff --git a/xen/common/earlycpio.c b/xen/common/earlycpio.c index 4bcf32a51c..ffcde94f1a 100644 --- a/xen/common/earlycpio.c +++ b/xen/common/earlycpio.c @@ -30,25 +30,26 @@ #include #include -#define ALIGN(x, a) ((x + (a) - 1) & ~((a) - 1)) -#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) - -enum cpio_fields { - C_MAGIC, - C_INO, - C_MODE, - C_UID, - C_GID, - C_NLINK, - C_MTIME, - C_FILESIZE, - C_MAJ, - C_MIN, - C_RMAJ, - C_RMIN, - C_NAMESIZE, - C_CHKSUM, - C_NFIELDS +#define ALIGN(x, a) ((x + (a)-1) & ~((a)-1)) +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) + +enum cpio_fields +{ + C_MAGIC, + C_INO, + C_MODE, + C_UID, + C_GID, + C_NLINK, + C_MTIME, + C_FILESIZE, + C_MAJ, + C_MIN, + C_RMAJ, + C_RMIN, + C_NAMESIZE, + C_CHKSUM, + C_NFIELDS }; /** @@ -68,84 +69,89 @@ enum cpio_fields { * the match returned an empty filename string. */ -struct cpio_data __init find_cpio_data(const char *path, void *data, - size_t len, long *nextoff) +struct cpio_data __init find_cpio_data(const char *path, void *data, size_t len, + long *nextoff) { - const size_t cpio_header_len = 8*C_NFIELDS - 2; - struct cpio_data cd = { NULL, 0, "" }; - const char *p, *dptr, *nptr; - unsigned int ch[C_NFIELDS], *chp, v; - unsigned char c, x; - size_t mypathsize = strlen(path); - int i, j; - - p = data; - - while (len > cpio_header_len) { - if (!*p) { - /* All cpio headers need to be 4-byte aligned */ - p += 4; - len -= 4; - continue; - } - - j = 6; /* The magic field is only 6 characters */ - chp = ch; - for (i = C_NFIELDS; i; i--) { - v = 0; - while (j--) { - v <<= 4; - c = *p++; - - x = c - '0'; - if (x < 10) { - v += x; - continue; - } - - x = (c | 0x20) - 'a'; - if (x < 6) { - v += x + 10; - continue; - } - - goto quit; /* Invalid hexadecimal */ - } - *chp++ = v; - j = 8; /* All other fields are 8 characters */ - } - - if ((ch[C_MAGIC] - 0x070701) > 1) - goto quit; /* Invalid magic */ - - len -= cpio_header_len; - - dptr = PTR_ALIGN(p + ch[C_NAMESIZE], 4); - nptr = PTR_ALIGN(dptr + ch[C_FILESIZE], 4); - - if (nptr > p + len || dptr < p || nptr < dptr) - goto quit; /* Buffer overrun */ - - if ((ch[C_MODE] & 0170000) == 0100000 && - ch[C_NAMESIZE] >= mypathsize && - !memcmp(p, path, mypathsize)) { - *nextoff = (long)nptr - (long)data; - if (ch[C_NAMESIZE] - mypathsize >= MAX_CPIO_FILE_NAME) { - printk( - "File %s exceeding MAX_CPIO_FILE_NAME [%d]\n", - p, MAX_CPIO_FILE_NAME); - } - strlcpy(cd.name, p + mypathsize, MAX_CPIO_FILE_NAME); - - cd.data = (void *)dptr; - cd.size = ch[C_FILESIZE]; - return cd; /* Found it! 
*/ - } - len -= (nptr - p); - p = nptr; - } + const size_t cpio_header_len = 8 * C_NFIELDS - 2; + struct cpio_data cd = {NULL, 0, ""}; + const char *p, *dptr, *nptr; + unsigned int ch[C_NFIELDS], *chp, v; + unsigned char c, x; + size_t mypathsize = strlen(path); + int i, j; + + p = data; + + while ( len > cpio_header_len ) + { + if ( !*p ) + { + /* All cpio headers need to be 4-byte aligned */ + p += 4; + len -= 4; + continue; + } + + j = 6; /* The magic field is only 6 characters */ + chp = ch; + for ( i = C_NFIELDS; i; i-- ) + { + v = 0; + while ( j-- ) + { + v <<= 4; + c = *p++; + + x = c - '0'; + if ( x < 10 ) + { + v += x; + continue; + } + + x = (c | 0x20) - 'a'; + if ( x < 6 ) + { + v += x + 10; + continue; + } + + goto quit; /* Invalid hexadecimal */ + } + *chp++ = v; + j = 8; /* All other fields are 8 characters */ + } + + if ( (ch[C_MAGIC] - 0x070701) > 1 ) + goto quit; /* Invalid magic */ + + len -= cpio_header_len; + + dptr = PTR_ALIGN(p + ch[C_NAMESIZE], 4); + nptr = PTR_ALIGN(dptr + ch[C_FILESIZE], 4); + + if ( nptr > p + len || dptr < p || nptr < dptr ) + goto quit; /* Buffer overrun */ + + if ( (ch[C_MODE] & 0170000) == 0100000 && + ch[C_NAMESIZE] >= mypathsize && !memcmp(p, path, mypathsize) ) + { + *nextoff = (long)nptr - (long)data; + if ( ch[C_NAMESIZE] - mypathsize >= MAX_CPIO_FILE_NAME ) + { + printk("File %s exceeding MAX_CPIO_FILE_NAME [%d]\n", p, + MAX_CPIO_FILE_NAME); + } + strlcpy(cd.name, p + mypathsize, MAX_CPIO_FILE_NAME); + + cd.data = (void *)dptr; + cd.size = ch[C_FILESIZE]; + return cd; /* Found it! */ + } + len -= (nptr - p); + p = nptr; + } quit: - return cd; + return cd; } - diff --git a/xen/common/efi/boot.c b/xen/common/efi/boot.c index 79193784ff..320484a1d1 100644 --- a/xen/common/efi/boot.c +++ b/xen/common/efi/boot.c @@ -14,7 +14,7 @@ #include #include #if EFI_PAGE_SIZE != PAGE_SIZE -# error Cannot use xen/pfn.h here! +#error Cannot use xen/pfn.h here! 
#endif #include #include @@ -34,53 +34,56 @@ #define EFI_REVISION(major, minor) (((major) << 16) | (minor)) -#define SMBIOS3_TABLE_GUID \ - { 0xf2fd1544, 0x9794, 0x4a2c, {0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94} } -#define SHIM_LOCK_PROTOCOL_GUID \ - { 0x605dab50, 0xe046, 0x4300, {0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23} } -#define APPLE_PROPERTIES_PROTOCOL_GUID \ - { 0x91bd12fe, 0xf6c3, 0x44fb, { 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0} } +#define SMBIOS3_TABLE_GUID \ + { \ + 0xf2fd1544, 0x9794, 0x4a2c, \ + { \ + 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94 \ + } \ + } +#define SHIM_LOCK_PROTOCOL_GUID \ + { \ + 0x605dab50, 0xe046, 0x4300, \ + { \ + 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23 \ + } \ + } +#define APPLE_PROPERTIES_PROTOCOL_GUID \ + { \ + 0x91bd12fe, 0xf6c3, 0x44fb, \ + { \ + 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0 \ + } \ + } -typedef EFI_STATUS -(/* _not_ EFIAPI */ *EFI_SHIM_LOCK_VERIFY) ( - IN VOID *Buffer, - IN UINT32 Size); +typedef EFI_STATUS(/* _not_ EFIAPI */ *EFI_SHIM_LOCK_VERIFY)(IN VOID *Buffer, + IN UINT32 Size); -typedef struct { +typedef struct +{ EFI_SHIM_LOCK_VERIFY Verify; } EFI_SHIM_LOCK_PROTOCOL; struct _EFI_APPLE_PROPERTIES; -typedef EFI_STATUS -(EFIAPI *EFI_APPLE_PROPERTIES_GET) ( - IN struct _EFI_APPLE_PROPERTIES *This, - IN const EFI_DEVICE_PATH *Device, - IN const CHAR16 *PropertyName, - OUT VOID *Buffer, - IN OUT UINT32 *BufferSize); +typedef EFI_STATUS(EFIAPI *EFI_APPLE_PROPERTIES_GET)( + IN struct _EFI_APPLE_PROPERTIES *This, IN const EFI_DEVICE_PATH *Device, + IN const CHAR16 *PropertyName, OUT VOID *Buffer, IN OUT UINT32 *BufferSize); + +typedef EFI_STATUS(EFIAPI *EFI_APPLE_PROPERTIES_SET)( + IN struct _EFI_APPLE_PROPERTIES *This, IN const EFI_DEVICE_PATH *Device, + IN const CHAR16 *PropertyName, IN const VOID *Value, IN UINT32 ValueLen); -typedef EFI_STATUS -(EFIAPI *EFI_APPLE_PROPERTIES_SET) ( - IN struct _EFI_APPLE_PROPERTIES *This, - IN const EFI_DEVICE_PATH *Device, - IN const CHAR16 *PropertyName, - IN const VOID *Value, - IN UINT32 ValueLen); - -typedef EFI_STATUS -(EFIAPI *EFI_APPLE_PROPERTIES_DELETE) ( - IN struct _EFI_APPLE_PROPERTIES *This, - IN const EFI_DEVICE_PATH *Device, +typedef EFI_STATUS(EFIAPI *EFI_APPLE_PROPERTIES_DELETE)( + IN struct _EFI_APPLE_PROPERTIES *This, IN const EFI_DEVICE_PATH *Device, IN const CHAR16 *PropertyName); -typedef EFI_STATUS -(EFIAPI *EFI_APPLE_PROPERTIES_GETALL) ( - IN struct _EFI_APPLE_PROPERTIES *This, - OUT VOID *Buffer, +typedef EFI_STATUS(EFIAPI *EFI_APPLE_PROPERTIES_GETALL)( + IN struct _EFI_APPLE_PROPERTIES *This, OUT VOID *Buffer, IN OUT UINT32 *BufferSize); -typedef struct _EFI_APPLE_PROPERTIES { +typedef struct _EFI_APPLE_PROPERTIES +{ UINTN Version; /* 0x10000 */ EFI_APPLE_PROPERTIES_GET Get; EFI_APPLE_PROPERTIES_SET Set; @@ -88,13 +91,14 @@ typedef struct _EFI_APPLE_PROPERTIES { EFI_APPLE_PROPERTIES_GETALL GetAll; } EFI_APPLE_PROPERTIES; -typedef struct _EFI_LOAD_OPTION { +typedef struct _EFI_LOAD_OPTION +{ UINT32 Attributes; UINT16 FilePathListLength; CHAR16 Description[]; } EFI_LOAD_OPTION; -#define LOAD_OPTION_ACTIVE 0x00000001 +#define LOAD_OPTION_ACTIVE 0x00000001 union string { CHAR16 *w; @@ -102,7 +106,8 @@ union string { const char *cs; }; -struct file { +struct file +{ UINTN size; union { EFI_PHYSICAL_ADDRESS addr; @@ -112,31 +117,32 @@ struct file { static CHAR16 *FormatDec(UINT64 Val, CHAR16 *Buffer); static CHAR16 *FormatHex(UINT64 Val, UINTN Width, CHAR16 *Buffer); -static void DisplayUint(UINT64 Val, INTN Width); +static void DisplayUint(UINT64 
Val, INTN Width); static CHAR16 *wstrcpy(CHAR16 *d, const CHAR16 *s); static void noreturn blexit(const CHAR16 *str); static void PrintErrMesg(const CHAR16 *mesg, EFI_STATUS ErrCode); static char *get_value(const struct file *cfg, const char *section, - const char *item); + const char *item); static char *split_string(char *s); static CHAR16 *s2w(union string *str); static char *w2s(const union string *str); static bool read_file(EFI_FILE_HANDLE dir_handle, CHAR16 *name, struct file *file, char *options); -static size_t wstrlen(const CHAR16 * s); +static size_t wstrlen(const CHAR16 *s); static int set_color(u32 mask, int bpp, u8 *pos, u8 *sz); static bool match_guid(const EFI_GUID *guid1, const EFI_GUID *guid2); static void efi_init(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable); static void efi_console_set_mode(void); static EFI_GRAPHICS_OUTPUT_PROTOCOL *efi_get_gop(void); -static UINTN efi_find_gop_mode(EFI_GRAPHICS_OUTPUT_PROTOCOL *gop, - UINTN cols, UINTN rows, UINTN depth); +static UINTN efi_find_gop_mode(EFI_GRAPHICS_OUTPUT_PROTOCOL *gop, UINTN cols, + UINTN rows, UINTN depth); static void efi_tables(void); static void setup_efi_pci(void); static void efi_variables(void); static void efi_set_gop_mode(EFI_GRAPHICS_OUTPUT_PROTOCOL *gop, UINTN gop_mode); -static void efi_exit_boot(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable); +static void efi_exit_boot(EFI_HANDLE ImageHandle, + EFI_SYSTEM_TABLE *SystemTable); static const EFI_BOOT_SERVICES *__initdata efi_bs; static UINT32 __initdata efi_bs_revision; @@ -170,13 +176,13 @@ static CHAR16 __initdata newline[] = L"\r\n"; * - use ebmalloc() in ARM/common EFI boot code, * - call free_ebmalloc_unused_mem() somewhere in init code. */ -#define EBMALLOC_SIZE MB(0) +#define EBMALLOC_SIZE MB(0) #else -#define EBMALLOC_SIZE MB(1) +#define EBMALLOC_SIZE MB(1) #endif -static char __section(".bss.page_aligned") __aligned(PAGE_SIZE) - ebmalloc_mem[EBMALLOC_SIZE]; +static char __section(".bss.page_aligned") + __aligned(PAGE_SIZE) ebmalloc_mem[EBMALLOC_SIZE]; static unsigned long __initdata ebmalloc_allocated; /* EFI boot allocator. */ @@ -194,7 +200,8 @@ static void __init __maybe_unused *ebmalloc(size_t size) static void __init __maybe_unused free_ebmalloc_unused_mem(void) { -#if 0 /* FIXME: Putting a hole in the BSS breaks the IOMMU mappings for dom0. */ +#if 0 /* FIXME: Putting a hole in the BSS breaks the IOMMU mappings for dom0. 
\ + */ unsigned long start, end; start = (unsigned long)ebmalloc_mem + PAGE_ALIGN(ebmalloc_allocated); @@ -233,7 +240,7 @@ static void __init DisplayUint(UINT64 Val, INTN Width) { CHAR16 PrintString[32], *end; - if (Width < 0) + if ( Width < 0 ) end = FormatDec(Val, PrintString); else { @@ -327,8 +334,7 @@ static char *__init w2s(const union string *str) static bool __init match_guid(const EFI_GUID *guid1, const EFI_GUID *guid2) { - return guid1->Data1 == guid2->Data1 && - guid1->Data2 == guid2->Data2 && + return guid1->Data1 == guid2->Data1 && guid1->Data2 == guid2->Data2 && guid1->Data3 == guid2->Data3 && !memcmp(guid1->Data4, guid2->Data4, sizeof(guid1->Data4)); } @@ -360,19 +366,19 @@ static void __init noreturn blexit(const CHAR16 *str) /* generic routine for printing error messages */ static void __init PrintErrMesg(const CHAR16 *mesg, EFI_STATUS ErrCode) { - static const CHAR16* const ErrCodeToStr[] __initconstrel = { - [~EFI_ERROR_MASK & EFI_NOT_FOUND] = L"Not found", - [~EFI_ERROR_MASK & EFI_NO_MEDIA] = L"The device has no media", - [~EFI_ERROR_MASK & EFI_MEDIA_CHANGED] = L"Media changed", - [~EFI_ERROR_MASK & EFI_DEVICE_ERROR] = L"Device error", - [~EFI_ERROR_MASK & EFI_VOLUME_CORRUPTED] = L"Volume corrupted", - [~EFI_ERROR_MASK & EFI_ACCESS_DENIED] = L"Access denied", - [~EFI_ERROR_MASK & EFI_OUT_OF_RESOURCES] = L"Out of resources", - [~EFI_ERROR_MASK & EFI_VOLUME_FULL] = L"Volume is full", - [~EFI_ERROR_MASK & EFI_SECURITY_VIOLATION] = L"Security violation", - [~EFI_ERROR_MASK & EFI_CRC_ERROR] = L"CRC error", - [~EFI_ERROR_MASK & EFI_COMPROMISED_DATA] = L"Compromised data", - [~EFI_ERROR_MASK & EFI_BUFFER_TOO_SMALL] = L"Buffer too small", + static const CHAR16 *const ErrCodeToStr[] __initconstrel = { + [~EFI_ERROR_MASK & EFI_NOT_FOUND] = L"Not found", + [~EFI_ERROR_MASK & EFI_NO_MEDIA] = L"The device has no media", + [~EFI_ERROR_MASK & EFI_MEDIA_CHANGED] = L"Media changed", + [~EFI_ERROR_MASK & EFI_DEVICE_ERROR] = L"Device error", + [~EFI_ERROR_MASK & EFI_VOLUME_CORRUPTED] = L"Volume corrupted", + [~EFI_ERROR_MASK & EFI_ACCESS_DENIED] = L"Access denied", + [~EFI_ERROR_MASK & EFI_OUT_OF_RESOURCES] = L"Out of resources", + [~EFI_ERROR_MASK & EFI_VOLUME_FULL] = L"Volume is full", + [~EFI_ERROR_MASK & EFI_SECURITY_VIOLATION] = L"Security violation", + [~EFI_ERROR_MASK & EFI_CRC_ERROR] = L"CRC error", + [~EFI_ERROR_MASK & EFI_COMPROMISED_DATA] = L"Compromised data", + [~EFI_ERROR_MASK & EFI_BUFFER_TOO_SMALL] = L"Buffer too small", }; EFI_STATUS ErrIdx = ErrCode & ~EFI_ERROR_MASK; @@ -380,7 +386,7 @@ static void __init PrintErrMesg(const CHAR16 *mesg, EFI_STATUS ErrCode) PrintErr((CHAR16 *)mesg); PrintErr(L": "); - if( (ErrIdx < ARRAY_SIZE(ErrCodeToStr)) && ErrCodeToStr[ErrIdx] ) + if ( (ErrIdx < ARRAY_SIZE(ErrCodeToStr)) && ErrCodeToStr[ErrIdx] ) mesg = ErrCodeToStr[ErrIdx]; else { @@ -417,15 +423,15 @@ static unsigned int __init get_argv(unsigned int argc, CHAR16 **argv, { const EFI_LOAD_OPTION *elo = data; /* The minimum size the buffer needs to be. 
*/ - size_t elo_min = offsetof(EFI_LOAD_OPTION, Description[1]) + - elo->FilePathListLength; + size_t elo_min = + offsetof(EFI_LOAD_OPTION, Description[1]) + elo->FilePathListLength; if ( (elo->Attributes & LOAD_OPTION_ACTIVE) && size > elo_min && !((size - elo_min) % sizeof(*cmdline)) ) { const CHAR16 *desc = elo->Description; - const CHAR16 *end = wmemchr(desc, 0, - (size - elo_min) / sizeof(*desc) + 1); + const CHAR16 *end = + wmemchr(desc, 0, (size - elo_min) / sizeof(*desc) + 1); if ( end ) { @@ -444,7 +450,7 @@ static unsigned int __init get_argv(unsigned int argc, CHAR16 **argv, return 0; for ( ; size > sizeof(*cmdline) && *cmdline; - size -= sizeof(*cmdline), ++cmdline ) + size -= sizeof(*cmdline), ++cmdline ) { bool cur_sep = *cmdline == L' ' || *cmdline == L'\t'; @@ -497,8 +503,8 @@ static EFI_FILE_HANDLE __init get_parent_handle(EFI_LOADED_IMAGE *loaded_image, EFI_FILE_IO_INTERFACE *fio; /* Get the file system interface. */ - ret = efi_bs->HandleProtocol(loaded_image->DeviceHandle, - &fs_protocol, (void **)&fio); + ret = efi_bs->HandleProtocol(loaded_image->DeviceHandle, &fs_protocol, + (void **)&fio); if ( EFI_ERROR(ret) ) PrintErrMesg(L"Couldn't obtain the File System Protocol Interface", ret); @@ -533,8 +539,8 @@ static EFI_FILE_HANDLE __init get_parent_handle(EFI_LOADED_IMAGE *loaded_image, dir_handle = new_handle; } fp = (void *)dp; - if ( BUFFERSIZE < DevicePathNodeLength(dp) - - sizeof(*dp) + sizeof(*buffer) ) + if ( BUFFERSIZE < + DevicePathNodeLength(dp) - sizeof(*dp) + sizeof(*buffer) ) blexit(L"Increase BUFFERSIZE"); memcpy(buffer, fp->PathName, DevicePathNodeLength(dp) - sizeof(*dp)); buffer[(DevicePathNodeLength(dp) - sizeof(*dp)) / sizeof(*buffer)] = 0; @@ -552,7 +558,8 @@ static EFI_FILE_HANDLE __init get_parent_handle(EFI_LOADED_IMAGE *loaded_image, ret = dir_handle->Open(dir_handle, &new_handle, buffer, EFI_FILE_MODE_READ, 0); - if ( ret != EFI_SUCCESS ) { + if ( ret != EFI_SUCCESS ) + { PrintErr(L"Open failed for "); PrintErrMesg(buffer, ret); } @@ -572,8 +579,8 @@ static CHAR16 *__init point_tail(CHAR16 *fn) { CHAR16 *tail = NULL; - for ( ; ; ++fn ) - switch ( *fn ) + for ( ;; ++fn ) + switch (*fn) { case 0: return tail; @@ -589,7 +596,7 @@ static CHAR16 *__init point_tail(CHAR16 *fn) * to remainder of string, if any/ NULL returned if * no remainder after space. 
*/ -static char * __init split_string(char *s) +static char *__init split_string(char *s) { while ( *s && !isspace(*s) ) ++s; @@ -611,8 +618,8 @@ static bool __init read_file(EFI_FILE_HANDLE dir_handle, CHAR16 *name, if ( !name ) PrintErrMesg(L"No filename", EFI_OUT_OF_RESOURCES); - ret = dir_handle->Open(dir_handle, &FileHandle, name, - EFI_FILE_MODE_READ, 0); + ret = + dir_handle->Open(dir_handle, &FileHandle, name, EFI_FILE_MODE_READ, 0); if ( file == &cfg && ret == EFI_NOT_FOUND ) return false; if ( EFI_ERROR(ret) ) @@ -701,8 +708,8 @@ static void __init pre_parse(const struct file *cfg) start = 0; } if ( cfg->size && end[-1] ) - PrintStr(L"No newline at end of config file," - " last line will be ignored.\r\n"); + PrintStr(L"No newline at end of config file," + " last line will be ignored.\r\n"); } static char *__init get_value(const struct file *cfg, const char *section, @@ -714,7 +721,7 @@ static char *__init get_value(const struct file *cfg, const char *section, for ( ; ptr < end; ++ptr ) { - switch ( *ptr ) + switch (*ptr) { case 0: continue; @@ -741,7 +748,8 @@ static char *__init get_value(const struct file *cfg, const char *section, return NULL; } -static void __init efi_init(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable) +static void __init efi_init(EFI_HANDLE ImageHandle, + EFI_SYSTEM_TABLE *SystemTable) { efi_ih = ImageHandle; efi_bs = SystemTable->BootServices; @@ -762,8 +770,8 @@ static void __init efi_console_set_mode(void) UINTN cols, rows, size; unsigned int best, i; - for ( i = 0, size = 0, best = StdOut->Mode->Mode; - i < StdOut->Mode->MaxMode; ++i ) + for ( i = 0, size = 0, best = StdOut->Mode->Mode; i < StdOut->Mode->MaxMode; + ++i ) { if ( StdOut->QueryMode(StdOut, i, &cols, &rows) == EFI_SUCCESS && cols * rows > size ) @@ -790,8 +798,8 @@ static EFI_GRAPHICS_OUTPUT_PROTOCOL __init *efi_get_gop(void) if ( status == EFI_BUFFER_TOO_SMALL ) status = efi_bs->AllocatePool(EfiLoaderData, size, (void **)&handles); if ( !EFI_ERROR(status) ) - status = efi_bs->LocateHandle(ByProtocol, &gop_guid, NULL, &size, - handles); + status = + efi_bs->LocateHandle(ByProtocol, &gop_guid, NULL, &size, handles); if ( EFI_ERROR(status) ) size = 0; for ( i = 0; i < size / sizeof(*handles); ++i ) @@ -826,7 +834,7 @@ static UINTN __init efi_find_gop_mode(EFI_GRAPHICS_OUTPUT_PROTOCOL *gop, status = gop->QueryMode(gop, i, &info_size, &mode_info); if ( EFI_ERROR(status) ) continue; - switch ( mode_info->PixelFormat ) + switch (mode_info->PixelFormat) { case PixelBitMask: bpp = hweight32(mode_info->PixelInformation.RedMask | @@ -841,15 +849,15 @@ static UINTN __init efi_find_gop_mode(EFI_GRAPHICS_OUTPUT_PROTOCOL *gop, continue; } if ( cols == mode_info->HorizontalResolution && - rows == mode_info->VerticalResolution && - (!depth || bpp == depth) ) + rows == mode_info->VerticalResolution && (!depth || bpp == depth) ) { gop_mode = i; break; } if ( !cols && !rows && (UINTN)mode_info->HorizontalResolution * - mode_info->VerticalResolution > size ) + mode_info->VerticalResolution > + size ) { size = (UINTN)mode_info->HorizontalResolution * mode_info->VerticalResolution; @@ -874,22 +882,22 @@ static void __init efi_tables(void) static EFI_GUID __initdata smbios3_guid = SMBIOS3_TABLE_GUID; if ( match_guid(&acpi2_guid, &efi_ct[i].VendorGuid) ) - efi.acpi20 = (long)efi_ct[i].VendorTable; + efi.acpi20 = (long)efi_ct[i].VendorTable; if ( match_guid(&acpi_guid, &efi_ct[i].VendorGuid) ) - efi.acpi = (long)efi_ct[i].VendorTable; + efi.acpi = (long)efi_ct[i].VendorTable; if ( match_guid(&mps_guid, 
&efi_ct[i].VendorGuid) ) - efi.mps = (long)efi_ct[i].VendorTable; + efi.mps = (long)efi_ct[i].VendorTable; if ( match_guid(&smbios_guid, &efi_ct[i].VendorGuid) ) - efi.smbios = (long)efi_ct[i].VendorTable; + efi.smbios = (long)efi_ct[i].VendorTable; if ( match_guid(&smbios3_guid, &efi_ct[i].VendorGuid) ) - efi.smbios3 = (long)efi_ct[i].VendorTable; + efi.smbios3 = (long)efi_ct[i].VendorTable; } #ifndef CONFIG_ARM /* TODO - disabled until implemented on ARM */ - dmi_efi_get_table(efi.smbios != EFI_INVALID_TABLE_ADDR - ? (void *)(long)efi.smbios : NULL, - efi.smbios3 != EFI_INVALID_TABLE_ADDR - ? (void *)(long)efi.smbios3 : NULL); + dmi_efi_get_table( + efi.smbios != EFI_INVALID_TABLE_ADDR ? (void *)(long)efi.smbios : NULL, + efi.smbios3 != EFI_INVALID_TABLE_ADDR ? (void *)(long)efi.smbios3 + : NULL); #endif } @@ -923,12 +931,12 @@ static void __init setup_efi_pci(void) if ( EFI_ERROR(status) || !pci || !pci->RomImage || !pci->RomSize ) continue; - status = pci->Attributes(pci, EfiPciIoAttributeOperationGet, 0, - &attributes); + status = + pci->Attributes(pci, EfiPciIoAttributeOperationGet, 0, &attributes); if ( EFI_ERROR(status) || !(attributes & EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM) || - EFI_ERROR(pci->GetLocation(pci, &segment, &bus, &device, - &function)) ) + EFI_ERROR( + pci->GetLocation(pci, &segment, &bus, &device, &function)) ) continue; DisplayUint(segment, 4); @@ -945,8 +953,8 @@ static void __init setup_efi_pci(void) PrintStr(newline); size = pci->RomSize + sizeof(*rom); - status = efi_bs->AllocatePool(EfiRuntimeServicesData, size, - (void **)&rom); + status = + efi_bs->AllocatePool(EfiRuntimeServicesData, size, (void **)&rom); if ( EFI_ERROR(status) ) continue; @@ -984,14 +992,14 @@ static void __init efi_variables(void) { EFI_STATUS status; - status = (efi_rs->Hdr.Revision >> 16) >= 2 ? - efi_rs->QueryVariableInfo(EFI_VARIABLE_NON_VOLATILE | - EFI_VARIABLE_BOOTSERVICE_ACCESS | - EFI_VARIABLE_RUNTIME_ACCESS, - &efi_boot_max_var_store_size, - &efi_boot_remain_var_store_size, - &efi_boot_max_var_size) : - EFI_INCOMPATIBLE_VERSION; + status = + (efi_rs->Hdr.Revision >> 16) >= 2 + ? 
efi_rs->QueryVariableInfo( + EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS, + &efi_boot_max_var_store_size, &efi_boot_remain_var_store_size, + &efi_boot_max_var_size) + : EFI_INCOMPATIBLE_VERSION; if ( EFI_ERROR(status) ) { efi_boot_max_var_store_size = 0; @@ -1011,8 +1019,8 @@ static void __init efi_get_apple_properties(void) VOID *data; EFI_STATUS status; - if ( efi_bs->LocateProtocol(&props_guid, NULL, - (void **)&props) != EFI_SUCCESS ) + if ( efi_bs->LocateProtocol(&props_guid, NULL, (void **)&props) != + EFI_SUCCESS ) return; if ( props->Version != 0x10000 ) { @@ -1023,9 +1031,8 @@ static void __init efi_get_apple_properties(void) } props->GetAll(props, NULL, &size); - if ( !size || - efi_bs->AllocatePool(EfiRuntimeServicesData, size, - &data) != EFI_SUCCESS ) + if ( !size || efi_bs->AllocatePool(EfiRuntimeServicesData, size, &data) != + EFI_SUCCESS ) return; status = props->GetAll(props, data, &size); @@ -1043,7 +1050,8 @@ static void __init efi_get_apple_properties(void) } } -static void __init efi_set_gop_mode(EFI_GRAPHICS_OUTPUT_PROTOCOL *gop, UINTN gop_mode) +static void __init efi_set_gop_mode(EFI_GRAPHICS_OUTPUT_PROTOCOL *gop, + UINTN gop_mode) { EFI_GRAPHICS_OUTPUT_MODE_INFORMATION *mode_info; EFI_STATUS status; @@ -1059,26 +1067,26 @@ static void __init efi_set_gop_mode(EFI_GRAPHICS_OUTPUT_PROTOCOL *gop, UINTN gop efi_arch_video_init(gop, info_size, mode_info); } -static void __init efi_exit_boot(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable) +static void __init efi_exit_boot(EFI_HANDLE ImageHandle, + EFI_SYSTEM_TABLE *SystemTable) { EFI_STATUS status; UINTN info_size = 0, map_key; bool retry; - efi_bs->GetMemoryMap(&info_size, NULL, &map_key, - &efi_mdesc_size, &mdesc_ver); + efi_bs->GetMemoryMap(&info_size, NULL, &map_key, &efi_mdesc_size, + &mdesc_ver); info_size += 8 * efi_mdesc_size; efi_memmap = efi_arch_allocate_mmap_buffer(info_size); if ( !efi_memmap ) blexit(L"Unable to allocate memory for EFI memory map"); - for ( retry = false; ; retry = true ) + for ( retry = false;; retry = true ) { efi_memmap_size = info_size; - status = SystemTable->BootServices->GetMemoryMap(&efi_memmap_size, - efi_memmap, &map_key, - &efi_mdesc_size, - &mdesc_ver); + status = SystemTable->BootServices->GetMemoryMap( + &efi_memmap_size, efi_memmap, &map_key, &efi_mdesc_size, + &mdesc_ver); if ( EFI_ERROR(status) ) PrintErrMesg(L"Cannot obtain memory map", status); @@ -1087,8 +1095,8 @@ static void __init efi_exit_boot(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *Syste efi_arch_pre_exit_boot(); - status = SystemTable->BootServices->ExitBootServices(ImageHandle, - map_key); + status = + SystemTable->BootServices->ExitBootServices(ImageHandle, map_key); efi_bs = NULL; if ( status != EFI_INVALID_PARAMETER || retry ) break; @@ -1108,21 +1116,21 @@ static void __init efi_exit_boot(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *Syste static int __init __maybe_unused set_color(u32 mask, int bpp, u8 *pos, u8 *sz) { - if ( bpp < 0 ) - return bpp; - if ( !mask ) - return -EINVAL; - for ( *pos = 0; !(mask & 1); ++*pos ) - mask >>= 1; - for ( *sz = 0; mask & 1; ++sz) - mask >>= 1; - if ( mask ) - return -EINVAL; - return max(*pos + *sz, bpp); + if ( bpp < 0 ) + return bpp; + if ( !mask ) + return -EINVAL; + for ( *pos = 0; !(mask & 1); ++*pos ) + mask >>= 1; + for ( *sz = 0; mask & 1; ++sz ) + mask >>= 1; + if ( mask ) + return -EINVAL; + return max(*pos + *sz, bpp); } -void EFIAPI __init noreturn -efi_start(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE 
*SystemTable) +void EFIAPI __init noreturn efi_start(EFI_HANDLE ImageHandle, + EFI_SYSTEM_TABLE *SystemTable) { static EFI_GUID __initdata loaded_image_guid = LOADED_IMAGE_PROTOCOL; static EFI_GUID __initdata shim_lock_guid = SHIM_LOCK_PROTOCOL_GUID; @@ -1133,7 +1141,7 @@ efi_start(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable) UINTN gop_mode = ~0; EFI_SHIM_LOCK_PROTOCOL *shim_lock; EFI_GRAPHICS_OUTPUT_PROTOCOL *gop = NULL; - union string section = { NULL }, name; + union string section = {NULL}, name; bool base_video = false; char *option_str; bool use_cfg_file; @@ -1162,11 +1170,10 @@ efi_start(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable) argc = get_argv(0, NULL, loaded_image->LoadOptions, loaded_image->LoadOptionsSize, &offset, NULL); - if ( argc > 0 && - efi_bs->AllocatePool(EfiLoaderData, - (argc + 1) * sizeof(*argv) + - loaded_image->LoadOptionsSize, - (void **)&argv) == EFI_SUCCESS ) + if ( argc > 0 && efi_bs->AllocatePool(EfiLoaderData, + (argc + 1) * sizeof(*argv) + + loaded_image->LoadOptionsSize, + (void **)&argv) == EFI_SUCCESS ) get_argv(argc, argv, loaded_image->LoadOptions, loaded_image->LoadOptionsSize, &offset, &options); else @@ -1192,7 +1199,8 @@ efi_start(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable) { PrintStr(L"Xen EFI Loader options:\r\n"); PrintStr(L"-basevideo retain current video mode\r\n"); - PrintStr(L"-mapbs map EfiBootServices{Code,Data}\r\n"); + PrintStr( + L"-mapbs map EfiBootServices{Code,Data}\r\n"); PrintStr(L"-cfg= specify configuration file\r\n"); PrintStr(L"-help, -? display this help\r\n"); blexit(NULL); @@ -1213,7 +1221,7 @@ efi_start(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable) } PrintStr(L"Xen " __stringify(XEN_VERSION) "." __stringify(XEN_SUBVERSION) - XEN_EXTRAVERSION " (c/s " XEN_CHANGESET ") EFI loader\r\n"); + XEN_EXTRAVERSION " (c/s " XEN_CHANGESET ") EFI loader\r\n"); efi_arch_relocate_image(0); @@ -1224,8 +1232,8 @@ efi_start(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable) size = cols = rows = depth = 0; - if ( StdOut->QueryMode(StdOut, StdOut->Mode->Mode, - &cols, &rows) == EFI_SUCCESS ) + if ( StdOut->QueryMode(StdOut, StdOut->Mode->Mode, &cols, &rows) == + EFI_SUCCESS ) efi_arch_console_init(cols, rows); gop = efi_get_gop(); @@ -1260,7 +1268,7 @@ efi_start(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable) else section.s = get_value(&cfg, "global", "default"); - for ( ; ; ) + for ( ;; ) { name.s = get_value(&cfg, section.s, "kernel"); if ( name.s ) @@ -1291,8 +1299,9 @@ efi_start(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable) efi_bs->FreePool(name.w); if ( !EFI_ERROR(efi_bs->LocateProtocol(&shim_lock_guid, NULL, - (void **)&shim_lock)) && - (status = shim_lock->Verify(kernel.ptr, kernel.size)) != EFI_SUCCESS ) + (void **)&shim_lock)) && + (status = shim_lock->Verify(kernel.ptr, kernel.size)) != + EFI_SUCCESS ) PrintErrMesg(L"Dom0 kernel image could not be verified", status); name.s = get_value(&cfg, section.s, "ramdisk"); @@ -1315,8 +1324,8 @@ efi_start(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable) */ if ( argc && !*argv ) { - EFI_FILE_HANDLE handle = get_parent_handle(loaded_image, - &file_name); + EFI_FILE_HANDLE handle = + get_parent_handle(loaded_image, &file_name); handle->Close(handle); *argv = file_name; @@ -1377,7 +1386,8 @@ efi_start(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable) efi_exit_boot(ImageHandle, SystemTable); efi_arch_post_exit_boot(); - for( ; ; ); /* not reached */ + for ( ;; ) + ; /* not reached */ } #ifndef CONFIG_ARM /* TODO - runtime 
service support */ @@ -1468,18 +1478,19 @@ static bool __init rt_range_valid(unsigned long smfn, unsigned long emfn) } #endif -#define INVALID_VIRTUAL_ADDRESS (0xBAAADUL << \ - (EFI_PAGE_SHIFT + BITS_PER_LONG - 32)) +#define INVALID_VIRTUAL_ADDRESS \ + (0xBAAADUL << (EFI_PAGE_SHIFT + BITS_PER_LONG - 32)) void __init efi_init_memory(void) { unsigned int i; #ifndef USE_SET_VIRTUAL_ADDRESS_MAP - struct rt_extra { + struct rt_extra + { struct rt_extra *next; unsigned long smfn, emfn; unsigned int prot; - } *extra, *extra_head = NULL; + } * extra, *extra_head = NULL; #endif free_ebmalloc_unused_mem(); @@ -1498,24 +1509,22 @@ void __init efi_init_memory(void) printk(XENLOG_INFO " %013" PRIx64 "-%013" PRIx64 " type=%u attr=%016" PRIx64 "\n", - desc->PhysicalStart, desc->PhysicalStart + len - 1, - desc->Type, desc->Attribute); + desc->PhysicalStart, desc->PhysicalStart + len - 1, desc->Type, + desc->Attribute); if ( (desc->Attribute & (EFI_MEMORY_WB | EFI_MEMORY_WT)) || (efi_bs_revision >= EFI_REVISION(2, 5) && (desc->Attribute & EFI_MEMORY_WP)) ) { /* Supplement the heuristics in l1tf_calculations(). */ - l1tf_safe_maddr = - max(l1tf_safe_maddr, - ROUNDUP(desc->PhysicalStart + len, PAGE_SIZE)); + l1tf_safe_maddr = max( + l1tf_safe_maddr, ROUNDUP(desc->PhysicalStart + len, PAGE_SIZE)); } if ( !efi_enabled(EFI_RS) || (!(desc->Attribute & EFI_MEMORY_RUNTIME) && - (!map_bs || - (desc->Type != EfiBootServicesCode && - desc->Type != EfiBootServicesData))) ) + (!map_bs || (desc->Type != EfiBootServicesCode && + desc->Type != EfiBootServicesData))) ) continue; desc->VirtualStart = INVALID_VIRTUAL_ADDRESS; @@ -1543,8 +1552,9 @@ void __init efi_init_memory(void) prot |= _PAGE_PWT | _PAGE_PCD | MAP_SMALL_PAGES; } - if ( desc->Attribute & (efi_bs_revision < EFI_REVISION(2, 5) - ? EFI_MEMORY_WP : EFI_MEMORY_RO) ) + if ( desc->Attribute & + (efi_bs_revision < EFI_REVISION(2, 5) ? EFI_MEMORY_WP + : EFI_MEMORY_RO) ) prot &= ~_PAGE_RW; if ( desc->Attribute & EFI_MEMORY_XP ) prot |= _PAGE_NX; @@ -1555,13 +1565,13 @@ void __init efi_init_memory(void) { if ( (unsigned long)mfn_to_virt(emfn - 1) >= HYPERVISOR_VIRT_END ) prot &= ~_PAGE_GLOBAL; - if ( map_pages_to_xen((unsigned long)mfn_to_virt(smfn), - _mfn(smfn), emfn - smfn, prot) == 0 ) + if ( map_pages_to_xen((unsigned long)mfn_to_virt(smfn), _mfn(smfn), + emfn - smfn, prot) == 0 ) desc->VirtualStart = (unsigned long)maddr_to_virt(desc->PhysicalStart); else - printk(XENLOG_ERR "Could not map MFNs %#lx-%#lx\n", - smfn, emfn - 1); + printk(XENLOG_ERR "Could not map MFNs %#lx-%#lx\n", smfn, + emfn - 1); } #ifndef USE_SET_VIRTUAL_ADDRESS_MAP else if ( !((desc->PhysicalStart + len - 1) >> (VADDR_BITS - 1)) && @@ -1580,8 +1590,8 @@ void __init efi_init_memory(void) #ifdef USE_SET_VIRTUAL_ADDRESS_MAP /* XXX allocate e.g. down from FIXADDR_START */ #endif - printk(XENLOG_ERR "No mapping for MFNs %#lx-%#lx\n", - smfn, emfn - 1); + printk(XENLOG_ERR "No mapping for MFNs %#lx-%#lx\n", smfn, + emfn - 1); } } @@ -1592,8 +1602,8 @@ void __init efi_init_memory(void) } #ifdef USE_SET_VIRTUAL_ADDRESS_MAP - efi_rs->SetVirtualAddressMap(efi_memmap_size, efi_mdesc_size, - mdesc_ver, efi_memmap); + efi_rs->SetVirtualAddressMap(efi_memmap_size, efi_mdesc_size, mdesc_ver, + efi_memmap); #else /* Set up 1:1 page tables to do runtime calls in "physical" mode. 
*/ efi_l4_pgtable = alloc_xen_pagetable(); @@ -1608,9 +1618,8 @@ void __init efi_init_memory(void) const EFI_MEMORY_DESCRIPTOR *desc = efi_memmap + i; if ( ((desc->Attribute & EFI_MEMORY_RUNTIME) || - (map_bs && - (desc->Type == EfiBootServicesCode || - desc->Type == EfiBootServicesData))) && + (map_bs && (desc->Type == EfiBootServicesCode || + desc->Type == EfiBootServicesData))) && desc->VirtualStart != INVALID_VIRTUAL_ADDRESS && desc->VirtualStart != desc->PhysicalStart ) copy_mapping(PFN_DOWN(desc->PhysicalStart), diff --git a/xen/common/efi/runtime.c b/xen/common/efi/runtime.c index 3d118d571d..ce890d2a2c 100644 --- a/xen/common/efi/runtime.c +++ b/xen/common/efi/runtime.c @@ -8,19 +8,20 @@ DEFINE_XEN_GUEST_HANDLE(CHAR16); -struct efi_rs_state { +struct efi_rs_state +{ #ifdef CONFIG_X86 - /* - * The way stacks get set up leads to them always being on an 8-byte - * boundary not evenly divisible by 16 (see asm-x86/current.h). The EFI ABI, - * just like the CPU one, however requires stacks to be 16-byte aligned - * before every function call. Since the compiler assumes this (unless - * passing it -mpreferred-stack-boundary=3), it wouldn't generate code to - * align the stack to 16 bytes even if putting a 16-byte aligned object - * there. Hence we need to force larger than 16-byte alignment, even if we - * don't strictly need that. - */ - unsigned long __aligned(32) cr3; + /* + * The way stacks get set up leads to them always being on an 8-byte + * boundary not evenly divisible by 16 (see asm-x86/current.h). The EFI ABI, + * just like the CPU one, however requires stacks to be 16-byte aligned + * before every function call. Since the compiler assumes this (unless + * passing it -mpreferred-stack-boundary=3), it wouldn't generate code to + * align the stack to 16 bytes even if putting a 16-byte aligned object + * there. Hence we need to force larger than 16-byte alignment, even if we + * don't strictly need that. 
+ */ + unsigned long __aligned(32) cr3; #endif }; @@ -30,9 +31,9 @@ void efi_rs_leave(struct efi_rs_state *); #ifndef COMPAT #ifndef CONFIG_ARM -# include -# include -# include +#include +#include +#include #endif unsigned int __read_mostly efi_num_ct; @@ -63,11 +64,11 @@ UINTN __read_mostly efi_apple_properties_len; unsigned int efi_flags; struct efi __read_mostly efi = { - .acpi = EFI_INVALID_TABLE_ADDR, - .acpi20 = EFI_INVALID_TABLE_ADDR, - .mps = EFI_INVALID_TABLE_ADDR, - .smbios = EFI_INVALID_TABLE_ADDR, - .smbios3 = EFI_INVALID_TABLE_ADDR, + .acpi = EFI_INVALID_TABLE_ADDR, + .acpi20 = EFI_INVALID_TABLE_ADDR, + .mps = EFI_INVALID_TABLE_ADDR, + .smbios = EFI_INVALID_TABLE_ADDR, + .smbios3 = EFI_INVALID_TABLE_ADDR, }; const struct efi_pci_rom *__read_mostly efi_pci_roms; @@ -83,15 +84,15 @@ struct efi_rs_state efi_rs_enter(void) { static const u16 fcw = FCW_DEFAULT; static const u32 mxcsr = MXCSR_DEFAULT; - struct efi_rs_state state = { .cr3 = 0 }; + struct efi_rs_state state = {.cr3 = 0}; if ( !efi_l4_pgtable ) return state; state.cr3 = read_cr3(); save_fpu_enable(); - asm volatile ( "fnclex; fldcw %0" :: "m" (fcw) ); - asm volatile ( "ldmxcsr %0" :: "m" (mxcsr) ); + asm volatile("fnclex; fldcw %0" ::"m"(fcw)); + asm volatile("ldmxcsr %0" ::"m"(mxcsr)); spin_lock(&efi_rs_lock); @@ -104,9 +105,8 @@ struct efi_rs_state efi_rs_enter(void) { struct desc_ptr gdt_desc = { .limit = LAST_RESERVED_GDT_BYTE, - .base = (unsigned long)(per_cpu(gdt_table, smp_processor_id()) - - FIRST_RESERVED_GDT_ENTRY) - }; + .base = (unsigned long)(per_cpu(gdt_table, smp_processor_id()) - + FIRST_RESERVED_GDT_ENTRY)}; lgdt(&gdt_desc); } @@ -125,10 +125,8 @@ void efi_rs_leave(struct efi_rs_state *state) switch_cr3_cr4(state->cr3, read_cr4()); if ( is_pv_vcpu(curr) && !is_idle_vcpu(curr) ) { - struct desc_ptr gdt_desc = { - .limit = LAST_RESERVED_GDT_BYTE, - .base = GDT_VIRT_START(curr) - }; + struct desc_ptr gdt_desc = {.limit = LAST_RESERVED_GDT_BYTE, + .base = GDT_VIRT_START(curr)}; lgdt(&gdt_desc); } @@ -140,8 +138,7 @@ void efi_rs_leave(struct efi_rs_state *state) bool efi_rs_using_pgtables(void) { - return efi_l4_pgtable && - (smp_processor_id() == efi_rs_on_cpu) && + return efi_l4_pgtable && (smp_processor_id() == efi_rs_on_cpu) && (read_cr3() == virt_to_maddr(efi_l4_pgtable)); } @@ -162,8 +159,8 @@ unsigned long efi_get_time(void) if ( EFI_ERROR(status) ) return 0; - return mktime(time.Year, time.Month, time.Day, - time.Hour, time.Minute, time.Second); + return mktime(time.Year, time.Month, time.Day, time.Hour, time.Minute, + time.Second); } void efi_halt_system(void) @@ -204,7 +201,7 @@ int efi_get_info(uint32_t idx, union xenpf_efi_info *info) if ( !efi_enabled(EFI_BOOT) ) return -ENOSYS; - switch ( idx ) + switch (idx) { case XEN_FW_EFI_VERSION: info->version = efi_version; @@ -228,13 +225,13 @@ int efi_get_info(uint32_t idx, union xenpf_efi_info *info) return -EOPNOTSUPP; info->vendor.revision = efi_fw_revision; n = info->vendor.bufsz / sizeof(*efi_fw_vendor); - if ( !guest_handle_okay(guest_handle_cast(info->vendor.name, - CHAR16), n) ) + if ( !guest_handle_okay(guest_handle_cast(info->vendor.name, CHAR16), + n) ) return -EFAULT; for ( i = 0; i < n; ++i ) { - if ( __copy_to_guest_offset(info->vendor.name, i, - efi_fw_vendor + i, 1) ) + if ( __copy_to_guest_offset(info->vendor.name, i, efi_fw_vendor + i, + 1) ) return -EFAULT; if ( !efi_fw_vendor[i] ) break; @@ -253,14 +250,14 @@ int efi_get_info(uint32_t idx, union xenpf_efi_info *info) info->mem.attr = desc->Attribute; if ( info->mem.addr + 
info->mem.size < info->mem.addr || info->mem.addr + info->mem.size > - desc->PhysicalStart + len ) - info->mem.size = desc->PhysicalStart + len - - info->mem.addr; + desc->PhysicalStart + len ) + info->mem.size = desc->PhysicalStart + len - info->mem.addr; return 0; } } return -ESRCH; - case XEN_FW_EFI_PCI_ROM: { + case XEN_FW_EFI_PCI_ROM: + { const struct efi_pci_rom *ent; for ( ent = efi_pci_roms; ent; ent = ent->next ) @@ -295,7 +292,7 @@ static long gwstrlen(XEN_GUEST_HANDLE_PARAM(CHAR16) str) { unsigned long len; - for ( len = 0; ; ++len ) + for ( len = 0;; ++len ) { CHAR16 c; @@ -310,7 +307,7 @@ static long gwstrlen(XEN_GUEST_HANDLE_PARAM(CHAR16) str) static inline EFI_TIME *cast_time(struct xenpf_efi_time *time) { -#define chk_fld(F, f) \ +#define chk_fld(F, f) \ BUILD_BUG_ON(sizeof(cast_time(NULL)->F) != sizeof(time->f) || \ offsetof(EFI_TIME, F) != offsetof(struct xenpf_efi_time, f)) chk_fld(Year, year); @@ -328,10 +325,10 @@ static inline EFI_TIME *cast_time(struct xenpf_efi_time *time) static inline EFI_GUID *cast_guid(struct xenpf_efi_guid *guid) { -#define chk_fld(n) \ +#define chk_fld(n) \ BUILD_BUG_ON(sizeof(cast_guid(NULL)->Data##n) != sizeof(guid->data##n) || \ - offsetof(EFI_GUID, Data##n) != \ - offsetof(struct xenpf_efi_guid, data##n)) + offsetof(EFI_GUID, Data##n) != \ + offsetof(struct xenpf_efi_guid, data##n)) chk_fld(1); chk_fld(2); chk_fld(3); @@ -353,7 +350,7 @@ int efi_runtime_call(struct xenpf_efi_runtime_call *op) if ( !efi_enabled(EFI_RS) ) return -EOPNOTSUPP; - switch ( op->function ) + switch (op->function) { case XEN_EFI_get_time: { @@ -428,12 +425,11 @@ int efi_runtime_call(struct xenpf_efi_runtime_call *op) if ( !state.cr3 ) return -EOPNOTSUPP; spin_lock_irqsave(&rtc_lock, flags); - status = efi_rs->SetWakeupTime(!!(op->misc & - XEN_EFI_SET_WAKEUP_TIME_ENABLE), - (op->misc & - XEN_EFI_SET_WAKEUP_TIME_ENABLE_ONLY) ? - NULL : - cast_time(&op->u.set_wakeup_time)); + status = efi_rs->SetWakeupTime( + !!(op->misc & XEN_EFI_SET_WAKEUP_TIME_ENABLE), + (op->misc & XEN_EFI_SET_WAKEUP_TIME_ENABLE_ONLY) + ? 
NULL + : cast_time(&op->u.set_wakeup_time)); spin_unlock_irqrestore(&rtc_lock, flags); efi_rs_leave(&state); @@ -467,7 +463,7 @@ int efi_runtime_call(struct xenpf_efi_runtime_call *op) return len; name = xmalloc_array(CHAR16, ++len); if ( !name ) - return -ENOMEM; + return -ENOMEM; __copy_from_guest(name, op->u.get_variable.name, len); size = op->u.get_variable.size; @@ -487,8 +483,8 @@ int efi_runtime_call(struct xenpf_efi_runtime_call *op) if ( state.cr3 ) { status = efi_rs->GetVariable( - name, cast_guid(&op->u.get_variable.vendor_guid), - &op->misc, &size, data); + name, cast_guid(&op->u.get_variable.vendor_guid), &op->misc, + &size, data); efi_rs_leave(&state); if ( !EFI_ERROR(status) && @@ -515,7 +511,7 @@ int efi_runtime_call(struct xenpf_efi_runtime_call *op) return len; name = xmalloc_array(CHAR16, ++len); if ( !name ) - return -ENOMEM; + return -ENOMEM; __copy_from_guest(name, op->u.set_variable.name, len); data = xmalloc_bytes(op->u.set_variable.size); @@ -529,8 +525,8 @@ int efi_runtime_call(struct xenpf_efi_runtime_call *op) state = efi_rs_enter(); if ( state.cr3 ) status = efi_rs->SetVariable( - name, cast_guid(&op->u.set_variable.vendor_guid), - op->misc, op->u.set_variable.size, data); + name, cast_guid(&op->u.set_variable.vendor_guid), op->misc, + op->u.set_variable.size, data); else rc = -EOPNOTSUPP; efi_rs_leave(&state); @@ -576,8 +572,8 @@ int efi_runtime_call(struct xenpf_efi_runtime_call *op) * is used because some firmwares update size when they shouldn't. * */ if ( !EFI_ERROR(status) && - __copy_to_guest(op->u.get_next_variable_name.name, - name.raw, op->u.get_next_variable_name.size) ) + __copy_to_guest(op->u.get_next_variable_name.name, name.raw, + op->u.get_next_variable_name.size) ) rc = -EFAULT; op->u.get_next_variable_name.size = size; } @@ -594,10 +590,9 @@ int efi_runtime_call(struct xenpf_efi_runtime_call *op) if ( op->misc & XEN_EFI_VARINFO_BOOT_SNAPSHOT ) { - if ( (op->u.query_variable_info.attr - & ~EFI_VARIABLE_APPEND_WRITE) != - (EFI_VARIABLE_NON_VOLATILE | - EFI_VARIABLE_BOOTSERVICE_ACCESS | + if ( (op->u.query_variable_info.attr & + ~EFI_VARIABLE_APPEND_WRITE) != + (EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS) ) return -EINVAL; diff --git a/xen/common/event_2l.c b/xen/common/event_2l.c index 8ca90899ab..274fb8b27a 100644 --- a/xen/common/event_2l.c +++ b/xen/common/event_2l.c @@ -28,7 +28,7 @@ static void evtchn_2l_set_pending(struct vcpu *v, struct evtchn *evtchn) if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) ) return; - if ( !test_bit (port, &shared_info(d, evtchn_mask)) && + if ( !test_bit(port, &shared_info(d, evtchn_mask)) && !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d), &vcpu_info(v, evtchn_pending_sel)) ) { @@ -53,9 +53,9 @@ static void evtchn_2l_unmask(struct domain *d, struct evtchn *evtchn) * evtchn_2l_set_pending() above. 
*/ if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) && - test_bit (port, &shared_info(d, evtchn_pending)) && - !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d), - &vcpu_info(v, evtchn_pending_sel)) ) + test_bit(port, &shared_info(d, evtchn_pending)) && + !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d), + &vcpu_info(v, evtchn_pending_sel)) ) { vcpu_mark_events_pending(v); } @@ -77,8 +77,7 @@ static bool evtchn_2l_is_masked(const struct domain *d, evtchn_port_t port) return port >= max_ports || test_bit(port, &shared_info(d, evtchn_mask)); } -static void evtchn_2l_print_state(struct domain *d, - const struct evtchn *evtchn) +static void evtchn_2l_print_state(struct domain *d, const struct evtchn *evtchn) { struct vcpu *v = d->vcpu[evtchn->notify_vcpu_id]; @@ -86,14 +85,13 @@ static void evtchn_2l_print_state(struct domain *d, &vcpu_info(v, evtchn_pending_sel))); } -static const struct evtchn_port_ops evtchn_port_ops_2l = -{ - .set_pending = evtchn_2l_set_pending, +static const struct evtchn_port_ops evtchn_port_ops_2l = { + .set_pending = evtchn_2l_set_pending, .clear_pending = evtchn_2l_clear_pending, - .unmask = evtchn_2l_unmask, - .is_pending = evtchn_2l_is_pending, - .is_masked = evtchn_2l_is_masked, - .print_state = evtchn_2l_print_state, + .unmask = evtchn_2l_unmask, + .is_pending = evtchn_2l_is_pending, + .is_masked = evtchn_2l_is_masked, + .print_state = evtchn_2l_print_state, }; void evtchn_2l_init(struct domain *d) diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c index e86e2bfab0..613e3415b3 100644 --- a/xen/common/event_channel.c +++ b/xen/common/event_channel.c @@ -1,15 +1,15 @@ /****************************************************************************** * event_channel.c - * + * * Event notifications from VIRQs, PIRQs, and other domains. - * + * * Copyright (c) 2003-2006, K A Fraser. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; If not, see . */ @@ -31,21 +31,18 @@ #include #include -#define ERROR_EXIT(_errno) \ - do { \ - gdprintk(XENLOG_WARNING, \ - "EVTCHNOP failure: error %d\n", \ - (_errno)); \ - rc = (_errno); \ - goto out; \ +#define ERROR_EXIT(_errno) \ + do { \ + gdprintk(XENLOG_WARNING, "EVTCHNOP failure: error %d\n", (_errno)); \ + rc = (_errno); \ + goto out; \ } while ( 0 ) -#define ERROR_EXIT_DOM(_errno, _dom) \ - do { \ - gdprintk(XENLOG_WARNING, \ - "EVTCHNOP failure: domain %d, error %d\n", \ - (_dom)->domain_id, (_errno)); \ - rc = (_errno); \ - goto out; \ +#define ERROR_EXIT_DOM(_errno, _dom) \ + do { \ + gdprintk(XENLOG_WARNING, "EVTCHNOP failure: domain %d, error %d\n", \ + (_dom)->domain_id, (_errno)); \ + rc = (_errno); \ + goto out; \ } while ( 0 ) #define consumer_is_xen(e) (!!(e)->xen_consumer) @@ -87,15 +84,15 @@ static uint8_t get_xen_consumer(xen_event_channel_notification_t fn) } BUG_ON(i >= ARRAY_SIZE(xen_consumers)); - return i+1; + return i + 1; } /* Get the notification function for a given Xen-bound event channel. 
*/ -#define xen_notification_fn(e) (xen_consumers[(e)->xen_consumer-1]) +#define xen_notification_fn(e) (xen_consumers[(e)->xen_consumer - 1]) static bool virq_is_global(unsigned int virq) { - switch ( virq ) + switch (virq) { case VIRQ_TIMER: case VIRQ_DEBUG: @@ -111,7 +108,6 @@ static bool virq_is_global(unsigned int virq) return true; } - static struct evtchn *alloc_evtchn_bucket(struct domain *d, unsigned int port) { struct evtchn *chn; @@ -186,7 +182,7 @@ int evtchn_allocate_port(struct domain *d, evtchn_port_t port) static int get_free_port(struct domain *d) { - int port; + int port; if ( d->is_dying ) return -EINVAL; @@ -210,9 +206,9 @@ void evtchn_free(struct domain *d, struct evtchn *chn) evtchn_port_clear_pending(d, chn); /* Reset binding to vcpu0 when the channel is freed. */ - chn->state = ECS_FREE; + chn->state = ECS_FREE; chn->notify_vcpu_id = 0; - chn->xen_consumer = 0; + chn->xen_consumer = 0; xsm_evtchn_close_post(chn); } @@ -221,9 +217,9 @@ static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc) { struct evtchn *chn; struct domain *d; - int port; - domid_t dom = alloc->dom; - long rc; + int port; + domid_t dom = alloc->dom; + long rc; d = rcu_lock_domain_by_any_id(dom); if ( d == NULL ) @@ -250,14 +246,13 @@ static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc) alloc->port = port; - out: +out: spin_unlock(&d->event_lock); rcu_unlock_domain(d); return rc; } - static void double_evtchn_lock(struct evtchn *lchn, struct evtchn *rchn) { if ( lchn < rchn ) @@ -284,9 +279,9 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind) { struct evtchn *lchn, *rchn; struct domain *ld = current->domain, *rd; - int lport, rport = bind->remote_port; - domid_t rdom = bind->remote_dom; - long rc; + int lport, rport = bind->remote_port; + domid_t rdom = bind->remote_dom; + long rc; if ( rdom == DOMID_SELF ) rdom = current->domain->domain_id; @@ -324,14 +319,14 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind) double_evtchn_lock(lchn, rchn); - lchn->u.interdomain.remote_dom = rd; + lchn->u.interdomain.remote_dom = rd; lchn->u.interdomain.remote_port = rport; - lchn->state = ECS_INTERDOMAIN; + lchn->state = ECS_INTERDOMAIN; evtchn_port_init(ld, lchn); - - rchn->u.interdomain.remote_dom = ld; + + rchn->u.interdomain.remote_dom = ld; rchn->u.interdomain.remote_port = lport; - rchn->state = ECS_INTERDOMAIN; + rchn->state = ECS_INTERDOMAIN; /* * We may have lost notifications on the remote unbound port. Fix that up @@ -343,32 +338,31 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind) bind->local_port = lport; - out: +out: spin_unlock(&ld->event_lock); if ( ld != rd ) spin_unlock(&rd->event_lock); - + rcu_unlock_domain(rd); return rc; } - int evtchn_bind_virq(evtchn_bind_virq_t *bind, evtchn_port_t port) { struct evtchn *chn; - struct vcpu *v; + struct vcpu *v; struct domain *d = current->domain; - int virq = bind->virq, vcpu = bind->vcpu; - int rc = 0; + int virq = bind->virq, vcpu = bind->vcpu; + int rc = 0; if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) ) return -EINVAL; - /* - * Make sure the guest controlled value virq is bounded even during - * speculative execution. - */ + /* + * Make sure the guest controlled value virq is bounded even during + * speculative execution. 
+ */ virq = array_index_nospec(virq, ARRAY_SIZE(v->virq_to_evtchn)); if ( virq_is_global(virq) && (vcpu != 0) ) @@ -400,28 +394,27 @@ int evtchn_bind_virq(evtchn_bind_virq_t *bind, evtchn_port_t port) spin_lock(&chn->lock); - chn->state = ECS_VIRQ; + chn->state = ECS_VIRQ; chn->notify_vcpu_id = vcpu; - chn->u.virq = virq; + chn->u.virq = virq; evtchn_port_init(d, chn); spin_unlock(&chn->lock); v->virq_to_evtchn[virq] = bind->port = port; - out: +out: spin_unlock(&d->event_lock); return rc; } - static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind) { struct evtchn *chn; struct domain *d = current->domain; - int port, vcpu = bind->vcpu; - long rc = 0; + int port, vcpu = bind->vcpu; + long rc = 0; if ( domain_vcpu(d, vcpu) == NULL ) return -ENOENT; @@ -435,7 +428,7 @@ static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind) spin_lock(&chn->lock); - chn->state = ECS_IPI; + chn->state = ECS_IPI; chn->notify_vcpu_id = vcpu; evtchn_port_init(d, chn); @@ -443,20 +436,19 @@ static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind) bind->port = port; - out: +out: spin_unlock(&d->event_lock); return rc; } - static void link_pirq_port(int port, struct evtchn *chn, struct vcpu *v) { chn->u.pirq.prev_port = 0; chn->u.pirq.next_port = v->pirq_evtchn_head; if ( v->pirq_evtchn_head ) - evtchn_from_port(v->domain, v->pirq_evtchn_head) - ->u.pirq.prev_port = port; + evtchn_from_port(v->domain, v->pirq_evtchn_head)->u.pirq.prev_port = + port; v->pirq_evtchn_head = port; } @@ -474,15 +466,14 @@ static void unlink_pirq_port(struct evtchn *chn, struct vcpu *v) chn->u.pirq.prev_port; } - static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind) { struct evtchn *chn; struct domain *d = current->domain; - struct vcpu *v = d->vcpu[0]; - struct pirq *info; - int port, pirq = bind->pirq; - long rc; + struct vcpu *v = d->vcpu[0]; + struct pirq *info; + int port, pirq = bind->pirq; + long rc; if ( (pirq < 0) || (pirq >= d->nr_pirqs) ) return -EINVAL; @@ -504,10 +495,10 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind) if ( !info ) ERROR_EXIT(-ENOMEM); info->evtchn = port; - rc = (!is_hvm_domain(d) - ? pirq_guest_bind(v, info, - !!(bind->flags & BIND_PIRQ__WILL_SHARE)) - : 0); + rc = + (!is_hvm_domain(d) + ? 
pirq_guest_bind(v, info, !!(bind->flags & BIND_PIRQ__WILL_SHARE)) + : 0); if ( rc != 0 ) { info->evtchn = 0; @@ -517,7 +508,7 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind) spin_lock(&chn->lock); - chn->state = ECS_PIRQ; + chn->state = ECS_PIRQ; chn->u.pirq.irq = pirq; link_pirq_port(port, chn, v); evtchn_port_init(d, chn); @@ -528,22 +519,21 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind) arch_evtchn_bind_pirq(d, pirq); - out: +out: spin_unlock(&d->event_lock); return rc; } - int evtchn_close(struct domain *d1, int port1, bool guest) { struct domain *d2 = NULL; - struct vcpu *v; + struct vcpu *v; struct evtchn *chn1, *chn2; - int port2; - long rc = 0; + int port2; + long rc = 0; - again: +again: spin_lock(&d1->event_lock); if ( !port_is_valid(d1, port1) ) @@ -561,7 +551,7 @@ int evtchn_close(struct domain *d1, int port1, bool guest) goto out; } - switch ( chn1->state ) + switch (chn1->state) { case ECS_FREE: case ECS_RESERVED: @@ -571,7 +561,8 @@ int evtchn_close(struct domain *d1, int port1, bool guest) case ECS_UNBOUND: break; - case ECS_PIRQ: { + case ECS_PIRQ: + { struct pirq *pirq = pirq_info(d1, chn1->u.pirq.irq); if ( !pirq ) @@ -589,7 +580,7 @@ int evtchn_close(struct domain *d1, int port1, bool guest) } case ECS_VIRQ: - for_each_vcpu ( d1, v ) + for_each_vcpu (d1, v) { if ( v->virq_to_evtchn[chn1->u.virq] != port1 ) continue; @@ -660,7 +651,7 @@ int evtchn_close(struct domain *d1, int port1, bool guest) evtchn_free(d1, chn1); spin_unlock(&chn1->lock); - out: +out: if ( d2 != NULL ) { if ( d1 != d2 ) @@ -677,7 +668,7 @@ int evtchn_send(struct domain *ld, unsigned int lport) { struct evtchn *lchn, *rchn; struct domain *rd; - int rport, ret = 0; + int rport, ret = 0; if ( !port_is_valid(ld, lport) ) return -EINVAL; @@ -697,12 +688,12 @@ int evtchn_send(struct domain *ld, unsigned int lport) if ( ret ) goto out; - switch ( lchn->state ) + switch (lchn->state) { case ECS_INTERDOMAIN: - rd = lchn->u.interdomain.remote_dom; + rd = lchn->u.interdomain.remote_dom; rport = lchn->u.interdomain.remote_port; - rchn = evtchn_from_port(rd, rport); + rchn = evtchn_from_port(rd, rport); if ( consumer_is_xen(rchn) ) xen_notification_fn(rchn)(rd->vcpu[rchn->notify_vcpu_id], rport); else @@ -746,7 +737,7 @@ void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq) d = v->domain; evtchn_port_set_pending(d, v->vcpu_id, evtchn_from_port(d, port)); - out: +out: spin_unlock_irqrestore(&v->virq_lock, flags); } @@ -775,7 +766,7 @@ void send_guest_global_virq(struct domain *d, uint32_t virq) chn = evtchn_from_port(d, port); evtchn_port_set_pending(d, chn->notify_vcpu_id, chn); - out: +out: spin_unlock_irqrestore(&v->virq_lock, flags); } @@ -815,15 +806,15 @@ int set_global_virq_handler(struct domain *d, uint32_t virq) { struct domain *old; - if (virq >= NR_VIRQS) + if ( virq >= NR_VIRQS ) return -EINVAL; - if (!virq_is_global(virq)) + if ( !virq_is_global(virq) ) return -EINVAL; - if (global_virq_handlers[virq] == d) + if ( global_virq_handlers[virq] == d ) return 0; - if (unlikely(!get_domain(d))) + if ( unlikely(!get_domain(d)) ) return -EINVAL; spin_lock(&global_virq_handlers_lock); @@ -831,7 +822,7 @@ int set_global_virq_handler(struct domain *d, uint32_t virq) global_virq_handlers[virq] = d; spin_unlock(&global_virq_handlers_lock); - if (old != NULL) + if ( old != NULL ) put_domain(old); return 0; @@ -844,9 +835,9 @@ static void clear_global_virq_handlers(struct domain *d) spin_lock(&global_virq_handlers_lock); - for (virq = 0; virq < NR_VIRQS; virq++) + for ( virq = 0; virq < 
NR_VIRQS; virq++ ) { - if (global_virq_handlers[virq] == d) + if ( global_virq_handlers[virq] == d ) { global_virq_handlers[virq] = NULL; put_count++; @@ -855,7 +846,7 @@ static void clear_global_virq_handlers(struct domain *d) spin_unlock(&global_virq_handlers_lock); - while (put_count) + while ( put_count ) { put_domain(d); put_count--; @@ -864,11 +855,11 @@ static void clear_global_virq_handlers(struct domain *d) int evtchn_status(evtchn_status_t *status) { - struct domain *d; - domid_t dom = status->dom; - int port = status->port; - struct evtchn *chn; - long rc = 0; + struct domain *d; + domid_t dom = status->dom; + int port = status->port; + struct evtchn *chn; + long rc = 0; d = rcu_lock_domain_by_any_id(dom); if ( d == NULL ) @@ -888,7 +879,7 @@ int evtchn_status(evtchn_status_t *status) if ( rc ) goto out; - switch ( chn->state ) + switch (chn->state) { case ECS_FREE: case ECS_RESERVED: @@ -900,8 +891,7 @@ int evtchn_status(evtchn_status_t *status) break; case ECS_INTERDOMAIN: status->status = EVTCHNSTAT_interdomain; - status->u.interdomain.dom = - chn->u.interdomain.remote_dom->domain_id; + status->u.interdomain.dom = chn->u.interdomain.remote_dom->domain_id; status->u.interdomain.port = chn->u.interdomain.remote_port; break; case ECS_PIRQ: @@ -921,20 +911,19 @@ int evtchn_status(evtchn_status_t *status) status->vcpu = chn->notify_vcpu_id; - out: +out: spin_unlock(&d->event_lock); rcu_unlock_domain(d); return rc; } - long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id) { struct domain *d = current->domain; struct evtchn *chn; - long rc = 0; - struct vcpu *v; + long rc = 0; + struct vcpu *v; /* Use the vcpu info to prevent speculative out-of-bound accesses */ if ( (v = domain_vcpu(d, vcpu_id)) == NULL ) @@ -957,7 +946,7 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id) goto out; } - switch ( chn->state ) + switch (chn->state) { case ECS_VIRQ: if ( virq_is_global(chn->u.virq) ) @@ -974,8 +963,7 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id) break; unlink_pirq_port(chn, d->vcpu[chn->notify_vcpu_id]); chn->notify_vcpu_id = v->vcpu_id; - pirq_set_affinity(d, chn->u.pirq.irq, - cpumask_of(v->processor)); + pirq_set_affinity(d, chn->u.pirq.irq, cpumask_of(v->processor)); link_pirq_port(port, chn, v); break; default: @@ -983,13 +971,12 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id) break; } - out: +out: spin_unlock(&d->event_lock); return rc; } - int evtchn_unmask(unsigned int port) { struct domain *d = current->domain; @@ -1004,7 +991,6 @@ int evtchn_unmask(unsigned int port) return 0; } - int evtchn_reset(struct domain *d) { unsigned int i; @@ -1055,9 +1041,10 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) { long rc; - switch ( cmd ) + switch (cmd) + { + case EVTCHNOP_alloc_unbound: { - case EVTCHNOP_alloc_unbound: { struct evtchn_alloc_unbound alloc_unbound; if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 ) return -EFAULT; @@ -1067,7 +1054,8 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_bind_interdomain: { + case EVTCHNOP_bind_interdomain: + { struct evtchn_bind_interdomain bind_interdomain; if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 ) return -EFAULT; @@ -1077,7 +1065,8 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_bind_virq: { + case EVTCHNOP_bind_virq: + { struct evtchn_bind_virq bind_virq; if ( copy_from_guest(&bind_virq, arg, 1) != 0 ) return -EFAULT; @@ -1087,7 +1076,8 @@ long 
do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_bind_ipi: { + case EVTCHNOP_bind_ipi: + { struct evtchn_bind_ipi bind_ipi; if ( copy_from_guest(&bind_ipi, arg, 1) != 0 ) return -EFAULT; @@ -1097,7 +1087,8 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_bind_pirq: { + case EVTCHNOP_bind_pirq: + { struct evtchn_bind_pirq bind_pirq; if ( copy_from_guest(&bind_pirq, arg, 1) != 0 ) return -EFAULT; @@ -1107,7 +1098,8 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_close: { + case EVTCHNOP_close: + { struct evtchn_close close; if ( copy_from_guest(&close, arg, 1) != 0 ) return -EFAULT; @@ -1115,7 +1107,8 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_send: { + case EVTCHNOP_send: + { struct evtchn_send send; if ( copy_from_guest(&send, arg, 1) != 0 ) return -EFAULT; @@ -1123,7 +1116,8 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_status: { + case EVTCHNOP_status: + { struct evtchn_status status; if ( copy_from_guest(&status, arg, 1) != 0 ) return -EFAULT; @@ -1133,7 +1127,8 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_bind_vcpu: { + case EVTCHNOP_bind_vcpu: + { struct evtchn_bind_vcpu bind_vcpu; if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 ) return -EFAULT; @@ -1141,7 +1136,8 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_unmask: { + case EVTCHNOP_unmask: + { struct evtchn_unmask unmask; if ( copy_from_guest(&unmask, arg, 1) != 0 ) return -EFAULT; @@ -1149,7 +1145,8 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_reset: { + case EVTCHNOP_reset: + { struct evtchn_reset reset; struct domain *d; @@ -1168,7 +1165,8 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_init_control: { + case EVTCHNOP_init_control: + { struct evtchn_init_control init_control; if ( copy_from_guest(&init_control, arg, 1) != 0 ) return -EFAULT; @@ -1178,7 +1176,8 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_expand_array: { + case EVTCHNOP_expand_array: + { struct evtchn_expand_array expand_array; if ( copy_from_guest(&expand_array, arg, 1) != 0 ) return -EFAULT; @@ -1186,7 +1185,8 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; } - case EVTCHNOP_set_priority: { + case EVTCHNOP_set_priority: + { struct evtchn_set_priority set_priority; if ( copy_from_guest(&set_priority, arg, 1) != 0 ) return -EFAULT; @@ -1202,13 +1202,12 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) return rc; } - int alloc_unbound_xen_event_channel( struct domain *ld, unsigned int lvcpu, domid_t remote_domid, xen_event_channel_notification_t notification_fn) { struct evtchn *chn; - int port, rc; + int port, rc; spin_lock(&ld->event_lock); @@ -1231,7 +1230,7 @@ int alloc_unbound_xen_event_channel( spin_unlock(&chn->lock); - out: +out: spin_unlock(&ld->event_lock); return rc < 0 ? 
rc : port; @@ -1244,7 +1243,6 @@ void free_xen_event_channel(struct domain *d, int port) evtchn_close(d, port, 0); } - void notify_via_xen_event_channel(struct domain *ld, int lport) { struct evtchn *lchn, *rchn; @@ -1258,8 +1256,8 @@ void notify_via_xen_event_channel(struct domain *ld, int lport) if ( likely(lchn->state == ECS_INTERDOMAIN) ) { ASSERT(consumer_is_xen(lchn)); - rd = lchn->u.interdomain.remote_dom; - rchn = evtchn_from_port(rd, lchn->u.interdomain.remote_port); + rd = lchn->u.interdomain.remote_dom; + rchn = evtchn_from_port(rd, lchn->u.interdomain.remote_port); evtchn_port_set_pending(rd, rchn->notify_vcpu_id, rchn); } @@ -1278,7 +1276,7 @@ void evtchn_check_pollers(struct domain *d, unsigned int port) /* Wake any interested (or potentially interested) pollers. */ for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus); vcpuid < d->max_vcpus; - vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) ) + vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid + 1) ) { v = d->vcpu[vcpuid]; if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) && @@ -1320,7 +1318,6 @@ int evtchn_init(struct domain *d, unsigned int max_port) return 0; } - void evtchn_destroy(struct domain *d) { unsigned int i; @@ -1338,7 +1335,6 @@ void evtchn_destroy(struct domain *d) evtchn_fifo_destroy(d); } - void evtchn_destroy_final(struct domain *d) { unsigned int i, j; @@ -1360,7 +1356,6 @@ void evtchn_destroy_final(struct domain *d) #endif } - void evtchn_move_pirqs(struct vcpu *v) { struct domain *d = v->domain; @@ -1377,7 +1372,6 @@ void evtchn_move_pirqs(struct vcpu *v) spin_unlock(&d->event_lock); } - static void domain_dump_evtchn_info(struct domain *d) { unsigned int port; @@ -1385,7 +1379,8 @@ static void domain_dump_evtchn_info(struct domain *d) printk("Event channel information for domain %d:\n" "Polling vCPUs: {%*pbl}\n" - " port [p/m/s]\n", d->domain_id, d->max_vcpus, d->poll_mask); + " port [p/m/s]\n", + d->domain_id, d->max_vcpus, d->poll_mask); spin_lock(&d->event_lock); @@ -1400,22 +1395,19 @@ static void domain_dump_evtchn_info(struct domain *d) if ( chn->state == ECS_FREE ) continue; - printk(" %4u [%d/%d/", - port, - evtchn_port_is_pending(d, port), + printk(" %4u [%d/%d/", port, evtchn_port_is_pending(d, port), evtchn_port_is_masked(d, port)); evtchn_port_print_state(d, chn); - printk("]: s=%d n=%d x=%d", - chn->state, chn->notify_vcpu_id, chn->xen_consumer); + printk("]: s=%d n=%d x=%d", chn->state, chn->notify_vcpu_id, + chn->xen_consumer); - switch ( chn->state ) + switch (chn->state) { case ECS_UNBOUND: printk(" d=%d", chn->u.unbound.remote_domid); break; case ECS_INTERDOMAIN: - printk(" d=%d p=%d", - chn->u.interdomain.remote_dom->domain_id, + printk(" d=%d p=%d", chn->u.interdomain.remote_dom->domain_id, chn->u.interdomain.remote_port); break; case ECS_PIRQ: @@ -1428,10 +1420,13 @@ static void domain_dump_evtchn_info(struct domain *d) } ssid = xsm_show_security_evtchn(d, chn); - if (ssid) { + if ( ssid ) + { printk(" Z=%s\n", ssid); xfree(ssid); - } else { + } + else + { printk("\n"); } } @@ -1447,7 +1442,7 @@ static void dump_evtchn_info(unsigned char key) rcu_read_lock(&domlist_read_lock); - for_each_domain ( d ) + for_each_domain (d) domain_dump_evtchn_info(d); rcu_read_unlock(&domlist_read_lock); diff --git a/xen/common/event_fifo.c b/xen/common/event_fifo.c index 3eecab3f22..a75ad391da 100644 --- a/xen/common/event_fifo.c +++ b/xen/common/event_fifo.c @@ -62,9 +62,11 @@ static struct evtchn_fifo_queue *lock_old_queue(const struct domain *d, { struct vcpu *v; 
struct evtchn_fifo_queue *q, *old_q; - unsigned int try; + unsigned int + try + ; - for ( try = 0; try < 3; try++ ) + for ( try = 0; try < 3; try ++) { v = d->vcpu[evtchn->last_vcpu_id]; old_q = &v->evtchn_fifo->queue[evtchn->last_priority]; @@ -81,10 +83,10 @@ static struct evtchn_fifo_queue *lock_old_queue(const struct domain *d, } gprintk(XENLOG_WARNING, - "dom%d port %d lost event (too many queue changes)\n", - d->domain_id, evtchn->port); + "dom%d port %d lost event (too many queue changes)\n", d->domain_id, + evtchn->port); return NULL; -} +} static int try_set_link(event_word_t *word, event_word_t *w, uint32_t link) { @@ -120,7 +122,9 @@ static bool_t evtchn_fifo_set_link(const struct domain *d, event_word_t *word, uint32_t link) { event_word_t w; - unsigned int try; + unsigned int + try + ; int ret; w = read_atomic(word); @@ -134,7 +138,7 @@ static bool_t evtchn_fifo_set_link(const struct domain *d, event_word_t *word, w = read_atomic(word); - for ( try = 0; try < 4; try++ ) + for ( try = 0; try < 4; try ++) { ret = try_set_link(word, &w, link); if ( ret >= 0 ) @@ -144,8 +148,8 @@ static bool_t evtchn_fifo_set_link(const struct domain *d, event_word_t *word, return ret; } } - gdprintk(XENLOG_WARNING, "domain %d, port %d not linked\n", - d->domain_id, link); + gdprintk(XENLOG_WARNING, "domain %d, port %d not linked\n", d->domain_id, + link); clear_bit(EVTCHN_FIFO_BUSY, word); return 1; } @@ -176,8 +180,8 @@ static void evtchn_fifo_set_pending(struct vcpu *v, struct evtchn *evtchn) /* * Link the event if it unmasked and not already linked. */ - if ( !test_bit(EVTCHN_FIFO_MASKED, word) - && !test_bit(EVTCHN_FIFO_LINKED, word) ) + if ( !test_bit(EVTCHN_FIFO_MASKED, word) && + !test_bit(EVTCHN_FIFO_LINKED, word) ) { struct evtchn_fifo_queue *q, *old_q; event_word_t *tail_word; @@ -191,7 +195,8 @@ static void evtchn_fifo_set_pending(struct vcpu *v, struct evtchn *evtchn) if ( unlikely(!v->evtchn_fifo->control_block) ) { printk(XENLOG_G_WARNING - "%pv has no FIFO event channel control block\n", v); + "%pv has no FIFO event channel control block\n", + v); goto done; } @@ -251,12 +256,12 @@ static void evtchn_fifo_set_pending(struct vcpu *v, struct evtchn *evtchn) spin_unlock_irqrestore(&q->lock, flags); - if ( !linked - && !test_and_set_bit(q->priority, - &v->evtchn_fifo->control_block->ready) ) + if ( !linked && + !test_and_set_bit(q->priority, + &v->evtchn_fifo->control_block->ready) ) vcpu_mark_events_pending(v); } - done: +done: if ( !was_pending ) evtchn_check_pollers(d, port); } @@ -346,17 +351,16 @@ static void evtchn_fifo_print_state(struct domain *d, printk("%c - ", test_bit(EVTCHN_FIFO_BUSY, word) ? 
'B' : ' '); } -static const struct evtchn_port_ops evtchn_port_ops_fifo = -{ - .init = evtchn_fifo_init, - .set_pending = evtchn_fifo_set_pending, +static const struct evtchn_port_ops evtchn_port_ops_fifo = { + .init = evtchn_fifo_init, + .set_pending = evtchn_fifo_set_pending, .clear_pending = evtchn_fifo_clear_pending, - .unmask = evtchn_fifo_unmask, - .is_pending = evtchn_fifo_is_pending, - .is_masked = evtchn_fifo_is_masked, - .is_busy = evtchn_fifo_is_busy, - .set_priority = evtchn_fifo_set_priority, - .print_state = evtchn_fifo_print_state, + .unmask = evtchn_fifo_unmask, + .is_pending = evtchn_fifo_is_pending, + .is_masked = evtchn_fifo_is_masked, + .is_busy = evtchn_fifo_is_busy, + .set_priority = evtchn_fifo_set_priority, + .print_state = evtchn_fifo_print_state, }; static int map_guest_page(struct domain *d, uint64_t gfn, void **virt) @@ -514,8 +518,8 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control) init_control->link_bits = EVTCHN_FIFO_LINK_BITS; vcpu_id = init_control->vcpu; - gfn = init_control->control_gfn; - offset = init_control->offset; + gfn = init_control->control_gfn; + offset = init_control->offset; if ( (v = domain_vcpu(d, vcpu_id)) == NULL ) return -ENOENT; @@ -528,8 +532,8 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control) * Make sure the guest controlled value offset is bounded even during * speculative execution. */ - offset = array_index_nospec(offset, - PAGE_SIZE - sizeof(evtchn_fifo_control_block_t) + 1); + offset = array_index_nospec( + offset, PAGE_SIZE - sizeof(evtchn_fifo_control_block_t) + 1); /* Must be 8-bytes aligned. */ if ( offset & (8 - 1) ) @@ -545,7 +549,8 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control) { struct vcpu *vcb; - for_each_vcpu ( d, vcb ) { + for_each_vcpu (d, vcb) + { rc = setup_control_block(vcb); if ( rc < 0 ) goto error; @@ -570,7 +575,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control) return rc; - error: +error: evtchn_fifo_destroy(d); spin_unlock(&d->event_lock); return rc; @@ -636,7 +641,7 @@ void evtchn_fifo_destroy(struct domain *d) { struct vcpu *v; - for_each_vcpu( d, v ) + for_each_vcpu (d, v) cleanup_control_block(v); cleanup_event_array(d); } diff --git a/xen/common/gdbstub.c b/xen/common/gdbstub.c index 07095e1ec7..13d691883e 100644 --- a/xen/common/gdbstub.c +++ b/xen/common/gdbstub.c @@ -2,7 +2,7 @@ * Copyright (C) 2005 Jimi Xenidis , IBM Corporation * Copyright (C) 2006 Isaku Yamahata * VA Linux Systems Japan. K.K. - * + * * gdbstub arch neutral part * Based on x86 cdb (xen/arch/x86/cdb.c) and ppc gdbstub(xen/common/gdbstub.c) * But extensively modified. @@ -11,12 +11,12 @@ * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; If not, see . */ @@ -51,7 +51,7 @@ #define dbg_printk(...) /*#define dbg_printk(...) 
printk(__VA_ARGS__)*/ -#define GDB_RETRY_MAX 10 +#define GDB_RETRY_MAX 10 struct gdb_cpu_info { @@ -71,15 +71,13 @@ string_param("gdb", opt_gdb); static void gdbstub_console_puts(const char *str); /* value <-> char (de)serialzers */ -static char -hex2char(unsigned long x) +static char hex2char(unsigned long x) { const char array[] = "0123456789abcdef"; return array[x & 15]; } -static unsigned int -char2hex(unsigned char c) +static unsigned int char2hex(unsigned char c) { if ( (c >= '0') && (c <= '9') ) return c - '0'; @@ -92,14 +90,12 @@ char2hex(unsigned char c) return -1; } -static unsigned char -str2hex(const char *str) +static unsigned char str2hex(const char *str) { return (char2hex(str[0]) << 4) | char2hex(str[1]); } -static unsigned long -str2ulong(const char *str, unsigned long bytes) +static unsigned long str2ulong(const char *str, unsigned long bytes) { unsigned long x = 0; unsigned long i = 0; @@ -115,8 +111,7 @@ str2ulong(const char *str, unsigned long bytes) return x; } -static unsigned long -str_to_native_ulong(const char *str) +static unsigned long str_to_native_ulong(const char *str) { unsigned long x = 0, i = 0; @@ -126,9 +121,9 @@ str_to_native_ulong(const char *str) x <<= 8; x += str2hex(str); #elif defined(__LITTLE_ENDIAN) - x += (unsigned long)str2hex(str) << (i*8); + x += (unsigned long)str2hex(str) << (i * 8); #else -# error unknown endian +#error unknown endian #endif str += 2; i++; @@ -138,8 +133,8 @@ str_to_native_ulong(const char *str) } /* gdb io wrappers */ -static signed long -gdb_io_write(const char *buf, unsigned long len, struct gdb_context *ctx) +static signed long gdb_io_write(const char *buf, unsigned long len, + struct gdb_context *ctx) { int i; for ( i = 0; i < len; i++ ) @@ -147,22 +142,19 @@ gdb_io_write(const char *buf, unsigned long len, struct gdb_context *ctx) return i; } -static int -gdb_io_write_char(u8 data, struct gdb_context *ctx) +static int gdb_io_write_char(u8 data, struct gdb_context *ctx) { - return gdb_io_write((char*)&data, 1, ctx); + return gdb_io_write((char *)&data, 1, ctx); } -static unsigned char -gdb_io_read(struct gdb_context *ctx) +static unsigned char gdb_io_read(struct gdb_context *ctx) { return serial_getc(ctx->serhnd); } /* Receive a command. Returns -1 on csum error, 0 otherwise. */ /* Does not acknowledge. */ -static int -attempt_receive_packet(struct gdb_context *ctx) +static int attempt_receive_packet(struct gdb_context *ctx) { u8 csum; u8 received_csum; @@ -173,8 +165,7 @@ attempt_receive_packet(struct gdb_context *ctx) continue; csum = 0; - for ( ctx->in_bytes = 0; - ctx->in_bytes < sizeof(ctx->in_buf); + for ( ctx->in_bytes = 0; ctx->in_bytes < sizeof(ctx->in_buf); ctx->in_bytes++ ) { ch = gdb_io_read(ctx); @@ -191,16 +182,15 @@ attempt_receive_packet(struct gdb_context *ctx) } ctx->in_buf[ctx->in_bytes] = '\0'; - received_csum = char2hex(gdb_io_read(ctx)) * 16 + - char2hex(gdb_io_read(ctx)); + received_csum = + char2hex(gdb_io_read(ctx)) * 16 + char2hex(gdb_io_read(ctx)); return (received_csum == csum) ? 0 : -1; } /* Receive a command, discarding up to ten packets with csum * errors. Acknowledges all received packets. 
*/ -static int -receive_command(struct gdb_context *ctx) +static int receive_command(struct gdb_context *ctx) { int r, count = 0; @@ -216,45 +206,41 @@ receive_command(struct gdb_context *ctx) /* routines to send reply packets */ -static void -gdb_start_packet(struct gdb_context *ctx) +static void gdb_start_packet(struct gdb_context *ctx) { ctx->out_buf[0] = '$'; ctx->out_offset = 1; ctx->out_csum = 0; } -static void -gdb_write_to_packet_char(u8 data, struct gdb_context *ctx) +static void gdb_write_to_packet_char(u8 data, struct gdb_context *ctx) { ctx->out_csum += data; ctx->out_buf[ctx->out_offset] = data; ctx->out_offset++; } -void -gdb_write_to_packet(const char *buf, int count, struct gdb_context *ctx) +void gdb_write_to_packet(const char *buf, int count, struct gdb_context *ctx) { int x; for ( x = 0; x < count; x++ ) gdb_write_to_packet_char(buf[x], ctx); } -void -gdb_write_to_packet_str(const char *buf, struct gdb_context *ctx) +void gdb_write_to_packet_str(const char *buf, struct gdb_context *ctx) { gdb_write_to_packet(buf, strlen(buf), ctx); } -void -gdb_write_to_packet_hex(unsigned long x, int int_size, struct gdb_context *ctx) +void gdb_write_to_packet_hex(unsigned long x, int int_size, + struct gdb_context *ctx) { char buf[sizeof(unsigned long) * 2 + 1]; int i, width = int_size * 2; buf[sizeof(unsigned long) * 2] = 0; - switch ( int_size ) + switch (int_size) { case sizeof(u8): case sizeof(u16): @@ -262,17 +248,18 @@ gdb_write_to_packet_hex(unsigned long x, int int_size, struct gdb_context *ctx) case sizeof(u64): break; default: - dbg_printk("WARNING: %s x: %#lx int_size: %d\n", - __func__, x, int_size); + dbg_printk("WARNING: %s x: %#lx int_size: %d\n", __func__, x, int_size); break; } #ifdef __BIG_ENDIAN - i = sizeof(unsigned long) * 2 - do { + i = sizeof(unsigned long) * 2 do + { buf[--i] = hex2char(x & 15); x >>= 4; - } while ( x ); + } + while ( x ) + ; while ( (i + width) > (sizeof(unsigned long) * 2) ) buf[--i] = '0'; @@ -282,22 +269,21 @@ gdb_write_to_packet_hex(unsigned long x, int int_size, struct gdb_context *ctx) i = 0; while ( i < width ) { - buf[i++] = hex2char(x>>4); + buf[i++] = hex2char(x >> 4); buf[i++] = hex2char(x); x >>= 8; } gdb_write_to_packet(buf, width, ctx); #else -# error unknown endian +#error unknown endian #endif } -static int -gdb_check_ack(struct gdb_context *ctx) +static int gdb_check_ack(struct gdb_context *ctx) { u8 c = gdb_io_read(ctx); - switch ( c ) + switch (c) { case '+': return 1; @@ -310,8 +296,7 @@ gdb_check_ack(struct gdb_context *ctx) } /* Return 0 if the reply was successfully received, !0 otherwise. 
*/ -void -gdb_send_packet(struct gdb_context *ctx) +void gdb_send_packet(struct gdb_context *ctx) { char buf[3]; int count; @@ -327,12 +312,11 @@ gdb_send_packet(struct gdb_context *ctx) } while ( !gdb_check_ack(ctx) && (count++ < GDB_RETRY_MAX) ); if ( count == GDB_RETRY_MAX ) - dbg_printk("WARNING: %s reached max retry %d\n", - __func__, GDB_RETRY_MAX); + dbg_printk("WARNING: %s reached max retry %d\n", __func__, + GDB_RETRY_MAX); } -void -gdb_send_reply(const char *buf, struct gdb_context *ctx) +void gdb_send_reply(const char *buf, struct gdb_context *ctx) { gdb_start_packet(ctx); gdb_write_to_packet_str(buf, ctx); @@ -341,23 +325,20 @@ gdb_send_reply(const char *buf, struct gdb_context *ctx) /* arch neutral command handlers */ -static void -gdb_cmd_signum(struct gdb_context *ctx) +static void gdb_cmd_signum(struct gdb_context *ctx) { gdb_write_to_packet_char('S', ctx); gdb_write_to_packet_hex(ctx->signum, sizeof(ctx->signum), ctx); gdb_send_packet(ctx); } -static void -gdb_cmd_read_mem(unsigned long addr, unsigned long length, - struct gdb_context *ctx) +static void gdb_cmd_read_mem(unsigned long addr, unsigned long length, + struct gdb_context *ctx) { int x, r; unsigned char val; - dbg_printk("Memory read starting at %lx, length %lx.\n", addr, - length); + dbg_printk("Memory read starting at %lx, length %lx.\n", addr, length); for ( x = 0; x < length; x++ ) { @@ -378,9 +359,8 @@ gdb_cmd_read_mem(unsigned long addr, unsigned long length, gdb_send_packet(ctx); } -static void -gdb_cmd_write_mem(unsigned long addr, unsigned long length, - const char *buf, struct gdb_context *ctx) +static void gdb_cmd_write_mem(unsigned long addr, unsigned long length, + const char *buf, struct gdb_context *ctx) { int x, r; unsigned char val; @@ -390,7 +370,7 @@ gdb_cmd_write_mem(unsigned long addr, unsigned long length, for ( x = 0; x < length; x++, addr++, buf += 2 ) { val = str2ulong(buf, sizeof(val)); - r = gdb_arch_copy_to_user((void*)addr, (void*)&val, 1); + r = gdb_arch_copy_to_user((void *)addr, (void *)&val, 1); if ( r != 0 ) { dbg_printk("Error writing to %lx.\n", addr); @@ -398,7 +378,7 @@ gdb_cmd_write_mem(unsigned long addr, unsigned long length, } } - if (x == length) + if ( x == length ) gdb_write_to_packet_str("OK", ctx); else gdb_write_to_packet_str("E11", ctx); @@ -408,17 +388,15 @@ gdb_cmd_write_mem(unsigned long addr, unsigned long length, gdb_send_packet(ctx); } -static void -gdbstub_attach(struct gdb_context *ctx) +static void gdbstub_attach(struct gdb_context *ctx) { if ( ctx->currently_attached ) - return; + return; ctx->currently_attached = 1; ctx->console_steal_id = console_steal(ctx->serhnd, gdbstub_console_puts); } -static void -gdbstub_detach(struct gdb_context *ctx) +static void gdbstub_detach(struct gdb_context *ctx) { if ( !ctx->currently_attached ) return; @@ -427,8 +405,7 @@ gdbstub_detach(struct gdb_context *ctx) } /* command dispatcher */ -static int -process_command(struct cpu_user_regs *regs, struct gdb_context *ctx) +static int process_command(struct cpu_user_regs *regs, struct gdb_context *ctx) { const char *ptr; unsigned long addr, length, val; @@ -438,12 +415,12 @@ process_command(struct cpu_user_regs *regs, struct gdb_context *ctx) /* XXX check ctx->in_bytes >= 2 or similar. 
*/ gdb_start_packet(ctx); - switch ( ctx->in_buf[0] ) + switch (ctx->in_buf[0]) { - case '?': /* query signal number */ + case '?': /* query signal number */ gdb_cmd_signum(ctx); break; - case 'H': /* thread operations */ + case 'H': /* thread operations */ gdb_send_reply("OK", ctx); break; case 'g': /* Read registers */ @@ -475,7 +452,7 @@ process_command(struct cpu_user_regs *regs, struct gdb_context *ctx) return 0; } length = simple_strtoul(ptr + 1, &ptr, 16); - if ( ptr[0] != ':') + if ( ptr[0] != ':' ) { gdb_send_reply("E04", ctx); return 0; @@ -537,16 +514,11 @@ process_command(struct cpu_user_regs *regs, struct gdb_context *ctx) return resume; } -static struct gdb_context -__gdb_ctx = { - .serhnd = -1, - .running = ATOMIC_INIT(1), - .signum = 1 -}; +static struct gdb_context __gdb_ctx = { + .serhnd = -1, .running = ATOMIC_INIT(1), .signum = 1}; static struct gdb_context *gdb_ctx = &__gdb_ctx; -static void -gdbstub_console_puts(const char *str) +static void gdbstub_console_puts(const char *str) { const char *p; @@ -555,16 +527,15 @@ gdbstub_console_puts(const char *str) for ( p = str; *p != '\0'; p++ ) { - gdb_write_to_packet_char(hex2char((*p>>4) & 0x0f), gdb_ctx ); - gdb_write_to_packet_char(hex2char((*p) & 0x0f), gdb_ctx ); + gdb_write_to_packet_char(hex2char((*p >> 4) & 0x0f), gdb_ctx); + gdb_write_to_packet_char(hex2char((*p) & 0x0f), gdb_ctx); } gdb_send_packet(gdb_ctx); } /* trap handler: main entry point */ -int -__trap_to_gdb(struct cpu_user_regs *regs, unsigned long cookie) +int __trap_to_gdb(struct cpu_user_regs *regs, unsigned long cookie) { int rc = 0; unsigned long flags; @@ -683,7 +654,7 @@ static void gdb_smp_pause(void) int timeout = 100; int cpu; - for_each_online_cpu(cpu) + for_each_online_cpu (cpu) { atomic_set(&gdb_cpu[cpu].ack, 0); atomic_set(&gdb_cpu[cpu].paused, 1); @@ -691,17 +662,17 @@ static void gdb_smp_pause(void) atomic_set(&gdb_smp_paused_count, 0); - smp_call_function(gdb_pause_this_cpu, NULL, /* dont wait! */0); + smp_call_function(gdb_pause_this_cpu, NULL, /* dont wait! */ 0); /* Wait 100ms for all other CPUs to enter pause loop */ - while ( (atomic_read(&gdb_smp_paused_count) < (num_online_cpus() - 1)) - && (timeout-- > 0) ) + while ( (atomic_read(&gdb_smp_paused_count) < (num_online_cpus() - 1)) && + (timeout-- > 0) ) mdelay(1); if ( atomic_read(&gdb_smp_paused_count) < (num_online_cpus() - 1) ) { printk("GDB: Not all CPUs have paused, missing CPUs "); - for_each_online_cpu(cpu) + for_each_online_cpu (cpu) { if ( (cpu != smp_processor_id()) && !atomic_read(&gdb_cpu[cpu].ack) ) @@ -716,21 +687,19 @@ static void gdb_smp_resume(void) int cpu; int timeout = 100; - for_each_online_cpu(cpu) + for_each_online_cpu (cpu) atomic_set(&gdb_cpu[cpu].paused, 0); /* Make sure all CPUs resume */ - while ( (atomic_read(&gdb_smp_paused_count) > 0) - && (timeout-- > 0) ) + while ( (atomic_read(&gdb_smp_paused_count) > 0) && (timeout-- > 0) ) mdelay(1); if ( atomic_read(&gdb_smp_paused_count) > 0 ) { printk("GDB: Not all CPUs have resumed execution, missing CPUs "); - for_each_online_cpu(cpu) + for_each_online_cpu (cpu) { - if ( (cpu != smp_processor_id()) && - atomic_read(&gdb_cpu[cpu].ack) ) + if ( (cpu != smp_processor_id()) && atomic_read(&gdb_cpu[cpu].ack) ) printk("%d ", cpu); } printk("\n"); diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c index 80728ea57d..42d9dd8416 100644 --- a/xen/common/grant_table.c +++ b/xen/common/grant_table.c @@ -41,28 +41,29 @@ #include /* Per-domain grant information. 
*/ -struct grant_table { +struct grant_table +{ /* * Lock protecting updates to grant table state (version, active * entry list, etc.) */ - percpu_rwlock_t lock; + percpu_rwlock_t lock; /* Lock protecting the maptrack limit */ - spinlock_t maptrack_lock; + spinlock_t maptrack_lock; /* * Defaults to v1. May be changed with GNTTABOP_set_version. All other * values are invalid. */ - unsigned int gt_version; + unsigned int gt_version; /* Resource limits of the domain. */ - unsigned int max_grant_frames; - unsigned int max_maptrack_frames; + unsigned int max_grant_frames; + unsigned int max_maptrack_frames; /* Table size. Number of frames shared with guest */ - unsigned int nr_grant_frames; + unsigned int nr_grant_frames; /* Number of grant status frames shared with guest (for version 2) */ - unsigned int nr_status_frames; + unsigned int nr_status_frames; /* Number of available maptrack entries. */ - unsigned int maptrack_limit; + unsigned int maptrack_limit; /* Shared grant table (see include/public/grant_table.h). */ union { void **shared_raw; @@ -70,7 +71,7 @@ struct grant_table { union grant_entry_v2 **shared_v2; }; /* State grant table (see include/public/grant_table.h). */ - grant_status_t **status; + grant_status_t **status; /* Active grant table. */ struct active_grant_entry **active; /* Mapping tracking table per vcpu. */ @@ -134,8 +135,8 @@ custom_param("gnttab", parse_gnttab); * a hypervisor using different values. */ #define GNTTABOP_CONTINUATION_ARG_SHIFT 12 -#define GNTTABOP_CMD_MASK ((1<maptrack[(e)/MAPTRACK_PER_PAGE][(e)%MAPTRACK_PER_PAGE]) + ((t)->maptrack[(e) / MAPTRACK_PER_PAGE][(e) % MAPTRACK_PER_PAGE]) -static inline unsigned int -nr_maptrack_frames(struct grant_table *t) +static inline unsigned int nr_maptrack_frames(struct grant_table *t) { return t->maptrack_limit / MAPTRACK_PER_PAGE; } @@ -216,62 +218,63 @@ nr_maptrack_frames(struct grant_table *t) #define SHGNT_PER_PAGE_V1 (PAGE_SIZE / sizeof(grant_entry_v1_t)) #define shared_entry_v1(t, e) \ - ((t)->shared_v1[(e)/SHGNT_PER_PAGE_V1][(e)%SHGNT_PER_PAGE_V1]) + ((t)->shared_v1[(e) / SHGNT_PER_PAGE_V1][(e) % SHGNT_PER_PAGE_V1]) #define SHGNT_PER_PAGE_V2 (PAGE_SIZE / sizeof(grant_entry_v2_t)) #define shared_entry_v2(t, e) \ - ((t)->shared_v2[(e)/SHGNT_PER_PAGE_V2][(e)%SHGNT_PER_PAGE_V2]) + ((t)->shared_v2[(e) / SHGNT_PER_PAGE_V2][(e) % SHGNT_PER_PAGE_V2]) #define STGNT_PER_PAGE (PAGE_SIZE / sizeof(grant_status_t)) #define status_entry(t, e) \ - ((t)->status[(e)/STGNT_PER_PAGE][(e)%STGNT_PER_PAGE]) -static grant_entry_header_t * -shared_entry_header(struct grant_table *t, grant_ref_t ref) + ((t)->status[(e) / STGNT_PER_PAGE][(e) % STGNT_PER_PAGE]) +static grant_entry_header_t *shared_entry_header(struct grant_table *t, + grant_ref_t ref) { if ( t->gt_version == 1 ) - return (grant_entry_header_t*)&shared_entry_v1(t, ref); + return (grant_entry_header_t *)&shared_entry_v1(t, ref); else return &shared_entry_v2(t, ref).hdr; } /* Active grant entry - used for shadowing GTF_permit_access grants. */ -struct active_grant_entry { - uint32_t pin; /* Reference count information: */ - /* Count of writable host-CPU mappings. */ -#define GNTPIN_hstw_shift 0 -#define GNTPIN_hstw_inc (1U << GNTPIN_hstw_shift) -#define GNTPIN_hstw_mask (0xFFU << GNTPIN_hstw_shift) - /* Count of read-only host-CPU mappings. */ -#define GNTPIN_hstr_shift 8 -#define GNTPIN_hstr_inc (1U << GNTPIN_hstr_shift) -#define GNTPIN_hstr_mask (0xFFU << GNTPIN_hstr_shift) - /* Count of writable device-bus mappings. 
*/ -#define GNTPIN_devw_shift 16 -#define GNTPIN_devw_inc (1U << GNTPIN_devw_shift) -#define GNTPIN_devw_mask (0xFFU << GNTPIN_devw_shift) - /* Count of read-only device-bus mappings. */ -#define GNTPIN_devr_shift 24 -#define GNTPIN_devr_inc (1U << GNTPIN_devr_shift) -#define GNTPIN_devr_mask (0xFFU << GNTPIN_devr_shift) - - domid_t domid; /* Domain being granted access. */ - unsigned int start:15; /* For sub-page grants, the start offset - in the page. */ - bool is_sub_page:1; /* True if this is a sub-page grant. */ - unsigned int length:16; /* For sub-page grants, the length of the - grant. */ - grant_ref_t trans_gref; +struct active_grant_entry +{ + uint32_t pin; /* Reference count information: */ + /* Count of writable host-CPU mappings. */ +#define GNTPIN_hstw_shift 0 +#define GNTPIN_hstw_inc (1U << GNTPIN_hstw_shift) +#define GNTPIN_hstw_mask (0xFFU << GNTPIN_hstw_shift) + /* Count of read-only host-CPU mappings. */ +#define GNTPIN_hstr_shift 8 +#define GNTPIN_hstr_inc (1U << GNTPIN_hstr_shift) +#define GNTPIN_hstr_mask (0xFFU << GNTPIN_hstr_shift) + /* Count of writable device-bus mappings. */ +#define GNTPIN_devw_shift 16 +#define GNTPIN_devw_inc (1U << GNTPIN_devw_shift) +#define GNTPIN_devw_mask (0xFFU << GNTPIN_devw_shift) + /* Count of read-only device-bus mappings. */ +#define GNTPIN_devr_shift 24 +#define GNTPIN_devr_inc (1U << GNTPIN_devr_shift) +#define GNTPIN_devr_mask (0xFFU << GNTPIN_devr_shift) + + domid_t domid; /* Domain being granted access. */ + unsigned int start : 15; /* For sub-page grants, the start offset + in the page. */ + bool is_sub_page : 1; /* True if this is a sub-page grant. */ + unsigned int length : 16; /* For sub-page grants, the length of the + grant. */ + grant_ref_t trans_gref; struct domain *trans_domain; - mfn_t mfn; /* Machine frame being granted. */ + mfn_t mfn; /* Machine frame being granted. */ #ifndef NDEBUG - gfn_t gfn; /* Guest's idea of the frame being granted. */ + gfn_t gfn; /* Guest's idea of the frame being granted. */ #endif - spinlock_t lock; /* lock to protect access of this entry. - see docs/misc/grant-tables.txt for - locking protocol */ + spinlock_t lock; /* lock to protect access of this entry. 
+ see docs/misc/grant-tables.txt for + locking protocol */ }; #define ACGNT_PER_PAGE (PAGE_SIZE / sizeof(struct active_grant_entry)) #define _active_entry(t, e) \ - ((t)->active[(e)/ACGNT_PER_PAGE][(e)%ACGNT_PER_PAGE]) + ((t)->active[(e) / ACGNT_PER_PAGE][(e) % ACGNT_PER_PAGE]) static inline void act_set_gfn(struct active_grant_entry *act, gfn_t gfn) { @@ -323,8 +326,7 @@ num_act_frames_from_sha_frames(const unsigned int num) #define max_nr_active_grant_frames(gt) \ num_act_frames_from_sha_frames((gt)->max_grant_frames) -static inline unsigned int -nr_active_grant_frames(struct grant_table *gt) +static inline unsigned int nr_active_grant_frames(struct grant_table *gt) { return num_act_frames_from_sha_frames(nr_grant_frames(gt)); } @@ -378,7 +380,7 @@ static int get_paged_frame(unsigned long gfn, mfn_t *mfn, int rc; rc = check_get_page_from_gfn(rd, _gfn(gfn), readonly, &p2mt, page); - switch ( rc ) + switch (rc) { case 0: break; @@ -407,8 +409,8 @@ static int get_paged_frame(unsigned long gfn, mfn_t *mfn, return GNTST_okay; } -static inline void -double_gt_lock(struct grant_table *lgt, struct grant_table *rgt) +static inline void double_gt_lock(struct grant_table *lgt, + struct grant_table *rgt) { /* * See mapkind() for why the write lock is also required for the @@ -427,8 +429,8 @@ double_gt_lock(struct grant_table *lgt, struct grant_table *rgt) } } -static inline void -double_gt_unlock(struct grant_table *lgt, struct grant_table *rgt) +static inline void double_gt_unlock(struct grant_table *lgt, + struct grant_table *rgt) { grant_write_unlock(lgt); if ( lgt != rgt ) @@ -437,8 +439,8 @@ double_gt_unlock(struct grant_table *lgt, struct grant_table *rgt) #define INVALID_MAPTRACK_HANDLE UINT_MAX -static inline grant_handle_t -_get_maptrack_handle(struct grant_table *t, struct vcpu *v) +static inline grant_handle_t _get_maptrack_handle(struct grant_table *t, + struct vcpu *v) { unsigned int head, next, prev_head; @@ -514,9 +516,8 @@ static grant_handle_t steal_maptrack_handle(struct grant_table *t, return INVALID_MAPTRACK_HANDLE; } -static inline void -put_maptrack_handle( - struct grant_table *t, grant_handle_t handle) +static inline void put_maptrack_handle(struct grant_table *t, + grant_handle_t handle) { struct domain *currd = current->domain; struct vcpu *v; @@ -542,13 +543,11 @@ put_maptrack_handle( spin_unlock(&v->maptrack_freelist_lock); } -static inline grant_handle_t -get_maptrack_handle( - struct grant_table *lgt) +static inline grant_handle_t get_maptrack_handle(struct grant_table *lgt) { - struct vcpu *curr = current; - unsigned int i, head; - grant_handle_t handle; + struct vcpu *curr = current; + unsigned int i, head; + grant_handle_t handle; struct grant_mapping *new_mt = NULL; handle = _get_maptrack_handle(lgt, curr); @@ -628,7 +627,7 @@ get_maptrack_handle( /* Number of grant table entries. Caller must hold d's grant table lock. */ static unsigned int nr_grant_entries(struct grant_table *gt) { - switch ( gt->gt_version ) + switch (gt->gt_version) { #define f2e(nr, ver) (((nr) << PAGE_SHIFT) / sizeof(grant_entry_v##ver##_t)) case 1: @@ -645,9 +644,7 @@ static unsigned int nr_grant_entries(struct grant_table *gt) return 0; } -static int _set_status_v1(domid_t domid, - int readonly, - int mapflag, +static int _set_status_v1(domid_t domid, int readonly, int mapflag, grant_entry_header_t *shah, struct active_grant_entry *act) { @@ -678,17 +675,15 @@ static int _set_status_v1(domid_t domid, * up to five times, and then fails under the assumption that * the guest is misbehaving. 
*/ - for ( ; ; ) + for ( ;; ) { /* If not already pinned, check the grant domid and type. */ if ( !act->pin && - (((scombo.shorts.flags & mask) != - GTF_permit_access) || + (((scombo.shorts.flags & mask) != GTF_permit_access) || (scombo.shorts.domid != domid)) ) PIN_FAIL(done, GNTST_general_error, "Bad flags (%x) or dom (%d); expected d%d\n", - scombo.shorts.flags, scombo.shorts.domid, - domid); + scombo.shorts.flags, scombo.shorts.domid, domid); new_scombo = scombo; new_scombo.shorts.flags |= GTF_reading; @@ -701,8 +696,7 @@ static int _set_status_v1(domid_t domid, "Attempt to write-pin a r/o grant entry\n"); } - prev_scombo.word = cmpxchg((u32 *)shah, - scombo.word, new_scombo.word); + prev_scombo.word = cmpxchg((u32 *)shah, scombo.word, new_scombo.word); if ( likely(prev_scombo.word == scombo.word) ) break; @@ -717,18 +711,16 @@ done: return rc; } -static int _set_status_v2(domid_t domid, - int readonly, - int mapflag, +static int _set_status_v2(domid_t domid, int readonly, int mapflag, grant_entry_header_t *shah, struct active_grant_entry *act, grant_status_t *status) { - int rc = GNTST_okay; + int rc = GNTST_okay; union grant_combo scombo; uint16_t flags = shah->flags; - domid_t id = shah->domid; - uint16_t mask = GTF_type_mask; + domid_t id = shah->domid; + uint16_t mask = GTF_type_mask; /* we read flags and domid in a single memory access. this avoids the need for another memory barrier to @@ -745,13 +737,12 @@ static int _set_status_v2(domid_t domid, mask |= GTF_sub_page; /* If not already pinned, check the grant domid and type. */ - if ( !act->pin && - ( (((flags & mask) != GTF_permit_access) && - ((flags & mask) != GTF_transitive)) || - (id != domid)) ) + if ( !act->pin && ((((flags & mask) != GTF_permit_access) && + ((flags & mask) != GTF_transitive)) || + (id != domid)) ) PIN_FAIL(done, GNTST_general_error, - "Bad flags (%x) or dom (%d); expected d%d, flags %x\n", - flags, id, domid, mask); + "Bad flags (%x) or dom (%d); expected d%d, flags %x\n", flags, + id, domid, mask); if ( readonly ) { @@ -778,14 +769,14 @@ static int _set_status_v2(domid_t domid, { if ( (((flags & mask) != GTF_permit_access) && ((flags & mask) != GTF_transitive)) || - (id != domid) || - (!readonly && (flags & GTF_readonly)) ) + (id != domid) || (!readonly && (flags & GTF_readonly)) ) { gnttab_clear_flag(_GTF_writing, status); gnttab_clear_flag(_GTF_reading, status); - PIN_FAIL(done, GNTST_general_error, - "Unstable flags (%x) or dom (%d); expected d%d (r/w: %d)\n", - flags, id, domid, !readonly); + PIN_FAIL( + done, GNTST_general_error, + "Unstable flags (%x) or dom (%d); expected d%d (r/w: %d)\n", + flags, id, domid, !readonly); } } else @@ -802,16 +793,10 @@ done: return rc; } - -static int _set_status(unsigned gt_version, - domid_t domid, - int readonly, - int mapflag, - grant_entry_header_t *shah, - struct active_grant_entry *act, - grant_status_t *status) +static int _set_status(unsigned gt_version, domid_t domid, int readonly, + int mapflag, grant_entry_header_t *shah, + struct active_grant_entry *act, grant_status_t *status) { - if ( gt_version == 1 ) return _set_status_v1(domid, readonly, mapflag, shah, act); else @@ -839,8 +824,7 @@ static struct active_grant_entry *grant_map_exists(const struct domain *ld, { struct active_grant_entry *act = active_entry_acquire(rgt, ref); - if ( act->pin && act->domid == ld->domain_id && - mfn_eq(act->mfn, mfn) ) + if ( act->pin && act->domid == ld->domain_id && mfn_eq(act->mfn, mfn) ) return act; active_entry_release(act); } @@ -856,8 +840,8 @@ static struct 
active_grant_entry *grant_map_exists(const struct domain *ld, #define MAPKIND_READ 1 #define MAPKIND_WRITE 2 -static unsigned int mapkind( - struct grant_table *lgt, const struct domain *rd, mfn_t mfn) +static unsigned int mapkind(struct grant_table *lgt, const struct domain *rd, + mfn_t mfn) { struct grant_mapping *map; grant_handle_t handle, limit = lgt->maptrack_limit; @@ -879,32 +863,29 @@ static unsigned int mapkind( for ( handle = 0; !(kind & MAPKIND_WRITE) && handle < limit; handle++ ) { map = &maptrack_entry(lgt, handle); - if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) || + if ( !(map->flags & (GNTMAP_device_map | GNTMAP_host_map)) || map->domid != rd->domain_id ) continue; if ( mfn_eq(_active_entry(rd->grant_table, map->ref).mfn, mfn) ) - kind |= map->flags & GNTMAP_readonly ? - MAPKIND_READ : MAPKIND_WRITE; + kind |= map->flags & GNTMAP_readonly ? MAPKIND_READ : MAPKIND_WRITE; } return kind; } -static void -map_grant_ref( - struct gnttab_map_grant_ref *op) +static void map_grant_ref(struct gnttab_map_grant_ref *op) { struct domain *ld, *rd, *owner = NULL; struct grant_table *lgt, *rgt; - struct vcpu *led; + struct vcpu *led; grant_handle_t handle; mfn_t mfn; struct page_info *pg = NULL; - int rc = GNTST_okay; - u32 old_pin; - u32 act_pin; - unsigned int cache_flags, refcnt = 0, typecnt = 0; - bool host_map_created = false; + int rc = GNTST_okay; + u32 old_pin; + u32 act_pin; + unsigned int cache_flags, refcnt = 0, typecnt = 0; + bool host_map_created = false; struct active_grant_entry *act = NULL; struct grant_mapping *mt; grant_entry_header_t *shah; @@ -914,7 +895,7 @@ map_grant_ref( led = current; ld = led->domain; - if ( unlikely((op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) ) + if ( unlikely((op->flags & (GNTMAP_device_map | GNTMAP_host_map)) == 0) ) { gdprintk(XENLOG_INFO, "Bad flags in grant map op: %x\n", op->flags); op->status = GNTST_bad_gntref; @@ -922,8 +903,8 @@ map_grant_ref( } if ( unlikely(paging_mode_external(ld) && - (op->flags & (GNTMAP_device_map|GNTMAP_application_map| - GNTMAP_contains_pte))) ) + (op->flags & (GNTMAP_device_map | GNTMAP_application_map | + GNTMAP_contains_pte))) ) { gdprintk(XENLOG_INFO, "No device mapping in HVM domain\n"); op->status = GNTST_general_error; @@ -959,40 +940,39 @@ map_grant_ref( grant_read_lock(rgt); /* Bounds check on the grant ref */ - if ( unlikely(op->ref >= nr_grant_entries(rgt))) - PIN_FAIL(unlock_out, GNTST_bad_gntref, "Bad ref %#x for d%d\n", - op->ref, rgt->domain->domain_id); + if ( unlikely(op->ref >= nr_grant_entries(rgt)) ) + PIN_FAIL(unlock_out, GNTST_bad_gntref, "Bad ref %#x for d%d\n", op->ref, + rgt->domain->domain_id); act = active_entry_acquire(rgt, op->ref); shah = shared_entry_header(rgt, op->ref); status = rgt->gt_version == 1 ? &shah->flags : &status_entry(rgt, op->ref); /* If already pinned, check the active domid and avoid refcnt overflow. 
*/ - if ( act->pin && - ((act->domid != ld->domain_id) || - (act->pin & 0x80808080U) != 0 || - (act->is_sub_page)) ) + if ( act->pin && ((act->domid != ld->domain_id) || + (act->pin & 0x80808080U) != 0 || (act->is_sub_page)) ) PIN_FAIL(act_release_out, GNTST_general_error, - "Bad domain (%d != %d), or risk of counter overflow %08x, or subpage %d\n", + "Bad domain (%d != %d), or risk of counter overflow %08x, or " + "subpage %d\n", act->domid, ld->domain_id, act->pin, act->is_sub_page); - if ( !act->pin || - (!(op->flags & GNTMAP_readonly) && - !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask))) ) + if ( !act->pin || (!(op->flags & GNTMAP_readonly) && + !(act->pin & (GNTPIN_hstw_mask | GNTPIN_devw_mask))) ) { if ( (rc = _set_status(rgt->gt_version, ld->domain_id, - op->flags & GNTMAP_readonly, - 1, shah, act, status) ) != GNTST_okay ) + op->flags & GNTMAP_readonly, 1, shah, act, + status)) != GNTST_okay ) goto act_release_out; if ( !act->pin ) { - unsigned long gfn = rgt->gt_version == 1 ? - shared_entry_v1(rgt, op->ref).frame : - shared_entry_v2(rgt, op->ref).full_page.frame; + unsigned long gfn = + rgt->gt_version == 1 + ? shared_entry_v1(rgt, op->ref).frame + : shared_entry_v2(rgt, op->ref).full_page.frame; - rc = get_paged_frame(gfn, &mfn, &pg, - op->flags & GNTMAP_readonly, rd); + rc = get_paged_frame(gfn, &mfn, &pg, op->flags & GNTMAP_readonly, + rd); if ( rc != GNTST_okay ) goto unlock_out_clear; act_set_gfn(act, _gfn(gfn)); @@ -1008,16 +988,16 @@ map_grant_ref( old_pin = act->pin; if ( op->flags & GNTMAP_device_map ) - act->pin += (op->flags & GNTMAP_readonly) ? - GNTPIN_devr_inc : GNTPIN_devw_inc; + act->pin += + (op->flags & GNTMAP_readonly) ? GNTPIN_devr_inc : GNTPIN_devw_inc; if ( op->flags & GNTMAP_host_map ) - act->pin += (op->flags & GNTMAP_readonly) ? - GNTPIN_hstr_inc : GNTPIN_hstw_inc; + act->pin += + (op->flags & GNTMAP_readonly) ? GNTPIN_hstr_inc : GNTPIN_hstw_inc; mfn = act->mfn; act_pin = act->pin; - cache_flags = (shah->flags & (GTF_PAT | GTF_PWT | GTF_PCD) ); + cache_flags = (shah->flags & (GTF_PAT | GTF_PWT | GTF_PCD)); active_entry_release(act); grant_read_unlock(rgt); @@ -1054,7 +1034,7 @@ map_grant_ref( if ( !iomem_access_permitted(rd, mfn_x(mfn), mfn_x(mfn)) ) { gdprintk(XENLOG_WARNING, - "Iomem mapping not permitted %#"PRI_mfn" (domain %d)\n", + "Iomem mapping not permitted %#" PRI_mfn " (domain %d)\n", mfn_x(mfn), rd->domain_id); rc = GNTST_general_error; goto undo_out; @@ -1074,8 +1054,7 @@ map_grant_ref( { if ( (op->flags & GNTMAP_device_map) && !(op->flags & GNTMAP_readonly) ) { - if ( (owner == dom_cow) || - !get_page_type(pg, PGT_writable_page) ) + if ( (owner == dom_cow) || !get_page_type(pg, PGT_writable_page) ) goto could_not_pin; typecnt++; } @@ -1113,8 +1092,8 @@ map_grant_ref( { could_not_pin: if ( !rd->is_dying ) - gdprintk(XENLOG_WARNING, "Could not pin grant frame %#"PRI_mfn"\n", - mfn_x(mfn)); + gdprintk(XENLOG_WARNING, + "Could not pin grant frame %#" PRI_mfn "\n", mfn_x(mfn)); rc = GNTST_general_error; goto undo_out; } @@ -1130,8 +1109,8 @@ map_grant_ref( /* We're not translated, so we know that gmfns and mfns are the same things, so the IOMMU entry is always 1-to-1. 
*/ kind = mapkind(lgt, rd, mfn); - if ( (act_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) && - !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) ) + if ( (act_pin & (GNTPIN_hstw_mask | GNTPIN_devw_mask)) && + !(old_pin & (GNTPIN_hstw_mask | GNTPIN_devw_mask)) ) { if ( !(kind & MAPKIND_WRITE) ) err = iommu_legacy_map(ld, _dfn(mfn_x(mfn)), mfn, 0, @@ -1163,7 +1142,7 @@ map_grant_ref( */ mt = &maptrack_entry(lgt, handle); mt->domid = op->dom; - mt->ref = op->ref; + mt->ref = op->ref; smp_wmb(); write_atomic(&mt->flags, op->flags); @@ -1171,13 +1150,13 @@ map_grant_ref( double_gt_unlock(lgt, rgt); op->dev_bus_addr = mfn_to_maddr(mfn); - op->handle = handle; - op->status = GNTST_okay; + op->handle = handle; + op->status = GNTST_okay; rcu_unlock_domain(rd); return; - undo_out: +undo_out: if ( host_map_created ) { replace_grant_host_mapping(op->host_addr, mfn, 0, op->flags); @@ -1195,33 +1174,33 @@ map_grant_ref( act = active_entry_acquire(rgt, op->ref); if ( op->flags & GNTMAP_device_map ) - act->pin -= (op->flags & GNTMAP_readonly) ? - GNTPIN_devr_inc : GNTPIN_devw_inc; + act->pin -= + (op->flags & GNTMAP_readonly) ? GNTPIN_devr_inc : GNTPIN_devw_inc; if ( op->flags & GNTMAP_host_map ) - act->pin -= (op->flags & GNTMAP_readonly) ? - GNTPIN_hstr_inc : GNTPIN_hstw_inc; + act->pin -= + (op->flags & GNTMAP_readonly) ? GNTPIN_hstr_inc : GNTPIN_hstw_inc; - unlock_out_clear: +unlock_out_clear: if ( !(op->flags & GNTMAP_readonly) && - !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) ) + !(act->pin & (GNTPIN_hstw_mask | GNTPIN_devw_mask)) ) gnttab_clear_flag(_GTF_writing, status); if ( !act->pin ) gnttab_clear_flag(_GTF_reading, status); - act_release_out: +act_release_out: active_entry_release(act); - unlock_out: +unlock_out: grant_read_unlock(rgt); op->status = rc; put_maptrack_handle(lgt, handle); rcu_unlock_domain(rd); } -static long -gnttab_map_grant_ref( - XEN_GUEST_HANDLE_PARAM(gnttab_map_grant_ref_t) uop, unsigned int count) +static long gnttab_map_grant_ref(XEN_GUEST_HANDLE_PARAM(gnttab_map_grant_ref_t) + uop, + unsigned int count) { int i; struct gnttab_map_grant_ref op; @@ -1243,15 +1222,13 @@ gnttab_map_grant_ref( return 0; } -static void -unmap_common( - struct gnttab_unmap_common *op) +static void unmap_common(struct gnttab_unmap_common *op) { - domid_t dom; - struct domain *ld, *rd; + domid_t dom; + struct domain *ld, *rd; struct grant_table *lgt, *rgt; struct active_grant_entry *act; - s16 rc = 0; + s16 rc = 0; struct grant_mapping *map; unsigned int flags; bool put_handle = false; @@ -1261,8 +1238,8 @@ unmap_common( if ( unlikely(op->handle >= lgt->maptrack_limit) ) { - gdprintk(XENLOG_INFO, "Bad d%d handle %#x\n", - lgt->domain->domain_id, op->handle); + gdprintk(XENLOG_INFO, "Bad d%d handle %#x\n", lgt->domain->domain_id, + op->handle); op->status = GNTST_bad_handle; return; } @@ -1346,14 +1323,14 @@ unmap_common( if ( op->dev_bus_addr && unlikely(op->dev_bus_addr != mfn_to_maddr(act->mfn)) ) PIN_FAIL(act_release_out, GNTST_general_error, - "Bus address doesn't match gntref (%"PRIx64" != %"PRIpaddr")\n", + "Bus address doesn't match gntref (%" PRIx64 " != %" PRIpaddr + ")\n", op->dev_bus_addr, mfn_to_maddr(act->mfn)); if ( op->host_addr && (flags & GNTMAP_host_map) ) { - if ( (rc = replace_grant_host_mapping(op->host_addr, - op->mfn, op->new_addr, - flags)) < 0 ) + if ( (rc = replace_grant_host_mapping(op->host_addr, op->mfn, + op->new_addr, flags)) < 0 ) goto act_release_out; map->flags &= ~GNTMAP_host_map; @@ -1366,15 +1343,15 @@ unmap_common( op->done |= GNTMAP_device_map | (flags & 
GNTMAP_readonly); } - if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) ) + if ( !(map->flags & (GNTMAP_device_map | GNTMAP_host_map)) ) { map->flags = 0; put_handle = true; } - act_release_out: +act_release_out: active_entry_release(act); - unlock_out: +unlock_out: grant_read_unlock(rgt); if ( put_handle ) @@ -1402,14 +1379,13 @@ unmap_common( /* If just unmapped a writable mapping, mark as dirtied */ if ( rc == GNTST_okay && !(flags & GNTMAP_readonly) ) - gnttab_mark_dirty(rd, op->mfn); + gnttab_mark_dirty(rd, op->mfn); op->status = rc; rcu_unlock_domain(rd); } -static void -unmap_common_complete(struct gnttab_unmap_common *op) +static void unmap_common_complete(struct gnttab_unmap_common *op) { struct domain *ld, *rd = op->rd; struct grant_table *rgt; @@ -1475,7 +1451,7 @@ unmap_common_complete(struct gnttab_unmap_common *op) act->pin -= GNTPIN_hstw_inc; } - if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) && + if ( ((act->pin & (GNTPIN_devw_mask | GNTPIN_hstw_mask)) == 0) && !(op->done & GNTMAP_readonly) ) gnttab_clear_flag(_GTF_writing, status); @@ -1488,10 +1464,8 @@ unmap_common_complete(struct gnttab_unmap_common *op) rcu_unlock_domain(rd); } -static void -unmap_grant_ref( - struct gnttab_unmap_grant_ref *op, - struct gnttab_unmap_common *common) +static void unmap_grant_ref(struct gnttab_unmap_grant_ref *op, + struct gnttab_unmap_common *common) { common->host_addr = op->host_addr; common->dev_bus_addr = op->dev_bus_addr; @@ -1507,10 +1481,9 @@ unmap_grant_ref( op->status = common->status; } - static long -gnttab_unmap_grant_ref( - XEN_GUEST_HANDLE_PARAM(gnttab_unmap_grant_ref_t) uop, unsigned int count) +gnttab_unmap_grant_ref(XEN_GUEST_HANDLE_PARAM(gnttab_unmap_grant_ref_t) uop, + unsigned int count) { int i, c, partial_done, done = 0; struct gnttab_unmap_grant_ref op; @@ -1554,10 +1527,8 @@ fault: return -EFAULT; } -static void -unmap_and_replace( - struct gnttab_unmap_and_replace *op, - struct gnttab_unmap_common *common) +static void unmap_and_replace(struct gnttab_unmap_and_replace *op, + struct gnttab_unmap_common *common) { common->host_addr = op->host_addr; common->new_addr = op->new_addr; @@ -1574,8 +1545,8 @@ unmap_and_replace( } static long -gnttab_unmap_and_replace( - XEN_GUEST_HANDLE_PARAM(gnttab_unmap_and_replace_t) uop, unsigned int count) +gnttab_unmap_and_replace(XEN_GUEST_HANDLE_PARAM(gnttab_unmap_and_replace_t) uop, + unsigned int count) { int i, c, partial_done, done = 0; struct gnttab_unmap_and_replace op; @@ -1619,9 +1590,9 @@ fault: return -EFAULT; } -static int -gnttab_populate_status_frames(struct domain *d, struct grant_table *gt, - unsigned int req_nr_frames) +static int gnttab_populate_status_frames(struct domain *d, + struct grant_table *gt, + unsigned int req_nr_frames) { unsigned i; unsigned req_status_frames; @@ -1650,8 +1621,8 @@ status_alloc_failed: return -ENOMEM; } -static int -gnttab_unpopulate_status_frames(struct domain *d, struct grant_table *gt) +static int gnttab_unpopulate_status_frames(struct domain *d, + struct grant_table *gt) { unsigned int i; @@ -1668,16 +1639,17 @@ gnttab_unpopulate_status_frames(struct domain *d, struct grant_table *gt) */ if ( paging_mode_translate(d) ) { - int rc = gfn_eq(gfn, INVALID_GFN) - ? 0 - : guest_physmap_remove_page(d, gfn, - page_to_mfn(pg), 0); + int rc = + gfn_eq(gfn, INVALID_GFN) + ? 
0 + : guest_physmap_remove_page(d, gfn, page_to_mfn(pg), 0); if ( rc ) { - gprintk(XENLOG_ERR, - "Could not remove status frame %u (GFN %#lx) from P2M\n", - i, gfn_x(gfn)); + gprintk( + XENLOG_ERR, + "Could not remove status frame %u (GFN %#lx) from P2M\n", i, + gfn_x(gfn)); domain_crash(d); return rc; } @@ -1702,8 +1674,8 @@ gnttab_unpopulate_status_frames(struct domain *d, struct grant_table *gt) if ( get_page(pg, d) ) set_bit(_PGC_allocated, &pg->count_info); while ( i-- ) - share_xen_page_with_guest(virt_to_page(gt->status[i]), - d, SHARE_rw); + share_xen_page_with_guest(virt_to_page(gt->status[i]), d, + SHARE_rw); } return -EBUSY; } @@ -1725,8 +1697,7 @@ gnttab_unpopulate_status_frames(struct domain *d, struct grant_table *gt) * Grow the grant table. The caller must hold the grant table's * write lock before calling this function. */ -static int -gnttab_grow_table(struct domain *d, unsigned int req_nr_frames) +static int gnttab_grow_table(struct domain *d, unsigned int req_nr_frames) { struct grant_table *gt = d->grant_table; unsigned int i, j; @@ -1858,20 +1829,18 @@ int grant_table_init(struct domain *d, unsigned int max_grant_frames, /* gnttab_grow_table() allocates a min number of frames, so 0 is okay. */ ret = gnttab_grow_table(d, 0); - unlock: +unlock: grant_write_unlock(gt); - out: +out: if ( ret ) grant_table_destroy(d); return ret; } -static long -gnttab_setup_table( - XEN_GUEST_HANDLE_PARAM(gnttab_setup_table_t) uop, unsigned int count, - unsigned int limit_max) +static long gnttab_setup_table(XEN_GUEST_HANDLE_PARAM(gnttab_setup_table_t) uop, + unsigned int count, unsigned int limit_max) { struct vcpu *curr = current; struct gnttab_setup_table op; @@ -1907,7 +1876,7 @@ gnttab_setup_table( if ( unlikely(op.nr_frames > gt->max_grant_frames) ) { gdprintk(XENLOG_INFO, "d%d is limited to %u grant-table frames\n", - d->domain_id, gt->max_grant_frames); + d->domain_id, gt->max_grant_frames); op.status = GNTST_general_error; goto unlock; } @@ -1924,10 +1893,11 @@ gnttab_setup_table( (grant_to_status_frames(op.nr_frames) > nr_status_frames(gt)))) && gnttab_grow_table(d, op.nr_frames) ) { - gdprintk(XENLOG_INFO, - "Expand grant table of d%d to %u failed. Current: %u Max: %u\n", - d->domain_id, op.nr_frames, nr_grant_frames(gt), - gt->max_grant_frames); + gdprintk( + XENLOG_INFO, + "Expand grant table of d%d to %u failed. Current: %u Max: %u\n", + d->domain_id, op.nr_frames, nr_grant_frames(gt), + gt->max_grant_frames); op.status = GNTST_general_error; goto unlock; } @@ -1944,9 +1914,9 @@ gnttab_setup_table( op.status = GNTST_bad_virt_addr; } - unlock: +unlock: grant_write_unlock(gt); - out: +out: if ( d ) rcu_unlock_domain(d); @@ -1956,9 +1926,8 @@ gnttab_setup_table( return 0; } -static long -gnttab_query_size( - XEN_GUEST_HANDLE_PARAM(gnttab_query_size_t) uop, unsigned int count) +static long gnttab_query_size(XEN_GUEST_HANDLE_PARAM(gnttab_query_size_t) uop, + unsigned int count) { struct gnttab_query_size op; struct domain *d; @@ -1987,13 +1956,13 @@ gnttab_query_size( grant_read_lock(gt); - op.nr_frames = nr_grant_frames(gt); + op.nr_frames = nr_grant_frames(gt); op.max_nr_frames = gt->max_grant_frames; - op.status = GNTST_okay; + op.status = GNTST_okay; grant_read_unlock(gt); - out: +out: if ( d ) rcu_unlock_domain(d); @@ -2007,22 +1976,20 @@ gnttab_query_size( * Check that the given grant reference (rd,ref) allows 'ld' to transfer * ownership of a page frame. If so, lock down the grant entry. 
*/ -static int -gnttab_prepare_for_transfer( - struct domain *rd, struct domain *ld, grant_ref_t ref) +static int gnttab_prepare_for_transfer(struct domain *rd, struct domain *ld, + grant_ref_t ref) { struct grant_table *rgt = rd->grant_table; grant_entry_header_t *sha; - union grant_combo scombo, prev_scombo, new_scombo; - int retries = 0; + union grant_combo scombo, prev_scombo, new_scombo; + int retries = 0; grant_read_lock(rgt); if ( unlikely(ref >= nr_grant_entries(rgt)) ) { - gdprintk(XENLOG_INFO, - "Bad grant reference %#x for transfer to d%d\n", - ref, rd->domain_id); + gdprintk(XENLOG_INFO, "Bad grant reference %#x for transfer to d%d\n", + ref, rd->domain_id); goto fail; } @@ -2030,23 +1997,21 @@ gnttab_prepare_for_transfer( scombo.word = *(u32 *)&sha->flags; - for ( ; ; ) + for ( ;; ) { if ( unlikely(scombo.shorts.flags != GTF_accept_transfer) || unlikely(scombo.shorts.domid != ld->domain_id) ) { - gdprintk(XENLOG_INFO, - "Bad flags (%x) or dom (%d); expected d%d\n", - scombo.shorts.flags, scombo.shorts.domid, - ld->domain_id); + gdprintk(XENLOG_INFO, "Bad flags (%x) or dom (%d); expected d%d\n", + scombo.shorts.flags, scombo.shorts.domid, ld->domain_id); goto fail; } new_scombo = scombo; new_scombo.shorts.flags |= GTF_transfer_committed; - prev_scombo.word = cmpxchg((u32 *)&sha->flags, - scombo.word, new_scombo.word); + prev_scombo.word = + cmpxchg((u32 *)&sha->flags, scombo.word, new_scombo.word); if ( likely(prev_scombo.word == scombo.word) ) break; @@ -2062,14 +2027,13 @@ gnttab_prepare_for_transfer( grant_read_unlock(rgt); return 1; - fail: +fail: grant_read_unlock(rgt); return 0; } -static long -gnttab_transfer( - XEN_GUEST_HANDLE_PARAM(gnttab_transfer_t) uop, unsigned int count) +static long gnttab_transfer(XEN_GUEST_HANDLE_PARAM(gnttab_transfer_t) uop, + unsigned int count) { struct domain *d = current->domain; struct domain *e; @@ -2091,8 +2055,7 @@ gnttab_transfer( /* Read from caller address space. */ if ( unlikely(__copy_from_guest(&gop, uop, 1)) ) { - gdprintk(XENLOG_INFO, "error reading req %d/%u\n", - i, count); + gdprintk(XENLOG_INFO, "error reading req %d/%u\n", i, count); return -EFAULT; } @@ -2134,7 +2097,7 @@ gnttab_transfer( if ( rc ) { gdprintk(XENLOG_INFO, - "can't remove GFN %"PRI_xen_pfn" (MFN %#"PRI_mfn")\n", + "can't remove GFN %" PRI_xen_pfn " (MFN %#" PRI_mfn ")\n", gop.mfn, mfn_x(mfn)); gop.status = GNTST_general_error; goto put_gfn_and_copyback; @@ -2164,14 +2127,15 @@ gnttab_transfer( max_bitsize = domain_clamp_alloc_bitsize( e, e->grant_table->gt_version > 1 || paging_mode_translate(e) - ? BITS_PER_LONG + PAGE_SHIFT : 32 + PAGE_SHIFT); + ? BITS_PER_LONG + PAGE_SHIFT + : 32 + PAGE_SHIFT); if ( max_bitsize < BITS_PER_LONG + PAGE_SHIFT && (mfn_x(mfn) >> (max_bitsize - PAGE_SHIFT)) ) { struct page_info *new_page; - new_page = alloc_domheap_page(e, MEMF_no_owner | - MEMF_bits(max_bitsize)); + new_page = + alloc_domheap_page(e, MEMF_no_owner | MEMF_bits(max_bitsize)); if ( new_page == NULL ) { gop.status = GNTST_address_too_big; @@ -2193,8 +2157,7 @@ gnttab_transfer( * headroom. Also, a domain mustn't have PGC_allocated * pages when it is dying. */ - if ( unlikely(e->is_dying) || - unlikely(e->tot_pages >= e->max_pages) ) + if ( unlikely(e->is_dying) || unlikely(e->tot_pages >= e->max_pages) ) { spin_unlock(&e->page_alloc_lock); @@ -2298,9 +2261,8 @@ gnttab_transfer( * Undo acquire_grant_for_copy(). This has no effect on page type and * reference counts. 
*/ -static void -release_grant_for_copy( - struct domain *rd, grant_ref_t gref, bool readonly) +static void release_grant_for_copy(struct domain *rd, grant_ref_t gref, + bool readonly) { struct grant_table *rgt = rd->grant_table; grant_entry_header_t *sha; @@ -2338,7 +2300,7 @@ release_grant_for_copy( gnttab_mark_dirty(rd, mfn); act->pin -= GNTPIN_hstw_inc; - if ( !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) ) + if ( !(act->pin & (GNTPIN_devw_mask | GNTPIN_hstw_mask)) ) gnttab_clear_flag(_GTF_writing, status); } @@ -2381,11 +2343,10 @@ static void fixup_status_for_copy_pin(const struct active_grant_entry *act, * take one ref count on the target page, stored in *page. * If there is any error, *page = NULL, no ref taken. */ -static int -acquire_grant_for_copy( - struct domain *rd, grant_ref_t gref, domid_t ldom, bool readonly, - mfn_t *mfn, struct page_info **page, uint16_t *page_off, - uint16_t *length, bool allow_transitive) +static int acquire_grant_for_copy(struct domain *rd, grant_ref_t gref, + domid_t ldom, bool readonly, mfn_t *mfn, + struct page_info **page, uint16_t *page_off, + uint16_t *length, bool allow_transitive) { struct grant_table *rgt = rd->grant_table; grant_entry_v2_t *sha2; @@ -2407,8 +2368,8 @@ acquire_grant_for_copy( grant_read_lock(rgt); if ( unlikely(gref >= nr_grant_entries(rgt)) ) - PIN_FAIL(gt_unlock_out, GNTST_bad_gntref, - "Bad grant reference %#x\n", gref); + PIN_FAIL(gt_unlock_out, GNTST_bad_gntref, "Bad grant reference %#x\n", + gref); act = active_entry_acquire(rgt, gref); shah = shared_entry_header(rgt, gref); @@ -2432,10 +2393,10 @@ acquire_grant_for_copy( old_pin = act->pin; if ( sha2 && (shah->flags & GTF_type_mask) == GTF_transitive ) { - if ( (!old_pin || (!readonly && - !(old_pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)))) && - (rc = _set_status_v2(ldom, readonly, 0, shah, act, - status)) != GNTST_okay ) + if ( (!old_pin || (!readonly && !(old_pin & (GNTPIN_devw_mask | + GNTPIN_hstw_mask)))) && + (rc = _set_status_v2(ldom, readonly, 0, shah, act, status)) != + GNTST_okay ) goto unlock_out; if ( !allow_transitive ) @@ -2473,10 +2434,9 @@ acquire_grant_for_copy( active_entry_release(act); grant_read_unlock(rgt); - rc = acquire_grant_for_copy(td, trans_gref, rd->domain_id, - readonly, &grant_mfn, page, - &trans_page_off, &trans_length, - false); + rc = acquire_grant_for_copy(td, trans_gref, rd->domain_id, readonly, + &grant_mfn, page, &trans_page_off, + &trans_length, false); grant_read_lock(rgt); act = active_entry_acquire(rgt, gref); @@ -2495,15 +2455,12 @@ acquire_grant_for_copy( * change, and that nobody else tried to pin/unpin it. If anything * changed, just give up and tell the caller to retry. 
*/ - if ( rgt->gt_version != 2 || - act->pin != old_pin || - (old_pin && (act->domid != ldom || - !mfn_eq(act->mfn, grant_mfn) || - act->start != trans_page_off || - act->length != trans_length || - act->trans_domain != td || - act->trans_gref != trans_gref || - !act->is_sub_page)) ) + if ( rgt->gt_version != 2 || act->pin != old_pin || + (old_pin && + (act->domid != ldom || !mfn_eq(act->mfn, grant_mfn) || + act->start != trans_page_off || act->length != trans_length || + act->trans_domain != td || act->trans_gref != trans_gref || + !act->is_sub_page)) ) { release_grant_for_copy(td, trans_gref, readonly); fixup_status_for_copy_pin(act, status); @@ -2532,13 +2489,12 @@ acquire_grant_for_copy( act->is_sub_page = true; } } - else if ( !old_pin || - (!readonly && !(old_pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask))) ) + else if ( !old_pin || (!readonly && + !(old_pin & (GNTPIN_devw_mask | GNTPIN_hstw_mask))) ) { - if ( (rc = _set_status(rgt->gt_version, ldom, - readonly, 0, shah, act, - status) ) != GNTST_okay ) - goto unlock_out; + if ( (rc = _set_status(rgt->gt_version, ldom, readonly, 0, shah, act, + status)) != GNTST_okay ) + goto unlock_out; td = rd; trans_gref = gref; @@ -2621,24 +2577,24 @@ acquire_grant_for_copy( grant_read_unlock(rgt); return rc; - unlock_out_clear: - if ( !(readonly) && - !(act->pin & (GNTPIN_hstw_mask | GNTPIN_devw_mask)) ) +unlock_out_clear: + if ( !(readonly) && !(act->pin & (GNTPIN_hstw_mask | GNTPIN_devw_mask)) ) gnttab_clear_flag(_GTF_writing, status); if ( !act->pin ) gnttab_clear_flag(_GTF_reading, status); - unlock_out: +unlock_out: active_entry_release(act); - gt_unlock_out: +gt_unlock_out: grant_read_unlock(rgt); return rc; } -struct gnttab_copy_buf { +struct gnttab_copy_buf +{ /* Guest provided. */ struct gnttab_copy_ptr ptr; uint16_t len; @@ -2695,8 +2651,8 @@ static int gnttab_copy_lock_domains(const struct gnttab_copy *op, op->flags & GNTCOPY_source_gref, src); if ( rc < 0 ) goto error; - rc = gnttab_copy_lock_domain(op->dest.domid, - op->flags & GNTCOPY_dest_gref, dest); + rc = gnttab_copy_lock_domain(op->dest.domid, op->flags & GNTCOPY_dest_gref, + dest); if ( rc < 0 ) goto error; @@ -2708,7 +2664,7 @@ static int gnttab_copy_lock_domains(const struct gnttab_copy *op, } return 0; - error: +error: gnttab_copy_unlock_domains(src, dest); return rc; } @@ -2749,11 +2705,9 @@ static int gnttab_copy_claim_buf(const struct gnttab_copy *op, if ( op->flags & gref_flag ) { rc = acquire_grant_for_copy(buf->domain, ptr->u.ref, - current->domain->domain_id, - buf->read_only, - &buf->mfn, &buf->page, - &buf->ptr.offset, &buf->len, - opt_transitive_grants); + current->domain->domain_id, buf->read_only, + &buf->mfn, &buf->page, &buf->ptr.offset, + &buf->len, opt_transitive_grants); if ( rc != GNTST_okay ) goto out; buf->ptr.u.ref = ptr->u.ref; @@ -2761,11 +2715,11 @@ static int gnttab_copy_claim_buf(const struct gnttab_copy *op, } else { - rc = get_paged_frame(ptr->u.gmfn, &buf->mfn, &buf->page, - buf->read_only, buf->domain); + rc = get_paged_frame(ptr->u.gmfn, &buf->mfn, &buf->page, buf->read_only, + buf->domain); if ( rc != GNTST_okay ) - PIN_FAIL(out, rc, - "source frame %"PRI_xen_pfn" invalid\n", ptr->u.gmfn); + PIN_FAIL(out, rc, "source frame %" PRI_xen_pfn " invalid\n", + ptr->u.gmfn); buf->ptr.u.gmfn = ptr->u.gmfn; buf->ptr.offset = 0; @@ -2778,7 +2732,7 @@ static int gnttab_copy_claim_buf(const struct gnttab_copy *op, { if ( !buf->domain->is_dying ) gdprintk(XENLOG_WARNING, - "Could not get writable frame %#"PRI_mfn"\n", + "Could not get writable frame %#" 
PRI_mfn "\n", mfn_x(buf->mfn)); rc = GNTST_general_error; goto out; @@ -2789,7 +2743,7 @@ static int gnttab_copy_claim_buf(const struct gnttab_copy *op, buf->virt = map_domain_page(buf->mfn); rc = GNTST_okay; - out: +out: return rc; } @@ -2818,21 +2772,19 @@ static int gnttab_copy_buf(const struct gnttab_copy *op, op->source.offset + op->len > src->ptr.offset + src->len ) PIN_FAIL(out, GNTST_general_error, "copy source out of bounds: %d < %d || %d > %d\n", - op->source.offset, src->ptr.offset, - op->len, src->len); + op->source.offset, src->ptr.offset, op->len, src->len); if ( op->dest.offset < dest->ptr.offset || op->dest.offset + op->len > dest->ptr.offset + dest->len ) PIN_FAIL(out, GNTST_general_error, "copy dest out of bounds: %d < %d || %d > %d\n", - op->dest.offset, dest->ptr.offset, - op->len, dest->len); + op->dest.offset, dest->ptr.offset, op->len, dest->len); memcpy(dest->virt + op->dest.offset, src->virt + op->source.offset, op->len); gnttab_mark_dirty(dest->domain, dest->mfn); rc = GNTST_okay; - out: +out: return rc; } @@ -2842,8 +2794,8 @@ static int gnttab_copy_one(const struct gnttab_copy *op, { int rc; - if ( !src->domain || op->source.domid != src->ptr.domid || - !dest->domain || op->dest.domid != dest->ptr.domid ) + if ( !src->domain || op->source.domid != src->ptr.domid || !dest->domain || + op->dest.domid != dest->ptr.domid ) { gnttab_copy_release_buf(src); gnttab_copy_release_buf(dest); @@ -2875,7 +2827,7 @@ static int gnttab_copy_one(const struct gnttab_copy *op, } rc = gnttab_copy_buf(op, dest, src); - out: +out: return rc; } @@ -2887,8 +2839,8 @@ static int gnttab_copy_one(const struct gnttab_copy *op, * positive value) a non-zero value is being handed back (zero needs * to be avoided, as that means "success, all done"). */ -static long gnttab_copy( - XEN_GUEST_HANDLE_PARAM(gnttab_copy_t) uop, unsigned int count) +static long gnttab_copy(XEN_GUEST_HANDLE_PARAM(gnttab_copy_t) uop, + unsigned int count) { unsigned int i; struct gnttab_copy op; @@ -2939,8 +2891,7 @@ static long gnttab_copy( return rc; } -static long -gnttab_set_version(XEN_GUEST_HANDLE_PARAM(gnttab_set_version_t) uop) +static long gnttab_set_version(XEN_GUEST_HANDLE_PARAM(gnttab_set_version_t) uop) { gnttab_set_version_t op; struct domain *currd = current->domain; @@ -2976,33 +2927,35 @@ gnttab_set_version(XEN_GUEST_HANDLE_PARAM(gnttab_set_version_t) uop) if ( read_atomic(&_active_entry(gt, i).pin) != 0 ) { gdprintk(XENLOG_WARNING, - "tried to change grant table version from %u to %u, but some grant entries still in use\n", + "tried to change grant table version from %u to %u, but " + "some grant entries still in use\n", gt->gt_version, op.version); res = -EBUSY; goto out_unlock; } } - switch ( gt->gt_version ) + switch (gt->gt_version) { case 1: /* XXX: We could maybe shrink the active grant table here. 
*/ res = gnttab_populate_status_frames(currd, gt, nr_grant_frames(gt)); - if ( res < 0) + if ( res < 0 ) goto out_unlock; break; case 2: for ( i = 0; i < GNTTAB_NR_RESERVED_ENTRIES; i++ ) { - switch ( shared_entry_v2(gt, i).hdr.flags & GTF_type_mask ) + switch (shared_entry_v2(gt, i).hdr.flags & GTF_type_mask) { case GTF_permit_access: - if ( !(shared_entry_v2(gt, i).full_page.frame >> 32) ) - break; - /* fall through */ + if ( !(shared_entry_v2(gt, i).full_page.frame >> 32) ) + break; + /* fall through */ case GTF_transitive: gdprintk(XENLOG_WARNING, - "tried to change grant table version to 1 with non-representable entries\n"); + "tried to change grant table version to 1 with " + "non-representable entries\n"); res = -ERANGE; goto out_unlock; } @@ -3011,7 +2964,7 @@ gnttab_set_version(XEN_GUEST_HANDLE_PARAM(gnttab_set_version_t) uop) } /* Preserve the first 8 entries (toolstack reserved grants). */ - switch ( gt->gt_version ) + switch (gt->gt_version) { case 1: memcpy(reserved_entries, &shared_entry_v1(gt, 0), @@ -3022,12 +2975,13 @@ gnttab_set_version(XEN_GUEST_HANDLE_PARAM(gnttab_set_version_t) uop) { unsigned int flags = shared_entry_v2(gt, i).hdr.flags; - switch ( flags & GTF_type_mask ) + switch (flags & GTF_type_mask) { case GTF_permit_access: reserved_entries[i].flags = flags | status_entry(gt, i); reserved_entries[i].domid = shared_entry_v2(gt, i).hdr.domid; - reserved_entries[i].frame = shared_entry_v2(gt, i).full_page.frame; + reserved_entries[i].frame = + shared_entry_v2(gt, i).full_page.frame; break; default: gdprintk(XENLOG_INFO, @@ -3053,10 +3007,11 @@ gnttab_set_version(XEN_GUEST_HANDLE_PARAM(gnttab_set_version_t) uop) /* Restore the first 8 entries (toolstack reserved grants). */ if ( gt->gt_version ) { - switch ( op.version ) + switch (op.version) { case 1: - memcpy(&shared_entry_v1(gt, 0), reserved_entries, sizeof(reserved_entries)); + memcpy(&shared_entry_v1(gt, 0), reserved_entries, + sizeof(reserved_entries)); break; case 2: for ( i = 0; i < GNTTAB_NR_RESERVED_ENTRIES; i++ ) @@ -3065,8 +3020,7 @@ gnttab_set_version(XEN_GUEST_HANDLE_PARAM(gnttab_set_version_t) uop) reserved_entries[i].flags & (GTF_reading | GTF_writing); shared_entry_v2(gt, i).hdr.flags = reserved_entries[i].flags & ~(GTF_reading | GTF_writing); - shared_entry_v2(gt, i).hdr.domid = - reserved_entries[i].domid; + shared_entry_v2(gt, i).hdr.domid = reserved_entries[i].domid; shared_entry_v2(gt, i).full_page.frame = reserved_entries[i].frame; } @@ -3076,10 +3030,10 @@ gnttab_set_version(XEN_GUEST_HANDLE_PARAM(gnttab_set_version_t) uop) gt->gt_version = op.version; - out_unlock: +out_unlock: grant_write_unlock(gt); - out: +out: op.version = gt->gt_version; if ( __copy_to_guest(uop, &op, 1) ) @@ -3095,7 +3049,7 @@ gnttab_get_status_frames(XEN_GUEST_HANDLE_PARAM(gnttab_get_status_frames_t) uop, gnttab_get_status_frames_t op; struct domain *d; struct grant_table *gt; - uint64_t gmfn; + uint64_t gmfn; int i; int rc; @@ -3130,7 +3084,8 @@ gnttab_get_status_frames(XEN_GUEST_HANDLE_PARAM(gnttab_get_status_frames_t) uop, if ( unlikely(op.nr_frames > nr_status_frames(gt)) ) { - gdprintk(XENLOG_INFO, "Requested addresses of d%d for %u grant " + gdprintk(XENLOG_INFO, + "Requested addresses of d%d for %u grant " "status frames, but has only %u\n", d->domain_id, op.nr_frames, nr_status_frames(gt)); op.status = GNTST_general_error; @@ -3154,19 +3109,18 @@ gnttab_get_status_frames(XEN_GUEST_HANDLE_PARAM(gnttab_get_status_frames_t) uop, op.status = GNTST_bad_virt_addr; } - unlock: +unlock: grant_read_unlock(gt); - out2: 
+out2: rcu_unlock_domain(d); - out1: +out1: if ( unlikely(__copy_field_to_guest(uop, &op, status)) ) return -EFAULT; return 0; } -static long -gnttab_get_version(XEN_GUEST_HANDLE_PARAM(gnttab_get_version_t) uop) +static long gnttab_get_version(XEN_GUEST_HANDLE_PARAM(gnttab_get_version_t) uop) { gnttab_get_version_t op; struct domain *d; @@ -3196,8 +3150,7 @@ gnttab_get_version(XEN_GUEST_HANDLE_PARAM(gnttab_get_version_t) uop) return 0; } -static s16 -swap_grant_ref(grant_ref_t ref_a, grant_ref_t ref_b) +static s16 swap_grant_ref(grant_ref_t ref_a, grant_ref_t ref_b) { struct domain *d = rcu_lock_current_domain(); struct grant_table *gt = d->grant_table; @@ -3208,9 +3161,9 @@ swap_grant_ref(grant_ref_t ref_a, grant_ref_t ref_b) grant_write_lock(gt); /* Bounds check on the grant refs */ - if ( unlikely(ref_a >= nr_grant_entries(d->grant_table))) + if ( unlikely(ref_a >= nr_grant_entries(d->grant_table)) ) PIN_FAIL(out, GNTST_bad_gntref, "Bad ref-a %#x\n", ref_a); - if ( unlikely(ref_b >= nr_grant_entries(d->grant_table))) + if ( unlikely(ref_b >= nr_grant_entries(d->grant_table)) ) PIN_FAIL(out, GNTST_bad_gntref, "Bad ref-b %#x\n", ref_b); /* Swapping the same ref is a no-op. */ @@ -3290,8 +3243,7 @@ static int cache_flush(const gnttab_cache_flush_t *cflush, grant_ref_t *cur_ref) void *v; int ret; - if ( (cflush->offset >= PAGE_SIZE) || - (cflush->length > PAGE_SIZE) || + if ( (cflush->offset >= PAGE_SIZE) || (cflush->length > PAGE_SIZE) || (cflush->offset + cflush->length > PAGE_SIZE) || (cflush->op & ~(GNTTAB_CACHE_INVAL | GNTTAB_CACHE_CLEAN)) ) return -EINVAL; @@ -3337,7 +3289,8 @@ static int cache_flush(const gnttab_cache_flush_t *cflush, grant_ref_t *cur_ref) v = map_domain_page(mfn); v += cflush->offset; - if ( (cflush->op & GNTTAB_CACHE_INVAL) && (cflush->op & GNTTAB_CACHE_CLEAN) ) + if ( (cflush->op & GNTTAB_CACHE_INVAL) && + (cflush->op & GNTTAB_CACHE_CLEAN) ) ret = clean_and_invalidate_dcache_va_range(v, cflush->length); else if ( cflush->op & GNTTAB_CACHE_INVAL ) ret = invalidate_dcache_va_range(v, cflush->length); @@ -3359,10 +3312,8 @@ static int cache_flush(const gnttab_cache_flush_t *cflush, grant_ref_t *cur_ref) return ret; } -static long -gnttab_cache_flush(XEN_GUEST_HANDLE_PARAM(gnttab_cache_flush_t) uop, - grant_ref_t *cur_ref, - unsigned int count) +static long gnttab_cache_flush(XEN_GUEST_HANDLE_PARAM(gnttab_cache_flush_t) uop, + grant_ref_t *cur_ref, unsigned int count) { unsigned int i; gnttab_cache_flush_t op; @@ -3373,7 +3324,7 @@ gnttab_cache_flush(XEN_GUEST_HANDLE_PARAM(gnttab_cache_flush_t) uop, return i; if ( unlikely(__copy_from_guest(&op, uop, 1)) ) return -EFAULT; - for ( ; ; ) + for ( ;; ) { int ret = cache_flush(&op, cur_ref); @@ -3393,9 +3344,8 @@ gnttab_cache_flush(XEN_GUEST_HANDLE_PARAM(gnttab_cache_flush_t) uop, return 0; } -long -do_grant_table_op( - unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) uop, unsigned int count) +long do_grant_table_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) uop, + unsigned int count) { long rc; unsigned int opaque_in = cmd & GNTTABOP_ARG_MASK, opaque_out = 0; @@ -3407,12 +3357,12 @@ do_grant_table_op( return -EINVAL; rc = -EFAULT; - switch ( cmd ) + switch (cmd) { case GNTTABOP_map_grant_ref: { - XEN_GUEST_HANDLE_PARAM(gnttab_map_grant_ref_t) map = - guest_handle_cast(uop, gnttab_map_grant_ref_t); + XEN_GUEST_HANDLE_PARAM(gnttab_map_grant_ref_t) + map = guest_handle_cast(uop, gnttab_map_grant_ref_t); if ( unlikely(!guest_handle_okay(map, count)) ) goto out; @@ -3427,8 +3377,8 @@ do_grant_table_op( case 
GNTTABOP_unmap_grant_ref: { - XEN_GUEST_HANDLE_PARAM(gnttab_unmap_grant_ref_t) unmap = - guest_handle_cast(uop, gnttab_unmap_grant_ref_t); + XEN_GUEST_HANDLE_PARAM(gnttab_unmap_grant_ref_t) + unmap = guest_handle_cast(uop, gnttab_unmap_grant_ref_t); if ( unlikely(!guest_handle_okay(unmap, count)) ) goto out; @@ -3443,8 +3393,8 @@ do_grant_table_op( case GNTTABOP_unmap_and_replace: { - XEN_GUEST_HANDLE_PARAM(gnttab_unmap_and_replace_t) unmap = - guest_handle_cast(uop, gnttab_unmap_and_replace_t); + XEN_GUEST_HANDLE_PARAM(gnttab_unmap_and_replace_t) + unmap = guest_handle_cast(uop, gnttab_unmap_and_replace_t); if ( unlikely(!guest_handle_okay(unmap, count)) ) goto out; @@ -3458,15 +3408,15 @@ do_grant_table_op( } case GNTTABOP_setup_table: - rc = gnttab_setup_table( - guest_handle_cast(uop, gnttab_setup_table_t), count, UINT_MAX); + rc = gnttab_setup_table(guest_handle_cast(uop, gnttab_setup_table_t), + count, UINT_MAX); ASSERT(rc <= 0); break; case GNTTABOP_transfer: { - XEN_GUEST_HANDLE_PARAM(gnttab_transfer_t) transfer = - guest_handle_cast(uop, gnttab_transfer_t); + XEN_GUEST_HANDLE_PARAM(gnttab_transfer_t) + transfer = guest_handle_cast(uop, gnttab_transfer_t); if ( unlikely(!guest_handle_okay(transfer, count)) ) goto out; @@ -3481,8 +3431,8 @@ do_grant_table_op( case GNTTABOP_copy: { - XEN_GUEST_HANDLE_PARAM(gnttab_copy_t) copy = - guest_handle_cast(uop, gnttab_copy_t); + XEN_GUEST_HANDLE_PARAM(gnttab_copy_t) + copy = guest_handle_cast(uop, gnttab_copy_t); if ( unlikely(!guest_handle_okay(copy, count)) ) goto out; @@ -3497,8 +3447,8 @@ do_grant_table_op( } case GNTTABOP_query_size: - rc = gnttab_query_size( - guest_handle_cast(uop, gnttab_query_size_t), count); + rc = gnttab_query_size(guest_handle_cast(uop, gnttab_query_size_t), + count); ASSERT(rc <= 0); break; @@ -3509,7 +3459,7 @@ do_grant_table_op( case GNTTABOP_get_status_frames: rc = gnttab_get_status_frames( guest_handle_cast(uop, gnttab_get_status_frames_t), count, - UINT_MAX); + UINT_MAX); break; case GNTTABOP_get_version: @@ -3518,8 +3468,8 @@ do_grant_table_op( case GNTTABOP_swap_grant_ref: { - XEN_GUEST_HANDLE_PARAM(gnttab_swap_grant_ref_t) swap = - guest_handle_cast(uop, gnttab_swap_grant_ref_t); + XEN_GUEST_HANDLE_PARAM(gnttab_swap_grant_ref_t) + swap = guest_handle_cast(uop, gnttab_swap_grant_ref_t); if ( unlikely(!guest_handle_okay(swap, count)) ) goto out; @@ -3534,8 +3484,8 @@ do_grant_table_op( case GNTTABOP_cache_flush: { - XEN_GUEST_HANDLE_PARAM(gnttab_cache_flush_t) cflush = - guest_handle_cast(uop, gnttab_cache_flush_t); + XEN_GUEST_HANDLE_PARAM(gnttab_cache_flush_t) + cflush = guest_handle_cast(uop, gnttab_cache_flush_t); if ( unlikely(!guest_handle_okay(cflush, count)) ) goto out; @@ -3554,7 +3504,7 @@ do_grant_table_op( break; } - out: +out: if ( rc > 0 || opaque_out != 0 ) { ASSERT(rc < count); @@ -3570,26 +3520,24 @@ do_grant_table_op( #include "compat/grant_table.c" #endif -void -gnttab_release_mappings( - struct domain *d) +void gnttab_release_mappings(struct domain *d) { - struct grant_table *gt = d->grant_table, *rgt; + struct grant_table *gt = d->grant_table, *rgt; struct grant_mapping *map; - grant_ref_t ref; - grant_handle_t handle; - struct domain *rd; + grant_ref_t ref; + grant_handle_t handle; + struct domain *rd; struct active_grant_entry *act; grant_entry_header_t *sha; - uint16_t *status; - struct page_info *pg; + uint16_t *status; + struct page_info *pg; BUG_ON(!d->is_dying); for ( handle = 0; handle < gt->maptrack_limit; handle++ ) { map = &maptrack_entry(gt, handle); - if ( !(map->flags & 
(GNTMAP_device_map|GNTMAP_host_map)) ) + if ( !(map->flags & (GNTMAP_device_map | GNTMAP_host_map)) ) continue; ref = map->ref; @@ -3653,15 +3601,14 @@ gnttab_release_mappings( if ( gnttab_release_host_mappings(d) && !is_iomem_page(act->mfn) ) { - if ( gnttab_host_mapping_get_page_type((map->flags & - GNTMAP_readonly), - d, rd) ) + if ( gnttab_host_mapping_get_page_type( + (map->flags & GNTMAP_readonly), d, rd) ) put_page_type(pg); put_page(pg); } } - if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 ) + if ( (act->pin & (GNTPIN_devw_mask | GNTPIN_hstw_mask)) == 0 ) gnttab_clear_flag(_GTF_writing, status); } @@ -3701,9 +3648,9 @@ void grant_table_warn_active_grants(struct domain *d) if ( nr_active <= WARN_GRANT_MAX ) printk(XENLOG_G_DEBUG "d%d has active grant %x (" #ifndef NDEBUG - "GFN %lx, " + "GFN %lx, " #endif - "MFN: %#"PRI_mfn")\n", + "MFN: %#" PRI_mfn ")\n", d->domain_id, ref, #ifndef NDEBUG gfn_x(act->gfn), @@ -3721,9 +3668,7 @@ void grant_table_warn_active_grants(struct domain *d) #undef WARN_GRANT_MAX } -void -grant_table_destroy( - struct domain *d) +void grant_table_destroy(struct domain *d) { struct grant_table *t = d->grant_table; int i; @@ -3761,8 +3706,8 @@ void grant_table_init_vcpu(struct vcpu *v) } #ifdef CONFIG_HAS_MEM_SHARING -int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref, - gfn_t *gfn, uint16_t *status) +int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref, gfn_t *gfn, + uint16_t *status) { int rc = 0; uint16_t flags = 0; @@ -3786,9 +3731,9 @@ int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref, flags = sha2->hdr.flags; if ( flags & GTF_sub_page ) - *gfn = _gfn(sha2->sub_page.frame); + *gfn = _gfn(sha2->sub_page.frame); else - *gfn = _gfn(sha2->full_page.frame); + *gfn = _gfn(sha2->full_page.frame); } if ( !rc && (flags & GTF_type_mask) != GTF_permit_access ) @@ -3808,8 +3753,8 @@ int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref, #endif /* caller must hold write lock */ -static int gnttab_get_status_frame_mfn(struct domain *d, - unsigned long idx, mfn_t *mfn) +static int gnttab_get_status_frame_mfn(struct domain *d, unsigned long idx, + mfn_t *mfn) { const struct grant_table *gt = d->grant_table; @@ -3843,8 +3788,8 @@ static int gnttab_get_status_frame_mfn(struct domain *d, } /* caller must hold write lock */ -static int gnttab_get_shared_frame_mfn(struct domain *d, - unsigned long idx, mfn_t *mfn) +static int gnttab_get_shared_frame_mfn(struct domain *d, unsigned long idx, + mfn_t *mfn) { const struct grant_table *gt = d->grant_table; @@ -3905,8 +3850,7 @@ int gnttab_map_frame(struct domain *d, unsigned long idx, gfn_t gfn, mfn_t *mfn) return rc; } -int gnttab_get_shared_frame(struct domain *d, unsigned long idx, - mfn_t *mfn) +int gnttab_get_shared_frame(struct domain *d, unsigned long idx, mfn_t *mfn) { struct grant_table *gt = d->grant_table; int rc; @@ -3918,15 +3862,14 @@ int gnttab_get_shared_frame(struct domain *d, unsigned long idx, return rc; } -int gnttab_get_status_frame(struct domain *d, unsigned long idx, - mfn_t *mfn) +int gnttab_get_status_frame(struct domain *d, unsigned long idx, mfn_t *mfn) { struct grant_table *gt = d->grant_table; int rc; grant_write_lock(gt); - rc = (gt->gt_version == 2) ? - gnttab_get_status_frame_mfn(d, idx, mfn) : -EINVAL; + rc = (gt->gt_version == 2) ? 
gnttab_get_status_frame_mfn(d, idx, mfn) + : -EINVAL; grant_write_unlock(gt); return rc; @@ -3945,9 +3888,9 @@ static void gnttab_usage_print(struct domain *rd) printk("grant-table for remote d%d (v%u)\n" " %u frames (%u max), %u maptrack frames (%u max)\n", - rd->domain_id, gt->gt_version, - nr_grant_frames(gt), gt->max_grant_frames, - nr_maptrack_frames(gt), gt->max_maptrack_frames); + rd->domain_id, gt->gt_version, nr_grant_frames(gt), + gt->max_grant_frames, nr_maptrack_frames(gt), + gt->max_maptrack_frames); for ( ref = 0; ref != nr_grant_entries(gt); ref++ ) { @@ -3979,9 +3922,10 @@ static void gnttab_usage_print(struct domain *rd) first = 0; /* [0xXXX] ddddd 0xXXXXX 0xXXXXXXXX ddddd 0xXXXXXX 0xXX */ - printk("[0x%03x] %5d 0x%"PRI_mfn" 0x%08x %5d 0x%06"PRIx64" 0x%02x\n", - ref, act->domid, mfn_x(act->mfn), act->pin, - sha->domid, frame, status); + printk("[0x%03x] %5d 0x%" PRI_mfn " 0x%08x %5d 0x%06" PRIx64 + " 0x%02x\n", + ref, act->domid, mfn_x(act->mfn), act->pin, sha->domid, frame, + status); active_entry_release(act); } @@ -3995,15 +3939,15 @@ static void gnttab_usage_print_all(unsigned char key) { struct domain *d; printk("%s [ key '%c' pressed\n", __func__, key); - for_each_domain ( d ) + for_each_domain (d) gnttab_usage_print(d); printk("%s ] done\n", __func__); } static int __init gnttab_usage_init(void) { - register_keyhandler('g', gnttab_usage_print_all, - "print grant table usage", 1); + register_keyhandler('g', gnttab_usage_print_all, "print grant table usage", + 1); return 0; } __initcall(gnttab_usage_init); diff --git a/xen/common/guestcopy.c b/xen/common/guestcopy.c index 6d38eefedd..aa06cce87b 100644 --- a/xen/common/guestcopy.c +++ b/xen/common/guestcopy.c @@ -6,8 +6,8 @@ * The function copies a string from the guest and adds a NUL to * make sure the string is correctly terminated. */ -char *safe_copy_string_from_guest(XEN_GUEST_HANDLE(char) u_buf, - size_t size, size_t max_size) +char *safe_copy_string_from_guest(XEN_GUEST_HANDLE(char) u_buf, size_t size, + size_t max_size) { char *tmp; diff --git a/xen/common/gunzip.c b/xen/common/gunzip.c index db4efcd34b..38e9db4ce1 100644 --- a/xen/common/gunzip.c +++ b/xen/common/gunzip.c @@ -10,7 +10,7 @@ static unsigned char *__initdata window; static memptr __initdata free_mem_ptr; static memptr __initdata free_mem_end_ptr; -#define WSIZE 0x80000000 +#define WSIZE 0x80000000 static unsigned char *__initdata inbuf; static unsigned __initdata insize; @@ -21,35 +21,58 @@ static unsigned __initdata inptr; /* Bytes in output buffer: */ static unsigned __initdata outcnt; -#define OF(args) args -#define STATIC static +#define OF(args) args +#define STATIC static -#define memzero(s, n) memset((s), 0, (n)) +#define memzero(s, n) memset((s), 0, (n)) -typedef unsigned char uch; -typedef unsigned short ush; -typedef unsigned long ulg; +typedef unsigned char uch; +typedef unsigned short ush; +typedef unsigned long ulg; -#define INIT __init -#define INITDATA __initdata +#define INIT __init +#define INITDATA __initdata -#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf()) +#define get_byte() (inptr < insize ? 
inbuf[inptr++] : fill_inbuf()) /* Diagnostic functions */ #ifdef DEBUG -# define Assert(cond, msg) do { if (!(cond)) error(msg); } while (0) -# define Trace(x) do { fprintf x; } while (0) -# define Tracev(x) do { if (verbose) fprintf x ; } while (0) -# define Tracevv(x) do { if (verbose > 1) fprintf x ; } while (0) -# define Tracec(c, x) do { if (verbose && (c)) fprintf x ; } while (0) -# define Tracecv(c, x) do { if (verbose > 1 && (c)) fprintf x ; } while (0) +#define Assert(cond, msg) \ + do { \ + if ( !(cond) ) \ + error(msg); \ + } while ( 0 ) +#define Trace(x) \ + do { \ + fprintf x; \ + } while ( 0 ) +#define Tracev(x) \ + do { \ + if ( verbose ) \ + fprintf x; \ + } while ( 0 ) +#define Tracevv(x) \ + do { \ + if ( verbose > 1 ) \ + fprintf x; \ + } while ( 0 ) +#define Tracec(c, x) \ + do { \ + if ( verbose && (c) ) \ + fprintf x; \ + } while ( 0 ) +#define Tracecv(c, x) \ + do { \ + if ( verbose > 1 && (c) ) \ + fprintf x; \ + } while ( 0 ) #else -# define Assert(cond, msg) -# define Trace(x) -# define Tracev(x) -# define Tracevv(x) -# define Tracec(c, x) -# define Tracecv(c, x) +#define Assert(cond, msg) +#define Trace(x) +#define Tracev(x) +#define Tracevv(x) +#define Tracec(c, x) +#define Tracecv(c, x) #endif static long __initdata bytes_out; @@ -62,11 +85,10 @@ static __init void error(char *x) static __init int fill_inbuf(void) { - error("ran out of input data"); - return 0; + error("ran out of input data"); + return 0; } - #include "inflate.c" static __init void flush_window(void) diff --git a/xen/common/inflate.c b/xen/common/inflate.c index f99c985d61..66246dc260 100644 --- a/xen/common/inflate.c +++ b/xen/common/inflate.c @@ -3,9 +3,9 @@ /* inflate.c -- Not copyrighted 1992 by Mark Adler version c10p1, 10 January 1993 */ -/* +/* * Adapted for booting Linux by Hannu Savolainen 1993 - * based on gzip-1.0.3 + * based on gzip-1.0.3 * * Nicolas Pitre , 1999/04/14 : * Little mods for all variable to reside either into rodata or bss segments @@ -50,14 +50,13 @@ chunks), otherwise the dynamic method is used. In the latter case, the codes are customized to the probabilities in the current block, and so can code it much better than the pre-determined fixed codes. - + The Huffman codes themselves are decoded using a multi-level table lookup, in order to maximize the speed of decoding plus the speed of building the decoding tables. See the comments below that precede the lbits and dbits tuning parameters. */ - /* Notes beyond the 1.93a appnote.txt: @@ -110,8 +109,8 @@ static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #"; #ifndef STATIC #if defined(STDC_HEADERS) || defined(HAVE_STDLIB_H) -# include -# include +#include +#include #endif #include "gzip.h" @@ -122,7 +121,7 @@ static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #"; #define INIT #define INITDATA #endif - + #define slide window /* Huffman code lookup table entry--this entry is four bytes for machines @@ -132,19 +131,19 @@ static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #"; the next table, which codes e - 16 bits, and lastly e == 99 indicates an unused code. If a code with e == 99 is looked up, this implies an error in the data. 
*/ -struct huft { - uch e; /* number of extra bits or operation */ - uch b; /* number of bits in this code or subcode */ +struct huft +{ + uch e; /* number of extra bits or operation */ + uch b; /* number of bits in this code or subcode */ union { - ush n; /* literal, length base, or distance base */ - struct huft *t; /* pointer to next level of table */ + ush n; /* literal, length base, or distance base */ + struct huft *t; /* pointer to next level of table */ } v; }; - /* Function prototypes */ -STATIC int INIT huft_build OF((unsigned *, unsigned, unsigned, - const ush *, const ush *, struct huft **, int *)); +STATIC int INIT huft_build OF((unsigned *, unsigned, unsigned, const ush *, + const ush *, struct huft **, int *)); STATIC int INIT huft_free OF((struct huft *)); STATIC int INIT inflate_codes OF((struct huft *, struct huft *, int, int)); STATIC int INIT inflate_stored OF((void)); @@ -153,7 +152,6 @@ STATIC int INIT inflate_dynamic OF((void)); STATIC int INIT inflate_block OF((int *)); STATIC int INIT inflate OF((void)); - /* The inflate algorithm uses a sliding 32 K byte window on the uncompressed stream to find repeated byte strings. This is implemented here as a circular buffer. The index is updated simply by incrementing and then @@ -164,32 +162,34 @@ STATIC int INIT inflate OF((void)); must be in unzip.h, included above. */ /* unsigned wp; current position in slide */ #define wp outcnt -#define flush_output(w) (wp=(w),flush_window()) +#define flush_output(w) (wp = (w), flush_window()) /* Tables for deflate from PKZIP's appnote.txt. */ -static const unsigned border[] = { /* Order of the bit length code lengths */ - 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; -static const ush cplens[] = { /* Copy lengths for literal codes 257..285 */ - 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, - 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; +static const unsigned border[] = {/* Order of the bit length code lengths */ + 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, + 11, 4, 12, 3, 13, 2, 14, 1, 15}; +static const ush cplens[] = {/* Copy lengths for literal codes 257..285 */ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, + 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, + 99, 115, 131, 163, 195, 227, 258, 0, 0}; /* note: see note #13 above about the 258 in this list. 
*/ -static const ush cplext[] = { /* Extra bits for literal codes 257..285 */ - 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, - 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */ -static const ush cpdist[] = { /* Copy offsets for distance codes 0..29 */ - 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, - 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, - 8193, 12289, 16385, 24577}; -static const ush cpdext[] = { /* Extra bits for distance codes */ - 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, - 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, - 12, 12, 13, 13}; - - +static const ush cplext[] = + {/* Extra bits for literal codes 257..285 */ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, + 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */ +static const ush cpdist[] = {/* Copy offsets for distance codes 0..29 */ + 1, 2, 3, 4, 5, 7, 9, 13, + 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, + 4097, 6145, 8193, 12289, 16385, 24577}; +static const ush cpdext[] = {/* Extra bits for distance codes */ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; /* Macros for inflate() bit peeking and grabbing. The usage is: - + NEEDBITS(j) x = b & mask_bits[j]; DUMPBITS(j) @@ -217,18 +217,33 @@ static const ush cpdext[] = { /* Extra bits for distance codes */ the stream. */ -STATIC ulg INITDATA bb; /* bit buffer */ -STATIC unsigned INITDATA bk; /* bits in bit buffer */ - -STATIC const ush mask_bits[] = { - 0x0000, - 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff, - 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff -}; - -#define NEXTBYTE() ({ int v = get_byte(); if (v < 0) goto underrun; (uch)v; }) -#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<>=(n);k-=(n);} +STATIC ulg INITDATA bb; /* bit buffer */ +STATIC unsigned INITDATA bk; /* bits in bit buffer */ + +STATIC const ush mask_bits[] = {0x0000, 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, + 0x003f, 0x007f, 0x00ff, 0x01ff, 0x03ff, 0x07ff, + 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff}; + +#define NEXTBYTE() \ + ({ \ + int v = get_byte(); \ + if ( v < 0 ) \ + goto underrun; \ + (uch) v; \ + }) +#define NEEDBITS(n) \ + { \ + while ( k < (n) ) \ + { \ + b |= ((ulg)NEXTBYTE()) << k; \ + k += 8; \ + } \ + } +#define DUMPBITS(n) \ + { \ + b >>= (n); \ + k -= (n); \ + } #ifndef NO_INFLATE_MALLOC /* A trivial malloc implementation, adapted from @@ -242,17 +257,17 @@ static void *INIT malloc(int size) { void *p; - if (size < 0) + if ( size < 0 ) error("Malloc error"); - if (!malloc_ptr) + if ( !malloc_ptr ) malloc_ptr = free_mem_ptr; - malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */ + malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */ p = (void *)malloc_ptr; malloc_ptr += size; - if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr) + if ( free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr ) error("Out of memory"); malloc_count++; @@ -262,7 +277,7 @@ static void *INIT malloc(int size) static void INIT free(void *where) { malloc_count--; - if (!malloc_count) + if ( !malloc_count ) malloc_ptr = free_mem_ptr; } #else @@ -302,55 +317,52 @@ static void INIT free(void *where) possibly even between compilers. Your mileage may vary. 
*/ - -STATIC const int lbits = 9; /* bits in base literal/length lookup table */ -STATIC const int dbits = 6; /* bits in base distance lookup table */ - +STATIC const int lbits = 9; /* bits in base literal/length lookup table */ +STATIC const int dbits = 6; /* bits in base distance lookup table */ /* If BMAX needs to be larger than 16, then h and x[] should be ulg. */ -#define BMAX 16 /* maximum bit length of any code (16 for explode) */ -#define N_MAX 288 /* maximum number of codes in any set */ - - -STATIC unsigned INITDATA hufts; /* track memory usage */ +#define BMAX 16 /* maximum bit length of any code (16 for explode) */ +#define N_MAX 288 /* maximum number of codes in any set */ +STATIC unsigned INITDATA hufts; /* track memory usage */ STATIC int INIT huft_build( - unsigned *b, /* code lengths in bits (all assumed <= BMAX) */ - unsigned n, /* number of codes (assumed <= N_MAX) */ - unsigned s, /* number of simple-valued codes (0..s-1) */ - const ush *d, /* list of base values for non-simple codes */ - const ush *e, /* list of extra bits for non-simple codes */ - struct huft **t, /* result: starting table */ - int *m /* maximum lookup bits, returns actual */ - ) + unsigned *b, /* code lengths in bits (all assumed <= BMAX) */ + unsigned n, /* number of codes (assumed <= N_MAX) */ + unsigned s, /* number of simple-valued codes (0..s-1) */ + const ush *d, /* list of base values for non-simple codes */ + const ush *e, /* list of extra bits for non-simple codes */ + struct huft **t, /* result: starting table */ + int *m /* maximum lookup bits, returns actual */ +) /* Given a list of code lengths and a maximum table size, make a set of tables to decode that set of codes. Return zero on success, one if the given code set is incomplete (the tables are still built in this case), two if the input is invalid (all zero length codes or an oversubscribed set of lengths), and three if not enough memory. 
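To make the return-code contract above concrete, a small illustrative fragment modelled on the call inflate_dynamic() makes further down; ll is assumed to hold the 19 bit-length code lengths.

    /* Illustrative fragment: build a decode table and act on the result. */
    struct huft *tl;   /* resulting decode table                             */
    int bl = 7;        /* in: requested lookup bits, out: bits actually used */
    int i;

    if ( (i = huft_build(ll, 19, 19, NULL, NULL, &tl, &bl)) != 0 )
    {
        if ( i == 1 )      /* incomplete code set: a table was still built, */
            huft_free(tl); /* so it has to be freed before bailing out      */
        return i;          /* 2 = invalid input, 3 = not enough memory      */
    }
    /* i == 0: tl and bl are ready to hand to inflate_codes() */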
*/ { - unsigned a; /* counter for codes of length k */ - unsigned f; /* i repeats in table every f entries */ - int g; /* maximum code length */ - int h; /* table level */ - register unsigned i; /* counter, current code */ - register unsigned j; /* counter */ - register int k; /* number of bits in current code */ - int l; /* bits per table (returned in m) */ - register unsigned *p; /* pointer into c[], b[], or v[] */ - register struct huft *q; /* points to current table */ - struct huft r; /* table entry for structure assignment */ - register int w; /* bits before this table == (l * h) */ - unsigned *xp; /* pointer into x */ - int y; /* number of dummy codes added */ - unsigned z; /* number of entries in current table */ - struct { - unsigned c[BMAX+1]; /* bit length count table */ - struct huft *u[BMAX]; /* table stack */ - unsigned v[N_MAX]; /* values in order of bit length */ - unsigned x[BMAX+1]; /* bit offsets, then code stack */ - } *stk; + unsigned a; /* counter for codes of length k */ + unsigned f; /* i repeats in table every f entries */ + int g; /* maximum code length */ + int h; /* table level */ + register unsigned i; /* counter, current code */ + register unsigned j; /* counter */ + register int k; /* number of bits in current code */ + int l; /* bits per table (returned in m) */ + register unsigned *p; /* pointer into c[], b[], or v[] */ + register struct huft *q; /* points to current table */ + struct huft r; /* table entry for structure assignment */ + register int w; /* bits before this table == (l * h) */ + unsigned *xp; /* pointer into x */ + int y; /* number of dummy codes added */ + unsigned z; /* number of entries in current table */ + struct + { + unsigned c[BMAX + 1]; /* bit length count table */ + struct huft *u[BMAX]; /* table stack */ + unsigned v[N_MAX]; /* values in order of bit length */ + unsigned x[BMAX + 1]; /* bit offsets, then code stack */ + } * stk; unsigned *c, *v, *x; struct huft **u; int ret; @@ -358,8 +370,8 @@ STATIC int INIT huft_build( DEBG("huft1 "); stk = malloc(sizeof(*stk)); - if (stk == NULL) - return 3; /* out of memory */ + if ( stk == NULL ) + return 3; /* out of memory */ c = stk->c; v = stk->v; @@ -368,14 +380,16 @@ STATIC int INIT huft_build( /* Generate counts for each bit length */ memzero(stk->c, sizeof(stk->c)); - p = b; i = n; + p = b; + i = n; do { - Tracecv(*p, (stderr, (n-i >= ' ' && n-i <= '~' ? "%c %d\n" : "0x%x %d\n"), - n-i, *p)); - c[*p]++; /* assume all entries <= BMAX */ - p++; /* Can't combine with above line (Solaris bug) */ - } while (--i); - if (c[0] == n) /* null input--all zero length codes */ + Tracecv(*p, (stderr, + (n - i >= ' ' && n - i <= '~' ? 
"%c %d\n" : "0x%x %d\n"), + n - i, *p)); + c[*p]++; /* assume all entries <= BMAX */ + p++; /* Can't combine with above line (Solaris bug) */ + } while ( --i ); + if ( c[0] == n ) /* null input--all zero length codes */ { *t = (struct huft *)NULL; *m = 0; @@ -387,29 +401,31 @@ STATIC int INIT huft_build( /* Find minimum and maximum length, bound *m by those */ l = *m; - for (j = 1; j <= BMAX; j++) - if (c[j]) + for ( j = 1; j <= BMAX; j++ ) + if ( c[j] ) break; - k = j; /* minimum code length */ - if ((unsigned)l < j) + k = j; /* minimum code length */ + if ( (unsigned)l < j ) l = j; - for (i = BMAX; i; i--) - if (c[i]) + for ( i = BMAX; i; i-- ) + if ( c[i] ) break; - g = i; /* maximum code length */ - if ((unsigned)l > i) + g = i; /* maximum code length */ + if ( (unsigned)l > i ) l = i; *m = l; DEBG("huft3 "); /* Adjust last length count to fill out codes, if needed */ - for (y = 1 << j; j < i; j++, y <<= 1) - if ((y -= c[j]) < 0) { - ret = 2; /* bad input: more codes than bits */ + for ( y = 1 << j; j < i; j++, y <<= 1 ) + if ( (y -= c[j]) < 0 ) + { + ret = 2; /* bad input: more codes than bits */ goto out; } - if ((y -= c[i]) < 0) { + if ( (y -= c[i]) < 0 ) + { ret = 2; goto out; } @@ -419,92 +435,98 @@ STATIC int INIT huft_build( /* Generate starting offsets into the value table for each length */ x[1] = j = 0; - p = c + 1; xp = x + 2; - while (--i) { /* note that i == g from above */ + p = c + 1; + xp = x + 2; + while ( --i ) + { /* note that i == g from above */ *xp++ = (j += *p++); } DEBG("huft5 "); /* Make a table of values in order of bit lengths */ - p = b; i = 0; + p = b; + i = 0; do { - if ((j = *p++) != 0) + if ( (j = *p++) != 0 ) v[x[j]++] = i; - } while (++i < n); - n = x[g]; /* set n to length of v */ + } while ( ++i < n ); + n = x[g]; /* set n to length of v */ DEBG("h6 "); /* Generate the Huffman codes and for each, make the table entries */ - x[0] = i = 0; /* first Huffman code is zero */ - p = v; /* grab values in bit order */ - h = -1; /* no tables yet--level -1 */ - w = -l; /* bits decoded == (l * h) */ - u[0] = (struct huft *)NULL; /* just to keep compilers happy */ - q = (struct huft *)NULL; /* ditto */ - z = 0; /* ditto */ + x[0] = i = 0; /* first Huffman code is zero */ + p = v; /* grab values in bit order */ + h = -1; /* no tables yet--level -1 */ + w = -l; /* bits decoded == (l * h) */ + u[0] = (struct huft *)NULL; /* just to keep compilers happy */ + q = (struct huft *)NULL; /* ditto */ + z = 0; /* ditto */ DEBG("h6a "); /* go through the bit lengths (k already is bits in shortest code) */ - for (; k <= g; k++) + for ( ; k <= g; k++ ) { DEBG("h6b "); a = c[k]; - while (a--) + while ( a-- ) { DEBG("h6b1 "); /* here i is the Huffman code of length k bits for value *p */ /* make tables up to required level */ - while (k > w + l) + while ( k > w + l ) { DEBG1("1 "); h++; - w += l; /* previous table always l bits */ + w += l; /* previous table always l bits */ /* compute minimum size table less than or equal to l bits */ - z = (z = g - w) > (unsigned)l ? l : z; /* upper limit on table size */ - if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */ - { /* too few codes for k-w bit table */ + z = (z = g - w) > (unsigned)l + ? 
l + : z; /* upper limit on table size */ + if ( (f = 1 << (j = k - w)) > a + 1 ) /* try a k-w bit table */ + { /* too few codes for k-w bit table */ DEBG1("2 "); - f -= a + 1; /* deduct codes from patterns left */ + f -= a + 1; /* deduct codes from patterns left */ xp = c + k; - if (j < z) - while (++j < z) /* try smaller tables up to z bits */ + if ( j < z ) + while ( ++j < z ) /* try smaller tables up to z bits */ { - if ((f <<= 1) <= *++xp) - break; /* enough codes to use up j bits */ - f -= *xp; /* else deduct codes from patterns */ + if ( (f <<= 1) <= *++xp ) + break; /* enough codes to use up j bits */ + f -= *xp; /* else deduct codes from patterns */ } } DEBG1("3 "); - z = 1 << j; /* table entries for j-bit table */ + z = 1 << j; /* table entries for j-bit table */ /* allocate and link in new table */ - if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) == - (struct huft *)NULL) + if ( (q = (struct huft *)malloc((z + 1) * + sizeof(struct huft))) == + (struct huft *)NULL ) { - if (h) + if ( h ) huft_free(u[0]); - ret = 3; /* not enough memory */ + ret = 3; /* not enough memory */ goto out; } DEBG1("4 "); - hufts += z + 1; /* track memory usage */ - *t = q + 1; /* link to list for huft_free() */ + hufts += z + 1; /* track memory usage */ + *t = q + 1; /* link to list for huft_free() */ *(t = &(q->v.t)) = (struct huft *)NULL; - u[h] = ++q; /* table starts after link */ + u[h] = ++q; /* table starts after link */ DEBG1("5 "); /* connect to last table, if there is one */ - if (h) + if ( h ) { - x[h] = i; /* save pattern for backing up */ - r.b = (uch)l; /* bits to dump before this table */ - r.e = (uch)(16 + j); /* bits in this table */ - r.v.t = q; /* pointer to this table */ - j = i >> (w - l); /* (get around Turbo C bug) */ - u[h-1][j] = r; /* connect to last table */ + x[h] = i; /* save pattern for backing up */ + r.b = (uch)l; /* bits to dump before this table */ + r.e = (uch)(16 + j); /* bits in this table */ + r.v.t = q; /* pointer to this table */ + j = i >> (w - l); /* (get around Turbo C bug) */ + u[h - 1][j] = r; /* connect to last table */ } DEBG1("6 "); } @@ -512,35 +534,35 @@ STATIC int INIT huft_build( /* set up table entry in r */ r.b = (uch)(k - w); - if (p >= v + n) - r.e = 99; /* out of values--invalid code */ - else if (*p < s) + if ( p >= v + n ) + r.e = 99; /* out of values--invalid code */ + else if ( *p < s ) { - r.e = (uch)(*p < 256 ? 16 : 15); /* 256 is end-of-block code */ - r.v.n = (ush)(*p); /* simple code is just the value */ - p++; /* one compiler does not like *p++ */ + r.e = (uch)(*p < 256 ? 
16 : 15); /* 256 is end-of-block code */ + r.v.n = (ush)(*p); /* simple code is just the value */ + p++; /* one compiler does not like *p++ */ } else { - r.e = (uch)e[*p - s]; /* non-simple--look up in lists */ + r.e = (uch)e[*p - s]; /* non-simple--look up in lists */ r.v.n = d[*p++ - s]; } DEBG("h6d "); /* fill code-like entries with r */ f = 1 << (k - w); - for (j = i >> w; j < z; j += f) + for ( j = i >> w; j < z; j += f ) q[j] = r; /* backwards increment the k-bit code i */ - for (j = 1 << (k - 1); i & j; j >>= 1) + for ( j = 1 << (k - 1); i & j; j >>= 1 ) i ^= j; i ^= j; /* backup over finished tables */ - while ((i & ((1 << w) - 1)) != x[h]) + while ( (i & ((1 << w) - 1)) != x[h] ) { - h--; /* don't need to update q */ + h--; /* don't need to update q */ w -= l; } DEBG("h6e "); @@ -553,208 +575,197 @@ STATIC int INIT huft_build( /* Return true (1) if we were given an incomplete table */ ret = y != 0 && g != 1; - out: +out: free(stk); return ret; } - - -STATIC int INIT huft_free( - struct huft *t /* table to free */ - ) +STATIC int INIT huft_free(struct huft *t /* table to free */ +) /* Free the malloc'ed tables built by huft_build(), which makes a linked list of the tables it made, with the links in a dummy first entry of each table. */ { register struct huft *p, *q; - /* Go through linked list, freeing from the malloced (t[-1]) address. */ p = t; - while (p != (struct huft *)NULL) + while ( p != (struct huft *)NULL ) { q = (--p)->v.t; - free((char*)p); + free((char *)p); p = q; - } + } return 0; } - STATIC int INIT inflate_codes( - struct huft *tl, /* literal/length decoder tables */ - struct huft *td, /* distance decoder tables */ - int bl, /* number of bits decoded by tl[] */ - int bd /* number of bits decoded by td[] */ - ) + struct huft *tl, /* literal/length decoder tables */ + struct huft *td, /* distance decoder tables */ + int bl, /* number of bits decoded by tl[] */ + int bd /* number of bits decoded by td[] */ +) /* inflate (decompress) the codes in a deflated (compressed) block. Return an error code or zero if it all goes ok. 
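The meaning of the e member is spread between huft_build() above and inflate_codes() below; the following summary gathers those conventions in one place. The enumerator names are illustrative, the code itself uses the bare numbers.

/* Illustrative only: how inflate_codes() interprets a huft.e value. */
enum
{
    HUFT_E_EOB     = 15, /* end-of-block code                                */
    HUFT_E_LITERAL = 16, /* literal byte, value in v.n                       */
    HUFT_E_INVALID = 99  /* unused/invalid table slot                        */
    /* e < 15:  length/distance code, e extra bits follow the base in v.n    */
    /* e > 16:  link entry, (e - 16) further input bits index the            */
    /*          next-level table reached through v.t                         */
};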
*/ { - register unsigned e; /* table entry flag/number of extra bits */ - unsigned n, d; /* length and index for copy */ - unsigned w; /* current window position */ - struct huft *t; /* pointer to table entry */ - unsigned ml, md; /* masks for bl and bd bits */ - register ulg b; /* bit buffer */ - register unsigned k; /* number of bits in bit buffer */ - + register unsigned e; /* table entry flag/number of extra bits */ + unsigned n, d; /* length and index for copy */ + unsigned w; /* current window position */ + struct huft *t; /* pointer to table entry */ + unsigned ml, md; /* masks for bl and bd bits */ + register ulg b; /* bit buffer */ + register unsigned k; /* number of bits in bit buffer */ /* make local copies of globals */ - b = bb; /* initialize bit buffer */ + b = bb; /* initialize bit buffer */ k = bk; - w = wp; /* initialize window position */ + w = wp; /* initialize window position */ /* inflate the coded data */ - ml = mask_bits[bl]; /* precompute masks for speed */ + ml = mask_bits[bl]; /* precompute masks for speed */ md = mask_bits[bd]; - for (;;) /* do until end of block */ + for ( ;; ) /* do until end of block */ { NEEDBITS((unsigned)bl) - if ((e = (t = tl + ((unsigned)b & ml))->e) > 16) + if ( (e = (t = tl + ((unsigned)b & ml))->e) > 16 ) + do { + if ( e == 99 ) + return 1; + DUMPBITS(t->b) + e -= 16; + NEEDBITS(e) + } while ( (e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > + 16 ); + DUMPBITS(t->b) + if ( e == 16 ) /* then it's a literal */ + { + slide[w++] = (uch)t->v.n; + Tracevv((stderr, "%c", slide[w - 1])); + if ( w == WSIZE ) + { + flush_output(w); + w = 0; + } + } + else /* it's an EOB or a length */ + { + /* exit if end of block */ + if ( e == 15 ) + break; + + /* get length of block to copy */ + NEEDBITS(e) + n = t->v.n + ((unsigned)b & mask_bits[e]); + DUMPBITS(e); + + /* decode distance of block to copy */ + NEEDBITS((unsigned)bd) + if ( (e = (t = td + ((unsigned)b & md))->e) > 16 ) do { - if (e == 99) + if ( e == 99 ) return 1; DUMPBITS(t->b) - e -= 16; + e -= 16; NEEDBITS(e) - } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16); - DUMPBITS(t->b) - if (e == 16) /* then it's a literal */ - { - slide[w++] = (uch)t->v.n; - Tracevv((stderr, "%c", slide[w-1])); - if (w == WSIZE) + } while ( (e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > + 16 ); + DUMPBITS(t->b) + NEEDBITS(e) + d = w - t->v.n - ((unsigned)b & mask_bits[e]); + DUMPBITS(e) + Tracevv((stderr, "\\[%d,%d]", w - d, n)); + + /* do the copy */ + do { + n -= (e = (e = WSIZE - ((d &= WSIZE - 1) > w ? d : w)) > n ? 
n + : e); +#if !defined(NOMEMCPY) && !defined(DEBUG) + if ( w - d >= e ) /* (this test assumes unsigned comparison) */ + { + memcpy(slide + w, slide + d, e); + w += e; + d += e; + } + else /* do it slow to avoid memcpy() overlap */ +#endif /* !NOMEMCPY */ + do { + slide[w++] = slide[d++]; + Tracevv((stderr, "%c", slide[w - 1])); + } while ( --e ); + if ( w == WSIZE ) { flush_output(w); w = 0; } - } - else /* it's an EOB or a length */ - { - /* exit if end of block */ - if (e == 15) - break; - - /* get length of block to copy */ - NEEDBITS(e) - n = t->v.n + ((unsigned)b & mask_bits[e]); - DUMPBITS(e); - - /* decode distance of block to copy */ - NEEDBITS((unsigned)bd) - if ((e = (t = td + ((unsigned)b & md))->e) > 16) - do { - if (e == 99) - return 1; - DUMPBITS(t->b) - e -= 16; - NEEDBITS(e) - } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16); - DUMPBITS(t->b) - NEEDBITS(e) - d = w - t->v.n - ((unsigned)b & mask_bits[e]); - DUMPBITS(e) - Tracevv((stderr,"\\[%d,%d]", w-d, n)); - - /* do the copy */ - do { - n -= (e = (e = WSIZE - ((d &= WSIZE-1) > w ? d : w)) > n ? n : e); -#if !defined(NOMEMCPY) && !defined(DEBUG) - if (w - d >= e) /* (this test assumes unsigned comparison) */ - { - memcpy(slide + w, slide + d, e); - w += e; - d += e; - } - else /* do it slow to avoid memcpy() overlap */ -#endif /* !NOMEMCPY */ - do { - slide[w++] = slide[d++]; - Tracevv((stderr, "%c", slide[w-1])); - } while (--e); - if (w == WSIZE) - { - flush_output(w); - w = 0; - } - } while (n); - } + } while ( n ); + } } - /* restore the globals from the locals */ - wp = w; /* restore global window pointer */ - bb = b; /* restore global bit buffer */ + wp = w; /* restore global window pointer */ + bb = b; /* restore global bit buffer */ bk = k; /* done */ return 0; - underrun: - return 4; /* Input underrun */ +underrun: + return 4; /* Input underrun */ } - - STATIC int INIT inflate_stored(void) /* "decompress" an inflated type 0 (stored) block. */ { - unsigned n; /* number of bytes in block */ - unsigned w; /* current window position */ - register ulg b; /* bit buffer */ - register unsigned k; /* number of bits in bit buffer */ + unsigned n; /* number of bytes in block */ + unsigned w; /* current window position */ + register ulg b; /* bit buffer */ + register unsigned k; /* number of bits in bit buffer */ DEBG(""); return 0; - underrun: - return 4; /* Input underrun */ +underrun: + return 4; /* Input underrun */ } - /* * We use `noinline' here to prevent gcc-3.5 from using too much stack space */ @@ -763,39 +774,40 @@ STATIC int noinline INIT inflate_fixed(void) either replace this with a custom decoder, or at least precompute the Huffman tables. 
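For reference, the fixed code the comment above refers to is the one defined by DEFLATE (RFC 1951, section 3.2.6). The loop below paraphrases the length table inflate_fixed() builds before calling huft_build(); it is a sketch from the specification, not a quotation of the function body that follows.

    /* Illustrative only: fixed literal/length code lengths per RFC 1951. */
    unsigned l[288];
    int i;

    for ( i = 0; i < 144; i++ )
        l[i] = 8;          /* literals   0..143                        */
    for ( ; i < 256; i++ )
        l[i] = 9;          /* literals 144..255                        */
    for ( ; i < 280; i++ )
        l[i] = 7;          /* 256 (end of block) and lengths 257..279  */
    for ( ; i < 288; i++ )
        l[i] = 8;          /* lengths 280..287                         */
    /* then huft_build(l, 288, 257, cplens, cplext, &tl, &bl) with bl = 7,
     * and all 30 distance codes get length 5 for the second table, which
     * is why the cpdist/cpdext call below tolerates a return code of 1. */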
*/ { - int i; /* temporary variable */ - struct huft *tl; /* literal/length code table */ - struct huft *td; /* distance code table */ - int bl; /* lookup bits for tl */ - int bd; /* lookup bits for td */ - unsigned *l; /* length list for huft_build */ + int i; /* temporary variable */ + struct huft *tl; /* literal/length code table */ + struct huft *td; /* distance code table */ + int bl; /* lookup bits for tl */ + int bd; /* lookup bits for td */ + unsigned *l; /* length list for huft_build */ DEBG(" 1) + if ( (i = huft_build(l, 30, 0, cpdist, cpdext, &td, &bd)) > 1 ) { huft_free(tl); free(l); @@ -804,9 +816,9 @@ STATIC int noinline INIT inflate_fixed(void) return i; } - /* decompress until an end-of-block code */ - if (inflate_codes(tl, td, bl, bd)) { + if ( inflate_codes(tl, td, bl, bd) ) + { free(l); return 1; } @@ -818,87 +830,87 @@ STATIC int noinline INIT inflate_fixed(void) return 0; } - /* * We use `noinline' here to prevent gcc-3.5 from using too much stack space */ STATIC int noinline INIT inflate_dynamic(void) /* decompress an inflated type 2 (dynamic Huffman codes) block. */ { - int i; /* temporary variables */ + int i; /* temporary variables */ unsigned j; - unsigned l; /* last length */ - unsigned m; /* mask for bit lengths table */ - unsigned n; /* number of lengths to get */ - struct huft *tl; /* literal/length code table */ - struct huft *td; /* distance code table */ - int bl; /* lookup bits for tl */ - int bd; /* lookup bits for td */ - unsigned nb; /* number of bit length codes */ - unsigned nl; /* number of literal/length codes */ - unsigned nd; /* number of distance codes */ - unsigned *ll; /* literal/length and distance code lengths */ - register ulg b; /* bit buffer */ - register unsigned k; /* number of bits in bit buffer */ + unsigned l; /* last length */ + unsigned m; /* mask for bit lengths table */ + unsigned n; /* number of lengths to get */ + struct huft *tl; /* literal/length code table */ + struct huft *td; /* distance code table */ + int bl; /* lookup bits for tl */ + int bd; /* lookup bits for td */ + unsigned nb; /* number of bit length codes */ + unsigned nl; /* number of literal/length codes */ + unsigned nd; /* number of distance codes */ + unsigned *ll; /* literal/length and distance code lengths */ + register ulg b; /* bit buffer */ + register unsigned k; /* number of bits in bit buffer */ int ret; DEBG(" 288 || nd > 32) + if ( nl > 288 || nd > 32 ) #else - if (nl > 286 || nd > 30) + if ( nl > 286 || nd > 30 ) #endif - { - ret = 1; /* bad lengths */ - goto out; - } + { + ret = 1; /* bad lengths */ + goto out; + } DEBG("dyn1 "); /* read in bit-length-code lengths */ - for (j = 0; j < nb; j++) + for ( j = 0; j < nb; j++ ) { NEEDBITS(3) - ll[border[j]] = (unsigned)b & 7; + ll[border[j]] = (unsigned)b & 7; DUMPBITS(3) - } - for (; j < 19; j++) + } + for ( ; j < 19; j++ ) ll[border[j]] = 0; DEBG("dyn2 "); /* build decoding table for trees--single level, 7 bit lookup */ bl = 7; - if ((i = huft_build(ll, 19, 19, NULL, NULL, &tl, &bl)) != 0) + if ( (i = huft_build(ll, 19, 19, NULL, NULL, &tl, &bl)) != 0 ) { - if (i == 1) + if ( i == 1 ) huft_free(tl); - ret = i; /* incomplete code set */ + ret = i; /* incomplete code set */ goto out; } @@ -908,49 +920,52 @@ STATIC int noinline INIT inflate_dynamic(void) n = nl + nd; m = mask_bits[bl]; i = l = 0; - while ((unsigned)i < n) + while ( (unsigned)i < n ) { NEEDBITS((unsigned)bl) - j = (td = tl + ((unsigned)b & m))->b; + j = (td = tl + ((unsigned)b & m))->b; DUMPBITS(j) - j = td->v.n; - if (j < 16) /* length of 
code in bits (0..15) */ - ll[i++] = l = j; /* save last length in l */ - else if (j == 16) /* repeat last length 3 to 6 times */ + j = td->v.n; + if ( j < 16 ) /* length of code in bits (0..15) */ + ll[i++] = l = j; /* save last length in l */ + else if ( j == 16 ) /* repeat last length 3 to 6 times */ { NEEDBITS(2) - j = 3 + ((unsigned)b & 3); + j = 3 + ((unsigned)b & 3); DUMPBITS(2) - if ((unsigned)i + j > n) { - ret = 1; - goto out; - } - while (j--) + if ( (unsigned)i + j > n ) + { + ret = 1; + goto out; + } + while ( j-- ) ll[i++] = l; } - else if (j == 17) /* 3 to 10 zero length codes */ + else if ( j == 17 ) /* 3 to 10 zero length codes */ { NEEDBITS(3) - j = 3 + ((unsigned)b & 7); + j = 3 + ((unsigned)b & 7); DUMPBITS(3) - if ((unsigned)i + j > n) { - ret = 1; - goto out; - } - while (j--) + if ( (unsigned)i + j > n ) + { + ret = 1; + goto out; + } + while ( j-- ) ll[i++] = 0; l = 0; } - else /* j == 18: 11 to 138 zero length codes */ + else /* j == 18: 11 to 138 zero length codes */ { NEEDBITS(7) - j = 11 + ((unsigned)b & 0x7f); + j = 11 + ((unsigned)b & 0x7f); DUMPBITS(7) - if ((unsigned)i + j > n) { - ret = 1; - goto out; - } - while (j--) + if ( (unsigned)i + j > n ) + { + ret = 1; + goto out; + } + while ( j-- ) ll[i++] = 0; l = 0; } @@ -971,126 +986,120 @@ STATIC int noinline INIT inflate_dynamic(void) /* build the decoding tables for literal/length and distance codes */ bl = lbits; - if ((i = huft_build(ll, nl, 257, cplens, cplext, &tl, &bl)) != 0) + if ( (i = huft_build(ll, nl, 257, cplens, cplext, &tl, &bl)) != 0 ) { DEBG("dyn5b "); - if (i == 1) { + if ( i == 1 ) + { error("incomplete literal tree"); huft_free(tl); } - ret = i; /* incomplete code set */ + ret = i; /* incomplete code set */ goto out; } DEBG("dyn5c "); bd = dbits; - if ((i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &td, &bd)) != 0) + if ( (i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &td, &bd)) != 0 ) { DEBG("dyn5d "); - if (i == 1) { + if ( i == 1 ) + { error("incomplete distance tree"); #ifdef PKZIP_BUG_WORKAROUND i = 0; } #else - huft_free(td); - } - huft_free(tl); - ret = i; /* incomplete code set */ - goto out; + huft_free(td); + } + huft_free(tl); + ret = i; /* incomplete code set */ + goto out; #endif -} + } -DEBG("dyn6 "); + DEBG("dyn6 "); - /* decompress until an end-of-block code */ -if (inflate_codes(tl, td, bl, bd)) { - ret = 1; - goto out; -} + /* decompress until an end-of-block code */ + if ( inflate_codes(tl, td, bl, bd) ) + { + ret = 1; + goto out; + } -DEBG("dyn7 "); + DEBG("dyn7 "); - /* free the decoding tables, return */ -huft_free(tl); -huft_free(td); + /* free the decoding tables, return */ + huft_free(tl); + huft_free(td); -DEBG(">"); -ret = 0; + DEBG(">"); + ret = 0; out: -free(ll); -return ret; + free(ll); + return ret; underrun: -ret = 4; /* Input underrun */ -goto out; + ret = 4; /* Input underrun */ + goto out; } - - -STATIC int INIT inflate_block( -int *e /* last block flag */ +STATIC int INIT inflate_block(int *e /* last block flag */ ) /* decompress an inflated block */ { -unsigned t; /* block type */ -register ulg b; /* bit buffer */ -register unsigned k; /* number of bits in bit buffer */ - -DEBG(""); /* bad block type */ return 2; - underrun: - return 4; /* Input underrun */ +underrun: + return 4; /* Input underrun */ } - - STATIC int INIT inflate(void) /* decompress an inflated entry */ { - int e; /* last block flag */ - int r; /* result code */ - unsigned h; /* maximum struct huft's malloc'ed */ + int e; /* last block flag */ + int r; /* result code */ + unsigned h; 
/* maximum struct huft's malloc'ed */ /* initialize window, bit buffer */ wp = 0; bk = 0; bb = 0; - /* decompress until the last block */ h = 0; do { @@ -1099,16 +1108,17 @@ STATIC int INIT inflate(void) arch_decomp_wdog(); #endif r = inflate_block(&e); - if (r) + if ( r ) return r; - if (hufts > h) + if ( hufts > h ) h = hufts; - } while (!e); + } while ( !e ); /* Undo too much lookahead. The next read will be byte aligned so we * can discard unused bits in the last meaningful byte. */ - while (bk >= 8) { + while ( bk >= 8 ) + { bk -= 8; inptr--; } @@ -1116,7 +1126,6 @@ STATIC int INIT inflate(void) /* flush out slide */ flush_output(wp); - /* return success */ #ifdef DEBUG fprintf(stderr, "<%u> ", h); @@ -1131,41 +1140,40 @@ STATIC int INIT inflate(void) **********************************************************************/ static ulg INITDATA crc_32_tab[256]; -static ulg INITDATA crc; /* initialized in makecrc() so it'll reside in bss */ +static ulg INITDATA crc; /* initialized in makecrc() so it'll reside in bss */ #define CRC_VALUE (crc ^ 0xffffffffUL) /* - * Code to compute the CRC-32 table. Borrowed from + * Code to compute the CRC-32 table. Borrowed from * gzip-1.0.3/makecrc.c. */ -static void INIT -makecrc(void) +static void INIT makecrc(void) { -/* Not copyrighted 1990 Mark Adler */ + /* Not copyrighted 1990 Mark Adler */ - unsigned long c; /* crc shift register */ - unsigned long e; /* polynomial exclusive-or pattern */ - int i; /* counter for all possible eight bit values */ - int k; /* byte being shifted into crc apparatus */ + unsigned long c; /* crc shift register */ + unsigned long e; /* polynomial exclusive-or pattern */ + int i; /* counter for all possible eight bit values */ + int k; /* byte being shifted into crc apparatus */ /* terms of polynomial defining this crc (except x^32): */ - static const int p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26}; + static const int p[] = {0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26}; /* Make exclusive-or pattern from polynomial */ e = 0; - for (i = 0; i < sizeof(p)/sizeof(int); i++) + for ( i = 0; i < sizeof(p) / sizeof(int); i++ ) e |= 1L << (31 - p[i]); crc_32_tab[0] = 0; - for (i = 1; i < 256; i++) + for ( i = 1; i < 256; i++ ) { c = 0; - for (k = i | 256; k != 1; k >>= 1) + for ( k = i | 256; k != 1; k >>= 1 ) { c = c & 1 ? (c >> 1) ^ e : c >> 1; - if (k & 1) + if ( k & 1 ) c ^= e; } crc_32_tab[i] = c; @@ -1176,13 +1184,14 @@ makecrc(void) } /* gzip flag byte */ -#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ -#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ -#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ -#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ -#define COMMENT 0x10 /* bit 4 set: file comment present */ -#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ -#define RESERVED 0xC0 /* bit 6,7: reserved */ +#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ +#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file \ + */ +#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ +#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ +#define COMMENT 0x10 /* bit 4 set: file comment present */ +#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ +#define RESERVED 0xC0 /* bit 6,7: reserved */ /* * Do the uncompression! 
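As background for the header parsing in gunzip() below, a sketch of the fixed ten-byte gzip member header that the flag bits above come from (RFC 1952 layout). The struct and its field names are illustrative; the code reads the bytes one at a time with NEXTBYTE().

/* Illustrative only: fixed part of a gzip member header (RFC 1952). */
struct gzip_header
{
    uch magic[2];      /* 037, 0213 (or the old 0236), checked below     */
    uch method;        /* 8 = DEFLATED, the only method supported here   */
    uch flags;         /* ASCII_FLAG | CONTINUATION | EXTRA_FIELD | ...  */
    uch mtime[4];      /* modification time, skipped by gunzip()         */
    uch extra_flags;   /* "Ignore extra flags for the moment"            */
    uch os;            /* "Ignore OS type for the moment"                */
};                     /* optional extra field, name and comment follow  */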
@@ -1192,36 +1201,40 @@ static int INIT gunzip(void) uch flags; unsigned char magic[2]; /* magic header */ char method; - ulg orig_crc = 0; /* original crc */ - ulg orig_len = 0; /* original uncompressed length */ + ulg orig_crc = 0; /* original crc */ + ulg orig_len = 0; /* original uncompressed length */ int res; magic[0] = NEXTBYTE(); magic[1] = NEXTBYTE(); - method = NEXTBYTE(); + method = NEXTBYTE(); - if (magic[0] != 037 || - ((magic[1] != 0213) && (magic[1] != 0236))) { + if ( magic[0] != 037 || ((magic[1] != 0213) && (magic[1] != 0236)) ) + { error("bad gzip magic numbers"); return -1; } /* We only support method #8, DEFLATED */ - if (method != 8) { + if ( method != 8 ) + { error("internal error, invalid method"); return -1; } - flags = (uch)get_byte(); - if ((flags & ENCRYPTED) != 0) { + flags = (uch)get_byte(); + if ( (flags & ENCRYPTED) != 0 ) + { error("Input is encrypted"); return -1; } - if ((flags & CONTINUATION) != 0) { + if ( (flags & CONTINUATION) != 0 ) + { error("Multi part input"); return -1; } - if ((flags & RESERVED) != 0) { + if ( (flags & RESERVED) != 0 ) + { error("Input has invalid flags"); return -1; } @@ -1230,29 +1243,37 @@ static int INIT gunzip(void) NEXTBYTE(); NEXTBYTE(); - (void)NEXTBYTE(); /* Ignore extra flags for the moment */ - (void)NEXTBYTE(); /* Ignore OS type for the moment */ + (void)NEXTBYTE(); /* Ignore extra flags for the moment */ + (void)NEXTBYTE(); /* Ignore OS type for the moment */ - if ((flags & EXTRA_FIELD) != 0) { + if ( (flags & EXTRA_FIELD) != 0 ) + { unsigned len = (unsigned)NEXTBYTE(); - len |= ((unsigned)NEXTBYTE())<<8; - while (len--) (void)NEXTBYTE(); + len |= ((unsigned)NEXTBYTE()) << 8; + while ( len-- ) + (void)NEXTBYTE(); } /* Get original file name if it was truncated */ - if ((flags & ORIG_NAME) != 0) { + if ( (flags & ORIG_NAME) != 0 ) + { /* Discard the old name */ - while (NEXTBYTE() != 0) /* null */ ; - } + while ( NEXTBYTE() != 0 ) /* null */ + ; + } /* Discard file comment if any */ - if ((flags & COMMENT) != 0) { - while (NEXTBYTE() != 0) /* null */ ; + if ( (flags & COMMENT) != 0 ) + { + while ( NEXTBYTE() != 0 ) /* null */ + ; } /* Decompress */ - if ((res = inflate())) { - switch (res) { + if ( (res = inflate()) ) + { + switch (res) + { case 0: break; case 1: @@ -1272,33 +1293,35 @@ static int INIT gunzip(void) } return -1; } - + /* Get the crc and original length */ /* crc32 (see algorithm.doc) * uncompressed input size modulo 2^32 */ - orig_crc = (ulg) NEXTBYTE(); - orig_crc |= (ulg) NEXTBYTE() << 8; - orig_crc |= (ulg) NEXTBYTE() << 16; - orig_crc |= (ulg) NEXTBYTE() << 24; - - orig_len = (ulg) NEXTBYTE(); - orig_len |= (ulg) NEXTBYTE() << 8; - orig_len |= (ulg) NEXTBYTE() << 16; - orig_len |= (ulg) NEXTBYTE() << 24; - + orig_crc = (ulg)NEXTBYTE(); + orig_crc |= (ulg)NEXTBYTE() << 8; + orig_crc |= (ulg)NEXTBYTE() << 16; + orig_crc |= (ulg)NEXTBYTE() << 24; + + orig_len = (ulg)NEXTBYTE(); + orig_len |= (ulg)NEXTBYTE() << 8; + orig_len |= (ulg)NEXTBYTE() << 16; + orig_len |= (ulg)NEXTBYTE() << 24; + /* Validate decompression */ - if (orig_crc != CRC_VALUE) { + if ( orig_crc != CRC_VALUE ) + { error("crc error"); return -1; } - if (orig_len != bytes_out) { + if ( orig_len != bytes_out ) + { error("length error"); return -1; } return 0; - underrun: /* NEXTBYTE() goto's here if needed */ +underrun: /* NEXTBYTE() goto's here if needed */ error("out of input data"); return -1; } diff --git a/xen/common/irq.c b/xen/common/irq.c index f42512db33..a55e4ba580 100644 --- a/xen/common/irq.c +++ b/xen/common/irq.c @@ -5,7 
+5,7 @@ int init_one_irq_desc(struct irq_desc *desc) { int err; - if (irq_desc_initialized(desc)) + if ( irq_desc_initialized(desc) ) return 0; if ( !alloc_cpumask_var(&desc->affinity) ) diff --git a/xen/common/kernel.c b/xen/common/kernel.c index 612575430f..f0bb3ac416 100644 --- a/xen/common/kernel.c +++ b/xen/common/kernel.c @@ -1,6 +1,6 @@ /****************************************************************************** * kernel.c - * + * * Copyright (c) 2002-2005 K A Fraser */ @@ -25,7 +25,7 @@ static const char __initconst opt_builtin_cmdline[] = CONFIG_CMDLINE; static int assign_integer_param(const struct kernel_param *param, uint64_t val) { - switch ( param->len ) + switch (param->len) { case sizeof(uint8_t): if ( val > UINT8_MAX && val < (uint64_t)INT8_MIN ) @@ -61,7 +61,7 @@ static int parse_params(const char *cmdline, const struct kernel_param *start, int rc, final_rc = 0; bool bool_assert, found; - for ( ; ; ) + for ( ;; ) { /* Skip whitespace. */ while ( *p == ' ' ) @@ -73,7 +73,7 @@ static int parse_params(const char *cmdline, const struct kernel_param *start, q = optkey = opt; while ( (*p != ' ') && (*p != '\0') ) { - if ( (q-opt) < (sizeof(opt)-1) ) /* avoid overflow */ + if ( (q - opt) < (sizeof(opt) - 1) ) /* avoid overflow */ *q++ = *p; p++; } @@ -88,7 +88,7 @@ static int parse_params(const char *cmdline, const struct kernel_param *start, } else { - optval = q; /* default option value is empty string */ + optval = q; /* default option value is empty string */ q = NULL; } @@ -123,15 +123,14 @@ static int parse_params(const char *cmdline, const struct kernel_param *start, rctmp = 0; found = true; - switch ( param->type ) + switch (param->type) { case OPT_STR: strlcpy(param->par.var, optval, param->len); break; case OPT_UINT: - rctmp = assign_integer_param( - param, - simple_strtoll(optval, &s, 0)); + rctmp = + assign_integer_param(param, simple_strtoll(optval, &s, 0)); if ( *s ) rctmp = -EINVAL; break; @@ -145,9 +144,8 @@ static int parse_params(const char *cmdline, const struct kernel_param *start, assign_integer_param(param, bool_assert); break; case OPT_SIZE: - rctmp = assign_integer_param( - param, - parse_size_and_unit(optval, &s)); + rctmp = assign_integer_param(param, + parse_size_and_unit(optval, &s)); if ( *s ) rctmp = -EINVAL; break; @@ -175,8 +173,8 @@ static int parse_params(const char *cmdline, const struct kernel_param *start, if ( rc ) { - printk("parameter \"%s\" has invalid value \"%s\", rc=%d!\n", - key, optval, rc); + printk("parameter \"%s\" has invalid value \"%s\", rc=%d!\n", key, + optval, rc); final_rc = rc; } if ( !found ) @@ -223,9 +221,13 @@ void __init cmdline_parse(const char *cmdline) int parse_bool(const char *s, const char *e) { - size_t len = e ? ({ ASSERT(e >= s); e - s; }) : strlen(s); + size_t len = e ? ({ + ASSERT(e >= s); + e - s; + }) + : strlen(s); - switch ( len ) + switch (len) { case 1: if ( *s == '1' ) @@ -280,7 +282,11 @@ int parse_boolean(const char *name, const char *s, const char *e) if ( !val ) s += 3; - slen = e ? ({ ASSERT(e >= s); e - s; }) : strlen(s); + slen = e ? ({ + ASSERT(e >= s); + e - s; + }) + : strlen(s); nlen = strlen(name); /* Does s now start with name? 
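The ({ ... }) constructs that clang-format splits awkwardly above are GCC statement expressions; the idiom they implement is simply "length of a string given either as a NUL-terminated pointer or as an [s, e) range". A hedged sketch follows; the helper name span_len is made up, parse_bool() and parse_boolean() open-code the same logic.

/* Illustrative only: the length computation behind the ({ ... }) idiom. */
static size_t span_len(const char *s, const char *e)
{
    if ( e )               /* caller supplied an explicit end pointer     */
    {
        ASSERT(e >= s);    /* same sanity check parse_bool() performs     */
        return e - s;
    }
    return strlen(s);      /* otherwise the argument is NUL-terminated    */
}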
*/ @@ -301,7 +307,7 @@ int parse_boolean(const char *name, const char *s, const char *e) int cmdline_strcmp(const char *frag, const char *name) { - for ( ; ; frag++, name++ ) + for ( ;; frag++, name++ ) { unsigned char f = *frag, n = *name; int res = f - n; @@ -372,7 +378,7 @@ void __init do_initcalls(void) (*call)(); } -# define DO(fn) long do_##fn +#define DO(fn) long do_##fn #endif @@ -384,7 +390,7 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) { bool_t deny = !!xsm_xen_version(XSM_OTHER, cmd); - switch ( cmd ) + switch (cmd) { case XENVER_version: return (xen_major_version() << 16) | xen_minor_version(); @@ -405,10 +411,11 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) xen_compile_info_t info; memset(&info, 0, sizeof(info)); - safe_strcpy(info.compiler, deny ? xen_deny() : xen_compiler()); - safe_strcpy(info.compile_by, deny ? xen_deny() : xen_compile_by()); - safe_strcpy(info.compile_domain, deny ? xen_deny() : xen_compile_domain()); - safe_strcpy(info.compile_date, deny ? xen_deny() : xen_compile_date()); + safe_strcpy(info.compiler, deny ? xen_deny() : xen_compiler()); + safe_strcpy(info.compile_by, deny ? xen_deny() : xen_compile_by()); + safe_strcpy(info.compile_domain, + deny ? xen_deny() : xen_compile_domain()); + safe_strcpy(info.compile_date, deny ? xen_deny() : xen_compile_date()); if ( copy_to_guest(arg, &info, 1) ) return -EFAULT; return 0; @@ -426,19 +433,17 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) return -EFAULT; return 0; } - + case XENVER_platform_parameters: { - xen_platform_parameters_t params = { - .virt_start = HYPERVISOR_VIRT_START - }; + xen_platform_parameters_t params = {.virt_start = + HYPERVISOR_VIRT_START}; if ( copy_to_guest(arg, ¶ms, 1) ) return -EFAULT; return 0; - } - + case XENVER_changeset: { xen_changeset_info_t chgset; @@ -458,23 +463,22 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) if ( copy_from_guest(&fi, arg, 1) ) return -EFAULT; - switch ( fi.submap_idx ) + switch (fi.submap_idx) { case 0: fi.submap = (1U << XENFEAT_memory_op_vnode_supported); if ( VM_ASSIST(d, pae_extended_cr3) ) fi.submap |= (1U << XENFEAT_pae_pgdir_above_4gb); if ( paging_mode_translate(d) ) - fi.submap |= - (1U << XENFEAT_writable_page_tables) | - (1U << XENFEAT_auto_translated_physmap); + fi.submap |= (1U << XENFEAT_writable_page_tables) | + (1U << XENFEAT_auto_translated_physmap); if ( is_hardware_domain(d) ) fi.submap |= 1U << XENFEAT_dom0; #ifdef CONFIG_ARM fi.submap |= (1U << XENFEAT_ARM_SMCCC_supported); #endif #ifdef CONFIG_X86 - switch ( d->guest_type ) + switch (d->guest_type) { case guest_type_pv: fi.submap |= (1U << XENFEAT_mmu_pt_update_preserve_ad) | @@ -513,7 +517,7 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) BUILD_BUG_ON(ARRAY_SIZE(current->domain->handle) != ARRAY_SIZE(hdl)); if ( copy_to_guest(arg, deny ? hdl : current->domain->handle, - ARRAY_SIZE(hdl) ) ) + ARRAY_SIZE(hdl)) ) return -EFAULT; return 0; } diff --git a/xen/common/kexec.c b/xen/common/kexec.c index c14cbb2b9c..14db8a3f06 100644 --- a/xen/common/kexec.c +++ b/xen/common/kexec.c @@ -36,8 +36,12 @@ bool_t kexecing = FALSE; /* Memory regions to store the per cpu register state etc. on a crash. */ -typedef struct { Elf_Note * start; size_t size; } crash_note_range_t; -static crash_note_range_t * crash_notes; +typedef struct +{ + Elf_Note *start; + size_t size; +} crash_note_range_t; +static crash_note_range_t *crash_notes; /* Lock to prevent race conditions when allocating the crash note buffers. 
* It also serves to protect calls to alloc_from_crash_heap when allocating @@ -50,19 +54,21 @@ static cpumask_t crash_saved_cpus; static struct kexec_image *kexec_image[KEXEC_IMAGE_NR]; -#define KEXEC_FLAG_DEFAULT_POS (KEXEC_IMAGE_NR + 0) -#define KEXEC_FLAG_CRASH_POS (KEXEC_IMAGE_NR + 1) -#define KEXEC_FLAG_IN_PROGRESS (KEXEC_IMAGE_NR + 2) -#define KEXEC_FLAG_IN_HYPERCALL (KEXEC_IMAGE_NR + 3) +#define KEXEC_FLAG_DEFAULT_POS (KEXEC_IMAGE_NR + 0) +#define KEXEC_FLAG_CRASH_POS (KEXEC_IMAGE_NR + 1) +#define KEXEC_FLAG_IN_PROGRESS (KEXEC_IMAGE_NR + 2) +#define KEXEC_FLAG_IN_HYPERCALL (KEXEC_IMAGE_NR + 3) -static unsigned long kexec_flags = 0; /* the lowest bits are for KEXEC_IMAGE... */ +static unsigned long kexec_flags = + 0; /* the lowest bits are for KEXEC_IMAGE... */ static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES]; static size_t vmcoreinfo_size = 0; xen_kexec_reserve_t kexec_crash_area; paddr_t __initdata kexec_crash_area_limit = ~(paddr_t)0; -static struct { +static struct +{ u64 start, end; unsigned long size; } ranges[16] __initdata; @@ -107,7 +113,7 @@ static int __init parse_crashkernel(const char *str) const char *cur; int rc = 0; - if ( strchr(str, ':' ) ) + if ( strchr(str, ':') ) { unsigned int idx = 0; @@ -201,19 +207,19 @@ custom_param("crashkernel", parse_crashkernel); */ static int __init parse_low_crashinfo(const char *str) { - if ( !strlen(str) ) /* default to min if user just specifies "low_crashinfo" */ low_crashinfo_mode = LOW_CRASHINFO_MIN; - else if ( !strcmp(str, "none" ) ) + else if ( !strcmp(str, "none") ) low_crashinfo_mode = LOW_CRASHINFO_NONE; - else if ( !strcmp(str, "min" ) ) + else if ( !strcmp(str, "min") ) low_crashinfo_mode = LOW_CRASHINFO_MIN; - else if ( !strcmp(str, "all" ) ) + else if ( !strcmp(str, "all") ) low_crashinfo_mode = LOW_CRASHINFO_ALL; else { - printk("Unknown low_crashinfo parameter '%s'. Defaulting to min.\n", str); + printk("Unknown low_crashinfo parameter '%s'. Defaulting to min.\n", + str); low_crashinfo_mode = LOW_CRASHINFO_MIN; return -EINVAL; } @@ -241,7 +247,8 @@ static int __init parse_crashinfo_maxaddr(const char *str) crashinfo_maxaddr = addr; else { - printk("Unable to parse crashinfo_maxaddr. Defaulting to %"PRIpaddr"\n", + printk("Unable to parse crashinfo_maxaddr. Defaulting to %" PRIpaddr + "\n", crashinfo_maxaddr); return -EINVAL; } @@ -297,7 +304,7 @@ static int noinline one_cpu_only(void) * Another cpu has beaten us to this point. Wait here patiently for * it to kill us. */ - for ( ; ; ) + for ( ;; ) halt(); } @@ -315,7 +322,7 @@ void kexec_crash_save_cpu(void) ELF_Prstatus *prstatus; crash_xen_core_t *xencore; - BUG_ON ( ! crash_notes ); + BUG_ON(!crash_notes); if ( cpumask_test_and_set_cpu(cpu, &crash_saved_cpus) ) return; @@ -422,22 +429,19 @@ static void setup_note(Elf_Note *n, const char *name, int type, int descsz) static size_t sizeof_note(const char *name, int descsz) { - return (sizeof(Elf_Note) + - ELFNOTE_ALIGN(strlen(name)+1) + + return (sizeof(Elf_Note) + ELFNOTE_ALIGN(strlen(name) + 1) + ELFNOTE_ALIGN(descsz)); } static size_t sizeof_cpu_notes(const unsigned long cpu) { /* All CPUs present a PRSTATUS and crash_xen_core note. */ - size_t bytes = - + sizeof_note("CORE", sizeof(ELF_Prstatus)) + - + sizeof_note("Xen", sizeof(crash_xen_core_t)); + size_t bytes = +sizeof_note("CORE", sizeof(ELF_Prstatus)) + + +sizeof_note("Xen", sizeof(crash_xen_core_t)); /* CPU0 also presents the crash_xen_info note. */ - if ( ! 
cpu ) - bytes = bytes + - sizeof_note("Xen", sizeof(crash_xen_info_t)); + if ( !cpu ) + bytes = bytes + sizeof_note("Xen", sizeof(crash_xen_info_t)); return bytes; } @@ -446,12 +450,12 @@ static size_t sizeof_cpu_notes(const unsigned long cpu) * crash heap if the user has requested that crash notes be allocated * in lower memory. There is currently no case where the crash notes * should be free()'d. */ -static void * alloc_from_crash_heap(const size_t bytes) +static void *alloc_from_crash_heap(const size_t bytes) { - void * ret; + void *ret; if ( crash_heap_current + bytes > crash_heap_end ) return NULL; - ret = (void*)crash_heap_current; + ret = (void *)crash_heap_current; crash_heap_current += bytes; return ret; } @@ -459,11 +463,11 @@ static void * alloc_from_crash_heap(const size_t bytes) /* Allocate a crash note buffer for a newly onlined cpu. */ static int kexec_init_cpu_notes(const unsigned long cpu) { - Elf_Note * note = NULL; + Elf_Note *note = NULL; int ret = 0; int nr_bytes = 0; - BUG_ON( cpu >= nr_cpu_ids || ! crash_notes ); + BUG_ON(cpu >= nr_cpu_ids || !crash_notes); /* If already allocated, nothing to do. */ if ( crash_notes[cpu].start ) @@ -501,7 +505,7 @@ static int kexec_init_cpu_notes(const unsigned long cpu) /* If the allocation failed, and another CPU did not beat us, give * up with ENOMEM. */ - if ( ! note ) + if ( !note ) ret = -ENOMEM; /* else all is good so lets set up the notes. */ else @@ -514,7 +518,7 @@ static int kexec_init_cpu_notes(const unsigned long cpu) setup_note(note, "Xen", XEN_ELFNOTE_CRASH_REGS, sizeof(crash_xen_core_t)); - if ( ! cpu ) + if ( !cpu ) { /* Set up Xen Crash Info note. */ xen_crash_note = note = ELFNOTE_NEXT(note); @@ -527,15 +531,15 @@ static int kexec_init_cpu_notes(const unsigned long cpu) return ret; } -static int cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned long cpu = (unsigned long)hcpu; /* Only hook on CPU_UP_PREPARE because once a crash_note has been reported * to dom0, it must keep it around in case of a crash, as the crash kernel * will be hard coded to the original physical address reported. */ - switch ( action ) + switch (action) { case CPU_UP_PREPARE: /* Ignore return value. If this boot time, -ENOMEM will cause all @@ -550,9 +554,7 @@ static int cpu_callback( return NOTIFY_DONE; } -static struct notifier_block cpu_nfb = { - .notifier_call = cpu_callback -}; +static struct notifier_block cpu_nfb = {.notifier_call = cpu_callback}; void __init kexec_early_calculations(void) { @@ -580,15 +582,15 @@ static int __init kexec_init(void) /* This calculation is safe even if the machine is booted in * uniprocessor mode. */ - crash_heap_size = sizeof_cpu_notes(0) + - sizeof_cpu_notes(1) * (nr_cpu_ids - 1); + crash_heap_size = + sizeof_cpu_notes(0) + sizeof_cpu_notes(1) * (nr_cpu_ids - 1); crash_heap_size = PAGE_ALIGN(crash_heap_size); - crash_heap_current = alloc_xenheap_pages( - get_order_from_bytes(crash_heap_size), - MEMF_bits(crashinfo_maxaddr_bits) ); + crash_heap_current = + alloc_xenheap_pages(get_order_from_bytes(crash_heap_size), + MEMF_bits(crashinfo_maxaddr_bits)); - if ( ! crash_heap_current ) + if ( !crash_heap_current ) return -ENOMEM; memset(crash_heap_current, 0, crash_heap_size); @@ -600,7 +602,7 @@ static int __init kexec_init(void) Only the individual CPU crash notes themselves must be allocated in lower memory if requested. 
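A worked example of the note sizing used by sizeof_note() above. It assumes ELFNOTE_ALIGN() rounds up to a 4-byte boundary and that Elf_Note is the usual 12-byte namesz/descsz/type header; the 48-byte descriptor size is made up for illustration.

/*
 *   sizeof_note("CORE", 48) = 12                    note header
 *                           + ELFNOTE_ALIGN(5)      "CORE" plus NUL ->  8
 *                           + ELFNOTE_ALIGN(48)     descriptor      -> 48
 *                           = 68 bytes
 *
 * kexec_init() above then sizes the whole crash heap as
 * sizeof_cpu_notes(0) + sizeof_cpu_notes(1) * (nr_cpu_ids - 1), rounded
 * up to a page, before carving it up with alloc_from_crash_heap().
 */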
*/ crash_notes = xzalloc_array(crash_note_range_t, nr_cpu_ids); - if ( ! crash_notes ) + if ( !crash_notes ) return -ENOMEM; register_keyhandler('C', do_crashdump_trigger, "trigger a crashdump", 0); @@ -616,7 +618,8 @@ presmp_initcall(kexec_init); static int kexec_get_reserve(xen_kexec_range_t *range) { - if ( kexec_crash_area.size > 0 && kexec_crash_area.start > 0) { + if ( kexec_crash_area.size > 0 && kexec_crash_area.start > 0 ) + { range->start = kexec_crash_area.start; range->size = kexec_crash_area.size; } @@ -632,7 +635,7 @@ static int kexec_get_cpu(xen_kexec_range_t *range) if ( nr < 0 || nr >= nr_cpu_ids ) return -ERANGE; - if ( ! crash_notes ) + if ( !crash_notes ) return -EINVAL; /* Try once again to allocate room for the crash notes. It is just possible @@ -665,7 +668,7 @@ static int kexec_get_range_internal(xen_kexec_range_t *range) { int ret = -EINVAL; - switch ( range->range ) + switch (range->range) { case KEXEC_RANGE_MA_CRASH: ret = kexec_get_reserve(range); @@ -722,18 +725,18 @@ static int kexec_get_range_compat(XEN_GUEST_HANDLE_PARAM(void) uarg) { XLAT_kexec_range(&compat_range, &range); if ( unlikely(__copy_to_guest(uarg, &compat_range, 1)) ) - ret = -EFAULT; + ret = -EFAULT; } return ret; -#else /* CONFIG_COMPAT */ +#else /* CONFIG_COMPAT */ return 0; #endif /* CONFIG_COMPAT */ } static int kexec_load_get_bits(int type, int *base, int *bit) { - switch ( type ) + switch (type) { case KEXEC_TYPE_DEFAULT: *base = KEXEC_IMAGE_DEFAULT_BASE; @@ -754,9 +757,10 @@ void vmcoreinfo_append_str(const char *fmt, ...) va_list args; char buf[0x50]; int r; - size_t note_size = sizeof(Elf_Note) + ELFNOTE_ALIGN(strlen(VMCOREINFO_NOTE_NAME) + 1); + size_t note_size = + sizeof(Elf_Note) + ELFNOTE_ALIGN(strlen(VMCOREINFO_NOTE_NAME) + 1); - if (vmcoreinfo_size + note_size + sizeof(buf) > VMCOREINFO_BYTES) + if ( vmcoreinfo_size + note_size + sizeof(buf) > VMCOREINFO_BYTES ) return; va_start(args, fmt); @@ -772,10 +776,12 @@ static void crash_save_vmcoreinfo(void) { size_t data_size; - if (vmcoreinfo_size > 0) /* already saved */ + if ( vmcoreinfo_size > 0 ) /* already saved */ return; - data_size = VMCOREINFO_BYTES - (sizeof(Elf_Note) + ELFNOTE_ALIGN(strlen(VMCOREINFO_NOTE_NAME) + 1)); + data_size = + VMCOREINFO_BYTES - + (sizeof(Elf_Note) + ELFNOTE_ALIGN(strlen(VMCOREINFO_NOTE_NAME) + 1)); setup_note((Elf_Note *)vmcoreinfo_data, VMCOREINFO_NOTE_NAME, 0, data_size); VMCOREINFO_PAGESIZE(PAGE_SIZE); @@ -906,32 +912,30 @@ static uint16_t kexec_load_v1_arch(void) } static int kexec_segments_add_segment(unsigned int *nr_segments, - xen_kexec_segment_t *segments, - mfn_t mfn) + xen_kexec_segment_t *segments, mfn_t mfn) { paddr_t maddr = mfn_to_maddr(mfn); unsigned int n = *nr_segments; /* Need a new segment? 
*/ - if ( n == 0 - || segments[n-1].dest_maddr + segments[n-1].dest_size != maddr ) + if ( n == 0 || + segments[n - 1].dest_maddr + segments[n - 1].dest_size != maddr ) { n++; if ( n > KEXEC_SEGMENT_MAX ) return -EINVAL; *nr_segments = n; - set_xen_guest_handle(segments[n-1].buf.h, NULL); - segments[n-1].buf_size = 0; - segments[n-1].dest_maddr = maddr; - segments[n-1].dest_size = 0; + set_xen_guest_handle(segments[n - 1].buf.h, NULL); + segments[n - 1].buf_size = 0; + segments[n - 1].dest_maddr = maddr; + segments[n - 1].dest_size = 0; } return 0; } -static int kexec_segments_from_ind_page(mfn_t mfn, - unsigned int *nr_segments, +static int kexec_segments_from_ind_page(mfn_t mfn, unsigned int *nr_segments, xen_kexec_segment_t *segments, bool_t compat) { @@ -945,14 +949,14 @@ static int kexec_segments_from_ind_page(mfn_t mfn, * Walk the indirection page list, adding destination pages to the * segments. */ - for ( entry = page; ; ) + for ( entry = page;; ) { unsigned long ind; ind = kimage_entry_ind(entry, compat); mfn = kimage_entry_mfn(entry, compat); - switch ( ind ) + switch (ind) { case IND_DESTINATION: ret = kexec_segments_add_segment(nr_segments, segments, mfn); @@ -971,7 +975,7 @@ static int kexec_segments_from_ind_page(mfn_t mfn, ret = -EINVAL; goto done; } - segments[*nr_segments-1].dest_size += PAGE_SIZE; + segments[*nr_segments - 1].dest_size += PAGE_SIZE; break; default: ret = -EINVAL; @@ -1135,7 +1139,7 @@ static int kexec_load(XEN_GUEST_HANDLE_PARAM(void) uarg) return 0; error: - if ( ! kimage ) + if ( !kimage ) xfree(segments); kimage_free(kimage); return ret; @@ -1220,15 +1224,16 @@ static int do_kexec_op_internal(unsigned long op, return ret; if ( test_and_set_bit(KEXEC_FLAG_IN_HYPERCALL, &kexec_flags) ) - return hypercall_create_continuation(__HYPERVISOR_kexec_op, "lh", op, uarg); + return hypercall_create_continuation(__HYPERVISOR_kexec_op, "lh", op, + uarg); - switch ( op ) + switch (op) { case KEXEC_CMD_kexec_get_range: - if (compat) - ret = kexec_get_range_compat(uarg); + if ( compat ) + ret = kexec_get_range_compat(uarg); else - ret = kexec_get_range(uarg); + ret = kexec_get_range(uarg); break; case KEXEC_CMD_kexec_load_v1: if ( compat ) diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c index c25a30ed13..01afbc3f20 100644 --- a/xen/common/keyhandler.c +++ b/xen/common/keyhandler.c @@ -24,12 +24,13 @@ static unsigned char keypress_key; static bool_t alt_key_handling; -static keyhandler_fn_t show_handlers, dump_hwdom_registers, - dump_domains, read_clocks; -static irq_keyhandler_fn_t do_toggle_alt_key, dump_registers, - reboot_machine, run_all_keyhandlers, do_debug_key; +static keyhandler_fn_t show_handlers, dump_hwdom_registers, dump_domains, + read_clocks; +static irq_keyhandler_fn_t do_toggle_alt_key, dump_registers, reboot_machine, + run_all_keyhandlers, do_debug_key; -static struct keyhandler { +static struct keyhandler +{ union { keyhandler_fn_t *fn; irq_keyhandler_fn_t *irq_fn; @@ -38,22 +39,21 @@ static struct keyhandler { const char *desc; /* Description for help message. */ bool_t irq_callback, /* Call in irq context? if not, tasklet context. */ diagnostic; /* Include in 'dump all' handler. 
*/ -} key_table[128] __read_mostly = -{ -#define KEYHANDLER(k, f, desc, diag) \ - [k] = { { (f) }, desc, 0, diag } +} key_table[128] __read_mostly = { +#define KEYHANDLER(k, f, desc, diag) [k] = {{(f)}, desc, 0, diag} -#define IRQ_KEYHANDLER(k, f, desc, diag) \ - [k] = { { (keyhandler_fn_t *)(f) }, desc, 1, diag } +#define IRQ_KEYHANDLER(k, f, desc, diag) \ + [k] = {{(keyhandler_fn_t *)(f)}, desc, 1, diag} - IRQ_KEYHANDLER('A', do_toggle_alt_key, "toggle alternative key handling", 0), + IRQ_KEYHANDLER('A', do_toggle_alt_key, "toggle alternative key handling", + 0), IRQ_KEYHANDLER('d', dump_registers, "dump registers", 1), - KEYHANDLER('h', show_handlers, "show this message", 0), - KEYHANDLER('q', dump_domains, "dump domain (and guest debug) info", 1), - KEYHANDLER('r', dump_runq, "dump run queues", 1), + KEYHANDLER('h', show_handlers, "show this message", 0), + KEYHANDLER('q', dump_domains, "dump domain (and guest debug) info", 1), + KEYHANDLER('r', dump_runq, "dump run queues", 1), IRQ_KEYHANDLER('R', reboot_machine, "reboot machine", 0), - KEYHANDLER('t', read_clocks, "display multi-cpu clock info", 1), - KEYHANDLER('0', dump_hwdom_registers, "dump Dom0 registers", 1), + KEYHANDLER('t', read_clocks, "display multi-cpu clock info", 1), + KEYHANDLER('0', dump_hwdom_registers, "dump Dom0 registers", 1), IRQ_KEYHANDLER('%', do_debug_key, "trap to xendbg", 0), IRQ_KEYHANDLER('*', run_all_keyhandlers, "print all diagnostics", 0), @@ -129,8 +129,8 @@ static void show_handlers(unsigned char key) printk("'%c' pressed -> showing installed handlers\n", key); for ( i = 0; i < ARRAY_SIZE(key_table); i++ ) if ( key_table[i].fn ) - printk(" key '%c' (ascii '%02x') => %s\n", - isprint(i) ? i : ' ', i, key_table[i].desc); + printk(" key '%c' (ascii '%02x') => %s\n", isprint(i) ? i : ' ', i, + key_table[i].desc); } static cpumask_t dump_execstate_mask; @@ -147,8 +147,8 @@ void dump_execstate(struct cpu_user_regs *regs) if ( !is_idle_vcpu(current) ) { - printk("*** Dumping CPU%u guest state (%pv): ***\n", - smp_processor_id(), current); + printk("*** Dumping CPU%u guest state (%pv): ***\n", smp_processor_id(), + current); show_execution_state(guest_cpu_user_regs()); printk("\n"); } @@ -188,7 +188,7 @@ static void dump_registers(unsigned char key, struct cpu_user_regs *regs) return; /* Normal handling: synchronously dump the remaining CPUs' states. 
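To show what the designated-initializer macros above produce, here is the expansion of one entry of each kind, derived from the KEYHANDLER()/IRQ_KEYHANDLER() definitions and the struct keyhandler layout (union, desc, irq_callback, diagnostic).

/* Illustrative only: what two of the table entries above expand to. */
['h'] = {{show_handlers}, "show this message", 0, 0},
                    /* plain handler: runs from a tasklet, not diagnostic */
['R'] = {{(keyhandler_fn_t *)reboot_machine}, "reboot machine", 1, 0},
                    /* irq_callback = 1: invoked directly in IRQ context  */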
*/ - for_each_cpu ( cpu, &dump_execstate_mask ) + for_each_cpu (cpu, &dump_execstate_mask) { smp_send_state_dump(cpu); while ( cpumask_test_cpu(cpu, &dump_execstate_mask) ) @@ -205,7 +205,7 @@ static void dump_hwdom_action(unsigned long arg) { struct vcpu *v = (void *)arg; - for ( ; ; ) + for ( ;; ) { vcpu_show_execution_state(v); if ( (v = v->next_in_list) == NULL ) @@ -228,7 +228,7 @@ static void dump_hwdom_registers(unsigned char key) printk("'%c' pressed -> dumping Dom0's registers\n", key); - for_each_vcpu ( hardware_domain, v ) + for_each_vcpu (hardware_domain, v) { if ( alt_key_handling && softirq_pending(smp_processor_id()) ) { @@ -251,15 +251,15 @@ static void reboot_machine(unsigned char key, struct cpu_user_regs *regs) static void dump_domains(unsigned char key) { struct domain *d; - struct vcpu *v; - s_time_t now = NOW(); + struct vcpu *v; + s_time_t now = NOW(); - printk("'%c' pressed -> dumping domain info (now = %"PRI_stime")\n", - key, now); + printk("'%c' pressed -> dumping domain info (now = %" PRI_stime ")\n", key, + now); rcu_read_lock(&domlist_read_lock); - for_each_domain ( d ) + for_each_domain (d) { unsigned int i; @@ -269,22 +269,23 @@ static void dump_domains(unsigned char key) printk(" refcnt=%d dying=%d pause_count=%d\n", atomic_read(&d->refcnt), d->is_dying, atomic_read(&d->pause_count)); - printk(" nr_pages=%d xenheap_pages=%d shared_pages=%u paged_pages=%u " - "dirty_cpus={%*pbl} max_pages=%u\n", - d->tot_pages, d->xenheap_pages, atomic_read(&d->shr_pages), - atomic_read(&d->paged_pages), nr_cpu_ids, - cpumask_bits(d->dirty_cpumask), d->max_pages); + printk( + " nr_pages=%d xenheap_pages=%d shared_pages=%u paged_pages=%u " + "dirty_cpus={%*pbl} max_pages=%u\n", + d->tot_pages, d->xenheap_pages, atomic_read(&d->shr_pages), + atomic_read(&d->paged_pages), nr_cpu_ids, + cpumask_bits(d->dirty_cpumask), d->max_pages); printk(" handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-" "%02x%02x-%02x%02x%02x%02x%02x%02x vm_assist=%08lx\n", - d->handle[ 0], d->handle[ 1], d->handle[ 2], d->handle[ 3], - d->handle[ 4], d->handle[ 5], d->handle[ 6], d->handle[ 7], - d->handle[ 8], d->handle[ 9], d->handle[10], d->handle[11], + d->handle[0], d->handle[1], d->handle[2], d->handle[3], + d->handle[4], d->handle[5], d->handle[6], d->handle[7], + d->handle[8], d->handle[9], d->handle[10], d->handle[11], d->handle[12], d->handle[13], d->handle[14], d->handle[15], d->vm_assist); - for ( i = 0 ; i < NR_DOMAIN_WATCHDOG_TIMERS; i++ ) + for ( i = 0; i < NR_DOMAIN_WATCHDOG_TIMERS; i++ ) if ( test_bit(i, &d->watchdog_inuse_map) ) - printk(" watchdog %d expires in %d seconds\n", - i, (u32)((d->watchdog_timer[i].expires - NOW()) >> 30)); + printk(" watchdog %d expires in %d seconds\n", i, + (u32)((d->watchdog_timer[i].expires - NOW()) >> 30)); arch_dump_domain_info(d); @@ -292,28 +293,26 @@ static void dump_domains(unsigned char key) dump_pageframe_info(d); - printk("NODE affinity for domain %d: [%*pbl]\n", - d->domain_id, MAX_NUMNODES, d->node_affinity.bits); + printk("NODE affinity for domain %d: [%*pbl]\n", d->domain_id, + MAX_NUMNODES, d->node_affinity.bits); - printk("VCPU information and callbacks for domain %u:\n", - d->domain_id); - for_each_vcpu ( d, v ) + printk("VCPU information and callbacks for domain %u:\n", d->domain_id); + for_each_vcpu (d, v) { if ( !(v->vcpu_id & 0x3f) ) process_pending_softirqs(); printk(" VCPU%d: CPU%d [has=%c] poll=%d " "upcall_pend=%02x upcall_mask=%02x ", - v->vcpu_id, v->processor, - v->is_running ? 
'T':'F', v->poll_evtchn, - vcpu_info(v, evtchn_upcall_pending), + v->vcpu_id, v->processor, v->is_running ? 'T' : 'F', + v->poll_evtchn, vcpu_info(v, evtchn_upcall_pending), !vcpu_event_delivery_is_enabled(v)); if ( vcpu_cpu_dirty(v) ) printk("dirty_cpu=%u", v->dirty_cpu); printk("\n"); printk(" cpu_hard_affinity={%*pbl} cpu_soft_affinity={%*pbl}\n", - nr_cpu_ids, cpumask_bits(v->cpu_hard_affinity), - nr_cpu_ids, cpumask_bits(v->cpu_soft_affinity)); + nr_cpu_ids, cpumask_bits(v->cpu_hard_affinity), nr_cpu_ids, + cpumask_bits(v->cpu_soft_affinity)); printk(" pause_count=%d pause_flags=%lx\n", atomic_read(&v->pause_count), v->pause_flags); arch_dump_vcpu_info(v); @@ -321,22 +320,22 @@ static void dump_domains(unsigned char key) if ( v->periodic_period == 0 ) printk("No periodic timer\n"); else - printk("%"PRI_stime" Hz periodic timer (period %"PRI_stime" ms)\n", + printk("%" PRI_stime " Hz periodic timer (period %" PRI_stime + " ms)\n", 1000000000 / v->periodic_period, v->periodic_period / 1000000); } } - for_each_domain ( d ) + for_each_domain (d) { - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { if ( !(v->vcpu_id & 0x3f) ) process_pending_softirqs(); - printk("Notifying guest %d:%d (virq %d, port %d)\n", - d->domain_id, v->vcpu_id, - VIRQ_DEBUG, v->virq_to_evtchn[VIRQ_DEBUG]); + printk("Notifying guest %d:%d (virq %d, port %d)\n", d->domain_id, + v->vcpu_id, VIRQ_DEBUG, v->virq_to_evtchn[VIRQ_DEBUG]); send_guest_vcpu_virq(v, VIRQ_DEBUG); } } @@ -387,7 +386,7 @@ static void read_clocks(unsigned char key) cpu_relax(); min_stime_cpu = max_stime_cpu = min_cycles_cpu = max_cycles_cpu = cpu; - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) { if ( per_cpu(read_clocks_time, cpu) < per_cpu(read_clocks_time, min_stime_cpu) ) @@ -419,12 +418,12 @@ static void read_clocks(unsigned char key) maxdif_cycles = dif_cycles; sumdif_cycles += dif_cycles; count++; - printk("Synced stime skew: max=%"PRIu64"ns avg=%"PRIu64"ns " - "samples=%"PRIu32" current=%"PRIu64"ns\n", - maxdif_stime, sumdif_stime/count, count, dif_stime); - printk("Synced cycles skew: max=%"PRIu64" avg=%"PRIu64" " - "samples=%"PRIu32" current=%"PRIu64"\n", - maxdif_cycles, sumdif_cycles/count, count, dif_cycles); + printk("Synced stime skew: max=%" PRIu64 "ns avg=%" PRIu64 "ns " + "samples=%" PRIu32 " current=%" PRIu64 "ns\n", + maxdif_stime, sumdif_stime / count, count, dif_stime); + printk("Synced cycles skew: max=%" PRIu64 " avg=%" PRIu64 " " + "samples=%" PRIu32 " current=%" PRIu64 "\n", + maxdif_cycles, sumdif_cycles / count, count, dif_cycles); } static void run_all_nonirq_keyhandlers(unsigned long unused) @@ -448,8 +447,8 @@ static void run_all_nonirq_keyhandlers(unsigned long unused) console_end_log_everything(); } -static DECLARE_TASKLET(run_all_keyhandlers_tasklet, - run_all_nonirq_keyhandlers, 0); +static DECLARE_TASKLET(run_all_keyhandlers_tasklet, run_all_nonirq_keyhandlers, + 0); static void run_all_keyhandlers(unsigned char key, struct cpu_user_regs *regs) { @@ -498,7 +497,7 @@ void __init initialize_keytable(void) { alt_key_handling = 1; printk(XENLOG_INFO "Defaulting to alternative key handling; " - "send 'A' to switch to normal mode.\n"); + "send 'A' to switch to normal mode.\n"); } } diff --git a/xen/common/kimage.c b/xen/common/kimage.c index 210241dfb7..b30103504c 100644 --- a/xen/common/kimage.c +++ b/xen/common/kimage.c @@ -60,11 +60,10 @@ /* * Offset of the last entry in an indirection page. 
*/ -#define KIMAGE_LAST_ENTRY (PAGE_SIZE/sizeof(kimage_entry_t) - 1) +#define KIMAGE_LAST_ENTRY (PAGE_SIZE / sizeof(kimage_entry_t) - 1) - -static int kimage_is_destination_range(struct kexec_image *image, - paddr_t start, paddr_t end); +static int kimage_is_destination_range(struct kexec_image *image, paddr_t start, + paddr_t end); static struct page_info *kimage_alloc_page(struct kexec_image *image, paddr_t dest); @@ -124,7 +123,7 @@ static int do_kimage_alloc(struct kexec_image **rimage, paddr_t entry, paddr_t mstart, mend; mstart = image->segments[i].dest_maddr; - mend = mstart + image->segments[i].dest_size; + mend = mstart + image->segments[i].dest_size; if ( (mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK) ) goto out; } @@ -142,12 +141,12 @@ static int do_kimage_alloc(struct kexec_image **rimage, paddr_t entry, unsigned long j; mstart = image->segments[i].dest_maddr; - mend = mstart + image->segments[i].dest_size; - for (j = 0; j < i; j++ ) + mend = mstart + image->segments[i].dest_size; + for ( j = 0; j < i; j++ ) { paddr_t pstart, pend; pstart = image->segments[j].dest_maddr; - pend = pstart + image->segments[j].dest_size; + pend = pstart + image->segments[j].dest_size; /* Do the segments overlap? */ if ( (mend > pstart) && (mstart < pend) ) goto out; @@ -166,7 +165,7 @@ static int do_kimage_alloc(struct kexec_image **rimage, paddr_t entry, goto out; } - /* + /* * Page for the relocation code must still be accessible after the * processor has switched to 32-bit mode. */ @@ -174,9 +173,9 @@ static int do_kimage_alloc(struct kexec_image **rimage, paddr_t entry, image->control_code_page = kimage_alloc_control_page(image, MEMF_bits(32)); if ( !image->control_code_page ) goto out; - result = machine_kexec_add_page(image, - page_to_maddr(image->control_code_page), - page_to_maddr(image->control_code_page)); + result = + machine_kexec_add_page(image, page_to_maddr(image->control_code_page), + page_to_maddr(image->control_code_page)); if ( result < 0 ) goto out; @@ -203,7 +202,6 @@ out: } return result; - } static int kimage_normal_alloc(struct kexec_image **rimage, paddr_t entry, @@ -221,8 +219,8 @@ static int kimage_crash_alloc(struct kexec_image **rimage, paddr_t entry, unsigned long i; /* Verify we have a valid entry point */ - if ( (entry < kexec_crash_area.start) - || (entry > kexec_crash_area.start + kexec_crash_area.size)) + if ( (entry < kexec_crash_area.start) || + (entry > kexec_crash_area.start + kexec_crash_area.size) ) return -EADDRNOTAVAIL; /* @@ -244,8 +242,8 @@ static int kimage_crash_alloc(struct kexec_image **rimage, paddr_t entry, mstart = segments[i].dest_maddr; mend = mstart + segments[i].dest_size; /* Ensure we are within the crash kernel limits. 
*/ - if ( (mstart < kexec_crash_area.start ) - || (mend > kexec_crash_area.start + kexec_crash_area.size)) + if ( (mstart < kexec_crash_area.start) || + (mend > kexec_crash_area.start + kexec_crash_area.size) ) return -EADDRNOTAVAIL; } @@ -254,8 +252,7 @@ static int kimage_crash_alloc(struct kexec_image **rimage, paddr_t entry, KEXEC_TYPE_CRASH); } -static int kimage_is_destination_range(struct kexec_image *image, - paddr_t start, +static int kimage_is_destination_range(struct kexec_image *image, paddr_t start, paddr_t end) { unsigned long i; @@ -284,8 +281,8 @@ static void kimage_free_page_list(struct page_list_head *list) } } -static struct page_info *kimage_alloc_normal_control_page( - struct kexec_image *image, unsigned memflags) +static struct page_info * +kimage_alloc_normal_control_page(struct kexec_image *image, unsigned memflags) { /* * Control pages are special, they are the intermediaries that are @@ -315,7 +312,7 @@ static struct page_info *kimage_alloc_normal_control_page( page = kimage_alloc_zeroed_page(memflags); if ( !page ) break; - addr = page_to_maddr(page); + addr = page_to_maddr(page); eaddr = addr + PAGE_SIZE; if ( kimage_is_destination_range(image, addr, eaddr) ) { @@ -349,7 +346,8 @@ static struct page_info *kimage_alloc_normal_control_page( return page; } -static struct page_info *kimage_alloc_crash_control_page(struct kexec_image *image) +static struct page_info * +kimage_alloc_crash_control_page(struct kexec_image *image) { /* * Control pages are special, they are the intermediaries that are @@ -375,7 +373,7 @@ static struct page_info *kimage_alloc_crash_control_page(struct kexec_image *ima struct page_info *page = NULL; hole_start = PAGE_ALIGN(image->next_crash_page); - hole_end = hole_start + PAGE_SIZE; + hole_end = hole_start + PAGE_SIZE; while ( hole_end <= kexec_crash_area.start + kexec_crash_area.size ) { unsigned long i; @@ -386,12 +384,12 @@ static struct page_info *kimage_alloc_crash_control_page(struct kexec_image *ima paddr_t mstart, mend; mstart = image->segments[i].dest_maddr; - mend = mstart + image->segments[i].dest_size; + mend = mstart + image->segments[i].dest_size; if ( (hole_end > mstart) && (hole_start < mend) ) { /* Advance the hole to the end of the segment. */ hole_start = PAGE_ALIGN(mend); - hole_end = hole_start + PAGE_SIZE; + hole_end = hole_start + PAGE_SIZE; break; } } @@ -411,13 +409,12 @@ static struct page_info *kimage_alloc_crash_control_page(struct kexec_image *ima return page; } - struct page_info *kimage_alloc_control_page(struct kexec_image *image, unsigned memflags) { struct page_info *pages = NULL; - switch ( image->type ) + switch (image->type) { case KEXEC_TYPE_DEFAULT: pages = kimage_alloc_normal_control_page(image, memflags); @@ -463,13 +460,11 @@ static int kimage_set_destination(struct kexec_image *image, return kimage_add_entry(image, (destination & PAGE_MASK) | IND_DESTINATION); } - static int kimage_add_page(struct kexec_image *image, paddr_t maddr) { return kimage_add_entry(image, (maddr & PAGE_MASK) | IND_SOURCE); } - static void kimage_free_extra_pages(struct kexec_image *image) { kimage_free_page_list(&image->dest_pages); @@ -490,12 +485,13 @@ static void kimage_terminate(struct kexec_image *image) * * Call unmap_domain_page(ptr) after the loop exits. */ -#define for_each_kimage_entry(image, ptr, entry) \ - for ( ptr = map_domain_page(_mfn(paddr_to_pfn(image->head))); \ - (entry = *ptr) && !(entry & IND_DONE); \ - ptr = (entry & IND_INDIRECTION) ? 
\ - (unmap_domain_page(ptr), map_domain_page(_mfn(paddr_to_pfn(entry)))) \ - : ptr + 1 ) +#define for_each_kimage_entry(image, ptr, entry) \ + for ( ptr = map_domain_page(_mfn(paddr_to_pfn(image->head))); \ + (entry = *ptr) && !(entry & IND_DONE); \ + ptr = (entry & IND_INDIRECTION) \ + ? (unmap_domain_page(ptr), \ + map_domain_page(_mfn(paddr_to_pfn(entry)))) \ + : ptr + 1 ) static void kimage_free_entry(kimage_entry_t entry) { @@ -513,7 +509,7 @@ static void kimage_free_all_entries(struct kexec_image *image) if ( !image->head ) return; - for_each_kimage_entry(image, ptr, entry) + for_each_kimage_entry (image, ptr, entry) { if ( entry & IND_INDIRECTION ) { @@ -545,13 +541,12 @@ void kimage_free(struct kexec_image *image) xfree(image); } -static kimage_entry_t *kimage_dst_used(struct kexec_image *image, - paddr_t maddr) +static kimage_entry_t *kimage_dst_used(struct kexec_image *image, paddr_t maddr) { kimage_entry_t *ptr, entry; unsigned long destination = 0; - for_each_kimage_entry(image, ptr, entry) + for_each_kimage_entry (image, ptr, entry) { if ( entry & IND_DESTINATION ) destination = entry & PAGE_MASK; @@ -595,7 +590,7 @@ static struct page_info *kimage_alloc_page(struct kexec_image *image, * Walk through the list of destination pages, and see if I have a * match. */ - page_list_for_each(page, &image->dest_pages) + page_list_for_each (page, &image->dest_pages) { addr = page_to_maddr(page); if ( addr == destination ) @@ -605,7 +600,7 @@ static struct page_info *kimage_alloc_page(struct kexec_image *image, } } page = NULL; - for (;;) + for ( ;; ) { kimage_entry_t *old; @@ -620,8 +615,7 @@ static struct page_info *kimage_alloc_page(struct kexec_image *image, break; /* If the page is not a destination page use it. */ - if ( !kimage_is_destination_range(image, addr, - addr + PAGE_SIZE) ) + if ( !kimage_is_destination_range(image, addr, addr + PAGE_SIZE) ) break; /* @@ -654,8 +648,8 @@ static struct page_info *kimage_alloc_page(struct kexec_image *image, } } found: - ret = machine_kexec_add_page(image, page_to_maddr(page), - page_to_maddr(page)); + ret = + machine_kexec_add_page(image, page_to_maddr(page), page_to_maddr(page)); if ( ret < 0 ) { free_domheap_page(page); @@ -748,7 +742,8 @@ static int kimage_load_crash_segment(struct kexec_image *image, if ( !dest_va ) return -EINVAL; - ret = copy_from_guest_offset(dest_va, segment->buf.h, src_offset, schunk); + ret = + copy_from_guest_offset(dest_va, segment->buf.h, src_offset, schunk); memset(dest_va + schunk, 0, dchunk - schunk); unmap_domain_page(dest_va); @@ -764,14 +759,15 @@ static int kimage_load_crash_segment(struct kexec_image *image, return 0; } -static int kimage_load_segment(struct kexec_image *image, xen_kexec_segment_t *segment) +static int kimage_load_segment(struct kexec_image *image, + xen_kexec_segment_t *segment) { int result = -ENOMEM; paddr_t addr; if ( !guest_handle_is_null(segment->buf.h) ) { - switch ( image->type ) + switch (image->type) { case KEXEC_TYPE_DEFAULT: result = kimage_load_normal_segment(image, segment); @@ -794,12 +790,12 @@ static int kimage_load_segment(struct kexec_image *image, xen_kexec_segment_t *s } int kimage_alloc(struct kexec_image **rimage, uint8_t type, uint16_t arch, - uint64_t entry_maddr, - uint32_t nr_segments, xen_kexec_segment_t *segment) + uint64_t entry_maddr, uint32_t nr_segments, + xen_kexec_segment_t *segment) { int result; - switch( type ) + switch (type) { case KEXEC_TYPE_DEFAULT: result = kimage_normal_alloc(rimage, entry_maddr, nr_segments, segment); @@ -824,7 +820,8 @@ int 
kimage_load_segments(struct kexec_image *image) int s; int result; - for ( s = 0; s < image->nr_segments; s++ ) { + for ( s = 0; s < image->nr_segments; s++ ) + { result = kimage_load_segment(image, &image->segments[s]); if ( result < 0 ) return result; @@ -854,8 +851,7 @@ unsigned long kimage_entry_ind(kimage_entry_t *entry, bool_t compat) return *entry & 0xf; } -int kimage_build_ind(struct kexec_image *image, mfn_t ind_mfn, - bool_t compat) +int kimage_build_ind(struct kexec_image *image, mfn_t ind_mfn, bool_t compat) { void *page; kimage_entry_t *entry; @@ -870,7 +866,7 @@ int kimage_build_ind(struct kexec_image *image, mfn_t ind_mfn, * Walk the guest-supplied indirection pages, adding entries to * the image's indirection pages. */ - for ( entry = page; ; ) + for ( entry = page;; ) { unsigned long ind; mfn_t mfn; @@ -878,7 +874,7 @@ int kimage_build_ind(struct kexec_image *image, mfn_t ind_mfn, ind = kimage_entry_ind(entry, compat); mfn = kimage_entry_mfn(entry, compat); - switch ( ind ) + switch (ind) { case IND_DESTINATION: dest = mfn_to_maddr(mfn); diff --git a/xen/common/lib.c b/xen/common/lib.c index 8ebec811b3..3638f3b6af 100644 --- a/xen/common/lib.c +++ b/xen/common/lib.c @@ -6,31 +6,63 @@ #include /* for ctype.h */ -const unsigned char _ctype[] = { - _C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ - _C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ - _C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ - _C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ - _S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ - _P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ - _D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ - _D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ - _P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ - _U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ - _U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ - _U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ - _P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */ - _L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ - _L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ - _L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ - _S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ - _P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ - _U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ - _U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ - _L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ - _L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ +const unsigned char + _ctype[] = {_C, _C, _C, _C, _C, _C, + _C, _C, /* 0-7 */ + _C, _C | _S, _C | _S, _C | _S, _C | _S, _C | _S, + _C, _C, /* 8-15 */ + _C, _C, _C, _C, _C, _C, + _C, _C, /* 16-23 */ + _C, _C, _C, _C, _C, _C, + _C, _C, /* 24-31 */ + _S | _SP, _P, _P, _P, _P, _P, + _P, _P, /* 32-39 */ + _P, _P, _P, _P, _P, _P, + _P, _P, /* 40-47 */ + _D, _D, _D, _D, _D, _D, + _D, _D, /* 48-55 */ + _D, _D, _P, _P, _P, _P, + _P, _P, /* 56-63 */ + _P, _U | _X, _U | _X, _U | _X, _U | _X, _U | _X, + _U | _X, _U, /* 64-71 */ + _U, _U, _U, _U, _U, _U, + _U, _U, /* 72-79 */ + _U, _U, _U, _U, _U, _U, + _U, _U, /* 80-87 */ + _U, _U, _U, _P, _P, _P, + _P, _P, /* 88-95 */ + _P, _L | _X, _L | _X, _L | _X, _L | _X, _L | _X, + _L | _X, _L, /* 96-103 */ + _L, _L, _L, _L, _L, _L, + _L, _L, /* 104-111 */ + _L, _L, _L, _L, _L, _L, + _L, _L, /* 112-119 */ + _L, _L, _L, _P, _P, _P, + _P, _C, /* 120-127 */ + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, /* 128-143 */ + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, /* 144-159 */ + _S | _SP, _P, _P, _P, _P, _P, + _P, _P, _P, _P, _P, _P, + _P, 
_P, _P, _P, /* 160-175 */ + _P, _P, _P, _P, _P, _P, + _P, _P, _P, _P, _P, _P, + _P, _P, _P, _P, /* 176-191 */ + _U, _U, _U, _U, _U, _U, + _U, _U, _U, _U, _U, _U, + _U, _U, _U, _U, /* 192-207 */ + _U, _U, _U, _U, _U, _U, + _U, _P, _U, _U, _U, _U, + _U, _U, _U, _L, /* 208-223 */ + _L, _L, _L, _L, _L, _L, + _L, _L, _L, _L, _L, _L, + _L, _L, _L, _L, /* 224-239 */ + _L, _L, _L, _L, _L, _L, + _L, _P, _L, _L, _L, _L, + _L, _L, _L, _L}; /* 240-255 */ /* * A couple of 64 bit operations ported from FreeBSD. @@ -77,10 +109,10 @@ const unsigned char _ctype[] = { * one or more of the following formats. */ union uu { - s64 q; /* as a (signed) quad */ - s64 uq; /* as an unsigned quad */ - long sl[2]; /* as two signed longs */ - unsigned long ul[2]; /* as two unsigned longs */ + s64 q; /* as a (signed) quad */ + s64 uq; /* as an unsigned quad */ + long sl[2]; /* as two signed longs */ + unsigned long ul[2]; /* as two unsigned longs */ }; #ifdef __BIG_ENDIAN @@ -94,18 +126,18 @@ union uu { /* * Define high and low longwords. */ -#define H _QUAD_HIGHWORD -#define L _QUAD_LOWWORD +#define H _QUAD_HIGHWORD +#define L _QUAD_LOWWORD /* * Total number of bits in a quad_t and in the pieces that make it up. * These are used for shifting, and also below for halfword extraction * and assembly. */ -#define CHAR_BIT 8 /* number of bits in a char */ -#define QUAD_BITS (sizeof(s64) * CHAR_BIT) -#define LONG_BITS (sizeof(long) * CHAR_BIT) -#define HALF_BITS (sizeof(long) * CHAR_BIT / 2) +#define CHAR_BIT 8 /* number of bits in a char */ +#define QUAD_BITS (sizeof(s64) * CHAR_BIT) +#define LONG_BITS (sizeof(long) * CHAR_BIT) +#define HALF_BITS (sizeof(long) * CHAR_BIT / 2) /* * Extract high and low shortwords from longword, and move low shortword of @@ -117,9 +149,9 @@ union uu { * and lower halves, and to reassemble a product as a quad_t, shifted left * (sizeof(long)*CHAR_BIT/2). */ -#define HHALF(x) ((x) >> HALF_BITS) -#define LHALF(x) ((x) & ((1 << HALF_BITS) - 1)) -#define LHUP(x) ((x) << HALF_BITS) +#define HHALF(x) ((x) >> HALF_BITS) +#define LHALF(x) ((x) & ((1 << HALF_BITS) - 1)) +#define LHUP(x) ((x) << HALF_BITS) /* * Multiprecision divide. This algorithm is from Knuth vol. 2 (2nd ed), @@ -142,7 +174,7 @@ static void shl(register digit *p, register int len, register int sh) { register int i; - for (i = 0; i < len; i++) + for ( i = 0; i < len; i++ ) p[i] = LHALF(p[i] << sh) | (p[i + 1] >> (HALF_BITS - sh)); p[i] = LHALF(p[i] << sh); } @@ -167,17 +199,19 @@ u64 __qdivrem(u64 uq, u64 vq, u64 *arq) /* * Take care of special cases: divide by zero, and u < v. */ - if (vq == 0) { + if ( vq == 0 ) + { /* divide by zero. */ static volatile const unsigned int zero = 0; tmp.ul[H] = tmp.ul[L] = 1 / zero; - if (arq) + if ( arq ) *arq = uq; return (tmp.q); } - if (uq < vq) { - if (arq) + if ( uq < vq ) + { + if ( arq ) *arq = uq; return (0); } @@ -209,8 +243,10 @@ u64 __qdivrem(u64 uq, u64 vq, u64 *arq) v[2] = LHALF(tmp.ul[H]); v[3] = HHALF(tmp.ul[L]); v[4] = LHALF(tmp.ul[L]); - for (n = 4; v[1] == 0; v++) { - if (--n == 1) { + for ( n = 4; v[1] == 0; v++ ) + { + if ( --n == 1 ) + { unsigned long rbj; /* r*B+u[j] (not root boy jim) */ digit q1, q2, q3, q4; @@ -230,7 +266,7 @@ u64 __qdivrem(u64 uq, u64 vq, u64 *arq) q3 = rbj / t; rbj = COMBINE(rbj % t, u[4]); q4 = rbj / t; - if (arq) + if ( arq ) *arq = rbj % t; tmp.ul[H] = COMBINE(q1, q2); tmp.ul[L] = COMBINE(q3, q4); @@ -243,9 +279,9 @@ u64 __qdivrem(u64 uq, u64 vq, u64 *arq) * there is a complete four-digit quotient at &qspace[1] when * we finally stop. 
*/ - for (m = 4 - n; u[1] == 0; u++) + for ( m = 4 - n; u[1] == 0; u++ ) m--; - for (i = 4 - m; --i >= 0;) + for ( i = 4 - m; --i >= 0; ) q[i] = 0; q += 4 - m; @@ -256,11 +292,12 @@ u64 __qdivrem(u64 uq, u64 vq, u64 *arq) * D1: choose multiplier 1 << d to ensure v[1] >= B/2. */ d = 0; - for (t = v[1]; t < B / 2; t <<= 1) + for ( t = v[1]; t < B / 2; t <<= 1 ) d++; - if (d > 0) { - shl(&u[0], m + n, d); /* u <<= d */ - shl(&v[1], n - 1, d); /* v <<= d */ + if ( d > 0 ) + { + shl(&u[0], m + n, d); /* u <<= d */ + shl(&v[1], n - 1, d); /* v <<= d */ } /* * D2: j = 0. @@ -282,20 +319,24 @@ u64 __qdivrem(u64 uq, u64 vq, u64 *arq) uj0 = u[j + 0]; /* for D3 only -- note that u[j+...] change */ uj1 = u[j + 1]; /* for D3 only */ uj2 = u[j + 2]; /* for D3 only */ - if (uj0 == v1) { + if ( uj0 == v1 ) + { qhat = B; rhat = uj1; goto qhat_too_big; - } else { + } + else + { unsigned long nn = COMBINE(uj0, uj1); qhat = nn / v1; rhat = nn % v1; } - while (v2 * qhat > COMBINE(rhat, uj2)) { + while ( v2 * qhat > COMBINE(rhat, uj2) ) + { qhat_too_big: qhat--; - if ((rhat += v1) >= B) + if ( (rhat += v1) >= B ) break; } /* @@ -304,7 +345,8 @@ u64 __qdivrem(u64 uq, u64 vq, u64 *arq) * We split this up so that we do not require v[0] = 0, * and to eliminate a final special case. */ - for (t = 0, i = n; i > 0; i--) { + for ( t = 0, i = n; i > 0; i-- ) + { t = u[i + j] - v[i] * qhat - t; u[i + j] = LHALF(t); t = (B - HHALF(t)) & (B - 1); @@ -317,9 +359,11 @@ u64 __qdivrem(u64 uq, u64 vq, u64 *arq) * in that (rare) case, qhat was too large (by exactly 1). * Fix it by adding v[1..n] to u[j..j+n]. */ - if (HHALF(t)) { + if ( HHALF(t) ) + { qhat--; - for (t = 0, i = n; i > 0; i--) { /* D6: add back. */ + for ( t = 0, i = n; i > 0; i-- ) + { /* D6: add back. */ t += u[i + j] + v[i]; u[i + j] = LHALF(t); t = HHALF(t); @@ -327,18 +371,19 @@ u64 __qdivrem(u64 uq, u64 vq, u64 *arq) u[j] = LHALF(u[j] + t); } q[j] = qhat; - } while (++j <= m); /* D7: loop on j. */ + } while ( ++j <= m ); /* D7: loop on j. */ /* * If caller wants the remainder, we have to calculate it as * u[m..m+n] >> d (this is at most n digits and thus fits in * u[m+1..m+n], but we may need more source digits). */ - if (arq) { - if (d) { - for (i = m + n; i > m; --i) - u[i] = (u[i] >> d) | - LHALF(u[i - 1] << (HALF_BITS - d)); + if ( arq ) + { + if ( d ) + { + for ( i = m + n; i > m; --i ) + u[i] = (u[i] >> d) | LHALF(u[i - 1] << (HALF_BITS - d)); u[i] = 0; } tmp.ul[H] = COMBINE(uspace[1], uspace[2]); @@ -365,7 +410,6 @@ s64 __divdi3(s64 a, s64 b) return (neg ? -uq : uq); } - /* * Divide two unsigned quads. 
*/ @@ -427,17 +471,18 @@ s64 __ldivmod_helper(s64 a, s64 b, s64 *r) uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) { #ifdef CONFIG_X86 - asm ( "mul %%rdx; div %%rcx" : "=a" (a) : "0" (a), "d" (b), "c" (c) ); + asm("mul %%rdx; div %%rcx" : "=a"(a) : "0"(a), "d"(b), "c"(c)); return a; #else union { uint64_t ll; - struct { + struct + { #ifdef WORDS_BIGENDIAN uint32_t high, low; #else uint32_t low, high; -#endif +#endif } l; } u, res; uint64_t rl, rh; @@ -459,21 +504,26 @@ unsigned long long parse_size_and_unit(const char *s, const char **ps) ret = simple_strtoull(s, &s1, 0); - switch ( *s1 ) + switch (*s1) { - case 'T': case 't': + case 'T': + case 't': ret <<= 10; /* fallthrough */ - case 'G': case 'g': + case 'G': + case 'g': ret <<= 10; /* fallthrough */ - case 'M': case 'm': + case 'M': + case 'm': ret <<= 10; /* fallthrough */ - case 'K': case 'k': + case 'K': + case 'k': ret <<= 10; /* fallthrough */ - case 'B': case 'b': + case 'B': + case 'b': s1++; break; case '%': diff --git a/xen/common/libelf/libelf-dominfo.c b/xen/common/libelf/libelf-dominfo.c index 508f08db42..358096eea6 100644 --- a/xen/common/libelf/libelf-dominfo.c +++ b/xen/common/libelf/libelf-dominfo.c @@ -27,14 +27,12 @@ static const char *const elf_xen_feature_names[] = { [XENFEAT_supervisor_mode_kernel] = "supervisor_mode_kernel", [XENFEAT_pae_pgdir_above_4gb] = "pae_pgdir_above_4gb", [XENFEAT_hvm_callback_vector] = "hvm_callback_vector", - [XENFEAT_dom0] = "dom0" -}; + [XENFEAT_dom0] = "dom0"}; static const unsigned elf_xen_features = -sizeof(elf_xen_feature_names) / sizeof(elf_xen_feature_names[0]); + sizeof(elf_xen_feature_names) / sizeof(elf_xen_feature_names[0]); elf_errorstatus elf_xen_parse_features(const char *features, - uint32_t *supported, - uint32_t *required) + uint32_t *supported, uint32_t *required) { unsigned char feature[64]; unsigned pos, len, i; @@ -47,7 +45,7 @@ elf_errorstatus elf_xen_parse_features(const char *features, elf_memset_unchecked(feature, 0, sizeof(feature)); for ( len = 0;; len++ ) { - if ( len >= sizeof(feature)-1 ) + if ( len >= sizeof(feature) - 1 ) break; if ( features[pos + len] == '\0' ) break; @@ -95,33 +93,34 @@ elf_errorstatus elf_xen_parse_features(const char *features, /* xen elf notes */ elf_errorstatus elf_xen_parse_note(struct elf_binary *elf, - struct elf_dom_parms *parms, - ELF_HANDLE_DECL(elf_note) note) + struct elf_dom_parms *parms, + ELF_HANDLE_DECL(elf_note) note) { -/* *INDENT-OFF* */ - static const struct { + /* *INDENT-OFF* */ + static const struct + { char *name; bool str; } note_desc[] = { - [XEN_ELFNOTE_ENTRY] = { "ENTRY", 0}, - [XEN_ELFNOTE_HYPERCALL_PAGE] = { "HYPERCALL_PAGE", 0}, - [XEN_ELFNOTE_VIRT_BASE] = { "VIRT_BASE", 0}, - [XEN_ELFNOTE_INIT_P2M] = { "INIT_P2M", 0}, - [XEN_ELFNOTE_PADDR_OFFSET] = { "PADDR_OFFSET", 0}, - [XEN_ELFNOTE_HV_START_LOW] = { "HV_START_LOW", 0}, - [XEN_ELFNOTE_XEN_VERSION] = { "XEN_VERSION", 1}, - [XEN_ELFNOTE_GUEST_OS] = { "GUEST_OS", 1}, - [XEN_ELFNOTE_GUEST_VERSION] = { "GUEST_VERSION", 1}, - [XEN_ELFNOTE_LOADER] = { "LOADER", 1}, - [XEN_ELFNOTE_PAE_MODE] = { "PAE_MODE", 1}, - [XEN_ELFNOTE_FEATURES] = { "FEATURES", 1}, - [XEN_ELFNOTE_SUPPORTED_FEATURES] = { "SUPPORTED_FEATURES", 0}, - [XEN_ELFNOTE_BSD_SYMTAB] = { "BSD_SYMTAB", 1}, - [XEN_ELFNOTE_SUSPEND_CANCEL] = { "SUSPEND_CANCEL", 0 }, - [XEN_ELFNOTE_MOD_START_PFN] = { "MOD_START_PFN", 0 }, - [XEN_ELFNOTE_PHYS32_ENTRY] = { "PHYS32_ENTRY", 0 }, + [XEN_ELFNOTE_ENTRY] = {"ENTRY", 0}, + [XEN_ELFNOTE_HYPERCALL_PAGE] = {"HYPERCALL_PAGE", 0}, + [XEN_ELFNOTE_VIRT_BASE] = 
{"VIRT_BASE", 0}, + [XEN_ELFNOTE_INIT_P2M] = {"INIT_P2M", 0}, + [XEN_ELFNOTE_PADDR_OFFSET] = {"PADDR_OFFSET", 0}, + [XEN_ELFNOTE_HV_START_LOW] = {"HV_START_LOW", 0}, + [XEN_ELFNOTE_XEN_VERSION] = {"XEN_VERSION", 1}, + [XEN_ELFNOTE_GUEST_OS] = {"GUEST_OS", 1}, + [XEN_ELFNOTE_GUEST_VERSION] = {"GUEST_VERSION", 1}, + [XEN_ELFNOTE_LOADER] = {"LOADER", 1}, + [XEN_ELFNOTE_PAE_MODE] = {"PAE_MODE", 1}, + [XEN_ELFNOTE_FEATURES] = {"FEATURES", 1}, + [XEN_ELFNOTE_SUPPORTED_FEATURES] = {"SUPPORTED_FEATURES", 0}, + [XEN_ELFNOTE_BSD_SYMTAB] = {"BSD_SYMTAB", 1}, + [XEN_ELFNOTE_SUSPEND_CANCEL] = {"SUSPEND_CANCEL", 0}, + [XEN_ELFNOTE_MOD_START_PFN] = {"MOD_START_PFN", 0}, + [XEN_ELFNOTE_PHYS32_ENTRY] = {"PHYS32_ENTRY", 0}, }; -/* *INDENT-ON* */ + /* *INDENT-ON* */ const char *str = NULL; uint64_t val = 0; @@ -138,7 +137,7 @@ elf_errorstatus elf_xen_parse_note(struct elf_binary *elf, if ( note_desc[type].str ) { str = elf_strval(elf, elf_note_desc(elf, note)); - if (str == NULL) + if ( str == NULL ) /* elf_strval will mark elf broken if it fails so no need to log */ return 0; elf_msg(elf, "ELF: note: %s = \"%s\"\n", note_desc[type].name, str); @@ -148,13 +147,14 @@ elf_errorstatus elf_xen_parse_note(struct elf_binary *elf, else { val = elf_note_numeric(elf, note); - elf_msg(elf, "ELF: note: %s = %#" PRIx64 "\n", note_desc[type].name, val); + elf_msg(elf, "ELF: note: %s = %#" PRIx64 "\n", note_desc[type].name, + val); parms->elf_notes[type].type = XEN_ENT_LONG; parms->elf_notes[type].data.num = val; } parms->elf_notes[type].name = note_desc[type].name; - switch ( type ) + switch (type) { case XEN_ELFNOTE_LOADER: safe_strcpy(parms->loader, str); @@ -223,17 +223,16 @@ elf_errorstatus elf_xen_parse_note(struct elf_binary *elf, #define ELF_NOTE_INVALID (~0U) static unsigned elf_xen_parse_notes(struct elf_binary *elf, - struct elf_dom_parms *parms, - elf_ptrval start, - elf_ptrval end, - unsigned *total_note_count) + struct elf_dom_parms *parms, + elf_ptrval start, elf_ptrval end, + unsigned *total_note_count) { unsigned xen_elfnotes = 0; ELF_HANDLE_DECL(elf_note) note; const char *note_name; parms->elf_note_start = start; - parms->elf_note_end = end; + parms->elf_note_end = end; for ( note = ELF_MAKE_HANDLE(elf_note, parms->elf_note_start); ELF_HANDLE_PTRVAL(note) < parms->elf_note_end; note = elf_note_next(elf, note) ) @@ -264,7 +263,7 @@ static unsigned elf_xen_parse_notes(struct elf_binary *elf, /* __xen_guest section */ elf_errorstatus elf_xen_parse_guest_info(struct elf_binary *elf, - struct elf_dom_parms *parms) + struct elf_dom_parms *parms) { elf_ptrval h; unsigned char name[32], value[128]; @@ -279,7 +278,7 @@ elf_errorstatus elf_xen_parse_guest_info(struct elf_binary *elf, elf_memset_unchecked(value, 0, sizeof(value)); for ( len = 0;; len++, h++ ) { - if ( len >= sizeof(name)-1 ) + if ( len >= sizeof(name) - 1 ) break; if ( STAR(h) == '\0' ) break; @@ -293,7 +292,7 @@ elf_errorstatus elf_xen_parse_guest_info(struct elf_binary *elf, h++; for ( len = 0;; len++, h++ ) { - if ( len >= sizeof(value)-1 ) + if ( len >= sizeof(value) - 1 ) break; if ( STAR(h) == '\0' ) break; @@ -360,7 +359,7 @@ elf_errorstatus elf_xen_parse_guest_info(struct elf_binary *elf, /* sanity checks */ static elf_errorstatus elf_xen_note_check(struct elf_binary *elf, - struct elf_dom_parms *parms) + struct elf_dom_parms *parms) { if ( (ELF_PTRVAL_INVALID(parms->elf_note_start)) && (ELF_PTRVAL_INVALID(parms->guest_info)) ) @@ -369,7 +368,7 @@ static elf_errorstatus elf_xen_note_check(struct elf_binary *elf, if ( (machine == EM_386) || 
(machine == EM_X86_64) ) { elf_err(elf, "ERROR: Not a Xen-ELF image: " - "No ELF notes or '__xen_guest' section found\n"); + "No ELF notes or '__xen_guest' section found\n"); return -1; } return 0; @@ -377,8 +376,8 @@ static elf_errorstatus elf_xen_note_check(struct elf_binary *elf, if ( elf_uval(elf, elf->ehdr, e_machine) == EM_ARM ) { - elf_msg(elf, "ELF: Not bothering with notes on ARM\n"); - return 0; + elf_msg(elf, "ELF: Not bothering with notes on ARM\n"); + return 0; } /* PVH only requires one ELF note to be set */ @@ -395,7 +394,8 @@ static elf_errorstatus elf_xen_note_check(struct elf_binary *elf, strncmp(parms->guest_os, "linux", 5)) ) { elf_err(elf, - "ERROR: Will only load images built for the generic loader or Linux images" + "ERROR: Will only load images built for the generic loader or " + "Linux images" " (Not '%.*s' and '%.*s') or with PHYS32_ENTRY set\n", (int)sizeof(parms->loader), parms->loader, (int)sizeof(parms->guest_os), parms->guest_os); @@ -405,7 +405,8 @@ static elf_errorstatus elf_xen_note_check(struct elf_binary *elf, if ( (strlen(parms->xen_ver) == 0) || strncmp(parms->xen_ver, "xen-3.0", 7) ) { - elf_err(elf, "ERROR: Xen will only load images built for Xen v3.0 " + elf_err(elf, + "ERROR: Xen will only load images built for Xen v3.0 " "(Not '%.*s')\n", (int)sizeof(parms->xen_ver), parms->xen_ver); return -1; @@ -414,7 +415,7 @@ static elf_errorstatus elf_xen_note_check(struct elf_binary *elf, } static elf_errorstatus elf_xen_addr_calc_check(struct elf_binary *elf, - struct elf_dom_parms *parms) + struct elf_dom_parms *parms) { uint64_t virt_offset; @@ -454,7 +455,7 @@ static elf_errorstatus elf_xen_addr_calc_check(struct elf_binary *elf, virt_offset = parms->virt_base - parms->elf_paddr_offset; parms->virt_kstart = elf->pstart + virt_offset; - parms->virt_kend = elf->pend + virt_offset; + parms->virt_kend = elf->pend + virt_offset; if ( parms->virt_entry == UNSET_ADDR ) parms->virt_entry = elf_uval(elf, elf->ehdr, e_entry); @@ -468,7 +469,8 @@ static elf_errorstatus elf_xen_addr_calc_check(struct elf_binary *elf, elf_msg(elf, "ELF: addresses:\n"); elf_msg(elf, " virt_base = 0x%" PRIx64 "\n", parms->virt_base); - elf_msg(elf, " elf_paddr_offset = 0x%" PRIx64 "\n", parms->elf_paddr_offset); + elf_msg(elf, " elf_paddr_offset = 0x%" PRIx64 "\n", + parms->elf_paddr_offset); elf_msg(elf, " virt_offset = 0x%" PRIx64 "\n", virt_offset); elf_msg(elf, " virt_kstart = 0x%" PRIx64 "\n", parms->virt_kstart); elf_msg(elf, " virt_kend = 0x%" PRIx64 "\n", parms->virt_kend); @@ -499,7 +501,7 @@ static elf_errorstatus elf_xen_addr_calc_check(struct elf_binary *elf, /* glue it all together ... */ elf_errorstatus elf_xen_parse(struct elf_binary *elf, - struct elf_dom_parms *parms) + struct elf_dom_parms *parms) { ELF_HANDLE_DECL(elf_shdr) shdr; ELF_HANDLE_DECL(elf_phdr) phdr; @@ -531,13 +533,12 @@ elf_errorstatus elf_xen_parse(struct elf_binary *elf, * Some versions of binutils do not correctly set p_offset for * note segments. 
*/ - if (elf_uval(elf, phdr, p_offset) == 0) - continue; + if ( elf_uval(elf, phdr, p_offset) == 0 ) + continue; - more_notes = elf_xen_parse_notes(elf, parms, - elf_segment_start(elf, phdr), - elf_segment_end(elf, phdr), - &total_note_count); + more_notes = + elf_xen_parse_notes(elf, parms, elf_segment_start(elf, phdr), + elf_segment_end(elf, phdr), &total_note_count); if ( more_notes == ELF_NOTE_INVALID ) return -1; @@ -561,10 +562,9 @@ elf_errorstatus elf_xen_parse(struct elf_binary *elf, if ( elf_uval(elf, shdr, sh_type) != SHT_NOTE ) continue; - more_notes = elf_xen_parse_notes(elf, parms, - elf_section_start(elf, shdr), - elf_section_end(elf, shdr), - &total_note_count); + more_notes = elf_xen_parse_notes( + elf, parms, elf_section_start(elf, shdr), + elf_section_end(elf, shdr), &total_note_count); if ( more_notes == ELF_NOTE_INVALID ) return -1; @@ -574,7 +574,6 @@ elf_errorstatus elf_xen_parse(struct elf_binary *elf, xen_elfnotes += more_notes; } - } /* @@ -587,7 +586,7 @@ elf_errorstatus elf_xen_parse(struct elf_binary *elf, { parms->guest_info = elf_section_start(elf, shdr); parms->elf_note_start = ELF_INVALID_PTRVAL; - parms->elf_note_end = ELF_INVALID_PTRVAL; + parms->elf_note_end = ELF_INVALID_PTRVAL; elf_msg(elf, "ELF: __xen_guest: \"%s\"\n", elf_strfmt(elf, parms->guest_info)); elf_xen_parse_guest_info(elf, parms); diff --git a/xen/common/libelf/libelf-loader.c b/xen/common/libelf/libelf-loader.c index 0f468727d0..ac39050e19 100644 --- a/xen/common/libelf/libelf-loader.c +++ b/xen/common/libelf/libelf-loader.c @@ -25,15 +25,18 @@ /* Number of section header needed in order to fit the SYMTAB and STRTAB. */ #define ELF_BSDSYM_SECTIONS 3 -struct elf_sym_header { +struct elf_sym_header +{ uint32_t size; - struct { + struct + { elf_ehdr header; elf_shdr section[ELF_BSDSYM_SECTIONS]; } elf_header; } __attribute__((packed)); -elf_errorstatus elf_init(struct elf_binary *elf, const char *image_input, size_t size) +elf_errorstatus elf_init(struct elf_binary *elf, const char *image_input, + size_t size) { ELF_HANDLE_DECL(elf_shdr) shdr; unsigned i, count, section, link; @@ -54,7 +57,7 @@ elf_errorstatus elf_init(struct elf_binary *elf, const char *image_input, size_t /* Sanity check phdr. */ offset = elf_uval(elf, elf->ehdr, e_phoff) + - elf_uval(elf, elf->ehdr, e_phentsize) * elf_phdr_count(elf); + elf_uval(elf, elf->ehdr, e_phentsize) * elf_phdr_count(elf); if ( offset > elf->size ) { elf_err(elf, "ELF: phdr overflow (off %" PRIx64 " > size %lx)\n", @@ -64,7 +67,7 @@ elf_errorstatus elf_init(struct elf_binary *elf, const char *image_input, size_t /* Sanity check shdr. */ offset = elf_uval(elf, elf->ehdr, e_shoff) + - elf_uval(elf, elf->ehdr, e_shentsize) * elf_shdr_count(elf); + elf_uval(elf, elf->ehdr, e_shentsize) * elf_shdr_count(elf); if ( offset > elf->size ) { elf_err(elf, "ELF: shdr overflow (off %" PRIx64 " > size %lx)\n", @@ -109,20 +112,21 @@ elf_errorstatus elf_init(struct elf_binary *elf, const char *image_input, size_t } #ifndef __XEN__ -void elf_call_log_callback(struct elf_binary *elf, bool iserr, - const char *fmt,...) { +void elf_call_log_callback(struct elf_binary *elf, bool iserr, const char *fmt, + ...) 
+{ va_list al; - if (!elf->log_callback) + if ( !elf->log_callback ) return; - if (!(iserr || elf->verbose)) + if ( !(iserr || elf->verbose) ) return; - va_start(al,fmt); + va_start(al, fmt); elf->log_callback(elf, elf->log_caller_data, iserr, fmt, al); va_end(al); } - + void elf_set_log(struct elf_binary *elf, elf_log_callback *log_callback, void *log_caller_data, bool verbose) { @@ -131,9 +135,9 @@ void elf_set_log(struct elf_binary *elf, elf_log_callback *log_callback, elf->verbose = verbose; } -static elf_errorstatus elf_load_image(struct elf_binary *elf, - elf_ptrval dst, elf_ptrval src, - uint64_t filesz, uint64_t memsz) +static elf_errorstatus elf_load_image(struct elf_binary *elf, elf_ptrval dst, + elf_ptrval src, uint64_t filesz, + uint64_t memsz) { elf_memcpy_safe(elf, dst, src, filesz); elf_memset_safe(elf, dst + filesz, 0, memsz - filesz); @@ -161,21 +165,22 @@ static elf_errorstatus elf_memcpy(struct vcpu *v, void *dst, void *src, } #endif - res = src ? raw_copy_to_guest(dst, src, size) : - raw_clear_guest(dst, size); + res = src ? raw_copy_to_guest(dst, src, size) : raw_clear_guest(dst, size); return res ? -1 : 0; } -static elf_errorstatus elf_load_image(struct elf_binary *elf, elf_ptrval dst, elf_ptrval src, uint64_t filesz, uint64_t memsz) +static elf_errorstatus elf_load_image(struct elf_binary *elf, elf_ptrval dst, + elf_ptrval src, uint64_t filesz, + uint64_t memsz) { elf_errorstatus rc; if ( filesz > ULONG_MAX || memsz > ULONG_MAX ) return -1; /* We trust the dom0 kernel image completely, so we don't care * about overruns etc. here. */ - rc = elf_memcpy(elf->vcpu, ELF_UNSAFE_PTR(dst), ELF_UNSAFE_PTR(src), - filesz); + rc = + elf_memcpy(elf->vcpu, ELF_UNSAFE_PTR(dst), ELF_UNSAFE_PTR(src), filesz); if ( rc != 0 ) return -1; rc = elf_memcpy(elf->vcpu, ELF_UNSAFE_PTR(dst + filesz), NULL, @@ -205,8 +210,8 @@ void elf_parse_bsdsyms(struct elf_binary *elf, uint64_t pstart) /* Space for the ELF header and section headers */ sz += offsetof(struct elf_sym_header, elf_header.section) + - ELF_BSDSYM_SECTIONS * (elf_64bit(elf) ? sizeof(Elf64_Shdr) : - sizeof(Elf32_Shdr)); + ELF_BSDSYM_SECTIONS * + (elf_64bit(elf) ? sizeof(Elf64_Shdr) : sizeof(Elf32_Shdr)); sz = elf_round_up(elf, sz); /* @@ -230,7 +235,7 @@ void elf_parse_bsdsyms(struct elf_binary *elf, uint64_t pstart) sz = elf_round_up(elf, sz + elf_uval(elf, shdr, sh_size)); elf->bsd_symtab_pstart = pstart; - elf->bsd_symtab_pend = pstart + sz; + elf->bsd_symtab_pend = pstart + sz; } static void elf_load_bsdsyms(struct elf_binary *elf) @@ -296,16 +301,16 @@ static void elf_load_bsdsyms(struct elf_binary *elf) if ( !elf->bsd_symtab_pstart ) return; -#define elf_store_field_bitness(_elf, _hdr, _elm, _val) \ -do { \ - if ( elf_64bit(_elf) ) \ - elf_store_field(_elf, _hdr, e64._elm, _val); \ - else \ - elf_store_field(_elf, _hdr, e32._elm, _val); \ -} while ( 0 ) +#define elf_store_field_bitness(_elf, _hdr, _elm, _val) \ + do { \ + if ( elf_64bit(_elf) ) \ + elf_store_field(_elf, _hdr, e64._elm, _val); \ + else \ + elf_store_field(_elf, _hdr, e32._elm, _val); \ + } while ( 0 ) -#define SYMTAB_INDEX 1 -#define STRTAB_INDEX 2 +#define SYMTAB_INDEX 1 +#define STRTAB_INDEX 2 /* Allow elf_memcpy_safe to write to header. */ elf_set_xdest(elf, &header, sizeof(header)); @@ -318,22 +323,22 @@ do { \ * know the size of the symtab yet, and the strtab will be placed after it. 
*/ header_base = elf_get_ptr(elf, elf->bsd_symtab_pstart); - elf_header_base = elf_get_ptr(elf, elf->bsd_symtab_pstart) + - sizeof(uint32_t); + elf_header_base = + elf_get_ptr(elf, elf->bsd_symtab_pstart) + sizeof(uint32_t); symtab_base = elf_round_up(elf, header_base + sizeof(header)); /* * Set the size of the ELF header and the section headers, based on the * size of our local copy. */ - ehdr_size = elf_64bit(elf) ? sizeof(header.elf_header.header.e64) : - sizeof(header.elf_header.header.e32); - shdr_size = elf_64bit(elf) ? sizeof(header.elf_header.section[0].e64) : - sizeof(header.elf_header.section[0].e32); + ehdr_size = elf_64bit(elf) ? sizeof(header.elf_header.header.e64) + : sizeof(header.elf_header.header.e32); + shdr_size = elf_64bit(elf) ? sizeof(header.elf_header.section[0].e64) + : sizeof(header.elf_header.section[0].e32); /* Fill the ELF header, copied from the original ELF header. */ - header_handle = ELF_MAKE_HANDLE(elf_ehdr, - ELF_REALPTR2PTRVAL(&header.elf_header.header)); + header_handle = ELF_MAKE_HANDLE( + elf_ehdr, ELF_REALPTR2PTRVAL(&header.elf_header.header)); elf_memcpy_safe(elf, ELF_HANDLE_PTRVAL(header_handle), ELF_HANDLE_PTRVAL(elf->ehdr), ehdr_size); @@ -365,21 +370,20 @@ do { \ * where the sections are actually loaded (relative to the ELF header * location). */ - section_handle = ELF_MAKE_HANDLE(elf_shdr, - ELF_REALPTR2PTRVAL(&header.elf_header.section[SYMTAB_INDEX])); + section_handle = ELF_MAKE_HANDLE( + elf_shdr, ELF_REALPTR2PTRVAL(&header.elf_header.section[SYMTAB_INDEX])); elf_memcpy_safe(elf, ELF_HANDLE_PTRVAL(section_handle), - ELF_HANDLE_PTRVAL(elf->sym_tab), - shdr_size); + ELF_HANDLE_PTRVAL(elf->sym_tab), shdr_size); /* Copy the original sh_link field before mangling it. */ link = elf_uval(elf, section_handle, sh_link); /* Load symtab into guest memory. */ - rc = elf_load_image(elf, symtab_base, - elf_section_start(elf, section_handle), - elf_uval(elf, section_handle, sh_size), - elf_uval(elf, section_handle, sh_size)); + rc = + elf_load_image(elf, symtab_base, elf_section_start(elf, section_handle), + elf_uval(elf, section_handle, sh_size), + elf_uval(elf, section_handle, sh_size)); if ( rc != 0 ) { elf_mark_broken(elf, "unable to load symtab into guest memory"); @@ -389,19 +393,17 @@ do { \ /* Adjust the sh_offset and sh_link of the copied section header. */ elf_store_field_bitness(elf, section_handle, sh_offset, symtab_base - elf_header_base); - elf_store_field_bitness(elf, section_handle, sh_link, - STRTAB_INDEX); + elf_store_field_bitness(elf, section_handle, sh_link, STRTAB_INDEX); /* Calculate the guest address where strtab is loaded. */ - strtab_base = elf_round_up(elf, symtab_base + - elf_uval(elf, section_handle, sh_size)); + strtab_base = + elf_round_up(elf, symtab_base + elf_uval(elf, section_handle, sh_size)); /* Load strtab section header. */ - section_handle = ELF_MAKE_HANDLE(elf_shdr, - ELF_REALPTR2PTRVAL(&header.elf_header.section[STRTAB_INDEX])); + section_handle = ELF_MAKE_HANDLE( + elf_shdr, ELF_REALPTR2PTRVAL(&header.elf_header.section[STRTAB_INDEX])); elf_memcpy_safe(elf, ELF_HANDLE_PTRVAL(section_handle), - ELF_HANDLE_PTRVAL(elf_shdr_by_index(elf, link)), - shdr_size); + ELF_HANDLE_PTRVAL(elf_shdr_by_index(elf, link)), shdr_size); if ( elf_uval(elf, section_handle, sh_type) != SHT_STRTAB ) { @@ -410,10 +412,10 @@ do { \ } /* Load strtab into guest memory. 
*/ - rc = elf_load_image(elf, strtab_base, - elf_section_start(elf, section_handle), - elf_uval(elf, section_handle, sh_size), - elf_uval(elf, section_handle, sh_size)); + rc = + elf_load_image(elf, strtab_base, elf_section_start(elf, section_handle), + elf_uval(elf, section_handle, sh_size), + elf_uval(elf, section_handle, sh_size)); if ( rc != 0 ) { elf_mark_broken(elf, "unable to load strtab into guest memory"); @@ -424,8 +426,8 @@ do { \ strtab_base - elf_header_base); /* Store the whole size (including headers and loaded sections). */ - header.size = strtab_base + elf_uval(elf, section_handle, sh_size) - - elf_header_base; + header.size = + strtab_base + elf_uval(elf, section_handle, sh_size) - elf_header_base; /* Load the size plus ELF header. */ header_size = offsetof(typeof(header), elf_header.section); @@ -450,8 +452,8 @@ do { \ shdr_size, shdr_size); if ( rc != 0 ) { - elf_mark_broken(elf, - "unable to load ELF section header into guest memory"); + elf_mark_broken( + elf, "unable to load ELF section header into guest memory"); return; } } @@ -490,8 +492,8 @@ void elf_parse_binary(struct elf_binary *elf) } elf->pstart = low; elf->pend = high; - elf_msg(elf, "ELF: memory: %#" PRIx64 " -> %#" PRIx64 "\n", - elf->pstart, elf->pend); + elf_msg(elf, "ELF: memory: %#" PRIx64 " -> %#" PRIx64 "\n", elf->pstart, + elf->pend); } elf_errorstatus elf_load_binary(struct elf_binary *elf) @@ -530,15 +532,16 @@ elf_errorstatus elf_load_binary(struct elf_binary *elf) if ( remain_allow_copy < memsz ) { elf_mark_broken(elf, "program segments total to more" - " than the input image size"); + " than the input image size"); break; } remain_allow_copy -= memsz; elf_msg(elf, - "ELF: phdr %u at %#"ELF_PRPTRVAL" -> %#"ELF_PRPTRVAL"\n", - i, dest, (elf_ptrval)(dest + filesz)); - if ( elf_load_image(elf, dest, ELF_IMAGE_BASE(elf) + offset, filesz, memsz) != 0 ) + "ELF: phdr %u at %#" ELF_PRPTRVAL " -> %#" ELF_PRPTRVAL "\n", i, + dest, (elf_ptrval)(dest + filesz)); + if ( elf_load_image(elf, dest, ELF_IMAGE_BASE(elf) + offset, filesz, + memsz) != 0 ) return -1; } @@ -551,7 +554,7 @@ elf_ptrval elf_get_ptr(struct elf_binary *elf, unsigned long addr) return ELF_REALPTR2PTRVAL(elf->dest_base) + addr - elf->pstart; } -uint64_t elf_lookup_addr(struct elf_binary * elf, const char *symbol) +uint64_t elf_lookup_addr(struct elf_binary *elf, const char *symbol) { ELF_HANDLE_DECL(elf_sym) sym; uint64_t value; @@ -564,8 +567,8 @@ uint64_t elf_lookup_addr(struct elf_binary * elf, const char *symbol) } value = elf_uval(elf, sym, st_value); - elf_msg(elf, "%s: symbol \"%s\" at 0x%" PRIx64 "\n", __func__, - symbol, value); + elf_msg(elf, "%s: symbol \"%s\" at 0x%" PRIx64 "\n", __func__, symbol, + value); return value; } diff --git a/xen/common/libelf/libelf-tools.c b/xen/common/libelf/libelf-tools.c index a9edb6a8dc..5598ac708b 100644 --- a/xen/common/libelf/libelf-tools.c +++ b/xen/common/libelf/libelf-tools.c @@ -31,29 +31,27 @@ const char *elf_check_broken(const struct elf_binary *elf) } static bool elf_ptrval_in_range(elf_ptrval ptrval, uint64_t size, - const void *region, uint64_t regionsize) - /* - * Returns true if the putative memory area [ptrval,ptrval+size> - * is completely inside the region [region,region+regionsize>. - * - * ptrval and size are the untrusted inputs to be checked. - * region and regionsize are trusted and must be correct and valid, - * although it is OK for region to perhaps be maliciously NULL - * (but not some other malicious value). 
- */ + const void *region, uint64_t regionsize) +/* + * Returns true if the putative memory area [ptrval,ptrval+size> + * is completely inside the region [region,region+regionsize>. + * + * ptrval and size are the untrusted inputs to be checked. + * region and regionsize are trusted and must be correct and valid, + * although it is OK for region to perhaps be maliciously NULL + * (but not some other malicious value). + */ { elf_ptrval regionp = (elf_ptrval)region; - if ( (region == NULL) || - (ptrval < regionp) || /* start is before region */ - (ptrval > regionp + regionsize) || /* start is after region */ + if ( (region == NULL) || (ptrval < regionp) || /* start is before region */ + (ptrval > regionp + regionsize) || /* start is after region */ (size > regionsize - (ptrval - regionp)) ) /* too big */ return 0; return 1; } -bool elf_access_ok(struct elf_binary * elf, - uint64_t ptrval, size_t size) +bool elf_access_ok(struct elf_binary *elf, uint64_t ptrval, size_t size) { if ( elf_ptrval_in_range(ptrval, size, elf->image_base, elf->size) ) return 1; @@ -65,11 +63,10 @@ bool elf_access_ok(struct elf_binary * elf, return 0; } -void elf_memcpy_safe(struct elf_binary *elf, elf_ptrval dst, - elf_ptrval src, size_t size) +void elf_memcpy_safe(struct elf_binary *elf, elf_ptrval dst, elf_ptrval src, + size_t size) { - if ( elf_access_ok(elf, dst, size) && - elf_access_ok(elf, src, size) ) + if ( elf_access_ok(elf, dst, size) && elf_access_ok(elf, src, size) ) { /* use memmove because these checks do not prove that the * regions don't overlap and overlapping regions grant @@ -86,7 +83,7 @@ void elf_memset_safe(struct elf_binary *elf, elf_ptrval dst, int c, size_t size) } } -uint64_t elf_access_unsigned(struct elf_binary * elf, elf_ptrval base, +uint64_t elf_access_unsigned(struct elf_binary *elf, elf_ptrval base, uint64_t moreoffset, size_t size) { elf_ptrval ptrval = base + moreoffset; @@ -99,19 +96,19 @@ uint64_t elf_access_unsigned(struct elf_binary * elf, elf_ptrval base, if ( !elf_access_ok(elf, ptrval, size) ) return 0; - switch ( size ) + switch (size) { case 1: - u8 = (const void*)ptrval; + u8 = (const void *)ptrval; return *u8; case 2: - u16 = (const void*)ptrval; + u16 = (const void *)ptrval; return need_swap ? bswap_16(*u16) : *u16; case 4: - u32 = (const void*)ptrval; + u32 = (const void *)ptrval; return need_swap ? bswap_32(*u32) : *u32; case 8: - u64 = (const void*)ptrval; + u64 = (const void *)ptrval; return need_swap ? 
bswap_64(*u64) : *u64; default: return 0; @@ -147,7 +144,8 @@ unsigned elf_phdr_count(struct elf_binary *elf) return elf_uval(elf, elf->ehdr, e_phnum); } -ELF_HANDLE_DECL(elf_shdr) elf_shdr_by_name(struct elf_binary *elf, const char *name) +ELF_HANDLE_DECL(elf_shdr) +elf_shdr_by_name(struct elf_binary *elf, const char *name) { unsigned i, count = elf_shdr_count(elf); ELF_HANDLE_DECL(elf_shdr) shdr; @@ -166,7 +164,8 @@ ELF_HANDLE_DECL(elf_shdr) elf_shdr_by_name(struct elf_binary *elf, const char *n return ELF_INVALID_HANDLE(elf_shdr); } -ELF_HANDLE_DECL(elf_shdr) elf_shdr_by_index(struct elf_binary *elf, unsigned index) +ELF_HANDLE_DECL(elf_shdr) +elf_shdr_by_index(struct elf_binary *elf, unsigned index) { unsigned count = elf_shdr_count(elf); elf_ptrval ptr; @@ -174,13 +173,13 @@ ELF_HANDLE_DECL(elf_shdr) elf_shdr_by_index(struct elf_binary *elf, unsigned ind if ( index >= count ) return ELF_INVALID_HANDLE(elf_shdr); - ptr = (ELF_IMAGE_BASE(elf) - + elf_uval(elf, elf->ehdr, e_shoff) - + elf_uval(elf, elf->ehdr, e_shentsize) * index); + ptr = (ELF_IMAGE_BASE(elf) + elf_uval(elf, elf->ehdr, e_shoff) + + elf_uval(elf, elf->ehdr, e_shentsize) * index); return ELF_MAKE_HANDLE(elf_shdr, ptr); } -ELF_HANDLE_DECL(elf_phdr) elf_phdr_by_index(struct elf_binary *elf, unsigned index) +ELF_HANDLE_DECL(elf_phdr) +elf_phdr_by_index(struct elf_binary *elf, unsigned index) { unsigned count = elf_phdr_count(elf); elf_ptrval ptr; @@ -188,13 +187,11 @@ ELF_HANDLE_DECL(elf_phdr) elf_phdr_by_index(struct elf_binary *elf, unsigned ind if ( index >= count ) return ELF_INVALID_HANDLE(elf_phdr); - ptr = (ELF_IMAGE_BASE(elf) - + elf_uval(elf, elf->ehdr, e_phoff) - + elf_uval(elf, elf->ehdr, e_phentsize) * index); + ptr = (ELF_IMAGE_BASE(elf) + elf_uval(elf, elf->ehdr, e_phoff) + + elf_uval(elf, elf->ehdr, e_phentsize) * index); return ELF_MAKE_HANDLE(elf_phdr, ptr); } - const char *elf_section_name(struct elf_binary *elf, ELF_HANDLE_DECL(elf_shdr) shdr) { @@ -208,7 +205,8 @@ const char *elf_strval(struct elf_binary *elf, elf_ptrval start) { uint64_t length; - for ( length = 0; ; length++ ) { + for ( length = 0;; length++ ) + { if ( !elf_access_ok(elf, start + length, 1) ) return NULL; if ( !elf_access_unsigned(elf, start, length, 1) ) @@ -231,30 +229,34 @@ const char *elf_strfmt(struct elf_binary *elf, elf_ptrval start) return str; } -elf_ptrval elf_section_start(struct elf_binary *elf, ELF_HANDLE_DECL(elf_shdr) shdr) +elf_ptrval elf_section_start(struct elf_binary *elf, + ELF_HANDLE_DECL(elf_shdr) shdr) { return ELF_IMAGE_BASE(elf) + elf_uval(elf, shdr, sh_offset); } -elf_ptrval elf_section_end(struct elf_binary *elf, ELF_HANDLE_DECL(elf_shdr) shdr) +elf_ptrval elf_section_end(struct elf_binary *elf, + ELF_HANDLE_DECL(elf_shdr) shdr) { - return ELF_IMAGE_BASE(elf) - + elf_uval(elf, shdr, sh_offset) + elf_uval(elf, shdr, sh_size); + return ELF_IMAGE_BASE(elf) + elf_uval(elf, shdr, sh_offset) + + elf_uval(elf, shdr, sh_size); } -elf_ptrval elf_segment_start(struct elf_binary *elf, ELF_HANDLE_DECL(elf_phdr) phdr) +elf_ptrval elf_segment_start(struct elf_binary *elf, + ELF_HANDLE_DECL(elf_phdr) phdr) { - return ELF_IMAGE_BASE(elf) - + elf_uval(elf, phdr, p_offset); + return ELF_IMAGE_BASE(elf) + elf_uval(elf, phdr, p_offset); } -elf_ptrval elf_segment_end(struct elf_binary *elf, ELF_HANDLE_DECL(elf_phdr) phdr) +elf_ptrval elf_segment_end(struct elf_binary *elf, + ELF_HANDLE_DECL(elf_phdr) phdr) { - return ELF_IMAGE_BASE(elf) - + elf_uval(elf, phdr, p_offset) + elf_uval(elf, phdr, p_filesz); + return ELF_IMAGE_BASE(elf) 
+ elf_uval(elf, phdr, p_offset) + + elf_uval(elf, phdr, p_filesz); } -ELF_HANDLE_DECL(elf_sym) elf_sym_by_name(struct elf_binary *elf, const char *symbol) +ELF_HANDLE_DECL(elf_sym) +elf_sym_by_name(struct elf_binary *elf, const char *symbol) { elf_ptrval ptr = elf_section_start(elf, elf->sym_tab); elf_ptrval end = elf_section_end(elf, elf->sym_tab); @@ -279,7 +281,8 @@ ELF_HANDLE_DECL(elf_sym) elf_sym_by_name(struct elf_binary *elf, const char *sym return ELF_INVALID_HANDLE(elf_sym); } -ELF_HANDLE_DECL(elf_sym) elf_sym_by_index(struct elf_binary *elf, unsigned index) +ELF_HANDLE_DECL(elf_sym) +elf_sym_by_index(struct elf_binary *elf, unsigned index) { elf_ptrval ptr = elf_section_start(elf, elf->sym_tab); ELF_HANDLE_DECL(elf_sym) sym; @@ -288,7 +291,8 @@ ELF_HANDLE_DECL(elf_sym) elf_sym_by_index(struct elf_binary *elf, unsigned index return sym; } -const char *elf_note_name(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) note) +const char *elf_note_name(struct elf_binary *elf, + ELF_HANDLE_DECL(elf_note) note) { return elf_strval(elf, ELF_HANDLE_PTRVAL(note) + elf_size(elf, note)); } @@ -300,7 +304,8 @@ elf_ptrval elf_note_desc(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) note) return ELF_HANDLE_PTRVAL(note) + elf_size(elf, note) + namesz; } -uint64_t elf_note_numeric(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) note) +uint64_t elf_note_numeric(struct elf_binary *elf, + ELF_HANDLE_DECL(elf_note) note) { elf_ptrval desc = elf_note_desc(elf, note); unsigned descsz = elf_uval(elf, note, descsz); @@ -317,7 +322,8 @@ uint64_t elf_note_numeric(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) note } } -uint64_t elf_note_numeric_array(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) note, +uint64_t elf_note_numeric_array(struct elf_binary *elf, + ELF_HANDLE_DECL(elf_note) note, unsigned int unitsz, unsigned int idx) { elf_ptrval desc = elf_note_desc(elf, note); @@ -337,16 +343,17 @@ uint64_t elf_note_numeric_array(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note } } -ELF_HANDLE_DECL(elf_note) elf_note_next(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) note) +ELF_HANDLE_DECL(elf_note) +elf_note_next(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) note) { unsigned namesz = (elf_uval(elf, note, namesz) + 3) & ~3; unsigned descsz = (elf_uval(elf, note, descsz) + 3) & ~3; - elf_ptrval ptrval = ELF_HANDLE_PTRVAL(note) - + elf_size(elf, note) + namesz + descsz; + elf_ptrval ptrval = + ELF_HANDLE_PTRVAL(note) + elf_size(elf, note) + namesz + descsz; - if ( ( ptrval <= ELF_HANDLE_PTRVAL(note) || /* wrapped or stuck */ - !elf_access_ok(elf, ELF_HANDLE_PTRVAL(note), 1) ) ) + if ( (ptrval <= ELF_HANDLE_PTRVAL(note) || /* wrapped or stuck */ + !elf_access_ok(elf, ELF_HANDLE_PTRVAL(note), 1)) ) ptrval = ELF_MAX_PTRVAL; /* terminate caller's loop */ return ELF_MAKE_HANDLE(elf_note, ptrval); @@ -364,7 +371,8 @@ bool elf_is_elfbinary(const void *image_start, size_t image_size) return IS_ELF(*ehdr); } -bool elf_phdr_is_loadable(struct elf_binary *elf, ELF_HANDLE_DECL(elf_phdr) phdr) +bool elf_phdr_is_loadable(struct elf_binary *elf, + ELF_HANDLE_DECL(elf_phdr) phdr) { uint64_t p_type = elf_uval(elf, phdr, p_type); uint64_t p_flags = elf_uval(elf, phdr, p_flags); diff --git a/xen/common/libfdt/fdt.c b/xen/common/libfdt/fdt.c index d02f4bf5f3..1cf77fbb01 100644 --- a/xen/common/libfdt/fdt.c +++ b/xen/common/libfdt/fdt.c @@ -16,7 +16,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public - * License along with this library; If not, see . 
+ * License along with this library; If not, see + * . * * Alternatively, * @@ -55,194 +56,201 @@ int fdt_check_header(const void *fdt) { - if (fdt_magic(fdt) == FDT_MAGIC) { - /* Complete tree */ - if (fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION) - return -FDT_ERR_BADVERSION; - if (fdt_last_comp_version(fdt) > FDT_LAST_SUPPORTED_VERSION) - return -FDT_ERR_BADVERSION; - } else if (fdt_magic(fdt) == FDT_SW_MAGIC) { - /* Unfinished sequential-write blob */ - if (fdt_size_dt_struct(fdt) == 0) - return -FDT_ERR_BADSTATE; - } else { - return -FDT_ERR_BADMAGIC; - } - - return 0; + if ( fdt_magic(fdt) == FDT_MAGIC ) + { + /* Complete tree */ + if ( fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION ) + return -FDT_ERR_BADVERSION; + if ( fdt_last_comp_version(fdt) > FDT_LAST_SUPPORTED_VERSION ) + return -FDT_ERR_BADVERSION; + } + else if ( fdt_magic(fdt) == FDT_SW_MAGIC ) + { + /* Unfinished sequential-write blob */ + if ( fdt_size_dt_struct(fdt) == 0 ) + return -FDT_ERR_BADSTATE; + } + else + { + return -FDT_ERR_BADMAGIC; + } + + return 0; } const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int len) { - const char *p; + const char *p; - if (fdt_version(fdt) >= 0x11) - if (((offset + len) < offset) - || ((offset + len) > fdt_size_dt_struct(fdt))) - return NULL; + if ( fdt_version(fdt) >= 0x11 ) + if ( ((offset + len) < offset) || + ((offset + len) > fdt_size_dt_struct(fdt)) ) + return NULL; - p = _fdt_offset_ptr(fdt, offset); + p = _fdt_offset_ptr(fdt, offset); - if (p + len < p) - return NULL; - return p; + if ( p + len < p ) + return NULL; + return p; } uint32_t fdt_next_tag(const void *fdt, int startoffset, int *nextoffset) { - const fdt32_t *tagp, *lenp; - uint32_t tag; - int offset = startoffset; - const char *p; - - *nextoffset = -FDT_ERR_TRUNCATED; - tagp = fdt_offset_ptr(fdt, offset, FDT_TAGSIZE); - if (!tagp) - return FDT_END; /* premature end */ - tag = fdt32_to_cpu(*tagp); - offset += FDT_TAGSIZE; - - *nextoffset = -FDT_ERR_BADSTRUCTURE; - switch (tag) { - case FDT_BEGIN_NODE: - /* skip name */ - do { - p = fdt_offset_ptr(fdt, offset++, 1); - } while (p && (*p != '\0')); - if (!p) - return FDT_END; /* premature end */ - break; - - case FDT_PROP: - lenp = fdt_offset_ptr(fdt, offset, sizeof(*lenp)); - if (!lenp) - return FDT_END; /* premature end */ - /* skip-name offset, length and value */ - offset += sizeof(struct fdt_property) - FDT_TAGSIZE - + fdt32_to_cpu(*lenp); - break; - - case FDT_END: - case FDT_END_NODE: - case FDT_NOP: - break; - - default: - return FDT_END; - } - - if (!fdt_offset_ptr(fdt, startoffset, offset - startoffset)) - return FDT_END; /* premature end */ - - *nextoffset = FDT_TAGALIGN(offset); - return tag; + const fdt32_t *tagp, *lenp; + uint32_t tag; + int offset = startoffset; + const char *p; + + *nextoffset = -FDT_ERR_TRUNCATED; + tagp = fdt_offset_ptr(fdt, offset, FDT_TAGSIZE); + if ( !tagp ) + return FDT_END; /* premature end */ + tag = fdt32_to_cpu(*tagp); + offset += FDT_TAGSIZE; + + *nextoffset = -FDT_ERR_BADSTRUCTURE; + switch (tag) + { + case FDT_BEGIN_NODE: + /* skip name */ + do { + p = fdt_offset_ptr(fdt, offset++, 1); + } while ( p && (*p != '\0') ); + if ( !p ) + return FDT_END; /* premature end */ + break; + + case FDT_PROP: + lenp = fdt_offset_ptr(fdt, offset, sizeof(*lenp)); + if ( !lenp ) + return FDT_END; /* premature end */ + /* skip-name offset, length and value */ + offset += + sizeof(struct fdt_property) - FDT_TAGSIZE + fdt32_to_cpu(*lenp); + break; + + case FDT_END: + case FDT_END_NODE: + case FDT_NOP: + break; + + default: + 
return FDT_END; + } + + if ( !fdt_offset_ptr(fdt, startoffset, offset - startoffset) ) + return FDT_END; /* premature end */ + + *nextoffset = FDT_TAGALIGN(offset); + return tag; } int _fdt_check_node_offset(const void *fdt, int offset) { - if ((offset < 0) || (offset % FDT_TAGSIZE) - || (fdt_next_tag(fdt, offset, &offset) != FDT_BEGIN_NODE)) - return -FDT_ERR_BADOFFSET; + if ( (offset < 0) || (offset % FDT_TAGSIZE) || + (fdt_next_tag(fdt, offset, &offset) != FDT_BEGIN_NODE) ) + return -FDT_ERR_BADOFFSET; - return offset; + return offset; } int _fdt_check_prop_offset(const void *fdt, int offset) { - if ((offset < 0) || (offset % FDT_TAGSIZE) - || (fdt_next_tag(fdt, offset, &offset) != FDT_PROP)) - return -FDT_ERR_BADOFFSET; + if ( (offset < 0) || (offset % FDT_TAGSIZE) || + (fdt_next_tag(fdt, offset, &offset) != FDT_PROP) ) + return -FDT_ERR_BADOFFSET; - return offset; + return offset; } int fdt_next_node(const void *fdt, int offset, int *depth) { - int nextoffset = 0; - uint32_t tag; - - if (offset >= 0) - if ((nextoffset = _fdt_check_node_offset(fdt, offset)) < 0) - return nextoffset; - - do { - offset = nextoffset; - tag = fdt_next_tag(fdt, offset, &nextoffset); - - switch (tag) { - case FDT_PROP: - case FDT_NOP: - break; - - case FDT_BEGIN_NODE: - if (depth) - (*depth)++; - break; - - case FDT_END_NODE: - if (depth && ((--(*depth)) < 0)) - return nextoffset; - break; - - case FDT_END: - if ((nextoffset >= 0) - || ((nextoffset == -FDT_ERR_TRUNCATED) && !depth)) - return -FDT_ERR_NOTFOUND; - else - return nextoffset; - } - } while (tag != FDT_BEGIN_NODE); - - return offset; + int nextoffset = 0; + uint32_t tag; + + if ( offset >= 0 ) + if ( (nextoffset = _fdt_check_node_offset(fdt, offset)) < 0 ) + return nextoffset; + + do { + offset = nextoffset; + tag = fdt_next_tag(fdt, offset, &nextoffset); + + switch (tag) + { + case FDT_PROP: + case FDT_NOP: + break; + + case FDT_BEGIN_NODE: + if ( depth ) + (*depth)++; + break; + + case FDT_END_NODE: + if ( depth && ((--(*depth)) < 0) ) + return nextoffset; + break; + + case FDT_END: + if ( (nextoffset >= 0) || + ((nextoffset == -FDT_ERR_TRUNCATED) && !depth) ) + return -FDT_ERR_NOTFOUND; + else + return nextoffset; + } + } while ( tag != FDT_BEGIN_NODE ); + + return offset; } int fdt_first_subnode(const void *fdt, int offset) { - int depth = 0; + int depth = 0; - offset = fdt_next_node(fdt, offset, &depth); - if (offset < 0 || depth != 1) - return -FDT_ERR_NOTFOUND; + offset = fdt_next_node(fdt, offset, &depth); + if ( offset < 0 || depth != 1 ) + return -FDT_ERR_NOTFOUND; - return offset; + return offset; } int fdt_next_subnode(const void *fdt, int offset) { - int depth = 1; - - /* - * With respect to the parent, the depth of the next subnode will be - * the same as the last. - */ - do { - offset = fdt_next_node(fdt, offset, &depth); - if (offset < 0 || depth < 1) - return -FDT_ERR_NOTFOUND; - } while (depth > 1); - - return offset; + int depth = 1; + + /* + * With respect to the parent, the depth of the next subnode will be + * the same as the last. 
+ */ + do { + offset = fdt_next_node(fdt, offset, &depth); + if ( offset < 0 || depth < 1 ) + return -FDT_ERR_NOTFOUND; + } while ( depth > 1 ); + + return offset; } const char *_fdt_find_string(const char *strtab, int tabsize, const char *s) { - int len = strlen(s) + 1; - const char *last = strtab + tabsize - len; - const char *p; - - for (p = strtab; p <= last; p++) - if (memcmp(p, s, len) == 0) - return p; - return NULL; + int len = strlen(s) + 1; + const char *last = strtab + tabsize - len; + const char *p; + + for ( p = strtab; p <= last; p++ ) + if ( memcmp(p, s, len) == 0 ) + return p; + return NULL; } int fdt_move(const void *fdt, void *buf, int bufsize) { - FDT_CHECK_HEADER(fdt); + FDT_CHECK_HEADER(fdt); - if (fdt_totalsize(fdt) > bufsize) - return -FDT_ERR_NOSPACE; + if ( fdt_totalsize(fdt) > bufsize ) + return -FDT_ERR_NOSPACE; - memmove(buf, fdt, fdt_totalsize(fdt)); - return 0; + memmove(buf, fdt, fdt_totalsize(fdt)); + return 0; } diff --git a/xen/common/libfdt/fdt_empty_tree.c b/xen/common/libfdt/fdt_empty_tree.c index d50561101f..09a72c94fc 100644 --- a/xen/common/libfdt/fdt_empty_tree.c +++ b/xen/common/libfdt/fdt_empty_tree.c @@ -16,7 +16,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public - * License along with this library; If not, see . + * License along with this library; If not, see + * . * * Alternatively, * @@ -55,28 +56,27 @@ int fdt_create_empty_tree(void *buf, int bufsize) { - int err; + int err; - err = fdt_create(buf, bufsize); - if (err) - return err; + err = fdt_create(buf, bufsize); + if ( err ) + return err; - err = fdt_finish_reservemap(buf); - if (err) - return err; + err = fdt_finish_reservemap(buf); + if ( err ) + return err; - err = fdt_begin_node(buf, ""); - if (err) - return err; + err = fdt_begin_node(buf, ""); + if ( err ) + return err; - err = fdt_end_node(buf); - if (err) - return err; + err = fdt_end_node(buf); + if ( err ) + return err; - err = fdt_finish(buf); - if (err) - return err; + err = fdt_finish(buf); + if ( err ) + return err; - return fdt_open_into(buf, buf, bufsize); + return fdt_open_into(buf, buf, bufsize); } - diff --git a/xen/common/libfdt/fdt_ro.c b/xen/common/libfdt/fdt_ro.c index 36f9b480d1..9c21a642e4 100644 --- a/xen/common/libfdt/fdt_ro.c +++ b/xen/common/libfdt/fdt_ro.c @@ -16,7 +16,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public - * License along with this library; If not, see . + * License along with this library; If not, see + * . * * Alternatively, * @@ -53,519 +54,520 @@ #include "libfdt_internal.h" -static int _fdt_nodename_eq(const void *fdt, int offset, - const char *s, int len) +static int _fdt_nodename_eq(const void *fdt, int offset, const char *s, int len) { - const char *p = fdt_offset_ptr(fdt, offset + FDT_TAGSIZE, len+1); + const char *p = fdt_offset_ptr(fdt, offset + FDT_TAGSIZE, len + 1); - if (! 
p) - /* short match */ - return 0; + if ( !p ) + /* short match */ + return 0; - if (memcmp(p, s, len) != 0) - return 0; + if ( memcmp(p, s, len) != 0 ) + return 0; - if (p[len] == '\0') - return 1; - else if (!memchr(s, '@', len) && (p[len] == '@')) - return 1; - else - return 0; + if ( p[len] == '\0' ) + return 1; + else if ( !memchr(s, '@', len) && (p[len] == '@') ) + return 1; + else + return 0; } const char *fdt_string(const void *fdt, int stroffset) { - return (const char *)fdt + fdt_off_dt_strings(fdt) + stroffset; + return (const char *)fdt + fdt_off_dt_strings(fdt) + stroffset; } -static int _fdt_string_eq(const void *fdt, int stroffset, - const char *s, int len) +static int _fdt_string_eq(const void *fdt, int stroffset, const char *s, + int len) { - const char *p = fdt_string(fdt, stroffset); + const char *p = fdt_string(fdt, stroffset); - return (strlen(p) == len) && (memcmp(p, s, len) == 0); + return (strlen(p) == len) && (memcmp(p, s, len) == 0); } int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size) { - FDT_CHECK_HEADER(fdt); - *address = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->address); - *size = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->size); - return 0; + FDT_CHECK_HEADER(fdt); + *address = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->address); + *size = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->size); + return 0; } int fdt_num_mem_rsv(const void *fdt) { - int i = 0; + int i = 0; - while (fdt64_to_cpu(_fdt_mem_rsv(fdt, i)->size) != 0) - i++; - return i; + while ( fdt64_to_cpu(_fdt_mem_rsv(fdt, i)->size) != 0 ) + i++; + return i; } static int _nextprop(const void *fdt, int offset) { - uint32_t tag; - int nextoffset; - - do { - tag = fdt_next_tag(fdt, offset, &nextoffset); - - switch (tag) { - case FDT_END: - if (nextoffset >= 0) - return -FDT_ERR_BADSTRUCTURE; - else - return nextoffset; - - case FDT_PROP: - return offset; - } - offset = nextoffset; - } while (tag == FDT_NOP); - - return -FDT_ERR_NOTFOUND; + uint32_t tag; + int nextoffset; + + do { + tag = fdt_next_tag(fdt, offset, &nextoffset); + + switch (tag) + { + case FDT_END: + if ( nextoffset >= 0 ) + return -FDT_ERR_BADSTRUCTURE; + else + return nextoffset; + + case FDT_PROP: + return offset; + } + offset = nextoffset; + } while ( tag == FDT_NOP ); + + return -FDT_ERR_NOTFOUND; } -int fdt_subnode_offset_namelen(const void *fdt, int offset, - const char *name, int namelen) +int fdt_subnode_offset_namelen(const void *fdt, int offset, const char *name, + int namelen) { - int depth; + int depth; - FDT_CHECK_HEADER(fdt); + FDT_CHECK_HEADER(fdt); - for (depth = 0; - (offset >= 0) && (depth >= 0); - offset = fdt_next_node(fdt, offset, &depth)) - if ((depth == 1) - && _fdt_nodename_eq(fdt, offset, name, namelen)) - return offset; + for ( depth = 0; (offset >= 0) && (depth >= 0); + offset = fdt_next_node(fdt, offset, &depth) ) + if ( (depth == 1) && _fdt_nodename_eq(fdt, offset, name, namelen) ) + return offset; - if (depth < 0) - return -FDT_ERR_NOTFOUND; - return offset; /* error */ + if ( depth < 0 ) + return -FDT_ERR_NOTFOUND; + return offset; /* error */ } -int fdt_subnode_offset(const void *fdt, int parentoffset, - const char *name) +int fdt_subnode_offset(const void *fdt, int parentoffset, const char *name) { - return fdt_subnode_offset_namelen(fdt, parentoffset, name, strlen(name)); + return fdt_subnode_offset_namelen(fdt, parentoffset, name, strlen(name)); } int fdt_path_offset(const void *fdt, const char *path) { - const char *end = path + strlen(path); - const char *p = path; - int offset = 0; + const char *end = 
path + strlen(path); + const char *p = path; + int offset = 0; - FDT_CHECK_HEADER(fdt); + FDT_CHECK_HEADER(fdt); - /* see if we have an alias */ - if (*path != '/') { - const char *q = strchr(path, '/'); + /* see if we have an alias */ + if ( *path != '/' ) + { + const char *q = strchr(path, '/'); - if (!q) - q = end; + if ( !q ) + q = end; - p = fdt_get_alias_namelen(fdt, p, q - p); - if (!p) - return -FDT_ERR_BADPATH; - offset = fdt_path_offset(fdt, p); + p = fdt_get_alias_namelen(fdt, p, q - p); + if ( !p ) + return -FDT_ERR_BADPATH; + offset = fdt_path_offset(fdt, p); - p = q; - } + p = q; + } - while (*p) { - const char *q; + while ( *p ) + { + const char *q; - while (*p == '/') - p++; - if (! *p) - return offset; - q = strchr(p, '/'); - if (! q) - q = end; + while ( *p == '/' ) + p++; + if ( !*p ) + return offset; + q = strchr(p, '/'); + if ( !q ) + q = end; - offset = fdt_subnode_offset_namelen(fdt, offset, p, q-p); - if (offset < 0) - return offset; + offset = fdt_subnode_offset_namelen(fdt, offset, p, q - p); + if ( offset < 0 ) + return offset; - p = q; - } + p = q; + } - return offset; + return offset; } const char *fdt_get_name(const void *fdt, int nodeoffset, int *len) { - const struct fdt_node_header *nh = _fdt_offset_ptr(fdt, nodeoffset); - int err; + const struct fdt_node_header *nh = _fdt_offset_ptr(fdt, nodeoffset); + int err; - if (((err = fdt_check_header(fdt)) != 0) - || ((err = _fdt_check_node_offset(fdt, nodeoffset)) < 0)) - goto fail; + if ( ((err = fdt_check_header(fdt)) != 0) || + ((err = _fdt_check_node_offset(fdt, nodeoffset)) < 0) ) + goto fail; - if (len) - *len = strlen(nh->name); + if ( len ) + *len = strlen(nh->name); - return nh->name; + return nh->name; - fail: - if (len) - *len = err; - return NULL; +fail: + if ( len ) + *len = err; + return NULL; } int fdt_first_property_offset(const void *fdt, int nodeoffset) { - int offset; + int offset; - if ((offset = _fdt_check_node_offset(fdt, nodeoffset)) < 0) - return offset; + if ( (offset = _fdt_check_node_offset(fdt, nodeoffset)) < 0 ) + return offset; - return _nextprop(fdt, offset); + return _nextprop(fdt, offset); } int fdt_next_property_offset(const void *fdt, int offset) { - if ((offset = _fdt_check_prop_offset(fdt, offset)) < 0) - return offset; + if ( (offset = _fdt_check_prop_offset(fdt, offset)) < 0 ) + return offset; - return _nextprop(fdt, offset); + return _nextprop(fdt, offset); } const struct fdt_property *fdt_get_property_by_offset(const void *fdt, - int offset, - int *lenp) + int offset, int *lenp) { - int err; - const struct fdt_property *prop; + int err; + const struct fdt_property *prop; - if ((err = _fdt_check_prop_offset(fdt, offset)) < 0) { - if (lenp) - *lenp = err; - return NULL; - } + if ( (err = _fdt_check_prop_offset(fdt, offset)) < 0 ) + { + if ( lenp ) + *lenp = err; + return NULL; + } - prop = _fdt_offset_ptr(fdt, offset); + prop = _fdt_offset_ptr(fdt, offset); - if (lenp) - *lenp = fdt32_to_cpu(prop->len); + if ( lenp ) + *lenp = fdt32_to_cpu(prop->len); - return prop; + return prop; } -const struct fdt_property *fdt_get_property_namelen(const void *fdt, - int offset, - const char *name, - int namelen, int *lenp) +const struct fdt_property *fdt_get_property_namelen(const void *fdt, int offset, + const char *name, + int namelen, int *lenp) { - for (offset = fdt_first_property_offset(fdt, offset); - (offset >= 0); - (offset = fdt_next_property_offset(fdt, offset))) { - const struct fdt_property *prop; - - if (!(prop = fdt_get_property_by_offset(fdt, offset, lenp))) { - offset = 
-FDT_ERR_INTERNAL; - break; - } - if (_fdt_string_eq(fdt, fdt32_to_cpu(prop->nameoff), - name, namelen)) - return prop; - } - - if (lenp) - *lenp = offset; - return NULL; + for ( offset = fdt_first_property_offset(fdt, offset); (offset >= 0); + (offset = fdt_next_property_offset(fdt, offset)) ) + { + const struct fdt_property *prop; + + if ( !(prop = fdt_get_property_by_offset(fdt, offset, lenp)) ) + { + offset = -FDT_ERR_INTERNAL; + break; + } + if ( _fdt_string_eq(fdt, fdt32_to_cpu(prop->nameoff), name, namelen) ) + return prop; + } + + if ( lenp ) + *lenp = offset; + return NULL; } -const struct fdt_property *fdt_get_property(const void *fdt, - int nodeoffset, - const char *name, int *lenp) +const struct fdt_property *fdt_get_property(const void *fdt, int nodeoffset, + const char *name, int *lenp) { - return fdt_get_property_namelen(fdt, nodeoffset, name, - strlen(name), lenp); + return fdt_get_property_namelen(fdt, nodeoffset, name, strlen(name), lenp); } const void *fdt_getprop_namelen(const void *fdt, int nodeoffset, - const char *name, int namelen, int *lenp) + const char *name, int namelen, int *lenp) { - const struct fdt_property *prop; + const struct fdt_property *prop; - prop = fdt_get_property_namelen(fdt, nodeoffset, name, namelen, lenp); - if (! prop) - return NULL; + prop = fdt_get_property_namelen(fdt, nodeoffset, name, namelen, lenp); + if ( !prop ) + return NULL; - return prop->data; + return prop->data; } const void *fdt_getprop_by_offset(const void *fdt, int offset, - const char **namep, int *lenp) + const char **namep, int *lenp) { - const struct fdt_property *prop; - - prop = fdt_get_property_by_offset(fdt, offset, lenp); - if (!prop) - return NULL; - if (namep) - *namep = fdt_string(fdt, fdt32_to_cpu(prop->nameoff)); - return prop->data; + const struct fdt_property *prop; + + prop = fdt_get_property_by_offset(fdt, offset, lenp); + if ( !prop ) + return NULL; + if ( namep ) + *namep = fdt_string(fdt, fdt32_to_cpu(prop->nameoff)); + return prop->data; } -const void *fdt_getprop(const void *fdt, int nodeoffset, - const char *name, int *lenp) +const void *fdt_getprop(const void *fdt, int nodeoffset, const char *name, + int *lenp) { - return fdt_getprop_namelen(fdt, nodeoffset, name, strlen(name), lenp); + return fdt_getprop_namelen(fdt, nodeoffset, name, strlen(name), lenp); } uint32_t fdt_get_phandle(const void *fdt, int nodeoffset) { - const fdt32_t *php; - int len; - - /* FIXME: This is a bit sub-optimal, since we potentially scan - * over all the properties twice. */ - php = fdt_getprop(fdt, nodeoffset, "phandle", &len); - if (!php || (len != sizeof(*php))) { - php = fdt_getprop(fdt, nodeoffset, "linux,phandle", &len); - if (!php || (len != sizeof(*php))) - return 0; - } - - return fdt32_to_cpu(*php); + const fdt32_t *php; + int len; + + /* FIXME: This is a bit sub-optimal, since we potentially scan + * over all the properties twice. 
*/ + php = fdt_getprop(fdt, nodeoffset, "phandle", &len); + if ( !php || (len != sizeof(*php)) ) + { + php = fdt_getprop(fdt, nodeoffset, "linux,phandle", &len); + if ( !php || (len != sizeof(*php)) ) + return 0; + } + + return fdt32_to_cpu(*php); } -const char *fdt_get_alias_namelen(const void *fdt, - const char *name, int namelen) +const char *fdt_get_alias_namelen(const void *fdt, const char *name, + int namelen) { - int aliasoffset; + int aliasoffset; - aliasoffset = fdt_path_offset(fdt, "/aliases"); - if (aliasoffset < 0) - return NULL; + aliasoffset = fdt_path_offset(fdt, "/aliases"); + if ( aliasoffset < 0 ) + return NULL; - return fdt_getprop_namelen(fdt, aliasoffset, name, namelen, NULL); + return fdt_getprop_namelen(fdt, aliasoffset, name, namelen, NULL); } const char *fdt_get_alias(const void *fdt, const char *name) { - return fdt_get_alias_namelen(fdt, name, strlen(name)); + return fdt_get_alias_namelen(fdt, name, strlen(name)); } int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen) { - int pdepth = 0, p = 0; - int offset, depth, namelen; - const char *name; - - FDT_CHECK_HEADER(fdt); - - if (buflen < 2) - return -FDT_ERR_NOSPACE; - - for (offset = 0, depth = 0; - (offset >= 0) && (offset <= nodeoffset); - offset = fdt_next_node(fdt, offset, &depth)) { - while (pdepth > depth) { - do { - p--; - } while (buf[p-1] != '/'); - pdepth--; - } - - if (pdepth >= depth) { - name = fdt_get_name(fdt, offset, &namelen); - if (!name) - return namelen; - if ((p + namelen + 1) <= buflen) { - memcpy(buf + p, name, namelen); - p += namelen; - buf[p++] = '/'; - pdepth++; - } - } - - if (offset == nodeoffset) { - if (pdepth < (depth + 1)) - return -FDT_ERR_NOSPACE; - - if (p > 1) /* special case so that root path is "/", not "" */ - p--; - buf[p] = '\0'; - return 0; - } - } - - if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0)) - return -FDT_ERR_BADOFFSET; - else if (offset == -FDT_ERR_BADOFFSET) - return -FDT_ERR_BADSTRUCTURE; - - return offset; /* error from fdt_next_node() */ + int pdepth = 0, p = 0; + int offset, depth, namelen; + const char *name; + + FDT_CHECK_HEADER(fdt); + + if ( buflen < 2 ) + return -FDT_ERR_NOSPACE; + + for ( offset = 0, depth = 0; (offset >= 0) && (offset <= nodeoffset); + offset = fdt_next_node(fdt, offset, &depth) ) + { + while ( pdepth > depth ) + { + do { + p--; + } while ( buf[p - 1] != '/' ); + pdepth--; + } + + if ( pdepth >= depth ) + { + name = fdt_get_name(fdt, offset, &namelen); + if ( !name ) + return namelen; + if ( (p + namelen + 1) <= buflen ) + { + memcpy(buf + p, name, namelen); + p += namelen; + buf[p++] = '/'; + pdepth++; + } + } + + if ( offset == nodeoffset ) + { + if ( pdepth < (depth + 1) ) + return -FDT_ERR_NOSPACE; + + if ( p > 1 ) /* special case so that root path is "/", not "" */ + p--; + buf[p] = '\0'; + return 0; + } + } + + if ( (offset == -FDT_ERR_NOTFOUND) || (offset >= 0) ) + return -FDT_ERR_BADOFFSET; + else if ( offset == -FDT_ERR_BADOFFSET ) + return -FDT_ERR_BADSTRUCTURE; + + return offset; /* error from fdt_next_node() */ } int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset, - int supernodedepth, int *nodedepth) + int supernodedepth, int *nodedepth) { - int offset, depth; - int supernodeoffset = -FDT_ERR_INTERNAL; - - FDT_CHECK_HEADER(fdt); - - if (supernodedepth < 0) - return -FDT_ERR_NOTFOUND; - - for (offset = 0, depth = 0; - (offset >= 0) && (offset <= nodeoffset); - offset = fdt_next_node(fdt, offset, &depth)) { - if (depth == supernodedepth) - supernodeoffset = offset; - - if (offset == 
nodeoffset) { - if (nodedepth) - *nodedepth = depth; - - if (supernodedepth > depth) - return -FDT_ERR_NOTFOUND; - else - return supernodeoffset; - } - } - - if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0)) - return -FDT_ERR_BADOFFSET; - else if (offset == -FDT_ERR_BADOFFSET) - return -FDT_ERR_BADSTRUCTURE; - - return offset; /* error from fdt_next_node() */ + int offset, depth; + int supernodeoffset = -FDT_ERR_INTERNAL; + + FDT_CHECK_HEADER(fdt); + + if ( supernodedepth < 0 ) + return -FDT_ERR_NOTFOUND; + + for ( offset = 0, depth = 0; (offset >= 0) && (offset <= nodeoffset); + offset = fdt_next_node(fdt, offset, &depth) ) + { + if ( depth == supernodedepth ) + supernodeoffset = offset; + + if ( offset == nodeoffset ) + { + if ( nodedepth ) + *nodedepth = depth; + + if ( supernodedepth > depth ) + return -FDT_ERR_NOTFOUND; + else + return supernodeoffset; + } + } + + if ( (offset == -FDT_ERR_NOTFOUND) || (offset >= 0) ) + return -FDT_ERR_BADOFFSET; + else if ( offset == -FDT_ERR_BADOFFSET ) + return -FDT_ERR_BADSTRUCTURE; + + return offset; /* error from fdt_next_node() */ } int fdt_node_depth(const void *fdt, int nodeoffset) { - int nodedepth; - int err; + int nodedepth; + int err; - err = fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, &nodedepth); - if (err) - return (err < 0) ? err : -FDT_ERR_INTERNAL; - return nodedepth; + err = fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, &nodedepth); + if ( err ) + return (err < 0) ? err : -FDT_ERR_INTERNAL; + return nodedepth; } int fdt_parent_offset(const void *fdt, int nodeoffset) { - int nodedepth = fdt_node_depth(fdt, nodeoffset); + int nodedepth = fdt_node_depth(fdt, nodeoffset); - if (nodedepth < 0) - return nodedepth; - return fdt_supernode_atdepth_offset(fdt, nodeoffset, - nodedepth - 1, NULL); + if ( nodedepth < 0 ) + return nodedepth; + return fdt_supernode_atdepth_offset(fdt, nodeoffset, nodedepth - 1, NULL); } int fdt_node_offset_by_prop_value(const void *fdt, int startoffset, - const char *propname, - const void *propval, int proplen) + const char *propname, const void *propval, + int proplen) { - int offset; - const void *val; - int len; - - FDT_CHECK_HEADER(fdt); - - /* FIXME: The algorithm here is pretty horrible: we scan each - * property of a node in fdt_getprop(), then if that didn't - * find what we want, we scan over them again making our way - * to the next node. Still it's the easiest to implement - * approach; performance can come later. */ - for (offset = fdt_next_node(fdt, startoffset, NULL); - offset >= 0; - offset = fdt_next_node(fdt, offset, NULL)) { - val = fdt_getprop(fdt, offset, propname, &len); - if (val && (len == proplen) - && (memcmp(val, propval, len) == 0)) - return offset; - } - - return offset; /* error from fdt_next_node() */ + int offset; + const void *val; + int len; + + FDT_CHECK_HEADER(fdt); + + /* FIXME: The algorithm here is pretty horrible: we scan each + * property of a node in fdt_getprop(), then if that didn't + * find what we want, we scan over them again making our way + * to the next node. Still it's the easiest to implement + * approach; performance can come later. 
*/ + for ( offset = fdt_next_node(fdt, startoffset, NULL); offset >= 0; + offset = fdt_next_node(fdt, offset, NULL) ) + { + val = fdt_getprop(fdt, offset, propname, &len); + if ( val && (len == proplen) && (memcmp(val, propval, len) == 0) ) + return offset; + } + + return offset; /* error from fdt_next_node() */ } int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle) { - int offset; - - if ((phandle == 0) || (phandle == -1)) - return -FDT_ERR_BADPHANDLE; - - FDT_CHECK_HEADER(fdt); - - /* FIXME: The algorithm here is pretty horrible: we - * potentially scan each property of a node in - * fdt_get_phandle(), then if that didn't find what - * we want, we scan over them again making our way to the next - * node. Still it's the easiest to implement approach; - * performance can come later. */ - for (offset = fdt_next_node(fdt, -1, NULL); - offset >= 0; - offset = fdt_next_node(fdt, offset, NULL)) { - if (fdt_get_phandle(fdt, offset) == phandle) - return offset; - } - - return offset; /* error from fdt_next_node() */ + int offset; + + if ( (phandle == 0) || (phandle == -1) ) + return -FDT_ERR_BADPHANDLE; + + FDT_CHECK_HEADER(fdt); + + /* FIXME: The algorithm here is pretty horrible: we + * potentially scan each property of a node in + * fdt_get_phandle(), then if that didn't find what + * we want, we scan over them again making our way to the next + * node. Still it's the easiest to implement approach; + * performance can come later. */ + for ( offset = fdt_next_node(fdt, -1, NULL); offset >= 0; + offset = fdt_next_node(fdt, offset, NULL) ) + { + if ( fdt_get_phandle(fdt, offset) == phandle ) + return offset; + } + + return offset; /* error from fdt_next_node() */ } int fdt_stringlist_contains(const char *strlist, int listlen, const char *str) { - int len = strlen(str); - const char *p; - - while (listlen >= len) { - if (memcmp(str, strlist, len+1) == 0) - return 1; - p = memchr(strlist, '\0', listlen); - if (!p) - return 0; /* malformed strlist.. */ - listlen -= (p-strlist) + 1; - strlist = p + 1; - } - return 0; + int len = strlen(str); + const char *p; + + while ( listlen >= len ) + { + if ( memcmp(str, strlist, len + 1) == 0 ) + return 1; + p = memchr(strlist, '\0', listlen); + if ( !p ) + return 0; /* malformed strlist.. */ + listlen -= (p - strlist) + 1; + strlist = p + 1; + } + return 0; } int fdt_node_check_compatible(const void *fdt, int nodeoffset, - const char *compatible) + const char *compatible) { - const void *prop; - int len; - - prop = fdt_getprop(fdt, nodeoffset, "compatible", &len); - if (!prop) - return len; - if (fdt_stringlist_contains(prop, len, compatible)) - return 0; - else - return 1; + const void *prop; + int len; + + prop = fdt_getprop(fdt, nodeoffset, "compatible", &len); + if ( !prop ) + return len; + if ( fdt_stringlist_contains(prop, len, compatible) ) + return 0; + else + return 1; } int fdt_node_offset_by_compatible(const void *fdt, int startoffset, - const char *compatible) + const char *compatible) { - int offset, err; - - FDT_CHECK_HEADER(fdt); - - /* FIXME: The algorithm here is pretty horrible: we scan each - * property of a node in fdt_node_check_compatible(), then if - * that didn't find what we want, we scan over them again - * making our way to the next node. Still it's the easiest to - * implement approach; performance can come later. 
*/ - for (offset = fdt_next_node(fdt, startoffset, NULL); - offset >= 0; - offset = fdt_next_node(fdt, offset, NULL)) { - err = fdt_node_check_compatible(fdt, offset, compatible); - if ((err < 0) && (err != -FDT_ERR_NOTFOUND)) - return err; - else if (err == 0) - return offset; - } - - return offset; /* error from fdt_next_node() */ + int offset, err; + + FDT_CHECK_HEADER(fdt); + + /* FIXME: The algorithm here is pretty horrible: we scan each + * property of a node in fdt_node_check_compatible(), then if + * that didn't find what we want, we scan over them again + * making our way to the next node. Still it's the easiest to + * implement approach; performance can come later. */ + for ( offset = fdt_next_node(fdt, startoffset, NULL); offset >= 0; + offset = fdt_next_node(fdt, offset, NULL) ) + { + err = fdt_node_check_compatible(fdt, offset, compatible); + if ( (err < 0) && (err != -FDT_ERR_NOTFOUND) ) + return err; + else if ( err == 0 ) + return offset; + } + + return offset; /* error from fdt_next_node() */ } diff --git a/xen/common/libfdt/fdt_rw.c b/xen/common/libfdt/fdt_rw.c index ee18bfc298..f90f907327 100644 --- a/xen/common/libfdt/fdt_rw.c +++ b/xen/common/libfdt/fdt_rw.c @@ -16,7 +16,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public - * License along with this library; If not, see . + * License along with this library; If not, see + * . * * Alternatively, * @@ -53,438 +54,443 @@ #include "libfdt_internal.h" -static int _fdt_blocks_misordered(const void *fdt, - int mem_rsv_size, int struct_size) +static int _fdt_blocks_misordered(const void *fdt, int mem_rsv_size, + int struct_size) { - return (fdt_off_mem_rsvmap(fdt) < FDT_ALIGN(sizeof(struct fdt_header), 8)) - || (fdt_off_dt_struct(fdt) < - (fdt_off_mem_rsvmap(fdt) + mem_rsv_size)) - || (fdt_off_dt_strings(fdt) < - (fdt_off_dt_struct(fdt) + struct_size)) - || (fdt_totalsize(fdt) < - (fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt))); + return (fdt_off_mem_rsvmap(fdt) < + FDT_ALIGN(sizeof(struct fdt_header), 8)) || + (fdt_off_dt_struct(fdt) < + (fdt_off_mem_rsvmap(fdt) + mem_rsv_size)) || + (fdt_off_dt_strings(fdt) < (fdt_off_dt_struct(fdt) + struct_size)) || + (fdt_totalsize(fdt) < + (fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt))); } static int _fdt_rw_check_header(void *fdt) { - FDT_CHECK_HEADER(fdt); + FDT_CHECK_HEADER(fdt); - if (fdt_version(fdt) < 17) - return -FDT_ERR_BADVERSION; - if (_fdt_blocks_misordered(fdt, sizeof(struct fdt_reserve_entry), - fdt_size_dt_struct(fdt))) - return -FDT_ERR_BADLAYOUT; - if (fdt_version(fdt) > 17) - fdt_set_version(fdt, 17); + if ( fdt_version(fdt) < 17 ) + return -FDT_ERR_BADVERSION; + if ( _fdt_blocks_misordered(fdt, sizeof(struct fdt_reserve_entry), + fdt_size_dt_struct(fdt)) ) + return -FDT_ERR_BADLAYOUT; + if ( fdt_version(fdt) > 17 ) + fdt_set_version(fdt, 17); - return 0; + return 0; } -#define FDT_RW_CHECK_HEADER(fdt) \ - { \ - int err; \ - if ((err = _fdt_rw_check_header(fdt)) != 0) \ - return err; \ - } +#define FDT_RW_CHECK_HEADER(fdt) \ + { \ + int err; \ + if ( (err = _fdt_rw_check_header(fdt)) != 0 ) \ + return err; \ + } static inline int _fdt_data_size(void *fdt) { - return fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt); + return fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt); } static int _fdt_splice(void *fdt, void *splicepoint, int oldlen, int newlen) { - char *p = splicepoint; - char *end = (char *)fdt + _fdt_data_size(fdt); - - if (((p + oldlen) < p) || ((p + oldlen) > end)) - return 
-FDT_ERR_BADOFFSET; - if ((end - oldlen + newlen) > ((char *)fdt + fdt_totalsize(fdt))) - return -FDT_ERR_NOSPACE; - memmove(p + newlen, p + oldlen, end - p - oldlen); - return 0; + char *p = splicepoint; + char *end = (char *)fdt + _fdt_data_size(fdt); + + if ( ((p + oldlen) < p) || ((p + oldlen) > end) ) + return -FDT_ERR_BADOFFSET; + if ( (end - oldlen + newlen) > ((char *)fdt + fdt_totalsize(fdt)) ) + return -FDT_ERR_NOSPACE; + memmove(p + newlen, p + oldlen, end - p - oldlen); + return 0; } -static int _fdt_splice_mem_rsv(void *fdt, struct fdt_reserve_entry *p, - int oldn, int newn) +static int _fdt_splice_mem_rsv(void *fdt, struct fdt_reserve_entry *p, int oldn, + int newn) { - int delta = (newn - oldn) * sizeof(*p); - int err; - err = _fdt_splice(fdt, p, oldn * sizeof(*p), newn * sizeof(*p)); - if (err) - return err; - fdt_set_off_dt_struct(fdt, fdt_off_dt_struct(fdt) + delta); - fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta); - return 0; + int delta = (newn - oldn) * sizeof(*p); + int err; + err = _fdt_splice(fdt, p, oldn * sizeof(*p), newn * sizeof(*p)); + if ( err ) + return err; + fdt_set_off_dt_struct(fdt, fdt_off_dt_struct(fdt) + delta); + fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta); + return 0; } -static int _fdt_splice_struct(void *fdt, void *p, - int oldlen, int newlen) +static int _fdt_splice_struct(void *fdt, void *p, int oldlen, int newlen) { - int delta = newlen - oldlen; - int err; + int delta = newlen - oldlen; + int err; - if ((err = _fdt_splice(fdt, p, oldlen, newlen))) - return err; + if ( (err = _fdt_splice(fdt, p, oldlen, newlen)) ) + return err; - fdt_set_size_dt_struct(fdt, fdt_size_dt_struct(fdt) + delta); - fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta); - return 0; + fdt_set_size_dt_struct(fdt, fdt_size_dt_struct(fdt) + delta); + fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta); + return 0; } static int _fdt_splice_string(void *fdt, int newlen) { - void *p = (char *)fdt - + fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt); - int err; + void *p = (char *)fdt + fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt); + int err; - if ((err = _fdt_splice(fdt, p, 0, newlen))) - return err; + if ( (err = _fdt_splice(fdt, p, 0, newlen)) ) + return err; - fdt_set_size_dt_strings(fdt, fdt_size_dt_strings(fdt) + newlen); - return 0; + fdt_set_size_dt_strings(fdt, fdt_size_dt_strings(fdt) + newlen); + return 0; } static int _fdt_find_add_string(void *fdt, const char *s) { - char *strtab = (char *)fdt + fdt_off_dt_strings(fdt); - const char *p; - char *new; - int len = strlen(s) + 1; - int err; - - p = _fdt_find_string(strtab, fdt_size_dt_strings(fdt), s); - if (p) - /* found it */ - return (p - strtab); - - new = strtab + fdt_size_dt_strings(fdt); - err = _fdt_splice_string(fdt, len); - if (err) - return err; - - memcpy(new, s, len); - return (new - strtab); + char *strtab = (char *)fdt + fdt_off_dt_strings(fdt); + const char *p; + char *new; + int len = strlen(s) + 1; + int err; + + p = _fdt_find_string(strtab, fdt_size_dt_strings(fdt), s); + if ( p ) + /* found it */ + return (p - strtab); + + new = strtab + fdt_size_dt_strings(fdt); + err = _fdt_splice_string(fdt, len); + if ( err ) + return err; + + memcpy(new, s, len); + return (new - strtab); } int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size) { - struct fdt_reserve_entry *re; - int err; + struct fdt_reserve_entry *re; + int err; - FDT_RW_CHECK_HEADER(fdt); + FDT_RW_CHECK_HEADER(fdt); - re = _fdt_mem_rsv_w(fdt, fdt_num_mem_rsv(fdt)); - err = 
_fdt_splice_mem_rsv(fdt, re, 0, 1); - if (err) - return err; + re = _fdt_mem_rsv_w(fdt, fdt_num_mem_rsv(fdt)); + err = _fdt_splice_mem_rsv(fdt, re, 0, 1); + if ( err ) + return err; - re->address = cpu_to_fdt64(address); - re->size = cpu_to_fdt64(size); - return 0; + re->address = cpu_to_fdt64(address); + re->size = cpu_to_fdt64(size); + return 0; } int fdt_del_mem_rsv(void *fdt, int n) { - struct fdt_reserve_entry *re = _fdt_mem_rsv_w(fdt, n); - int err; + struct fdt_reserve_entry *re = _fdt_mem_rsv_w(fdt, n); + int err; - FDT_RW_CHECK_HEADER(fdt); + FDT_RW_CHECK_HEADER(fdt); - if (n >= fdt_num_mem_rsv(fdt)) - return -FDT_ERR_NOTFOUND; + if ( n >= fdt_num_mem_rsv(fdt) ) + return -FDT_ERR_NOTFOUND; - err = _fdt_splice_mem_rsv(fdt, re, 1, 0); - if (err) - return err; - return 0; + err = _fdt_splice_mem_rsv(fdt, re, 1, 0); + if ( err ) + return err; + return 0; } static int _fdt_resize_property(void *fdt, int nodeoffset, const char *name, - int len, struct fdt_property **prop) + int len, struct fdt_property **prop) { - int oldlen; - int err; + int oldlen; + int err; - *prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen); - if (! (*prop)) - return oldlen; + *prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen); + if ( !(*prop) ) + return oldlen; - if ((err = _fdt_splice_struct(fdt, (*prop)->data, FDT_TAGALIGN(oldlen), - FDT_TAGALIGN(len)))) - return err; + if ( (err = _fdt_splice_struct(fdt, (*prop)->data, FDT_TAGALIGN(oldlen), + FDT_TAGALIGN(len))) ) + return err; - (*prop)->len = cpu_to_fdt32(len); - return 0; + (*prop)->len = cpu_to_fdt32(len); + return 0; } static int _fdt_add_property(void *fdt, int nodeoffset, const char *name, - int len, struct fdt_property **prop) + int len, struct fdt_property **prop) { - int proplen; - int nextoffset; - int namestroff; - int err; + int proplen; + int nextoffset; + int namestroff; + int err; - if ((nextoffset = _fdt_check_node_offset(fdt, nodeoffset)) < 0) - return nextoffset; + if ( (nextoffset = _fdt_check_node_offset(fdt, nodeoffset)) < 0 ) + return nextoffset; - namestroff = _fdt_find_add_string(fdt, name); - if (namestroff < 0) - return namestroff; + namestroff = _fdt_find_add_string(fdt, name); + if ( namestroff < 0 ) + return namestroff; - *prop = _fdt_offset_ptr_w(fdt, nextoffset); - proplen = sizeof(**prop) + FDT_TAGALIGN(len); + *prop = _fdt_offset_ptr_w(fdt, nextoffset); + proplen = sizeof(**prop) + FDT_TAGALIGN(len); - err = _fdt_splice_struct(fdt, *prop, 0, proplen); - if (err) - return err; + err = _fdt_splice_struct(fdt, *prop, 0, proplen); + if ( err ) + return err; - (*prop)->tag = cpu_to_fdt32(FDT_PROP); - (*prop)->nameoff = cpu_to_fdt32(namestroff); - (*prop)->len = cpu_to_fdt32(len); - return 0; + (*prop)->tag = cpu_to_fdt32(FDT_PROP); + (*prop)->nameoff = cpu_to_fdt32(namestroff); + (*prop)->len = cpu_to_fdt32(len); + return 0; } int fdt_set_name(void *fdt, int nodeoffset, const char *name) { - char *namep; - int oldlen, newlen; - int err; + char *namep; + int oldlen, newlen; + int err; - FDT_RW_CHECK_HEADER(fdt); + FDT_RW_CHECK_HEADER(fdt); - namep = (char *)(uintptr_t)fdt_get_name(fdt, nodeoffset, &oldlen); - if (!namep) - return oldlen; + namep = (char *)(uintptr_t)fdt_get_name(fdt, nodeoffset, &oldlen); + if ( !namep ) + return oldlen; - newlen = strlen(name); + newlen = strlen(name); - err = _fdt_splice_struct(fdt, namep, FDT_TAGALIGN(oldlen+1), - FDT_TAGALIGN(newlen+1)); - if (err) - return err; + err = _fdt_splice_struct(fdt, namep, FDT_TAGALIGN(oldlen + 1), + FDT_TAGALIGN(newlen + 1)); + if ( err ) + return 
err; - memcpy(namep, name, newlen+1); - return 0; + memcpy(namep, name, newlen + 1); + return 0; } -int fdt_setprop(void *fdt, int nodeoffset, const char *name, - const void *val, int len) +int fdt_setprop(void *fdt, int nodeoffset, const char *name, const void *val, + int len) { - struct fdt_property *prop; - int err; + struct fdt_property *prop; + int err; - FDT_RW_CHECK_HEADER(fdt); + FDT_RW_CHECK_HEADER(fdt); - err = _fdt_resize_property(fdt, nodeoffset, name, len, &prop); - if (err == -FDT_ERR_NOTFOUND) - err = _fdt_add_property(fdt, nodeoffset, name, len, &prop); - if (err) - return err; + err = _fdt_resize_property(fdt, nodeoffset, name, len, &prop); + if ( err == -FDT_ERR_NOTFOUND ) + err = _fdt_add_property(fdt, nodeoffset, name, len, &prop); + if ( err ) + return err; - memcpy(prop->data, val, len); - return 0; + memcpy(prop->data, val, len); + return 0; } -int fdt_appendprop(void *fdt, int nodeoffset, const char *name, - const void *val, int len) +int fdt_appendprop(void *fdt, int nodeoffset, const char *name, const void *val, + int len) { - struct fdt_property *prop; - int err, oldlen, newlen; - - FDT_RW_CHECK_HEADER(fdt); - - prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen); - if (prop) { - newlen = len + oldlen; - err = _fdt_splice_struct(fdt, prop->data, - FDT_TAGALIGN(oldlen), - FDT_TAGALIGN(newlen)); - if (err) - return err; - prop->len = cpu_to_fdt32(newlen); - memcpy(prop->data + oldlen, val, len); - } else { - err = _fdt_add_property(fdt, nodeoffset, name, len, &prop); - if (err) - return err; - memcpy(prop->data, val, len); - } - return 0; + struct fdt_property *prop; + int err, oldlen, newlen; + + FDT_RW_CHECK_HEADER(fdt); + + prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen); + if ( prop ) + { + newlen = len + oldlen; + err = _fdt_splice_struct(fdt, prop->data, FDT_TAGALIGN(oldlen), + FDT_TAGALIGN(newlen)); + if ( err ) + return err; + prop->len = cpu_to_fdt32(newlen); + memcpy(prop->data + oldlen, val, len); + } + else + { + err = _fdt_add_property(fdt, nodeoffset, name, len, &prop); + if ( err ) + return err; + memcpy(prop->data, val, len); + } + return 0; } int fdt_delprop(void *fdt, int nodeoffset, const char *name) { - struct fdt_property *prop; - int len, proplen; + struct fdt_property *prop; + int len, proplen; - FDT_RW_CHECK_HEADER(fdt); + FDT_RW_CHECK_HEADER(fdt); - prop = fdt_get_property_w(fdt, nodeoffset, name, &len); - if (! 
prop) - return len; + prop = fdt_get_property_w(fdt, nodeoffset, name, &len); + if ( !prop ) + return len; - proplen = sizeof(*prop) + FDT_TAGALIGN(len); - return _fdt_splice_struct(fdt, prop, proplen, 0); + proplen = sizeof(*prop) + FDT_TAGALIGN(len); + return _fdt_splice_struct(fdt, prop, proplen, 0); } -int fdt_add_subnode_namelen(void *fdt, int parentoffset, - const char *name, int namelen) +int fdt_add_subnode_namelen(void *fdt, int parentoffset, const char *name, + int namelen) { - struct fdt_node_header *nh; - int offset, nextoffset; - int nodelen; - int err; - uint32_t tag; - fdt32_t *endtag; - - FDT_RW_CHECK_HEADER(fdt); - - offset = fdt_subnode_offset_namelen(fdt, parentoffset, name, namelen); - if (offset >= 0) - return -FDT_ERR_EXISTS; - else if (offset != -FDT_ERR_NOTFOUND) - return offset; - - /* Try to place the new node after the parent's properties */ - fdt_next_tag(fdt, parentoffset, &nextoffset); /* skip the BEGIN_NODE */ - do { - offset = nextoffset; - tag = fdt_next_tag(fdt, offset, &nextoffset); - } while ((tag == FDT_PROP) || (tag == FDT_NOP)); - - nh = _fdt_offset_ptr_w(fdt, offset); - nodelen = sizeof(*nh) + FDT_TAGALIGN(namelen+1) + FDT_TAGSIZE; - - err = _fdt_splice_struct(fdt, nh, 0, nodelen); - if (err) - return err; - - nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE); - memset(nh->name, 0, FDT_TAGALIGN(namelen+1)); - memcpy(nh->name, name, namelen); - endtag = (fdt32_t *)((char *)nh + nodelen - FDT_TAGSIZE); - *endtag = cpu_to_fdt32(FDT_END_NODE); - - return offset; + struct fdt_node_header *nh; + int offset, nextoffset; + int nodelen; + int err; + uint32_t tag; + fdt32_t *endtag; + + FDT_RW_CHECK_HEADER(fdt); + + offset = fdt_subnode_offset_namelen(fdt, parentoffset, name, namelen); + if ( offset >= 0 ) + return -FDT_ERR_EXISTS; + else if ( offset != -FDT_ERR_NOTFOUND ) + return offset; + + /* Try to place the new node after the parent's properties */ + fdt_next_tag(fdt, parentoffset, &nextoffset); /* skip the BEGIN_NODE */ + do { + offset = nextoffset; + tag = fdt_next_tag(fdt, offset, &nextoffset); + } while ( (tag == FDT_PROP) || (tag == FDT_NOP) ); + + nh = _fdt_offset_ptr_w(fdt, offset); + nodelen = sizeof(*nh) + FDT_TAGALIGN(namelen + 1) + FDT_TAGSIZE; + + err = _fdt_splice_struct(fdt, nh, 0, nodelen); + if ( err ) + return err; + + nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE); + memset(nh->name, 0, FDT_TAGALIGN(namelen + 1)); + memcpy(nh->name, name, namelen); + endtag = (fdt32_t *)((char *)nh + nodelen - FDT_TAGSIZE); + *endtag = cpu_to_fdt32(FDT_END_NODE); + + return offset; } int fdt_add_subnode(void *fdt, int parentoffset, const char *name) { - return fdt_add_subnode_namelen(fdt, parentoffset, name, strlen(name)); + return fdt_add_subnode_namelen(fdt, parentoffset, name, strlen(name)); } int fdt_del_node(void *fdt, int nodeoffset) { - int endoffset; + int endoffset; - FDT_RW_CHECK_HEADER(fdt); + FDT_RW_CHECK_HEADER(fdt); - endoffset = _fdt_node_end_offset(fdt, nodeoffset); - if (endoffset < 0) - return endoffset; + endoffset = _fdt_node_end_offset(fdt, nodeoffset); + if ( endoffset < 0 ) + return endoffset; - return _fdt_splice_struct(fdt, _fdt_offset_ptr_w(fdt, nodeoffset), - endoffset - nodeoffset, 0); + return _fdt_splice_struct(fdt, _fdt_offset_ptr_w(fdt, nodeoffset), + endoffset - nodeoffset, 0); } -static void _fdt_packblocks(const char *old, char *new, - int mem_rsv_size, int struct_size) +static void _fdt_packblocks(const char *old, char *new, int mem_rsv_size, + int struct_size) { - int mem_rsv_off, struct_off, strings_off; + int mem_rsv_off, struct_off, 
strings_off; - mem_rsv_off = FDT_ALIGN(sizeof(struct fdt_header), 8); - struct_off = mem_rsv_off + mem_rsv_size; - strings_off = struct_off + struct_size; + mem_rsv_off = FDT_ALIGN(sizeof(struct fdt_header), 8); + struct_off = mem_rsv_off + mem_rsv_size; + strings_off = struct_off + struct_size; - memmove(new + mem_rsv_off, old + fdt_off_mem_rsvmap(old), mem_rsv_size); - fdt_set_off_mem_rsvmap(new, mem_rsv_off); + memmove(new + mem_rsv_off, old + fdt_off_mem_rsvmap(old), mem_rsv_size); + fdt_set_off_mem_rsvmap(new, mem_rsv_off); - memmove(new + struct_off, old + fdt_off_dt_struct(old), struct_size); - fdt_set_off_dt_struct(new, struct_off); - fdt_set_size_dt_struct(new, struct_size); + memmove(new + struct_off, old + fdt_off_dt_struct(old), struct_size); + fdt_set_off_dt_struct(new, struct_off); + fdt_set_size_dt_struct(new, struct_size); - memmove(new + strings_off, old + fdt_off_dt_strings(old), - fdt_size_dt_strings(old)); - fdt_set_off_dt_strings(new, strings_off); - fdt_set_size_dt_strings(new, fdt_size_dt_strings(old)); + memmove(new + strings_off, old + fdt_off_dt_strings(old), + fdt_size_dt_strings(old)); + fdt_set_off_dt_strings(new, strings_off); + fdt_set_size_dt_strings(new, fdt_size_dt_strings(old)); } int fdt_open_into(const void *fdt, void *buf, int bufsize) { - int err; - int mem_rsv_size, struct_size; - int newsize; - const char *fdtstart = fdt; - const char *fdtend = fdtstart + fdt_totalsize(fdt); - char *tmp; - - FDT_CHECK_HEADER(fdt); - - mem_rsv_size = (fdt_num_mem_rsv(fdt)+1) - * sizeof(struct fdt_reserve_entry); - - if (fdt_version(fdt) >= 17) { - struct_size = fdt_size_dt_struct(fdt); - } else { - struct_size = 0; - while (fdt_next_tag(fdt, struct_size, &struct_size) != FDT_END) - ; - if (struct_size < 0) - return struct_size; - } - - if (!_fdt_blocks_misordered(fdt, mem_rsv_size, struct_size)) { - /* no further work necessary */ - err = fdt_move(fdt, buf, bufsize); - if (err) - return err; - fdt_set_version(buf, 17); - fdt_set_size_dt_struct(buf, struct_size); - fdt_set_totalsize(buf, bufsize); - return 0; - } - - /* Need to reorder */ - newsize = FDT_ALIGN(sizeof(struct fdt_header), 8) + mem_rsv_size - + struct_size + fdt_size_dt_strings(fdt); - - if (bufsize < newsize) - return -FDT_ERR_NOSPACE; - - /* First attempt to build converted tree at beginning of buffer */ - tmp = buf; - /* But if that overlaps with the old tree... 
*/ - if (((tmp + newsize) > fdtstart) && (tmp < fdtend)) { - /* Try right after the old tree instead */ - tmp = (char *)(uintptr_t)fdtend; - if ((tmp + newsize) > ((char *)buf + bufsize)) - return -FDT_ERR_NOSPACE; - } - - _fdt_packblocks(fdt, tmp, mem_rsv_size, struct_size); - memmove(buf, tmp, newsize); - - fdt_set_magic(buf, FDT_MAGIC); - fdt_set_totalsize(buf, bufsize); - fdt_set_version(buf, 17); - fdt_set_last_comp_version(buf, 16); - fdt_set_boot_cpuid_phys(buf, fdt_boot_cpuid_phys(fdt)); - - return 0; + int err; + int mem_rsv_size, struct_size; + int newsize; + const char *fdtstart = fdt; + const char *fdtend = fdtstart + fdt_totalsize(fdt); + char *tmp; + + FDT_CHECK_HEADER(fdt); + + mem_rsv_size = + (fdt_num_mem_rsv(fdt) + 1) * sizeof(struct fdt_reserve_entry); + + if ( fdt_version(fdt) >= 17 ) + { + struct_size = fdt_size_dt_struct(fdt); + } + else + { + struct_size = 0; + while ( fdt_next_tag(fdt, struct_size, &struct_size) != FDT_END ) + ; + if ( struct_size < 0 ) + return struct_size; + } + + if ( !_fdt_blocks_misordered(fdt, mem_rsv_size, struct_size) ) + { + /* no further work necessary */ + err = fdt_move(fdt, buf, bufsize); + if ( err ) + return err; + fdt_set_version(buf, 17); + fdt_set_size_dt_struct(buf, struct_size); + fdt_set_totalsize(buf, bufsize); + return 0; + } + + /* Need to reorder */ + newsize = FDT_ALIGN(sizeof(struct fdt_header), 8) + mem_rsv_size + + struct_size + fdt_size_dt_strings(fdt); + + if ( bufsize < newsize ) + return -FDT_ERR_NOSPACE; + + /* First attempt to build converted tree at beginning of buffer */ + tmp = buf; + /* But if that overlaps with the old tree... */ + if ( ((tmp + newsize) > fdtstart) && (tmp < fdtend) ) + { + /* Try right after the old tree instead */ + tmp = (char *)(uintptr_t)fdtend; + if ( (tmp + newsize) > ((char *)buf + bufsize) ) + return -FDT_ERR_NOSPACE; + } + + _fdt_packblocks(fdt, tmp, mem_rsv_size, struct_size); + memmove(buf, tmp, newsize); + + fdt_set_magic(buf, FDT_MAGIC); + fdt_set_totalsize(buf, bufsize); + fdt_set_version(buf, 17); + fdt_set_last_comp_version(buf, 16); + fdt_set_boot_cpuid_phys(buf, fdt_boot_cpuid_phys(fdt)); + + return 0; } int fdt_pack(void *fdt) { - int mem_rsv_size; + int mem_rsv_size; - FDT_RW_CHECK_HEADER(fdt); + FDT_RW_CHECK_HEADER(fdt); - mem_rsv_size = (fdt_num_mem_rsv(fdt)+1) - * sizeof(struct fdt_reserve_entry); - _fdt_packblocks(fdt, fdt, mem_rsv_size, fdt_size_dt_struct(fdt)); - fdt_set_totalsize(fdt, _fdt_data_size(fdt)); + mem_rsv_size = + (fdt_num_mem_rsv(fdt) + 1) * sizeof(struct fdt_reserve_entry); + _fdt_packblocks(fdt, fdt, mem_rsv_size, fdt_size_dt_struct(fdt)); + fdt_set_totalsize(fdt, _fdt_data_size(fdt)); - return 0; + return 0; } diff --git a/xen/common/libfdt/fdt_strerror.c b/xen/common/libfdt/fdt_strerror.c index 8d0289cb38..ace38097d8 100644 --- a/xen/common/libfdt/fdt_strerror.c +++ b/xen/common/libfdt/fdt_strerror.c @@ -16,7 +16,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public - * License along with this library; If not, see . + * License along with this library; If not, see + * . 
* * Alternatively, * @@ -53,42 +54,42 @@ #include "libfdt_internal.h" -struct fdt_errtabent { - const char *str; +struct fdt_errtabent +{ + const char *str; }; #define FDT_ERRTABENT(val) \ - [(val)] = { .str = #val, } + [(val)] = { \ + .str = #val, \ + } static struct fdt_errtabent fdt_errtable[] = { - FDT_ERRTABENT(FDT_ERR_NOTFOUND), - FDT_ERRTABENT(FDT_ERR_EXISTS), - FDT_ERRTABENT(FDT_ERR_NOSPACE), + FDT_ERRTABENT(FDT_ERR_NOTFOUND), FDT_ERRTABENT(FDT_ERR_EXISTS), + FDT_ERRTABENT(FDT_ERR_NOSPACE), - FDT_ERRTABENT(FDT_ERR_BADOFFSET), - FDT_ERRTABENT(FDT_ERR_BADPATH), - FDT_ERRTABENT(FDT_ERR_BADSTATE), + FDT_ERRTABENT(FDT_ERR_BADOFFSET), FDT_ERRTABENT(FDT_ERR_BADPATH), + FDT_ERRTABENT(FDT_ERR_BADSTATE), - FDT_ERRTABENT(FDT_ERR_TRUNCATED), - FDT_ERRTABENT(FDT_ERR_BADMAGIC), - FDT_ERRTABENT(FDT_ERR_BADVERSION), - FDT_ERRTABENT(FDT_ERR_BADSTRUCTURE), - FDT_ERRTABENT(FDT_ERR_BADLAYOUT), + FDT_ERRTABENT(FDT_ERR_TRUNCATED), FDT_ERRTABENT(FDT_ERR_BADMAGIC), + FDT_ERRTABENT(FDT_ERR_BADVERSION), FDT_ERRTABENT(FDT_ERR_BADSTRUCTURE), + FDT_ERRTABENT(FDT_ERR_BADLAYOUT), }; -#define FDT_ERRTABSIZE (sizeof(fdt_errtable) / sizeof(fdt_errtable[0])) +#define FDT_ERRTABSIZE (sizeof(fdt_errtable) / sizeof(fdt_errtable[0])) const char *fdt_strerror(int errval) { - if (errval > 0) - return ""; - else if (errval == 0) - return ""; - else if (errval > -FDT_ERRTABSIZE) { - const char *s = fdt_errtable[-errval].str; + if ( errval > 0 ) + return ""; + else if ( errval == 0 ) + return ""; + else if ( errval > -FDT_ERRTABSIZE ) + { + const char *s = fdt_errtable[-errval].str; - if (s) - return s; - } + if ( s ) + return s; + } - return ""; + return ""; } diff --git a/xen/common/libfdt/fdt_sw.c b/xen/common/libfdt/fdt_sw.c index c7d93d3bba..fd2ef0d797 100644 --- a/xen/common/libfdt/fdt_sw.c +++ b/xen/common/libfdt/fdt_sw.c @@ -16,7 +16,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public - * License along with this library; If not, see . + * License along with this library; If not, see + * . 
* * Alternatively, * @@ -55,200 +56,201 @@ static int _fdt_sw_check_header(void *fdt) { - if (fdt_magic(fdt) != FDT_SW_MAGIC) - return -FDT_ERR_BADMAGIC; - /* FIXME: should check more details about the header state */ - return 0; + if ( fdt_magic(fdt) != FDT_SW_MAGIC ) + return -FDT_ERR_BADMAGIC; + /* FIXME: should check more details about the header state */ + return 0; } -#define FDT_SW_CHECK_HEADER(fdt) \ - { \ - int err; \ - if ((err = _fdt_sw_check_header(fdt)) != 0) \ - return err; \ - } +#define FDT_SW_CHECK_HEADER(fdt) \ + { \ + int err; \ + if ( (err = _fdt_sw_check_header(fdt)) != 0 ) \ + return err; \ + } static void *_fdt_grab_space(void *fdt, size_t len) { - int offset = fdt_size_dt_struct(fdt); - int spaceleft; + int offset = fdt_size_dt_struct(fdt); + int spaceleft; - spaceleft = fdt_totalsize(fdt) - fdt_off_dt_struct(fdt) - - fdt_size_dt_strings(fdt); + spaceleft = + fdt_totalsize(fdt) - fdt_off_dt_struct(fdt) - fdt_size_dt_strings(fdt); - if ((offset + len < offset) || (offset + len > spaceleft)) - return NULL; + if ( (offset + len < offset) || (offset + len > spaceleft) ) + return NULL; - fdt_set_size_dt_struct(fdt, offset + len); - return _fdt_offset_ptr_w(fdt, offset); + fdt_set_size_dt_struct(fdt, offset + len); + return _fdt_offset_ptr_w(fdt, offset); } int fdt_create(void *buf, int bufsize) { - void *fdt = buf; + void *fdt = buf; - if (bufsize < sizeof(struct fdt_header)) - return -FDT_ERR_NOSPACE; + if ( bufsize < sizeof(struct fdt_header) ) + return -FDT_ERR_NOSPACE; - memset(buf, 0, bufsize); + memset(buf, 0, bufsize); - fdt_set_magic(fdt, FDT_SW_MAGIC); - fdt_set_version(fdt, FDT_LAST_SUPPORTED_VERSION); - fdt_set_last_comp_version(fdt, FDT_FIRST_SUPPORTED_VERSION); - fdt_set_totalsize(fdt, bufsize); + fdt_set_magic(fdt, FDT_SW_MAGIC); + fdt_set_version(fdt, FDT_LAST_SUPPORTED_VERSION); + fdt_set_last_comp_version(fdt, FDT_FIRST_SUPPORTED_VERSION); + fdt_set_totalsize(fdt, bufsize); - fdt_set_off_mem_rsvmap(fdt, FDT_ALIGN(sizeof(struct fdt_header), - sizeof(struct fdt_reserve_entry))); - fdt_set_off_dt_struct(fdt, fdt_off_mem_rsvmap(fdt)); - fdt_set_off_dt_strings(fdt, bufsize); + fdt_set_off_mem_rsvmap(fdt, FDT_ALIGN(sizeof(struct fdt_header), + sizeof(struct fdt_reserve_entry))); + fdt_set_off_dt_struct(fdt, fdt_off_mem_rsvmap(fdt)); + fdt_set_off_dt_strings(fdt, bufsize); - return 0; + return 0; } int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size) { - struct fdt_reserve_entry *re; - int offset; + struct fdt_reserve_entry *re; + int offset; - FDT_SW_CHECK_HEADER(fdt); + FDT_SW_CHECK_HEADER(fdt); - if (fdt_size_dt_struct(fdt)) - return -FDT_ERR_BADSTATE; + if ( fdt_size_dt_struct(fdt) ) + return -FDT_ERR_BADSTATE; - offset = fdt_off_dt_struct(fdt); - if ((offset + sizeof(*re)) > fdt_totalsize(fdt)) - return -FDT_ERR_NOSPACE; + offset = fdt_off_dt_struct(fdt); + if ( (offset + sizeof(*re)) > fdt_totalsize(fdt) ) + return -FDT_ERR_NOSPACE; - re = (struct fdt_reserve_entry *)((char *)fdt + offset); - re->address = cpu_to_fdt64(addr); - re->size = cpu_to_fdt64(size); + re = (struct fdt_reserve_entry *)((char *)fdt + offset); + re->address = cpu_to_fdt64(addr); + re->size = cpu_to_fdt64(size); - fdt_set_off_dt_struct(fdt, offset + sizeof(*re)); + fdt_set_off_dt_struct(fdt, offset + sizeof(*re)); - return 0; + return 0; } int fdt_finish_reservemap(void *fdt) { - return fdt_add_reservemap_entry(fdt, 0, 0); + return fdt_add_reservemap_entry(fdt, 0, 0); } int fdt_begin_node(void *fdt, const char *name) { - struct fdt_node_header *nh; - int namelen = 
strlen(name) + 1; + struct fdt_node_header *nh; + int namelen = strlen(name) + 1; - FDT_SW_CHECK_HEADER(fdt); + FDT_SW_CHECK_HEADER(fdt); - nh = _fdt_grab_space(fdt, sizeof(*nh) + FDT_TAGALIGN(namelen)); - if (! nh) - return -FDT_ERR_NOSPACE; + nh = _fdt_grab_space(fdt, sizeof(*nh) + FDT_TAGALIGN(namelen)); + if ( !nh ) + return -FDT_ERR_NOSPACE; - nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE); - memcpy(nh->name, name, namelen); - return 0; + nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE); + memcpy(nh->name, name, namelen); + return 0; } int fdt_end_node(void *fdt) { - fdt32_t *en; + fdt32_t *en; - FDT_SW_CHECK_HEADER(fdt); + FDT_SW_CHECK_HEADER(fdt); - en = _fdt_grab_space(fdt, FDT_TAGSIZE); - if (! en) - return -FDT_ERR_NOSPACE; + en = _fdt_grab_space(fdt, FDT_TAGSIZE); + if ( !en ) + return -FDT_ERR_NOSPACE; - *en = cpu_to_fdt32(FDT_END_NODE); - return 0; + *en = cpu_to_fdt32(FDT_END_NODE); + return 0; } static int _fdt_find_add_string(void *fdt, const char *s) { - char *strtab = (char *)fdt + fdt_totalsize(fdt); - const char *p; - int strtabsize = fdt_size_dt_strings(fdt); - int len = strlen(s) + 1; - int struct_top, offset; - - p = _fdt_find_string(strtab - strtabsize, strtabsize, s); - if (p) - return p - strtab; - - /* Add it */ - offset = -strtabsize - len; - struct_top = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt); - if (fdt_totalsize(fdt) + offset < struct_top) - return 0; /* no more room :( */ - - memcpy(strtab + offset, s, len); - fdt_set_size_dt_strings(fdt, strtabsize + len); - return offset; + char *strtab = (char *)fdt + fdt_totalsize(fdt); + const char *p; + int strtabsize = fdt_size_dt_strings(fdt); + int len = strlen(s) + 1; + int struct_top, offset; + + p = _fdt_find_string(strtab - strtabsize, strtabsize, s); + if ( p ) + return p - strtab; + + /* Add it */ + offset = -strtabsize - len; + struct_top = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt); + if ( fdt_totalsize(fdt) + offset < struct_top ) + return 0; /* no more room :( */ + + memcpy(strtab + offset, s, len); + fdt_set_size_dt_strings(fdt, strtabsize + len); + return offset; } int fdt_property(void *fdt, const char *name, const void *val, int len) { - struct fdt_property *prop; - int nameoff; + struct fdt_property *prop; + int nameoff; - FDT_SW_CHECK_HEADER(fdt); + FDT_SW_CHECK_HEADER(fdt); - nameoff = _fdt_find_add_string(fdt, name); - if (nameoff == 0) - return -FDT_ERR_NOSPACE; + nameoff = _fdt_find_add_string(fdt, name); + if ( nameoff == 0 ) + return -FDT_ERR_NOSPACE; - prop = _fdt_grab_space(fdt, sizeof(*prop) + FDT_TAGALIGN(len)); - if (! prop) - return -FDT_ERR_NOSPACE; + prop = _fdt_grab_space(fdt, sizeof(*prop) + FDT_TAGALIGN(len)); + if ( !prop ) + return -FDT_ERR_NOSPACE; - prop->tag = cpu_to_fdt32(FDT_PROP); - prop->nameoff = cpu_to_fdt32(nameoff); - prop->len = cpu_to_fdt32(len); - memcpy(prop->data, val, len); - return 0; + prop->tag = cpu_to_fdt32(FDT_PROP); + prop->nameoff = cpu_to_fdt32(nameoff); + prop->len = cpu_to_fdt32(len); + memcpy(prop->data, val, len); + return 0; } int fdt_finish(void *fdt) { - char *p = (char *)fdt; - fdt32_t *end; - int oldstroffset, newstroffset; - uint32_t tag; - int offset, nextoffset; - - FDT_SW_CHECK_HEADER(fdt); - - /* Add terminator */ - end = _fdt_grab_space(fdt, sizeof(*end)); - if (! 
end) - return -FDT_ERR_NOSPACE; - *end = cpu_to_fdt32(FDT_END); - - /* Relocate the string table */ - oldstroffset = fdt_totalsize(fdt) - fdt_size_dt_strings(fdt); - newstroffset = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt); - memmove(p + newstroffset, p + oldstroffset, fdt_size_dt_strings(fdt)); - fdt_set_off_dt_strings(fdt, newstroffset); - - /* Walk the structure, correcting string offsets */ - offset = 0; - while ((tag = fdt_next_tag(fdt, offset, &nextoffset)) != FDT_END) { - if (tag == FDT_PROP) { - struct fdt_property *prop = - _fdt_offset_ptr_w(fdt, offset); - int nameoff; - - nameoff = fdt32_to_cpu(prop->nameoff); - nameoff += fdt_size_dt_strings(fdt); - prop->nameoff = cpu_to_fdt32(nameoff); - } - offset = nextoffset; - } - if (nextoffset < 0) - return nextoffset; - - /* Finally, adjust the header */ - fdt_set_totalsize(fdt, newstroffset + fdt_size_dt_strings(fdt)); - fdt_set_magic(fdt, FDT_MAGIC); - return 0; + char *p = (char *)fdt; + fdt32_t *end; + int oldstroffset, newstroffset; + uint32_t tag; + int offset, nextoffset; + + FDT_SW_CHECK_HEADER(fdt); + + /* Add terminator */ + end = _fdt_grab_space(fdt, sizeof(*end)); + if ( !end ) + return -FDT_ERR_NOSPACE; + *end = cpu_to_fdt32(FDT_END); + + /* Relocate the string table */ + oldstroffset = fdt_totalsize(fdt) - fdt_size_dt_strings(fdt); + newstroffset = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt); + memmove(p + newstroffset, p + oldstroffset, fdt_size_dt_strings(fdt)); + fdt_set_off_dt_strings(fdt, newstroffset); + + /* Walk the structure, correcting string offsets */ + offset = 0; + while ( (tag = fdt_next_tag(fdt, offset, &nextoffset)) != FDT_END ) + { + if ( tag == FDT_PROP ) + { + struct fdt_property *prop = _fdt_offset_ptr_w(fdt, offset); + int nameoff; + + nameoff = fdt32_to_cpu(prop->nameoff); + nameoff += fdt_size_dt_strings(fdt); + prop->nameoff = cpu_to_fdt32(nameoff); + } + offset = nextoffset; + } + if ( nextoffset < 0 ) + return nextoffset; + + /* Finally, adjust the header */ + fdt_set_totalsize(fdt, newstroffset + fdt_size_dt_strings(fdt)); + fdt_set_magic(fdt, FDT_MAGIC); + return 0; } diff --git a/xen/common/libfdt/fdt_wip.c b/xen/common/libfdt/fdt_wip.c index 2d1cac084d..9507cc176e 100644 --- a/xen/common/libfdt/fdt_wip.c +++ b/xen/common/libfdt/fdt_wip.c @@ -16,7 +16,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public - * License along with this library; If not, see . + * License along with this library; If not, see + * . * * Alternatively, * @@ -54,63 +55,63 @@ #include "libfdt_internal.h" int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name, - const void *val, int len) + const void *val, int len) { - void *propval; - int proplen; + void *propval; + int proplen; - propval = fdt_getprop_w(fdt, nodeoffset, name, &proplen); - if (! 
propval) - return proplen; + propval = fdt_getprop_w(fdt, nodeoffset, name, &proplen); + if ( !propval ) + return proplen; - if (proplen != len) - return -FDT_ERR_NOSPACE; + if ( proplen != len ) + return -FDT_ERR_NOSPACE; - memcpy(propval, val, len); - return 0; + memcpy(propval, val, len); + return 0; } static void _fdt_nop_region(void *start, int len) { - fdt32_t *p; + fdt32_t *p; - for (p = start; (char *)p < ((char *)start + len); p++) - *p = cpu_to_fdt32(FDT_NOP); + for ( p = start; (char *)p < ((char *)start + len); p++ ) + *p = cpu_to_fdt32(FDT_NOP); } int fdt_nop_property(void *fdt, int nodeoffset, const char *name) { - struct fdt_property *prop; - int len; + struct fdt_property *prop; + int len; - prop = fdt_get_property_w(fdt, nodeoffset, name, &len); - if (! prop) - return len; + prop = fdt_get_property_w(fdt, nodeoffset, name, &len); + if ( !prop ) + return len; - _fdt_nop_region(prop, len + sizeof(*prop)); + _fdt_nop_region(prop, len + sizeof(*prop)); - return 0; + return 0; } int _fdt_node_end_offset(void *fdt, int offset) { - int depth = 0; + int depth = 0; - while ((offset >= 0) && (depth >= 0)) - offset = fdt_next_node(fdt, offset, &depth); + while ( (offset >= 0) && (depth >= 0) ) + offset = fdt_next_node(fdt, offset, &depth); - return offset; + return offset; } int fdt_nop_node(void *fdt, int nodeoffset) { - int endoffset; + int endoffset; - endoffset = _fdt_node_end_offset(fdt, nodeoffset); - if (endoffset < 0) - return endoffset; + endoffset = _fdt_node_end_offset(fdt, nodeoffset); + if ( endoffset < 0 ) + return endoffset; - _fdt_nop_region(fdt_offset_ptr_w(fdt, nodeoffset, 0), - endoffset - nodeoffset); - return 0; + _fdt_nop_region(fdt_offset_ptr_w(fdt, nodeoffset, 0), + endoffset - nodeoffset); + return 0; } diff --git a/xen/common/list_sort.c b/xen/common/list_sort.c index af2b2f6519..360661a3ff 100644 --- a/xen/common/list_sort.c +++ b/xen/common/list_sort.c @@ -26,25 +26,29 @@ * sentinel head node, "prev" links not maintained. */ static struct list_head *merge(void *priv, - int (*cmp)(void *priv, struct list_head *a, - struct list_head *b), - struct list_head *a, struct list_head *b) + int (*cmp)(void *priv, struct list_head *a, + struct list_head *b), + struct list_head *a, struct list_head *b) { - struct list_head head, *tail = &head; - - while (a && b) { - /* if equal, take 'a' -- important for sort stability */ - if ((*cmp)(priv, a, b) <= 0) { - tail->next = a; - a = a->next; - } else { - tail->next = b; - b = b->next; - } - tail = tail->next; - } - tail->next = a?:b; - return head.next; + struct list_head head, *tail = &head; + + while ( a && b ) + { + /* if equal, take 'a' -- important for sort stability */ + if ( (*cmp)(priv, a, b) <= 0 ) + { + tail->next = a; + a = a->next; + } + else + { + tail->next = b; + b = b->next; + } + tail = tail->next; + } + tail->next = a ?: b; + return head.next; } /* @@ -54,46 +58,49 @@ static struct list_head *merge(void *priv, * prev-link restoration pass, or maintaining the prev links * throughout. 
*/ -static void merge_and_restore_back_links(void *priv, - int (*cmp)(void *priv, struct list_head *a, - struct list_head *b), - struct list_head *head, - struct list_head *a, struct list_head *b) +static void merge_and_restore_back_links( + void *priv, + int (*cmp)(void *priv, struct list_head *a, struct list_head *b), + struct list_head *head, struct list_head *a, struct list_head *b) { - struct list_head *tail = head; - u8 count = 0; - - while (a && b) { - /* if equal, take 'a' -- important for sort stability */ - if ((*cmp)(priv, a, b) <= 0) { - tail->next = a; - a->prev = tail; - a = a->next; - } else { - tail->next = b; - b->prev = tail; - b = b->next; - } - tail = tail->next; - } - tail->next = a ? : b; - - do { - /* - * In worst cases this loop may run many iterations. - * Continue callbacks to the client even though no - * element comparison is needed, so the client's cmp() - * routine can invoke cond_resched() periodically. - */ - if (unlikely(!(++count))) - (*cmp)(priv, tail->next, tail->next); - - tail->next->prev = tail; - tail = tail->next; - } while (tail->next); - - tail->next = head; - head->prev = tail; + struct list_head *tail = head; + u8 count = 0; + + while ( a && b ) + { + /* if equal, take 'a' -- important for sort stability */ + if ( (*cmp)(priv, a, b) <= 0 ) + { + tail->next = a; + a->prev = tail; + a = a->next; + } + else + { + tail->next = b; + b->prev = tail; + b = b->next; + } + tail = tail->next; + } + tail->next = a ?: b; + + do { + /* + * In worst cases this loop may run many iterations. + * Continue callbacks to the client even though no + * element comparison is needed, so the client's cmp() + * routine can invoke cond_resched() periodically. + */ + if ( unlikely(!(++count)) ) + (*cmp)(priv, tail->next, tail->next); + + tail->next->prev = tail; + tail = tail->next; + } while ( tail->next ); + + tail->next = head; + head->prev = tail; } /** @@ -111,47 +118,49 @@ static void merge_and_restore_back_links(void *priv, * ordering is to be preserved, @cmp must return 0. 
*/ void list_sort(void *priv, struct list_head *head, - int (*cmp)(void *priv, struct list_head *a, - struct list_head *b)) + int (*cmp)(void *priv, struct list_head *a, struct list_head *b)) { - struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists - -- last slot is a sentinel */ - int lev; /* index into part[] */ - int max_lev = 0; - struct list_head *list; - - if (list_empty(head)) - return; - - memset(part, 0, sizeof(part)); - - head->prev->next = NULL; - list = head->next; - - while (list) { - struct list_head *cur = list; - list = list->next; - cur->next = NULL; - - for (lev = 0; part[lev]; lev++) { - cur = merge(priv, cmp, part[lev], cur); - part[lev] = NULL; - } - if (lev > max_lev) { - if (unlikely(lev >= ARRAY_SIZE(part)-1)) { - dprintk(XENLOG_DEBUG, - "list too long for efficiency\n"); - lev--; - } - max_lev = lev; - } - part[lev] = cur; - } - - for (lev = 0; lev < max_lev; lev++) - if (part[lev]) - list = merge(priv, cmp, part[lev], list); - - merge_and_restore_back_links(priv, cmp, head, part[max_lev], list); + struct list_head *part[MAX_LIST_LENGTH_BITS + 1]; /* sorted partial lists + -- last slot is a sentinel */ + int lev; /* index into part[] */ + int max_lev = 0; + struct list_head *list; + + if ( list_empty(head) ) + return; + + memset(part, 0, sizeof(part)); + + head->prev->next = NULL; + list = head->next; + + while ( list ) + { + struct list_head *cur = list; + list = list->next; + cur->next = NULL; + + for ( lev = 0; part[lev]; lev++ ) + { + cur = merge(priv, cmp, part[lev], cur); + part[lev] = NULL; + } + if ( lev > max_lev ) + { + if ( unlikely(lev >= ARRAY_SIZE(part) - 1) ) + { + dprintk(XENLOG_DEBUG, "list too long for efficiency\n"); + lev--; + } + max_lev = lev; + } + part[lev] = cur; + } + + for ( lev = 0; lev < max_lev; lev++ ) + if ( part[lev] ) + list = merge(priv, cmp, part[lev], list); + + merge_and_restore_back_links(priv, cmp, head, part[max_lev], list); } EXPORT_SYMBOL(list_sort); diff --git a/xen/common/livepatch.c b/xen/common/livepatch.c index d6eaae6d3b..1885a5c468 100644 --- a/xen/common/livepatch.c +++ b/xen/common/livepatch.c @@ -46,51 +46,56 @@ static unsigned int payload_cnt; static unsigned int payload_version = 1; /* To contain the ELF Note header. */ -struct livepatch_build_id { - const void *p; - unsigned int len; +struct livepatch_build_id +{ + const void *p; + unsigned int len; }; -struct payload { - uint32_t state; /* One of the LIVEPATCH_STATE_*. */ - int32_t rc; /* 0 or -XEN_EXX. */ - bool reverted; /* Whether it was reverted. */ - bool safe_to_reapply; /* Can apply safely after revert. */ - struct list_head list; /* Linked to 'payload_list'. */ - const void *text_addr; /* Virtual address of .text. */ - size_t text_size; /* .. and its size. */ - const void *rw_addr; /* Virtual address of .data. */ - size_t rw_size; /* .. and its size (if any). */ - const void *ro_addr; /* Virtual address of .rodata. */ - size_t ro_size; /* .. and its size (if any). */ - unsigned int pages; /* Total pages for [text,rw,ro]_addr */ - struct list_head applied_list; /* Linked to 'applied_list'. */ - struct livepatch_func *funcs; /* The array of functions to patch. */ - unsigned int nfuncs; /* Nr of functions to patch. */ +struct payload +{ + uint32_t state; /* One of the LIVEPATCH_STATE_*. */ + int32_t rc; /* 0 or -XEN_EXX. */ + bool reverted; /* Whether it was reverted. */ + bool safe_to_reapply; /* Can apply safely after revert. */ + struct list_head list; /* Linked to 'payload_list'. 
*/ + const void *text_addr; /* Virtual address of .text. */ + size_t text_size; /* .. and its size. */ + const void *rw_addr; /* Virtual address of .data. */ + size_t rw_size; /* .. and its size (if any). */ + const void *ro_addr; /* Virtual address of .rodata. */ + size_t ro_size; /* .. and its size (if any). */ + unsigned int pages; /* Total pages for [text,rw,ro]_addr */ + struct list_head applied_list; /* Linked to 'applied_list'. */ + struct livepatch_func *funcs; /* The array of functions to patch. */ + unsigned int nfuncs; /* Nr of functions to patch. */ const struct livepatch_symbol *symtab; /* All symbols. */ - const char *strtab; /* Pointer to .strtab. */ - struct virtual_region region; /* symbol, bug.frame patching and - exception table (x86). */ - unsigned int nsyms; /* Nr of entries in .strtab and symbols. */ - struct livepatch_build_id id; /* ELFNOTE_DESC(.note.gnu.build-id) of the payload. */ - struct livepatch_build_id dep; /* ELFNOTE_DESC(.livepatch.depends). */ - livepatch_loadcall_t *const *load_funcs; /* The array of funcs to call after */ - livepatch_unloadcall_t *const *unload_funcs;/* load and unload of the payload. */ - unsigned int n_load_funcs; /* Nr of the funcs to load and execute. */ - unsigned int n_unload_funcs; /* Nr of funcs to call durung unload. */ - char name[XEN_LIVEPATCH_NAME_SIZE]; /* Name of it. */ + const char *strtab; /* Pointer to .strtab. */ + struct virtual_region region; /* symbol, bug.frame patching and + exception table (x86). */ + unsigned int nsyms; /* Nr of entries in .strtab and symbols. */ + struct livepatch_build_id + id; /* ELFNOTE_DESC(.note.gnu.build-id) of the payload. */ + struct livepatch_build_id dep; /* ELFNOTE_DESC(.livepatch.depends). */ + livepatch_loadcall_t *const + *load_funcs; /* The array of funcs to call after */ + livepatch_unloadcall_t *const + *unload_funcs; /* load and unload of the payload. */ + unsigned int n_load_funcs; /* Nr of the funcs to load and execute. */ + unsigned int n_unload_funcs; /* Nr of funcs to call durung unload. */ + char name[XEN_LIVEPATCH_NAME_SIZE]; /* Name of it. */ }; /* Defines an outstanding patching action. */ struct livepatch_work { - atomic_t semaphore; /* Used to rendezvous CPUs in - check_for_livepatch_work. */ - uint32_t timeout; /* Timeout to do the operation. */ - struct payload *data; /* The payload on which to act. */ - volatile bool_t do_work; /* Signals work to do. */ - volatile bool_t ready; /* Signals all CPUs synchronized. */ - unsigned int cmd; /* Action request: LIVEPATCH_ACTION_* */ + atomic_t semaphore; /* Used to rendezvous CPUs in + check_for_livepatch_work. */ + uint32_t timeout; /* Timeout to do the operation. */ + struct payload *data; /* The payload on which to act. */ + volatile bool_t do_work; /* Signals work to do. */ + volatile bool_t ready; /* Signals all CPUs synchronized. */ + unsigned int cmd; /* Action request: LIVEPATCH_ACTION_* */ }; /* There can be only one outstanding patching action. */ @@ -121,7 +126,8 @@ static int get_name(const struct xen_livepatch_name *name, char *n) return 0; } -static int verify_payload(const struct xen_sysctl_livepatch_upload *upload, char *n) +static int verify_payload(const struct xen_sysctl_livepatch_upload *upload, + char *n) { if ( get_name(&upload->name, n) ) return -EINVAL; @@ -148,19 +154,16 @@ bool_t is_patch(const void *ptr) * or revert context. And in case it dies there we need an safe list. 
*/ rcu_read_lock(&rcu_applied_lock); - list_for_each_entry_rcu ( data, &applied_list, applied_list ) + list_for_each_entry_rcu(data, &applied_list, applied_list) { - if ( (ptr >= data->rw_addr && - ptr < (data->rw_addr + data->rw_size)) || - (ptr >= data->ro_addr && - ptr < (data->ro_addr + data->ro_size)) || + if ( (ptr >= data->rw_addr && ptr < (data->rw_addr + data->rw_size)) || + (ptr >= data->ro_addr && ptr < (data->ro_addr + data->ro_size)) || (ptr >= data->text_addr && ptr < (data->text_addr + data->text_size)) ) { r = 1; break; } - } rcu_read_unlock(&rcu_applied_lock); @@ -172,7 +175,7 @@ unsigned long livepatch_symbols_lookup_by_name(const char *symname) const struct payload *data; ASSERT(spin_is_locked(&payload_lock)); - list_for_each_entry ( data, &payload_list, list ) + list_for_each_entry (data, &payload_list, list) { unsigned int i; @@ -204,10 +207,9 @@ static const char *livepatch_symbols_lookup(unsigned long addr, * or revert context. And in case it dies there we need an safe list. */ rcu_read_lock(&rcu_applied_lock); - list_for_each_entry_rcu ( data, &applied_list, applied_list ) + list_for_each_entry_rcu(data, &applied_list, applied_list) { - if ( va < data->text_addr || - va >= (data->text_addr + data->text_size) ) + if ( va < data->text_addr || va >= (data->text_addr + data->text_size) ) continue; best = UINT_MAX; @@ -251,7 +253,8 @@ static int resolve_old_address(struct livepatch_func *f, f->old_addr = (void *)livepatch_symbols_lookup_by_name(f->name); if ( !f->old_addr ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Could not resolve old address of %s\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Could not resolve old address of %s\n", elf->name, f->name); return -ENOENT; } @@ -267,7 +270,7 @@ static struct payload *find_payload(const char *name) struct payload *data, *found = NULL; ASSERT(spin_is_locked(&payload_lock)); - list_for_each_entry ( data, &payload_list, list ) + list_for_each_entry (data, &payload_list, list) { if ( !strcmp(data->name, name) ) { @@ -296,12 +299,12 @@ static void free_payload_data(struct payload *payload) } /* -* calc_section computes the size (taking into account section alignment). -* -* Furthermore the offset is set with the offset from the start of the virtual -* address space for the payload (using passed in size). This is used in -* move_payload to figure out the destination location (load_addr). -*/ + * calc_section computes the size (taking into account section alignment). + * + * Furthermore the offset is set with the offset from the start of the virtual + * address space for the payload (using passed in size). This is used in + * move_payload to figure out the destination location (load_addr). + */ static void calc_section(const struct livepatch_elf_sec *sec, size_t *size, unsigned int *offset) { @@ -340,7 +343,7 @@ static int move_payload(struct payload *payload, struct livepatch_elf *elf) if ( livepatch_elf_ignore_section(elf->sec[i].sec) ) offset[i] = UINT_MAX; else if ( (elf->sec[i].sec->sh_flags & SHF_EXECINSTR) && - !(elf->sec[i].sec->sh_flags & SHF_WRITE) ) + !(elf->sec[i].sec->sh_flags & SHF_WRITE) ) calc_section(&elf->sec[i], &payload->text_size, &offset[i]); else if ( !(elf->sec[i].sec->sh_flags & SHF_EXECINSTR) && (elf->sec[i].sec->sh_flags & SHF_WRITE) ) @@ -364,13 +367,14 @@ static int move_payload(struct payload *payload, struct livepatch_elf *elf) * own page. */ size = PAGE_ALIGN(payload->text_size) + PAGE_ALIGN(payload->rw_size) + - payload->ro_size; + payload->ro_size; size = PFN_UP(size); /* Nr of pages. 
*/ text_buf = vmalloc_xen(size * PAGE_SIZE); if ( !text_buf ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Could not allocate memory for payload!\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Could not allocate memory for payload!\n", elf->name); rc = -ENOMEM; goto out; @@ -421,10 +425,10 @@ static int move_payload(struct payload *payload, struct livepatch_elf *elf) * Only one RW section with non-zero size: .livepatch.funcs, * or only RO sections. */ - if ( !rw_buf_cnt || (rw_buf_cnt == 1 && - !strcmp(elf->sec[rw_buf_sec].name, ELF_LIVEPATCH_FUNC)) ) + if ( !rw_buf_cnt || (rw_buf_cnt == 1 && !strcmp(elf->sec[rw_buf_sec].name, + ELF_LIVEPATCH_FUNC)) ) payload->safe_to_reapply = true; - out: +out: xfree(offset); return rc; @@ -439,7 +443,8 @@ static int secure_payload(struct payload *payload, struct livepatch_elf *elf) if ( text_pages ) { - rc = arch_livepatch_secure(payload->text_addr, text_pages, LIVEPATCH_VA_RX); + rc = arch_livepatch_secure(payload->text_addr, text_pages, + LIVEPATCH_VA_RX); if ( rc ) return rc; } @@ -468,7 +473,9 @@ static bool section_ok(const struct livepatch_elf *elf, if ( sec->sec->sh_size % sz ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Wrong size %"PRIuElfWord" of %s (must be multiple of %zu)\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Wrong size %" PRIuElfWord + " of %s (must be multiple of %zu)\n", elf->name, sec->sec->sh_size, sec->name, sz); return false; } @@ -479,10 +486,9 @@ static bool section_ok(const struct livepatch_elf *elf, static int check_special_sections(const struct livepatch_elf *elf) { unsigned int i; - static const char *const names[] = { ELF_LIVEPATCH_FUNC, - ELF_LIVEPATCH_DEPENDS, - ELF_BUILD_ID_NOTE}; - DECLARE_BITMAP(found, ARRAY_SIZE(names)) = { 0 }; + static const char *const names[] = { + ELF_LIVEPATCH_FUNC, ELF_LIVEPATCH_DEPENDS, ELF_BUILD_ID_NOTE}; + DECLARE_BITMAP(found, ARRAY_SIZE(names)) = {0}; for ( i = 0; i < ARRAY_SIZE(names); i++ ) { @@ -491,15 +497,15 @@ static int check_special_sections(const struct livepatch_elf *elf) sec = livepatch_elf_sec_by_name(elf, names[i]); if ( !sec ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: %s is missing!\n", - elf->name, names[i]); + dprintk(XENLOG_ERR, LIVEPATCH "%s: %s is missing!\n", elf->name, + names[i]); return -EINVAL; } if ( !sec->sec->sh_size ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: %s is empty!\n", - elf->name, names[i]); + dprintk(XENLOG_ERR, LIVEPATCH "%s: %s is empty!\n", elf->name, + names[i]); return -EINVAL; } @@ -514,8 +520,7 @@ static int check_special_sections(const struct livepatch_elf *elf) return 0; } -static int prepare_payload(struct payload *payload, - struct livepatch_elf *elf) +static int prepare_payload(struct payload *payload, struct livepatch_elf *elf) { const struct livepatch_elf_sec *sec; unsigned int i; @@ -539,7 +544,8 @@ static int prepare_payload(struct payload *payload, if ( f->version != LIVEPATCH_PAYLOAD_VERSION ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Wrong version (%u). Expected %d!\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Wrong version (%u). Expected %d!\n", elf->name, f->version, LIVEPATCH_PAYLOAD_VERSION); return -EOPNOTSUPP; } @@ -547,7 +553,8 @@ static int prepare_payload(struct payload *payload, /* 'old_addr', 'new_addr', 'new_size' can all be zero. 
*/ if ( !f->old_size ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Address or size fields are zero!\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Address or size fields are zero!\n", elf->name); return -EINVAL; } @@ -572,7 +579,8 @@ static int prepare_payload(struct payload *payload, return -EINVAL; payload->load_funcs = sec->load_addr; - payload->n_load_funcs = sec->sec->sh_size / sizeof(*payload->load_funcs); + payload->n_load_funcs = + sec->sec->sh_size / sizeof(*payload->load_funcs); } sec = livepatch_elf_sec_by_name(elf, ".livepatch.hooks.unload"); @@ -582,7 +590,8 @@ static int prepare_payload(struct payload *payload, return -EINVAL; payload->unload_funcs = sec->load_addr; - payload->n_unload_funcs = sec->sec->sh_size / sizeof(*payload->unload_funcs); + payload->n_unload_funcs = + sec->sec->sh_size / sizeof(*payload->unload_funcs); } sec = livepatch_elf_sec_by_name(elf, ELF_BUILD_ID_NOTE); if ( sec ) @@ -594,15 +603,15 @@ static int prepare_payload(struct payload *payload, if ( sec->sec->sh_size <= sizeof(*n) ) return -EINVAL; - if ( xen_build_id_check(n, sec->sec->sh_size, - &payload->id.p, &payload->id.len) ) + if ( xen_build_id_check(n, sec->sec->sh_size, &payload->id.p, + &payload->id.len) ) return -EINVAL; if ( !payload->id.len || !payload->id.p ) return -EINVAL; /* Make sure it is not a duplicate. */ - list_for_each_entry ( data, &payload_list, list ) + list_for_each_entry (data, &payload_list, list) { /* No way _this_ payload is on the list. */ ASSERT(data != payload); @@ -624,8 +633,8 @@ static int prepare_payload(struct payload *payload, if ( sec->sec->sh_size <= sizeof(*n) ) return -EINVAL; - if ( xen_build_id_check(n, sec->sec->sh_size, - &payload->dep.p, &payload->dep.len) ) + if ( xen_build_id_check(n, sec->sec->sh_size, &payload->dep.p, + &payload->dep.len) ) return -EINVAL; if ( !payload->dep.len || !payload->dep.p ) @@ -653,8 +662,8 @@ static int prepare_payload(struct payload *payload, return -EINVAL; region->frame[i].bugs = sec->load_addr; - region->frame[i].n_bugs = sec->sec->sh_size / - sizeof(*region->frame[i].bugs); + region->frame[i].n_bugs = + sec->sec->sh_size / sizeof(*region->frame[i].bugs); } sec = livepatch_elf_sec_by_name(elf, ".altinstructions"); @@ -677,14 +686,16 @@ static int prepare_payload(struct payload *payload, if ( (instr < region->start && instr >= region->end) || (replacement < region->start && replacement >= region->end) ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s Alt patching outside payload: %p!\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s Alt patching outside payload: %p!\n", elf->name, instr); return -EINVAL; } } apply_alternatives(start, end); #else - dprintk(XENLOG_ERR, LIVEPATCH "%s: We don't support alternative patching!\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: We don't support alternative patching!\n", elf->name); return -EOPNOTSUPP; #endif @@ -702,7 +713,7 @@ static int prepare_payload(struct payload *payload, s = sec->load_addr; e = sec->load_addr + sec->sec->sh_size; - sort_exception_table(s ,e); + sort_exception_table(s, e); region->ex = s; region->ex_end = e; @@ -792,8 +803,9 @@ static int build_symbol_table(struct payload *payload, symtab[nsyms].size = elf->sym[i].sym->st_size; symtab[nsyms].value = elf->sym[i].sym->st_value; symtab[nsyms].new_symbol = 0; /* May be overwritten below. 
*/ - strtab_len += strlcpy(strtab + strtab_len, elf->sym[i].name, - KSYM_NAME_LEN) + 1; + strtab_len += + strlcpy(strtab + strtab_len, elf->sym[i].name, KSYM_NAME_LEN) + + 1; nsyms++; } } @@ -823,8 +835,8 @@ static int build_symbol_table(struct payload *payload, return -EEXIST; } symtab[i].new_symbol = 1; - dprintk(XENLOG_DEBUG, LIVEPATCH "%s: new symbol %s\n", - elf->name, symtab[i].name); + dprintk(XENLOG_DEBUG, LIVEPATCH "%s: new symbol %s\n", elf->name, + symtab[i].name); } else { @@ -855,7 +867,7 @@ static void free_payload(struct payload *data) static int load_payload_data(struct payload *payload, void *raw, size_t len) { - struct livepatch_elf elf = { .name = payload->name, .len = len }; + struct livepatch_elf elf = {.name = payload->name, .len = len}; int rc = 0; rc = livepatch_elf_load(&elf, raw); @@ -888,7 +900,7 @@ static int load_payload_data(struct payload *payload, void *raw, size_t len) rc = secure_payload(payload, &elf); - out: +out: if ( rc ) free_payload_data(payload); @@ -940,7 +952,7 @@ static int livepatch_upload(struct xen_sysctl_livepatch_upload *upload) payload_version++; } - out: +out: spin_unlock(&payload_lock); vfree(raw_data); @@ -1014,7 +1026,7 @@ static int livepatch_list(struct xen_sysctl_livepatch_list *list) if ( list->nr ) { - list_for_each_entry( data, &payload_list, list ) + list_for_each_entry (data, &payload_list, list) { uint32_t len; @@ -1026,10 +1038,11 @@ static int livepatch_list(struct xen_sysctl_livepatch_list *list) len = strlen(data->name) + 1; /* N.B. 'idx' != 'i'. */ - if ( __copy_to_guest_offset(list->name, idx * XEN_LIVEPATCH_NAME_SIZE, + if ( __copy_to_guest_offset(list->name, + idx * XEN_LIVEPATCH_NAME_SIZE, data->name, len) || - __copy_to_guest_offset(list->len, idx, &len, 1) || - __copy_to_guest_offset(list->status, idx, &status, 1) ) + __copy_to_guest_offset(list->len, idx, &len, 1) || + __copy_to_guest_offset(list->status, idx, &status, 1) ) { rc = -EFAULT; break; @@ -1046,7 +1059,7 @@ static int livepatch_list(struct xen_sysctl_livepatch_list *list) spin_unlock(&payload_lock); /* And how many we have processed. */ - return rc ? : idx; + return rc ?: idx; } /* @@ -1060,8 +1073,8 @@ static int apply_payload(struct payload *data) unsigned int i; int rc; - printk(XENLOG_INFO LIVEPATCH "%s: Applying %u functions\n", - data->name, data->nfuncs); + printk(XENLOG_INFO LIVEPATCH "%s: Applying %u functions\n", data->name, + data->nfuncs); rc = arch_livepatch_quiesce(); if ( rc ) @@ -1153,7 +1166,7 @@ static void livepatch_do_action(void) * This function and the transition from asm to C code should be the only * one on any stack. No need to lock the payload list or applied list. */ - switch ( livepatch_work.cmd ) + switch (livepatch_work.cmd) { case LIVEPATCH_ACTION_APPLY: rc = apply_payload(data); @@ -1173,7 +1186,8 @@ static void livepatch_do_action(void) * N.B: Use 'applied_list' member, not 'list'. We also abuse the * the 'normal' list iterator as the list is an RCU one. 
*/ - list_for_each_entry_safe_reverse ( other, tmp, &applied_list, applied_list ) + list_for_each_entry_safe_reverse(other, tmp, &applied_list, + applied_list) { other->rc = revert_payload(other); if ( other->rc == 0 ) @@ -1229,7 +1243,7 @@ static int schedule_work(struct payload *data, uint32_t cmd, uint32_t timeout) livepatch_work.data = data; livepatch_work.timeout = timeout ?: MILLISECS(30); - dprintk(XENLOG_DEBUG, LIVEPATCH "%s: timeout is %"PRIu32"ns\n", + dprintk(XENLOG_DEBUG, LIVEPATCH "%s: timeout is %" PRIu32 "ns\n", data->name, livepatch_work.timeout); atomic_set(&livepatch_work.semaphore, -1); @@ -1263,7 +1277,8 @@ static int livepatch_spin(atomic_t *counter, s_time_t timeout, /* Log & abort. */ if ( atomic_read(counter) != cpus ) { - printk(XENLOG_ERR LIVEPATCH "%s: Timed out on semaphore in %s quiesce phase %u/%u\n", + printk(XENLOG_ERR LIVEPATCH + "%s: Timed out on semaphore in %s quiesce phase %u/%u\n", livepatch_work.data->name, s, atomic_read(counter), cpus); rc = -EBUSY; livepatch_work.data->rc = rc; @@ -1282,9 +1297,9 @@ void check_for_livepatch_work(void) { #define ACTION(x) [LIVEPATCH_ACTION_##x] = #x static const char *const names[] = { - ACTION(APPLY), - ACTION(REVERT), - ACTION(REPLACE), + ACTION(APPLY), + ACTION(REVERT), + ACTION(REPLACE), }; #undef ACTION unsigned int cpu = smp_processor_id(); @@ -1292,7 +1307,7 @@ void check_for_livepatch_work(void) unsigned long flags; /* Fast path: no work to do. */ - if ( !per_cpu(work_to_do, cpu ) ) + if ( !per_cpu(work_to_do, cpu) ) return; smp_rmb(); @@ -1314,17 +1329,18 @@ void check_for_livepatch_work(void) p = livepatch_work.data; if ( !get_cpu_maps() ) { - printk(XENLOG_ERR LIVEPATCH "%s: CPU%u - unable to get cpu_maps lock!\n", + printk(XENLOG_ERR LIVEPATCH + "%s: CPU%u - unable to get cpu_maps lock!\n", p->name, cpu); per_cpu(work_to_do, cpu) = 0; livepatch_work.data->rc = -EBUSY; smp_wmb(); livepatch_work.do_work = 0; /* - * Do NOT decrement livepatch_work.semaphore down - as that may cause - * the other CPU (which may be at this point ready to increment it) - * to assume the role of master and then needlessly time out - * out (as do_work is zero). + * Do NOT decrement livepatch_work.semaphore down - as that may + * cause the other CPU (which may be at this point ready to + * increment it) to assume the role of master and then needlessly + * time out out (as do_work is zero). */ return; } @@ -1336,8 +1352,9 @@ void check_for_livepatch_work(void) if ( cpus ) { - dprintk(XENLOG_DEBUG, LIVEPATCH "%s: CPU%u - IPIing the other %u CPUs\n", - p->name, cpu, cpus); + dprintk(XENLOG_DEBUG, + LIVEPATCH "%s: CPU%u - IPIing the other %u CPUs\n", p->name, + cpu, cpus); smp_call_function(reschedule_fn, NULL, 0); } @@ -1360,12 +1377,13 @@ void check_for_livepatch_work(void) local_irq_save(flags); /* Do the patching. */ livepatch_do_action(); - /* Serialize and flush out the CPU via CPUID instruction (on x86). */ + /* Serialize and flush out the CPU via CPUID instruction (on x86). + */ arch_livepatch_post_action(); local_irq_restore(flags); } - abort: + abort: arch_livepatch_unmask(); per_cpu(work_to_do, cpu) = 0; @@ -1374,8 +1392,8 @@ void check_for_livepatch_work(void) /* put_cpu_maps has an barrier(). 
*/ put_cpu_maps(); - printk(XENLOG_INFO LIVEPATCH "%s finished %s with rc=%d\n", - p->name, names[livepatch_work.cmd], p->rc); + printk(XENLOG_INFO LIVEPATCH "%s finished %s with rc=%d\n", p->name, + names[livepatch_work.cmd], p->rc); } else { @@ -1441,8 +1459,7 @@ static int build_id_dep(struct payload *payload, bool_t internal) name = data->name; } - if ( payload->dep.len != len || - memcmp(id, payload->dep.p, len) ) + if ( payload->dep.len != len || memcmp(id, payload->dep.p, len) ) { dprintk(XENLOG_ERR, "%s%s: check against %s build-id failed!\n", LIVEPATCH, payload->name, name); @@ -1481,7 +1498,7 @@ static int livepatch_action(struct xen_sysctl_livepatch_action *action) goto out; } - switch ( action->cmd ) + switch (action->cmd) { case LIVEPATCH_ACTION_UNLOAD: if ( data->state == LIVEPATCH_STATE_CHECKED ) @@ -1525,8 +1542,10 @@ static int livepatch_action(struct xen_sysctl_livepatch_action *action) */ if ( data->reverted && !data->safe_to_reapply ) { - dprintk(XENLOG_ERR, "%s%s: can't revert as payload has .data. Please unload!\n", - LIVEPATCH, data->name); + dprintk( + XENLOG_ERR, + "%s%s: can't revert as payload has .data. Please unload!\n", + LIVEPATCH, data->name); data->rc = -EINVAL; break; } @@ -1555,7 +1574,7 @@ static int livepatch_action(struct xen_sysctl_livepatch_action *action) break; } - out: +out: spin_unlock(&payload_lock); return rc; @@ -1568,7 +1587,7 @@ int livepatch_op(struct xen_sysctl_livepatch_op *livepatch) if ( livepatch->pad ) return -EINVAL; - switch ( livepatch->cmd ) + switch (livepatch->cmd) { case XEN_SYSCTL_LIVEPATCH_UPLOAD: rc = livepatch_upload(&livepatch->u.upload); @@ -1589,7 +1608,7 @@ int livepatch_op(struct xen_sysctl_livepatch_op *livepatch) default: rc = -EOPNOTSUPP; break; - } + } return rc; } @@ -1598,8 +1617,8 @@ static const char *state2str(unsigned int state) { #define STATE(x) [LIVEPATCH_STATE_##x] = #x static const char *const names[] = { - STATE(CHECKED), - STATE(APPLIED), + STATE(CHECKED), + STATE(APPLIED), }; #undef STATE @@ -1627,17 +1646,18 @@ static void livepatch_printall(unsigned char key) return; } - list_for_each_entry ( data, &payload_list, list ) + list_for_each_entry (data, &payload_list, list) { - printk(" name=%s state=%s(%d) %p (.data=%p, .rodata=%p) using %u pages.\n", - data->name, state2str(data->state), data->state, data->text_addr, - data->rw_addr, data->ro_addr, data->pages); + printk( + " name=%s state=%s(%d) %p (.data=%p, .rodata=%p) using %u pages.\n", + data->name, state2str(data->state), data->state, data->text_addr, + data->rw_addr, data->ro_addr, data->pages); for ( i = 0; i < data->nfuncs; i++ ) { struct livepatch_func *f = &(data->funcs[i]); - printk(" %s patch %p(%u) with %p (%u)\n", - f->name, f->old_addr, f->old_size, f->new_addr, f->new_size); + printk(" %s patch %p(%u) with %p (%u)\n", f->name, f->old_addr, + f->old_size, f->new_addr, f->new_size); if ( i && !(i % 64) ) { diff --git a/xen/common/livepatch_elf.c b/xen/common/livepatch_elf.c index dd8b47a1fa..ffbc8a5040 100644 --- a/xen/common/livepatch_elf.c +++ b/xen/common/livepatch_elf.c @@ -9,8 +9,7 @@ #include const struct livepatch_elf_sec * -livepatch_elf_sec_by_name(const struct livepatch_elf *elf, - const char *name) +livepatch_elf_sec_by_name(const struct livepatch_elf *elf, const char *name) { unsigned int i; @@ -55,14 +54,16 @@ static int elf_resolve_sections(struct livepatch_elf *elf, const void *data) sec = xzalloc_array(struct livepatch_elf_sec, elf->hdr->e_shnum); if ( !sec ) { - dprintk(XENLOG_ERR, LIVEPATCH"%s: Could not allocate 
memory for section table!\n", - elf->name); + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Could not allocate memory for section table!\n", + elf->name); return -ENOMEM; } elf->sec = sec; - /* e_shoff and e_shnum overflow checks are done in livepatch_header_check. */ + /* e_shoff and e_shnum overflow checks are done in livepatch_header_check. + */ delta = elf->hdr->e_shoff + elf->hdr->e_shnum * elf->hdr->e_shentsize; ASSERT(delta <= elf->len); @@ -81,22 +82,28 @@ static int elf_resolve_sections(struct livepatch_elf *elf, const void *data) (sec[i].sec->sh_type != SHT_NOBITS && /* Skip SHT_NOBITS */ (delta > elf->len || (delta + sec[i].sec->sh_size > elf->len))) ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Section [%u] data %s of payload!\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Section [%u] data %s of payload!\n", elf->name, i, delta < sizeof(Elf_Ehdr) ? "at ELF header" : "is past end"); return -EINVAL; } else if ( sec[i].sec->sh_addralign & (sec[i].sec->sh_addralign - 1) ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Section [%u] alignment (%#"PRIxElfAddr") is not supported\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Section [%u] alignment (%#" PRIxElfAddr + ") is not supported\n", elf->name, i, sec[i].sec->sh_addralign); return -EOPNOTSUPP; } else if ( sec[i].sec->sh_addralign && sec[i].sec->sh_addr % sec[i].sec->sh_addralign ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Section [%u] addr (%#"PRIxElfAddr") is not aligned properly (%#"PRIxElfAddr")\n", - elf->name, i, sec[i].sec->sh_addr, sec[i].sec->sh_addralign); + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Section [%u] addr (%#" PRIxElfAddr + ") is not aligned properly (%#" PRIxElfAddr ")\n", + elf->name, i, sec[i].sec->sh_addr, + sec[i].sec->sh_addralign); return -EINVAL; } else if ( (sec[i].sec->sh_flags & (SHF_WRITE | SHF_ALLOC)) && @@ -112,7 +119,8 @@ static int elf_resolve_sections(struct livepatch_elf *elf, const void *data) { if ( elf->symtab ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Unsupported multiple symbol tables!\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Unsupported multiple symbol tables!\n", elf->name); return -EOPNOTSUPP; } @@ -126,7 +134,8 @@ static int elf_resolve_sections(struct livepatch_elf *elf, const void *data) */ if ( elf->symtab->sec->sh_link >= elf->hdr->e_shnum ) { - dprintk(XENLOG_ERR, LIVEPATCH + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Symbol table idx (%u) to strtab past end (%u)\n", elf->name, elf->symtab->sec->sh_link, elf->hdr->e_shnum); @@ -167,7 +176,8 @@ static int elf_resolve_sections(struct livepatch_elf *elf, const void *data) return rc; } -static int elf_resolve_section_names(struct livepatch_elf *elf, const void *data) +static int elf_resolve_section_names(struct livepatch_elf *elf, + const void *data) { const char *shstrtab; unsigned int i; @@ -203,7 +213,8 @@ static int elf_resolve_section_names(struct livepatch_elf *elf, const void *data /* Boundary check on offset of name within the .shstrtab. */ if ( delta >= sec->sec->sh_size ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Section %u name is not within .shstrtab!\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Section %u name is not within .shstrtab!\n", elf->name, i); return -EINVAL; } @@ -230,7 +241,8 @@ static int elf_get_sym(struct livepatch_elf *elf, const void *data) /* Checked already in elf_resolve_sections, but just in case. 
*/ ASSERT(offset == strtab_sec->sec->sh_offset); - ASSERT(offset < elf->len && (offset + strtab_sec->sec->sh_size <= elf->len)); + ASSERT(offset < elf->len && + (offset + strtab_sec->sec->sh_size <= elf->len)); /* symtab_sec->data was computed in elf_resolve_sections. */ ASSERT((symtab_sec->sec->sh_offset + data) == symtab_sec->data); @@ -241,8 +253,9 @@ static int elf_get_sym(struct livepatch_elf *elf, const void *data) sym = xzalloc_array(struct livepatch_elf_sym, nsym); if ( !sym ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Could not allocate memory for symbols\n", - elf->name); + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Could not allocate memory for symbols\n", + elf->name); return -ENOMEM; } @@ -257,7 +270,8 @@ static int elf_get_sym(struct livepatch_elf *elf, const void *data) /* Boundary check within the .strtab. */ if ( delta >= strtab_sec->sec->sh_size ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Symbol [%u] name is not within .strtab!\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Symbol [%u] name is not within .strtab!\n", elf->name, i); return -EINVAL; } @@ -266,7 +280,8 @@ static int elf_get_sym(struct livepatch_elf *elf, const void *data) sym[i].name = strtab_sec->data + delta; if ( arch_livepatch_symbol_deny(elf, &sym[i]) ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Symbol '%s' should not be in payload!\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Symbol '%s' should not be in payload!\n", elf->name, sym[i].name); return -EINVAL; } @@ -289,7 +304,7 @@ int livepatch_elf_resolve_symbols(struct livepatch_elf *elf) const Elf_Sym *sym = elf->sym[i].sym; Elf_Addr st_value = sym->st_value; - switch ( idx ) + switch (idx) { case SHN_COMMON: dprintk(XENLOG_ERR, LIVEPATCH "%s: Unexpected common symbol: %s\n", @@ -310,12 +325,15 @@ int livepatch_elf_resolve_symbols(struct livepatch_elf *elf) break; } } - dprintk(XENLOG_DEBUG, LIVEPATCH "%s: Undefined symbol resolved: %s => %#"PRIxElfAddr"\n", + dprintk(XENLOG_DEBUG, + LIVEPATCH + "%s: Undefined symbol resolved: %s => %#" PRIxElfAddr "\n", elf->name, elf->sym[i].name, st_value); break; case SHN_ABS: - dprintk(XENLOG_DEBUG, LIVEPATCH "%s: Absolute symbol: %s => %#"PRIxElfAddr"\n", + dprintk(XENLOG_DEBUG, + LIVEPATCH "%s: Absolute symbol: %s => %#" PRIxElfAddr "\n", elf->name, elf->sym[i].name, sym->st_value); break; @@ -328,7 +346,8 @@ int livepatch_elf_resolve_symbols(struct livepatch_elf *elf) if ( rc ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Out of bounds symbol section %#x\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Out of bounds symbol section %#x\n", elf->name, idx); break; } @@ -338,9 +357,11 @@ int livepatch_elf_resolve_symbols(struct livepatch_elf *elf) st_value += (unsigned long)elf->sec[idx].load_addr; if ( elf->sym[i].name ) - dprintk(XENLOG_DEBUG, LIVEPATCH "%s: Symbol resolved: %s => %#"PRIxElfAddr" (%s)\n", - elf->name, elf->sym[i].name, - st_value, elf->sec[idx].name); + dprintk(XENLOG_DEBUG, + LIVEPATCH "%s: Symbol resolved: %s => %#" PRIxElfAddr + " (%s)\n", + elf->name, elf->sym[i].name, st_value, + elf->sec[idx].name); } if ( rc ) @@ -365,23 +386,24 @@ int livepatch_elf_perform_relocs(struct livepatch_elf *elf) { r = &elf->sec[i]; - if ( (r->sec->sh_type != SHT_RELA) && - (r->sec->sh_type != SHT_REL) ) + if ( (r->sec->sh_type != SHT_RELA) && (r->sec->sh_type != SHT_REL) ) continue; - /* Is it a valid relocation section? */ - if ( r->sec->sh_info >= elf->hdr->e_shnum ) + /* Is it a valid relocation section? 
*/ + if ( r->sec->sh_info >= elf->hdr->e_shnum ) continue; - base = &elf->sec[r->sec->sh_info]; + base = &elf->sec[r->sec->sh_info]; - /* Don't relocate non-allocated sections. */ - if ( !(base->sec->sh_flags & SHF_ALLOC) ) + /* Don't relocate non-allocated sections. */ + if ( !(base->sec->sh_flags & SHF_ALLOC) ) continue; if ( r->sec->sh_link != elf->symtab_idx ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Relative link of %s is incorrect (%d, expected=%d)\n", + dprintk(XENLOG_ERR, + LIVEPATCH + "%s: Relative link of %s is incorrect (%d, expected=%d)\n", elf->name, r->name, r->sec->sh_link, elf->symtab_idx); rc = -EINVAL; break; @@ -397,7 +419,8 @@ int livepatch_elf_perform_relocs(struct livepatch_elf *elf) if ( r->sec->sh_entsize < sz || r->sec->sh_size % r->sec->sh_entsize ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Section relative header is corrupted!\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Section relative header is corrupted!\n", elf->name); rc = -EINVAL; break; @@ -422,7 +445,8 @@ static int livepatch_header_check(const struct livepatch_elf *elf) if ( sizeof(*elf->hdr) > elf->len ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Section header is bigger than payload!\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Section header is bigger than payload!\n", elf->name); return -EINVAL; } @@ -439,8 +463,7 @@ static int livepatch_header_check(const struct livepatch_elf *elf) hdr->e_ident[EI_ABIVERSION] != 0 || (hdr->e_ident[EI_OSABI] != ELFOSABI_NONE && hdr->e_ident[EI_OSABI] != ELFOSABI_FREEBSD) || - hdr->e_type != ET_REL || - hdr->e_phnum != 0 ) + hdr->e_type != ET_REL || hdr->e_phnum != 0 ) { dprintk(XENLOG_ERR, LIVEPATCH "%s: Invalid ELF payload!\n", elf->name); return -EOPNOTSUPP; @@ -468,7 +491,9 @@ static int livepatch_header_check(const struct livepatch_elf *elf) /* Check that section name index is within the sections. */ if ( elf->hdr->e_shstrndx >= elf->hdr->e_shnum ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Section name idx (%u) is past end of sections (%u)!\n", + dprintk(XENLOG_ERR, + LIVEPATCH + "%s: Section name idx (%u) is past end of sections (%u)!\n", elf->name, elf->hdr->e_shstrndx, elf->hdr->e_shnum); return -EINVAL; } @@ -481,7 +506,8 @@ static int livepatch_header_check(const struct livepatch_elf *elf) if ( elf->hdr->e_shentsize < sizeof(Elf_Shdr) ) { - dprintk(XENLOG_ERR, LIVEPATCH "%s: Section header size is %u! Expected %zu!?\n", + dprintk(XENLOG_ERR, + LIVEPATCH "%s: Section header size is %u! 
Expected %zu!?\n", elf->name, elf->hdr->e_shentsize, sizeof(Elf_Shdr)); return -EINVAL; } diff --git a/xen/common/lz4/decompress.c b/xen/common/lz4/decompress.c index 94ad591331..5f78e1d20c 100644 --- a/xen/common/lz4/decompress.c +++ b/xen/common/lz4/decompress.c @@ -42,254 +42,265 @@ #if defined(__XEN__) || defined(__MINIOS__) static int INIT lz4_uncompress(const unsigned char *source, unsigned char *dest, - int osize) + int osize) { - const BYTE *ip = (const BYTE *) source; - const BYTE *ref; - BYTE *op = (BYTE *) dest; - BYTE * const oend = op + osize; - BYTE *cpy; - unsigned token; - size_t length; - size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; + const BYTE *ip = (const BYTE *)source; + const BYTE *ref; + BYTE *op = (BYTE *)dest; + BYTE *const oend = op + osize; + BYTE *cpy; + unsigned token; + size_t length; + size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; #if LZ4_ARCH64 - size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3}; + size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3}; #endif - while (1) { - - /* get runlength */ - token = *ip++; - length = (token >> ML_BITS); - if (length == RUN_MASK) { - size_t len; - - len = *ip++; - for (; len == 255; length += 255) - len = *ip++; - length += len; - } - - /* copy literals */ - cpy = op + length; - if (unlikely(cpy > oend - COPYLENGTH)) { - /* - * Error: not enough place for another match - * (min 4) + 5 literals - */ - if (cpy != oend) - goto _output_error; - - memcpy(op, ip, length); - ip += length; - break; /* EOF */ - } - if (unlikely((unsigned long)cpy < (unsigned long)op)) - goto _output_error; - LZ4_WILDCOPY(ip, op, cpy); - ip -= (op - cpy); - op = cpy; - - /* get offset */ - LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip); - ip += 2; - - /* Error: offset create reference outside destination buffer */ - if (unlikely(ref < (BYTE *const) dest)) - goto _output_error; - - /* get matchlength */ - length = token & ML_MASK; - if (length == ML_MASK) { - for (; *ip == 255; length += 255) - ip++; - length += *ip++; - } - - /* copy repeated sequence */ - if (unlikely((op - ref) < STEPSIZE)) { + while ( 1 ) + { + /* get runlength */ + token = *ip++; + length = (token >> ML_BITS); + if ( length == RUN_MASK ) + { + size_t len; + + len = *ip++; + for ( ; len == 255; length += 255 ) + len = *ip++; + length += len; + } + + /* copy literals */ + cpy = op + length; + if ( unlikely(cpy > oend - COPYLENGTH) ) + { + /* + * Error: not enough place for another match + * (min 4) + 5 literals + */ + if ( cpy != oend ) + goto _output_error; + + memcpy(op, ip, length); + ip += length; + break; /* EOF */ + } + if ( unlikely((unsigned long)cpy < (unsigned long)op) ) + goto _output_error; + LZ4_WILDCOPY(ip, op, cpy); + ip -= (op - cpy); + op = cpy; + + /* get offset */ + LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip); + ip += 2; + + /* Error: offset create reference outside destination buffer */ + if ( unlikely(ref < (BYTE *const)dest) ) + goto _output_error; + + /* get matchlength */ + length = token & ML_MASK; + if ( length == ML_MASK ) + { + for ( ; *ip == 255; length += 255 ) + ip++; + length += *ip++; + } + + /* copy repeated sequence */ + if ( unlikely((op - ref) < STEPSIZE) ) + { #if LZ4_ARCH64 - size_t dec64 = dec64table[op - ref]; + size_t dec64 = dec64table[op - ref]; #else - const int dec64 = 0; + const int dec64 = 0; #endif - op[0] = ref[0]; - op[1] = ref[1]; - op[2] = ref[2]; - op[3] = ref[3]; - op += 4; - ref += 4; - ref -= dec32table[op-ref]; - PUT4(ref, op); - op += STEPSIZE - 4; - ref -= dec64; - } else { - LZ4_COPYSTEP(ref, op); - } - cpy = op + length - (STEPSIZE 
- 4); - if (cpy > (oend - COPYLENGTH)) { - - /* Error: request to write beyond destination buffer */ - if (cpy > oend) - goto _output_error; - if ((ref + COPYLENGTH) > oend || - (op + COPYLENGTH) > oend) - goto _output_error; - LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); - while (op < cpy) - *op++ = *ref++; - op = cpy; - /* - * Check EOF (should never happen, since last 5 bytes - * are supposed to be literals) - */ - if (op == oend) - goto _output_error; - continue; - } - if (unlikely((unsigned long)cpy < (unsigned long)op)) - goto _output_error; - LZ4_SECURECOPY(ref, op, cpy); - op = cpy; /* correction */ - } - /* end of decoding */ - return (int) (ip - source); - - /* write overflow error detected */ + op[0] = ref[0]; + op[1] = ref[1]; + op[2] = ref[2]; + op[3] = ref[3]; + op += 4; + ref += 4; + ref -= dec32table[op - ref]; + PUT4(ref, op); + op += STEPSIZE - 4; + ref -= dec64; + } + else + { + LZ4_COPYSTEP(ref, op); + } + cpy = op + length - (STEPSIZE - 4); + if ( cpy > (oend - COPYLENGTH) ) + { + /* Error: request to write beyond destination buffer */ + if ( cpy > oend ) + goto _output_error; + if ( (ref + COPYLENGTH) > oend || (op + COPYLENGTH) > oend ) + goto _output_error; + LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); + while ( op < cpy ) + *op++ = *ref++; + op = cpy; + /* + * Check EOF (should never happen, since last 5 bytes + * are supposed to be literals) + */ + if ( op == oend ) + goto _output_error; + continue; + } + if ( unlikely((unsigned long)cpy < (unsigned long)op) ) + goto _output_error; + LZ4_SECURECOPY(ref, op, cpy); + op = cpy; /* correction */ + } + /* end of decoding */ + return (int)(ip - source); + + /* write overflow error detected */ _output_error: - return (int) (-(ip - source)); + return (int)(-(ip - source)); } #else /* defined(__XEN__) || defined(__MINIOS__) */ static int lz4_uncompress_unknownoutputsize(const unsigned char *source, - unsigned char *dest, int isize, - size_t maxoutputsize) + unsigned char *dest, int isize, + size_t maxoutputsize) { - const BYTE *ip = (const BYTE *) source; - const BYTE *const iend = ip + isize; - const BYTE *ref; + const BYTE *ip = (const BYTE *)source; + const BYTE *const iend = ip + isize; + const BYTE *ref; + BYTE *op = (BYTE *)dest; + BYTE *const oend = op + maxoutputsize; + BYTE *cpy; - BYTE *op = (BYTE *) dest; - BYTE * const oend = op + maxoutputsize; - BYTE *cpy; - - size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; + size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; #if LZ4_ARCH64 - size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3}; + size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3}; #endif - /* Main Loop */ - while (ip < iend) { - - unsigned token; - size_t length; - - /* get runlength */ - token = *ip++; - length = (token >> ML_BITS); - if (length == RUN_MASK) { - int s = 255; - while ((ip < iend) && (s == 255)) { - s = *ip++; - length += s; - } - } - /* copy literals */ - cpy = op + length; - if ((cpy > oend - COPYLENGTH) || - (ip + length > iend - COPYLENGTH)) { - - if (cpy > oend) - goto _output_error;/* writes beyond buffer */ - - if (ip + length != iend) - goto _output_error;/* - * Error: LZ4 format requires - * to consume all input - * at this stage - */ - memcpy(op, ip, length); - op += length; - break;/* Necessarily EOF, due to parsing restrictions */ - } - if (unlikely((unsigned long)cpy < (unsigned long)op)) - goto _output_error; - LZ4_WILDCOPY(ip, op, cpy); - ip -= (op - cpy); - op = cpy; - - /* get offset */ - LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip); - ip += 2; - if (ref < (BYTE * const) dest) - goto 
_output_error; - /* - * Error : offset creates reference - * outside of destination buffer - */ - - /* get matchlength */ - length = (token & ML_MASK); - if (length == ML_MASK) { - while (ip < iend) { - int s = *ip++; - length += s; - if (s == 255) - continue; - break; - } - } - - /* copy repeated sequence */ - if (unlikely((op - ref) < STEPSIZE)) { + /* Main Loop */ + while ( ip < iend ) + { + unsigned token; + size_t length; + + /* get runlength */ + token = *ip++; + length = (token >> ML_BITS); + if ( length == RUN_MASK ) + { + int s = 255; + while ( (ip < iend) && (s == 255) ) + { + s = *ip++; + length += s; + } + } + /* copy literals */ + cpy = op + length; + if ( (cpy > oend - COPYLENGTH) || (ip + length > iend - COPYLENGTH) ) + { + if ( cpy > oend ) + goto _output_error; /* writes beyond buffer */ + + if ( ip + length != iend ) + goto _output_error; /* + * Error: LZ4 format requires + * to consume all input + * at this stage + */ + memcpy(op, ip, length); + op += length; + break; /* Necessarily EOF, due to parsing restrictions */ + } + if ( unlikely((unsigned long)cpy < (unsigned long)op) ) + goto _output_error; + LZ4_WILDCOPY(ip, op, cpy); + ip -= (op - cpy); + op = cpy; + + /* get offset */ + LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip); + ip += 2; + if ( ref < (BYTE *const)dest ) + goto _output_error; + /* + * Error : offset creates reference + * outside of destination buffer + */ + + /* get matchlength */ + length = (token & ML_MASK); + if ( length == ML_MASK ) + { + while ( ip < iend ) + { + int s = *ip++; + length += s; + if ( s == 255 ) + continue; + break; + } + } + + /* copy repeated sequence */ + if ( unlikely((op - ref) < STEPSIZE) ) + { #if LZ4_ARCH64 - size_t dec64 = dec64table[op - ref]; + size_t dec64 = dec64table[op - ref]; #else - const int dec64 = 0; + const int dec64 = 0; #endif - op[0] = ref[0]; - op[1] = ref[1]; - op[2] = ref[2]; - op[3] = ref[3]; - op += 4; - ref += 4; - ref -= dec32table[op - ref]; - PUT4(ref, op); - op += STEPSIZE - 4; - ref -= dec64; - } else { - LZ4_COPYSTEP(ref, op); - } - cpy = op + length - (STEPSIZE-4); - if (cpy > oend - COPYLENGTH) { - if (cpy > oend) - goto _output_error; /* write outside of buf */ - - LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); - while (op < cpy) - *op++ = *ref++; - op = cpy; - /* - * Check EOF (should never happen, since last 5 bytes - * are supposed to be literals) - */ - if (op == oend) - goto _output_error; - continue; - } - if (unlikely((unsigned long)cpy < (unsigned long)op)) - goto _output_error; - LZ4_SECURECOPY(ref, op, cpy); - op = cpy; /* correction */ - } - /* end of decoding */ - return (int) (op - dest); - - /* write overflow error detected */ + op[0] = ref[0]; + op[1] = ref[1]; + op[2] = ref[2]; + op[3] = ref[3]; + op += 4; + ref += 4; + ref -= dec32table[op - ref]; + PUT4(ref, op); + op += STEPSIZE - 4; + ref -= dec64; + } + else + { + LZ4_COPYSTEP(ref, op); + } + cpy = op + length - (STEPSIZE - 4); + if ( cpy > oend - COPYLENGTH ) + { + if ( cpy > oend ) + goto _output_error; /* write outside of buf */ + + LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); + while ( op < cpy ) + *op++ = *ref++; + op = cpy; + /* + * Check EOF (should never happen, since last 5 bytes + * are supposed to be literals) + */ + if ( op == oend ) + goto _output_error; + continue; + } + if ( unlikely((unsigned long)cpy < (unsigned long)op) ) + goto _output_error; + LZ4_SECURECOPY(ref, op, cpy); + op = cpy; /* correction */ + } + /* end of decoding */ + return (int)(op - dest); + + /* write overflow error detected */ _output_error: - 
return (int) (-(ip - source)); + return (int)(-(ip - source)); } #endif @@ -297,38 +308,37 @@ _output_error: #if defined(__XEN__) || defined(__MINIOS__) int INIT lz4_decompress(const unsigned char *src, size_t *src_len, - unsigned char *dest, size_t actual_dest_len) + unsigned char *dest, size_t actual_dest_len) { - int ret = -1; - int input_len = 0; + int ret = -1; + int input_len = 0; - input_len = lz4_uncompress(src, dest, actual_dest_len); - if (input_len < 0) - goto exit_0; - *src_len = input_len; + input_len = lz4_uncompress(src, dest, actual_dest_len); + if ( input_len < 0 ) + goto exit_0; + *src_len = input_len; - return 0; + return 0; exit_0: - return ret; + return ret; } #else /* defined(__XEN__) || defined(__MINIOS__) */ int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len, - unsigned char *dest, size_t *dest_len) + unsigned char *dest, size_t *dest_len) { - int ret = -1; - int out_len = 0; + int ret = -1; + int out_len = 0; - out_len = lz4_uncompress_unknownoutputsize(src, dest, src_len, - *dest_len); - if (out_len < 0) - goto exit_0; - *dest_len = out_len; + out_len = lz4_uncompress_unknownoutputsize(src, dest, src_len, *dest_len); + if ( out_len < 0 ) + goto exit_0; + *dest_len = out_len; - return 0; + return 0; exit_0: - return ret; + return ret; } #endif diff --git a/xen/common/lzo.c b/xen/common/lzo.c index 0a11671075..f6771acbc0 100644 --- a/xen/common/lzo.c +++ b/xen/common/lzo.c @@ -24,36 +24,36 @@ * Richard Purdie */ - -#define COPY4(dst, src) \ - put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst)) +#define COPY4(dst, src) \ + put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst)) #if defined(__x86_64__) -#define COPY8(dst, src) \ - put_unaligned(get_unaligned((const u64 *)(src)), (u64 *)(dst)) +#define COPY8(dst, src) \ + put_unaligned(get_unaligned((const u64 *)(src)), (u64 *)(dst)) #else -#define COPY8(dst, src) \ - COPY4(dst, src); COPY4((dst) + 4, (src) + 4) +#define COPY8(dst, src) \ + COPY4(dst, src); \ + COPY4((dst) + 4, (src) + 4) #endif #ifdef __MINIOS__ -# include -# if __BYTE_ORDER == __LITTLE_ENDIAN -# undef __BIG_ENDIAN -# endif -# if __BYTE_ORDER == __BIG_ENDIAN -# undef __LITTLE_ENDIAN -# endif +#include +#if __BYTE_ORDER == __LITTLE_ENDIAN +#undef __BIG_ENDIAN +#endif +#if __BYTE_ORDER == __BIG_ENDIAN +#undef __LITTLE_ENDIAN +#endif #endif #if defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN) #error "conflicting endian definitions" #elif defined(__x86_64__) -#define LZO_USE_CTZ64 1 -#define LZO_USE_CTZ32 1 +#define LZO_USE_CTZ64 1 +#define LZO_USE_CTZ32 1 #elif defined(__i386__) || defined(__powerpc__) -#define LZO_USE_CTZ32 1 +#define LZO_USE_CTZ32 1 #elif defined(__arm__) && (__LINUX_ARM_ARCH__ >= 5) -#define LZO_USE_CTZ32 1 +#define LZO_USE_CTZ32 1 #endif #define M1_MAX_OFFSET 0x0400 @@ -76,10 +76,10 @@ #define M4_MARKER 16 #define lzo_dict_t unsigned short -#define D_BITS 13 -#define D_SIZE (1u << D_BITS) -#define D_MASK (D_SIZE - 1) -#define D_HIGH ((D_MASK >> 1) + 1) +#define D_BITS 13 +#define D_SIZE (1u << D_BITS) +#define D_MASK (D_SIZE - 1) +#define D_HIGH ((D_MASK >> 1) + 1) /* * LZO1X Compressor from LZO @@ -101,65 +101,76 @@ #include #define get_unaligned(_p) (*(_p)) -#define put_unaligned(_val,_p) (*(_p)=_val) +#define put_unaligned(_val, _p) (*(_p) = _val) #define get_unaligned_le16(_p) (*(u16 *)(_p)) #define get_unaligned_le32(_p) (*(u32 *)(_p)) #ifdef CONFIG_TMEM -static noinline size_t -lzo1x_1_do_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - 
size_t ti, void *wrkmem) +static noinline size_t lzo1x_1_do_compress(const unsigned char *in, + size_t in_len, unsigned char *out, + size_t *out_len, size_t ti, + void *wrkmem) { const unsigned char *ip; unsigned char *op; - const unsigned char * const in_end = in + in_len; - const unsigned char * const ip_end = in + in_len - 20; + const unsigned char *const in_end = in + in_len; + const unsigned char *const ip_end = in + in_len - 20; const unsigned char *ii; - lzo_dict_t * const dict = (lzo_dict_t *) wrkmem; + lzo_dict_t *const dict = (lzo_dict_t *)wrkmem; op = out; ip = in; ii = ip; ip += ti < 4 ? 4 - ti : 0; - for (;;) { + for ( ;; ) + { const unsigned char *m_pos; size_t t, m_len, m_off; u32 dv; literal: ip += 1 + ((ip - ii) >> 5); next: - if (unlikely(ip >= ip_end)) + if ( unlikely(ip >= ip_end) ) break; dv = get_unaligned_le32(ip); t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK; m_pos = in + dict[t]; - dict[t] = (lzo_dict_t) (ip - in); - if (unlikely(dv != get_unaligned_le32(m_pos))) + dict[t] = (lzo_dict_t)(ip - in); + if ( unlikely(dv != get_unaligned_le32(m_pos)) ) goto literal; ii -= ti; ti = 0; t = ip - ii; - if (t != 0) { - if (t <= 3) { + if ( t != 0 ) + { + if ( t <= 3 ) + { op[-2] |= t; COPY4(op, ii); op += t; - } else if (t <= 16) { + } + else if ( t <= 16 ) + { *op++ = (t - 3); COPY8(op, ii); COPY8(op + 8, ii + 8); op += t; - } else { - if (t <= 18) { + } + else + { + if ( t <= 18 ) + { *op++ = (t - 3); - } else { + } + else + { size_t tt = t - 18; *op++ = 0; - while (unlikely(tt > 255)) { + while ( unlikely(tt > 255) ) + { tt -= 255; *op++ = 0; } @@ -171,108 +182,117 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, op += 16; ii += 16; t -= 16; - } while (t >= 16); - if (t > 0) do { - *op++ = *ii++; - } while (--t > 0); + } while ( t >= 16 ); + if ( t > 0 ) + do { + *op++ = *ii++; + } while ( --t > 0 ); } } m_len = 4; { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64) - u64 v; - v = get_unaligned((const u64 *) (ip + m_len)) ^ - get_unaligned((const u64 *) (m_pos + m_len)); - if (unlikely(v == 0)) { - do { - m_len += 8; - v = get_unaligned((const u64 *) (ip + m_len)) ^ - get_unaligned((const u64 *) (m_pos + m_len)); - if (unlikely(ip + m_len >= ip_end)) - goto m_len_done; - } while (v == 0); - } -# if defined(__LITTLE_ENDIAN) - m_len += (unsigned) __builtin_ctzll(v) / 8; -# elif defined(__BIG_ENDIAN) - m_len += (unsigned) __builtin_clzll(v) / 8; -# else -# error "missing endian definition" -# endif + u64 v; + v = get_unaligned((const u64 *)(ip + m_len)) ^ + get_unaligned((const u64 *)(m_pos + m_len)); + if ( unlikely(v == 0) ) + { + do { + m_len += 8; + v = get_unaligned((const u64 *)(ip + m_len)) ^ + get_unaligned((const u64 *)(m_pos + m_len)); + if ( unlikely(ip + m_len >= ip_end) ) + goto m_len_done; + } while ( v == 0 ); + } +#if defined(__LITTLE_ENDIAN) + m_len += (unsigned)__builtin_ctzll(v) / 8; +#elif defined(__BIG_ENDIAN) + m_len += (unsigned)__builtin_clzll(v) / 8; +#else +#error "missing endian definition" +#endif #elif defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ32) - u32 v; - v = get_unaligned((const u32 *) (ip + m_len)) ^ - get_unaligned((const u32 *) (m_pos + m_len)); - if (unlikely(v == 0)) { - do { - m_len += 4; - v = get_unaligned((const u32 *) (ip + m_len)) ^ - get_unaligned((const u32 *) (m_pos + m_len)); - if (v != 0) - break; - m_len += 4; - v = get_unaligned((const u32 *) (ip + m_len)) ^ - get_unaligned((const u32 *) (m_pos + m_len)); - if (unlikely(ip + m_len >= ip_end)) - goto 
m_len_done; - } while (v == 0); - } -# if defined(__LITTLE_ENDIAN) - m_len += (unsigned) __builtin_ctz(v) / 8; -# elif defined(__BIG_ENDIAN) - m_len += (unsigned) __builtin_clz(v) / 8; -# else -# error "missing endian definition" -# endif + u32 v; + v = get_unaligned((const u32 *)(ip + m_len)) ^ + get_unaligned((const u32 *)(m_pos + m_len)); + if ( unlikely(v == 0) ) + { + do { + m_len += 4; + v = get_unaligned((const u32 *)(ip + m_len)) ^ + get_unaligned((const u32 *)(m_pos + m_len)); + if ( v != 0 ) + break; + m_len += 4; + v = get_unaligned((const u32 *)(ip + m_len)) ^ + get_unaligned((const u32 *)(m_pos + m_len)); + if ( unlikely(ip + m_len >= ip_end) ) + goto m_len_done; + } while ( v == 0 ); + } +#if defined(__LITTLE_ENDIAN) + m_len += (unsigned)__builtin_ctz(v) / 8; +#elif defined(__BIG_ENDIAN) + m_len += (unsigned)__builtin_clz(v) / 8; #else - if (unlikely(ip[m_len] == m_pos[m_len])) { - do { - m_len += 1; - if (ip[m_len] != m_pos[m_len]) - break; - m_len += 1; - if (ip[m_len] != m_pos[m_len]) - break; - m_len += 1; - if (ip[m_len] != m_pos[m_len]) - break; - m_len += 1; - if (ip[m_len] != m_pos[m_len]) - break; - m_len += 1; - if (ip[m_len] != m_pos[m_len]) - break; - m_len += 1; - if (ip[m_len] != m_pos[m_len]) - break; - m_len += 1; - if (ip[m_len] != m_pos[m_len]) - break; - m_len += 1; - if (unlikely(ip + m_len >= ip_end)) - goto m_len_done; - } while (ip[m_len] == m_pos[m_len]); - } +#error "missing endian definition" +#endif +#else + if ( unlikely(ip[m_len] == m_pos[m_len]) ) + { + do { + m_len += 1; + if ( ip[m_len] != m_pos[m_len] ) + break; + m_len += 1; + if ( ip[m_len] != m_pos[m_len] ) + break; + m_len += 1; + if ( ip[m_len] != m_pos[m_len] ) + break; + m_len += 1; + if ( ip[m_len] != m_pos[m_len] ) + break; + m_len += 1; + if ( ip[m_len] != m_pos[m_len] ) + break; + m_len += 1; + if ( ip[m_len] != m_pos[m_len] ) + break; + m_len += 1; + if ( ip[m_len] != m_pos[m_len] ) + break; + m_len += 1; + if ( unlikely(ip + m_len >= ip_end) ) + goto m_len_done; + } while ( ip[m_len] == m_pos[m_len] ); + } #endif } - m_len_done: + m_len_done: m_off = ip - m_pos; ip += m_len; ii = ip; - if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) { + if ( m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET ) + { m_off -= 1; *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2)); *op++ = (m_off >> 3); - } else if (m_off <= M3_MAX_OFFSET) { + } + else if ( m_off <= M3_MAX_OFFSET ) + { m_off -= 1; - if (m_len <= M3_MAX_LEN) + if ( m_len <= M3_MAX_LEN ) *op++ = (M3_MARKER | (m_len - 2)); - else { + else + { m_len -= M3_MAX_LEN; *op++ = M3_MARKER | 0; - while (unlikely(m_len > 255)) { + while ( unlikely(m_len > 255) ) + { m_len -= 255; *op++ = 0; } @@ -280,15 +300,18 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, } *op++ = (m_off << 2); *op++ = (m_off >> 6); - } else { + } + else + { m_off -= 0x4000; - if (m_len <= M4_MAX_LEN) - *op++ = (M4_MARKER | ((m_off >> 11) & 8) - | (m_len - 2)); - else { + if ( m_len <= M4_MAX_LEN ) + *op++ = (M4_MARKER | ((m_off >> 11) & 8) | (m_len - 2)); + else + { m_len -= M4_MAX_LEN; *op++ = (M4_MARKER | ((m_off >> 11) & 8)); - while (unlikely(m_len > 255)) { + while ( unlikely(m_len > 255) ) + { m_len -= 255; *op++ = 0; } @@ -303,57 +326,68 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, return in_end - (ii - ti); } -int lzo1x_1_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - void *wrkmem) +int lzo1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out, + size_t *out_len, void *wrkmem) { 
const unsigned char *ip = in; unsigned char *op = out; size_t l = in_len; size_t t = 0; - while (l > 20) { + while ( l > 20 ) + { size_t ll = l <= (M4_MAX_OFFSET + 1) ? l : (M4_MAX_OFFSET + 1); - uintptr_t ll_end = (uintptr_t) ip + ll; - if ((ll_end + ((t + ll) >> 5)) <= ll_end) + uintptr_t ll_end = (uintptr_t)ip + ll; + if ( (ll_end + ((t + ll) >> 5)) <= ll_end ) break; BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS); memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t)); t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem); ip += ll; op += *out_len; - l -= ll; + l -= ll; } t += l; - if (t > 0) { + if ( t > 0 ) + { const unsigned char *ii = in + in_len - t; - if (op == out && t <= 238) { + if ( op == out && t <= 238 ) + { *op++ = (17 + t); - } else if (t <= 3) { + } + else if ( t <= 3 ) + { op[-2] |= t; - } else if (t <= 18) { + } + else if ( t <= 18 ) + { *op++ = (t - 3); - } else { + } + else + { size_t tt = t - 18; *op++ = 0; - while (tt > 255) { + while ( tt > 255 ) + { tt -= 255; *op++ = 0; } *op++ = tt; } - if (t >= 16) do { - COPY8(op, ii); - COPY8(op + 8, ii + 8); - op += 16; - ii += 16; - t -= 16; - } while (t >= 16); - if (t > 0) do { - *op++ = *ii++; - } while (--t > 0); + if ( t >= 16 ) + do { + COPY8(op, ii); + COPY8(op + 8, ii + 8); + op += 16; + ii += 16; + t -= 16; + } while ( t >= 16 ); + if ( t > 0 ) + do { + *op++ = *ii++; + } while ( --t > 0 ); } *op++ = M4_MARKER | 1; @@ -364,9 +398,9 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len, return LZO_E_OK; } -# define INIT +#define INIT #else /* CONFIG_TMEM */ -# include "decompress.h" +#include "decompress.h" #endif /* CONFIG_TMEM */ /* @@ -382,11 +416,17 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len, * Richard Purdie */ -#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x)) -#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x)) -#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun -#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun -#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun +#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x)) +#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x)) +#define NEED_IP(x) \ + if ( !HAVE_IP(x) ) \ + goto input_overrun +#define NEED_OP(x) \ + if ( !HAVE_OP(x) ) \ + goto output_overrun +#define TEST_LB(m_pos) \ + if ( (m_pos) < out ) \ + goto lookbehind_overrun /* This MAX_255_COUNT is the maximum number of times we can add 255 to a base * count without overflowing an integer. The multiply will overflow when @@ -396,7 +436,7 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len, * or equal to 2*255, thus we can always prevent any overflow by accepting * two less 255 steps. See Documentation/lzo.txt for more information. 
*/ -#define MAX_255_COUNT ((((size_t)~0) / 255) - 2) +#define MAX_255_COUNT ((((size_t)~0) / 255) - 2) int INIT lzo1x_decompress_safe(const unsigned char *in, size_t in_len, unsigned char *out, size_t *out_len) @@ -406,46 +446,54 @@ int INIT lzo1x_decompress_safe(const unsigned char *in, size_t in_len, size_t t, next; size_t state = 0; const unsigned char *m_pos; - const unsigned char * const ip_end = in + in_len; - unsigned char * const op_end = out + *out_len; + const unsigned char *const ip_end = in + in_len; + unsigned char *const op_end = out + *out_len; op = out; ip = in; - if (unlikely(in_len < 3)) + if ( unlikely(in_len < 3) ) goto input_overrun; - if (*ip > 17) { + if ( *ip > 17 ) + { t = *ip++ - 17; - if (t < 4) { + if ( t < 4 ) + { next = t; goto match_next; } goto copy_literal_run; } - for (;;) { + for ( ;; ) + { t = *ip++; - if (t < 16) { - if (likely(state == 0)) { - if (unlikely(t == 0)) { + if ( t < 16 ) + { + if ( likely(state == 0) ) + { + if ( unlikely(t == 0) ) + { size_t offset; const unsigned char *ip_last = ip; - while (unlikely(*ip == 0)) { + while ( unlikely(*ip == 0) ) + { ip++; NEED_IP(1); } offset = ip - ip_last; - if (unlikely(offset > MAX_255_COUNT)) + if ( unlikely(offset > MAX_255_COUNT) ) return LZO_E_ERROR; offset = (offset << 8) - offset; t += offset + 15 + *ip++; } t += 3; - copy_literal_run: + copy_literal_run: #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) - if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) { + if ( likely(HAVE_IP(t + 15) && HAVE_OP(t + 15)) ) + { const unsigned char *ie = ip + t; unsigned char *oe = op + t; do { @@ -455,21 +503,24 @@ int INIT lzo1x_decompress_safe(const unsigned char *in, size_t in_len, COPY8(op, ip); op += 8; ip += 8; - } while (ip < ie); + } while ( ip < ie ); ip = ie; op = oe; - } else + } + else #endif { NEED_OP(t); NEED_IP(t + 3); do { *op++ = *ip++; - } while (--t > 0); + } while ( --t > 0 ); } state = 4; continue; - } else if (state != 4) { + } + else if ( state != 4 ) + { next = t & 3; m_pos = op - 1; m_pos -= t >> 2; @@ -480,31 +531,39 @@ int INIT lzo1x_decompress_safe(const unsigned char *in, size_t in_len, op[1] = m_pos[1]; op += 2; goto match_next; - } else { + } + else + { next = t & 3; m_pos = op - (1 + M2_MAX_OFFSET); m_pos -= t >> 2; m_pos -= *ip++ << 2; t = 3; } - } else if (t >= 64) { + } + else if ( t >= 64 ) + { next = t & 3; m_pos = op - 1; m_pos -= (t >> 2) & 7; m_pos -= *ip++ << 3; t = (t >> 5) - 1 + (3 - 1); - } else if (t >= 32) { + } + else if ( t >= 32 ) + { t = (t & 31) + (3 - 1); - if (unlikely(t == 2)) { + if ( unlikely(t == 2) ) + { size_t offset; const unsigned char *ip_last = ip; - while (unlikely(*ip == 0)) { + while ( unlikely(*ip == 0) ) + { ip++; NEED_IP(1); } offset = ip - ip_last; - if (unlikely(offset > MAX_255_COUNT)) + if ( unlikely(offset > MAX_255_COUNT) ) return LZO_E_ERROR; offset = (offset << 8) - offset; @@ -516,20 +575,24 @@ int INIT lzo1x_decompress_safe(const unsigned char *in, size_t in_len, ip += 2; m_pos -= next >> 2; next &= 3; - } else { + } + else + { m_pos = op; m_pos -= (t & 8) << 11; t = (t & 7) + (3 - 1); - if (unlikely(t == 2)) { + if ( unlikely(t == 2) ) + { size_t offset; const unsigned char *ip_last = ip; - while (unlikely(*ip == 0)) { + while ( unlikely(*ip == 0) ) + { ip++; NEED_IP(1); } offset = ip - ip_last; - if (unlikely(offset > MAX_255_COUNT)) + if ( unlikely(offset > MAX_255_COUNT) ) return LZO_E_ERROR; offset = (offset << 8) - offset; @@ -540,15 +603,17 @@ int INIT lzo1x_decompress_safe(const unsigned char *in, size_t in_len, ip += 2; m_pos 
-= next >> 2; next &= 3; - if (m_pos == op) + if ( m_pos == op ) goto eof_found; m_pos -= 0x4000; } TEST_LB(m_pos); #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) - if (op - m_pos >= 8) { + if ( op - m_pos >= 8 ) + { unsigned char *oe = op + t; - if (likely(HAVE_OP(t + 15))) { + if ( likely(HAVE_OP(t + 15)) ) + { do { COPY8(op, m_pos); op += 8; @@ -556,22 +621,26 @@ int INIT lzo1x_decompress_safe(const unsigned char *in, size_t in_len, COPY8(op, m_pos); op += 8; m_pos += 8; - } while (op < oe); + } while ( op < oe ); op = oe; - if (HAVE_IP(6)) { + if ( HAVE_IP(6) ) + { state = next; COPY4(op, ip); op += next; ip += next; continue; } - } else { + } + else + { NEED_OP(t); do { *op++ = *m_pos++; - } while (op < oe); + } while ( op < oe ); } - } else + } + else #endif { unsigned char *oe = op + t; @@ -582,43 +651,47 @@ int INIT lzo1x_decompress_safe(const unsigned char *in, size_t in_len, m_pos += 2; do { *op++ = *m_pos++; - } while (op < oe); + } while ( op < oe ); } - match_next: + match_next: state = next; t = next; #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) - if (likely(HAVE_IP(6) && HAVE_OP(4))) { + if ( likely(HAVE_IP(6) && HAVE_OP(4)) ) + { COPY4(op, ip); op += t; ip += t; - } else + } + else #endif { NEED_IP(t + 3); NEED_OP(t); - while (t > 0) { + while ( t > 0 ) + { *op++ = *ip++; t--; } } } - eof_found: +eof_found: *out_len = op - out; - return (t != 3 ? LZO_E_ERROR : - ip == ip_end ? LZO_E_OK : - ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN); + return (t != 3 ? LZO_E_ERROR + : ip == ip_end ? LZO_E_OK + : ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED + : LZO_E_INPUT_OVERRUN); - input_overrun: +input_overrun: *out_len = op - out; return LZO_E_INPUT_OVERRUN; - output_overrun: +output_overrun: *out_len = op - out; return LZO_E_OUTPUT_OVERRUN; - lookbehind_overrun: +lookbehind_overrun: *out_len = op - out; return LZO_E_LOOKBEHIND_OVERRUN; } diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c index 010e6f8dbf..87dd617462 100644 --- a/xen/common/mem_access.c +++ b/xen/common/mem_access.c @@ -19,7 +19,6 @@ * along with this program; If not, see . */ - #include #include #include @@ -55,14 +54,12 @@ int mem_access_memop(unsigned long cmd, if ( unlikely(!vm_event_check_ring(d->vm_event_monitor)) ) goto out; - switch ( mao.op ) + switch (mao.op) { - case XENMEM_access_op_set_access: rc = -EINVAL; if ( (mao.pfn != ~0ull) && - (mao.nr < start_iter || - ((mao.pfn + mao.nr - 1) < mao.pfn) || + (mao.nr < start_iter || ((mao.pfn + mao.nr - 1) < mao.pfn) || ((mao.pfn + mao.nr - 1) > domain_get_maximum_gpfn(d))) ) break; @@ -114,7 +111,7 @@ int mem_access_memop(unsigned long cmd, break; } - out: +out: rcu_unlock_domain(d); return rc; } diff --git a/xen/common/memory.c b/xen/common/memory.c index 5387769987..e9abdd485a 100644 --- a/xen/common/memory.c +++ b/xen/common/memory.c @@ -34,17 +34,18 @@ #include #endif -struct memop_args { +struct memop_args +{ /* INPUT */ - struct domain *domain; /* Domain to be affected. */ + struct domain *domain; /* Domain to be affected. */ XEN_GUEST_HANDLE(xen_pfn_t) extent_list; /* List of extent base addrs. */ unsigned int nr_extents; /* Number of extents to allocate or free. */ unsigned int extent_order; /* Size of each extent. */ unsigned int memflags; /* Allocation flags. */ /* INPUT/OUTPUT */ - unsigned int nr_done; /* Number of extents processed so far. */ - int preempted; /* Was the hypercall preempted? */ + unsigned int nr_done; /* Number of extents processed so far. */ + int preempted; /* Was the hypercall preempted? 
*/ }; #ifndef CONFIG_CTLDOM_MAX_ORDER @@ -97,10 +98,10 @@ static unsigned int max_order(const struct domain *d) } /* Helper to copy a typesafe MFN to guest */ -static inline -unsigned long __copy_mfn_to_guest_offset(XEN_GUEST_HANDLE(xen_pfn_t) hnd, - size_t off, mfn_t mfn) - { +static inline unsigned long +__copy_mfn_to_guest_offset(XEN_GUEST_HANDLE(xen_pfn_t) hnd, size_t off, + mfn_t mfn) +{ xen_pfn_t mfn_ = mfn_x(mfn); return __copy_to_guest_offset(hnd, off, &mfn_, 1); @@ -114,7 +115,7 @@ static void increase_reservation(struct memop_args *a) if ( !guest_handle_is_null(a->extent_list) && !guest_handle_subrange_okay(a->extent_list, a->nr_done, - a->nr_extents-1) ) + a->nr_extents - 1) ) return; if ( a->extent_order > max_order(current->domain) ) @@ -129,16 +130,17 @@ static void increase_reservation(struct memop_args *a) } page = alloc_domheap_pages(d, a->extent_order, a->memflags); - if ( unlikely(page == NULL) ) + if ( unlikely(page == NULL) ) { - gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: " - "id=%d memflags=%x (%ld of %d)\n", - a->extent_order, d->domain_id, a->memflags, - i, a->nr_extents); + gdprintk(XENLOG_INFO, + "Could not allocate order=%d extent: " + "id=%d memflags=%x (%ld of %d)\n", + a->extent_order, d->domain_id, a->memflags, i, + a->nr_extents); goto out; } - /* Inform the domain of the new page's machine address. */ + /* Inform the domain of the new page's machine address. */ if ( !paging_mode_translate(d) && !guest_handle_is_null(a->extent_list) ) { @@ -149,7 +151,7 @@ static void increase_reservation(struct memop_args *a) } } - out: +out: a->nr_done = i; } @@ -163,11 +165,12 @@ static void populate_physmap(struct memop_args *a) uint32_t tlbflush_timestamp = 0; if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done, - a->nr_extents-1) ) + a->nr_extents - 1) ) return; - if ( a->extent_order > (a->memflags & MEMF_populate_on_demand ? MAX_ORDER : - max_order(curr_d)) ) + if ( a->extent_order > (a->memflags & MEMF_populate_on_demand + ? 
MAX_ORDER + : max_order(curr_d)) ) return; if ( unlikely(!d->creation_finished) ) @@ -210,9 +213,8 @@ static void populate_physmap(struct memop_args *a) if ( d == curr_d ) goto out; - if ( is_hvm_domain(d) && - guest_physmap_mark_populate_on_demand(d, gpfn, - a->extent_order) < 0 ) + if ( is_hvm_domain(d) && guest_physmap_mark_populate_on_demand( + d, gpfn, a->extent_order) < 0 ) goto out; } else @@ -221,12 +223,12 @@ static void populate_physmap(struct memop_args *a) { mfn = _mfn(gpfn); - for ( j = 0; j < (1U << a->extent_order); j++, - mfn = mfn_add(mfn, 1) ) + for ( j = 0; j < (1U << a->extent_order); + j++, mfn = mfn_add(mfn, 1) ) { if ( !mfn_valid(mfn) ) { - gdprintk(XENLOG_INFO, "Invalid mfn %#"PRI_mfn"\n", + gdprintk(XENLOG_INFO, "Invalid mfn %#" PRI_mfn "\n", mfn_x(mfn)); goto out; } @@ -235,8 +237,8 @@ static void populate_physmap(struct memop_args *a) if ( !get_page(page, d) ) { gdprintk(XENLOG_INFO, - "mfn %#"PRI_mfn" doesn't belong to d%d\n", - mfn_x(mfn), d->domain_id); + "mfn %#" PRI_mfn " doesn't belong to d%d\n", + mfn_x(mfn), d->domain_id); goto out; } put_page(page); @@ -252,9 +254,10 @@ static void populate_physmap(struct memop_args *a) { if ( !tmem_enabled() || a->extent_order ) gdprintk(XENLOG_INFO, - "Could not allocate order=%u extent: id=%d memflags=%#x (%u of %u)\n", - a->extent_order, d->domain_id, a->memflags, - i, a->nr_extents); + "Could not allocate order=%u extent: id=%d " + "memflags=%#x (%u of %u)\n", + a->extent_order, d->domain_id, a->memflags, i, + a->nr_extents); goto out; } @@ -275,9 +278,9 @@ static void populate_physmap(struct memop_args *a) for ( j = 0; j < (1U << a->extent_order); j++ ) set_gpfn_from_mfn(mfn_x(mfn_add(mfn, j)), gpfn + j); - /* Inform the domain of the new page's machine address. */ - if ( unlikely(__copy_mfn_to_guest_offset(a->extent_list, i, - mfn)) ) + /* Inform the domain of the new page's machine address. */ + if ( unlikely( + __copy_mfn_to_guest_offset(a->extent_list, i, mfn)) ) goto out; } } @@ -347,11 +350,11 @@ int guest_remove_page(struct domain *d, unsigned long gmfn) put_gfn(d, gmfn); #endif gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n", - d->domain_id, gmfn); + d->domain_id, gmfn); return -EINVAL; } - + #ifdef CONFIG_X86 if ( p2m_is_shared(p2mt) ) { @@ -372,7 +375,8 @@ int guest_remove_page(struct domain *d, unsigned long gmfn) } #endif /* CONFIG_X86 */ - obtain_page: __maybe_unused; +obtain_page: + __maybe_unused; page = mfn_to_page(mfn); if ( unlikely(!get_page(page, d)) ) { @@ -403,7 +407,7 @@ int guest_remove_page(struct domain *d, unsigned long gmfn) put_page(page); #ifdef CONFIG_X86 - out_put_gfn: +out_put_gfn: put_gfn(d, gmfn); #endif @@ -420,7 +424,7 @@ static void decrease_reservation(struct memop_args *a) xen_pfn_t gmfn; if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done, - a->nr_extents-1) || + a->nr_extents - 1) || a->extent_order > max_order(current->domain) ) return; @@ -439,22 +443,24 @@ static void decrease_reservation(struct memop_args *a) if ( tb_init_done ) { - struct { + struct + { u64 gfn; - int d:16,order:16; + int d : 16, order : 16; } t; t.gfn = gmfn; t.d = a->domain->domain_id; t.order = a->extent_order; - + __trace_var(TRC_MEM_DECREASE_RESERVATION, 0, sizeof(t), &t); } /* See if populate-on-demand wants to handle this */ - pod_done = is_hvm_domain(a->domain) ? - p2m_pod_decrease_reservation(a->domain, _gfn(gmfn), - a->extent_order) : 0; + pod_done = is_hvm_domain(a->domain) + ? 
p2m_pod_decrease_reservation(a->domain, _gfn(gmfn), + a->extent_order) + : 0; /* * Look for pages not handled by p2m_pod_decrease_reservation(). @@ -466,7 +472,7 @@ static void decrease_reservation(struct memop_args *a) */ for ( j = 0; j + pod_done < (1UL << a->extent_order); j++ ) { - switch ( guest_remove_page(a->domain, gmfn + j) ) + switch (guest_remove_page(a->domain, gmfn + j)) { case 0: break; @@ -481,7 +487,7 @@ static void decrease_reservation(struct memop_args *a) } } - out: +out: a->nr_done = i; } @@ -516,11 +522,11 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg) PAGE_LIST_HEAD(in_chunk_list); PAGE_LIST_HEAD(out_chunk_list); unsigned long in_chunk_order, out_chunk_order; - xen_pfn_t gpfn, gmfn; - mfn_t mfn; + xen_pfn_t gpfn, gmfn; + mfn_t mfn; unsigned long i, j, k; - unsigned int memflags = 0; - long rc = 0; + unsigned int memflags = 0; + long rc = 0; struct domain *d; struct page_info *page; @@ -558,7 +564,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg) if ( exch.in.extent_order <= exch.out.extent_order ) { - in_chunk_order = exch.out.extent_order - exch.in.extent_order; + in_chunk_order = exch.out.extent_order - exch.in.extent_order; out_chunk_order = 0; if ( !guest_handle_subrange_okay(exch.out.extent_start, @@ -571,7 +577,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg) } else { - in_chunk_order = 0; + in_chunk_order = 0; out_chunk_order = exch.in.extent_order - exch.out.extent_order; if ( !guest_handle_subrange_okay(exch.out.extent_start, @@ -604,13 +610,11 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg) } memflags |= MEMF_bits(domain_clamp_alloc_bitsize( - d, - XENMEMF_get_address_bits(exch.out.mem_flags) ? : - (BITS_PER_LONG+PAGE_SHIFT))); + d, XENMEMF_get_address_bits(exch.out.mem_flags) + ?: (BITS_PER_LONG + PAGE_SHIFT))); for ( i = (exch.nr_exchanged >> in_chunk_order); - i < (exch.in.nr_extents >> in_chunk_order); - i++ ) + i < (exch.in.nr_extents >> in_chunk_order); i++ ) { if ( i != (exch.nr_exchanged >> in_chunk_order) && hypercall_preempt_check() ) @@ -619,15 +623,16 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg) rcu_unlock_domain(d); if ( __copy_field_to_guest(arg, &exch, nr_exchanged) ) return -EFAULT; - return hypercall_create_continuation( - __HYPERVISOR_memory_op, "lh", XENMEM_exchange, arg); + return hypercall_create_continuation(__HYPERVISOR_memory_op, "lh", + XENMEM_exchange, arg); } /* Steal a chunk's worth of input pages from the domain. 
*/ for ( j = 0; j < (1UL << in_chunk_order); j++ ) { - if ( unlikely(__copy_from_guest_offset( - &gmfn, exch.in.extent_start, (i<page_alloc_lock); - drop_dom_ref = (dec_count && - !domain_adjust_tot_pages(d, -dec_count)); + drop_dom_ref = + (dec_count && !domain_adjust_tot_pages(d, -dec_count)); spin_unlock(&d->page_alloc_lock); if ( drop_dom_ref ) @@ -752,8 +757,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg) } mfn = page_to_mfn(page); - guest_physmap_add_page(d, _gfn(gpfn), mfn, - exch.out.extent_order); + guest_physmap_add_page(d, _gfn(gpfn), mfn, exch.out.extent_order); if ( !paging_mode_translate(d) ) { @@ -765,7 +769,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg) rc = -EFAULT; } } - BUG_ON( !(d->is_dying) && (j != (1UL << out_chunk_order)) ); + BUG_ON(!(d->is_dying) && (j != (1UL << out_chunk_order))); if ( rc ) goto fail; @@ -781,7 +785,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg) * Failed a chunk! Free any partial chunk work. Tell caller how many * chunks succeeded. */ - fail: +fail: /* * Reassign any input pages we managed to steal. NB that if the assign * fails again, we're on the hook for freeing the page, since we've already @@ -794,7 +798,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg) free_domheap_page(page); } - dying: +dying: rcu_unlock_domain(d); /* Free any output pages we managed to allocate. */ while ( (page = page_list_remove_head(&out_chunk_list)) ) @@ -802,7 +806,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg) exch.nr_exchanged = i << in_chunk_order; - fail_early: +fail_early: if ( __copy_field_to_guest(arg, &exch, nr_exchanged) ) rc = -EFAULT; return rc; @@ -821,8 +825,8 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp, extra.foreign_domid = DOMID_INVALID; if ( xatp->space != XENMAPSPACE_gmfn_range ) - return xenmem_add_to_physmap_one(d, xatp->space, extra, - xatp->idx, _gfn(xatp->gpfn)); + return xenmem_add_to_physmap_one(d, xatp->space, extra, xatp->idx, + _gfn(xatp->gpfn)); if ( xatp->size < start ) return -EILSEQ; @@ -832,12 +836,12 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp, xatp->size -= start; if ( has_iommu_pt(d) ) - this_cpu(iommu_dont_flush_iotlb) = 1; + this_cpu(iommu_dont_flush_iotlb) = 1; while ( xatp->size > done ) { - rc = xenmem_add_to_physmap_one(d, XENMAPSPACE_gmfn, extra, - xatp->idx, _gfn(xatp->gpfn)); + rc = xenmem_add_to_physmap_one(d, XENMAPSPACE_gmfn, extra, xatp->idx, + _gfn(xatp->gpfn)); if ( rc < 0 ) break; @@ -890,15 +894,13 @@ static int xenmem_add_to_physmap_batch(struct domain *d, xen_pfn_t gpfn; int rc; - if ( unlikely(__copy_from_guest_offset(&idx, xatpb->idxs, - extent, 1)) || - unlikely(__copy_from_guest_offset(&gpfn, xatpb->gpfns, - extent, 1)) ) + if ( unlikely(__copy_from_guest_offset(&idx, xatpb->idxs, extent, 1)) || + unlikely( + __copy_from_guest_offset(&gpfn, xatpb->gpfns, extent, 1)) ) return -EFAULT; - rc = xenmem_add_to_physmap_one(d, xatpb->space, - xatpb->u, - idx, _gfn(gpfn)); + rc = xenmem_add_to_physmap_one(d, xatpb->space, xatpb->u, idx, + _gfn(gpfn)); if ( unlikely(__copy_to_guest_offset(xatpb->errs, extent, &rc, 1)) ) return -EFAULT; @@ -911,16 +913,16 @@ static int xenmem_add_to_physmap_batch(struct domain *d, return 0; } -static int construct_memop_from_reservation( - const struct xen_memory_reservation *r, - struct memop_args *a) +static int 
+construct_memop_from_reservation(const struct xen_memory_reservation *r, + struct memop_args *a) { unsigned int address_bits; - a->extent_list = r->extent_start; - a->nr_extents = r->nr_extents; + a->extent_list = r->extent_start; + a->nr_extents = r->nr_extents; a->extent_order = r->extent_order; - a->memflags = 0; + a->memflags = 0; address_bits = XENMEMF_get_address_bits(r->mem_flags); if ( (address_bits != 0) && @@ -963,13 +965,14 @@ static int construct_memop_from_reservation( } #ifdef CONFIG_HAS_PASSTHROUGH -struct get_reserved_device_memory { +struct get_reserved_device_memory +{ struct xen_reserved_device_memory_map map; unsigned int used_entries; }; -static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr, - u32 id, void *ctxt) +static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr, u32 id, + void *ctxt) { struct get_reserved_device_memory *grdm = ctxt; u32 sbdf = PCI_SBDF3(grdm->map.dev.pci.seg, grdm->map.dev.pci.bus, @@ -980,12 +983,11 @@ static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr, if ( grdm->used_entries < grdm->map.nr_entries ) { - struct xen_reserved_device_memory rdm = { - .start_pfn = start, .nr_pages = nr - }; + struct xen_reserved_device_memory rdm = {.start_pfn = start, + .nr_pages = nr}; - if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries, - &rdm, 1) ) + if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries, &rdm, + 1) ) return -EFAULT; } @@ -1009,8 +1011,7 @@ static long xatp_permission_check(struct domain *d, unsigned int space) } static int acquire_grant_table(struct domain *d, unsigned int id, - unsigned long frame, - unsigned int nr_frames, + unsigned long frame, unsigned int nr_frames, xen_pfn_t mfn_list[]) { unsigned int i = nr_frames; @@ -1021,7 +1022,7 @@ static int acquire_grant_table(struct domain *d, unsigned int id, mfn_t mfn = INVALID_MFN; int rc; - switch ( id ) + switch (id) { case XENMEM_resource_grant_table_id_shared: rc = gnttab_get_shared_frame(d, frame + i, &mfn); @@ -1046,8 +1047,8 @@ static int acquire_grant_table(struct domain *d, unsigned int id, return 0; } -static int acquire_resource( - XEN_GUEST_HANDLE_PARAM(xen_mem_acquire_resource_t) arg) +static int acquire_resource(XEN_GUEST_HANDLE_PARAM(xen_mem_acquire_resource_t) + arg) { struct domain *d, *currd = current->domain; xen_mem_acquire_resource_t xmar; @@ -1089,7 +1090,7 @@ static int acquire_resource( if ( rc ) goto out; - switch ( xmar.type ) + switch (xmar.type) { case XENMEM_resource_grant_table: rc = acquire_grant_table(d, xmar.id, xmar.frame, xmar.nr_frames, @@ -1130,19 +1131,17 @@ static int acquire_resource( for ( i = 0; !rc && i < xmar.nr_frames; i++ ) { - rc = set_foreign_p2m_entry(currd, gfn_list[i], - _mfn(mfn_list[i])); + rc = set_foreign_p2m_entry(currd, gfn_list[i], _mfn(mfn_list[i])); /* rc should be -EIO for any iteration other than the first */ if ( rc && i ) rc = -EIO; } } - if ( xmar.flags != 0 && - __copy_field_to_guest(arg, &xmar, flags) ) + if ( xmar.flags != 0 && __copy_field_to_guest(arg, &xmar, flags) ) rc = -EFAULT; - out: +out: rcu_unlock_domain(d); return rc; @@ -1158,7 +1157,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) unsigned long start_extent = cmd >> MEMOP_EXTENT_SHIFT; int op = cmd & MEMOP_CMD_MASK; - switch ( op ) + switch (op) { case XENMEM_increase_reservation: case XENMEM_decrease_reservation: @@ -1184,11 +1183,11 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) return start_extent; } - args.nr_done = start_extent; 
+ args.nr_done = start_extent; args.preempted = 0; - if ( op == XENMEM_populate_physmap - && (reservation.mem_flags & XENMEMF_populate_on_demand) ) + if ( op == XENMEM_populate_physmap && + (reservation.mem_flags & XENMEMF_populate_on_demand) ) args.memflags |= MEMF_populate_on_demand; if ( xsm_memory_adjust_reservation(XSM_TARGET, curr_d, d) ) @@ -1203,7 +1202,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) pv_shim_online_memory(args.nr_extents, args.extent_order); #endif - switch ( op ) + switch (op) { case XENMEM_increase_reservation: increase_reservation(&args); @@ -1222,8 +1221,8 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) if ( args.preempted ) return hypercall_create_continuation( - __HYPERVISOR_memory_op, "lh", - op | (rc << MEMOP_EXTENT_SHIFT), arg); + __HYPERVISOR_memory_op, "lh", op | (rc << MEMOP_EXTENT_SHIFT), + arg); #ifdef CONFIG_X86 if ( pv_shim && op == XENMEM_decrease_reservation ) @@ -1272,7 +1271,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) return rc; } - switch ( op ) + switch (op) { case XENMEM_current_reservation: rc = d->tot_pages; @@ -1294,7 +1293,8 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) { struct xen_add_to_physmap xatp; - BUILD_BUG_ON((typeof(xatp.size))-1 > (UINT_MAX >> MEMOP_EXTENT_SHIFT)); + BUILD_BUG_ON((typeof(xatp.size)) - 1 > + (UINT_MAX >> MEMOP_EXTENT_SHIFT)); /* Check for malicious or buggy input. */ if ( start_extent != (typeof(xatp.size))start_extent ) @@ -1323,9 +1323,9 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) rcu_unlock_domain(d); if ( xatp.space == XENMAPSPACE_gmfn_range && rc > 0 ) - rc = hypercall_create_continuation( - __HYPERVISOR_memory_op, "lh", - op | (rc << MEMOP_EXTENT_SHIFT), arg); + rc = hypercall_create_continuation(__HYPERVISOR_memory_op, "lh", + op | (rc << MEMOP_EXTENT_SHIFT), + arg); return rc; } @@ -1334,7 +1334,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) { struct xen_add_to_physmap_batch xatpb; - BUILD_BUG_ON((typeof(xatpb.size))-1 > + BUILD_BUG_ON((typeof(xatpb.size)) - 1 > (UINT_MAX >> MEMOP_EXTENT_SHIFT)); /* Check for malicious or buggy input. */ @@ -1364,9 +1364,9 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) rcu_unlock_domain(d); if ( rc > 0 ) - rc = hypercall_create_continuation( - __HYPERVISOR_memory_op, "lh", - op | (rc << MEMOP_EXTENT_SHIFT), arg); + rc = hypercall_create_continuation(__HYPERVISOR_memory_op, "lh", + op | (rc << MEMOP_EXTENT_SHIFT), + arg); return rc; } @@ -1454,7 +1454,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) * Guest passes nr_vnodes, number of regions and nr_vcpus thus * we know how much memory guest has allocated. */ - if ( copy_from_guest(&topology, arg, 1 )) + if ( copy_from_guest(&topology, arg, 1) ) return -EFAULT; if ( topology.pad != 0 ) @@ -1488,8 +1488,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) * Check here guest parameters make sure we dont overflow. * Additionaly check padding. 
*/ - if ( topology.nr_vnodes < dom_vnodes || - topology.nr_vcpus < dom_vcpus || + if ( topology.nr_vnodes < dom_vnodes || topology.nr_vcpus < dom_vcpus || topology.nr_vmemranges < dom_vranges ) { read_unlock(&d->vnuma_rwlock); @@ -1505,12 +1504,11 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) read_unlock(&d->vnuma_rwlock); - tmp.vdistance = xmalloc_array(unsigned int, dom_vnodes * dom_vnodes); + tmp.vdistance = xmalloc_array(unsigned int, dom_vnodes *dom_vnodes); tmp.vmemrange = xmalloc_array(xen_vmemrange_t, dom_vranges); tmp.vcpu_to_vnode = xmalloc_array(unsigned int, dom_vcpus); - if ( tmp.vdistance == NULL || - tmp.vmemrange == NULL || + if ( tmp.vdistance == NULL || tmp.vmemrange == NULL || tmp.vcpu_to_vnode == NULL ) { rc = -ENOMEM; @@ -1524,8 +1522,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) read_lock(&d->vnuma_rwlock); if ( dom_vnodes < d->vnuma->nr_vnodes || - dom_vranges < d->vnuma->nr_vmemranges || - dom_vcpus < d->max_vcpus ) + dom_vranges < d->vnuma->nr_vmemranges || dom_vcpus < d->max_vcpus ) { read_unlock(&d->vnuma_rwlock); rc = -EAGAIN; @@ -1547,8 +1544,8 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) rc = -EFAULT; - if ( copy_to_guest(topology.vmemrange.h, tmp.vmemrange, - dom_vranges) != 0 ) + if ( copy_to_guest(topology.vmemrange.h, tmp.vmemrange, dom_vranges) != + 0 ) goto vnumainfo_out; if ( copy_to_guest(topology.vdistance.h, tmp.vdistance, @@ -1565,7 +1562,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) rc = __copy_to_guest(arg, &topology, 1) ? -EFAULT : 0; - vnumainfo_out: + vnumainfo_out: rcu_unlock_domain(d); xfree(tmp.vdistance); @@ -1590,8 +1587,8 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) return -EINVAL; grdm.used_entries = 0; - rc = iommu_get_reserved_device_memory(get_reserved_device_memory, - &grdm); + rc = + iommu_get_reserved_device_memory(get_reserved_device_memory, &grdm); if ( !rc && grdm.map.nr_entries < grdm.used_entries ) rc = -ENOBUFS; @@ -1634,8 +1631,7 @@ void copy_domain_page(mfn_t dest, mfn_t source) unmap_domain_page(src); } -void destroy_ring_for_helper( - void **_va, struct page_info *page) +void destroy_ring_for_helper(void **_va, struct page_info *page) { void *va = *_va; @@ -1694,9 +1690,8 @@ int check_get_page_from_gfn(struct domain *d, gfn_t gfn, bool readonly, return 0; } -int prepare_ring_for_helper( - struct domain *d, unsigned long gmfn, struct page_info **_page, - void **_va) +int prepare_ring_for_helper(struct domain *d, unsigned long gmfn, + struct page_info **_page, void **_va) { p2m_type_t p2mt; struct page_info *page; diff --git a/xen/common/monitor.c b/xen/common/monitor.c index cb5f37fdb2..e2cb64163a 100644 --- a/xen/common/monitor.c +++ b/xen/common/monitor.c @@ -40,7 +40,7 @@ int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop) if ( unlikely(rc) ) return rc; - switch ( mop->op ) + switch (mop->op) { case XEN_DOMCTL_MONITOR_OP_ENABLE: requested_status = true; @@ -50,7 +50,8 @@ int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop) if ( unlikely(mop->event > 31) ) return -EINVAL; /* Check if event type is available. 
*/ - if ( unlikely(!(arch_monitor_get_capabilities(d) & (1U << mop->event))) ) + if ( unlikely( + !(arch_monitor_get_capabilities(d) & (1U << mop->event))) ) return -EOPNOTSUPP; break; @@ -63,7 +64,7 @@ int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop) return arch_monitor_domctl_op(d, mop); } - switch ( mop->event ) + switch (mop->event) { case XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST: { @@ -94,7 +95,7 @@ int monitor_traps(struct vcpu *v, bool sync, vm_event_request_t *req) struct domain *d = v->domain; rc = vm_event_claim_slot(d, d->vm_event_monitor); - switch ( rc ) + switch (rc) { case 0: break; diff --git a/xen/common/multicall.c b/xen/common/multicall.c index 5a199ebf8f..f64feff40c 100644 --- a/xen/common/multicall.c +++ b/xen/common/multicall.c @@ -32,14 +32,13 @@ static void trace_multicall_call(multicall_entry_t *call) __trace_multicall_call(call); } -ret_t -do_multicall( - XEN_GUEST_HANDLE_PARAM(multicall_entry_t) call_list, uint32_t nr_calls) +ret_t do_multicall(XEN_GUEST_HANDLE_PARAM(multicall_entry_t) call_list, + uint32_t nr_calls) { struct vcpu *curr = current; struct mc_state *mcs = &curr->mc_state; - uint32_t i; - int rc = 0; + uint32_t i; + int rc = 0; enum mc_disposition disp = mc_continue; if ( unlikely(__test_and_set_bit(_MCSF_in_multicall, &mcs->flags)) ) @@ -91,8 +90,8 @@ do_multicall( /* nothing, best effort only */; rc = mcs->call.result; } - else if ( unlikely(__copy_field_to_guest(call_list, &mcs->call, - result)) ) + else if ( unlikely( + __copy_field_to_guest(call_list, &mcs->call, result)) ) rc = -EFAULT; else if ( curr->hcall_preempted ) { @@ -118,11 +117,11 @@ do_multicall( mcs->flags = 0; return rc; - preempted: +preempted: perfc_add(calls_from_multicall, i); mcs->flags = 0; - return hypercall_create_continuation( - __HYPERVISOR_multicall, "hi", call_list, nr_calls-i); + return hypercall_create_continuation(__HYPERVISOR_multicall, "hi", + call_list, nr_calls - i); } /* diff --git a/xen/common/notifier.c b/xen/common/notifier.c index 34488a84ca..66b54b9bf5 100644 --- a/xen/common/notifier.c +++ b/xen/common/notifier.c @@ -18,8 +18,8 @@ * Adds a notifier to a raw notifier chain. * All locking must be provided by the caller. */ -void __init notifier_chain_register( - struct notifier_head *nh, struct notifier_block *n) +void __init notifier_chain_register(struct notifier_head *nh, + struct notifier_block *n) { struct list_head *chain = &nh->head.chain; struct notifier_block *nb; @@ -43,8 +43,8 @@ void __init notifier_chain_register( * Removes a notifier from a raw notifier chain. * All locking must be provided by the caller. */ -void __init notifier_chain_unregister( - struct notifier_head *nh, struct notifier_block *n) +void __init notifier_chain_unregister(struct notifier_head *nh, + struct notifier_block *n) { list_del(&n->chain); } @@ -65,9 +65,8 @@ void __init notifier_chain_unregister( * the notifier function which halted execution. Otherwise the return value is * the return value of the last notifier function called. 
*/ -int notifier_call_chain( - struct notifier_head *nh, unsigned long val, void *v, - struct notifier_block **pcursor) +int notifier_call_chain(struct notifier_head *nh, unsigned long val, void *v, + struct notifier_block **pcursor) { int ret = NOTIFY_DONE; struct list_head *cursor; diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index be44158033..19488c7df0 100644 --- a/xen/common/page_alloc.c +++ b/xen/common/page_alloc.c @@ -162,7 +162,8 @@ string_param("badpage", opt_badpage); /* * no-bootscrub -> Free pages are not zeroed during boot. */ -enum bootscrub_mode { +enum bootscrub_mode +{ BOOTSCRUB_OFF, BOOTSCRUB_ON, BOOTSCRUB_IDLE, @@ -185,7 +186,7 @@ static int __init parse_bootscrub_param(const char *s) return 0; } - switch ( parse_bool(s, NULL) ) + switch (parse_bool(s, NULL)) { case 0: opt_bootscrub = BOOTSCRUB_OFF; @@ -217,7 +218,7 @@ size_param("bootscrub_chunk", opt_bootscrub_chunk); #ifdef CONFIG_SCRUB_DEBUG static bool __read_mostly scrub_debug; #else -#define scrub_debug false +#define scrub_debug false #endif /* @@ -242,12 +243,14 @@ PAGE_LIST_HEAD(page_broken_list); */ mfn_t first_valid_mfn = INVALID_MFN_INITIALIZER; -static struct bootmem_region { +static struct bootmem_region +{ unsigned long s, e; /* MFNs @s through @e-1 inclusive are free */ -} *__initdata bootmem_region_list; +} * __initdata bootmem_region_list; static unsigned int __initdata nr_bootmem_regions; -struct scrub_region { +struct scrub_region +{ unsigned long offset; unsigned long start; unsigned long per_cpu_sz; @@ -274,9 +277,9 @@ static void __init bootmem_region_add(unsigned long s, unsigned long e) BUG_ON((i < nr_bootmem_regions) && (e > bootmem_region_list[i].s)); BUG_ON(nr_bootmem_regions == (PAGE_SIZE / sizeof(struct bootmem_region))); - memmove(&bootmem_region_list[i+1], &bootmem_region_list[i], + memmove(&bootmem_region_list[i + 1], &bootmem_region_list[i], (nr_bootmem_regions - i) * sizeof(*bootmem_region_list)); - bootmem_region_list[i] = (struct bootmem_region) { s, e }; + bootmem_region_list[i] = (struct bootmem_region){s, e}; nr_bootmem_regions++; } @@ -316,8 +319,7 @@ void __init init_boot_pages(paddr_t ps, paddr_t pe) const struct platform_bad_page *badpage; unsigned int i, array_size; - BUILD_BUG_ON(8 * sizeof(frame_table->u.free.first_dirty) < - MAX_ORDER + 1); + BUILD_BUG_ON(8 * sizeof(frame_table->u.free.first_dirty) < MAX_ORDER + 1); #endif BUILD_BUG_ON(sizeof(frame_table->u) != sizeof(unsigned long)); @@ -383,7 +385,7 @@ void __init init_boot_pages(paddr_t ps, paddr_t pe) else if ( *p != '\0' ) break; - bootmem_region_zap(bad_spfn, bad_epfn+1); + bootmem_region_zap(bad_spfn, bad_epfn + 1); } } @@ -409,8 +411,7 @@ mfn_t __init alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align) * Those allocations get remapped anyway, i.e. them not having 1:1 * mappings always accessible is not a problem. */ - if ( highmem_start && pfn_align == 1 && - r->e > PFN_DOWN(highmem_start) ) + if ( highmem_start && pfn_align == 1 && r->e > PFN_DOWN(highmem_start) ) { pg = r->s; if ( pg + nr_pfns > PFN_DOWN(highmem_start) ) @@ -429,20 +430,18 @@ mfn_t __init alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align) BUG(); } - - /************************* * BINARY BUDDY ALLOCATOR */ #define MEMZONE_XEN 0 -#define NR_ZONES (PADDR_BITS - PAGE_SHIFT + 1) +#define NR_ZONES (PADDR_BITS - PAGE_SHIFT + 1) -#define bits_to_zone(b) (((b) < (PAGE_SHIFT + 1)) ? 1 : ((b) - PAGE_SHIFT)) -#define page_to_zone(pg) (is_xen_heap_page(pg) ? MEMZONE_XEN : \ - (flsl(mfn_x(page_to_mfn(pg))) ? 
: 1)) +#define bits_to_zone(b) (((b) < (PAGE_SHIFT + 1)) ? 1 : ((b)-PAGE_SHIFT)) +#define page_to_zone(pg) \ + (is_xen_heap_page(pg) ? MEMZONE_XEN : (flsl(mfn_x(page_to_mfn(pg))) ?: 1)) -typedef struct page_list_head heap_by_zone_and_order_t[NR_ZONES][MAX_ORDER+1]; +typedef struct page_list_head heap_by_zone_and_order_t[NR_ZONES][MAX_ORDER + 1]; static heap_by_zone_and_order_t *_heap[MAX_NUMNODES]; #define heap(node, zone, order) ((*_heap[node])[zone][order]) @@ -565,7 +564,7 @@ void get_outstanding_claims(uint64_t *free_pages, uint64_t *outstanding_pages) { spin_lock(&heap_lock); *outstanding_pages = outstanding_claims; - *free_pages = avail_domheap_pages(); + *free_pages = avail_domheap_pages(); spin_unlock(&heap_lock); } @@ -582,9 +581,9 @@ static unsigned long init_node_heap(int node, unsigned long mfn, /* First node to be discovered has its heap metadata statically alloced. */ static heap_by_zone_and_order_t _heap_static; static unsigned long avail_static[NR_ZONES]; - unsigned long needed = (sizeof(**_heap) + - sizeof(**avail) * NR_ZONES + - PAGE_SIZE - 1) >> PAGE_SHIFT; + unsigned long needed = + (sizeof(**_heap) + sizeof(**avail) * NR_ZONES + PAGE_SIZE - 1) >> + PAGE_SHIFT; int i, j; if ( !first_node_initialised ) @@ -594,23 +593,21 @@ static unsigned long init_node_heap(int node, unsigned long mfn, first_node_initialised = true; needed = 0; } - else if ( *use_tail && nr >= needed && - arch_mfn_in_directmap(mfn + nr) && + else if ( *use_tail && nr >= needed && arch_mfn_in_directmap(mfn + nr) && (!xenheap_bits || !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) ) { _heap[node] = mfn_to_virt(mfn + nr - needed); - avail[node] = mfn_to_virt(mfn + nr - 1) + - PAGE_SIZE - sizeof(**avail) * NR_ZONES; + avail[node] = + mfn_to_virt(mfn + nr - 1) + PAGE_SIZE - sizeof(**avail) * NR_ZONES; } - else if ( nr >= needed && - arch_mfn_in_directmap(mfn + needed) && + else if ( nr >= needed && arch_mfn_in_directmap(mfn + needed) && (!xenheap_bits || !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) ) { _heap[node] = mfn_to_virt(mfn); - avail[node] = mfn_to_virt(mfn + needed - 1) + - PAGE_SIZE - sizeof(**avail) * NR_ZONES; + avail[node] = mfn_to_virt(mfn + needed - 1) + PAGE_SIZE - + sizeof(**avail) * NR_ZONES; *use_tail = false; } else if ( get_order_from_bytes(sizeof(**_heap)) == @@ -640,22 +637,22 @@ static unsigned long init_node_heap(int node, unsigned long mfn, } /* Default to 64 MiB */ -#define DEFAULT_LOW_MEM_VIRQ (((paddr_t) 64) << 20) -#define MAX_LOW_MEM_VIRQ (((paddr_t) 1024) << 20) +#define DEFAULT_LOW_MEM_VIRQ (((paddr_t)64) << 20) +#define MAX_LOW_MEM_VIRQ (((paddr_t)1024) << 20) -static paddr_t __read_mostly opt_low_mem_virq = ((paddr_t) -1); +static paddr_t __read_mostly opt_low_mem_virq = ((paddr_t)-1); size_param("low_mem_virq_limit", opt_low_mem_virq); /* Thresholds to control hysteresis. In pages */ /* When memory grows above this threshold, reset hysteresis. * -1 initially to not reset until at least one virq issued. 
*/ -static unsigned long low_mem_virq_high = -1UL; +static unsigned long low_mem_virq_high = -1UL; /* Threshold at which we issue virq */ -static unsigned long low_mem_virq_th = 0; +static unsigned long low_mem_virq_th = 0; /* Original threshold after all checks completed */ -static unsigned long low_mem_virq_orig = 0; +static unsigned long low_mem_virq_orig = 0; /* Order for current threshold */ -static unsigned int low_mem_virq_th_order = 0; +static unsigned int low_mem_virq_th_order = 0; /* Perform bootstrapping checks and set bounds */ static void __init setup_low_mem_virq(void) @@ -673,25 +670,23 @@ static void __init setup_low_mem_virq(void) } /* If the user did not specify a knob, remember that */ - halve = (opt_low_mem_virq == ((paddr_t) -1)); + halve = (opt_low_mem_virq == ((paddr_t)-1)); threshold = halve ? DEFAULT_LOW_MEM_VIRQ : opt_low_mem_virq; /* Dom0 has already been allocated by now. So check we won't be * complaining immediately with whatever's left of the heap. */ - threshold = min(threshold, - ((paddr_t) total_avail_pages) << PAGE_SHIFT); + threshold = min(threshold, ((paddr_t)total_avail_pages) << PAGE_SHIFT); /* Then, cap to some predefined maximum */ threshold = min(threshold, MAX_LOW_MEM_VIRQ); /* If the user specified no knob, and we are at the current available * level, halve the threshold. */ - if ( halve && - (threshold == (((paddr_t) total_avail_pages) << PAGE_SHIFT)) ) + if ( halve && (threshold == (((paddr_t)total_avail_pages) << PAGE_SHIFT)) ) threshold >>= 1; /* Zero? Have to fire immediately */ - threshold = max(threshold, (paddr_t) PAGE_SIZE); + threshold = max(threshold, (paddr_t)PAGE_SIZE); /* Threshold bytes -> pages */ low_mem_virq_th = threshold >> PAGE_SHIFT; @@ -706,13 +701,13 @@ static void __init setup_low_mem_virq(void) low_mem_virq_th_order = order; printk("Initial low memory virq threshold set at %#lx pages.\n", - low_mem_virq_th); + low_mem_virq_th); } static void check_low_mem_virq(void) { - unsigned long avail_pages = total_avail_pages + - tmem_freeable_pages() - outstanding_claims; + unsigned long avail_pages = + total_avail_pages + tmem_freeable_pages() - outstanding_claims; if ( unlikely(avail_pages <= low_mem_virq_th) ) { @@ -721,10 +716,10 @@ static void check_low_mem_virq(void) /* Update thresholds. Next warning will be when we drop below * next order. However, we wait until we grow beyond one * order above us to complain again at the current order */ - low_mem_virq_high = 1UL << (low_mem_virq_th_order + 1); + low_mem_virq_high = 1UL << (low_mem_virq_th_order + 1); if ( low_mem_virq_th_order > 0 ) low_mem_virq_th_order--; - low_mem_virq_th = 1UL << low_mem_virq_th_order; + low_mem_virq_th = 1UL << low_mem_virq_th_order; return; } @@ -763,11 +758,11 @@ static void page_list_add_scrub(struct page_info *pg, unsigned int node, /* SCRUB_PATTERN needs to be a repeating series of bytes. 
*/ #ifndef NDEBUG -#define SCRUB_PATTERN 0xc2c2c2c2c2c2c2c2ULL +#define SCRUB_PATTERN 0xc2c2c2c2c2c2c2c2ULL #else -#define SCRUB_PATTERN 0ULL +#define SCRUB_PATTERN 0ULL #endif -#define SCRUB_BYTE_PATTERN (SCRUB_PATTERN & 0xff) +#define SCRUB_BYTE_PATTERN (SCRUB_PATTERN & 0xff) static void poison_one_page(struct page_info *pg) { @@ -793,7 +788,7 @@ static void check_one_page(struct page_info *pg) return; ptr = __map_domain_page(pg); - for ( i = 0; i < PAGE_SIZE / sizeof (*ptr); i++ ) + for ( i = 0; i < PAGE_SIZE / sizeof(*ptr); i++ ) BUG_ON(ptr[i] != SCRUB_PATTERN); unmap_domain_page(ptr); #endif @@ -807,7 +802,7 @@ static void check_and_stop_scrub(struct page_info *head) head->u.free.scrub_state = BUDDY_SCRUB_ABORT; spin_lock_kick(); - for ( ; ; ) + for ( ;; ) { /* Can't ACCESS_ONCE() a bitfield. */ pgfree.val = ACCESS_ONCE(head->u.free.val); @@ -818,10 +813,9 @@ static void check_and_stop_scrub(struct page_info *head) } } -static struct page_info *get_free_buddy(unsigned int zone_lo, - unsigned int zone_hi, - unsigned int order, unsigned int memflags, - const struct domain *d) +static struct page_info * +get_free_buddy(unsigned int zone_lo, unsigned int zone_hi, unsigned int order, + unsigned int memflags, const struct domain *d) { nodeid_t first_node, node = MEMF_get_node(memflags), req_node = node; nodemask_t nodemask = d ? d->node_affinity : node_online_map; @@ -852,7 +846,7 @@ static struct page_info *get_free_buddy(unsigned int zone_lo, * zone before failing, only calc new node value if we fail to find memory * in target node, this avoids needless computation on fast-path. */ - for ( ; ; ) + for ( ;; ) { zone = zone_hi; do { @@ -911,10 +905,9 @@ static struct page_info *get_free_buddy(unsigned int zone_lo, } /* Allocate 2^@order contiguous pages. */ -static struct page_info *alloc_heap_pages( - unsigned int zone_lo, unsigned int zone_hi, - unsigned int order, unsigned int memflags, - struct domain *d) +static struct page_info * +alloc_heap_pages(unsigned int zone_lo, unsigned int zone_hi, unsigned int order, + unsigned int memflags, struct domain *d) { nodeid_t node; unsigned int i, buddy_order, zone, first_dirty; @@ -941,8 +934,8 @@ static struct page_info *alloc_heap_pages( */ if ( (outstanding_claims + request > total_avail_pages + tmem_freeable_pages()) && - ((memflags & MEMF_no_refcount) || - !d || d->outstanding_pages < request) ) + ((memflags & MEMF_no_refcount) || !d || + d->outstanding_pages < request) ) { spin_unlock(&heap_lock); return NULL; @@ -967,8 +960,8 @@ static struct page_info *alloc_heap_pages( pg = get_free_buddy(zone_lo, zone_hi, order, memflags, d); /* Try getting a dirty buddy if we couldn't get a clean one. */ if ( !pg && !(memflags & MEMF_no_scrub) ) - pg = get_free_buddy(zone_lo, zone_hi, order, - memflags | MEMF_no_scrub, d); + pg = get_free_buddy(zone_lo, zone_hi, order, memflags | MEMF_no_scrub, + d); if ( !pg ) { /* No suitable memory blocks. Fail the request. */ @@ -987,8 +980,9 @@ static struct page_info *alloc_heap_pages( { buddy_order--; page_list_add_scrub(pg, node, zone, buddy_order, - (1U << buddy_order) > first_dirty ? - first_dirty : INVALID_DIRTY_IDX); + (1U << buddy_order) > first_dirty + ? 
first_dirty + : INVALID_DIRTY_IDX); pg += 1U << buddy_order; if ( first_dirty != INVALID_DIRTY_IDX ) @@ -1017,14 +1011,15 @@ static struct page_info *alloc_heap_pages( BUG_ON((pg[i].count_info & ~PGC_need_scrub) != PGC_state_free); /* PGC_need_scrub can only be set if first_dirty is valid */ - ASSERT(first_dirty != INVALID_DIRTY_IDX || !(pg[i].count_info & PGC_need_scrub)); + ASSERT(first_dirty != INVALID_DIRTY_IDX || + !(pg[i].count_info & PGC_need_scrub)); /* Preserve PGC_need_scrub so we can check it after lock is dropped. */ - pg[i].count_info = PGC_state_inuse | (pg[i].count_info & PGC_need_scrub); + pg[i].count_info = + PGC_state_inuse | (pg[i].count_info & PGC_need_scrub); if ( !(memflags & MEMF_no_tlbflush) ) - accumulate_tlbflush(&need_tlbflush, &pg[i], - &tlbflush_timestamp); + accumulate_tlbflush(&need_tlbflush, &pg[i], &tlbflush_timestamp); /* Initialise fields which have other uses for free pages. */ pg[i].u.inuse.type_info = 0; @@ -1115,15 +1110,14 @@ static int reserve_offlined_page(struct page_info *head) { next_order = cur_order + 1; - if ( (cur_head + (1 << next_order)) >= (head + ( 1 << head_order)) ) + if ( (cur_head + (1 << next_order)) >= (head + (1 << head_order)) ) goto merge; - for ( i = (1 << cur_order), pg = cur_head + (1 << cur_order ); - i < (1 << next_order); - i++, pg++ ) + for ( i = (1 << cur_order), pg = cur_head + (1 << cur_order); + i < (1 << next_order); i++, pg++ ) if ( page_state_is(pg, offlined) ) break; - if ( i == ( 1 << next_order) ) + if ( i == (1 << next_order) ) { cur_order = next_order; continue; @@ -1133,14 +1127,15 @@ static int reserve_offlined_page(struct page_info *head) merge: /* We don't consider merging outside the head_order. */ page_list_add_scrub(cur_head, node, zone, cur_order, - (1U << cur_order) > first_dirty ? - first_dirty : INVALID_DIRTY_IDX); + (1U << cur_order) > first_dirty + ? first_dirty + : INVALID_DIRTY_IDX); cur_head += (1 << cur_order); /* Adjust first_dirty if needed. */ if ( first_dirty != INVALID_DIRTY_IDX ) { - if ( first_dirty >= 1U << cur_order ) + if ( first_dirty >= 1U << cur_order ) first_dirty -= 1U << cur_order; else first_dirty = 0; @@ -1151,7 +1146,7 @@ static int reserve_offlined_page(struct page_info *head) } } - for ( cur_head = head; cur_head < head + ( 1UL << head_order); cur_head++ ) + for ( cur_head = head; cur_head < head + (1UL << head_order); cur_head++ ) { if ( !page_state_is(cur_head, offlined) ) continue; @@ -1161,8 +1156,9 @@ static int reserve_offlined_page(struct page_info *head) ASSERT(total_avail_pages >= 0); page_list_add_tail(cur_head, - test_bit(_PGC_broken, &cur_head->count_info) ? - &page_broken_list : &page_offlined_list); + test_bit(_PGC_broken, &cur_head->count_info) + ? &page_broken_list + : &page_offlined_list); count++; } @@ -1197,7 +1193,7 @@ static unsigned int node_to_scrub(bool get_node) * the closest one. */ local_node = node; - for ( ; ; ) + for ( ;; ) { do { node = cycle_node(node, node_online_map); @@ -1217,10 +1213,10 @@ static unsigned int node_to_scrub(bool get_node) /* * Grab the node right away. If we find a closer node later we will * release this one. While there is a chance that another CPU will - * not be able to scrub that node when it is searching for scrub work - * at the same time it will be able to do so next time it wakes up. - * The alternative would be to perform this search under a lock but - * then we'd need to take this lock every time we come in here. 
+ * not be able to scrub that node when it is searching for scrub + * work at the same time it will be able to do so next time it wakes + * up. The alternative would be to perform this search under a lock + * but then we'd need to take this lock every time we come in here. */ if ( (dist < shortest || closest == NUMA_NO_NODE) && !node_test_and_set(node, node_scrubbing) ) @@ -1236,7 +1232,8 @@ static unsigned int node_to_scrub(bool get_node) return closest; } -struct scrub_wait_state { +struct scrub_wait_state +{ struct page_info *pg; unsigned int first_dirty; bool drop; @@ -1296,7 +1293,7 @@ bool scrub_free_pages(void) dirty_cnt = 0; - for ( i = pg->u.free.first_dirty; i < (1U << order); i++) + for ( i = pg->u.free.first_dirty; i < (1U << order); i++ ) { if ( test_bit(_PGC_need_scrub, &pg[i].count_info) ) { @@ -1317,8 +1314,9 @@ bool scrub_free_pages(void) { /* Someone wants this chunk. Drop everything. */ - pg->u.free.first_dirty = (i == (1U << order) - 1) ? - INVALID_DIRTY_IDX : i + 1; + pg->u.free.first_dirty = (i == (1U << order) - 1) + ? INVALID_DIRTY_IDX + : i + 1; smp_wmb(); pg->u.free.scrub_state = BUDDY_NOT_SCRUBBING; @@ -1348,8 +1346,8 @@ bool scrub_free_pages(void) * It will be set either below or in the lock callback (in * scrub_continue()). */ - st.first_dirty = (i >= (1U << order) - 1) ? - INVALID_DIRTY_IDX : i + 1; + st.first_dirty = + (i >= (1U << order) - 1) ? INVALID_DIRTY_IDX : i + 1; st.drop = false; spin_lock_cb(&heap_lock, scrub_continue, &st); @@ -1361,7 +1359,8 @@ bool scrub_free_pages(void) if ( i >= (1U << order) - 1 ) { page_list_del(pg, &heap(node, zone, order)); - page_list_add_scrub(pg, node, zone, order, INVALID_DIRTY_IDX); + page_list_add_scrub(pg, node, zone, order, + INVALID_DIRTY_IDX); } else pg->u.free.first_dirty = i + 1; @@ -1374,17 +1373,17 @@ bool scrub_free_pages(void) } while ( order-- != 0 ); } - out: +out: spin_unlock(&heap_lock); - out_nolock: +out_nolock: node_clear(node, node_scrubbing); return node_to_scrub(false) != NUMA_NO_NODE; } /* Free 2^@order set of pages. */ -static void free_heap_pages( - struct page_info *pg, unsigned int order, bool need_scrub) +static void free_heap_pages(struct page_info *pg, unsigned int order, + bool need_scrub) { unsigned long mask; mfn_t mfn = page_to_mfn(pg); @@ -1412,8 +1411,8 @@ static void free_heap_pages( ASSERT(!page_state_is(&pg[i], offlined)); pg[i].count_info = ((pg[i].count_info & PGC_broken) | - (page_state_is(&pg[i], offlining) - ? PGC_state_offlined : PGC_state_free)); + (page_state_is(&pg[i], offlining) ? PGC_state_offlined + : PGC_state_free)); if ( page_state_is(&pg[i], offlined) ) tainted = 1; @@ -1444,8 +1443,8 @@ static void free_heap_pages( pg->u.free.first_dirty = INVALID_DIRTY_IDX; if ( tmem_enabled() ) - midsize_alloc_zone_pages = max( - midsize_alloc_zone_pages, total_avail_pages / MIDSIZE_ALLOC_FRAC); + midsize_alloc_zone_pages = max(midsize_alloc_zone_pages, + total_avail_pages / MIDSIZE_ALLOC_FRAC); /* Merge chunks as far as possible. */ while ( order < MAX_ORDER ) @@ -1470,8 +1469,8 @@ static void free_heap_pages( /* Update predecessor's first_dirty if necessary. */ if ( predecessor->u.free.first_dirty == INVALID_DIRTY_IDX && pg->u.free.first_dirty != INVALID_DIRTY_IDX ) - predecessor->u.free.first_dirty = (1U << order) + - pg->u.free.first_dirty; + predecessor->u.free.first_dirty = + (1U << order) + pg->u.free.first_dirty; pg = predecessor; } @@ -1491,8 +1490,8 @@ static void free_heap_pages( /* Update pg's first_dirty if necessary. 
*/ if ( pg->u.free.first_dirty == INVALID_DIRTY_IDX && successor->u.free.first_dirty != INVALID_DIRTY_IDX ) - pg->u.free.first_dirty = (1U << order) + - successor->u.free.first_dirty; + pg->u.free.first_dirty = + (1U << order) + successor->u.free.first_dirty; page_list_del(successor, &heap(node, zone, order)); } @@ -1508,7 +1507,6 @@ static void free_heap_pages( spin_unlock(&heap_lock); } - /* * Following rules applied for page offline: * Once a page is broken, it can't be assigned anymore @@ -1529,8 +1527,8 @@ static unsigned long mark_page_offline(struct page_info *pg, int broken) ((x & PGC_state) != PGC_state_offlining) ) { nx &= ~PGC_state; - nx |= (((x & PGC_state) == PGC_state_free) - ? PGC_state_offlined : PGC_state_offlining); + nx |= (((x & PGC_state) == PGC_state_free) ? PGC_state_offlined + : PGC_state_offlining); } if ( broken ) @@ -1556,16 +1554,14 @@ static int reserve_heap_page(struct page_info *pg) if ( page_list_empty(&heap(node, zone, i)) ) continue; - page_list_for_each_safe ( head, tmp, &heap(node, zone, i) ) + page_list_for_each_safe(head, tmp, &heap(node, zone, i)) { - if ( (head <= pg) && - (head + (1UL << i) > pg) ) + if ( (head <= pg) && (head + (1UL << i) > pg) ) return reserve_offlined_page(head); } } return -EINVAL; - } int offline_page(unsigned long mfn, int broken, uint32_t *status) @@ -1576,8 +1572,7 @@ int offline_page(unsigned long mfn, int broken, uint32_t *status) if ( !mfn_valid(_mfn(mfn)) ) { - dprintk(XENLOG_WARNING, - "try to offline page out of range %lx\n", mfn); + dprintk(XENLOG_WARNING, "try to offline page out of range %lx\n", mfn); return -EINVAL; } @@ -1587,7 +1582,7 @@ int offline_page(unsigned long mfn, int broken, uint32_t *status) if ( is_xen_fixed_mfn(mfn) ) { *status = PG_OFFLINE_XENPAGE | PG_OFFLINE_FAILED | - (DOMID_XEN << PG_OFFLINE_OWNER_SHIFT); + (DOMID_XEN << PG_OFFLINE_OWNER_SHIFT); return -EPERM; } @@ -1663,7 +1658,7 @@ int offline_page(unsigned long mfn, int broken, uint32_t *status) * before call again. */ *status = PG_OFFLINE_ANONYMOUS | PG_OFFLINE_FAILED | - (DOMID_INVALID << PG_OFFLINE_OWNER_SHIFT ); + (DOMID_INVALID << PG_OFFLINE_OWNER_SHIFT); } if ( broken ) @@ -1700,7 +1695,7 @@ unsigned int online_page(unsigned long mfn, uint32_t *status) if ( y & PGC_broken ) { ret = -EINVAL; - *status = PG_ONLINE_FAILED |PG_ONLINE_BROKEN; + *status = PG_ONLINE_FAILED | PG_ONLINE_BROKEN; break; } @@ -1734,7 +1729,8 @@ int query_page_offline(unsigned long mfn, uint32_t *status) { struct page_info *pg; - if ( !mfn_valid(_mfn(mfn)) || !page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL) ) + if ( !mfn_valid(_mfn(mfn)) || + !page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL) ) { dprintk(XENLOG_WARNING, "call expand_pages() first\n"); return -EINVAL; @@ -1763,8 +1759,7 @@ int query_page_offline(unsigned long mfn, uint32_t *status) * latter is not on a MAX_ORDER boundary, then we reserve the page by * not freeing it to the buddy allocator. 
*/ -static void init_heap_pages( - struct page_info *pg, unsigned long nr_pages) +static void init_heap_pages(struct page_info *pg, unsigned long nr_pages) { unsigned long i; bool idle_scrub = false; @@ -1784,7 +1779,7 @@ static void init_heap_pages( for ( i = 0; i < nr_pages; i++ ) { - unsigned int nid = phys_to_nid(page_to_maddr(pg+i)); + unsigned int nid = phys_to_nid(page_to_maddr(pg + i)); if ( unlikely(!avail[nid]) ) { @@ -1812,8 +1807,8 @@ static void init_heap_pages( } } -static unsigned long avail_heap_pages( - unsigned int zone_lo, unsigned int zone_hi, unsigned int node) +static unsigned long avail_heap_pages(unsigned int zone_lo, + unsigned int zone_hi, unsigned int node) { unsigned int i, zone; unsigned long free_pages = 0; @@ -1892,7 +1887,7 @@ static void __init smp_scrub_heap_pages(void *data) } /* Determine the current CPU's index into CPU's linked to this node. */ - for_each_cpu ( temp_cpu, &r->cpus ) + for_each_cpu (temp_cpu, &r->cpus) { if ( cpu == temp_cpu ) break; @@ -1932,7 +1927,7 @@ static int __init find_non_smt(unsigned int node, cpumask_t *dest) cpumask_and(&node_cpus, &node_to_cpumask(node), &cpu_online_map); cpumask_clear(dest); - for_each_cpu ( i, &node_cpus ) + for_each_cpu (i, &node_cpus) { if ( cpumask_intersects(dest, per_cpu(cpu_sibling_mask, i)) ) continue; @@ -1963,7 +1958,7 @@ static void __init scrub_heap_pages(void) chunk_size = MB(128) >> PAGE_SHIFT; /* Round #0 - figure out amounts and which CPUs to use. */ - for_each_online_node ( i ) + for_each_online_node(i) { if ( !node_spanned_pages(i) ) continue; @@ -1999,8 +1994,7 @@ static void __init scrub_heap_pages(void) /* Round: #1 - do NUMA nodes with CPUs. */ for ( offset = 0; offset < max_per_cpu_sz; offset += chunk_size ) { - for_each_online_node ( i ) - region[i].offset = offset; + for_each_online_node(i) region[i].offset = offset; process_pending_softirqs(); @@ -2015,7 +2009,7 @@ static void __init scrub_heap_pages(void) * Round #2: NUMA nodes with no CPUs get scrubbed with CPUs on the node * closest to us and with CPUs. */ - for_each_online_node ( i ) + for_each_online_node(i) { node_cpus = node_to_cpumask(i); @@ -2025,7 +2019,7 @@ static void __init scrub_heap_pages(void) last_distance = INT_MAX; best_node = first_node(node_online_map); /* Figure out which NODE CPUs are close. */ - for_each_online_node ( j ) + for_each_online_node(j) { u8 distance; @@ -2084,7 +2078,7 @@ void __init heap_init_late(void) */ setup_low_mem_virq(); - switch ( opt_bootscrub ) + switch (opt_bootscrub) { default: ASSERT_UNREACHABLE(); @@ -2103,7 +2097,6 @@ void __init heap_init_late(void) } } - /************************* * XEN-HEAP SUB-ALLOCATOR */ @@ -2121,7 +2114,7 @@ void init_xenheap_pages(paddr_t ps, paddr_t pe) * Yuk! Ensure there is a one-page buffer between Xen and Dom zones, to * prevent merging of power-of-two blocks across the zone boundary. 
*/ - if ( ps && !is_xen_heap_mfn(paddr_to_pfn(ps)-1) ) + if ( ps && !is_xen_heap_mfn(paddr_to_pfn(ps) - 1) ) ps += PAGE_SIZE; if ( !is_xen_heap_mfn(paddr_to_pfn(pe)) ) pe -= PAGE_SIZE; @@ -2131,15 +2124,14 @@ void init_xenheap_pages(paddr_t ps, paddr_t pe) init_heap_pages(maddr_to_page(ps), (pe - ps) >> PAGE_SHIFT); } - void *alloc_xenheap_pages(unsigned int order, unsigned int memflags) { struct page_info *pg; ASSERT(!in_irq()); - pg = alloc_heap_pages(MEMZONE_XEN, MEMZONE_XEN, - order, memflags | MEMF_no_scrub, NULL); + pg = alloc_heap_pages(MEMZONE_XEN, MEMZONE_XEN, order, + memflags | MEMF_no_scrub, NULL); if ( unlikely(pg == NULL) ) return NULL; @@ -2148,7 +2140,6 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags) return page_to_virt(pg); } - void free_xenheap_pages(void *v, unsigned int order) { ASSERT(!in_irq()); @@ -2161,7 +2152,7 @@ void free_xenheap_pages(void *v, unsigned int order) free_heap_pages(virt_to_page(v), order, false); } -#else /* !CONFIG_SEPARATE_XENHEAP */ +#else /* !CONFIG_SEPARATE_XENHEAP */ void __init xenheap_max_mfn(unsigned long mfn) { @@ -2217,9 +2208,7 @@ void free_xenheap_pages(void *v, unsigned int order) free_heap_pages(pg, order, true); } -#endif /* CONFIG_SEPARATE_XENHEAP */ - - +#endif /* CONFIG_SEPARATE_XENHEAP */ /************************* * DOMAIN-HEAP SUB-ALLOCATOR @@ -2240,12 +2229,8 @@ void init_domheap_pages(paddr_t ps, paddr_t pe) init_heap_pages(mfn_to_page(smfn), mfn_x(emfn) - mfn_x(smfn)); } - -int assign_pages( - struct domain *d, - struct page_info *pg, - unsigned int order, - unsigned int memflags) +int assign_pages(struct domain *d, struct page_info *pg, unsigned int order, + unsigned int memflags) { int rc = 0; unsigned long i; @@ -2255,7 +2240,7 @@ int assign_pages( if ( unlikely(d->is_dying) ) { gdprintk(XENLOG_INFO, "Cannot assign page to domain%d -- dying.\n", - d->domain_id); + d->domain_id); rc = -EINVAL; goto out; } @@ -2265,9 +2250,11 @@ int assign_pages( if ( unlikely((d->tot_pages + (1 << order)) > d->max_pages) ) { if ( !tmem_enabled() || order != 0 || d->tot_pages != d->max_pages ) - gprintk(XENLOG_INFO, "Over-allocation for domain %u: " - "%u > %u\n", d->domain_id, - d->tot_pages + (1 << order), d->max_pages); + gprintk(XENLOG_INFO, + "Over-allocation for domain %u: " + "%u > %u\n", + d->domain_id, d->tot_pages + (1 << order), + d->max_pages); rc = -E2BIG; goto out; } @@ -2286,14 +2273,13 @@ int assign_pages( page_list_add_tail(&pg[i], &d->page_list); } - out: +out: spin_unlock(&d->page_alloc_lock); return rc; } - -struct page_info *alloc_domheap_pages( - struct domain *d, unsigned int order, unsigned int memflags) +struct page_info *alloc_domheap_pages(struct domain *d, unsigned int order, + unsigned int memflags) { struct page_info *pg = NULL; unsigned int bits = memflags >> _MEMF_bits, zone_hi = NR_ZONES - 1; @@ -2302,7 +2288,7 @@ struct page_info *alloc_domheap_pages( ASSERT(!in_irq()); bits = domain_clamp_alloc_bitsize(memflags & MEMF_no_owner ? NULL : d, - bits ? 
: (BITS_PER_LONG+PAGE_SHIFT)); + bits ?: (BITS_PER_LONG + PAGE_SHIFT)); if ( (zone_hi = min_t(unsigned int, bits_to_zone(bits), zone_hi)) == 0 ) return NULL; @@ -2321,9 +2307,9 @@ struct page_info *alloc_domheap_pages( if ( (pg == NULL) && ((memflags & MEMF_no_dma) || - ((pg = alloc_heap_pages(MEMZONE_XEN + 1, zone_hi, order, - memflags, d)) == NULL)) ) - return NULL; + ((pg = alloc_heap_pages(MEMZONE_XEN + 1, zone_hi, order, memflags, + d)) == NULL)) ) + return NULL; if ( d && !(memflags & MEMF_no_owner) && assign_pages(d, pg, order, memflags) ) @@ -2403,8 +2389,9 @@ void free_domheap_pages(struct page_info *pg, unsigned int order) put_domain(d); } -unsigned long avail_domheap_pages_region( - unsigned int node, unsigned int min_width, unsigned int max_width) +unsigned long avail_domheap_pages_region(unsigned int node, + unsigned int min_width, + unsigned int max_width) { int zone_lo, zone_hi; @@ -2419,42 +2406,40 @@ unsigned long avail_domheap_pages_region( unsigned long avail_domheap_pages(void) { - return avail_heap_pages(MEMZONE_XEN + 1, - NR_ZONES - 1, - -1); + return avail_heap_pages(MEMZONE_XEN + 1, NR_ZONES - 1, -1); } unsigned long avail_node_heap_pages(unsigned int nodeid) { - return avail_heap_pages(MEMZONE_XEN, NR_ZONES -1, nodeid); + return avail_heap_pages(MEMZONE_XEN, NR_ZONES - 1, nodeid); } - static void pagealloc_info(unsigned char key) { unsigned int zone = MEMZONE_XEN; unsigned long n, total = 0; printk("Physical memory information:\n"); - printk(" Xen heap: %lukB free\n", - avail_heap_pages(zone, zone, -1) << (PAGE_SHIFT-10)); + printk(" Xen heap: %lukB free\n", avail_heap_pages(zone, zone, -1) + << (PAGE_SHIFT - 10)); while ( ++zone < NR_ZONES ) { if ( (zone + PAGE_SHIFT) == dma_bitsize ) { - printk(" DMA heap: %lukB free\n", total << (PAGE_SHIFT-10)); + printk(" DMA heap: %lukB free\n", total << (PAGE_SHIFT - 10)); total = 0; } if ( (n = avail_heap_pages(zone, zone, -1)) != 0 ) { total += n; - printk(" heap[%02u]: %lukB free\n", zone, n << (PAGE_SHIFT-10)); + printk(" heap[%02u]: %lukB free\n", zone, + n << (PAGE_SHIFT - 10)); } } - printk(" Dom heap: %lukB free\n", total << (PAGE_SHIFT-10)); + printk(" Dom heap: %lukB free\n", total << (PAGE_SHIFT - 10)); } static __init int pagealloc_keyhandler_init(void) @@ -2464,7 +2449,6 @@ static __init int pagealloc_keyhandler_init(void) } __initcall(pagealloc_keyhandler_init); - void scrub_one_page(struct page_info *pg) { if ( unlikely(pg->count_info & PGC_broken) ) @@ -2472,8 +2456,8 @@ void scrub_one_page(struct page_info *pg) #ifndef NDEBUG /* Avoid callers relying on allocations returning zeroed pages. */ - unmap_domain_page(memset(__map_domain_page(pg), - SCRUB_BYTE_PATTERN, PAGE_SIZE)); + unmap_domain_page( + memset(__map_domain_page(pg), SCRUB_BYTE_PATTERN, PAGE_SIZE)); #else /* For a production build, clear_page() is the fastest way to scrub. 
*/ clear_domain_page(_mfn(page_to_mfn(pg))); @@ -2482,10 +2466,10 @@ void scrub_one_page(struct page_info *pg) static void dump_heap(unsigned char key) { - s_time_t now = NOW(); - int i, j; + s_time_t now = NOW(); + int i, j; - printk("'%c' pressed -> dumping heap info (now = %"PRI_stime")\n", key, + printk("'%c' pressed -> dumping heap info (now = %" PRI_stime ")\n", key, now); for ( i = 0; i < MAX_NUMNODES; i++ ) @@ -2493,8 +2477,7 @@ static void dump_heap(unsigned char key) if ( !avail[i] ) continue; for ( j = 0; j < NR_ZONES; j++ ) - printk("heap[node=%d][zone=%d] -> %lu pages\n", - i, j, avail[i][j]); + printk("heap[node=%d][zone=%d] -> %lu pages\n", i, j, avail[i][j]); } for ( i = 0; i < MAX_NUMNODES; i++ ) @@ -2528,7 +2511,7 @@ struct domain *get_pg_owner(domid_t domid) goto out; } - switch ( domid ) + switch (domid) { case DOMID_IO: pg_owner = rcu_lock_domain(dom_io); @@ -2544,7 +2527,7 @@ struct domain *get_pg_owner(domid_t domid) break; } - out: +out: return pg_owner; } diff --git a/xen/common/pdx.c b/xen/common/pdx.c index 1a2100c9d9..6861daa43c 100644 --- a/xen/common/pdx.c +++ b/xen/common/pdx.c @@ -29,20 +29,18 @@ unsigned long __read_mostly pfn_hole_mask = 0; unsigned int __read_mostly pfn_pdx_hole_shift = 0; unsigned long __read_mostly pdx_group_valid[BITS_TO_LONGS( - (FRAMETABLE_NR + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT)] = { [0] = 1 }; + (FRAMETABLE_NR + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT)] = {[0] = 1}; bool __mfn_valid(unsigned long mfn) { - return likely(mfn < max_page) && - likely(!(mfn & pfn_hole_mask)) && - likely(test_bit(pfn_to_pdx(mfn) / PDX_GROUP_COUNT, - pdx_group_valid)); + return likely(mfn < max_page) && likely(!(mfn & pfn_hole_mask)) && + likely(test_bit(pfn_to_pdx(mfn) / PDX_GROUP_COUNT, pdx_group_valid)); } /* Sets all bits from the most-significant 1-bit down to the LSB */ static u64 __init fill_mask(u64 mask) { - while (mask & (mask + 1)) + while ( mask & (mask + 1) ) mask |= mask + 1; return mask; @@ -79,7 +77,7 @@ void __init pfn_pdx_hole_setup(unsigned long mask) * contiguous aligned ranges of 2^MAX_ORDER pages. Among others, our * buddy allocator relies on this assumption. 
*/ - for ( j = MAX_ORDER-1; ; ) + for ( j = MAX_ORDER - 1;; ) { i = find_next_zero_bit(&mask, BITS_PER_LONG, j); j = find_next_bit(&mask, BITS_PER_LONG, i); @@ -94,18 +92,17 @@ void __init pfn_pdx_hole_setup(unsigned long mask) if ( !hole_shift ) return; - printk(KERN_INFO "PFN compression on bits %u...%u\n", - bottom_shift, bottom_shift + hole_shift - 1); + printk(KERN_INFO "PFN compression on bits %u...%u\n", bottom_shift, + bottom_shift + hole_shift - 1); - pfn_pdx_hole_shift = hole_shift; + pfn_pdx_hole_shift = hole_shift; pfn_pdx_bottom_mask = (1UL << bottom_shift) - 1; - ma_va_bottom_mask = (PAGE_SIZE << bottom_shift) - 1; - pfn_hole_mask = ((1UL << hole_shift) - 1) << bottom_shift; - pfn_top_mask = ~(pfn_pdx_bottom_mask | pfn_hole_mask); - ma_top_mask = pfn_top_mask << PAGE_SHIFT; + ma_va_bottom_mask = (PAGE_SIZE << bottom_shift) - 1; + pfn_hole_mask = ((1UL << hole_shift) - 1) << bottom_shift; + pfn_top_mask = ~(pfn_pdx_bottom_mask | pfn_hole_mask); + ma_top_mask = pfn_top_mask << PAGE_SHIFT; } - /* * Local variables: * mode: C diff --git a/xen/common/perfc.c b/xen/common/perfc.c index 3abe35892a..c8e14d136b 100644 --- a/xen/common/perfc.c +++ b/xen/common/perfc.c @@ -3,21 +3,26 @@ #include #include #include -#include +#include #include #include #include #include #include -#define PERFCOUNTER( var, name ) { name, TYPE_SINGLE, 0 }, -#define PERFCOUNTER_ARRAY( var, name, size ) { name, TYPE_ARRAY, size }, -#define PERFSTATUS( var, name ) { name, TYPE_S_SINGLE, 0 }, -#define PERFSTATUS_ARRAY( var, name, size ) { name, TYPE_S_ARRAY, size }, -static const struct { +#define PERFCOUNTER(var, name) {name, TYPE_SINGLE, 0}, +#define PERFCOUNTER_ARRAY(var, name, size) {name, TYPE_ARRAY, size}, +#define PERFSTATUS(var, name) {name, TYPE_S_SINGLE, 0}, +#define PERFSTATUS_ARRAY(var, name, size) {name, TYPE_S_ARRAY, size}, +static const struct +{ const char *name; - enum { TYPE_SINGLE, TYPE_ARRAY, - TYPE_S_SINGLE, TYPE_S_ARRAY + enum + { + TYPE_SINGLE, + TYPE_ARRAY, + TYPE_S_SINGLE, + TYPE_S_ARRAY } type; unsigned int nr_elements; } perfc_info[] = { @@ -33,31 +38,32 @@ void perfc_printall(unsigned char key) unsigned int i, j; s_time_t now = NOW(); - printk("Xen performance counters SHOW (now = %"PRI_stime")\n", now); + printk("Xen performance counters SHOW (now = %" PRI_stime ")\n", now); for ( i = j = 0; i < NR_PERFCTRS; i++ ) { unsigned int k, cpu; unsigned long long sum = 0; - printk("%-32s ", perfc_info[i].name); - switch ( perfc_info[i].type ) + printk("%-32s ", perfc_info[i].name); + switch (perfc_info[i].type) { case TYPE_SINGLE: case TYPE_S_SINGLE: - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) sum += per_cpu(perfcounters, cpu)[j]; - if ( perfc_info[i].type == TYPE_S_SINGLE ) - sum = (perfc_t) sum; + if ( perfc_info[i].type == TYPE_S_SINGLE ) + sum = (perfc_t)sum; printk("TOTAL[%12Lu]", sum); if ( sum ) { k = 0; - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) { if ( k > 0 && (k % 4) == 0 ) printk("\n%53s", ""); - printk(" CPU%02u[%10"PRIperfc"u]", cpu, per_cpu(perfcounters, cpu)[j]); + printk(" CPU%02u[%10" PRIperfc "u]", cpu, + per_cpu(perfcounters, cpu)[j]); ++k; } } @@ -65,33 +71,33 @@ void perfc_printall(unsigned char key) break; case TYPE_ARRAY: case TYPE_S_ARRAY: - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) { perfc_t *counters = per_cpu(perfcounters, cpu) + j; for ( k = 0; k < perfc_info[i].nr_elements; k++ ) sum += counters[k]; } - if ( perfc_info[i].type == TYPE_S_ARRAY ) - sum = (perfc_t) sum; + if ( perfc_info[i].type == TYPE_S_ARRAY ) + sum = 
(perfc_t)sum; printk("TOTAL[%12Lu]", sum); - if (sum) + if ( sum ) { #ifdef CONFIG_PERF_ARRAYS for ( k = 0; k < perfc_info[i].nr_elements; k++ ) { sum = 0; - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) sum += per_cpu(perfcounters, cpu)[j + k]; - if ( perfc_info[i].type == TYPE_S_ARRAY ) - sum = (perfc_t) sum; + if ( perfc_info[i].type == TYPE_S_ARRAY ) + sum = (perfc_t)sum; if ( (k % 4) == 0 ) printk("\n%16s", ""); printk(" ARR%02u[%10Lu]", k, sum); } #else k = 0; - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) { perfc_t *counters = per_cpu(perfcounters, cpu) + j; unsigned int n; @@ -99,8 +105,8 @@ void perfc_printall(unsigned char key) sum = 0; for ( n = 0; n < perfc_info[i].nr_elements; n++ ) sum += counters[n]; - if ( perfc_info[i].type == TYPE_S_ARRAY ) - sum = (perfc_t) sum; + if ( perfc_info[i].type == TYPE_S_ARRAY ) + sum = (perfc_t)sum; if ( k > 0 && (k % 4) == 0 ) printk("\n%53s", ""); printk(" CPU%02u[%10Lu]", cpu, sum); @@ -121,7 +127,7 @@ void perfc_reset(unsigned char key) s_time_t now = NOW(); if ( key != '\0' ) - printk("Xen performance counters RESET (now = %"PRI_stime")\n", now); + printk("Xen performance counters RESET (now = %" PRI_stime ")\n", now); /* leave STATUS counters alone -- don't reset */ @@ -129,16 +135,16 @@ void perfc_reset(unsigned char key) { unsigned int cpu; - switch ( perfc_info[i].type ) + switch (perfc_info[i].type) { case TYPE_SINGLE: - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) per_cpu(perfcounters, cpu)[j] = 0; case TYPE_S_SINGLE: ++j; break; case TYPE_ARRAY: - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) memset(per_cpu(perfcounters, cpu) + j, 0, perfc_info[i].nr_elements * sizeof(perfc_t)); case TYPE_S_ARRAY: @@ -152,8 +158,8 @@ void perfc_reset(unsigned char key) static struct xen_sysctl_perfc_desc perfc_d[NR_PERFCTRS]; static xen_sysctl_perfc_val_t *perfc_vals; -static unsigned int perfc_nbr_vals; -static cpumask_t perfc_cpumap; +static unsigned int perfc_nbr_vals; +static cpumask_t perfc_cpumap; static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc, XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val) @@ -173,7 +179,7 @@ static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc, { safe_strcpy(perfc_d[i].name, perfc_info[i].name); - switch ( perfc_info[i].type ) + switch (perfc_info[i].type) { case TYPE_SINGLE: case TYPE_S_SINGLE: @@ -205,18 +211,18 @@ static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc, { unsigned int cpu; - switch ( perfc_info[i].type ) + switch (perfc_info[i].type) { case TYPE_SINGLE: case TYPE_S_SINGLE: - for_each_cpu ( cpu, &perfc_cpumap ) + for_each_cpu (cpu, &perfc_cpumap) perfc_vals[v++] = per_cpu(perfcounters, cpu)[j]; ++j; break; case TYPE_ARRAY: case TYPE_S_ARRAY: memset(perfc_vals + v, 0, perfc_d[i].nr_vals * sizeof(*perfc_vals)); - for_each_cpu ( cpu, &perfc_cpumap ) + for_each_cpu (cpu, &perfc_cpumap) { perfc_t *counters = per_cpu(perfcounters, cpu) + j; unsigned int k; @@ -246,7 +252,7 @@ int perfc_control(struct xen_sysctl_perfc_op *pc) spin_lock(&lock); - switch ( pc->cmd ) + switch (pc->cmd) { case XEN_SYSCTL_PERFCOP_reset: rc = perfc_copy_info(pc->desc, pc->val); diff --git a/xen/common/preempt.c b/xen/common/preempt.c index 3b4178fd44..ebe368aaca 100644 --- a/xen/common/preempt.c +++ b/xen/common/preempt.c @@ -1,20 +1,20 @@ /****************************************************************************** * preempt.c - * + * * Track atomic regions in the hypervisor which disallow sleeping. 
- * + * * Copyright (c) 2010, Keir Fraser - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; If not, see . */ diff --git a/xen/common/radix-tree.c b/xen/common/radix-tree.c index 2384655a2e..e4d3bf9b18 100644 --- a/xen/common/radix-tree.c +++ b/xen/common/radix-tree.c @@ -22,14 +22,15 @@ #include #include -struct radix_tree_path { - struct radix_tree_node *node; - int offset; +struct radix_tree_path +{ + struct radix_tree_node *node; + int offset; }; -#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) -#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ - RADIX_TREE_MAP_SHIFT)) +#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) +#define RADIX_TREE_MAX_PATH \ + (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, RADIX_TREE_MAP_SHIFT)) /* * The height_to_maxindex array needs to be one deeper than the maximum @@ -39,52 +40,52 @@ static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1] __read_mostly; static inline void *ptr_to_indirect(void *ptr) { - return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR); + return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR); } static inline void *indirect_to_ptr(void *ptr) { - return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR); + return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR); } -struct rcu_node { - struct radix_tree_node node; - struct rcu_head rcu_head; +struct rcu_node +{ + struct radix_tree_node node; + struct rcu_head rcu_head; }; static struct radix_tree_node *rcu_node_alloc(void *arg) { - struct rcu_node *rcu_node = xmalloc(struct rcu_node); - return rcu_node ? &rcu_node->node : NULL; + struct rcu_node *rcu_node = xmalloc(struct rcu_node); + return rcu_node ? 
&rcu_node->node : NULL; } static void _rcu_node_free(struct rcu_head *head) { - struct rcu_node *rcu_node = - container_of(head, struct rcu_node, rcu_head); - xfree(rcu_node); + struct rcu_node *rcu_node = container_of(head, struct rcu_node, rcu_head); + xfree(rcu_node); } static void rcu_node_free(struct radix_tree_node *node, void *arg) { - struct rcu_node *rcu_node = container_of(node, struct rcu_node, node); - call_rcu(&rcu_node->rcu_head, _rcu_node_free); + struct rcu_node *rcu_node = container_of(node, struct rcu_node, node); + call_rcu(&rcu_node->rcu_head, _rcu_node_free); } -static struct radix_tree_node *radix_tree_node_alloc( - struct radix_tree_root *root) +static struct radix_tree_node * +radix_tree_node_alloc(struct radix_tree_root *root) { - struct radix_tree_node *ret; - ret = root->node_alloc(root->node_alloc_free_arg); - if (ret) - memset(ret, 0, sizeof(*ret)); - return ret; + struct radix_tree_node *ret; + ret = root->node_alloc(root->node_alloc_free_arg); + if ( ret ) + memset(ret, 0, sizeof(*ret)); + return ret; } -static void radix_tree_node_free( - struct radix_tree_root *root, struct radix_tree_node *node) +static void radix_tree_node_free(struct radix_tree_root *root, + struct radix_tree_node *node) { - root->node_free(node, root->node_alloc_free_arg); + root->node_free(node, root->node_alloc_free_arg); } /* @@ -93,7 +94,7 @@ static void radix_tree_node_free( */ static inline unsigned long radix_tree_maxindex(unsigned int height) { - return height_to_maxindex[height]; + return height_to_maxindex[height]; } /* @@ -101,36 +102,37 @@ static inline unsigned long radix_tree_maxindex(unsigned int height) */ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) { - struct radix_tree_node *node; - unsigned int height; - - /* Figure out what the height should be. */ - height = root->height + 1; - while (index > radix_tree_maxindex(height)) - height++; - - if (root->rnode == NULL) { - root->height = height; - goto out; - } - - do { - unsigned int newheight; - if (!(node = radix_tree_node_alloc(root))) - return -ENOMEM; - - /* Increase the height. */ - node->slots[0] = indirect_to_ptr(root->rnode); - - newheight = root->height+1; - node->height = newheight; - node->count = 1; - node = ptr_to_indirect(node); - rcu_assign_pointer(root->rnode, node); - root->height = newheight; - } while (height > root->height); + struct radix_tree_node *node; + unsigned int height; + + /* Figure out what the height should be. */ + height = root->height + 1; + while ( index > radix_tree_maxindex(height) ) + height++; + + if ( root->rnode == NULL ) + { + root->height = height; + goto out; + } + + do { + unsigned int newheight; + if ( !(node = radix_tree_node_alloc(root)) ) + return -ENOMEM; + + /* Increase the height. */ + node->slots[0] = indirect_to_ptr(root->rnode); + + newheight = root->height + 1; + node->height = newheight; + node->count = 1; + node = ptr_to_indirect(node); + rcu_assign_pointer(root->rnode, node); + root->height = newheight; + } while ( height > root->height ); out: - return 0; + return 0; } /** @@ -141,61 +143,69 @@ out: * * Insert an item into the radix tree at position @index. */ -int radix_tree_insert(struct radix_tree_root *root, - unsigned long index, void *item) +int radix_tree_insert(struct radix_tree_root *root, unsigned long index, + void *item) { - struct radix_tree_node *node = NULL, *slot; - unsigned int height, shift; - int offset; - int error; - - BUG_ON(radix_tree_is_indirect_ptr(item)); - - /* Make sure the tree is high enough. 
*/ - if (index > radix_tree_maxindex(root->height)) { - error = radix_tree_extend(root, index); - if (error) - return error; - } - - slot = indirect_to_ptr(root->rnode); - - height = root->height; - shift = (height-1) * RADIX_TREE_MAP_SHIFT; - - offset = 0; /* uninitialised var warning */ - while (height > 0) { - if (slot == NULL) { - /* Have to add a child node. */ - if (!(slot = radix_tree_node_alloc(root))) - return -ENOMEM; - slot->height = height; - if (node) { - rcu_assign_pointer(node->slots[offset], slot); - node->count++; - } else - rcu_assign_pointer(root->rnode, ptr_to_indirect(slot)); - } - - /* Go a level down */ - offset = (index >> shift) & RADIX_TREE_MAP_MASK; - node = slot; - slot = node->slots[offset]; - shift -= RADIX_TREE_MAP_SHIFT; - height--; - } - - if (slot != NULL) - return -EEXIST; - - if (node) { - node->count++; - rcu_assign_pointer(node->slots[offset], item); - } else { - rcu_assign_pointer(root->rnode, item); - } - - return 0; + struct radix_tree_node *node = NULL, *slot; + unsigned int height, shift; + int offset; + int error; + + BUG_ON(radix_tree_is_indirect_ptr(item)); + + /* Make sure the tree is high enough. */ + if ( index > radix_tree_maxindex(root->height) ) + { + error = radix_tree_extend(root, index); + if ( error ) + return error; + } + + slot = indirect_to_ptr(root->rnode); + + height = root->height; + shift = (height - 1) * RADIX_TREE_MAP_SHIFT; + + offset = 0; /* uninitialised var warning */ + while ( height > 0 ) + { + if ( slot == NULL ) + { + /* Have to add a child node. */ + if ( !(slot = radix_tree_node_alloc(root)) ) + return -ENOMEM; + slot->height = height; + if ( node ) + { + rcu_assign_pointer(node->slots[offset], slot); + node->count++; + } + else + rcu_assign_pointer(root->rnode, ptr_to_indirect(slot)); + } + + /* Go a level down */ + offset = (index >> shift) & RADIX_TREE_MAP_MASK; + node = slot; + slot = node->slots[offset]; + shift -= RADIX_TREE_MAP_SHIFT; + height--; + } + + if ( slot != NULL ) + return -EEXIST; + + if ( node ) + { + node->count++; + rcu_assign_pointer(node->slots[offset], item); + } + else + { + rcu_assign_pointer(root->rnode, item); + } + + return 0; } EXPORT_SYMBOL(radix_tree_insert); @@ -204,40 +214,41 @@ EXPORT_SYMBOL(radix_tree_insert); * is_slot == 0 : search for the node. */ static void *radix_tree_lookup_element(struct radix_tree_root *root, - unsigned long index, int is_slot) + unsigned long index, int is_slot) { - unsigned int height, shift; - struct radix_tree_node *node, **slot; - - node = rcu_dereference(root->rnode); - if (node == NULL) - return NULL; - - if (!radix_tree_is_indirect_ptr(node)) { - if (index > 0) - return NULL; - return is_slot ? (void *)&root->rnode : node; - } - node = indirect_to_ptr(node); - - height = node->height; - if (index > radix_tree_maxindex(height)) - return NULL; - - shift = (height-1) * RADIX_TREE_MAP_SHIFT; - - do { - slot = (struct radix_tree_node **) - (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); - node = rcu_dereference(*slot); - if (node == NULL) - return NULL; - - shift -= RADIX_TREE_MAP_SHIFT; - height--; - } while (height > 0); - - return is_slot ? (void *)slot : indirect_to_ptr(node); + unsigned int height, shift; + struct radix_tree_node *node, **slot; + + node = rcu_dereference(root->rnode); + if ( node == NULL ) + return NULL; + + if ( !radix_tree_is_indirect_ptr(node) ) + { + if ( index > 0 ) + return NULL; + return is_slot ? 
(void *)&root->rnode : node; + } + node = indirect_to_ptr(node); + + height = node->height; + if ( index > radix_tree_maxindex(height) ) + return NULL; + + shift = (height - 1) * RADIX_TREE_MAP_SHIFT; + + do { + slot = (struct radix_tree_node **)(node->slots + ((index >> shift) & + RADIX_TREE_MAP_MASK)); + node = rcu_dereference(*slot); + if ( node == NULL ) + return NULL; + + shift -= RADIX_TREE_MAP_SHIFT; + height--; + } while ( height > 0 ); + + return is_slot ? (void *)slot : indirect_to_ptr(node); } /** @@ -255,7 +266,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, */ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) { - return (void **)radix_tree_lookup_element(root, index, 1); + return (void **)radix_tree_lookup_element(root, index, 1); } EXPORT_SYMBOL(radix_tree_lookup_slot); @@ -273,7 +284,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot); */ void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) { - return radix_tree_lookup_element(root, index, 0); + return radix_tree_lookup_element(root, index, 0); } EXPORT_SYMBOL(radix_tree_lookup); @@ -298,19 +309,20 @@ EXPORT_SYMBOL(radix_tree_lookup); * under rcu_read_lock. */ unsigned long radix_tree_next_hole(struct radix_tree_root *root, - unsigned long index, unsigned long max_scan) + unsigned long index, unsigned long max_scan) { - unsigned long i; - - for (i = 0; i < max_scan; i++) { - if (!radix_tree_lookup(root, index)) - break; - index++; - if (index == 0) - break; - } - - return index; + unsigned long i; + + for ( i = 0; i < max_scan; i++ ) + { + if ( !radix_tree_lookup(root, index) ) + break; + index++; + if ( index == 0 ) + break; + } + + return index; } EXPORT_SYMBOL(radix_tree_next_hole); @@ -335,67 +347,72 @@ EXPORT_SYMBOL(radix_tree_next_hole); * rcu_read_lock. 
*/ unsigned long radix_tree_prev_hole(struct radix_tree_root *root, - unsigned long index, unsigned long max_scan) + unsigned long index, unsigned long max_scan) { - unsigned long i; - - for (i = 0; i < max_scan; i++) { - if (!radix_tree_lookup(root, index)) - break; - index--; - if (index == ULONG_MAX) - break; - } - - return index; + unsigned long i; + + for ( i = 0; i < max_scan; i++ ) + { + if ( !radix_tree_lookup(root, index) ) + break; + index--; + if ( index == ULONG_MAX ) + break; + } + + return index; } EXPORT_SYMBOL(radix_tree_prev_hole); -static unsigned int -__lookup(struct radix_tree_node *slot, void ***results, unsigned long index, - unsigned int max_items, unsigned long *next_index) +static unsigned int __lookup(struct radix_tree_node *slot, void ***results, + unsigned long index, unsigned int max_items, + unsigned long *next_index) { - unsigned int nr_found = 0; - unsigned int shift, height; - unsigned long i; - - height = slot->height; - if (height == 0) - goto out; - shift = (height-1) * RADIX_TREE_MAP_SHIFT; - - for ( ; height > 1; height--) { - i = (index >> shift) & RADIX_TREE_MAP_MASK; - for (;;) { - if (slot->slots[i] != NULL) - break; - index &= ~((1UL << shift) - 1); - index += 1UL << shift; - if (index == 0) - goto out; /* 32-bit wraparound */ - i++; - if (i == RADIX_TREE_MAP_SIZE) - goto out; - } - - shift -= RADIX_TREE_MAP_SHIFT; - slot = rcu_dereference(slot->slots[i]); - if (slot == NULL) - goto out; - } - - /* Bottom level: grab some items */ - for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { - index++; - if (slot->slots[i]) { - results[nr_found++] = &(slot->slots[i]); - if (nr_found == max_items) - goto out; - } - } + unsigned int nr_found = 0; + unsigned int shift, height; + unsigned long i; + + height = slot->height; + if ( height == 0 ) + goto out; + shift = (height - 1) * RADIX_TREE_MAP_SHIFT; + + for ( ; height > 1; height-- ) + { + i = (index >> shift) & RADIX_TREE_MAP_MASK; + for ( ;; ) + { + if ( slot->slots[i] != NULL ) + break; + index &= ~((1UL << shift) - 1); + index += 1UL << shift; + if ( index == 0 ) + goto out; /* 32-bit wraparound */ + i++; + if ( i == RADIX_TREE_MAP_SIZE ) + goto out; + } + + shift -= RADIX_TREE_MAP_SHIFT; + slot = rcu_dereference(slot->slots[i]); + if ( slot == NULL ) + goto out; + } + + /* Bottom level: grab some items */ + for ( i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++ ) + { + index++; + if ( slot->slots[i] ) + { + results[nr_found++] = &(slot->slots[i]); + if ( nr_found == max_items ) + goto out; + } + } out: - *next_index = index; - return nr_found; + *next_index = index; + return nr_found; } /** @@ -417,55 +434,57 @@ out: * of an RCU protected gang lookup are as though multiple radix_tree_lookups * have been issued in individual locks, and results stored in 'results'. 
*/ -unsigned int -radix_tree_gang_lookup(struct radix_tree_root *root, void **results, - unsigned long first_index, unsigned int max_items) +unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, + void **results, unsigned long first_index, + unsigned int max_items) { - unsigned long max_index; - struct radix_tree_node *node; - unsigned long cur_index = first_index; - unsigned int ret; - - node = rcu_dereference(root->rnode); - if (!node) - return 0; - - if (!radix_tree_is_indirect_ptr(node)) { - if (first_index > 0) - return 0; - results[0] = node; - return 1; - } - node = indirect_to_ptr(node); - - max_index = radix_tree_maxindex(node->height); - - ret = 0; - while (ret < max_items) { - unsigned int nr_found, slots_found, i; - unsigned long next_index; /* Index of next search */ - - if (cur_index > max_index) - break; - slots_found = __lookup(node, (void ***)results + ret, cur_index, - max_items - ret, &next_index); - nr_found = 0; - for (i = 0; i < slots_found; i++) { - struct radix_tree_node *slot; - slot = *(((void ***)results)[ret + i]); - if (!slot) - continue; - results[ret + nr_found] = - indirect_to_ptr(rcu_dereference(slot)); - nr_found++; - } - ret += nr_found; - if (next_index == 0) - break; - cur_index = next_index; - } - - return ret; + unsigned long max_index; + struct radix_tree_node *node; + unsigned long cur_index = first_index; + unsigned int ret; + + node = rcu_dereference(root->rnode); + if ( !node ) + return 0; + + if ( !radix_tree_is_indirect_ptr(node) ) + { + if ( first_index > 0 ) + return 0; + results[0] = node; + return 1; + } + node = indirect_to_ptr(node); + + max_index = radix_tree_maxindex(node->height); + + ret = 0; + while ( ret < max_items ) + { + unsigned int nr_found, slots_found, i; + unsigned long next_index; /* Index of next search */ + + if ( cur_index > max_index ) + break; + slots_found = __lookup(node, (void ***)results + ret, cur_index, + max_items - ret, &next_index); + nr_found = 0; + for ( i = 0; i < slots_found; i++ ) + { + struct radix_tree_node *slot; + slot = *(((void ***)results)[ret + i]); + if ( !slot ) + continue; + results[ret + nr_found] = indirect_to_ptr(rcu_dereference(slot)); + nr_found++; + } + ret += nr_found; + if ( next_index == 0 ) + break; + cur_index = next_index; + } + + return ret; } EXPORT_SYMBOL(radix_tree_gang_lookup); @@ -486,45 +505,48 @@ EXPORT_SYMBOL(radix_tree_gang_lookup); * be dereferenced with radix_tree_deref_slot, and if using only RCU * protection, radix_tree_deref_slot may fail requiring a retry. 
*/ -unsigned int -radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, - unsigned long first_index, unsigned int max_items) +unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, + void ***results, + unsigned long first_index, + unsigned int max_items) { - unsigned long max_index; - struct radix_tree_node *node; - unsigned long cur_index = first_index; - unsigned int ret; - - node = rcu_dereference(root->rnode); - if (!node) - return 0; - - if (!radix_tree_is_indirect_ptr(node)) { - if (first_index > 0) - return 0; - results[0] = (void **)&root->rnode; - return 1; - } - node = indirect_to_ptr(node); - - max_index = radix_tree_maxindex(node->height); - - ret = 0; - while (ret < max_items) { - unsigned int slots_found; - unsigned long next_index; /* Index of next search */ - - if (cur_index > max_index) - break; - slots_found = __lookup(node, results + ret, cur_index, - max_items - ret, &next_index); - ret += slots_found; - if (next_index == 0) - break; - cur_index = next_index; - } - - return ret; + unsigned long max_index; + struct radix_tree_node *node; + unsigned long cur_index = first_index; + unsigned int ret; + + node = rcu_dereference(root->rnode); + if ( !node ) + return 0; + + if ( !radix_tree_is_indirect_ptr(node) ) + { + if ( first_index > 0 ) + return 0; + results[0] = (void **)&root->rnode; + return 1; + } + node = indirect_to_ptr(node); + + max_index = radix_tree_maxindex(node->height); + + ret = 0; + while ( ret < max_items ) + { + unsigned int slots_found; + unsigned long next_index; /* Index of next search */ + + if ( cur_index > max_index ) + break; + slots_found = __lookup(node, results + ret, cur_index, max_items - ret, + &next_index); + ret += slots_found; + if ( next_index == 0 ) + break; + cur_index = next_index; + } + + return ret; } EXPORT_SYMBOL(radix_tree_gang_lookup_slot); @@ -534,60 +556,60 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_slot); */ static inline void radix_tree_shrink(struct radix_tree_root *root) { - /* try to shrink tree height */ - while (root->height > 0) { - struct radix_tree_node *to_free = root->rnode; - void *newptr; - - BUG_ON(!radix_tree_is_indirect_ptr(to_free)); - to_free = indirect_to_ptr(to_free); - - /* - * The candidate node has more than one child, or its child - * is not at the leftmost slot, we cannot shrink. - */ - if (to_free->count != 1) - break; - if (!to_free->slots[0]) - break; - - /* - * We don't need rcu_assign_pointer(), since we are simply - * moving the node from one part of the tree to another: if it - * was safe to dereference the old pointer to it - * (to_free->slots[0]), it will be safe to dereference the new - * one (root->rnode) as far as dependent read barriers go. - */ - newptr = to_free->slots[0]; - if (root->height > 1) - newptr = ptr_to_indirect(newptr); - root->rnode = newptr; - root->height--; - - /* - * We have a dilemma here. The node's slot[0] must not be - * NULLed in case there are concurrent lookups expecting to - * find the item. However if this was a bottom-level node, - * then it may be subject to the slot pointer being visible - * to callers dereferencing it. If item corresponding to - * slot[0] is subsequently deleted, these callers would expect - * their slot to become empty sooner or later. - * - * For example, lockless pagecache will look up a slot, deref - * the page pointer, and if the page is 0 refcount it means it - * was concurrently deleted from pagecache so try the deref - * again. 
Fortunately there is already a requirement for logic - * to retry the entire slot lookup -- the indirect pointer - * problem (replacing direct root node with an indirect pointer - * also results in a stale slot). So tag the slot as indirect - * to force callers to retry. - */ - if (root->height == 0) - *((unsigned long *)&to_free->slots[0]) |= - RADIX_TREE_INDIRECT_PTR; - - radix_tree_node_free(root, to_free); - } + /* try to shrink tree height */ + while ( root->height > 0 ) + { + struct radix_tree_node *to_free = root->rnode; + void *newptr; + + BUG_ON(!radix_tree_is_indirect_ptr(to_free)); + to_free = indirect_to_ptr(to_free); + + /* + * The candidate node has more than one child, or its child + * is not at the leftmost slot, we cannot shrink. + */ + if ( to_free->count != 1 ) + break; + if ( !to_free->slots[0] ) + break; + + /* + * We don't need rcu_assign_pointer(), since we are simply + * moving the node from one part of the tree to another: if it + * was safe to dereference the old pointer to it + * (to_free->slots[0]), it will be safe to dereference the new + * one (root->rnode) as far as dependent read barriers go. + */ + newptr = to_free->slots[0]; + if ( root->height > 1 ) + newptr = ptr_to_indirect(newptr); + root->rnode = newptr; + root->height--; + + /* + * We have a dilemma here. The node's slot[0] must not be + * NULLed in case there are concurrent lookups expecting to + * find the item. However if this was a bottom-level node, + * then it may be subject to the slot pointer being visible + * to callers dereferencing it. If item corresponding to + * slot[0] is subsequently deleted, these callers would expect + * their slot to become empty sooner or later. + * + * For example, lockless pagecache will look up a slot, deref + * the page pointer, and if the page is 0 refcount it means it + * was concurrently deleted from pagecache so try the deref + * again. Fortunately there is already a requirement for logic + * to retry the entire slot lookup -- the indirect pointer + * problem (replacing direct root node with an indirect pointer + * also results in a stale slot). So tag the slot as indirect + * to force callers to retry. + */ + if ( root->height == 0 ) + *((unsigned long *)&to_free->slots[0]) |= RADIX_TREE_INDIRECT_PTR; + + radix_tree_node_free(root, to_free); + } } /** @@ -601,157 +623,162 @@ static inline void radix_tree_shrink(struct radix_tree_root *root) */ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) { - /* - * The radix tree path needs to be one longer than the maximum path - * since the "list" is null terminated. 
- */ - struct radix_tree_path path[RADIX_TREE_MAX_PATH + 1], *pathp = path; - struct radix_tree_node *slot = NULL; - struct radix_tree_node *to_free; - unsigned int height, shift; - int offset; - - height = root->height; - if (index > radix_tree_maxindex(height)) - goto out; - - slot = root->rnode; - if (height == 0) { - root->rnode = NULL; - goto out; - } - slot = indirect_to_ptr(slot); - - shift = (height - 1) * RADIX_TREE_MAP_SHIFT; - pathp->node = NULL; - - do { - if (slot == NULL) - goto out; - - pathp++; - offset = (index >> shift) & RADIX_TREE_MAP_MASK; - pathp->offset = offset; - pathp->node = slot; - slot = slot->slots[offset]; - shift -= RADIX_TREE_MAP_SHIFT; - height--; - } while (height > 0); - - if (slot == NULL) - goto out; - - to_free = NULL; - /* Now free the nodes we do not need anymore */ - while (pathp->node) { - pathp->node->slots[pathp->offset] = NULL; - pathp->node->count--; - /* - * Queue the node for deferred freeing after the - * last reference to it disappears (set NULL, above). - */ - if (to_free) - radix_tree_node_free(root, to_free); - - if (pathp->node->count) { - if (pathp->node == indirect_to_ptr(root->rnode)) - radix_tree_shrink(root); - goto out; - } - - /* Node with zero slots in use so free it */ - to_free = pathp->node; - pathp--; - - } - root->height = 0; - root->rnode = NULL; - if (to_free) - radix_tree_node_free(root, to_free); + /* + * The radix tree path needs to be one longer than the maximum path + * since the "list" is null terminated. + */ + struct radix_tree_path path[RADIX_TREE_MAX_PATH + 1], *pathp = path; + struct radix_tree_node *slot = NULL; + struct radix_tree_node *to_free; + unsigned int height, shift; + int offset; + + height = root->height; + if ( index > radix_tree_maxindex(height) ) + goto out; + + slot = root->rnode; + if ( height == 0 ) + { + root->rnode = NULL; + goto out; + } + slot = indirect_to_ptr(slot); + + shift = (height - 1) * RADIX_TREE_MAP_SHIFT; + pathp->node = NULL; + + do { + if ( slot == NULL ) + goto out; + + pathp++; + offset = (index >> shift) & RADIX_TREE_MAP_MASK; + pathp->offset = offset; + pathp->node = slot; + slot = slot->slots[offset]; + shift -= RADIX_TREE_MAP_SHIFT; + height--; + } while ( height > 0 ); + + if ( slot == NULL ) + goto out; + + to_free = NULL; + /* Now free the nodes we do not need anymore */ + while ( pathp->node ) + { + pathp->node->slots[pathp->offset] = NULL; + pathp->node->count--; + /* + * Queue the node for deferred freeing after the + * last reference to it disappears (set NULL, above). 
+ */ + if ( to_free ) + radix_tree_node_free(root, to_free); + + if ( pathp->node->count ) + { + if ( pathp->node == indirect_to_ptr(root->rnode) ) + radix_tree_shrink(root); + goto out; + } + + /* Node with zero slots in use so free it */ + to_free = pathp->node; + pathp--; + } + root->height = 0; + root->rnode = NULL; + if ( to_free ) + radix_tree_node_free(root, to_free); out: - return slot; + return slot; } EXPORT_SYMBOL(radix_tree_delete); -static void -radix_tree_node_destroy( - struct radix_tree_root *root, struct radix_tree_node *node, - void (*slot_free)(void *)) +static void radix_tree_node_destroy(struct radix_tree_root *root, + struct radix_tree_node *node, + void (*slot_free)(void *)) { - int i; - - for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) { - struct radix_tree_node *slot = node->slots[i]; - BUG_ON(radix_tree_is_indirect_ptr(slot)); - if (slot == NULL) - continue; - if (node->height == 1) { - if (slot_free) - slot_free(slot); - } else { - radix_tree_node_destroy(root, slot, slot_free); - } - } - - radix_tree_node_free(root, node); + int i; + + for ( i = 0; i < RADIX_TREE_MAP_SIZE; i++ ) + { + struct radix_tree_node *slot = node->slots[i]; + BUG_ON(radix_tree_is_indirect_ptr(slot)); + if ( slot == NULL ) + continue; + if ( node->height == 1 ) + { + if ( slot_free ) + slot_free(slot); + } + else + { + radix_tree_node_destroy(root, slot, slot_free); + } + } + + radix_tree_node_free(root, node); } -void radix_tree_destroy( - struct radix_tree_root *root, - void (*slot_free)(void *)) +void radix_tree_destroy(struct radix_tree_root *root, void (*slot_free)(void *)) { - struct radix_tree_node *node = root->rnode; - if (node == NULL) - return; - if (!radix_tree_is_indirect_ptr(node)) { - if (slot_free) - slot_free(node); - } else { - node = indirect_to_ptr(node); - radix_tree_node_destroy(root, node, slot_free); - } - radix_tree_init(root); + struct radix_tree_node *node = root->rnode; + if ( node == NULL ) + return; + if ( !radix_tree_is_indirect_ptr(node) ) + { + if ( slot_free ) + slot_free(node); + } + else + { + node = indirect_to_ptr(node); + radix_tree_node_destroy(root, node, slot_free); + } + radix_tree_init(root); } void radix_tree_init(struct radix_tree_root *root) { - memset(root, 0, sizeof(*root)); - root->node_alloc = rcu_node_alloc; - root->node_free = rcu_node_free; + memset(root, 0, sizeof(*root)); + root->node_alloc = rcu_node_alloc; + root->node_free = rcu_node_free; } -void radix_tree_set_alloc_callbacks( - struct radix_tree_root *root, - radix_tree_alloc_fn_t *node_alloc, - radix_tree_free_fn_t *node_free, - void *node_alloc_free_arg) +void radix_tree_set_alloc_callbacks(struct radix_tree_root *root, + radix_tree_alloc_fn_t *node_alloc, + radix_tree_free_fn_t *node_free, + void *node_alloc_free_arg) { - root->node_alloc = node_alloc; - root->node_free = node_free; - root->node_alloc_free_arg = node_alloc_free_arg; + root->node_alloc = node_alloc; + root->node_free = node_free; + root->node_alloc_free_arg = node_alloc_free_arg; } static __init unsigned long __maxindex(unsigned int height) { - unsigned int width = height * RADIX_TREE_MAP_SHIFT; - int shift = RADIX_TREE_INDEX_BITS - width; - - if (shift < 0) - return ~0UL; - if (shift >= BITS_PER_LONG) - return 0UL; - return ~0UL >> shift; + unsigned int width = height * RADIX_TREE_MAP_SHIFT; + int shift = RADIX_TREE_INDEX_BITS - width; + + if ( shift < 0 ) + return ~0UL; + if ( shift >= BITS_PER_LONG ) + return 0UL; + return ~0UL >> shift; } static __init int radix_tree_init_maxindex(void) { - unsigned int i; + 
unsigned int i; - for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++) - height_to_maxindex[i] = __maxindex(i); + for ( i = 0; i < ARRAY_SIZE(height_to_maxindex); i++ ) + height_to_maxindex[i] = __maxindex(i); - return 0; + return 0; } /* pre-SMP just so it runs before 'normal' initcalls */ presmp_initcall(radix_tree_init_maxindex); diff --git a/xen/common/rangeset.c b/xen/common/rangeset.c index f34cafdc7e..b06121316f 100644 --- a/xen/common/rangeset.c +++ b/xen/common/rangeset.c @@ -1,9 +1,9 @@ /****************************************************************************** * rangeset.c - * + * * Creation, maintenance and automatic destruction of per-domain sets of * numeric ranges. - * + * * Copyright (c) 2005, K A Fraser */ @@ -13,28 +13,30 @@ #include /* An inclusive range [s,e] and pointer to next range in ascending order. */ -struct range { +struct range +{ struct list_head list; unsigned long s, e; }; -struct rangeset { +struct rangeset +{ /* Owning domain and threaded list of rangesets. */ struct list_head rangeset_list; - struct domain *domain; + struct domain *domain; /* Ordered list of ranges contained in this set, and protecting lock. */ struct list_head range_list; /* Number of ranges that can be allocated */ - long nr_ranges; - rwlock_t lock; + long nr_ranges; + rwlock_t lock; /* Pretty-printing name. */ - char name[32]; + char name[32]; /* RANGESETF flags. */ - unsigned int flags; + unsigned int flags; }; /***************************** @@ -42,12 +44,11 @@ struct rangeset { */ /* Find highest range lower than or containing s. NULL if no such range. */ -static struct range *find_range( - struct rangeset *r, unsigned long s) +static struct range *find_range(struct rangeset *r, unsigned long s) { struct range *x = NULL, *y; - list_for_each_entry ( y, &r->range_list, list ) + list_for_each_entry (y, &r->range_list, list) { if ( y->s > s ) break; @@ -58,8 +59,7 @@ static struct range *find_range( } /* Return the lowest range in the set r, or NULL if r is empty. */ -static struct range *first_range( - struct rangeset *r) +static struct range *first_range(struct rangeset *r) { if ( list_empty(&r->range_list) ) return NULL; @@ -67,8 +67,7 @@ static struct range *first_range( } /* Return range following x in ascending order, or NULL if x is the highest. */ -static struct range *next_range( - struct rangeset *r, struct range *x) +static struct range *next_range(struct rangeset *r, struct range *x) { if ( x->list.next == &r->range_list ) return NULL; @@ -76,15 +75,13 @@ static struct range *next_range( } /* Insert range y after range x in r. Insert as first range if x is NULL. */ -static void insert_range( - struct rangeset *r, struct range *x, struct range *y) +static void insert_range(struct rangeset *r, struct range *x, struct range *y) { list_add(&y->list, (x != NULL) ? &x->list : &r->range_list); } /* Remove a range from its list and free it. 
*/ -static void destroy_range( - struct rangeset *r, struct range *x) +static void destroy_range(struct rangeset *r, struct range *x) { r->nr_ranges++; @@ -93,8 +90,7 @@ static void destroy_range( } /* Allocate a new range */ -static struct range *alloc_range( - struct rangeset *r) +static struct range *alloc_range(struct rangeset *r) { struct range *x; @@ -112,8 +108,7 @@ static struct range *alloc_range( * Core public functions */ -int rangeset_add_range( - struct rangeset *r, unsigned long s, unsigned long e) +int rangeset_add_range(struct rangeset *r, unsigned long s, unsigned long e) { struct range *x, *y; int rc = 0; @@ -156,10 +151,10 @@ int rangeset_add_range( x = next_range(r, x); x->s = s; } - + x->e = (y->e > e) ? y->e : e; - for ( ; ; ) + for ( ;; ) { y = next_range(r, x); if ( (y == NULL) || (y->e > x->e) ) @@ -175,13 +170,12 @@ int rangeset_add_range( destroy_range(r, y); } - out: +out: write_unlock(&r->lock); return rc; } -int rangeset_remove_range( - struct rangeset *r, unsigned long s, unsigned long e) +int rangeset_remove_range(struct rangeset *r, unsigned long s, unsigned long e) { struct range *x, *y, *t; int rc = 0; @@ -243,13 +237,13 @@ int rangeset_remove_range( destroy_range(r, x); } - out: +out: write_unlock(&r->lock); return rc; } -bool_t rangeset_contains_range( - struct rangeset *r, unsigned long s, unsigned long e) +bool_t rangeset_contains_range(struct rangeset *r, unsigned long s, + unsigned long e) { struct range *x; bool_t contains; @@ -267,8 +261,8 @@ bool_t rangeset_contains_range( return contains; } -bool_t rangeset_overlaps_range( - struct rangeset *r, unsigned long s, unsigned long e) +bool_t rangeset_overlaps_range(struct rangeset *r, unsigned long s, + unsigned long e) { struct range *x; bool_t overlaps; @@ -286,9 +280,9 @@ bool_t rangeset_overlaps_range( return overlaps; } -int rangeset_report_ranges( - struct rangeset *r, unsigned long s, unsigned long e, - int (*cb)(unsigned long s, unsigned long e, void *), void *ctxt) +int rangeset_report_ranges(struct rangeset *r, unsigned long s, unsigned long e, + int (*cb)(unsigned long s, unsigned long e, void *), + void *ctxt) { struct range *x; int rc = 0; @@ -312,8 +306,7 @@ int rangeset_claim_range(struct rangeset *r, unsigned long size, write_lock(&r->lock); - for ( prev = NULL, next = first_range(r); - next; + for ( prev = NULL, next = first_range(r); next; prev = next, next = next_range(r, next) ) { if ( (next->s - start) >= size ) @@ -328,11 +321,11 @@ int rangeset_claim_range(struct rangeset *r, unsigned long size, if ( (~0UL - start) + 1 >= size ) goto insert; - out: +out: write_unlock(&r->lock); return -ENOSPC; - insert: +insert: if ( unlikely(!prev) ) { next = alloc_range(r); @@ -396,32 +389,27 @@ int rangeset_merge(struct rangeset *r1, struct rangeset *r2) return rangeset_report_ranges(r2, 0, ~0ul, merge, r1); } -int rangeset_add_singleton( - struct rangeset *r, unsigned long s) +int rangeset_add_singleton(struct rangeset *r, unsigned long s) { return rangeset_add_range(r, s, s); } -int rangeset_remove_singleton( - struct rangeset *r, unsigned long s) +int rangeset_remove_singleton(struct rangeset *r, unsigned long s) { return rangeset_remove_range(r, s, s); } -bool_t rangeset_contains_singleton( - struct rangeset *r, unsigned long s) +bool_t rangeset_contains_singleton(struct rangeset *r, unsigned long s) { return rangeset_contains_range(r, s, s); } -bool_t rangeset_is_empty( - const struct rangeset *r) +bool_t rangeset_is_empty(const struct rangeset *r) { return ((r == NULL) || 
list_empty(&r->range_list)); } -struct rangeset *rangeset_new( - struct domain *d, char *name, unsigned int flags) +struct rangeset *rangeset_new(struct domain *d, char *name, unsigned int flags) { struct rangeset *r; @@ -455,8 +443,7 @@ struct rangeset *rangeset_new( return r; } -void rangeset_destroy( - struct rangeset *r) +void rangeset_destroy(struct rangeset *r) { struct range *x; @@ -476,21 +463,18 @@ void rangeset_destroy( xfree(r); } -void rangeset_limit( - struct rangeset *r, unsigned int limit) +void rangeset_limit(struct rangeset *r, unsigned int limit) { r->nr_ranges = limit; } -void rangeset_domain_initialise( - struct domain *d) +void rangeset_domain_initialise(struct domain *d) { INIT_LIST_HEAD(&d->rangesets); spin_lock_init(&d->rangesets_lock); } -void rangeset_domain_destroy( - struct domain *d) +void rangeset_domain_destroy(struct domain *d) { struct rangeset *r; @@ -541,8 +525,7 @@ static void print_limit(struct rangeset *r, unsigned long s) printk((r->flags & RANGESETF_prettyprint_hex) ? "%lx" : "%lu", s); } -void rangeset_printk( - struct rangeset *r) +void rangeset_printk(struct rangeset *r) { int nr_printed = 0; struct range *x; @@ -569,8 +552,7 @@ void rangeset_printk( read_unlock(&r->lock); } -void rangeset_domain_printk( - struct domain *d) +void rangeset_domain_printk(struct domain *d) { struct rangeset *r; @@ -581,7 +563,7 @@ void rangeset_domain_printk( if ( list_empty(&d->rangesets) ) printk(" None\n"); - list_for_each_entry ( r, &d->rangesets, rangeset_list ) + list_for_each_entry (r, &d->rangesets, rangeset_list) { printk(" "); rangeset_printk(r); diff --git a/xen/common/rbtree.c b/xen/common/rbtree.c index 9f5498a89d..28f6a7e9da 100644 --- a/xen/common/rbtree.c +++ b/xen/common/rbtree.c @@ -25,7 +25,7 @@ #include /* - * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree + * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree * * 1) A node is either red or black * 2) The root is black @@ -44,50 +44,52 @@ * parentheses and have some accompanying text comment. 
*/ -#define RB_RED 0 -#define RB_BLACK 1 +#define RB_RED 0 +#define RB_BLACK 1 -#define __rb_parent(pc) ((struct rb_node *)(pc & ~3)) +#define __rb_parent(pc) ((struct rb_node *)(pc & ~3)) -#define __rb_color(pc) ((pc) & 1) -#define __rb_is_black(pc) __rb_color(pc) -#define __rb_is_red(pc) (!__rb_color(pc)) -#define rb_color(rb) __rb_color((rb)->__rb_parent_color) -#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color) -#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color) +#define __rb_color(pc) ((pc)&1) +#define __rb_is_black(pc) __rb_color(pc) +#define __rb_is_red(pc) (!__rb_color(pc)) +#define rb_color(rb) __rb_color((rb)->__rb_parent_color) +#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color) +#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color) static inline void rb_set_black(struct rb_node *rb) { - rb->__rb_parent_color |= RB_BLACK; + rb->__rb_parent_color |= RB_BLACK; } static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) { - rb->__rb_parent_color = rb_color(rb) | (unsigned long)p; + rb->__rb_parent_color = rb_color(rb) | (unsigned long)p; } -static inline void rb_set_parent_color(struct rb_node *rb, - struct rb_node *p, int color) +static inline void rb_set_parent_color(struct rb_node *rb, struct rb_node *p, + int color) { - rb->__rb_parent_color = (unsigned long)p | color; + rb->__rb_parent_color = (unsigned long)p | color; } static inline struct rb_node *rb_red_parent(struct rb_node *red) { - return (struct rb_node *)red->__rb_parent_color; + return (struct rb_node *)red->__rb_parent_color; } -static inline void -__rb_change_child(struct rb_node *old, struct rb_node *new, - struct rb_node *parent, struct rb_root *root) +static inline void __rb_change_child(struct rb_node *old, struct rb_node *new, + struct rb_node *parent, + struct rb_root *root) { - if (parent) { - if (parent->rb_left == old) - parent->rb_left = new; - else - parent->rb_right = new; - } else - root->rb_node = new; + if ( parent ) + { + if ( parent->rb_left == old ) + parent->rb_left = new; + else + parent->rb_right = new; + } + else + root->rb_node = new; } /* @@ -95,377 +97,402 @@ __rb_change_child(struct rb_node *old, struct rb_node *new, * - old's parent and color get assigned to new * - old gets assigned new as a parent and 'color' as a color. */ -static inline void -__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new, - struct rb_root *root, int color) +static inline void __rb_rotate_set_parents(struct rb_node *old, + struct rb_node *new, + struct rb_root *root, int color) { - struct rb_node *parent = rb_parent(old); - new->__rb_parent_color = old->__rb_parent_color; - rb_set_parent_color(old, new, color); - __rb_change_child(old, new, parent, root); + struct rb_node *parent = rb_parent(old); + new->__rb_parent_color = old->__rb_parent_color; + rb_set_parent_color(old, new, color); + __rb_change_child(old, new, parent, root); } void rb_insert_color(struct rb_node *node, struct rb_root *root) { - struct rb_node *parent = rb_red_parent(node), *gparent, *tmp; - - while (true) { - /* - * Loop invariant: node is red - * - * If there is a black parent, we are done. - * Otherwise, take some corrective action as we don't - * want a red root or two consecutive red nodes. 
- */ - if (!parent) { - rb_set_parent_color(node, NULL, RB_BLACK); - break; - } else if (rb_is_black(parent)) - break; - - gparent = rb_red_parent(parent); - - tmp = gparent->rb_right; - if (parent != tmp) { /* parent == gparent->rb_left */ - if (tmp && rb_is_red(tmp)) { - /* - * Case 1 - color flips - * - * G g - * / \ / \ - * p u --> P U - * / / - * n n - * - * However, since g's parent might be red, and - * 4) does not allow this, we need to recurse - * at g. - */ - rb_set_parent_color(tmp, gparent, RB_BLACK); - rb_set_parent_color(parent, gparent, RB_BLACK); - node = gparent; - parent = rb_parent(node); - rb_set_parent_color(node, parent, RB_RED); - continue; - } - - tmp = parent->rb_right; - if (node == tmp) { - /* - * Case 2 - left rotate at parent - * - * G G - * / \ / \ - * p U --> n U - * \ / - * n p - * - * This still leaves us in violation of 4), the - * continuation into Case 3 will fix that. - */ - parent->rb_right = tmp = node->rb_left; - node->rb_left = parent; - if (tmp) - rb_set_parent_color(tmp, parent, - RB_BLACK); - rb_set_parent_color(parent, node, RB_RED); - parent = node; - tmp = node->rb_right; - } - - /* - * Case 3 - right rotate at gparent - * - * G P - * / \ / \ - * p U --> n g - * / \ - * n U - */ - gparent->rb_left = tmp; /* == parent->rb_right */ - parent->rb_right = gparent; - if (tmp) - rb_set_parent_color(tmp, gparent, RB_BLACK); - __rb_rotate_set_parents(gparent, parent, root, RB_RED); - break; - } else { - tmp = gparent->rb_left; - if (tmp && rb_is_red(tmp)) { - /* Case 1 - color flips */ - rb_set_parent_color(tmp, gparent, RB_BLACK); - rb_set_parent_color(parent, gparent, RB_BLACK); - node = gparent; - parent = rb_parent(node); - rb_set_parent_color(node, parent, RB_RED); - continue; - } - - tmp = parent->rb_left; - if (node == tmp) { - /* Case 2 - right rotate at parent */ - parent->rb_left = tmp = node->rb_right; - node->rb_right = parent; - if (tmp) - rb_set_parent_color(tmp, parent, - RB_BLACK); - rb_set_parent_color(parent, node, RB_RED); - parent = node; - tmp = node->rb_left; - } - - /* Case 3 - left rotate at gparent */ - gparent->rb_right = tmp; /* == parent->rb_left */ - parent->rb_left = gparent; - if (tmp) - rb_set_parent_color(tmp, gparent, RB_BLACK); - __rb_rotate_set_parents(gparent, parent, root, RB_RED); - break; - } - } + struct rb_node *parent = rb_red_parent(node), *gparent, *tmp; + + while ( true ) + { + /* + * Loop invariant: node is red + * + * If there is a black parent, we are done. + * Otherwise, take some corrective action as we don't + * want a red root or two consecutive red nodes. + */ + if ( !parent ) + { + rb_set_parent_color(node, NULL, RB_BLACK); + break; + } + else if ( rb_is_black(parent) ) + break; + + gparent = rb_red_parent(parent); + + tmp = gparent->rb_right; + if ( parent != tmp ) + { /* parent == gparent->rb_left */ + if ( tmp && rb_is_red(tmp) ) + { + /* + * Case 1 - color flips + * + * G g + * / \ / \ + * p u --> P U + * / / + * n n + * + * However, since g's parent might be red, and + * 4) does not allow this, we need to recurse + * at g. + */ + rb_set_parent_color(tmp, gparent, RB_BLACK); + rb_set_parent_color(parent, gparent, RB_BLACK); + node = gparent; + parent = rb_parent(node); + rb_set_parent_color(node, parent, RB_RED); + continue; + } + + tmp = parent->rb_right; + if ( node == tmp ) + { + /* + * Case 2 - left rotate at parent + * + * G G + * / \ / \ + * p U --> n U + * \ / + * n p + * + * This still leaves us in violation of 4), the + * continuation into Case 3 will fix that. 
+ */ + parent->rb_right = tmp = node->rb_left; + node->rb_left = parent; + if ( tmp ) + rb_set_parent_color(tmp, parent, RB_BLACK); + rb_set_parent_color(parent, node, RB_RED); + parent = node; + tmp = node->rb_right; + } + + /* + * Case 3 - right rotate at gparent + * + * G P + * / \ / \ + * p U --> n g + * / \ + * n U + */ + gparent->rb_left = tmp; /* == parent->rb_right */ + parent->rb_right = gparent; + if ( tmp ) + rb_set_parent_color(tmp, gparent, RB_BLACK); + __rb_rotate_set_parents(gparent, parent, root, RB_RED); + break; + } + else + { + tmp = gparent->rb_left; + if ( tmp && rb_is_red(tmp) ) + { + /* Case 1 - color flips */ + rb_set_parent_color(tmp, gparent, RB_BLACK); + rb_set_parent_color(parent, gparent, RB_BLACK); + node = gparent; + parent = rb_parent(node); + rb_set_parent_color(node, parent, RB_RED); + continue; + } + + tmp = parent->rb_left; + if ( node == tmp ) + { + /* Case 2 - right rotate at parent */ + parent->rb_left = tmp = node->rb_right; + node->rb_right = parent; + if ( tmp ) + rb_set_parent_color(tmp, parent, RB_BLACK); + rb_set_parent_color(parent, node, RB_RED); + parent = node; + tmp = node->rb_left; + } + + /* Case 3 - left rotate at gparent */ + gparent->rb_right = tmp; /* == parent->rb_left */ + parent->rb_left = gparent; + if ( tmp ) + rb_set_parent_color(tmp, gparent, RB_BLACK); + __rb_rotate_set_parents(gparent, parent, root, RB_RED); + break; + } + } } EXPORT_SYMBOL(rb_insert_color); static void __rb_erase_color(struct rb_node *parent, struct rb_root *root) { - struct rb_node *node = NULL, *sibling, *tmp1, *tmp2; - - while (true) { - /* - * Loop invariants: - * - node is black (or NULL on first iteration) - * - node is not the root (parent is not NULL) - * - All leaf paths going through parent and node have a - * black node count that is 1 lower than other leaf paths. - */ - sibling = parent->rb_right; - if (node != sibling) { /* node == parent->rb_left */ - if (rb_is_red(sibling)) { - /* - * Case 1 - left rotate at parent - * - * P S - * / \ / \ - * N s --> p Sr - * / \ / \ - * Sl Sr N Sl - */ - parent->rb_right = tmp1 = sibling->rb_left; - sibling->rb_left = parent; - rb_set_parent_color(tmp1, parent, RB_BLACK); - __rb_rotate_set_parents(parent, sibling, root, - RB_RED); - sibling = tmp1; - } - tmp1 = sibling->rb_right; - if (!tmp1 || rb_is_black(tmp1)) { - tmp2 = sibling->rb_left; - if (!tmp2 || rb_is_black(tmp2)) { - /* - * Case 2 - sibling color flip - * (p could be either color here) - * - * (p) (p) - * / \ / \ - * N S --> N s - * / \ / \ - * Sl Sr Sl Sr - * - * This leaves us violating 5) which - * can be fixed by flipping p to black - * if it was red, or by recursing at p. - * p is red when coming from Case 1. - */ - rb_set_parent_color(sibling, parent, - RB_RED); - if (rb_is_red(parent)) - rb_set_black(parent); - else { - node = parent; - parent = rb_parent(node); - if (parent) - continue; - } - break; - } - /* - * Case 3 - right rotate at sibling - * (p could be either color here) - * - * (p) (p) - * / \ / \ - * N S --> N Sl - * / \ \ - * sl Sr s - * \ - * Sr - */ - sibling->rb_left = tmp1 = tmp2->rb_right; - tmp2->rb_right = sibling; - parent->rb_right = tmp2; - if (tmp1) - rb_set_parent_color(tmp1, sibling, - RB_BLACK); - tmp1 = sibling; - sibling = tmp2; - } - /* - * Case 4 - left rotate at parent + color flips - * (p and sl could be either color here. 
- * After rotation, p becomes black, s acquires - * p's color, and sl keeps its color) - * - * (p) (s) - * / \ / \ - * N S --> P Sr - * / \ / \ - * (sl) sr N (sl) - */ - parent->rb_right = tmp2 = sibling->rb_left; - sibling->rb_left = parent; - rb_set_parent_color(tmp1, sibling, RB_BLACK); - if (tmp2) - rb_set_parent(tmp2, parent); - __rb_rotate_set_parents(parent, sibling, root, - RB_BLACK); - break; - } else { - sibling = parent->rb_left; - if (rb_is_red(sibling)) { - /* Case 1 - right rotate at parent */ - parent->rb_left = tmp1 = sibling->rb_right; - sibling->rb_right = parent; - rb_set_parent_color(tmp1, parent, RB_BLACK); - __rb_rotate_set_parents(parent, sibling, root, - RB_RED); - sibling = tmp1; - } - tmp1 = sibling->rb_left; - if (!tmp1 || rb_is_black(tmp1)) { - tmp2 = sibling->rb_right; - if (!tmp2 || rb_is_black(tmp2)) { - /* Case 2 - sibling color flip */ - rb_set_parent_color(sibling, parent, - RB_RED); - if (rb_is_red(parent)) - rb_set_black(parent); - else { - node = parent; - parent = rb_parent(node); - if (parent) - continue; - } - break; - } - /* Case 3 - right rotate at sibling */ - sibling->rb_right = tmp1 = tmp2->rb_left; - tmp2->rb_left = sibling; - parent->rb_left = tmp2; - if (tmp1) - rb_set_parent_color(tmp1, sibling, - RB_BLACK); - tmp1 = sibling; - sibling = tmp2; - } - /* Case 4 - left rotate at parent + color flips */ - parent->rb_left = tmp2 = sibling->rb_right; - sibling->rb_right = parent; - rb_set_parent_color(tmp1, sibling, RB_BLACK); - if (tmp2) - rb_set_parent(tmp2, parent); - __rb_rotate_set_parents(parent, sibling, root, - RB_BLACK); - break; - } - } + struct rb_node *node = NULL, *sibling, *tmp1, *tmp2; + + while ( true ) + { + /* + * Loop invariants: + * - node is black (or NULL on first iteration) + * - node is not the root (parent is not NULL) + * - All leaf paths going through parent and node have a + * black node count that is 1 lower than other leaf paths. + */ + sibling = parent->rb_right; + if ( node != sibling ) + { /* node == parent->rb_left */ + if ( rb_is_red(sibling) ) + { + /* + * Case 1 - left rotate at parent + * + * P S + * / \ / \ + * N s --> p Sr + * / \ / \ + * Sl Sr N Sl + */ + parent->rb_right = tmp1 = sibling->rb_left; + sibling->rb_left = parent; + rb_set_parent_color(tmp1, parent, RB_BLACK); + __rb_rotate_set_parents(parent, sibling, root, RB_RED); + sibling = tmp1; + } + tmp1 = sibling->rb_right; + if ( !tmp1 || rb_is_black(tmp1) ) + { + tmp2 = sibling->rb_left; + if ( !tmp2 || rb_is_black(tmp2) ) + { + /* + * Case 2 - sibling color flip + * (p could be either color here) + * + * (p) (p) + * / \ / \ + * N S --> N s + * / \ / \ + * Sl Sr Sl Sr + * + * This leaves us violating 5) which + * can be fixed by flipping p to black + * if it was red, or by recursing at p. + * p is red when coming from Case 1. + */ + rb_set_parent_color(sibling, parent, RB_RED); + if ( rb_is_red(parent) ) + rb_set_black(parent); + else + { + node = parent; + parent = rb_parent(node); + if ( parent ) + continue; + } + break; + } + /* + * Case 3 - right rotate at sibling + * (p could be either color here) + * + * (p) (p) + * / \ / \ + * N S --> N Sl + * / \ \ + * sl Sr s + * \ + * Sr + */ + sibling->rb_left = tmp1 = tmp2->rb_right; + tmp2->rb_right = sibling; + parent->rb_right = tmp2; + if ( tmp1 ) + rb_set_parent_color(tmp1, sibling, RB_BLACK); + tmp1 = sibling; + sibling = tmp2; + } + /* + * Case 4 - left rotate at parent + color flips + * (p and sl could be either color here. 
+ * After rotation, p becomes black, s acquires + * p's color, and sl keeps its color) + * + * (p) (s) + * / \ / \ + * N S --> P Sr + * / \ / \ + * (sl) sr N (sl) + */ + parent->rb_right = tmp2 = sibling->rb_left; + sibling->rb_left = parent; + rb_set_parent_color(tmp1, sibling, RB_BLACK); + if ( tmp2 ) + rb_set_parent(tmp2, parent); + __rb_rotate_set_parents(parent, sibling, root, RB_BLACK); + break; + } + else + { + sibling = parent->rb_left; + if ( rb_is_red(sibling) ) + { + /* Case 1 - right rotate at parent */ + parent->rb_left = tmp1 = sibling->rb_right; + sibling->rb_right = parent; + rb_set_parent_color(tmp1, parent, RB_BLACK); + __rb_rotate_set_parents(parent, sibling, root, RB_RED); + sibling = tmp1; + } + tmp1 = sibling->rb_left; + if ( !tmp1 || rb_is_black(tmp1) ) + { + tmp2 = sibling->rb_right; + if ( !tmp2 || rb_is_black(tmp2) ) + { + /* Case 2 - sibling color flip */ + rb_set_parent_color(sibling, parent, RB_RED); + if ( rb_is_red(parent) ) + rb_set_black(parent); + else + { + node = parent; + parent = rb_parent(node); + if ( parent ) + continue; + } + break; + } + /* Case 3 - right rotate at sibling */ + sibling->rb_right = tmp1 = tmp2->rb_left; + tmp2->rb_left = sibling; + parent->rb_left = tmp2; + if ( tmp1 ) + rb_set_parent_color(tmp1, sibling, RB_BLACK); + tmp1 = sibling; + sibling = tmp2; + } + /* Case 4 - left rotate at parent + color flips */ + parent->rb_left = tmp2 = sibling->rb_right; + sibling->rb_right = parent; + rb_set_parent_color(tmp1, sibling, RB_BLACK); + if ( tmp2 ) + rb_set_parent(tmp2, parent); + __rb_rotate_set_parents(parent, sibling, root, RB_BLACK); + break; + } + } } void rb_erase(struct rb_node *node, struct rb_root *root) { - struct rb_node *child = node->rb_right, *tmp = node->rb_left; - struct rb_node *parent, *rebalance; - unsigned long pc; - - if (!tmp) { - /* - * Case 1: node to erase has no more than 1 child (easy!) - * - * Note that if there is one child it must be red due to 5) - * and node must be black due to 4). We adjust colors locally - * so as to bypass __rb_erase_color() later on. - */ - pc = node->__rb_parent_color; - parent = __rb_parent(pc); - __rb_change_child(node, child, parent, root); - if (child) { - child->__rb_parent_color = pc; - rebalance = NULL; - } else - rebalance = __rb_is_black(pc) ? 
parent : NULL; - } else if (!child) { - /* Still case 1, but this time the child is node->rb_left */ - tmp->__rb_parent_color = pc = node->__rb_parent_color; - parent = __rb_parent(pc); - __rb_change_child(node, tmp, parent, root); - rebalance = NULL; - } else { - struct rb_node *successor = child, *child2; - tmp = child->rb_left; - if (!tmp) { - /* - * Case 2: node's successor is its right child - * - * (n) (s) - * / \ / \ - * (x) (s) -> (x) (c) - * \ - * (c) - */ - parent = child; - child2 = child->rb_right; - } else { - /* - * Case 3: node's successor is leftmost under - * node's right child subtree - * - * (n) (s) - * / \ / \ - * (x) (y) -> (x) (y) - * / / - * (p) (p) - * / / - * (s) (c) - * \ - * (c) - */ - do { - parent = successor; - successor = tmp; - tmp = tmp->rb_left; - } while (tmp); - parent->rb_left = child2 = successor->rb_right; - successor->rb_right = child; - rb_set_parent(child, successor); - } - - successor->rb_left = tmp = node->rb_left; - rb_set_parent(tmp, successor); - - pc = node->__rb_parent_color; - tmp = __rb_parent(pc); - __rb_change_child(node, successor, tmp, root); - if (child2) { - successor->__rb_parent_color = pc; - rb_set_parent_color(child2, parent, RB_BLACK); - rebalance = NULL; - } else { - unsigned long pc2 = successor->__rb_parent_color; - successor->__rb_parent_color = pc; - rebalance = __rb_is_black(pc2) ? parent : NULL; - } - } - - if (rebalance) - __rb_erase_color(rebalance, root); + struct rb_node *child = node->rb_right, *tmp = node->rb_left; + struct rb_node *parent, *rebalance; + unsigned long pc; + + if ( !tmp ) + { + /* + * Case 1: node to erase has no more than 1 child (easy!) + * + * Note that if there is one child it must be red due to 5) + * and node must be black due to 4). We adjust colors locally + * so as to bypass __rb_erase_color() later on. + */ + pc = node->__rb_parent_color; + parent = __rb_parent(pc); + __rb_change_child(node, child, parent, root); + if ( child ) + { + child->__rb_parent_color = pc; + rebalance = NULL; + } + else + rebalance = __rb_is_black(pc) ? parent : NULL; + } + else if ( !child ) + { + /* Still case 1, but this time the child is node->rb_left */ + tmp->__rb_parent_color = pc = node->__rb_parent_color; + parent = __rb_parent(pc); + __rb_change_child(node, tmp, parent, root); + rebalance = NULL; + } + else + { + struct rb_node *successor = child, *child2; + tmp = child->rb_left; + if ( !tmp ) + { + /* + * Case 2: node's successor is its right child + * + * (n) (s) + * / \ / \ + * (x) (s) -> (x) (c) + * \ + * (c) + */ + parent = child; + child2 = child->rb_right; + } + else + { + /* + * Case 3: node's successor is leftmost under + * node's right child subtree + * + * (n) (s) + * / \ / \ + * (x) (y) -> (x) (y) + * / / + * (p) (p) + * / / + * (s) (c) + * \ + * (c) + */ + do { + parent = successor; + successor = tmp; + tmp = tmp->rb_left; + } while ( tmp ); + parent->rb_left = child2 = successor->rb_right; + successor->rb_right = child; + rb_set_parent(child, successor); + } + + successor->rb_left = tmp = node->rb_left; + rb_set_parent(tmp, successor); + + pc = node->__rb_parent_color; + tmp = __rb_parent(pc); + __rb_change_child(node, successor, tmp, root); + if ( child2 ) + { + successor->__rb_parent_color = pc; + rb_set_parent_color(child2, parent, RB_BLACK); + rebalance = NULL; + } + else + { + unsigned long pc2 = successor->__rb_parent_color; + successor->__rb_parent_color = pc; + rebalance = __rb_is_black(pc2) ? 
parent : NULL; + } + } + + if ( rebalance ) + __rb_erase_color(rebalance, root); } EXPORT_SYMBOL(rb_erase); @@ -474,104 +501,106 @@ EXPORT_SYMBOL(rb_erase); */ struct rb_node *rb_first(const struct rb_root *root) { - struct rb_node *n; - - n = root->rb_node; - if (!n) - return NULL; - while (n->rb_left) - n = n->rb_left; - return n; + struct rb_node *n; + + n = root->rb_node; + if ( !n ) + return NULL; + while ( n->rb_left ) + n = n->rb_left; + return n; } EXPORT_SYMBOL(rb_first); struct rb_node *rb_last(const struct rb_root *root) { - struct rb_node *n; - - n = root->rb_node; - if (!n) - return NULL; - while (n->rb_right) - n = n->rb_right; - return n; + struct rb_node *n; + + n = root->rb_node; + if ( !n ) + return NULL; + while ( n->rb_right ) + n = n->rb_right; + return n; } EXPORT_SYMBOL(rb_last); struct rb_node *rb_next(const struct rb_node *node) { - struct rb_node *parent; - - if (RB_EMPTY_NODE(node)) - return NULL; - - /* - * If we have a right-hand child, go down and then left as far - * as we can. - */ - if (node->rb_right) { - node = node->rb_right; - while (node->rb_left) - node=node->rb_left; - return (struct rb_node *)node; - } - - /* - * No right-hand children. Everything down and left is smaller than us, - * so any 'next' node must be in the general direction of our parent. - * Go up the tree; any time the ancestor is a right-hand child of its - * parent, keep going up. First time it's a left-hand child of its - * parent, said parent is our 'next' node. - */ - while ((parent = rb_parent(node)) && node == parent->rb_right) - node = parent; - - return parent; + struct rb_node *parent; + + if ( RB_EMPTY_NODE(node) ) + return NULL; + + /* + * If we have a right-hand child, go down and then left as far + * as we can. + */ + if ( node->rb_right ) + { + node = node->rb_right; + while ( node->rb_left ) + node = node->rb_left; + return (struct rb_node *)node; + } + + /* + * No right-hand children. Everything down and left is smaller than us, + * so any 'next' node must be in the general direction of our parent. + * Go up the tree; any time the ancestor is a right-hand child of its + * parent, keep going up. First time it's a left-hand child of its + * parent, said parent is our 'next' node. + */ + while ( (parent = rb_parent(node)) && node == parent->rb_right ) + node = parent; + + return parent; } EXPORT_SYMBOL(rb_next); struct rb_node *rb_prev(const struct rb_node *node) { - struct rb_node *parent; - - if (RB_EMPTY_NODE(node)) - return NULL; - - /* - * If we have a left-hand child, go down and then right as far - * as we can. - */ - if (node->rb_left) { - node = node->rb_left; - while (node->rb_right) - node=node->rb_right; - return (struct rb_node *)node; - } - - /* - * No left-hand children. Go up till we find an ancestor which - * is a right-hand child of its parent - */ - while ((parent = rb_parent(node)) && node == parent->rb_left) - node = parent; - - return parent; + struct rb_node *parent; + + if ( RB_EMPTY_NODE(node) ) + return NULL; + + /* + * If we have a left-hand child, go down and then right as far + * as we can. + */ + if ( node->rb_left ) + { + node = node->rb_left; + while ( node->rb_right ) + node = node->rb_right; + return (struct rb_node *)node; + } + + /* + * No left-hand children. 
Go up till we find an ancestor which + * is a right-hand child of its parent + */ + while ( (parent = rb_parent(node)) && node == parent->rb_left ) + node = parent; + + return parent; } EXPORT_SYMBOL(rb_prev); void rb_replace_node(struct rb_node *victim, struct rb_node *new, - struct rb_root *root) + struct rb_root *root) { - struct rb_node *parent = rb_parent(victim); + struct rb_node *parent = rb_parent(victim); - /* Set the surrounding nodes to point to the replacement */ - __rb_change_child(victim, new, parent, root); - if (victim->rb_left) - rb_set_parent(victim->rb_left, new); - if (victim->rb_right) - rb_set_parent(victim->rb_right, new); + /* Set the surrounding nodes to point to the replacement */ + __rb_change_child(victim, new, parent, root); + if ( victim->rb_left ) + rb_set_parent(victim->rb_left, new); + if ( victim->rb_right ) + rb_set_parent(victim->rb_right, new); - /* Copy the pointers/colour from the victim to the replacement */ - *new = *victim; + /* Copy the pointers/colour from the victim to the replacement */ + *new = *victim; } EXPORT_SYMBOL(rb_replace_node); diff --git a/xen/common/rcupdate.c b/xen/common/rcupdate.c index 3517790913..401063728d 100644 --- a/xen/common/rcupdate.c +++ b/xen/common/rcupdate.c @@ -18,7 +18,7 @@ * * Authors: Dipankar Sarma * Manfred Spraul - * + * * Modifications for Xen: Jose Renato Santos * Copyright (C) Hewlett-Packard, 2006 * @@ -46,14 +46,15 @@ #include /* Global control variables for rcupdate callback mechanism. */ -static struct rcu_ctrlblk { - long cur; /* Current batch number. */ - long completed; /* Number of the last completed batch */ - int next_pending; /* Is the next batch already waiting? */ - - spinlock_t lock __cacheline_aligned; - cpumask_t cpumask; /* CPUs that need to switch in order ... */ - cpumask_t idle_cpumask; /* ... unless they are already idle */ +static struct rcu_ctrlblk +{ + long cur; /* Current batch number. */ + long completed; /* Number of the last completed batch */ + int next_pending; /* Is the next batch already waiting? */ + + spinlock_t lock __cacheline_aligned; + cpumask_t cpumask; /* CPUs that need to switch in order ... */ + cpumask_t idle_cpumask; /* ... unless they are already idle */ /* for current batch to proceed. */ } __cacheline_aligned rcu_ctrlblk = { .cur = -300, @@ -66,24 +67,25 @@ static struct rcu_ctrlblk { * nxtlist - new callbacks are added here * curlist - current batch for which quiescent cycle started if any */ -struct rcu_data { +struct rcu_data +{ /* 1) quiescent state handling : */ - long quiescbatch; /* Batch # for grace period */ - int qs_pending; /* core waits for quiesc state */ + long quiescbatch; /* Batch # for grace period */ + int qs_pending; /* core waits for quiesc state */ /* 2) batch handling */ - long batch; /* Batch # for current RCU batch */ + long batch; /* Batch # for current RCU batch */ struct rcu_head *nxtlist; struct rcu_head **nxttail; - long qlen; /* # of queued callbacks */ + long qlen; /* # of queued callbacks */ struct rcu_head *curlist; struct rcu_head **curtail; struct rcu_head *donelist; struct rcu_head **donetail; - long blimit; /* Upper limit on a processed batch */ + long blimit; /* Upper limit on a processed batch */ int cpu; struct rcu_head barrier; - long last_rs_qlen; /* qlen during the last resched */ + long last_rs_qlen; /* qlen during the last resched */ /* 3) idle CPUs handling */ struct timer idle_timer; @@ -116,9 +118,9 @@ struct rcu_data { * CPU that is going idle. 
The user can change this, via a boot time * parameter, but only up to 100ms. */ -#define IDLE_TIMER_PERIOD_MAX MILLISECS(100) +#define IDLE_TIMER_PERIOD_MAX MILLISECS(100) #define IDLE_TIMER_PERIOD_DEFAULT MILLISECS(10) -#define IDLE_TIMER_PERIOD_MIN MICROSECS(100) +#define IDLE_TIMER_PERIOD_MIN MICROSECS(100) static s_time_t __read_mostly idle_timer_period; @@ -130,8 +132,8 @@ static s_time_t __read_mostly idle_timer_period; * - if the timer actually fires and it finds the grace period over, we * subtract IDLE_TIMER_PERIOD_DECR from the timer's period. */ -#define IDLE_TIMER_PERIOD_INCR MILLISECS(10) -#define IDLE_TIMER_PERIOD_DECR MICROSECS(100) +#define IDLE_TIMER_PERIOD_INCR MILLISECS(10) +#define IDLE_TIMER_PERIOD_DECR MICROSECS(100) static DEFINE_PER_CPU(struct rcu_data, rcu_data); @@ -140,21 +142,22 @@ static int qhimark = 10000; static int qlowmark = 100; static int rsinterval = 1000; -struct rcu_barrier_data { +struct rcu_barrier_data +{ struct rcu_head head; atomic_t *cpu_count; }; static void rcu_barrier_callback(struct rcu_head *head) { - struct rcu_barrier_data *data = container_of( - head, struct rcu_barrier_data, head); + struct rcu_barrier_data *data = + container_of(head, struct rcu_barrier_data, head); atomic_inc(data->cpu_count); } static int rcu_barrier_action(void *_cpu_count) { - struct rcu_barrier_data data = { .cpu_count = _cpu_count }; + struct rcu_barrier_data data = {.cpu_count = _cpu_count}; ASSERT(!local_irq_is_enabled()); local_irq_enable(); @@ -189,12 +192,12 @@ static inline int rcu_batch_before(long a, long b) return (a - b) < 0; } -static void force_quiescent_state(struct rcu_data *rdp, - struct rcu_ctrlblk *rcp) +static void force_quiescent_state(struct rcu_data *rdp, struct rcu_ctrlblk *rcp) { cpumask_t cpumask; raise_softirq(SCHEDULE_SOFTIRQ); - if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) { + if ( unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval) ) + { rdp->last_rs_qlen = rdp->qlen; /* * Don't send IPI to itself. With irqs disabled, @@ -216,8 +219,7 @@ static void force_quiescent_state(struct rcu_data *rdp, * sections are delimited by rcu_read_lock() and rcu_read_unlock(), * and may be nested. 
*/ -void call_rcu(struct rcu_head *head, - void (*func)(struct rcu_head *rcu)) +void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) { unsigned long flags; struct rcu_data *rdp; @@ -228,7 +230,8 @@ void call_rcu(struct rcu_head *head, rdp = &__get_cpu_var(rcu_data); *rdp->nxttail = head; rdp->nxttail = &head->next; - if (unlikely(++rdp->qlen > qhimark)) { + if ( unlikely(++rdp->qlen > qhimark) ) + { rdp->blimit = INT_MAX; force_quiescent_state(rdp, &rcu_ctrlblk); } @@ -245,17 +248,18 @@ static void rcu_do_batch(struct rcu_data *rdp) int count = 0; list = rdp->donelist; - while (list) { + while ( list ) + { next = rdp->donelist = list->next; list->func(list); list = next; rdp->qlen--; - if (++count >= rdp->blimit) + if ( ++count >= rdp->blimit ) break; } - if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark) + if ( rdp->blimit == INT_MAX && rdp->qlen <= qlowmark ) rdp->blimit = blimit; - if (!rdp->donelist) + if ( !rdp->donelist ) rdp->donetail = &rdp->donelist; else raise_softirq(RCU_SOFTIRQ); @@ -286,8 +290,8 @@ static void rcu_do_batch(struct rcu_data *rdp) */ static void rcu_start_batch(struct rcu_ctrlblk *rcp) { - if (rcp->next_pending && - rcp->completed == rcp->cur) { + if ( rcp->next_pending && rcp->completed == rcp->cur ) + { rcp->next_pending = 0; /* * next_pending == 0 must be visible in @@ -296,14 +300,14 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp) smp_wmb(); rcp->cur++; - /* - * Make sure the increment of rcp->cur is visible so, even if a - * CPU that is about to go idle, is captured inside rcp->cpumask, - * rcu_pending() will return false, which then means cpu_quiet() - * will be invoked, before the CPU would actually enter idle. - * - * This barrier is paired with the one in rcu_idle_enter(). - */ + /* + * Make sure the increment of rcp->cur is visible so, even if a + * CPU that is about to go idle, is captured inside rcp->cpumask, + * rcu_pending() will return false, which then means cpu_quiet() + * will be invoked, before the CPU would actually enter idle. + * + * This barrier is paired with the one in rcu_idle_enter(). + */ smp_mb(); cpumask_andnot(&rcp->cpumask, &cpu_online_map, &rcp->idle_cpumask); } @@ -317,7 +321,8 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp) static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp) { cpumask_clear_cpu(cpu, &rcp->cpumask); - if (cpumask_empty(&rcp->cpumask)) { + if ( cpumask_empty(&rcp->cpumask) ) + { /* batch completed ! */ rcp->completed = rcp->cur; rcu_start_batch(rcp); @@ -332,7 +337,8 @@ static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp) static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) { - if (rdp->quiescbatch != rcp->cur) { + if ( rdp->quiescbatch != rcp->cur ) + { /* start new grace period: */ rdp->qs_pending = 1; rdp->quiescbatch = rcp->cur; @@ -343,7 +349,7 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, * qs_pending is checked instead of the actual bitmap to avoid * cacheline trashing. */ - if (!rdp->qs_pending) + if ( !rdp->qs_pending ) return; rdp->qs_pending = 0; @@ -353,20 +359,20 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync * during cpu startup. Ignore the quiescent state. */ - if (likely(rdp->quiescbatch == rcp->cur)) + if ( likely(rdp->quiescbatch == rcp->cur) ) cpu_quiet(rdp->cpu, rcp); spin_unlock(&rcp->lock); } - /* - * This does the RCU processing work from softirq context. 
+ * This does the RCU processing work from softirq context. */ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) { - if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) { + if ( rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch) ) + { *rdp->donetail = rdp->curlist; rdp->donetail = rdp->curtail; rdp->curlist = NULL; @@ -374,7 +380,8 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, } local_irq_disable(); - if (rdp->nxtlist && !rdp->curlist) { + if ( rdp->nxtlist && !rdp->curlist ) + { rdp->curlist = rdp->nxtlist; rdp->curtail = rdp->nxttail; rdp->nxtlist = NULL; @@ -392,18 +399,21 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, */ smp_rmb(); - if (!rcp->next_pending) { + if ( !rcp->next_pending ) + { /* and start it/schedule start if it's a new batch */ spin_lock(&rcp->lock); rcp->next_pending = 1; rcu_start_batch(rcp); spin_unlock(&rcp->lock); } - } else { + } + else + { local_irq_enable(); } rcu_check_quiescent_state(rcp, rdp); - if (rdp->donelist) + if ( rdp->donelist ) rcu_do_batch(rdp); } @@ -417,19 +427,19 @@ static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) /* This cpu has pending rcu entries and the grace period * for them has completed. */ - if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) + if ( rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch) ) return 1; /* This cpu has no pending entries, but there are new entries */ - if (!rdp->curlist && rdp->nxtlist) + if ( !rdp->curlist && rdp->nxtlist ) return 1; /* This cpu has finished callbacks to invoke */ - if (rdp->donelist) + if ( rdp->donelist ) return 1; /* The rcu core waits for a quiescent state from the cpu */ - if (rdp->quiescbatch != rcp->cur || rdp->qs_pending) + if ( rdp->quiescbatch != rcp->cur || rdp->qs_pending ) return 1; /* nothing to do */ @@ -468,7 +478,7 @@ void rcu_idle_timer_start() * the timer armed on CPUs that are in the process of quiescing while * going idle, unless they really are the ones with a queued callback. */ - if (likely(!rdp->curlist)) + if ( likely(!rdp->curlist) ) return; set_timer(&rdp->idle_timer, NOW() + idle_timer_period); @@ -479,7 +489,7 @@ void rcu_idle_timer_stop() { struct rcu_data *rdp = &this_cpu(rcu_data); - if (likely(!rdp->idle_timer_active)) + if ( likely(!rdp->idle_timer_active) ) return; rdp->idle_timer_active = false; @@ -503,7 +513,7 @@ void rcu_idle_timer_stop() stop_timer(&rdp->idle_timer); } -static void rcu_idle_timer_handler(void* data) +static void rcu_idle_timer_handler(void *data) { perfc_incr(rcu_idle_timer); @@ -525,13 +535,13 @@ static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list, { local_irq_disable(); *this_rdp->nxttail = list; - if (list) + if ( list ) this_rdp->nxttail = tail; local_irq_enable(); } -static void rcu_offline_cpu(struct rcu_data *this_rdp, - struct rcu_ctrlblk *rcp, struct rcu_data *rdp) +static void rcu_offline_cpu(struct rcu_data *this_rdp, struct rcu_ctrlblk *rcp, + struct rcu_data *rdp) { kill_timer(&rdp->idle_timer); @@ -539,7 +549,7 @@ static void rcu_offline_cpu(struct rcu_data *this_rdp, * indefinitely waiting for it, so flush it here. 
*/ spin_lock(&rcp->lock); - if (rcp->cur != rcp->completed) + if ( rcp->cur != rcp->completed ) cpu_quiet(rdp->cpu, rcp); spin_unlock(&rcp->lock); @@ -566,13 +576,13 @@ static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, init_timer(&rdp->idle_timer, rcu_idle_timer_handler, rdp, cpu); } -static int cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; struct rcu_data *rdp = &per_cpu(rcu_data, cpu); - switch ( action ) + switch (action) { case CPU_UP_PREPARE: rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp); @@ -588,15 +598,13 @@ static int cpu_callback( return NOTIFY_DONE; } -static struct notifier_block cpu_nfb = { - .notifier_call = cpu_callback -}; +static struct notifier_block cpu_nfb = {.notifier_call = cpu_callback}; void __init rcu_init(void) { void *cpu = (void *)(long)smp_processor_id(); static unsigned int __initdata idle_timer_period_ms = - IDLE_TIMER_PERIOD_DEFAULT / MILLISECS(1); + IDLE_TIMER_PERIOD_DEFAULT / MILLISECS(1); integer_param("rcu-idle-timer-period-ms", idle_timer_period_ms); /* We don't allow 0, or anything higher than IDLE_TIMER_PERIOD_MAX */ @@ -605,7 +613,7 @@ void __init rcu_init(void) { idle_timer_period_ms = IDLE_TIMER_PERIOD_DEFAULT / MILLISECS(1); printk("WARNING: rcu-idle-timer-period-ms outside of " - "(0,%"PRI_stime"]. Resetting it to %u.\n", + "(0,%" PRI_stime "]. Resetting it to %u.\n", IDLE_TIMER_PERIOD_MAX / MILLISECS(1), idle_timer_period_ms); } idle_timer_period = MILLISECS(idle_timer_period_ms); diff --git a/xen/common/rwlock.c b/xen/common/rwlock.c index d568bbf6de..05b7cce34e 100644 --- a/xen/common/rwlock.c +++ b/xen/common/rwlock.c @@ -76,37 +76,35 @@ void queue_write_lock_slowpath(rwlock_t *lock) * Set the waiting flag to notify readers that a writer is pending, * or wait for a previous writer to go away. */ - for ( ; ; ) + for ( ;; ) { cnts = atomic_read(&lock->cnts); if ( !(cnts & _QW_WMASK) && - (atomic_cmpxchg(&lock->cnts, cnts, - cnts | _QW_WAITING) == cnts) ) + (atomic_cmpxchg(&lock->cnts, cnts, cnts | _QW_WAITING) == cnts) ) break; cpu_relax(); } /* When no more readers, set the locked flag. */ - for ( ; ; ) + for ( ;; ) { cnts = atomic_read(&lock->cnts); if ( (cnts == _QW_WAITING) && - (atomic_cmpxchg(&lock->cnts, _QW_WAITING, - _QW_LOCKED) == _QW_WAITING) ) + (atomic_cmpxchg(&lock->cnts, _QW_WAITING, _QW_LOCKED) == + _QW_WAITING) ) break; cpu_relax(); } - unlock: +unlock: spin_unlock(&lock->lock); } - static DEFINE_PER_CPU(cpumask_t, percpu_rwlock_readers); void _percpu_write_lock(percpu_rwlock_t **per_cpudata, - percpu_rwlock_t *percpu_rwlock) + percpu_rwlock_t *percpu_rwlock) { unsigned int cpu; cpumask_t *rwlock_readers = &this_cpu(percpu_rwlock_readers); @@ -129,9 +127,9 @@ void _percpu_write_lock(percpu_rwlock_t **per_cpudata, cpumask_copy(rwlock_readers, &cpu_online_map); /* Check if there are any percpu readers in progress on this rwlock. 
*/ - for ( ; ; ) + for ( ;; ) { - for_each_cpu(cpu, rwlock_readers) + for_each_cpu (cpu, rwlock_readers) { /* * Remove any percpu readers not contending on this rwlock diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c index a4c6d00b81..7ba27e5373 100644 --- a/xen/common/sched_arinc653.c +++ b/xen/common/sched_arinc653.c @@ -47,7 +47,7 @@ /** * Retrieve the idle VCPU for a given physical CPU */ -#define IDLETASK(cpu) (idle_vcpu[cpu]) +#define IDLETASK(cpu) (idle_vcpu[cpu]) /** * Return a pointer to the ARINC 653-specific scheduler data information @@ -72,12 +72,12 @@ typedef struct arinc653_vcpu_s { /* vc points to Xen's struct vcpu so we can get to it from an * arinc653_vcpu_t pointer. */ - struct vcpu * vc; + struct vcpu *vc; /* awake holds whether the VCPU has been woken with vcpu_wake() */ - bool_t awake; + bool_t awake; /* list holds the linked list information for the list this VCPU * is stored in */ - struct list_head list; + struct list_head list; } arinc653_vcpu_t; /** @@ -91,12 +91,12 @@ typedef struct sched_entry_s xen_domain_handle_t dom_handle; /* vcpu_id holds the VCPU number for the VCPU that this schedule * entry refers to. */ - int vcpu_id; + int vcpu_id; /* runtime holds the number of nanoseconds that the VCPU for this * schedule entry should be allowed to run per major frame. */ - s_time_t runtime; + s_time_t runtime; /* vc holds a pointer to the Xen VCPU structure */ - struct vcpu * vc; + struct vcpu *vc; } sched_entry_t; /** @@ -179,17 +179,15 @@ static int dom_handle_cmp(const xen_domain_handle_t h1, *
  • NULL otherwise * */ -static struct vcpu *find_vcpu( - const struct scheduler *ops, - xen_domain_handle_t handle, - int vcpu_id) +static struct vcpu *find_vcpu(const struct scheduler *ops, + xen_domain_handle_t handle, int vcpu_id) { arinc653_vcpu_t *avcpu; /* loop through the vcpu_list looking for the specified VCPU */ - list_for_each_entry ( avcpu, &SCHED_PRIV(ops)->vcpu_list, list ) - if ( (dom_handle_cmp(avcpu->vc->domain->handle, handle) == 0) - && (vcpu_id == avcpu->vc->vcpu_id) ) + list_for_each_entry (avcpu, &SCHED_PRIV(ops)->vcpu_list, list) + if ( (dom_handle_cmp(avcpu->vc->domain->handle, handle) == 0) && + (vcpu_id == avcpu->vc->vcpu_id) ) return avcpu->vc; return NULL; @@ -208,8 +206,7 @@ static void update_schedule_vcpus(const struct scheduler *ops) for ( i = 0; i < n_entries; i++ ) SCHED_PRIV(ops)->schedule[i].vc = - find_vcpu(ops, - SCHED_PRIV(ops)->schedule[i].dom_handle, + find_vcpu(ops, SCHED_PRIV(ops)->schedule[i].dom_handle, SCHED_PRIV(ops)->schedule[i].vcpu_id); } @@ -224,10 +221,8 @@ static void update_schedule_vcpus(const struct scheduler *ops) *
  • !0 = error * */ -static int -arinc653_sched_set( - const struct scheduler *ops, - struct xen_sysctl_arinc653_schedule *schedule) +static int arinc653_sched_set(const struct scheduler *ops, + struct xen_sysctl_arinc653_schedule *schedule) { a653sched_priv_t *sched_priv = SCHED_PRIV(ops); s_time_t total_runtime = 0; @@ -238,9 +233,8 @@ arinc653_sched_set( spin_lock_irqsave(&sched_priv->lock, flags); /* Check for valid major frame and number of schedule entries. */ - if ( (schedule->major_frame <= 0) - || (schedule->num_sched_entries < 1) - || (schedule->num_sched_entries > ARINC653_MAX_DOMAINS_PER_SCHEDULE) ) + if ( (schedule->major_frame <= 0) || (schedule->num_sched_entries < 1) || + (schedule->num_sched_entries > ARINC653_MAX_DOMAINS_PER_SCHEDULE) ) goto fail; for ( i = 0; i < schedule->num_sched_entries; i++ ) @@ -268,10 +262,8 @@ arinc653_sched_set( memcpy(sched_priv->schedule[i].dom_handle, schedule->sched_entries[i].dom_handle, sizeof(sched_priv->schedule[i].dom_handle)); - sched_priv->schedule[i].vcpu_id = - schedule->sched_entries[i].vcpu_id; - sched_priv->schedule[i].runtime = - schedule->sched_entries[i].runtime; + sched_priv->schedule[i].vcpu_id = schedule->sched_entries[i].vcpu_id; + sched_priv->schedule[i].runtime = schedule->sched_entries[i].runtime; } update_schedule_vcpus(ops); @@ -286,7 +278,7 @@ arinc653_sched_set( rc = 0; - fail: +fail: spin_unlock_irqrestore(&sched_priv->lock, flags); return rc; } @@ -301,10 +293,8 @@ arinc653_sched_set( *
  • !0 = error * */ -static int -arinc653_sched_get( - const struct scheduler *ops, - struct xen_sysctl_arinc653_schedule *schedule) +static int arinc653_sched_get(const struct scheduler *ops, + struct xen_sysctl_arinc653_schedule *schedule) { a653sched_priv_t *sched_priv = SCHED_PRIV(ops); unsigned int i; @@ -342,8 +332,7 @@ arinc653_sched_get( *
  • !0 = error * */ -static int -a653sched_init(struct scheduler *ops) +static int a653sched_init(struct scheduler *ops) { a653sched_priv_t *prv; @@ -365,8 +354,7 @@ a653sched_init(struct scheduler *ops) * * @param ops Pointer to this instance of the scheduler structure */ -static void -a653sched_deinit(struct scheduler *ops) +static void a653sched_deinit(struct scheduler *ops) { xfree(SCHED_PRIV(ops)); ops->sched_data = NULL; @@ -379,8 +367,8 @@ a653sched_deinit(struct scheduler *ops) * * @return Pointer to the allocated data */ -static void * -a653sched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd) +static void *a653sched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, + void *dd) { a653sched_priv_t *sched_priv = SCHED_PRIV(ops); arinc653_vcpu_t *svc; @@ -397,7 +385,7 @@ a653sched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd) spin_lock_irqsave(&sched_priv->lock, flags); - /* + /* * Add every one of dom0's vcpus to the schedule, as long as there are * slots available. */ @@ -439,12 +427,11 @@ a653sched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd) * * @param ops Pointer to this instance of the scheduler structure */ -static void -a653sched_free_vdata(const struct scheduler *ops, void *priv) +static void a653sched_free_vdata(const struct scheduler *ops, void *priv) { arinc653_vcpu_t *av = priv; - if (av == NULL) + if ( av == NULL ) return; if ( !is_idle_vcpu(av->vc) ) @@ -460,8 +447,7 @@ a653sched_free_vdata(const struct scheduler *ops, void *priv) * @param ops Pointer to this instance of the scheduler structure * @param vc Pointer to the VCPU structure for the current domain */ -static void -a653sched_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc) +static void a653sched_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc) { if ( AVCPU(vc) != NULL ) AVCPU(vc)->awake = 0; @@ -480,8 +466,7 @@ a653sched_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc) * @param ops Pointer to this instance of the scheduler structure * @param vc Pointer to the VCPU structure for the current domain */ -static void -a653sched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) +static void a653sched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) { if ( AVCPU(vc) != NULL ) AVCPU(vc)->awake = 1; @@ -500,14 +485,12 @@ a653sched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) * Amount of time to execute the returned VCPU * Flag for whether the VCPU was migrated */ -static struct task_slice -a653sched_do_schedule( - const struct scheduler *ops, - s_time_t now, - bool_t tasklet_work_scheduled) +static struct task_slice a653sched_do_schedule(const struct scheduler *ops, + s_time_t now, + bool_t tasklet_work_scheduled) { - struct task_slice ret; /* hold the chosen domain */ - struct vcpu * new_task = NULL; + struct task_slice ret; /* hold the chosen domain */ + struct vcpu *new_task = NULL; static unsigned int sched_index = 0; static s_time_t next_switch_time; a653sched_priv_t *sched_priv = SCHED_PRIV(ops); @@ -529,8 +512,8 @@ a653sched_do_schedule( } else { - while ( (now >= next_switch_time) - && (sched_index < sched_priv->num_schedule_entries) ) + while ( (now >= next_switch_time) && + (sched_index < sched_priv->num_schedule_entries) ) { /* time to switch to the next domain in this major frame */ sched_index++; @@ -552,14 +535,12 @@ a653sched_do_schedule( * structure. */ new_task = (sched_index < sched_priv->num_schedule_entries) - ? sched_priv->schedule[sched_index].vc - : IDLETASK(cpu); + ? 
sched_priv->schedule[sched_index].vc + : IDLETASK(cpu); /* Check to see if the new task can be run (awake & runnable). */ - if ( !((new_task != NULL) - && (AVCPU(new_task) != NULL) - && AVCPU(new_task)->awake - && vcpu_runnable(new_task)) ) + if ( !((new_task != NULL) && (AVCPU(new_task) != NULL) && + AVCPU(new_task)->awake && vcpu_runnable(new_task)) ) new_task = IDLETASK(cpu); BUG_ON(new_task == NULL); @@ -576,8 +557,7 @@ a653sched_do_schedule( new_task = IDLETASK(cpu); /* Running this task would result in a migration */ - if ( !is_idle_vcpu(new_task) - && (new_task->processor != cpu) ) + if ( !is_idle_vcpu(new_task) && (new_task->processor != cpu) ) new_task = IDLETASK(cpu); /* @@ -601,13 +581,12 @@ a653sched_do_schedule( * * @return Number of selected physical CPU */ -static int -a653sched_pick_cpu(const struct scheduler *ops, struct vcpu *vc) +static int a653sched_pick_cpu(const struct scheduler *ops, struct vcpu *vc) { cpumask_t *online; unsigned int cpu; - /* + /* * If present, prefer vc's current processor, else * just find the first valid vcpu . */ @@ -615,8 +594,7 @@ a653sched_pick_cpu(const struct scheduler *ops, struct vcpu *vc) cpu = cpumask_first(online); - if ( cpumask_test_cpu(vc->processor, online) - || (cpu >= nr_cpu_ids) ) + if ( cpumask_test_cpu(vc->processor, online) || (cpu >= nr_cpu_ids) ) cpu = vc->processor; return cpu; @@ -630,9 +608,8 @@ a653sched_pick_cpu(const struct scheduler *ops, struct vcpu *vc) * @param pdata scheduler specific PCPU data (we don't have any) * @param vdata scheduler specific VCPU data of the idle vcpu */ -static void -a653_switch_sched(struct scheduler *new_ops, unsigned int cpu, - void *pdata, void *vdata) +static void a653_switch_sched(struct scheduler *new_ops, unsigned int cpu, + void *pdata, void *vdata) { struct schedule_data *sd = &per_cpu(schedule_data, cpu); arinc653_vcpu_t *svc = vdata; @@ -662,14 +639,13 @@ a653_switch_sched(struct scheduler *new_ops, unsigned int cpu, * @param ops Pointer to this instance of the scheduler structure * @param sc Pointer to the scheduler operation specified by Domain 0 */ -static int -a653sched_adjust_global(const struct scheduler *ops, - struct xen_sysctl_scheduler_op *sc) +static int a653sched_adjust_global(const struct scheduler *ops, + struct xen_sysctl_scheduler_op *sc) { struct xen_sysctl_arinc653_schedule local_sched; int rc = -EINVAL; - switch ( sc->cmd ) + switch (sc->cmd) { case XEN_SYSCTL_SCHEDOP_putinfo: if ( copy_from_guest(&local_sched, sc->u.sched_arinc653.schedule, 1) ) @@ -701,39 +677,39 @@ a653sched_adjust_global(const struct scheduler *ops, * The symbol must be visible to the rest of Xen at link time. 
*/ static const struct scheduler sched_arinc653_def = { - .name = "ARINC 653 Scheduler", - .opt_name = "arinc653", - .sched_id = XEN_SCHEDULER_ARINC653, - .sched_data = NULL, + .name = "ARINC 653 Scheduler", + .opt_name = "arinc653", + .sched_id = XEN_SCHEDULER_ARINC653, + .sched_data = NULL, - .init = a653sched_init, - .deinit = a653sched_deinit, + .init = a653sched_init, + .deinit = a653sched_deinit, - .free_vdata = a653sched_free_vdata, - .alloc_vdata = a653sched_alloc_vdata, + .free_vdata = a653sched_free_vdata, + .alloc_vdata = a653sched_alloc_vdata, - .insert_vcpu = NULL, - .remove_vcpu = NULL, + .insert_vcpu = NULL, + .remove_vcpu = NULL, - .sleep = a653sched_vcpu_sleep, - .wake = a653sched_vcpu_wake, - .yield = NULL, - .context_saved = NULL, + .sleep = a653sched_vcpu_sleep, + .wake = a653sched_vcpu_wake, + .yield = NULL, + .context_saved = NULL, - .do_schedule = a653sched_do_schedule, + .do_schedule = a653sched_do_schedule, - .pick_cpu = a653sched_pick_cpu, + .pick_cpu = a653sched_pick_cpu, - .switch_sched = a653_switch_sched, + .switch_sched = a653_switch_sched, - .adjust = NULL, - .adjust_global = a653sched_adjust_global, + .adjust = NULL, + .adjust_global = a653sched_adjust_global, - .dump_settings = NULL, + .dump_settings = NULL, .dump_cpu_state = NULL, - .tick_suspend = NULL, - .tick_resume = NULL, + .tick_suspend = NULL, + .tick_resume = NULL, }; REGISTER_SCHEDULER(sched_arinc653_def); diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c index 3abe20def8..89ce6e0d84 100644 --- a/xen/common/sched_credit.c +++ b/xen/common/sched_credit.c @@ -24,7 +24,6 @@ #include #include - /* * Locking: * - Scheduler-lock (a.k.a. runqueue lock): @@ -45,23 +44,21 @@ /* * Basic constants */ -#define CSCHED_DEFAULT_WEIGHT 256 -#define CSCHED_TICKS_PER_TSLICE 3 +#define CSCHED_DEFAULT_WEIGHT 256 +#define CSCHED_TICKS_PER_TSLICE 3 /* Default timeslice: 30ms */ -#define CSCHED_DEFAULT_TSLICE_MS 30 -#define CSCHED_CREDITS_PER_MSEC 10 +#define CSCHED_DEFAULT_TSLICE_MS 30 +#define CSCHED_CREDITS_PER_MSEC 10 /* Never set a timer shorter than this value. */ -#define CSCHED_MIN_TIMER XEN_SYSCTL_SCHED_RATELIMIT_MIN - +#define CSCHED_MIN_TIMER XEN_SYSCTL_SCHED_RATELIMIT_MIN /* * Priorities */ -#define CSCHED_PRI_TS_BOOST 0 /* time-share waking up */ -#define CSCHED_PRI_TS_UNDER -1 /* time-share w/ credits */ -#define CSCHED_PRI_TS_OVER -2 /* time-share w/o credits */ -#define CSCHED_PRI_IDLE -64 /* idle */ - +#define CSCHED_PRI_TS_BOOST 0 /* time-share waking up */ +#define CSCHED_PRI_TS_UNDER -1 /* time-share w/ credits */ +#define CSCHED_PRI_TS_OVER -2 /* time-share w/o credits */ +#define CSCHED_PRI_IDLE -64 /* idle */ /* * Flags @@ -70,23 +67,20 @@ * inconsistent set of locks. Therefore atomic-safe bit operations must * be used for accessing it. 
*/ -#define CSCHED_FLAG_VCPU_PARKED 0x0 /* VCPU over capped credits */ -#define CSCHED_FLAG_VCPU_YIELD 0x1 /* VCPU yielding */ -#define CSCHED_FLAG_VCPU_MIGRATING 0x2 /* VCPU may have moved to a new pcpu */ -#define CSCHED_FLAG_VCPU_PINNED 0x4 /* VCPU can run only on 1 pcpu */ - +#define CSCHED_FLAG_VCPU_PARKED 0x0 /* VCPU over capped credits */ +#define CSCHED_FLAG_VCPU_YIELD 0x1 /* VCPU yielding */ +#define CSCHED_FLAG_VCPU_MIGRATING 0x2 /* VCPU may have moved to a new pcpu */ +#define CSCHED_FLAG_VCPU_PINNED 0x4 /* VCPU can run only on 1 pcpu */ /* * Useful macros */ -#define CSCHED_PRIV(_ops) \ - ((struct csched_private *)((_ops)->sched_data)) -#define CSCHED_PCPU(_c) \ +#define CSCHED_PRIV(_ops) ((struct csched_private *)((_ops)->sched_data)) +#define CSCHED_PCPU(_c) \ ((struct csched_pcpu *)per_cpu(schedule_data, _c).sched_priv) -#define CSCHED_VCPU(_vcpu) ((struct csched_vcpu *) (_vcpu)->sched_priv) -#define CSCHED_DOM(_dom) ((struct csched_dom *) (_dom)->sched_priv) -#define RUNQ(_cpu) (&(CSCHED_PCPU(_cpu)->runq)) - +#define CSCHED_VCPU(_vcpu) ((struct csched_vcpu *)(_vcpu)->sched_priv) +#define CSCHED_DOM(_dom) ((struct csched_dom *)(_dom)->sched_priv) +#define RUNQ(_cpu) (&(CSCHED_PCPU(_cpu)->runq)) /* * CSCHED_STATS @@ -100,42 +94,46 @@ #define CSCHED_STATS -#define SCHED_VCPU_STATS_RESET(_V) \ - do \ - { \ - memset(&(_V)->stats, 0, sizeof((_V)->stats)); \ +#define SCHED_VCPU_STATS_RESET(_V) \ + do { \ + memset(&(_V)->stats, 0, sizeof((_V)->stats)); \ } while ( 0 ) -#define SCHED_VCPU_STAT_CRANK(_V, _X) (((_V)->stats._X)++) +#define SCHED_VCPU_STAT_CRANK(_V, _X) (((_V)->stats._X)++) -#define SCHED_VCPU_STAT_SET(_V, _X, _Y) (((_V)->stats._X) = (_Y)) +#define SCHED_VCPU_STAT_SET(_V, _X, _Y) (((_V)->stats._X) = (_Y)) #else /* !SCHED_STATS */ #undef CSCHED_STATS -#define SCHED_VCPU_STATS_RESET(_V) do {} while ( 0 ) -#define SCHED_VCPU_STAT_CRANK(_V, _X) do {} while ( 0 ) -#define SCHED_VCPU_STAT_SET(_V, _X, _Y) do {} while ( 0 ) +#define SCHED_VCPU_STATS_RESET(_V) \ + do { \ + } while ( 0 ) +#define SCHED_VCPU_STAT_CRANK(_V, _X) \ + do { \ + } while ( 0 ) +#define SCHED_VCPU_STAT_SET(_V, _X, _Y) \ + do { \ + } while ( 0 ) #endif /* SCHED_STATS */ - /* * Credit tracing events ("only" 512 available!). Check * include/public/trace.h for more details. 
*/ #define TRC_CSCHED_SCHED_TASKLET TRC_SCHED_CLASS_EVT(CSCHED, 1) #define TRC_CSCHED_ACCOUNT_START TRC_SCHED_CLASS_EVT(CSCHED, 2) -#define TRC_CSCHED_ACCOUNT_STOP TRC_SCHED_CLASS_EVT(CSCHED, 3) -#define TRC_CSCHED_STOLEN_VCPU TRC_SCHED_CLASS_EVT(CSCHED, 4) -#define TRC_CSCHED_PICKED_CPU TRC_SCHED_CLASS_EVT(CSCHED, 5) -#define TRC_CSCHED_TICKLE TRC_SCHED_CLASS_EVT(CSCHED, 6) -#define TRC_CSCHED_BOOST_START TRC_SCHED_CLASS_EVT(CSCHED, 7) -#define TRC_CSCHED_BOOST_END TRC_SCHED_CLASS_EVT(CSCHED, 8) -#define TRC_CSCHED_SCHEDULE TRC_SCHED_CLASS_EVT(CSCHED, 9) -#define TRC_CSCHED_RATELIMIT TRC_SCHED_CLASS_EVT(CSCHED, 10) -#define TRC_CSCHED_STEAL_CHECK TRC_SCHED_CLASS_EVT(CSCHED, 11) +#define TRC_CSCHED_ACCOUNT_STOP TRC_SCHED_CLASS_EVT(CSCHED, 3) +#define TRC_CSCHED_STOLEN_VCPU TRC_SCHED_CLASS_EVT(CSCHED, 4) +#define TRC_CSCHED_PICKED_CPU TRC_SCHED_CLASS_EVT(CSCHED, 5) +#define TRC_CSCHED_TICKLE TRC_SCHED_CLASS_EVT(CSCHED, 6) +#define TRC_CSCHED_BOOST_START TRC_SCHED_CLASS_EVT(CSCHED, 7) +#define TRC_CSCHED_BOOST_END TRC_SCHED_CLASS_EVT(CSCHED, 8) +#define TRC_CSCHED_SCHEDULE TRC_SCHED_CLASS_EVT(CSCHED, 9) +#define TRC_CSCHED_RATELIMIT TRC_SCHED_CLASS_EVT(CSCHED, 10) +#define TRC_CSCHED_STEAL_CHECK TRC_SCHED_CLASS_EVT(CSCHED, 11) /* * Boot parameters @@ -146,7 +144,8 @@ integer_param("sched_credit_tslice_ms", sched_credit_tslice_ms); /* * Physical CPU */ -struct csched_pcpu { +struct csched_pcpu +{ struct list_head runq; uint32_t runq_sort_last; @@ -160,7 +159,8 @@ struct csched_pcpu { /* * Virtual CPU */ -struct csched_vcpu { +struct csched_vcpu +{ struct list_head runq_elem; struct list_head active_vcpu_elem; @@ -168,7 +168,7 @@ struct csched_vcpu { struct csched_dom *sdom; struct vcpu *vcpu; - s_time_t start_time; /* When we were scheduled (used for credit) */ + s_time_t start_time; /* When we were scheduled (used for credit) */ unsigned flags; int pri; @@ -176,7 +176,8 @@ struct csched_vcpu { unsigned int residual; #ifdef CSCHED_STATS - struct { + struct + { int credit_last; uint32_t credit_incr; uint32_t state_active; @@ -191,7 +192,8 @@ struct csched_vcpu { /* * Domain */ -struct csched_dom { +struct csched_dom +{ struct list_head active_vcpu; struct list_head active_sdom_elem; struct domain *dom; @@ -203,7 +205,8 @@ struct csched_dom { /* * System-wide private data */ -struct csched_private { +struct csched_private +{ /* lock for the whole pluggable scheduler, nests inside cpupool_lock */ spinlock_t lock; @@ -230,14 +233,12 @@ struct csched_private { static void csched_tick(void *_cpu); static void csched_acct(void *dummy); -static inline int -__vcpu_on_runq(struct csched_vcpu *svc) +static inline int __vcpu_on_runq(struct csched_vcpu *svc) { return !list_empty(&svc->runq_elem); } -static inline struct csched_vcpu * -__runq_elem(struct list_head *elem) +static inline struct csched_vcpu *__runq_elem(struct list_head *elem) { return list_entry(elem, struct csched_vcpu, runq_elem); } @@ -254,34 +255,30 @@ static inline bool_t is_runq_idle(unsigned int cpu) is_idle_vcpu(__runq_elem(RUNQ(cpu)->next)->vcpu); } -static inline void -inc_nr_runnable(unsigned int cpu) +static inline void inc_nr_runnable(unsigned int cpu) { ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock)); CSCHED_PCPU(cpu)->nr_runnable++; - } -static inline void -dec_nr_runnable(unsigned int cpu) +static inline void dec_nr_runnable(unsigned int cpu) { ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock)); ASSERT(CSCHED_PCPU(cpu)->nr_runnable >= 1); CSCHED_PCPU(cpu)->nr_runnable--; } -static inline void 
-__runq_insert(struct csched_vcpu *svc) +static inline void __runq_insert(struct csched_vcpu *svc) { unsigned int cpu = svc->vcpu->processor; - const struct list_head * const runq = RUNQ(cpu); + const struct list_head *const runq = RUNQ(cpu); struct list_head *iter; - BUG_ON( __vcpu_on_runq(svc) ); + BUG_ON(__vcpu_on_runq(svc)); - list_for_each( iter, runq ) + list_for_each (iter, runq) { - const struct csched_vcpu * const iter_svc = __runq_elem(iter); + const struct csched_vcpu *const iter_svc = __runq_elem(iter); if ( svc->pri > iter_svc->pri ) break; } @@ -289,10 +286,10 @@ __runq_insert(struct csched_vcpu *svc) /* If the vcpu yielded, try to put it behind one lower-priority * runnable vcpu if we can. The next runq_sort will bring it forward * within 30ms if the queue too long. */ - if ( test_bit(CSCHED_FLAG_VCPU_YIELD, &svc->flags) - && __runq_elem(iter)->pri > CSCHED_PRI_IDLE ) + if ( test_bit(CSCHED_FLAG_VCPU_YIELD, &svc->flags) && + __runq_elem(iter)->pri > CSCHED_PRI_IDLE ) { - iter=iter->next; + iter = iter->next; /* Some sanity checks */ BUG_ON(iter == runq); @@ -301,22 +298,19 @@ __runq_insert(struct csched_vcpu *svc) list_add_tail(&svc->runq_elem, iter); } -static inline void -runq_insert(struct csched_vcpu *svc) +static inline void runq_insert(struct csched_vcpu *svc) { __runq_insert(svc); inc_nr_runnable(svc->vcpu->processor); } -static inline void -__runq_remove(struct csched_vcpu *svc) +static inline void __runq_remove(struct csched_vcpu *svc) { - BUG_ON( !__vcpu_on_runq(svc) ); + BUG_ON(!__vcpu_on_runq(svc)); list_del_init(&svc->runq_elem); } -static inline void -runq_remove(struct csched_vcpu *svc) +static inline void runq_remove(struct csched_vcpu *svc) { dec_nr_runnable(svc->vcpu->processor); __runq_remove(svc); @@ -329,7 +323,7 @@ static void burn_credits(struct csched_vcpu *svc, s_time_t now) unsigned int credits; /* Assert svc is current */ - ASSERT( svc == CSCHED_VCPU(curr_on_cpu(svc->vcpu->processor)) ); + ASSERT(svc == CSCHED_VCPU(curr_on_cpu(svc->vcpu->processor))); if ( (delta = now - svc->start_time) <= 0 ) return; @@ -350,7 +344,7 @@ DEFINE_PER_CPU(unsigned int, last_tickle_cpu); static inline void __runq_tickle(struct csched_vcpu *new) { unsigned int cpu = new->vcpu->processor; - struct csched_vcpu * const cur = CSCHED_VCPU(curr_on_cpu(cpu)); + struct csched_vcpu *const cur = CSCHED_VCPU(curr_on_cpu(cpu)); struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu)); cpumask_t mask, idle_mask, *online; int balance_step, idlers_empty; @@ -390,8 +384,7 @@ static inline void __runq_tickle(struct csched_vcpu *new) * suitable idler on which to run new, run it here, but try to * find a suitable idler on which to run cur instead. */ - if ( cur->pri == CSCHED_PRI_IDLE - || (idlers_empty && new->pri > cur->pri) ) + if ( cur->pri == CSCHED_PRI_IDLE || (idlers_empty && new->pri > cur->pri) ) { if ( cur->pri != CSCHED_PRI_IDLE ) SCHED_STAT_CRANK(tickled_busy_cpu); @@ -405,19 +398,19 @@ static inline void __runq_tickle(struct csched_vcpu *new) * Soft and hard affinity balancing loop. For vcpus without * a useful soft affinity, consider hard affinity only. */ - for_each_affinity_balance_step( balance_step ) + for_each_affinity_balance_step (balance_step) { int new_idlers_empty; - if ( balance_step == BALANCE_SOFT_AFFINITY - && !has_soft_affinity(new->vcpu) ) + if ( balance_step == BALANCE_SOFT_AFFINITY && + !has_soft_affinity(new->vcpu) ) continue; /* Are there idlers suitable for new (for this balance step)? 
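(Aside, not part of the patch: the runqueue helpers above lean on Xen's intrusive list primitives; each csched_vcpu embeds a list_head named runq_elem, and __runq_elem() uses list_entry() to map a list node back to the vcpu that contains it. A rough standalone sketch of that pattern, using a stand-in container_of built on offsetof and a hypothetical item type rather than Xen's list.h:

    #include <stddef.h>
    #include <stdio.h>

    /* Minimal stand-in for the kernel-style intrusive list node. */
    struct list_head {
        struct list_head *next, *prev;
    };

    /* Recover the containing structure from a pointer to its embedded member. */
    #define my_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Hypothetical item playing the role of csched_vcpu. */
    struct item {
        int pri;
        struct list_head runq_elem;
    };

    int main(void)
    {
        struct item it = { .pri = 7 };
        struct list_head *node = &it.runq_elem;

        /* Same mapping __runq_elem() performs via list_entry(). */
        struct item *back = my_container_of(node, struct item, runq_elem);
        printf("pri=%d\n", back->pri);  /* prints 7 */
        return 0;
    }
)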
*/ affinity_balance_cpumask(new->vcpu, balance_step, cpumask_scratch_cpu(cpu)); - cpumask_and(cpumask_scratch_cpu(cpu), - cpumask_scratch_cpu(cpu), &idle_mask); + cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu), + &idle_mask); new_idlers_empty = cpumask_empty(cpumask_scratch_cpu(cpu)); /* @@ -425,8 +418,7 @@ static inline void __runq_tickle(struct csched_vcpu *new) * for new in its soft affinity mask, make sure we check its * hard affinity as well, before taking final decisions. */ - if ( new_idlers_empty - && balance_step == BALANCE_SOFT_AFFINITY ) + if ( new_idlers_empty && balance_step == BALANCE_SOFT_AFFINITY ) continue; /* @@ -461,9 +453,8 @@ static inline void __runq_tickle(struct csched_vcpu *new) SCHED_STAT_CRANK(tickled_idle_cpu); if ( opt_tickle_one_idle ) { - this_cpu(last_tickle_cpu) = - cpumask_cycle(this_cpu(last_tickle_cpu), - cpumask_scratch_cpu(cpu)); + this_cpu(last_tickle_cpu) = cpumask_cycle( + this_cpu(last_tickle_cpu), cpumask_scratch_cpu(cpu)); __cpumask_set_cpu(this_cpu(last_tickle_cpu), &mask); } else @@ -476,13 +467,13 @@ static inline void __runq_tickle(struct csched_vcpu *new) } } - tickle: +tickle: if ( !cpumask_empty(&mask) ) { if ( unlikely(tb_init_done) ) { /* Avoid TRACE_*: saves checking !tb_init_done each step */ - for_each_cpu(cpu, &mask) + for_each_cpu (cpu, &mask) __trace_var(TRC_CSCHED_TICKLE, 1, sizeof(cpu), &cpu); } @@ -495,7 +486,7 @@ static inline void __runq_tickle(struct csched_vcpu *new) * In the default (and most common) case, when opt_rickle_one_idle is * true, the loop does only one step, and only one bit is cleared. */ - for_each_cpu(cpu, &mask) + for_each_cpu (cpu, &mask) cpumask_clear_cpu(cpu, prv->idlers); cpumask_raise_softirq(&mask, SCHEDULE_SOFTIRQ); } @@ -503,8 +494,7 @@ static inline void __runq_tickle(struct csched_vcpu *new) SCHED_STAT_CRANK(tickled_no_cpu); } -static void -csched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu) +static void csched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu) { struct csched_private *prv = CSCHED_PRIV(ops); @@ -520,8 +510,8 @@ csched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu) xfree(pcpu); } -static void -csched_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu) +static void csched_deinit_pdata(const struct scheduler *ops, void *pcpu, + int cpu) { struct csched_private *prv = CSCHED_PRIV(ops); struct csched_pcpu *spc = pcpu; @@ -553,7 +543,7 @@ csched_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu) { cpumask_and(cpumask_scratch, prv->cpus, &node_to_cpumask(node)); if ( !cpumask_empty(cpumask_scratch) ) - prv->balance_bias[node] = cpumask_first(cpumask_scratch); + prv->balance_bias[node] = cpumask_first(cpumask_scratch); } kill_timer(&spc->ticker); if ( prv->ncpus == 0 ) @@ -562,8 +552,7 @@ csched_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu) spin_unlock_irqrestore(&prv->lock, flags); } -static void * -csched_alloc_pdata(const struct scheduler *ops, int cpu) +static void *csched_alloc_pdata(const struct scheduler *ops, int cpu) { struct csched_pcpu *spc; @@ -575,8 +564,8 @@ csched_alloc_pdata(const struct scheduler *ops, int cpu) return spc; } -static void -init_pdata(struct csched_private *prv, struct csched_pcpu *spc, int cpu) +static void init_pdata(struct csched_private *prv, struct csched_pcpu *spc, + int cpu) { ASSERT(spin_is_locked(&prv->lock)); /* cpu data needs to be allocated, but STILL uninitialized. 
*/ @@ -598,7 +587,7 @@ init_pdata(struct csched_private *prv, struct csched_pcpu *spc, int cpu) prv->balance_bias[cpu_to_node(cpu)] = cpu; init_timer(&spc->ticker, csched_tick, (void *)(unsigned long)cpu, cpu); - set_timer(&spc->ticker, NOW() + MICROSECS(prv->tick_period_us) ); + set_timer(&spc->ticker, NOW() + MICROSECS(prv->tick_period_us)); INIT_LIST_HEAD(&spc->runq); spc->runq_sort_last = prv->runq_sort; @@ -610,8 +599,7 @@ init_pdata(struct csched_private *prv, struct csched_pcpu *spc, int cpu) spc->nr_runnable = 0; } -static void -csched_init_pdata(const struct scheduler *ops, void *pdata, int cpu) +static void csched_init_pdata(const struct scheduler *ops, void *pdata, int cpu) { unsigned long flags; struct csched_private *prv = CSCHED_PRIV(ops); @@ -631,9 +619,8 @@ csched_init_pdata(const struct scheduler *ops, void *pdata, int cpu) } /* Change the scheduler of cpu to us (Credit). */ -static void -csched_switch_sched(struct scheduler *new_ops, unsigned int cpu, - void *pdata, void *vdata) +static void csched_switch_sched(struct scheduler *new_ops, unsigned int cpu, + void *pdata, void *vdata) { struct schedule_data *sd = &per_cpu(schedule_data, cpu); struct csched_private *prv = CSCHED_PRIV(new_ops); @@ -666,27 +653,26 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu, } #ifndef NDEBUG -static inline void -__csched_vcpu_check(struct vcpu *vc) +static inline void __csched_vcpu_check(struct vcpu *vc) { - struct csched_vcpu * const svc = CSCHED_VCPU(vc); - struct csched_dom * const sdom = svc->sdom; + struct csched_vcpu *const svc = CSCHED_VCPU(vc); + struct csched_dom *const sdom = svc->sdom; - BUG_ON( svc->vcpu != vc ); - BUG_ON( sdom != CSCHED_DOM(vc->domain) ); + BUG_ON(svc->vcpu != vc); + BUG_ON(sdom != CSCHED_DOM(vc->domain)); if ( sdom ) { - BUG_ON( is_idle_vcpu(vc) ); - BUG_ON( sdom->dom != vc->domain ); + BUG_ON(is_idle_vcpu(vc)); + BUG_ON(sdom->dom != vc->domain); } else { - BUG_ON( !is_idle_vcpu(vc) ); + BUG_ON(!is_idle_vcpu(vc)); } SCHED_STAT_CRANK(vcpu_check); } -#define CSCHED_VCPU_CHECK(_vc) (__csched_vcpu_check(_vc)) +#define CSCHED_VCPU_CHECK(_vc) (__csched_vcpu_check(_vc)) #else #define CSCHED_VCPU_CHECK(_vc) #endif @@ -700,8 +686,8 @@ __csched_vcpu_check(struct vcpu *vc) static unsigned int vcpu_migration_delay_us; integer_param("vcpu_migration_delay", vcpu_migration_delay_us); -static inline bool -__csched_vcpu_is_cache_hot(const struct csched_private *prv, struct vcpu *v) +static inline bool __csched_vcpu_is_cache_hot(const struct csched_private *prv, + struct vcpu *v) { bool hot = prv->vcpu_migr_delay && (NOW() - v->last_run_time) < prv->vcpu_migr_delay; @@ -712,9 +698,9 @@ __csched_vcpu_is_cache_hot(const struct csched_private *prv, struct vcpu *v) return hot; } -static inline int -__csched_vcpu_is_migrateable(const struct csched_private *prv, struct vcpu *vc, - int dest_cpu, cpumask_t *mask) +static inline int __csched_vcpu_is_migrateable(const struct csched_private *prv, + struct vcpu *vc, int dest_cpu, + cpumask_t *mask) { /* * Don't pick up work that's hot on peer PCPU, or that can't (or @@ -729,8 +715,8 @@ __csched_vcpu_is_migrateable(const struct csched_private *prv, struct vcpu *vc, cpumask_test_cpu(dest_cpu, mask); } -static int -_csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit) +static int _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, + bool_t commit) { /* We must always use vc->procssor's scratch space */ cpumask_t *cpus = cpumask_scratch_cpu(vc->processor); @@ -740,7 +726,7 @@ 
_csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit) int cpu = vc->processor; int balance_step; - for_each_affinity_balance_step( balance_step ) + for_each_affinity_balance_step (balance_step) { affinity_balance_cpumask(vc, balance_step, cpus); cpumask_and(cpus, online, cpus); @@ -766,7 +752,8 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit) /* If present, prefer vc's current processor */ cpu = cpumask_test_cpu(vc->processor, cpus) - ? vc->processor : cpumask_cycle(vc->processor, cpus); + ? vc->processor + : cpumask_cycle(vc->processor, cpus); ASSERT(cpumask_test_cpu(cpu, cpus)); /* @@ -819,18 +806,18 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit) { /* We're on the same socket, so check the busy-ness of threads. * Migrate if # of idlers is less at all */ - ASSERT( cpumask_test_cpu(nxt, per_cpu(cpu_core_mask, cpu)) ); + ASSERT(cpumask_test_cpu(nxt, per_cpu(cpu_core_mask, cpu))); migrate_factor = 1; - cpumask_and(&cpu_idlers, &idlers, per_cpu(cpu_sibling_mask, - cpu)); - cpumask_and(&nxt_idlers, &idlers, per_cpu(cpu_sibling_mask, - nxt)); + cpumask_and(&cpu_idlers, &idlers, + per_cpu(cpu_sibling_mask, cpu)); + cpumask_and(&nxt_idlers, &idlers, + per_cpu(cpu_sibling_mask, nxt)); } else { /* We're on different sockets, so check the busy-ness of cores. * Migrate only if the other core is twice as idle */ - ASSERT( !cpumask_test_cpu(nxt, per_cpu(cpu_core_mask, cpu)) ); + ASSERT(!cpumask_test_cpu(nxt, per_cpu(cpu_core_mask, cpu))); migrate_factor = 2; cpumask_and(&cpu_idlers, &idlers, per_cpu(cpu_core_mask, cpu)); cpumask_and(&nxt_idlers, &idlers, per_cpu(cpu_core_mask, nxt)); @@ -839,9 +826,9 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit) weight_cpu = cpumask_weight(&cpu_idlers); weight_nxt = cpumask_weight(&nxt_idlers); /* smt_power_savings: consolidate work rather than spreading it */ - if ( sched_smt_power_savings ? - weight_cpu > weight_nxt : - weight_cpu * migrate_factor < weight_nxt ) + if ( sched_smt_power_savings + ? 
weight_cpu > weight_nxt + : weight_cpu * migrate_factor < weight_nxt ) { cpumask_and(&nxt_idlers, &nxt_idlers, cpus); spc = CSCHED_PCPU(nxt); @@ -860,15 +847,14 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit) } if ( commit && spc ) - spc->idle_bias = cpu; + spc->idle_bias = cpu; TRACE_3D(TRC_CSCHED_PICKED_CPU, vc->domain->domain_id, vc->vcpu_id, cpu); return cpu; } -static int -csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc) +static int csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc) { struct csched_vcpu *svc = CSCHED_VCPU(vc); @@ -883,10 +869,10 @@ csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc) return _csched_cpu_pick(ops, vc, 1); } -static inline void -__csched_vcpu_acct_start(struct csched_private *prv, struct csched_vcpu *svc) +static inline void __csched_vcpu_acct_start(struct csched_private *prv, + struct csched_vcpu *svc) { - struct csched_dom * const sdom = svc->sdom; + struct csched_dom *const sdom = svc->sdom; unsigned long flags; spin_lock_irqsave(&prv->lock, flags); @@ -906,24 +892,23 @@ __csched_vcpu_acct_start(struct csched_private *prv, struct csched_vcpu *svc) } } - TRACE_3D(TRC_CSCHED_ACCOUNT_START, sdom->dom->domain_id, - svc->vcpu->vcpu_id, sdom->active_vcpu_count); + TRACE_3D(TRC_CSCHED_ACCOUNT_START, sdom->dom->domain_id, svc->vcpu->vcpu_id, + sdom->active_vcpu_count); spin_unlock_irqrestore(&prv->lock, flags); } -static inline void -__csched_vcpu_acct_stop_locked(struct csched_private *prv, - struct csched_vcpu *svc) +static inline void __csched_vcpu_acct_stop_locked(struct csched_private *prv, + struct csched_vcpu *svc) { - struct csched_dom * const sdom = svc->sdom; + struct csched_dom *const sdom = svc->sdom; - BUG_ON( list_empty(&svc->active_vcpu_elem) ); + BUG_ON(list_empty(&svc->active_vcpu_elem)); SCHED_VCPU_STAT_CRANK(svc, state_idle); SCHED_STAT_CRANK(acct_vcpu_idle); - BUG_ON( prv->weight < sdom->weight ); + BUG_ON(prv->weight < sdom->weight); sdom->active_vcpu_count--; list_del_init(&svc->active_vcpu_elem); prv->weight -= sdom->weight; @@ -932,19 +917,18 @@ __csched_vcpu_acct_stop_locked(struct csched_private *prv, list_del_init(&sdom->active_sdom_elem); } - TRACE_3D(TRC_CSCHED_ACCOUNT_STOP, sdom->dom->domain_id, - svc->vcpu->vcpu_id, sdom->active_vcpu_count); + TRACE_3D(TRC_CSCHED_ACCOUNT_STOP, sdom->dom->domain_id, svc->vcpu->vcpu_id, + sdom->active_vcpu_count); } -static void -csched_vcpu_acct(struct csched_private *prv, unsigned int cpu) +static void csched_vcpu_acct(struct csched_private *prv, unsigned int cpu) { - struct csched_vcpu * const svc = CSCHED_VCPU(current); + struct csched_vcpu *const svc = CSCHED_VCPU(current); const struct scheduler *ops = per_cpu(scheduler, cpu); - ASSERT( current->processor == cpu ); - ASSERT( svc->sdom != NULL ); - ASSERT( !is_idle_vcpu(svc->vcpu) ); + ASSERT(current->processor == cpu); + ASSERT(svc->sdom != NULL); + ASSERT(!is_idle_vcpu(svc->vcpu)); /* * If this VCPU's priority was boosted when it last awoke, reset it. @@ -996,15 +980,15 @@ csched_vcpu_acct(struct csched_private *prv, unsigned int cpu) * idlers. But, if we are here, it means there is someone running * on it, and hence the bit must be zero already. 
*/ - ASSERT(!cpumask_test_cpu(cpu, - CSCHED_PRIV(per_cpu(scheduler, cpu))->idlers)); + ASSERT(!cpumask_test_cpu( + cpu, CSCHED_PRIV(per_cpu(scheduler, cpu))->idlers)); cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ); } } } -static void * -csched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd) +static void *csched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, + void *dd) { struct csched_vcpu *svc; @@ -1017,20 +1001,19 @@ csched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd) INIT_LIST_HEAD(&svc->active_vcpu_elem); svc->sdom = dd; svc->vcpu = vc; - svc->pri = is_idle_domain(vc->domain) ? - CSCHED_PRI_IDLE : CSCHED_PRI_TS_UNDER; + svc->pri = + is_idle_domain(vc->domain) ? CSCHED_PRI_IDLE : CSCHED_PRI_TS_UNDER; SCHED_VCPU_STATS_RESET(svc); SCHED_STAT_CRANK(vcpu_alloc); return svc; } -static void -csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc) +static void csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc) { struct csched_vcpu *svc = vc->sched_priv; spinlock_t *lock; - BUG_ON( is_idle_vcpu(vc) ); + BUG_ON(is_idle_vcpu(vc)); /* csched_cpu_pick() looks in vc->processor's runq, so we need the lock. */ lock = vcpu_schedule_lock_irq(vc); @@ -1049,22 +1032,20 @@ csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc) SCHED_STAT_CRANK(vcpu_insert); } -static void -csched_free_vdata(const struct scheduler *ops, void *priv) +static void csched_free_vdata(const struct scheduler *ops, void *priv) { struct csched_vcpu *svc = priv; - BUG_ON( !list_empty(&svc->runq_elem) ); + BUG_ON(!list_empty(&svc->runq_elem)); xfree(svc); } -static void -csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc) +static void csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc) { struct csched_private *prv = CSCHED_PRIV(ops); - struct csched_vcpu * const svc = CSCHED_VCPU(vc); - struct csched_dom * const sdom = svc->sdom; + struct csched_vcpu *const svc = CSCHED_VCPU(vc); + struct csched_dom *const sdom = svc->sdom; SCHED_STAT_CRANK(vcpu_remove); @@ -1083,18 +1064,17 @@ csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc) spin_unlock_irq(&prv->lock); - BUG_ON( sdom == NULL ); + BUG_ON(sdom == NULL); } -static void -csched_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc) +static void csched_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc) { - struct csched_vcpu * const svc = CSCHED_VCPU(vc); + struct csched_vcpu *const svc = CSCHED_VCPU(vc); unsigned int cpu = vc->processor; SCHED_STAT_CRANK(vcpu_sleep); - BUG_ON( is_idle_vcpu(vc) ); + BUG_ON(is_idle_vcpu(vc)); if ( curr_on_cpu(cpu) == vc ) { @@ -1103,20 +1083,20 @@ csched_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc) * But, we are here because vc is going to sleep while running on cpu, * so the bit must be zero already. 
*/ - ASSERT(!cpumask_test_cpu(cpu, CSCHED_PRIV(per_cpu(scheduler, cpu))->idlers)); + ASSERT(!cpumask_test_cpu(cpu, + CSCHED_PRIV(per_cpu(scheduler, cpu))->idlers)); cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ); } else if ( __vcpu_on_runq(svc) ) runq_remove(svc); } -static void -csched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) +static void csched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) { - struct csched_vcpu * const svc = CSCHED_VCPU(vc); + struct csched_vcpu *const svc = CSCHED_VCPU(vc); bool_t migrating; - BUG_ON( is_idle_vcpu(vc) ); + BUG_ON(is_idle_vcpu(vc)); if ( unlikely(curr_on_cpu(vc->processor) == vc) ) { @@ -1147,7 +1127,7 @@ csched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) * latencies. * * This allows wake-to-run latency sensitive VCPUs to preempt - * more CPU resource intensive VCPUs without impacting overall + * more CPU resource intensive VCPUs without impacting overall * system fairness. * * There are two cases, when we don't want to boost: @@ -1171,22 +1151,18 @@ csched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) __runq_tickle(svc); } -static void -csched_vcpu_yield(const struct scheduler *ops, struct vcpu *vc) +static void csched_vcpu_yield(const struct scheduler *ops, struct vcpu *vc) { - struct csched_vcpu * const svc = CSCHED_VCPU(vc); + struct csched_vcpu *const svc = CSCHED_VCPU(vc); /* Let the scheduler know that this vcpu is trying to yield */ set_bit(CSCHED_FLAG_VCPU_YIELD, &svc->flags); } -static int -csched_dom_cntl( - const struct scheduler *ops, - struct domain *d, - struct xen_domctl_scheduler_op *op) +static int csched_dom_cntl(const struct scheduler *ops, struct domain *d, + struct xen_domctl_scheduler_op *op) { - struct csched_dom * const sdom = CSCHED_DOM(d); + struct csched_dom *const sdom = CSCHED_DOM(d); struct csched_private *prv = CSCHED_PRIV(ops); unsigned long flags; int rc = 0; @@ -1195,7 +1171,7 @@ csched_dom_cntl( * lock. Runq lock not needed anywhere in here. 
*/ spin_lock_irqsave(&prv->lock, flags); - switch ( op->cmd ) + switch (op->cmd) { case XEN_DOMCTL_SCHEDOP_getinfo: op->u.credit.weight = sdom->weight; @@ -1225,9 +1201,8 @@ csched_dom_cntl( return rc; } -static void -csched_aff_cntl(const struct scheduler *ops, struct vcpu *v, - const cpumask_t *hard, const cpumask_t *soft) +static void csched_aff_cntl(const struct scheduler *ops, struct vcpu *v, + const cpumask_t *hard, const cpumask_t *soft) { struct csched_vcpu *svc = CSCHED_VCPU(v); @@ -1241,8 +1216,8 @@ csched_aff_cntl(const struct scheduler *ops, struct vcpu *v, clear_bit(CSCHED_FLAG_VCPU_PINNED, &svc->flags); } -static inline void -__csched_set_tslice(struct csched_private *prv, unsigned int timeslice_ms) +static inline void __csched_set_tslice(struct csched_private *prv, + unsigned int timeslice_ms) { prv->tslice = MILLISECS(timeslice_ms); prv->ticks_per_tslice = CSCHED_TICKS_PER_TSLICE; @@ -1253,26 +1228,25 @@ __csched_set_tslice(struct csched_private *prv, unsigned int timeslice_ms) prv->credit = prv->credits_per_tslice * prv->ncpus; } -static int -csched_sys_cntl(const struct scheduler *ops, - struct xen_sysctl_scheduler_op *sc) +static int csched_sys_cntl(const struct scheduler *ops, + struct xen_sysctl_scheduler_op *sc) { int rc = -EINVAL; struct xen_sysctl_credit_schedule *params = &sc->u.sched_credit; struct csched_private *prv = CSCHED_PRIV(ops); unsigned long flags; - switch ( sc->cmd ) + switch (sc->cmd) { case XEN_SYSCTL_SCHEDOP_putinfo: - if ( params->tslice_ms > XEN_SYSCTL_CSCHED_TSLICE_MAX - || params->tslice_ms < XEN_SYSCTL_CSCHED_TSLICE_MIN - || (params->ratelimit_us - && (params->ratelimit_us > XEN_SYSCTL_SCHED_RATELIMIT_MAX - || params->ratelimit_us < XEN_SYSCTL_SCHED_RATELIMIT_MIN)) - || MICROSECS(params->ratelimit_us) > MILLISECS(params->tslice_ms) - || params->vcpu_migr_delay_us > XEN_SYSCTL_CSCHED_MGR_DLY_MAX_US ) - goto out; + if ( params->tslice_ms > XEN_SYSCTL_CSCHED_TSLICE_MAX || + params->tslice_ms < XEN_SYSCTL_CSCHED_TSLICE_MIN || + (params->ratelimit_us && + (params->ratelimit_us > XEN_SYSCTL_SCHED_RATELIMIT_MAX || + params->ratelimit_us < XEN_SYSCTL_SCHED_RATELIMIT_MIN)) || + MICROSECS(params->ratelimit_us) > MILLISECS(params->tslice_ms) || + params->vcpu_migr_delay_us > XEN_SYSCTL_CSCHED_MGR_DLY_MAX_US ) + goto out; spin_lock_irqsave(&prv->lock, flags); __csched_set_tslice(prv, params->tslice_ms); @@ -1292,12 +1266,12 @@ csched_sys_cntl(const struct scheduler *ops, rc = 0; break; } - out: +out: return rc; } -static void * -csched_alloc_domdata(const struct scheduler *ops, struct domain *dom) +static void *csched_alloc_domdata(const struct scheduler *ops, + struct domain *dom) { struct csched_dom *sdom; @@ -1314,8 +1288,7 @@ csched_alloc_domdata(const struct scheduler *ops, struct domain *dom) return sdom; } -static void -csched_free_domdata(const struct scheduler *ops, void *data) +static void csched_free_domdata(const struct scheduler *ops, void *data) { xfree(data); } @@ -1327,10 +1300,9 @@ csched_free_domdata(const struct scheduler *ops, void *data) * through the runq and move up any UNDERs that are preceded by OVERS. We * remember the last UNDER to make the move up operation O(1). 
*/ -static void -csched_runq_sort(struct csched_private *prv, unsigned int cpu) +static void csched_runq_sort(struct csched_private *prv, unsigned int cpu) { - struct csched_pcpu * const spc = CSCHED_PCPU(cpu); + struct csched_pcpu *const spc = CSCHED_PCPU(cpu); struct list_head *runq, *elem, *next, *last_under; struct csched_vcpu *svc_elem; spinlock_t *lock; @@ -1371,8 +1343,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu) pcpu_schedule_unlock_irqrestore(lock, flags, cpu); } -static void -csched_acct(void* dummy) +static void csched_acct(void *dummy) { struct csched_private *prv = dummy; unsigned long flags; @@ -1390,7 +1361,6 @@ csched_acct(void* dummy) int credit_xtra; int credit; - spin_lock_irqsave(&prv->lock, flags); weight_total = prv->weight; @@ -1418,16 +1388,16 @@ csched_acct(void* dummy) credit_xtra = 0; credit_cap = 0U; - list_for_each_safe( iter_sdom, next_sdom, &prv->active_sdom ) + list_for_each_safe (iter_sdom, next_sdom, &prv->active_sdom) { sdom = list_entry(iter_sdom, struct csched_dom, active_sdom_elem); - BUG_ON( is_idle_domain(sdom->dom) ); - BUG_ON( sdom->active_vcpu_count == 0 ); - BUG_ON( sdom->weight == 0 ); - BUG_ON( (sdom->weight * sdom->active_vcpu_count) > weight_left ); + BUG_ON(is_idle_domain(sdom->dom)); + BUG_ON(sdom->active_vcpu_count == 0); + BUG_ON(sdom->weight == 0); + BUG_ON((sdom->weight * sdom->active_vcpu_count) > weight_left); - weight_left -= ( sdom->weight * sdom->active_vcpu_count ); + weight_left -= (sdom->weight * sdom->active_vcpu_count); /* * A domain's fair share is computed using its weight in competition @@ -1440,11 +1410,10 @@ csched_acct(void* dummy) credit_peak = sdom->active_vcpu_count * prv->credits_per_tslice; if ( prv->credit_balance < 0 ) { - credit_peak += ( ( -prv->credit_balance - * sdom->weight - * sdom->active_vcpu_count) + - (weight_total - 1) - ) / weight_total; + credit_peak += ((-prv->credit_balance * sdom->weight * + sdom->active_vcpu_count) + + (weight_total - 1)) / + weight_total; } if ( sdom->cap != 0U ) @@ -1454,15 +1423,13 @@ csched_acct(void* dummy) credit_peak = credit_cap; /* FIXME -- set cap per-vcpu as well...? 
*/ - credit_cap = ( credit_cap + ( sdom->active_vcpu_count - 1 ) - ) / sdom->active_vcpu_count; + credit_cap = (credit_cap + (sdom->active_vcpu_count - 1)) / + sdom->active_vcpu_count; } - credit_fair = ( ( credit_total - * sdom->weight - * sdom->active_vcpu_count ) - + (weight_total - 1) - ) / weight_total; + credit_fair = ((credit_total * sdom->weight * sdom->active_vcpu_count) + + (weight_total - 1)) / + weight_total; if ( credit_fair < credit_peak ) { @@ -1473,10 +1440,9 @@ csched_acct(void* dummy) if ( weight_left != 0U ) { /* Give other domains a chance at unused credits */ - credit_total += ( ( ( credit_fair - credit_peak - ) * weight_total - ) + ( weight_left - 1 ) - ) / weight_left; + credit_total += (((credit_fair - credit_peak) * weight_total) + + (weight_left - 1)) / + weight_left; } if ( credit_xtra ) @@ -1495,14 +1461,13 @@ csched_acct(void* dummy) } /* Compute fair share per VCPU */ - credit_fair = ( credit_fair + ( sdom->active_vcpu_count - 1 ) - ) / sdom->active_vcpu_count; - + credit_fair = (credit_fair + (sdom->active_vcpu_count - 1)) / + sdom->active_vcpu_count; - list_for_each_safe( iter_vcpu, next_vcpu, &sdom->active_vcpu ) + list_for_each_safe (iter_vcpu, next_vcpu, &sdom->active_vcpu) { svc = list_entry(iter_vcpu, struct csched_vcpu, active_vcpu_elem); - BUG_ON( sdom != svc->sdom ); + BUG_ON(sdom != svc->sdom); /* Increment credit */ atomic_add(credit_fair, &svc->credit); @@ -1517,8 +1482,7 @@ csched_acct(void* dummy) svc->pri = CSCHED_PRI_TS_OVER; /* Park running VCPUs of capped-out domains */ - if ( sdom->cap != 0U && - credit < -credit_cap && + if ( sdom->cap != 0U && credit < -credit_cap && !test_and_set_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) ) { SCHED_STAT_CRANK(vcpu_park); @@ -1574,11 +1538,10 @@ csched_acct(void* dummy) prv->runq_sort++; out: - set_timer( &prv->master_ticker, NOW() + prv->tslice); + set_timer(&prv->master_ticker, NOW() + prv->tslice); } -static void -csched_tick(void *_cpu) +static void csched_tick(void *_cpu) { unsigned int cpu = (unsigned long)_cpu; struct csched_pcpu *spc = CSCHED_PCPU(cpu); @@ -1601,14 +1564,15 @@ csched_tick(void *_cpu) */ csched_runq_sort(prv, cpu); - set_timer(&spc->ticker, NOW() + MICROSECS(prv->tick_period_us) ); + set_timer(&spc->ticker, NOW() + MICROSECS(prv->tick_period_us)); } -static struct csched_vcpu * -csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step) +static struct csched_vcpu *csched_runq_steal(int peer_cpu, int cpu, int pri, + int balance_step) { - const struct csched_private * const prv = CSCHED_PRIV(per_cpu(scheduler, cpu)); - const struct csched_pcpu * const peer_pcpu = CSCHED_PCPU(peer_cpu); + const struct csched_private *const prv = + CSCHED_PRIV(per_cpu(scheduler, cpu)); + const struct csched_pcpu *const peer_pcpu = CSCHED_PCPU(peer_cpu); struct csched_vcpu *speer; struct list_head *iter; struct vcpu *vc; @@ -1622,7 +1586,7 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step) if ( unlikely(is_idle_vcpu(curr_on_cpu(peer_cpu))) ) goto out; - list_for_each( iter, &peer_pcpu->runq ) + list_for_each (iter, &peer_pcpu->runq) { speer = __runq_elem(iter); @@ -1635,7 +1599,7 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step) /* Is this VCPU runnable on our PCPU? 
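(Aside, not part of the patch: the credit_peak, credit_cap and credit_fair computations above all use the pattern (x + (d - 1)) / d, which is integer division rounded up, so a domain's share is never lost to truncation when credit is split across its active vcpus. A small standalone illustration; the helper name is mine, not Xen's:

    #include <stdio.h>

    /* Round-up integer division, mirroring the "(x + (d - 1)) / d" pattern. */
    static unsigned int div_round_up(unsigned int x, unsigned int d)
    {
        return (x + (d - 1)) / d;
    }

    int main(void)
    {
        /* e.g. splitting 10 credits across 3 active vcpus */
        printf("%u\n", 10u / 3u);              /* 3: plain division truncates */
        printf("%u\n", div_round_up(10u, 3u)); /* 4: rounded up per vcpu      */
        return 0;
    }
)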
*/ vc = speer->vcpu; - BUG_ON( is_idle_vcpu(vc) ); + BUG_ON(is_idle_vcpu(vc)); /* * If the vcpu is still in peer_cpu's scheduling tail, or if it @@ -1651,16 +1615,16 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step) * vCPUs with useful soft affinities in some sort of bitmap * or counter. */ - if ( vc->is_running || (balance_step == BALANCE_SOFT_AFFINITY && - !has_soft_affinity(vc)) ) + if ( vc->is_running || + (balance_step == BALANCE_SOFT_AFFINITY && !has_soft_affinity(vc)) ) continue; affinity_balance_cpumask(vc, balance_step, cpumask_scratch); if ( __csched_vcpu_is_migrateable(prv, vc, cpu, cpumask_scratch) ) { /* We got a candidate. Grab it! */ - TRACE_3D(TRC_CSCHED_STOLEN_VCPU, peer_cpu, - vc->domain->domain_id, vc->vcpu_id); + TRACE_3D(TRC_CSCHED_STOLEN_VCPU, peer_cpu, vc->domain->domain_id, + vc->vcpu_id); SCHED_VCPU_STAT_CRANK(speer, migrate_q); SCHED_STAT_CRANK(migrate_queued); WARN_ON(vc->is_urgent); @@ -1675,14 +1639,15 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step) return speer; } } - out: +out: SCHED_STAT_CRANK(steal_peer_idle); return NULL; } -static struct csched_vcpu * -csched_load_balance(struct csched_private *prv, int cpu, - struct csched_vcpu *snext, bool_t *stolen) +static struct csched_vcpu *csched_load_balance(struct csched_private *prv, + int cpu, + struct csched_vcpu *snext, + bool_t *stolen) { struct cpupool *c = per_cpu(cpupool, cpu); struct csched_vcpu *speer; @@ -1691,7 +1656,7 @@ csched_load_balance(struct csched_private *prv, int cpu, int peer_cpu, first_cpu, peer_node, bstep; int node = cpu_to_node(cpu); - BUG_ON( cpu != snext->vcpu->processor ); + BUG_ON(cpu != snext->vcpu->processor); online = cpupool_online_cpumask(c); /* @@ -1715,7 +1680,7 @@ csched_load_balance(struct csched_private *prv, int cpu, * 1. any "soft-affine work" to steal first, * 2. if not finding anything, any "hard-affine work" to steal. */ - for_each_affinity_balance_step( bstep ) + for_each_affinity_balance_step (bstep) { /* * We peek at the non-idling CPUs in a node-wise fashion. In fact, @@ -1725,8 +1690,7 @@ csched_load_balance(struct csched_private *prv, int cpu, * stays local, there might be some node-wide cache[s], etc.). */ peer_node = node; - do - { + do { /* Select the pCPUs in this node that have work we can steal. */ cpumask_andnot(&workers, online, prv->idlers); cpumask_and(&workers, &workers, &node_to_cpumask(peer_node)); @@ -1736,8 +1700,7 @@ csched_load_balance(struct csched_private *prv, int cpu, if ( first_cpu >= nr_cpu_ids ) goto next_node; peer_cpu = first_cpu; - do - { + do { spinlock_t *lock; /* @@ -1756,7 +1719,8 @@ csched_load_balance(struct csched_private *prv, int cpu, * - if we race with inc_nr_runnable(), we skip a pCPU that may * have runnable vCPUs in its runqueue, but that's not a * problem because: - * + if racing with csched_vcpu_insert() or csched_vcpu_wake(), + * + if racing with csched_vcpu_insert() or + * csched_vcpu_wake(), * __runq_tickle() will be called afterwords, so the vCPU * won't get stuck in the runqueue for too long; * + if racing with csched_runq_steal(), it may be that a @@ -1791,8 +1755,10 @@ csched_load_balance(struct csched_private *prv, int cpu, TRACE_2D(TRC_CSCHED_STEAL_CHECK, peer_cpu, /* checked */ 1); /* Any work over there to steal? */ - speer = cpumask_test_cpu(peer_cpu, online) ? - csched_runq_steal(peer_cpu, cpu, snext->pri, bstep) : NULL; + speer = + cpumask_test_cpu(peer_cpu, online) + ? 
csched_runq_steal(peer_cpu, cpu, snext->pri, bstep) + : NULL; pcpu_schedule_unlock(lock, peer_cpu); /* As soon as one vcpu is found, balancing ends */ @@ -1808,17 +1774,17 @@ csched_load_balance(struct csched_private *prv, int cpu, return speer; } - next_cpu: + next_cpu: peer_cpu = cpumask_cycle(peer_cpu, &workers); - } while( peer_cpu != first_cpu ); + } while ( peer_cpu != first_cpu ); - next_node: + next_node: peer_node = cycle_node(peer_node, node_online_map); - } while( peer_node != node ); + } while ( peer_node != node ); } - out: +out: /* Failed to find more important work elsewhere... */ __runq_remove(snext); return snext; @@ -1828,13 +1794,13 @@ csched_load_balance(struct csched_private *prv, int cpu, * This function is in the critical path. It is designed to be simple and * fast for the common case. */ -static struct task_slice -csched_schedule( - const struct scheduler *ops, s_time_t now, bool_t tasklet_work_scheduled) +static struct task_slice csched_schedule(const struct scheduler *ops, + s_time_t now, + bool_t tasklet_work_scheduled) { const int cpu = smp_processor_id(); - struct list_head * const runq = RUNQ(cpu); - struct csched_vcpu * const scurr = CSCHED_VCPU(current); + struct list_head *const runq = RUNQ(cpu); + struct csched_vcpu *const scurr = CSCHED_VCPU(current); struct csched_private *prv = CSCHED_PRIV(ops); struct csched_vcpu *snext; struct task_slice ret; @@ -1850,14 +1816,14 @@ csched_schedule( */ if ( unlikely(tb_init_done) ) { - struct { - unsigned cpu:16, tasklet:8, idle:8; + struct + { + unsigned cpu : 16, tasklet : 8, idle : 8; } d; d.cpu = cpu; d.tasklet = tasklet_work_scheduled; d.idle = is_idle_vcpu(current); - __trace_var(TRC_CSCHED_SCHEDULE, 1, sizeof(d), - (unsigned char *)&d); + __trace_var(TRC_CSCHED_SCHEDULE, 1, sizeof(d), (unsigned char *)&d); } runtime = now - current->runstate.state_entry_time; @@ -1896,12 +1862,9 @@ csched_schedule( * In fact, it may be the case that scurr is about to spin, and there's * no point forcing it to do so until rate limiting expires. */ - if ( !test_bit(CSCHED_FLAG_VCPU_YIELD, &scurr->flags) - && !tasklet_work_scheduled - && prv->ratelimit - && vcpu_runnable(current) - && !is_idle_vcpu(current) - && runtime < prv->ratelimit ) + if ( !test_bit(CSCHED_FLAG_VCPU_YIELD, &scurr->flags) && + !tasklet_work_scheduled && prv->ratelimit && vcpu_runnable(current) && + !is_idle_vcpu(current) && runtime < prv->ratelimit ) { snext = scurr; snext->start_time += now; @@ -1917,8 +1880,9 @@ csched_schedule( tslice = CSCHED_MIN_TIMER; if ( unlikely(tb_init_done) ) { - struct { - unsigned vcpu:16, dom:16; + struct + { + unsigned vcpu : 16, dom : 16; unsigned runtime; } d; d.dom = scurr->vcpu->domain->domain_id; @@ -1940,7 +1904,7 @@ csched_schedule( __runq_insert(scurr); else { - BUG_ON( is_idle_vcpu(current) || list_empty(runq) ); + BUG_ON(is_idle_vcpu(current) || list_empty(runq)); /* Current has blocked. Update the runnable counter for this cpu. */ dec_nr_runnable(cpu); } @@ -1995,47 +1959,36 @@ out: /* * Return task to run next... */ - ret.time = (is_idle_vcpu(snext->vcpu) ? - -1 : tslice); + ret.time = (is_idle_vcpu(snext->vcpu) ? 
-1 : tslice); ret.task = snext->vcpu; CSCHED_VCPU_CHECK(ret.task); return ret; } -static void -csched_dump_vcpu(struct csched_vcpu *svc) +static void csched_dump_vcpu(struct csched_vcpu *svc) { - struct csched_dom * const sdom = svc->sdom; + struct csched_dom *const sdom = svc->sdom; - printk("[%i.%i] pri=%i flags=%x cpu=%i", - svc->vcpu->domain->domain_id, - svc->vcpu->vcpu_id, - svc->pri, - svc->flags, - svc->vcpu->processor); + printk("[%i.%i] pri=%i flags=%x cpu=%i", svc->vcpu->domain->domain_id, + svc->vcpu->vcpu_id, svc->pri, svc->flags, svc->vcpu->processor); if ( sdom ) { printk(" credit=%i [w=%u,cap=%u]", atomic_read(&svc->credit), - sdom->weight, sdom->cap); + sdom->weight, sdom->cap); #ifdef CSCHED_STATS - printk(" (%d+%u) {a/i=%u/%u m=%u+%u (k=%u)}", - svc->stats.credit_last, - svc->stats.credit_incr, - svc->stats.state_active, - svc->stats.state_idle, - svc->stats.migrate_q, - svc->stats.migrate_r, - svc->stats.kicked_away); + printk(" (%d+%u) {a/i=%u/%u m=%u+%u (k=%u)}", svc->stats.credit_last, + svc->stats.credit_incr, svc->stats.state_active, + svc->stats.state_idle, svc->stats.migrate_q, + svc->stats.migrate_r, svc->stats.kicked_away); #endif } printk("\n"); } -static void -csched_dump_pcpu(const struct scheduler *ops, int cpu) +static void csched_dump_pcpu(const struct scheduler *ops, int cpu) { struct list_head *runq, *iter; struct csched_private *prv = CSCHED_PRIV(ops); @@ -2058,10 +2011,10 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu) spc = CSCHED_PCPU(cpu); runq = &spc->runq; - printk("CPU[%02d] nr_run=%d, sort=%d, sibling=%*pb, core=%*pb\n", - cpu, spc->nr_runnable, spc->runq_sort_last, - nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)), - nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu))); + printk("CPU[%02d] nr_run=%d, sort=%d, sibling=%*pb, core=%*pb\n", cpu, + spc->nr_runnable, spc->runq_sort_last, nr_cpu_ids, + cpumask_bits(per_cpu(cpu_sibling_mask, cpu)), nr_cpu_ids, + cpumask_bits(per_cpu(cpu_core_mask, cpu))); /* current VCPU (nothing to say if that's the idle vcpu). 
*/ svc = CSCHED_VCPU(curr_on_cpu(cpu)); @@ -2072,7 +2025,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu) } loop = 0; - list_for_each( iter, runq ) + list_for_each (iter, runq) { svc = __runq_elem(iter); if ( svc ) @@ -2086,8 +2039,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu) spin_unlock_irqrestore(&prv->lock, flags); } -static void -csched_dump(const struct scheduler *ops) +static void csched_dump(const struct scheduler *ops) { struct list_head *iter_sdom, *iter_svc; struct csched_private *prv = CSCHED_PRIV(ops); @@ -2104,34 +2056,27 @@ csched_dump(const struct scheduler *ops) "\tweight = %u\n" "\trunq_sort = %u\n" "\tdefault-weight = %d\n" - "\ttslice = %"PRI_stime"ms\n" - "\tratelimit = %"PRI_stime"us\n" + "\ttslice = %" PRI_stime "ms\n" + "\tratelimit = %" PRI_stime "us\n" "\tcredits per msec = %d\n" "\tticks per tslice = %d\n" - "\tmigration delay = %"PRI_stime"us\n", - prv->ncpus, - prv->master, - prv->credit, - prv->credit_balance, - prv->weight, - prv->runq_sort, - CSCHED_DEFAULT_WEIGHT, - prv->tslice / MILLISECS(1), - prv->ratelimit / MICROSECS(1), - CSCHED_CREDITS_PER_MSEC, - prv->ticks_per_tslice, - prv->vcpu_migr_delay/ MICROSECS(1)); + "\tmigration delay = %" PRI_stime "us\n", + prv->ncpus, prv->master, prv->credit, prv->credit_balance, + prv->weight, prv->runq_sort, CSCHED_DEFAULT_WEIGHT, + prv->tslice / MILLISECS(1), prv->ratelimit / MICROSECS(1), + CSCHED_CREDITS_PER_MSEC, prv->ticks_per_tslice, + prv->vcpu_migr_delay / MICROSECS(1)); printk("idlers: %*pb\n", nr_cpu_ids, cpumask_bits(prv->idlers)); printk("active vcpus:\n"); loop = 0; - list_for_each( iter_sdom, &prv->active_sdom ) + list_for_each (iter_sdom, &prv->active_sdom) { struct csched_dom *sdom; sdom = list_entry(iter_sdom, struct csched_dom, active_sdom_elem); - list_for_each( iter_svc, &sdom->active_vcpu ) + list_for_each (iter_svc, &sdom->active_vcpu) { struct csched_vcpu *svc; spinlock_t *lock; @@ -2149,17 +2094,16 @@ csched_dump(const struct scheduler *ops) spin_unlock_irqrestore(&prv->lock, flags); } -static int __init -csched_global_init(void) +static int __init csched_global_init(void) { if ( sched_credit_tslice_ms > XEN_SYSCTL_CSCHED_TSLICE_MAX || sched_credit_tslice_ms < XEN_SYSCTL_CSCHED_TSLICE_MIN ) { - printk("WARNING: sched_credit_tslice_ms outside of valid range [%d,%d].\n" - " Resetting to default %u\n", - XEN_SYSCTL_CSCHED_TSLICE_MIN, - XEN_SYSCTL_CSCHED_TSLICE_MAX, - CSCHED_DEFAULT_TSLICE_MS); + printk( + "WARNING: sched_credit_tslice_ms outside of valid range [%d,%d].\n" + " Resetting to default %u\n", + XEN_SYSCTL_CSCHED_TSLICE_MIN, XEN_SYSCTL_CSCHED_TSLICE_MAX, + CSCHED_DEFAULT_TSLICE_MS); sched_credit_tslice_ms = CSCHED_DEFAULT_TSLICE_MS; } @@ -2171,16 +2115,16 @@ csched_global_init(void) if ( vcpu_migration_delay_us > XEN_SYSCTL_CSCHED_MGR_DLY_MAX_US ) { vcpu_migration_delay_us = 0; - printk("WARNING: vcpu_migration_delay outside of valid range [0,%d]us.\n" - "Resetting to default: %u\n", - XEN_SYSCTL_CSCHED_MGR_DLY_MAX_US, vcpu_migration_delay_us); + printk( + "WARNING: vcpu_migration_delay outside of valid range [0,%d]us.\n" + "Resetting to default: %u\n", + XEN_SYSCTL_CSCHED_MGR_DLY_MAX_US, vcpu_migration_delay_us); } return 0; } -static int -csched_init(struct scheduler *ops) +static int csched_init(struct scheduler *ops) { struct csched_private *prv; @@ -2195,8 +2139,7 @@ csched_init(struct scheduler *ops) return -ENOMEM; } - if ( !zalloc_cpumask_var(&prv->cpus) || - !zalloc_cpumask_var(&prv->idlers) ) + if ( !zalloc_cpumask_var(&prv->cpus) || 
!zalloc_cpumask_var(&prv->idlers) ) { free_cpumask_var(prv->cpus); xfree(prv->balance_bias); @@ -2221,8 +2164,7 @@ csched_init(struct scheduler *ops) return 0; } -static void -csched_deinit(struct scheduler *ops) +static void csched_deinit(struct scheduler *ops) { struct csched_private *prv; @@ -2256,48 +2198,48 @@ static void csched_tick_resume(const struct scheduler *ops, unsigned int cpu) prv = CSCHED_PRIV(ops); - set_timer(&spc->ticker, now + MICROSECS(prv->tick_period_us) - - now % MICROSECS(prv->tick_period_us) ); + set_timer(&spc->ticker, now + MICROSECS(prv->tick_period_us) - + now % MICROSECS(prv->tick_period_us)); } static const struct scheduler sched_credit_def = { - .name = "SMP Credit Scheduler", - .opt_name = "credit", - .sched_id = XEN_SCHEDULER_CREDIT, - .sched_data = NULL, + .name = "SMP Credit Scheduler", + .opt_name = "credit", + .sched_id = XEN_SCHEDULER_CREDIT, + .sched_data = NULL, - .global_init = csched_global_init, + .global_init = csched_global_init, - .insert_vcpu = csched_vcpu_insert, - .remove_vcpu = csched_vcpu_remove, + .insert_vcpu = csched_vcpu_insert, + .remove_vcpu = csched_vcpu_remove, - .sleep = csched_vcpu_sleep, - .wake = csched_vcpu_wake, - .yield = csched_vcpu_yield, + .sleep = csched_vcpu_sleep, + .wake = csched_vcpu_wake, + .yield = csched_vcpu_yield, - .adjust = csched_dom_cntl, - .adjust_affinity= csched_aff_cntl, - .adjust_global = csched_sys_cntl, + .adjust = csched_dom_cntl, + .adjust_affinity = csched_aff_cntl, + .adjust_global = csched_sys_cntl, - .pick_cpu = csched_cpu_pick, - .do_schedule = csched_schedule, + .pick_cpu = csched_cpu_pick, + .do_schedule = csched_schedule, .dump_cpu_state = csched_dump_pcpu, - .dump_settings = csched_dump, - .init = csched_init, - .deinit = csched_deinit, - .alloc_vdata = csched_alloc_vdata, - .free_vdata = csched_free_vdata, - .alloc_pdata = csched_alloc_pdata, - .init_pdata = csched_init_pdata, - .deinit_pdata = csched_deinit_pdata, - .free_pdata = csched_free_pdata, - .switch_sched = csched_switch_sched, - .alloc_domdata = csched_alloc_domdata, - .free_domdata = csched_free_domdata, - - .tick_suspend = csched_tick_suspend, - .tick_resume = csched_tick_resume, + .dump_settings = csched_dump, + .init = csched_init, + .deinit = csched_deinit, + .alloc_vdata = csched_alloc_vdata, + .free_vdata = csched_free_vdata, + .alloc_pdata = csched_alloc_pdata, + .init_pdata = csched_init_pdata, + .deinit_pdata = csched_deinit_pdata, + .free_pdata = csched_free_pdata, + .switch_sched = csched_switch_sched, + .alloc_domdata = csched_alloc_domdata, + .free_domdata = csched_free_domdata, + + .tick_suspend = csched_tick_suspend, + .tick_resume = csched_tick_resume, }; REGISTER_SCHEDULER(sched_credit_def); diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c index 543dc3664d..ea6f9e18e5 100644 --- a/xen/common/sched_credit2.c +++ b/xen/common/sched_credit2.c @@ -30,44 +30,44 @@ /* #define d2printk printk */ #define d2printk(x...) - /* * Credit2 tracing events ("only" 512 available!). Check * include/public/trace.h for more details. 
*/ -#define TRC_CSCHED2_TICK TRC_SCHED_CLASS_EVT(CSCHED2, 1) -#define TRC_CSCHED2_RUNQ_POS TRC_SCHED_CLASS_EVT(CSCHED2, 2) -#define TRC_CSCHED2_CREDIT_BURN TRC_SCHED_CLASS_EVT(CSCHED2, 3) -#define TRC_CSCHED2_CREDIT_ADD TRC_SCHED_CLASS_EVT(CSCHED2, 4) -#define TRC_CSCHED2_TICKLE_CHECK TRC_SCHED_CLASS_EVT(CSCHED2, 5) -#define TRC_CSCHED2_TICKLE TRC_SCHED_CLASS_EVT(CSCHED2, 6) -#define TRC_CSCHED2_CREDIT_RESET TRC_SCHED_CLASS_EVT(CSCHED2, 7) -#define TRC_CSCHED2_SCHED_TASKLET TRC_SCHED_CLASS_EVT(CSCHED2, 8) -#define TRC_CSCHED2_UPDATE_LOAD TRC_SCHED_CLASS_EVT(CSCHED2, 9) -#define TRC_CSCHED2_RUNQ_ASSIGN TRC_SCHED_CLASS_EVT(CSCHED2, 10) +#define TRC_CSCHED2_TICK TRC_SCHED_CLASS_EVT(CSCHED2, 1) +#define TRC_CSCHED2_RUNQ_POS TRC_SCHED_CLASS_EVT(CSCHED2, 2) +#define TRC_CSCHED2_CREDIT_BURN TRC_SCHED_CLASS_EVT(CSCHED2, 3) +#define TRC_CSCHED2_CREDIT_ADD TRC_SCHED_CLASS_EVT(CSCHED2, 4) +#define TRC_CSCHED2_TICKLE_CHECK TRC_SCHED_CLASS_EVT(CSCHED2, 5) +#define TRC_CSCHED2_TICKLE TRC_SCHED_CLASS_EVT(CSCHED2, 6) +#define TRC_CSCHED2_CREDIT_RESET TRC_SCHED_CLASS_EVT(CSCHED2, 7) +#define TRC_CSCHED2_SCHED_TASKLET TRC_SCHED_CLASS_EVT(CSCHED2, 8) +#define TRC_CSCHED2_UPDATE_LOAD TRC_SCHED_CLASS_EVT(CSCHED2, 9) +#define TRC_CSCHED2_RUNQ_ASSIGN TRC_SCHED_CLASS_EVT(CSCHED2, 10) #define TRC_CSCHED2_UPDATE_VCPU_LOAD TRC_SCHED_CLASS_EVT(CSCHED2, 11) #define TRC_CSCHED2_UPDATE_RUNQ_LOAD TRC_SCHED_CLASS_EVT(CSCHED2, 12) -#define TRC_CSCHED2_TICKLE_NEW TRC_SCHED_CLASS_EVT(CSCHED2, 13) -#define TRC_CSCHED2_RUNQ_MAX_WEIGHT TRC_SCHED_CLASS_EVT(CSCHED2, 14) -#define TRC_CSCHED2_MIGRATE TRC_SCHED_CLASS_EVT(CSCHED2, 15) -#define TRC_CSCHED2_LOAD_CHECK TRC_SCHED_CLASS_EVT(CSCHED2, 16) -#define TRC_CSCHED2_LOAD_BALANCE TRC_SCHED_CLASS_EVT(CSCHED2, 17) -#define TRC_CSCHED2_PICKED_CPU TRC_SCHED_CLASS_EVT(CSCHED2, 19) -#define TRC_CSCHED2_RUNQ_CANDIDATE TRC_SCHED_CLASS_EVT(CSCHED2, 20) -#define TRC_CSCHED2_SCHEDULE TRC_SCHED_CLASS_EVT(CSCHED2, 21) -#define TRC_CSCHED2_RATELIMIT TRC_SCHED_CLASS_EVT(CSCHED2, 22) -#define TRC_CSCHED2_RUNQ_CAND_CHECK TRC_SCHED_CLASS_EVT(CSCHED2, 23) +#define TRC_CSCHED2_TICKLE_NEW TRC_SCHED_CLASS_EVT(CSCHED2, 13) +#define TRC_CSCHED2_RUNQ_MAX_WEIGHT TRC_SCHED_CLASS_EVT(CSCHED2, 14) +#define TRC_CSCHED2_MIGRATE TRC_SCHED_CLASS_EVT(CSCHED2, 15) +#define TRC_CSCHED2_LOAD_CHECK TRC_SCHED_CLASS_EVT(CSCHED2, 16) +#define TRC_CSCHED2_LOAD_BALANCE TRC_SCHED_CLASS_EVT(CSCHED2, 17) +#define TRC_CSCHED2_PICKED_CPU TRC_SCHED_CLASS_EVT(CSCHED2, 19) +#define TRC_CSCHED2_RUNQ_CANDIDATE TRC_SCHED_CLASS_EVT(CSCHED2, 20) +#define TRC_CSCHED2_SCHEDULE TRC_SCHED_CLASS_EVT(CSCHED2, 21) +#define TRC_CSCHED2_RATELIMIT TRC_SCHED_CLASS_EVT(CSCHED2, 22) +#define TRC_CSCHED2_RUNQ_CAND_CHECK TRC_SCHED_CLASS_EVT(CSCHED2, 23) /* * TODO: * + Hyperthreading * - "Discount" time run on a thread with busy siblings * + Algorithm: - * - "Mixed work" problem: if a VM is playing audio (5%) but also burning cpu (e.g., - * a flash animation in the background) can we schedule it with low enough latency - * so that audio doesn't skip? + * - "Mixed work" problem: if a VM is playing audio (5%) but also burning cpu + * (e.g., a flash animation in the background) can we schedule it with low + * enough latency so that audio doesn't skip? * + Optimizing - * - Profiling, making new algorithms, making math more efficient (no long division) + * - Profiling, making new algorithms, making math more efficient (no long + * division) */ /* @@ -215,44 +215,44 @@ * Basic constants */ /* Default weight: How much a new domain starts with. 
*/ -#define CSCHED2_DEFAULT_WEIGHT 256 +#define CSCHED2_DEFAULT_WEIGHT 256 /* * Min timer: Minimum length a timer will be set, to * achieve efficiency. */ -#define CSCHED2_MIN_TIMER MICROSECS(500) +#define CSCHED2_MIN_TIMER MICROSECS(500) /* * Amount of credit VMs begin with, and are reset to. * ATM, set so that highest-weight VMs can only run for 10ms * before a reset event. */ -#define CSCHED2_CREDIT_INIT MILLISECS(10) +#define CSCHED2_CREDIT_INIT MILLISECS(10) /* * Amount of credit the idle vcpus have. It never changes, as idle * vcpus does not consume credits, and it must be lower than whatever * amount of credit 'regular' vcpu would end up with. */ -#define CSCHED2_IDLE_CREDIT (-(1U<<30)) +#define CSCHED2_IDLE_CREDIT (-(1U << 30)) /* * Carryover: How much "extra" credit may be carried over after * a reset. */ -#define CSCHED2_CARRYOVER_MAX CSCHED2_MIN_TIMER +#define CSCHED2_CARRYOVER_MAX CSCHED2_MIN_TIMER /* * Stickiness: Cross-L2 migration resistance. Should be less than * MIN_TIMER. */ -#define CSCHED2_MIGRATE_RESIST ((opt_migrate_resist)*MICROSECS(1)) +#define CSCHED2_MIGRATE_RESIST ((opt_migrate_resist)*MICROSECS(1)) /* How much to "compensate" a vcpu for L2 migration. */ #define CSCHED2_MIGRATE_COMPENSATION MICROSECS(50) /* How tolerant we should be when peeking at runtime of vcpus on other cpus */ #define CSCHED2_RATELIMIT_TICKLE_TOLERANCE MICROSECS(50) /* Reset: Value below which credit will be reset. */ -#define CSCHED2_CREDIT_RESET 0 +#define CSCHED2_CREDIT_RESET 0 /* Max timer: Maximum time a guest can be run for. */ -#define CSCHED2_MAX_TIMER CSCHED2_CREDIT_INIT +#define CSCHED2_MAX_TIMER CSCHED2_CREDIT_INIT /* Period of the cap replenishment timer. */ -#define CSCHED2_BDGT_REPL_PERIOD ((opt_cap_period)*MILLISECS(1)) +#define CSCHED2_BDGT_REPL_PERIOD ((opt_cap_period)*MILLISECS(1)) /* * Flags @@ -268,37 +268,37 @@ * + Checked to be false in runq_insert. */ #define __CSFLAG_scheduled 1 -#define CSFLAG_scheduled (1U<<__CSFLAG_scheduled) +#define CSFLAG_scheduled (1U << __CSFLAG_scheduled) /* - * CSFLAG_delayed_runq_add: Do we need to add this to the runqueue once it'd done - * being context switched out? + * CSFLAG_delayed_runq_add: Do we need to add this to the runqueue once it'd + * done being context switched out? * + Set when scheduling out in csched2_schedule() if prev is runnable * + Set in csched2_vcpu_wake if it finds CSFLAG_scheduled set * + Read in csched2_context_saved(). If set, it adds prev to the runqueue and * clears the bit. */ #define __CSFLAG_delayed_runq_add 2 -#define CSFLAG_delayed_runq_add (1U<<__CSFLAG_delayed_runq_add) +#define CSFLAG_delayed_runq_add (1U << __CSFLAG_delayed_runq_add) /* * CSFLAG_runq_migrate_request: This vcpu is being migrated as a result of a * credit2-initiated runq migrate request; migrate it to the runqueue indicated - * in the svc struct. + * in the svc struct. */ #define __CSFLAG_runq_migrate_request 3 -#define CSFLAG_runq_migrate_request (1U<<__CSFLAG_runq_migrate_request) +#define CSFLAG_runq_migrate_request (1U << __CSFLAG_runq_migrate_request) /* * CSFLAG_vcpu_yield: this vcpu was running, and has called vcpu_yield(). The * scheduler is invoked to see if we can give the cpu to someone else, and * get back to the yielding vcpu in a while. */ #define __CSFLAG_vcpu_yield 4 -#define CSFLAG_vcpu_yield (1U<<__CSFLAG_vcpu_yield) +#define CSFLAG_vcpu_yield (1U << __CSFLAG_vcpu_yield) /* * CSFLAGS_pinned: this vcpu is currently 'pinned', i.e., has its hard * affinity set to one and only 1 cpu (and, hence, can only run there). 
*/ #define __CSFLAG_pinned 5 -#define CSFLAG_pinned (1U<<__CSFLAG_pinned) +#define CSFLAG_pinned (1U << __CSFLAG_pinned) static unsigned int __read_mostly opt_migrate_resist = 500; integer_param("sched_credit2_migrate_resist", opt_migrate_resist); @@ -365,11 +365,11 @@ integer_param("sched_credit2_migrate_resist", opt_migrate_resist); */ /* If >0, decreases the granularity of time samples used for load tracking. */ -#define LOADAVG_GRANULARITY_SHIFT (10) +#define LOADAVG_GRANULARITY_SHIFT (10) /* Time window during which we still give value to previous load history. */ -#define LOADAVG_WINDOW_SHIFT (30) +#define LOADAVG_WINDOW_SHIFT (30) /* 18 bits by default (and not less than 4) for decimals. */ -#define LOADAVG_PRECISION_SHIFT (18) +#define LOADAVG_PRECISION_SHIFT (18) #define LOADAVG_PRECISION_SHIFT_MIN (4) /* @@ -381,7 +381,8 @@ integer_param("sched_credit2_migrate_resist", opt_migrate_resist); */ static unsigned int __read_mostly opt_load_window_shift = LOADAVG_WINDOW_SHIFT; integer_param("credit2_load_window_shift", opt_load_window_shift); -static unsigned int __read_mostly opt_load_precision_shift = LOADAVG_PRECISION_SHIFT; +static unsigned int __read_mostly opt_load_precision_shift = + LOADAVG_PRECISION_SHIFT; integer_param("credit2_load_precision_shift", opt_load_precision_shift); static int __read_mostly opt_underload_balance_tolerance = 0; @@ -394,7 +395,7 @@ integer_param("credit2_balance_over", opt_overload_balance_tolerance); * they receive depends on their cap. For instance, a domain with a 50% cap * will receive 50% of 10 ms, so 5 ms. */ -static unsigned int __read_mostly opt_cap_period = 10; /* ms */ +static unsigned int __read_mostly opt_cap_period = 10; /* ms */ integer_param("credit2_cap_period_ms", opt_cap_period); /* @@ -428,18 +429,16 @@ integer_param("credit2_cap_period_ms", opt_cap_period); * either the same physical core, the same physical socket, the same NUMA * node, or just all of them, will be put together to form runqueues. 
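(Aside, not part of the patch: LOADAVG_PRECISION_SHIFT above says load averages keep 18 fractional bits by default, i.e. loads are fixed-point values in which 1 << 18 represents a load of exactly 1.0. A standalone sketch of that representation only; this is not the scheduler's actual decaying-average update formula:

    #include <stdio.h>
    #include <stdint.h>

    #define PRECISION_SHIFT 18               /* fractional bits, as in the patch */
    #define ONE             (1u << PRECISION_SHIFT)

    int main(void)
    {
        uint32_t half = ONE / 2;             /* fixed-point 0.5 */
        uint32_t load = ONE + half;          /* fixed-point 1.5 */

        /* Convert back to a readable value for printing. */
        printf("load = %.3f\n", (double)load / ONE);   /* 1.500 */
        return 0;
    }
)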
*/ -#define OPT_RUNQUEUE_CPU 0 -#define OPT_RUNQUEUE_CORE 1 +#define OPT_RUNQUEUE_CPU 0 +#define OPT_RUNQUEUE_CORE 1 #define OPT_RUNQUEUE_SOCKET 2 -#define OPT_RUNQUEUE_NODE 3 -#define OPT_RUNQUEUE_ALL 4 -static const char *const opt_runqueue_str[] = { - [OPT_RUNQUEUE_CPU] = "cpu", - [OPT_RUNQUEUE_CORE] = "core", - [OPT_RUNQUEUE_SOCKET] = "socket", - [OPT_RUNQUEUE_NODE] = "node", - [OPT_RUNQUEUE_ALL] = "all" -}; +#define OPT_RUNQUEUE_NODE 3 +#define OPT_RUNQUEUE_ALL 4 +static const char *const opt_runqueue_str[] = {[OPT_RUNQUEUE_CPU] = "cpu", + [OPT_RUNQUEUE_CORE] = "core", + [OPT_RUNQUEUE_SOCKET] = "socket", + [OPT_RUNQUEUE_NODE] = "node", + [OPT_RUNQUEUE_ALL] = "all"}; static int __read_mostly opt_runqueue = OPT_RUNQUEUE_SOCKET; static int __init parse_credit2_runqueue(const char *s) @@ -462,32 +461,34 @@ custom_param("credit2_runqueue", parse_credit2_runqueue); /* * Per-runqueue data */ -struct csched2_runqueue_data { - spinlock_t lock; /* Lock for this runqueue */ +struct csched2_runqueue_data +{ + spinlock_t lock; /* Lock for this runqueue */ - struct list_head runq; /* Ordered list of runnable vms */ - int id; /* ID of this runqueue (-1 if invalid) */ + struct list_head runq; /* Ordered list of runnable vms */ + int id; /* ID of this runqueue (-1 if invalid) */ int load; /* Instantaneous load (num of non-idle vcpus) */ s_time_t load_last_update; /* Last time average was updated */ s_time_t avgload; /* Decaying queue load */ s_time_t b_avgload; /* Decaying queue load modified by balancing */ - cpumask_t active, /* CPUs enabled for this runqueue */ - smt_idle, /* Fully idle-and-untickled cores (see below) */ - tickled, /* Have been asked to go through schedule */ - idle; /* Currently idle pcpus */ + cpumask_t active, /* CPUs enabled for this runqueue */ + smt_idle, /* Fully idle-and-untickled cores (see below) */ + tickled, /* Have been asked to go through schedule */ + idle; /* Currently idle pcpus */ - struct list_head svc; /* List of all vcpus assigned to the runqueue */ - unsigned int max_weight; /* Max weight of the vcpus in this runqueue */ - unsigned int pick_bias; /* Last picked pcpu. Start from it next time */ + struct list_head svc; /* List of all vcpus assigned to the runqueue */ + unsigned int max_weight; /* Max weight of the vcpus in this runqueue */ + unsigned int pick_bias; /* Last picked pcpu. 
Start from it next time */ }; /* * System-wide private data */ -struct csched2_private { - rwlock_t lock; /* Private scheduler lock */ +struct csched2_private +{ + rwlock_t lock; /* Private scheduler lock */ unsigned int load_precision_shift; /* Precision of load calculations */ unsigned int load_window_shift; /* Lenght of load decaying window */ @@ -496,58 +497,61 @@ struct csched2_private { cpumask_t active_queues; /* Runqueues with (maybe) active cpus */ struct csched2_runqueue_data *rqd; /* Data of the various runqueues */ - cpumask_t initialized; /* CPUs part of this scheduler */ - struct list_head sdom; /* List of domains (for debug key) */ + cpumask_t initialized; /* CPUs part of this scheduler */ + struct list_head sdom; /* List of domains (for debug key) */ }; /* * Physical CPU */ -struct csched2_pcpu { +struct csched2_pcpu +{ int runq_id; }; /* * Virtual CPU */ -struct csched2_vcpu { +struct csched2_vcpu +{ struct csched2_dom *sdom; /* Up-pointer to domain */ struct vcpu *vcpu; /* Up-pointer, to vcpu */ struct csched2_runqueue_data *rqd; /* Up-pointer to the runqueue */ - int credit; /* Current amount of credit */ - unsigned int weight; /* Weight of this vcpu */ - unsigned int residual; /* Reminder of div(max_weight/weight) */ - unsigned flags; /* Status flags (16 bits would be ok, */ - s_time_t budget; /* Current budget (if domains has cap) */ - /* but clear_bit() does not like that) */ - s_time_t budget_quota; /* Budget to which vCPU is entitled */ + int credit; /* Current amount of credit */ + unsigned int weight; /* Weight of this vcpu */ + unsigned int residual; /* Reminder of div(max_weight/weight) */ + unsigned flags; /* Status flags (16 bits would be ok, */ + s_time_t budget; /* Current budget (if domains has cap) */ + /* but clear_bit() does not like that) */ + s_time_t budget_quota; /* Budget to which vCPU is entitled */ - s_time_t start_time; /* Time we were scheduled (for credit) */ + s_time_t start_time; /* Time we were scheduled (for credit) */ /* Individual contribution to load */ - s_time_t load_last_update; /* Last time average was updated */ - s_time_t avgload; /* Decaying queue load */ + s_time_t load_last_update; /* Last time average was updated */ + s_time_t avgload; /* Decaying queue load */ - struct list_head runq_elem; /* On the runqueue (rqd->runq) */ - struct list_head parked_elem; /* On the parked_vcpus list */ - struct list_head rqd_elem; /* On csched2_runqueue_data's svc list */ + struct list_head runq_elem; /* On the runqueue (rqd->runq) */ + struct list_head parked_elem; /* On the parked_vcpus list */ + struct list_head rqd_elem; /* On csched2_runqueue_data's svc list */ struct csched2_runqueue_data *migrate_rqd; /* Pre-determined migr. 
target */ - int tickled_cpu; /* Cpu that will pick us (-1 if none) */ + int tickled_cpu; /* Cpu that will pick us (-1 if none) */ }; /* * Domain */ -struct csched2_dom { - struct domain *dom; /* Up-pointer to domain */ +struct csched2_dom +{ + struct domain *dom; /* Up-pointer to domain */ - spinlock_t budget_lock; /* Serialized budget calculations */ - s_time_t tot_budget; /* Total amount of budget */ - s_time_t budget; /* Currently available budget */ + spinlock_t budget_lock; /* Serialized budget calculations */ + s_time_t tot_budget; /* Total amount of budget */ + s_time_t budget; /* Currently available budget */ - struct timer repl_timer; /* Timer for periodic replenishment of budget */ - s_time_t next_repl; /* Time at which next replenishment occurs */ + struct timer repl_timer; /* Timer for periodic replenishment of budget */ + s_time_t next_repl; /* Time at which next replenishment occurs */ struct list_head parked_vcpus; /* List of CPUs waiting for budget */ struct list_head sdom_elem; /* On csched2_runqueue_data's sdom list */ @@ -652,9 +656,8 @@ static inline bool has_cap(const struct csched2_vcpu *svc) * This means changing the mask when either rqd->idle or rqd->tickled * changes. */ -static inline -void smt_idle_mask_set(unsigned int cpu, const cpumask_t *idlers, - cpumask_t *mask) +static inline void smt_idle_mask_set(unsigned int cpu, const cpumask_t *idlers, + cpumask_t *mask) { const cpumask_t *cpu_siblings = per_cpu(cpu_sibling_mask, cpu); @@ -665,8 +668,7 @@ void smt_idle_mask_set(unsigned int cpu, const cpumask_t *idlers, /* * Clear the bits of all the siblings of cpu from mask (if necessary). */ -static inline -void smt_idle_mask_clear(unsigned int cpu, cpumask_t *mask) +static inline void smt_idle_mask_clear(unsigned int cpu, cpumask_t *mask) { const cpumask_t *cpu_siblings = per_cpu(cpu_sibling_mask, cpu); @@ -694,7 +696,7 @@ static int get_fallback_cpu(struct csched2_vcpu *svc) SCHED_STAT_CRANK(need_fallback_cpu); - for_each_affinity_balance_step( bs ) + for_each_affinity_balance_step (bs) { int cpu = v->processor; @@ -772,7 +774,7 @@ static int get_fallback_cpu(struct csched2_vcpu *svc) * FIXME: Do pre-calculated division? 
*/ static void t2c_update(struct csched2_runqueue_data *rqd, s_time_t time, - struct csched2_vcpu *svc) + struct csched2_vcpu *svc) { uint64_t val = time * rqd->max_weight + svc->residual; @@ -780,7 +782,8 @@ static void t2c_update(struct csched2_runqueue_data *rqd, s_time_t time, svc->credit -= val; } -static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct csched2_vcpu *svc) +static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, + struct csched2_vcpu *svc) { return credit * svc->weight / rqd->max_weight; } @@ -794,7 +797,7 @@ static inline int vcpu_on_runq(struct csched2_vcpu *svc) return !list_empty(&svc->runq_elem); } -static inline struct csched2_vcpu * runq_elem(struct list_head *elem) +static inline struct csched2_vcpu *runq_elem(struct list_head *elem) { return list_entry(elem, struct csched2_vcpu, runq_elem); } @@ -841,12 +844,11 @@ static inline bool same_socket(unsigned int cpua, unsigned int cpub) static inline bool same_core(unsigned int cpua, unsigned int cpub) { - return same_socket(cpua, cpub) && - cpu_to_core(cpua) == cpu_to_core(cpub); + return same_socket(cpua, cpub) && cpu_to_core(cpua) == cpu_to_core(cpub); } -static unsigned int -cpu_to_runqueue(struct csched2_private *prv, unsigned int cpu) +static unsigned int cpu_to_runqueue(struct csched2_private *prv, + unsigned int cpu) { struct csched2_runqueue_data *rqd; unsigned int rqi; @@ -877,11 +879,12 @@ cpu_to_runqueue(struct csched2_private *prv, unsigned int cpu) BUG_ON(cpu_to_socket(cpu) == XEN_INVALID_SOCKET_ID || cpu_to_socket(peer_cpu) == XEN_INVALID_SOCKET_ID); - if (opt_runqueue == OPT_RUNQUEUE_CPU) + if ( opt_runqueue == OPT_RUNQUEUE_CPU ) continue; if ( opt_runqueue == OPT_RUNQUEUE_ALL || (opt_runqueue == OPT_RUNQUEUE_CORE && same_core(peer_cpu, cpu)) || - (opt_runqueue == OPT_RUNQUEUE_SOCKET && same_socket(peer_cpu, cpu)) || + (opt_runqueue == OPT_RUNQUEUE_SOCKET && + same_socket(peer_cpu, cpu)) || (opt_runqueue == OPT_RUNQUEUE_NODE && same_node(peer_cpu, cpu)) ) break; } @@ -912,9 +915,10 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight, struct list_head *iter; int max_weight = 1; - list_for_each( iter, &rqd->svc ) + list_for_each (iter, &rqd->svc) { - struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, rqd_elem); + struct csched2_vcpu *svc = + list_entry(iter, struct csched2_vcpu, rqd_elem); if ( svc->weight > max_weight ) max_weight = svc->weight; @@ -926,22 +930,21 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight, if ( unlikely(tb_init_done) ) { - struct { - unsigned rqi:16, max_weight:16; + struct + { + unsigned rqi : 16, max_weight : 16; } d; d.rqi = rqd->id; d.max_weight = rqd->max_weight; - __trace_var(TRC_CSCHED2_RUNQ_MAX_WEIGHT, 1, - sizeof(d), + __trace_var(TRC_CSCHED2_RUNQ_MAX_WEIGHT, 1, sizeof(d), (unsigned char *)&d); } } /* Add and remove from runqueue assignment (not active run queue) */ -static void -_runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd) +static void _runq_assign(struct csched2_vcpu *svc, + struct csched2_runqueue_data *rqd) { - svc->rqd = rqd; list_add_tail(&svc->rqd_elem, &svc->rqd->svc); @@ -952,22 +955,19 @@ _runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd) if ( unlikely(tb_init_done) ) { - struct { - unsigned vcpu:16, dom:16; - unsigned rqi:16; + struct + { + unsigned vcpu : 16, dom : 16; + unsigned rqi : 16; } d; d.dom = svc->vcpu->domain->domain_id; d.vcpu = svc->vcpu->vcpu_id; - d.rqi=rqd->id; - 
__trace_var(TRC_CSCHED2_RUNQ_ASSIGN, 1, - sizeof(d), - (unsigned char *)&d); + d.rqi = rqd->id; + __trace_var(TRC_CSCHED2_RUNQ_ASSIGN, 1, sizeof(d), (unsigned char *)&d); } - } -static void -runq_assign(const struct scheduler *ops, struct vcpu *vc) +static void runq_assign(const struct scheduler *ops, struct vcpu *vc) { struct csched2_vcpu *svc = vc->sched_priv; @@ -976,8 +976,7 @@ runq_assign(const struct scheduler *ops, struct vcpu *vc) _runq_assign(svc, c2rqd(ops, vc->processor)); } -static void -_runq_deassign(struct csched2_vcpu *svc) +static void _runq_deassign(struct csched2_vcpu *svc) { struct csched2_runqueue_data *rqd = svc->rqd; @@ -993,8 +992,7 @@ _runq_deassign(struct csched2_vcpu *svc) svc->rqd = NULL; } -static void -runq_deassign(const struct scheduler *ops, struct vcpu *vc) +static void runq_deassign(const struct scheduler *ops, struct vcpu *vc) { struct csched2_vcpu *svc = vc->sched_priv; @@ -1109,9 +1107,9 @@ runq_deassign(const struct scheduler *ops, struct vcpu *vc) * * Which, in both cases, is what we expect. */ -static void -update_runq_load(const struct scheduler *ops, - struct csched2_runqueue_data *rqd, int change, s_time_t now) +static void update_runq_load(const struct scheduler *ops, + struct csched2_runqueue_data *rqd, int change, + s_time_t now) { struct csched2_private *prv = csched2_priv(ops); s_time_t delta, load = rqd->load; @@ -1140,7 +1138,7 @@ update_runq_load(const struct scheduler *ops, * avgload_0' = P*load */ - if ( rqd->load_last_update + (1ULL << W) < now ) + if ( rqd->load_last_update + (1ULL << W) < now ) { rqd->avgload = load << P; rqd->b_avgload = load << P; @@ -1150,7 +1148,8 @@ update_runq_load(const struct scheduler *ops, delta = now - rqd->load_last_update; if ( unlikely(delta < 0) ) { - d2printk("WARNING: %s: Time went backwards? now %"PRI_stime" llu %"PRI_stime"\n", + d2printk("WARNING: %s: Time went backwards? now %" PRI_stime + " llu %" PRI_stime "\n", __func__, now, rqd->load_last_update); delta = 0; } @@ -1166,11 +1165,9 @@ update_runq_load(const struct scheduler *ops, * * (delta * load) >> (W - P) */ - rqd->avgload = rqd->avgload + - ((delta * (load << P)) >> W) - + rqd->avgload = rqd->avgload + ((delta * (load << P)) >> W) - ((delta * rqd->avgload) >> W); - rqd->b_avgload = rqd->b_avgload + - ((delta * (load << P)) >> W) - + rqd->b_avgload = rqd->b_avgload + ((delta * (load << P)) >> W) - ((delta * rqd->b_avgload) >> W); } rqd->load += change; @@ -1181,24 +1178,23 @@ update_runq_load(const struct scheduler *ops, if ( unlikely(tb_init_done) ) { - struct { + struct + { uint64_t rq_avgload, b_avgload; - unsigned rq_load:16, rq_id:8, shift:8; + unsigned rq_load : 16, rq_id : 8, shift : 8; } d; d.rq_id = rqd->id; d.rq_load = rqd->load; d.rq_avgload = rqd->avgload; d.b_avgload = rqd->b_avgload; d.shift = P; - __trace_var(TRC_CSCHED2_UPDATE_RUNQ_LOAD, 1, - sizeof(d), + __trace_var(TRC_CSCHED2_UPDATE_RUNQ_LOAD, 1, sizeof(d), (unsigned char *)&d); } } -static void -update_svc_load(const struct scheduler *ops, - struct csched2_vcpu *svc, int change, s_time_t now) +static void update_svc_load(const struct scheduler *ops, + struct csched2_vcpu *svc, int change, s_time_t now) { struct csched2_private *prv = csched2_priv(ops); s_time_t delta, vcpu_load; @@ -1224,13 +1220,13 @@ update_svc_load(const struct scheduler *ops, delta = now - svc->load_last_update; if ( unlikely(delta < 0) ) { - d2printk("WARNING: %s: Time went backwards? now %"PRI_stime" llu %"PRI_stime"\n", + d2printk("WARNING: %s: Time went backwards? 
now %" PRI_stime + " llu %" PRI_stime "\n", __func__, now, svc->load_last_update); delta = 0; } - svc->avgload = svc->avgload + - ((delta * (vcpu_load << P)) >> W) - + svc->avgload = svc->avgload + ((delta * (vcpu_load << P)) >> W) - ((delta * svc->avgload) >> W); } svc->load_last_update = now; @@ -1240,39 +1236,37 @@ update_svc_load(const struct scheduler *ops, if ( unlikely(tb_init_done) ) { - struct { + struct + { uint64_t v_avgload; - unsigned vcpu:16, dom:16; + unsigned vcpu : 16, dom : 16; unsigned shift; } d; d.dom = svc->vcpu->domain->domain_id; d.vcpu = svc->vcpu->vcpu_id; d.v_avgload = svc->avgload; d.shift = P; - __trace_var(TRC_CSCHED2_UPDATE_VCPU_LOAD, 1, - sizeof(d), + __trace_var(TRC_CSCHED2_UPDATE_VCPU_LOAD, 1, sizeof(d), (unsigned char *)&d); } } -static void -update_load(const struct scheduler *ops, - struct csched2_runqueue_data *rqd, - struct csched2_vcpu *svc, int change, s_time_t now) +static void update_load(const struct scheduler *ops, + struct csched2_runqueue_data *rqd, + struct csched2_vcpu *svc, int change, s_time_t now) { - trace_var(TRC_CSCHED2_UPDATE_LOAD, 1, 0, NULL); + trace_var(TRC_CSCHED2_UPDATE_LOAD, 1, 0, NULL); update_runq_load(ops, rqd, change, now); if ( svc ) update_svc_load(ops, svc, change, now); } -static void -runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc) +static void runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc) { struct list_head *iter; unsigned int cpu = svc->vcpu->processor; - struct list_head * runq = &c2rqd(ops, cpu)->runq; + struct list_head *runq = &c2rqd(ops, cpu)->runq; int pos = 0; ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock)); @@ -1285,9 +1279,9 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc) ASSERT(!svc->vcpu->is_running); ASSERT(!(svc->flags & CSFLAG_scheduled)); - list_for_each( iter, runq ) + list_for_each (iter, runq) { - struct csched2_vcpu * iter_svc = runq_elem(iter); + struct csched2_vcpu *iter_svc = runq_elem(iter); if ( svc->credit > iter_svc->credit ) break; @@ -1298,16 +1292,15 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc) if ( unlikely(tb_init_done) ) { - struct { - unsigned vcpu:16, dom:16; + struct + { + unsigned vcpu : 16, dom : 16; unsigned pos; } d; d.dom = svc->vcpu->domain->domain_id; d.vcpu = svc->vcpu->vcpu_id; d.pos = pos; - __trace_var(TRC_CSCHED2_RUNQ_POS, 1, - sizeof(d), - (unsigned char *)&d); + __trace_var(TRC_CSCHED2_RUNQ_POS, 1, sizeof(d), (unsigned char *)&d); } } @@ -1317,10 +1310,11 @@ static inline void runq_remove(struct csched2_vcpu *svc) list_del_init(&svc->runq_elem); } -void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_vcpu *, s_time_t); +void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_vcpu *, + s_time_t); -static inline void -tickle_cpu(unsigned int cpu, struct csched2_runqueue_data *rqd) +static inline void tickle_cpu(unsigned int cpu, + struct csched2_runqueue_data *rqd) { __cpumask_set_cpu(cpu, &rqd->tickled); smt_idle_mask_clear(cpu, &rqd->smt_idle); @@ -1333,8 +1327,8 @@ tickle_cpu(unsigned int cpu, struct csched2_runqueue_data *rqd) * whether or not it already run for more than the ratelimit, to which we * apply some tolerance). 
*/ -static inline bool is_preemptable(const struct csched2_vcpu *svc, - s_time_t now, s_time_t ratelimit) +static inline bool is_preemptable(const struct csched2_vcpu *svc, s_time_t now, + s_time_t ratelimit) { if ( ratelimit <= CSCHED2_RATELIMIT_TICKLE_TOLERANCE ) return true; @@ -1362,7 +1356,7 @@ static s_time_t tickle_score(const struct scheduler *ops, s_time_t now, struct csched2_vcpu *new, unsigned int cpu) { struct csched2_runqueue_data *rqd = c2rqd(ops, cpu); - struct csched2_vcpu * cur = csched2_vcpu(curr_on_cpu(cpu)); + struct csched2_vcpu *cur = csched2_vcpu(curr_on_cpu(cpu)); struct csched2_private *prv = csched2_priv(ops); s_time_t score; @@ -1372,7 +1366,7 @@ static s_time_t tickle_score(const struct scheduler *ops, s_time_t now, * if taking care of tasklets. In that case, we want to leave it alone. */ if ( unlikely(is_idle_vcpu(cur->vcpu) || - !is_preemptable(cur, now, MICROSECS(prv->ratelimit_us))) ) + !is_preemptable(cur, now, MICROSECS(prv->ratelimit_us))) ) return -1; burn_credits(rqd, cur, now); @@ -1398,16 +1392,16 @@ static s_time_t tickle_score(const struct scheduler *ops, s_time_t now, if ( unlikely(tb_init_done) ) { - struct { - unsigned vcpu:16, dom:16; + struct + { + unsigned vcpu : 16, dom : 16; int credit, score; } d; d.dom = cur->vcpu->domain->domain_id; d.vcpu = cur->vcpu->vcpu_id; d.credit = cur->credit; d.score = score; - __trace_var(TRC_CSCHED2_TICKLE_CHECK, 1, - sizeof(d), + __trace_var(TRC_CSCHED2_TICKLE_CHECK, 1, sizeof(d), (unsigned char *)&d); } @@ -1430,8 +1424,8 @@ static s_time_t tickle_score(const struct scheduler *ops, s_time_t now, * processor has been tickled, it will run csched2_schedule() shortly, and * pick up some work, so it would be wrong to consider it idle. */ -static void -runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now) +static void runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, + s_time_t now) { int i, ipid = -1; s_time_t max = 0; @@ -1444,8 +1438,9 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now) if ( unlikely(tb_init_done) ) { - struct { - unsigned vcpu:16, dom:16; + struct + { + unsigned vcpu : 16, dom : 16; unsigned processor; int credit; } d; @@ -1453,9 +1448,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now) d.vcpu = new->vcpu->vcpu_id; d.processor = new->vcpu->processor; d.credit = new->credit; - __trace_var(TRC_CSCHED2_TICKLE_NEW, 1, - sizeof(d), - (unsigned char *)&d); + __trace_var(TRC_CSCHED2_TICKLE_NEW, 1, sizeof(d), (unsigned char *)&d); } /* @@ -1468,7 +1461,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now) * Try to identify the vast majority of these situations, and deal * with them quickly. 
*/ - if ( unlikely((new->flags & CSFLAG_pinned) && + if ( unlikely((new->flags &CSFLAG_pinned) && cpumask_test_cpu(cpu, &rqd->idle) && !cpumask_test_cpu(cpu, &rqd->tickled)) ) { @@ -1478,7 +1471,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now) goto tickle; } - for_each_affinity_balance_step( bs ) + for_each_affinity_balance_step (bs) { /* Just skip first step, if we don't have a soft affinity */ if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(new->vcpu) ) @@ -1564,7 +1557,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now) } } - for_each_cpu(i, &mask) + for_each_cpu (i, &mask) { s_time_t score; @@ -1588,18 +1581,18 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now) ASSERT(!is_idle_vcpu(curr_on_cpu(ipid))); SCHED_STAT_CRANK(tickled_busy_cpu); - tickle: +tickle: BUG_ON(ipid == -1); if ( unlikely(tb_init_done) ) { - struct { - unsigned cpu:16, pad:16; + struct + { + unsigned cpu : 16, pad : 16; } d; - d.cpu = ipid; d.pad = 0; - __trace_var(TRC_CSCHED2_TICKLE, 1, - sizeof(d), - (unsigned char *)&d); + d.cpu = ipid; + d.pad = 0; + __trace_var(TRC_CSCHED2_TICKLE, 1, sizeof(d), (unsigned char *)&d); } tickle_cpu(ipid, rqd); @@ -1640,10 +1633,10 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now, if ( snext->credit < -CSCHED2_CREDIT_INIT ) m += (-snext->credit) / CSCHED2_CREDIT_INIT; - list_for_each( iter, &rqd->svc ) + list_for_each (iter, &rqd->svc) { unsigned int svc_cpu; - struct csched2_vcpu * svc; + struct csched2_vcpu *svc; int start_credit; svc = list_entry(iter, struct csched2_vcpu, rqd_elem); @@ -1673,7 +1666,7 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now, /* * Add INIT * m, avoiding integer multiplication in the common case. */ - if ( likely(m==1) ) + if ( likely(m == 1) ) svc->credit += CSCHED2_CREDIT_INIT; else svc->credit += m * CSCHED2_CREDIT_INIT; @@ -1686,8 +1679,9 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now, if ( unlikely(tb_init_done) ) { - struct { - unsigned vcpu:16, dom:16; + struct + { + unsigned vcpu : 16, dom : 16; int credit_start, credit_end; unsigned multiplier; } d; @@ -1696,8 +1690,7 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now, d.credit_start = start_credit; d.credit_end = svc->credit; d.multiplier = m; - __trace_var(TRC_CSCHED2_CREDIT_RESET, 1, - sizeof(d), + __trace_var(TRC_CSCHED2_CREDIT_RESET, 1, sizeof(d), (unsigned char *)&d); } } @@ -1707,8 +1700,8 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now, /* No need to resort runqueue, as everyone's order should be the same. */ } -void burn_credits(struct csched2_runqueue_data *rqd, - struct csched2_vcpu *svc, s_time_t now) +void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_vcpu *svc, + s_time_t now) { s_time_t delta; @@ -1725,9 +1718,9 @@ void burn_credits(struct csched2_runqueue_data *rqd, if ( unlikely(delta <= 0) ) { if ( unlikely(delta < 0) ) - d2printk("WARNING: %s: Time went backwards? now %"PRI_stime - " start_time %"PRI_stime"\n", __func__, now, - svc->start_time); + d2printk("WARNING: %s: Time went backwards? 
now %" PRI_stime + " start_time %" PRI_stime "\n", + __func__, now, svc->start_time); goto out; } @@ -1739,22 +1732,21 @@ void burn_credits(struct csched2_runqueue_data *rqd, svc->start_time = now; - out: +out: if ( unlikely(tb_init_done) ) { - struct { - unsigned vcpu:16, dom:16; + struct + { + unsigned vcpu : 16, dom : 16; int credit, budget; int delta; } d; d.dom = svc->vcpu->domain->domain_id; d.vcpu = svc->vcpu->vcpu_id; d.credit = svc->credit; - d.budget = has_cap(svc) ? svc->budget : INT_MIN; + d.budget = has_cap(svc) ? svc->budget : INT_MIN; d.delta = delta; - __trace_var(TRC_CSCHED2_CREDIT_BURN, 1, - sizeof(d), - (unsigned char *)&d); + __trace_var(TRC_CSCHED2_CREDIT_BURN, 1, sizeof(d), (unsigned char *)&d); } } @@ -1837,8 +1829,8 @@ static bool vcpu_grab_budget(struct csched2_vcpu *svc) return svc->budget > 0; } -static void -vcpu_return_budget(struct csched2_vcpu *svc, struct list_head *parked) +static void vcpu_return_budget(struct csched2_vcpu *svc, + struct list_head *parked) { struct csched2_dom *sdom = svc->sdom; unsigned int cpu = svc->vcpu->processor; @@ -1878,8 +1870,8 @@ vcpu_return_budget(struct csched2_vcpu *svc, struct list_head *parked) spin_unlock(&sdom->budget_lock); } -static void -unpark_parked_vcpus(const struct scheduler *ops, struct list_head *vcpus) +static void unpark_parked_vcpus(const struct scheduler *ops, + struct list_head *vcpus) { struct csched2_vcpu *svc, *tmp; spinlock_t *lock; @@ -1903,8 +1895,8 @@ unpark_parked_vcpus(const struct scheduler *ops, struct list_head *vcpus) * runqueue, from where it will compete with the others * for the newly replenished budget. */ - ASSERT( svc->rqd != NULL ); - ASSERT( c2rqd(ops, svc->vcpu->processor) == svc->rqd ); + ASSERT(svc->rqd != NULL); + ASSERT(c2rqd(ops, svc->vcpu->processor) == svc->rqd); __set_bit(__CSFLAG_delayed_runq_add, &svc->flags); } else if ( vcpu_runnable(svc->vcpu) ) @@ -1932,7 +1924,7 @@ static inline void do_replenish(struct csched2_dom *sdom) sdom->budget += sdom->tot_budget; } -static void replenish_domain_budget(void* data) +static void replenish_domain_budget(void *data) { struct csched2_dom *sdom = data; unsigned long flags; @@ -1995,37 +1987,36 @@ static void replenish_domain_budget(void* data) unpark_parked_vcpus(sdom->dom->cpupool->sched, &parked); - out: +out: set_timer(&sdom->repl_timer, sdom->next_repl); } #ifndef NDEBUG -static inline void -csched2_vcpu_check(struct vcpu *vc) +static inline void csched2_vcpu_check(struct vcpu *vc) { - struct csched2_vcpu * const svc = csched2_vcpu(vc); - struct csched2_dom * const sdom = svc->sdom; + struct csched2_vcpu *const svc = csched2_vcpu(vc); + struct csched2_dom *const sdom = svc->sdom; - BUG_ON( svc->vcpu != vc ); - BUG_ON( sdom != csched2_dom(vc->domain) ); + BUG_ON(svc->vcpu != vc); + BUG_ON(sdom != csched2_dom(vc->domain)); if ( sdom ) { - BUG_ON( is_idle_vcpu(vc) ); - BUG_ON( sdom->dom != vc->domain ); + BUG_ON(is_idle_vcpu(vc)); + BUG_ON(sdom->dom != vc->domain); } else { - BUG_ON( !is_idle_vcpu(vc) ); + BUG_ON(!is_idle_vcpu(vc)); } SCHED_STAT_CRANK(vcpu_check); } -#define CSCHED2_VCPU_CHECK(_vc) (csched2_vcpu_check(_vc)) +#define CSCHED2_VCPU_CHECK(_vc) (csched2_vcpu_check(_vc)) #else #define CSCHED2_VCPU_CHECK(_vc) #endif -static void * -csched2_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd) +static void *csched2_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, + void *dd) { struct csched2_vcpu *svc; @@ -2041,7 +2032,7 @@ csched2_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd) 
svc->vcpu = vc; svc->flags = 0U; - if ( ! is_idle_vcpu(vc) ) + if ( !is_idle_vcpu(vc) ) { ASSERT(svc->sdom != NULL); svc->credit = CSCHED2_CREDIT_INIT; @@ -2067,10 +2058,9 @@ csched2_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd) return svc; } -static void -csched2_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc) +static void csched2_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc) { - struct csched2_vcpu * const svc = csched2_vcpu(vc); + struct csched2_vcpu *const svc = csched2_vcpu(vc); ASSERT(!is_idle_vcpu(vc)); SCHED_STAT_CRANK(vcpu_sleep); @@ -2089,10 +2079,9 @@ csched2_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc) __clear_bit(__CSFLAG_delayed_runq_add, &svc->flags); } -static void -csched2_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) +static void csched2_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) { - struct csched2_vcpu * const svc = csched2_vcpu(vc); + struct csched2_vcpu *const svc = csched2_vcpu(vc); unsigned int cpu = vc->processor; s_time_t now; @@ -2118,8 +2107,8 @@ csched2_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) SCHED_STAT_CRANK(vcpu_wake_not_runnable); /* If the context hasn't been saved for this vcpu yet, we can't put it on - * another runqueue. Instead, we set a flag so that it will be put on the runqueue - * after the context has been saved. */ + * another runqueue. Instead, we set a flag so that it will be put on the + * runqueue after the context has been saved. */ if ( unlikely(svc->flags & CSFLAG_scheduled) ) { __set_bit(__CSFLAG_delayed_runq_add, &svc->flags); @@ -2130,12 +2119,12 @@ csched2_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) if ( svc->rqd == NULL ) runq_assign(ops, vc); else - ASSERT(c2rqd(ops, vc->processor) == svc->rqd ); + ASSERT(c2rqd(ops, vc->processor) == svc->rqd); now = NOW(); update_load(ops, svc->rqd, svc, 1, now); - + /* Put the VCPU on the runq */ runq_insert(ops, svc); runq_tickle(ops, svc, now); @@ -2144,23 +2133,21 @@ out: return; } -static void -csched2_vcpu_yield(const struct scheduler *ops, struct vcpu *v) +static void csched2_vcpu_yield(const struct scheduler *ops, struct vcpu *v) { - struct csched2_vcpu * const svc = csched2_vcpu(v); + struct csched2_vcpu *const svc = csched2_vcpu(v); __set_bit(__CSFLAG_vcpu_yield, &svc->flags); } -static void -csched2_context_saved(const struct scheduler *ops, struct vcpu *vc) +static void csched2_context_saved(const struct scheduler *ops, struct vcpu *vc) { - struct csched2_vcpu * const svc = csched2_vcpu(vc); + struct csched2_vcpu *const svc = csched2_vcpu(vc); spinlock_t *lock = vcpu_schedule_lock_irq(vc); s_time_t now = NOW(); LIST_HEAD(were_parked); - BUG_ON( !is_idle_vcpu(vc) && svc->rqd != c2rqd(ops, vc->processor)); + BUG_ON(!is_idle_vcpu(vc) && svc->rqd != c2rqd(ops, vc->processor)); ASSERT(is_idle_vcpu(vc) || svc->rqd == c2rqd(ops, vc->processor)); /* This vcpu is now eligible to be put on the runqueue again */ @@ -2177,8 +2164,8 @@ csched2_context_saved(const struct scheduler *ops, struct vcpu *vc) * it seems a bit pointless; especially as we have plenty of * bits free. 
*/ - if ( __test_and_clear_bit(__CSFLAG_delayed_runq_add, &svc->flags) - && likely(vcpu_runnable(vc)) ) + if ( __test_and_clear_bit(__CSFLAG_delayed_runq_add, &svc->flags) && + likely(vcpu_runnable(vc)) ) { ASSERT(!vcpu_on_runq(svc)); @@ -2194,8 +2181,7 @@ csched2_context_saved(const struct scheduler *ops, struct vcpu *vc) } #define MAX_LOAD (STIME_MAX) -static int -csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc) +static int csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc) { struct csched2_private *prv = csched2_priv(ops); int i, min_rqi = -1, min_s_rqi = -1; @@ -2282,7 +2268,7 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc) * Find both runqueues in one pass. */ has_soft = has_soft_affinity(vc); - for_each_cpu(i, &prv->active_queues) + for_each_cpu (i, &prv->active_queues) { struct csched2_runqueue_data *rqd; s_time_t rqd_avgload = MAX_LOAD; @@ -2322,8 +2308,7 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc) * if svc has a soft-affinity, and some cpus of rqd are part of it, * see if we need to update the "soft-affinity minimum". */ - if ( has_soft && - rqd_avgload < min_s_avgload ) + if ( has_soft && rqd_avgload < min_s_avgload ) { cpumask_t mask; @@ -2390,46 +2375,45 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc) goto out_up; } - new_cpu = cpumask_cycle(prv->rqd[min_rqi].pick_bias, - cpumask_scratch_cpu(cpu)); + new_cpu = + cpumask_cycle(prv->rqd[min_rqi].pick_bias, cpumask_scratch_cpu(cpu)); prv->rqd[min_rqi].pick_bias = new_cpu; BUG_ON(new_cpu >= nr_cpu_ids); - out_up: +out_up: read_unlock(&prv->lock); - out: +out: if ( unlikely(tb_init_done) ) { - struct { + struct + { uint64_t b_avgload; - unsigned vcpu:16, dom:16; - unsigned rq_id:16, new_cpu:16; + unsigned vcpu : 16, dom : 16; + unsigned rq_id : 16, new_cpu : 16; } d; d.dom = vc->domain->domain_id; d.vcpu = vc->vcpu_id; d.rq_id = min_rqi; d.b_avgload = min_avgload; d.new_cpu = new_cpu; - __trace_var(TRC_CSCHED2_PICKED_CPU, 1, - sizeof(d), - (unsigned char *)&d); + __trace_var(TRC_CSCHED2_PICKED_CPU, 1, sizeof(d), (unsigned char *)&d); } return new_cpu; } /* Working state of the load-balancing algorithm */ -typedef struct { +typedef struct +{ /* NB: Modified by consider() */ s_time_t load_delta; - struct csched2_vcpu * best_push_svc, *best_pull_svc; + struct csched2_vcpu *best_push_svc, *best_pull_svc; /* NB: Read by consider() */ struct csched2_runqueue_data *lrqd; - struct csched2_runqueue_data *orqd; + struct csched2_runqueue_data *orqd; } balance_state_t; -static void consider(balance_state_t *st, - struct csched2_vcpu *push_svc, +static void consider(balance_state_t *st, struct csched2_vcpu *push_svc, struct csched2_vcpu *pull_svc) { s_time_t l_load, o_load, delta; @@ -2456,32 +2440,28 @@ static void consider(balance_state_t *st, if ( delta < st->load_delta ) { st->load_delta = delta; - st->best_push_svc=push_svc; - st->best_pull_svc=pull_svc; + st->best_push_svc = push_svc; + st->best_pull_svc = pull_svc; } } - -static void migrate(const struct scheduler *ops, - struct csched2_vcpu *svc, - struct csched2_runqueue_data *trqd, - s_time_t now) +static void migrate(const struct scheduler *ops, struct csched2_vcpu *svc, + struct csched2_runqueue_data *trqd, s_time_t now) { int cpu = svc->vcpu->processor; if ( unlikely(tb_init_done) ) { - struct { - unsigned vcpu:16, dom:16; - unsigned rqi:16, trqi:16; + struct + { + unsigned vcpu : 16, dom : 16; + unsigned rqi : 16, trqi : 16; } d; d.dom = svc->vcpu->domain->domain_id; d.vcpu = svc->vcpu->vcpu_id; 
d.rqi = svc->rqd->id; d.trqi = trqd->id; - __trace_var(TRC_CSCHED2_MIGRATE, 1, - sizeof(d), - (unsigned char *)&d); + __trace_var(TRC_CSCHED2_MIGRATE, 1, sizeof(d), (unsigned char *)&d); } if ( svc->flags & CSFLAG_scheduled ) @@ -2509,8 +2489,8 @@ static void migrate(const struct scheduler *ops, cpupool_domain_cpumask(svc->vcpu->domain)); cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu), &trqd->active); - svc->vcpu->processor = cpumask_cycle(trqd->pick_bias, - cpumask_scratch_cpu(cpu)); + svc->vcpu->processor = + cpumask_cycle(trqd->pick_bias, cpumask_scratch_cpu(cpu)); trqd->pick_bias = svc->vcpu->processor; ASSERT(svc->vcpu->processor < nr_cpu_ids); @@ -2533,7 +2513,7 @@ static void migrate(const struct scheduler *ops, * - if svc is allowed to run on at least one of the pcpus of rqd. */ static bool vcpu_is_migrateable(struct csched2_vcpu *svc, - struct csched2_runqueue_data *rqd) + struct csched2_runqueue_data *rqd) { struct vcpu *v = svc->vcpu; int cpu = svc->vcpu->processor; @@ -2552,7 +2532,7 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now) struct list_head *push_iter, *pull_iter; bool inner_load_updated = 0; - balance_state_t st = { .best_push_svc = NULL, .best_pull_svc = NULL }; + balance_state_t st = {.best_push_svc = NULL, .best_pull_svc = NULL}; /* * Basic algorithm: Push, pull, or swap. @@ -2573,18 +2553,17 @@ retry: st.load_delta = 0; - for_each_cpu(i, &prv->active_queues) + for_each_cpu (i, &prv->active_queues) { s_time_t delta; - + st.orqd = prv->rqd + i; - if ( st.orqd == st.lrqd - || !spin_trylock(&st.orqd->lock) ) + if ( st.orqd == st.lrqd || !spin_trylock(&st.orqd->lock) ) continue; update_runq_load(ops, st.orqd, 0, now); - + delta = st.lrqd->b_avgload - st.orqd->b_avgload; if ( delta < 0 ) delta = -delta; @@ -2607,7 +2586,6 @@ retry: s_time_t load_max; int cpus_max; - load_max = st.lrqd->b_avgload; if ( st.orqd->b_avgload > load_max ) load_max = st.orqd->b_avgload; @@ -2619,15 +2597,15 @@ retry: if ( unlikely(tb_init_done) ) { - struct { - unsigned lrq_id:16, orq_id:16; + struct + { + unsigned lrq_id : 16, orq_id : 16; unsigned load_delta; } d; d.lrq_id = st.lrqd->id; d.orq_id = st.orqd->id; d.load_delta = st.load_delta; - __trace_var(TRC_CSCHED2_LOAD_CHECK, 1, - sizeof(d), + __trace_var(TRC_CSCHED2_LOAD_CHECK, 1, sizeof(d), (unsigned char *)&d); } @@ -2639,14 +2617,13 @@ retry: { if ( st.load_delta < (1ULL << (prv->load_precision_shift + opt_underload_balance_tolerance)) ) - goto out; - } - else - if ( st.load_delta < (1ULL << (prv->load_precision_shift + - opt_overload_balance_tolerance)) ) goto out; + } + else if ( st.load_delta < (1ULL << (prv->load_precision_shift + + opt_overload_balance_tolerance)) ) + goto out; } - + /* Try to grab the other runqueue lock; if it's been taken in the * meantime, try the process over again. 
This can't deadlock * because if it doesn't get any other rqd locks, it will simply @@ -2655,22 +2632,23 @@ retry: if ( !spin_trylock(&st.orqd->lock) ) goto retry; - /* Make sure the runqueue hasn't been deactivated since we released prv->lock */ + /* Make sure the runqueue hasn't been deactivated since we released + * prv->lock */ if ( unlikely(st.orqd->id < 0) ) goto out_up; if ( unlikely(tb_init_done) ) { - struct { + struct + { uint64_t lb_avgload, ob_avgload; - unsigned lrq_id:16, orq_id:16; + unsigned lrq_id : 16, orq_id : 16; } d; d.lrq_id = st.lrqd->id; d.lb_avgload = st.lrqd->b_avgload; d.orq_id = st.orqd->id; d.ob_avgload = st.orqd->b_avgload; - __trace_var(TRC_CSCHED2_LOAD_BALANCE, 1, - sizeof(d), + __trace_var(TRC_CSCHED2_LOAD_BALANCE, 1, sizeof(d), (unsigned char *)&d); } @@ -2680,22 +2658,24 @@ retry: * FIXME: O(n^2)! */ /* Reuse load delta (as we're trying to minimize it) */ - list_for_each( push_iter, &st.lrqd->svc ) + list_for_each (push_iter, &st.lrqd->svc) { - struct csched2_vcpu * push_svc = list_entry(push_iter, struct csched2_vcpu, rqd_elem); + struct csched2_vcpu *push_svc = + list_entry(push_iter, struct csched2_vcpu, rqd_elem); update_svc_load(ops, push_svc, 0, now); if ( !vcpu_is_migrateable(push_svc, st.orqd) ) continue; - list_for_each( pull_iter, &st.orqd->svc ) + list_for_each (pull_iter, &st.orqd->svc) { - struct csched2_vcpu * pull_svc = list_entry(pull_iter, struct csched2_vcpu, rqd_elem); - + struct csched2_vcpu *pull_svc = + list_entry(pull_iter, struct csched2_vcpu, rqd_elem); + if ( !inner_load_updated ) update_svc_load(ops, pull_svc, 0, now); - + if ( !vcpu_is_migrateable(pull_svc, st.lrqd) ) continue; @@ -2708,10 +2688,11 @@ retry: consider(&st, push_svc, NULL); } - list_for_each( pull_iter, &st.orqd->svc ) + list_for_each (pull_iter, &st.orqd->svc) { - struct csched2_vcpu * pull_svc = list_entry(pull_iter, struct csched2_vcpu, rqd_elem); - + struct csched2_vcpu *pull_svc = + list_entry(pull_iter, struct csched2_vcpu, rqd_elem); + if ( !vcpu_is_migrateable(pull_svc, st.lrqd) ) continue; @@ -2725,18 +2706,17 @@ retry: if ( st.best_pull_svc ) migrate(ops, st.best_pull_svc, st.lrqd, now); - out_up: +out_up: spin_unlock(&st.orqd->lock); - out: +out: return; } -static void -csched2_vcpu_migrate( - const struct scheduler *ops, struct vcpu *vc, unsigned int new_cpu) +static void csched2_vcpu_migrate(const struct scheduler *ops, struct vcpu *vc, + unsigned int new_cpu) { struct domain *d = vc->domain; - struct csched2_vcpu * const svc = csched2_vcpu(vc); + struct csched2_vcpu *const svc = csched2_vcpu(vc); struct csched2_runqueue_data *trqd; s_time_t now = NOW(); @@ -2788,13 +2768,10 @@ csched2_vcpu_migrate( vc->processor = new_cpu; } -static int -csched2_dom_cntl( - const struct scheduler *ops, - struct domain *d, - struct xen_domctl_scheduler_op *op) +static int csched2_dom_cntl(const struct scheduler *ops, struct domain *d, + struct xen_domctl_scheduler_op *op) { - struct csched2_dom * const sdom = csched2_dom(d); + struct csched2_dom *const sdom = csched2_dom(d); struct csched2_private *prv = csched2_priv(ops); unsigned long flags; struct vcpu *v; @@ -2811,7 +2788,7 @@ csched2_dom_cntl( * for adjusting the parameters and rescheduling any vCPU that is * running at the time of the change). 
*/ - switch ( op->cmd ) + switch (op->cmd) { case XEN_DOMCTL_SCHEDOP_getinfo: read_lock_irqsave(&prv->lock, flags); @@ -2830,8 +2807,9 @@ csched2_dom_cntl( sdom->weight = op->u.credit2.weight; - /* Update weights for vcpus, and max_weight for runqueues on which they reside */ - for_each_vcpu ( d, v ) + /* Update weights for vcpus, and max_weight for runqueues on which + * they reside */ + for_each_vcpu (d, v) { struct csched2_vcpu *svc = csched2_vcpu(v); spinlock_t *lock = vcpu_schedule_lock(svc->vcpu); @@ -2869,7 +2847,7 @@ csched2_dom_cntl( * the total budget. Roughly speaking, this means each vCPU will * have at least one chance to run during every period. */ - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { svc = csched2_vcpu(v); lock = vcpu_schedule_lock(svc->vcpu); @@ -2878,8 +2856,8 @@ csched2_dom_cntl( * which then won't happen because, in csched2_runtime(), * CSCHED2_MIN_TIMER is what would be used anyway. */ - svc->budget_quota = max(sdom->tot_budget / sdom->nr_vcpus, - CSCHED2_MIN_TIMER); + svc->budget_quota = + max(sdom->tot_budget / sdom->nr_vcpus, CSCHED2_MIN_TIMER); vcpu_schedule_unlock(lock, svc->vcpu); } @@ -2912,7 +2890,7 @@ csched2_dom_cntl( * no budget, and the vCPU will try to get some (and be parked, * if there's none, and we'll switch to someone else). */ - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { svc = csched2_vcpu(v); lock = vcpu_schedule_lock(svc->vcpu); @@ -2959,7 +2937,7 @@ csched2_dom_cntl( stop_timer(&sdom->repl_timer); /* Disable budget accounting for all the vCPUs. */ - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { struct csched2_vcpu *svc = csched2_vcpu(v); spinlock_t *lock = vcpu_schedule_lock(svc->vcpu); @@ -2990,13 +2968,11 @@ csched2_dom_cntl( break; } - return rc; } -static void -csched2_aff_cntl(const struct scheduler *ops, struct vcpu *v, - const cpumask_t *hard, const cpumask_t *soft) +static void csched2_aff_cntl(const struct scheduler *ops, struct vcpu *v, + const cpumask_t *hard, const cpumask_t *soft) { struct csched2_vcpu *svc = csched2_vcpu(v); @@ -3017,12 +2993,12 @@ static int csched2_sys_cntl(const struct scheduler *ops, struct csched2_private *prv = csched2_priv(ops); unsigned long flags; - switch (sc->cmd ) + switch (sc->cmd) { case XEN_SYSCTL_SCHEDOP_putinfo: if ( params->ratelimit_us && (params->ratelimit_us > XEN_SYSCTL_SCHED_RATELIMIT_MAX || - params->ratelimit_us < XEN_SYSCTL_SCHED_RATELIMIT_MIN )) + params->ratelimit_us < XEN_SYSCTL_SCHED_RATELIMIT_MIN) ) return -EINVAL; write_lock_irqsave(&prv->lock, flags); @@ -3042,8 +3018,8 @@ static int csched2_sys_cntl(const struct scheduler *ops, return 0; } -static void * -csched2_alloc_domdata(const struct scheduler *ops, struct domain *dom) +static void *csched2_alloc_domdata(const struct scheduler *ops, + struct domain *dom) { struct csched2_private *prv = csched2_priv(ops); struct csched2_dom *sdom; @@ -3074,8 +3050,7 @@ csched2_alloc_domdata(const struct scheduler *ops, struct domain *dom) return sdom; } -static void -csched2_free_domdata(const struct scheduler *ops, void *data) +static void csched2_free_domdata(const struct scheduler *ops, void *data) { struct csched2_dom *sdom = data; struct csched2_private *prv = csched2_priv(ops); @@ -3094,11 +3069,10 @@ csched2_free_domdata(const struct scheduler *ops, void *data) } } -static void -csched2_vcpu_insert(const struct scheduler *ops, struct vcpu *vc) +static void csched2_vcpu_insert(const struct scheduler *ops, struct vcpu *vc) { struct csched2_vcpu *svc = vc->sched_priv; - struct csched2_dom * const sdom = svc->sdom; + struct 
csched2_dom *const sdom = svc->sdom; spinlock_t *lock; ASSERT(!is_idle_vcpu(vc)); @@ -3125,18 +3099,16 @@ csched2_vcpu_insert(const struct scheduler *ops, struct vcpu *vc) CSCHED2_VCPU_CHECK(vc); } -static void -csched2_free_vdata(const struct scheduler *ops, void *priv) +static void csched2_free_vdata(const struct scheduler *ops, void *priv) { struct csched2_vcpu *svc = priv; xfree(svc); } -static void -csched2_vcpu_remove(const struct scheduler *ops, struct vcpu *vc) +static void csched2_vcpu_remove(const struct scheduler *ops, struct vcpu *vc) { - struct csched2_vcpu * const svc = csched2_vcpu(vc); + struct csched2_vcpu *const svc = csched2_vcpu(vc); spinlock_t *lock; ASSERT(!is_idle_vcpu(vc)); @@ -3155,9 +3127,8 @@ csched2_vcpu_remove(const struct scheduler *ops, struct vcpu *vc) } /* How long should we let this vcpu run for? */ -static s_time_t -csched2_runtime(const struct scheduler *ops, int cpu, - struct csched2_vcpu *snext, s_time_t now) +static s_time_t csched2_runtime(const struct scheduler *ops, int cpu, + struct csched2_vcpu *snext, s_time_t now) { s_time_t time, min_time; int rt_credit; /* Proposed runtime measured in credits */ @@ -3200,12 +3171,11 @@ csched2_runtime(const struct scheduler *ops, int cpu, * 2) If there's someone waiting whose credit is positive, * run until your credit ~= his. */ - if ( ! list_empty(runq) ) + if ( !list_empty(runq) ) { struct csched2_vcpu *swait = runq_elem(runq->next); - if ( ! is_idle_vcpu(swait->vcpu) - && swait->credit > 0 ) + if ( !is_idle_vcpu(swait->vcpu) && swait->credit > 0 ) { rt_credit = snext->credit - swait->credit; } @@ -3241,7 +3211,7 @@ csched2_runtime(const struct scheduler *ops, int cpu, time = min_time; SCHED_STAT_CRANK(runtime_min_timer); } - else if (time > CSCHED2_MAX_TIMER) + else if ( time > CSCHED2_MAX_TIMER ) { time = CSCHED2_MAX_TIMER; SCHED_STAT_CRANK(runtime_max_timer); @@ -3253,11 +3223,9 @@ csched2_runtime(const struct scheduler *ops, int cpu, /* * Find a candidate. 
*/ -static struct csched2_vcpu * -runq_candidate(struct csched2_runqueue_data *rqd, - struct csched2_vcpu *scurr, - int cpu, s_time_t now, - unsigned int *skipped) +static struct csched2_vcpu *runq_candidate(struct csched2_runqueue_data *rqd, + struct csched2_vcpu *scurr, int cpu, + s_time_t now, unsigned int *skipped) { struct list_head *iter, *temp; struct csched2_vcpu *snext = NULL; @@ -3285,19 +3253,19 @@ runq_candidate(struct csched2_runqueue_data *rqd, */ if ( !yield && prv->ratelimit_us && vcpu_runnable(scurr->vcpu) && (now - scurr->vcpu->runstate.state_entry_time) < - MICROSECS(prv->ratelimit_us) ) + MICROSECS(prv->ratelimit_us) ) { if ( unlikely(tb_init_done) ) { - struct { - unsigned vcpu:16, dom:16; + struct + { + unsigned vcpu : 16, dom : 16; unsigned runtime; } d; d.dom = scurr->vcpu->domain->domain_id; d.vcpu = scurr->vcpu->vcpu_id; d.runtime = now - scurr->vcpu->runstate.state_entry_time; - __trace_var(TRC_CSCHED2_RATELIMIT, 1, - sizeof(d), + __trace_var(TRC_CSCHED2_RATELIMIT, 1, sizeof(d), (unsigned char *)&d); } return scurr; @@ -3343,20 +3311,21 @@ runq_candidate(struct csched2_runqueue_data *rqd, else snext = csched2_vcpu(idle_vcpu[cpu]); - check_runq: - list_for_each_safe( iter, temp, &rqd->runq ) +check_runq: + list_for_each_safe (iter, temp, &rqd->runq) { - struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, runq_elem); + struct csched2_vcpu *svc = + list_entry(iter, struct csched2_vcpu, runq_elem); if ( unlikely(tb_init_done) ) { - struct { - unsigned vcpu:16, dom:16; + struct + { + unsigned vcpu : 16, dom : 16; } d; d.dom = svc->vcpu->domain->domain_id; d.vcpu = svc->vcpu->vcpu_id; - __trace_var(TRC_CSCHED2_RUNQ_CAND_CHECK, 1, - sizeof(d), + __trace_var(TRC_CSCHED2_RUNQ_CAND_CHECK, 1, sizeof(d), (unsigned char *)&d); } @@ -3383,8 +3352,8 @@ runq_candidate(struct csched2_runqueue_data *rqd, * If this is on a different processor, don't pull it unless * its credit is at least CSCHED2_MIGRATE_RESIST higher. */ - if ( svc->vcpu->processor != cpu - && snext->credit + CSCHED2_MIGRATE_RESIST > svc->credit ) + if ( svc->vcpu->processor != cpu && + snext->credit + CSCHED2_MIGRATE_RESIST > svc->credit ) { (*skipped)++; SCHED_STAT_CRANK(migrate_resisted); @@ -3407,8 +3376,9 @@ runq_candidate(struct csched2_runqueue_data *rqd, if ( unlikely(tb_init_done) ) { - struct { - unsigned vcpu:16, dom:16; + struct + { + unsigned vcpu : 16, dom : 16; unsigned tickled_cpu, skipped; int credit; } d; @@ -3417,8 +3387,7 @@ runq_candidate(struct csched2_runqueue_data *rqd, d.credit = snext->credit; d.tickled_cpu = snext->tickled_cpu; d.skipped = *skipped; - __trace_var(TRC_CSCHED2_RUNQ_CANDIDATE, 1, - sizeof(d), + __trace_var(TRC_CSCHED2_RUNQ_CANDIDATE, 1, sizeof(d), (unsigned char *)&d); } @@ -3439,13 +3408,13 @@ runq_candidate(struct csched2_runqueue_data *rqd, * This function is in the critical path. It is designed to be simple and * fast for the common case. 
*/ -static struct task_slice -csched2_schedule( - const struct scheduler *ops, s_time_t now, bool tasklet_work_scheduled) +static struct task_slice csched2_schedule(const struct scheduler *ops, + s_time_t now, + bool tasklet_work_scheduled) { const int cpu = smp_processor_id(); struct csched2_runqueue_data *rqd; - struct csched2_vcpu * const scurr = csched2_vcpu(current); + struct csched2_vcpu *const scurr = csched2_vcpu(current); struct csched2_vcpu *snext = NULL; unsigned int skipped_vcpus = 0; struct task_slice ret; @@ -3474,9 +3443,10 @@ csched2_schedule( if ( unlikely(tb_init_done) ) { - struct { - unsigned cpu:16, rq_id:16; - unsigned tasklet:8, idle:8, smt_idle:8, tickled:8; + struct + { + unsigned cpu : 16, rq_id : 16; + unsigned tasklet : 8, idle : 8, smt_idle : 8, tickled : 8; } d; d.cpu = cpu; d.rq_id = c2r(cpu); @@ -3484,9 +3454,7 @@ csched2_schedule( d.idle = is_idle_vcpu(current); d.smt_idle = cpumask_test_cpu(cpu, &rqd->smt_idle); d.tickled = tickled; - __trace_var(TRC_CSCHED2_SCHEDULE, 1, - sizeof(d), - (unsigned char *)&d); + __trace_var(TRC_CSCHED2_SCHEDULE, 1, sizeof(d), (unsigned char *)&d); } /* Update credits (and budget, if necessary). */ @@ -3529,9 +3497,8 @@ csched2_schedule( /* If switching from a non-idle runnable vcpu, put it * back on the runqueue. */ - if ( snext != scurr - && !is_idle_vcpu(scurr->vcpu) - && vcpu_runnable(current) ) + if ( snext != scurr && !is_idle_vcpu(scurr->vcpu) && + vcpu_runnable(current) ) __set_bit(__CSFLAG_delayed_runq_add, &scurr->flags); ret.migrated = 0; @@ -3623,37 +3590,32 @@ csched2_schedule( return ret; } -static void -csched2_dump_vcpu(struct csched2_private *prv, struct csched2_vcpu *svc) +static void csched2_dump_vcpu(struct csched2_private *prv, + struct csched2_vcpu *svc) { - printk("[%i.%i] flags=%x cpu=%i", - svc->vcpu->domain->domain_id, - svc->vcpu->vcpu_id, - svc->flags, - svc->vcpu->processor); + printk("[%i.%i] flags=%x cpu=%i", svc->vcpu->domain->domain_id, + svc->vcpu->vcpu_id, svc->flags, svc->vcpu->processor); - printk(" credit=%" PRIi32" [w=%u]", svc->credit, svc->weight); + printk(" credit=%" PRIi32 " [w=%u]", svc->credit, svc->weight); if ( has_cap(svc) ) - printk(" budget=%"PRI_stime"(%"PRI_stime")", - svc->budget, svc->budget_quota); + printk(" budget=%" PRI_stime "(%" PRI_stime ")", svc->budget, + svc->budget_quota); - printk(" load=%"PRI_stime" (~%"PRI_stime"%%)", svc->avgload, + printk(" load=%" PRI_stime " (~%" PRI_stime "%%)", svc->avgload, (svc->avgload * 100) >> prv->load_precision_shift); printk("\n"); } -static inline void -dump_pcpu(const struct scheduler *ops, int cpu) +static inline void dump_pcpu(const struct scheduler *ops, int cpu) { struct csched2_private *prv = csched2_priv(ops); struct csched2_vcpu *svc; - printk("CPU[%02d] runq=%d, sibling=%*pb, core=%*pb\n", - cpu, c2r(cpu), - nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)), - nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu))); + printk("CPU[%02d] runq=%d, sibling=%*pb, core=%*pb\n", cpu, c2r(cpu), + nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)), nr_cpu_ids, + cpumask_bits(per_cpu(cpu_core_mask, cpu))); /* current VCPU (nothing to say if that's the idle vcpu) */ svc = csched2_vcpu(curr_on_cpu(cpu)); @@ -3664,8 +3626,7 @@ dump_pcpu(const struct scheduler *ops, int cpu) } } -static void -csched2_dump(const struct scheduler *ops) +static void csched2_dump(const struct scheduler *ops) { struct list_head *iter_sdom; struct csched2_private *prv = csched2_priv(ops); @@ -3680,9 +3641,8 @@ csched2_dump(const struct 
scheduler *ops) printk("Active queues: %d\n" "\tdefault-weight = %d\n", - cpumask_weight(&prv->active_queues), - CSCHED2_DEFAULT_WEIGHT); - for_each_cpu(i, &prv->active_queues) + cpumask_weight(&prv->active_queues), CSCHED2_DEFAULT_WEIGHT); + for_each_cpu (i, &prv->active_queues) { s_time_t fraction; @@ -3694,42 +3654,35 @@ csched2_dump(const struct scheduler *ops) "\tmax_weight = %u\n" "\tpick_bias = %u\n" "\tinstload = %d\n" - "\taveload = %"PRI_stime" (~%"PRI_stime"%%)\n", - i, - cpumask_weight(&prv->rqd[i].active), - nr_cpu_ids, cpumask_bits(&prv->rqd[i].active), - prv->rqd[i].max_weight, - prv->rqd[i].pick_bias, - prv->rqd[i].load, - prv->rqd[i].avgload, + "\taveload = %" PRI_stime " (~%" PRI_stime "%%)\n", + i, cpumask_weight(&prv->rqd[i].active), nr_cpu_ids, + cpumask_bits(&prv->rqd[i].active), prv->rqd[i].max_weight, + prv->rqd[i].pick_bias, prv->rqd[i].load, prv->rqd[i].avgload, fraction); printk("\tidlers: %*pb\n" "\ttickled: %*pb\n" "\tfully idle cores: %*pb\n", - nr_cpu_ids, cpumask_bits(&prv->rqd[i].idle), - nr_cpu_ids, cpumask_bits(&prv->rqd[i].tickled), - nr_cpu_ids, cpumask_bits(&prv->rqd[i].smt_idle)); + nr_cpu_ids, cpumask_bits(&prv->rqd[i].idle), nr_cpu_ids, + cpumask_bits(&prv->rqd[i].tickled), nr_cpu_ids, + cpumask_bits(&prv->rqd[i].smt_idle)); } printk("Domain info:\n"); loop = 0; - list_for_each( iter_sdom, &prv->sdom ) + list_for_each (iter_sdom, &prv->sdom) { struct csched2_dom *sdom; struct vcpu *v; sdom = list_entry(iter_sdom, struct csched2_dom, sdom_elem); - printk("\tDomain: %d w %d c %u v %d\n", - sdom->dom->domain_id, - sdom->weight, - sdom->cap, - sdom->nr_vcpus); + printk("\tDomain: %d w %d c %u v %d\n", sdom->dom->domain_id, + sdom->weight, sdom->cap, sdom->nr_vcpus); - for_each_vcpu( sdom->dom, v ) + for_each_vcpu (sdom->dom, v) { - struct csched2_vcpu * const svc = csched2_vcpu(v); + struct csched2_vcpu *const svc = csched2_vcpu(v); spinlock_t *lock; lock = vcpu_schedule_lock(svc->vcpu); @@ -3741,7 +3694,7 @@ csched2_dump(const struct scheduler *ops) } } - for_each_cpu(i, &prv->active_queues) + for_each_cpu (i, &prv->active_queues) { struct csched2_runqueue_data *rqd = prv->rqd + i; struct list_head *iter, *runq = &rqd->runq; @@ -3752,11 +3705,11 @@ csched2_dump(const struct scheduler *ops) printk("Runqueue %d:\n", i); - for_each_cpu(j, &rqd->active) + for_each_cpu (j, &rqd->active) dump_pcpu(ops, j); printk("RUNQ:\n"); - list_for_each( iter, runq ) + list_for_each (iter, runq) { struct csched2_vcpu *svc = runq_elem(iter); @@ -3772,8 +3725,7 @@ csched2_dump(const struct scheduler *ops) read_unlock_irqrestore(&prv->lock, flags); } -static void * -csched2_alloc_pdata(const struct scheduler *ops, int cpu) +static void *csched2_alloc_pdata(const struct scheduler *ops, int cpu) { struct csched2_pcpu *spc; @@ -3788,9 +3740,8 @@ csched2_alloc_pdata(const struct scheduler *ops, int cpu) } /* Returns the ID of the runqueue the cpu is assigned to. */ -static unsigned -init_pdata(struct csched2_private *prv, struct csched2_pcpu *spc, - unsigned int cpu) +static unsigned init_pdata(struct csched2_private *prv, + struct csched2_pcpu *spc, unsigned int cpu) { struct csched2_runqueue_data *rqd; @@ -3805,12 +3756,12 @@ init_pdata(struct csched2_private *prv, struct csched2_pcpu *spc, rqd = prv->rqd + spc->runq_id; printk(XENLOG_INFO "Adding cpu %d to runqueue %d\n", cpu, spc->runq_id); - if ( ! 
cpumask_test_cpu(spc->runq_id, &prv->active_queues) ) + if ( !cpumask_test_cpu(spc->runq_id, &prv->active_queues) ) { printk(XENLOG_INFO " First cpu on runqueue, activating\n"); activate_runqueue(prv, spc->runq_id); } - + __cpumask_set_cpu(cpu, &rqd->idle); __cpumask_set_cpu(cpu, &rqd->active); __cpumask_set_cpu(cpu, &prv->initialized); @@ -3822,8 +3773,8 @@ init_pdata(struct csched2_private *prv, struct csched2_pcpu *spc, return spc->runq_id; } -static void -csched2_init_pdata(const struct scheduler *ops, void *pdata, int cpu) +static void csched2_init_pdata(const struct scheduler *ops, void *pdata, + int cpu) { struct csched2_private *prv = csched2_priv(ops); spinlock_t *old_lock; @@ -3843,9 +3794,8 @@ csched2_init_pdata(const struct scheduler *ops, void *pdata, int cpu) } /* Change the scheduler of cpu to us (Credit2). */ -static void -csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu, - void *pdata, void *vdata) +static void csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu, + void *pdata, void *vdata) { struct csched2_private *prv = csched2_priv(new_ops); struct csched2_vcpu *svc = vdata; @@ -3890,8 +3840,8 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu, write_unlock(&prv->lock); } -static void -csched2_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu) +static void csched2_deinit_pdata(const struct scheduler *ops, void *pcpu, + int cpu) { unsigned long flags; struct csched2_private *prv = csched2_priv(ops); @@ -3914,7 +3864,7 @@ csched2_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu) */ ASSERT(spc && spc->runq_id != -1); ASSERT(cpumask_test_cpu(cpu, &prv->initialized)); - + /* Find the old runqueue and remove this cpu from it */ rqd = prv->rqd + spc->runq_id; @@ -3946,8 +3896,7 @@ csched2_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu) return; } -static void -csched2_free_pdata(const struct scheduler *ops, void *pcpu, int cpu) +static void csched2_free_pdata(const struct scheduler *ops, void *pcpu, int cpu) { struct csched2_pcpu *spc = pcpu; @@ -3964,12 +3913,12 @@ csched2_free_pdata(const struct scheduler *ops, void *pcpu, int cpu) xfree(pcpu); } -static int __init -csched2_global_init(void) +static int __init csched2_global_init(void) { if ( opt_load_precision_shift < LOADAVG_PRECISION_SHIFT_MIN ) { - printk("WARNING: %s: opt_load_precision_shift %u below min %d, resetting\n", + printk("WARNING: %s: opt_load_precision_shift %u below min %d, " + "resetting\n", __func__, opt_load_precision_shift, LOADAVG_PRECISION_SHIFT_MIN); opt_load_precision_shift = LOADAVG_PRECISION_SHIFT_MIN; } @@ -3991,26 +3940,22 @@ csched2_global_init(void) return 0; } -static int -csched2_init(struct scheduler *ops) +static int csched2_init(struct scheduler *ops) { int i; struct csched2_private *prv; printk("Initializing Credit2 scheduler\n"); - printk(XENLOG_INFO " load_precision_shift: %d\n" - XENLOG_INFO " load_window_shift: %d\n" - XENLOG_INFO " underload_balance_tolerance: %d\n" - XENLOG_INFO " overload_balance_tolerance: %d\n" - XENLOG_INFO " runqueues arrangement: %s\n" - XENLOG_INFO " cap enforcement granularity: %dms\n", - opt_load_precision_shift, - opt_load_window_shift, - opt_underload_balance_tolerance, - opt_overload_balance_tolerance, - opt_runqueue_str[opt_runqueue], - opt_cap_period); + printk(XENLOG_INFO " load_precision_shift: %d\n" XENLOG_INFO + " load_window_shift: %d\n" XENLOG_INFO + " underload_balance_tolerance: %d\n" XENLOG_INFO + " overload_balance_tolerance: %d\n" XENLOG_INFO + " runqueues 
arrangement: %s\n" XENLOG_INFO + " cap enforcement granularity: %dms\n", + opt_load_precision_shift, opt_load_window_shift, + opt_underload_balance_tolerance, opt_overload_balance_tolerance, + opt_runqueue_str[opt_runqueue], opt_cap_period); printk(XENLOG_INFO "load tracking window length %llu ns\n", 1ULL << opt_load_window_shift); @@ -4049,8 +3994,7 @@ csched2_init(struct scheduler *ops) return 0; } -static void -csched2_deinit(struct scheduler *ops) +static void csched2_deinit(struct scheduler *ops) { struct csched2_private *prv; @@ -4060,41 +4004,41 @@ csched2_deinit(struct scheduler *ops) } static const struct scheduler sched_credit2_def = { - .name = "SMP Credit Scheduler rev2", - .opt_name = "credit2", - .sched_id = XEN_SCHEDULER_CREDIT2, - .sched_data = NULL, - - .global_init = csched2_global_init, - - .insert_vcpu = csched2_vcpu_insert, - .remove_vcpu = csched2_vcpu_remove, - - .sleep = csched2_vcpu_sleep, - .wake = csched2_vcpu_wake, - .yield = csched2_vcpu_yield, - - .adjust = csched2_dom_cntl, - .adjust_affinity= csched2_aff_cntl, - .adjust_global = csched2_sys_cntl, - - .pick_cpu = csched2_cpu_pick, - .migrate = csched2_vcpu_migrate, - .do_schedule = csched2_schedule, - .context_saved = csched2_context_saved, - - .dump_settings = csched2_dump, - .init = csched2_init, - .deinit = csched2_deinit, - .alloc_vdata = csched2_alloc_vdata, - .free_vdata = csched2_free_vdata, - .alloc_pdata = csched2_alloc_pdata, - .init_pdata = csched2_init_pdata, - .deinit_pdata = csched2_deinit_pdata, - .free_pdata = csched2_free_pdata, - .switch_sched = csched2_switch_sched, - .alloc_domdata = csched2_alloc_domdata, - .free_domdata = csched2_free_domdata, + .name = "SMP Credit Scheduler rev2", + .opt_name = "credit2", + .sched_id = XEN_SCHEDULER_CREDIT2, + .sched_data = NULL, + + .global_init = csched2_global_init, + + .insert_vcpu = csched2_vcpu_insert, + .remove_vcpu = csched2_vcpu_remove, + + .sleep = csched2_vcpu_sleep, + .wake = csched2_vcpu_wake, + .yield = csched2_vcpu_yield, + + .adjust = csched2_dom_cntl, + .adjust_affinity = csched2_aff_cntl, + .adjust_global = csched2_sys_cntl, + + .pick_cpu = csched2_cpu_pick, + .migrate = csched2_vcpu_migrate, + .do_schedule = csched2_schedule, + .context_saved = csched2_context_saved, + + .dump_settings = csched2_dump, + .init = csched2_init, + .deinit = csched2_deinit, + .alloc_vdata = csched2_alloc_vdata, + .free_vdata = csched2_free_vdata, + .alloc_pdata = csched2_alloc_pdata, + .init_pdata = csched2_init_pdata, + .deinit_pdata = csched2_deinit_pdata, + .free_pdata = csched2_free_pdata, + .switch_sched = csched2_switch_sched, + .alloc_domdata = csched2_alloc_domdata, + .free_domdata = csched2_free_domdata, }; REGISTER_SCHEDULER(sched_credit2_def); diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c index a59dbb2692..ad6058fc33 100644 --- a/xen/common/sched_null.c +++ b/xen/common/sched_null.c @@ -37,12 +37,12 @@ /* * null tracing events. Check include/public/trace.h for more details. 
*/ -#define TRC_SNULL_PICKED_CPU TRC_SCHED_CLASS_EVT(SNULL, 1) -#define TRC_SNULL_VCPU_ASSIGN TRC_SCHED_CLASS_EVT(SNULL, 2) +#define TRC_SNULL_PICKED_CPU TRC_SCHED_CLASS_EVT(SNULL, 1) +#define TRC_SNULL_VCPU_ASSIGN TRC_SCHED_CLASS_EVT(SNULL, 2) #define TRC_SNULL_VCPU_DEASSIGN TRC_SCHED_CLASS_EVT(SNULL, 3) -#define TRC_SNULL_MIGRATE TRC_SCHED_CLASS_EVT(SNULL, 4) -#define TRC_SNULL_SCHEDULE TRC_SCHED_CLASS_EVT(SNULL, 5) -#define TRC_SNULL_TASKLET TRC_SCHED_CLASS_EVT(SNULL, 6) +#define TRC_SNULL_MIGRATE TRC_SCHED_CLASS_EVT(SNULL, 4) +#define TRC_SNULL_SCHEDULE TRC_SCHED_CLASS_EVT(SNULL, 5) +#define TRC_SNULL_TASKLET TRC_SCHED_CLASS_EVT(SNULL, 6) /* * Locking: @@ -75,7 +75,8 @@ /* * System-wide private data */ -struct null_private { +struct null_private +{ spinlock_t lock; /* scheduler lock; nests inside cpupool_lock */ struct list_head ndom; /* Domains of this scheduler */ struct list_head waitq; /* vCPUs not assigned to any pCPU */ @@ -86,7 +87,8 @@ struct null_private { /* * Physical CPU */ -struct null_pcpu { +struct null_pcpu +{ struct vcpu *vcpu; }; DEFINE_PER_CPU(struct null_pcpu, npc); @@ -94,7 +96,8 @@ DEFINE_PER_CPU(struct null_pcpu, npc); /* * Virtual CPU */ -struct null_vcpu { +struct null_vcpu +{ struct list_head waitq_elem; struct vcpu *vcpu; }; @@ -102,7 +105,8 @@ struct null_vcpu { /* * Domain */ -struct null_dom { +struct null_dom +{ struct list_head ndom_elem; struct domain *dom; }; @@ -193,8 +197,8 @@ static void null_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu) per_cpu(npc, cpu).vcpu = NULL; } -static void *null_alloc_vdata(const struct scheduler *ops, - struct vcpu *v, void *dd) +static void *null_alloc_vdata(const struct scheduler *ops, struct vcpu *v, + void *dd) { struct null_vcpu *nvc; @@ -217,8 +221,7 @@ static void null_free_vdata(const struct scheduler *ops, void *priv) xfree(nvc); } -static void * null_alloc_domdata(const struct scheduler *ops, - struct domain *d) +static void *null_alloc_domdata(const struct scheduler *ops, struct domain *d) { struct null_private *prv = null_priv(ops); struct null_dom *ndom; @@ -276,7 +279,7 @@ static unsigned int pick_cpu(struct null_private *prv, struct vcpu *v) ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock)); - for_each_affinity_balance_step( bs ) + for_each_affinity_balance_step (bs) { if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v) ) continue; @@ -291,8 +294,9 @@ static unsigned int pick_cpu(struct null_private *prv, struct vcpu *v) * don't, so we get to keep in the scratch cpumask what we have just * put in it.) 
*/ - if ( likely((per_cpu(npc, cpu).vcpu == NULL || per_cpu(npc, cpu).vcpu == v) - && cpumask_test_cpu(cpu, cpumask_scratch_cpu(cpu))) ) + if ( likely((per_cpu(npc, cpu).vcpu == NULL || + per_cpu(npc, cpu).vcpu == v) && + cpumask_test_cpu(cpu, cpumask_scratch_cpu(cpu))) ) { new_cpu = cpu; goto out; @@ -321,10 +325,11 @@ static unsigned int pick_cpu(struct null_private *prv, struct vcpu *v) cpumask_and(cpumask_scratch_cpu(cpu), cpus, v->cpu_hard_affinity); new_cpu = cpumask_any(cpumask_scratch_cpu(cpu)); - out: +out: if ( unlikely(tb_init_done) ) { - struct { + struct + { uint16_t vcpu, dom; uint32_t new_cpu; } d; @@ -348,7 +353,8 @@ static void vcpu_assign(struct null_private *prv, struct vcpu *v, if ( unlikely(tb_init_done) ) { - struct { + struct + { uint16_t vcpu, dom; uint32_t cpu; } d; @@ -369,7 +375,8 @@ static void vcpu_deassign(struct null_private *prv, struct vcpu *v, if ( unlikely(tb_init_done) ) { - struct { + struct + { uint16_t vcpu, dom; uint32_t cpu; } d; @@ -423,7 +430,7 @@ static void null_vcpu_insert(const struct scheduler *ops, struct vcpu *v) ASSERT(!is_idle_vcpu(v)); lock = vcpu_schedule_lock_irq(v); - retry: +retry: cpu = v->processor = pick_cpu(prv, v); @@ -450,7 +457,7 @@ static void null_vcpu_insert(const struct scheduler *ops, struct vcpu *v) * insert or a migrate), but there are other free pCPUs, we can * try to pick again. */ - goto retry; + goto retry; } else { @@ -485,9 +492,9 @@ static void _vcpu_remove(struct null_private *prv, struct vcpu *v) * suitable to be assigned to it (prioritizing vcpus that have * soft-affinity with cpu). */ - for_each_affinity_balance_step( bs ) + for_each_affinity_balance_step (bs) { - list_for_each_entry( wvc, &prv->waitq, waitq_elem ) + list_for_each_entry (wvc, &prv->waitq, waitq_elem) { if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(wvc->vcpu) ) continue; @@ -530,7 +537,7 @@ static void null_vcpu_remove(const struct scheduler *ops, struct vcpu *v) _vcpu_remove(prv, v); - out: +out: vcpu_schedule_unlock_irq(lock, v); SCHED_STAT_CRANK(vcpu_remove); @@ -592,7 +599,8 @@ static void null_vcpu_migrate(const struct scheduler *ops, struct vcpu *v, if ( unlikely(tb_init_done) ) { - struct { + struct + { uint16_t vcpu, dom; uint16_t cpu, new_cpu; } d; @@ -648,7 +656,8 @@ static void null_vcpu_migrate(const struct scheduler *ops, struct vcpu *v, if ( list_empty(&nvc->waitq_elem) ) { list_add_tail(&nvc->waitq_elem, &prv->waitq); - dprintk(XENLOG_G_WARNING, "WARNING: %pv not assigned to any CPU!\n", v); + dprintk(XENLOG_G_WARNING, "WARNING: %pv not assigned to any CPU!\n", + v); } spin_unlock(&prv->waitq_lock); } @@ -667,8 +676,8 @@ static void null_vcpu_migrate(const struct scheduler *ops, struct vcpu *v, #ifndef NDEBUG static inline void null_vcpu_check(struct vcpu *v) { - struct null_vcpu * const nvc = null_vcpu(v); - struct null_dom * const ndom = v->domain->sched_priv; + struct null_vcpu *const nvc = null_vcpu(v); + struct null_dom *const ndom = v->domain->sched_priv; BUG_ON(nvc->vcpu != v); @@ -679,12 +688,11 @@ static inline void null_vcpu_check(struct vcpu *v) SCHED_STAT_CRANK(vcpu_check); } -#define NULL_VCPU_CHECK(v) (null_vcpu_check(v)) +#define NULL_VCPU_CHECK(v) (null_vcpu_check(v)) #else #define NULL_VCPU_CHECK(v) #endif - /* * The most simple scheduling function of all times! 
We either return: * - the vCPU assigned to the pCPU, if there's one and it can run; @@ -705,7 +713,8 @@ static struct task_slice null_schedule(const struct scheduler *ops, if ( unlikely(tb_init_done) ) { - struct { + struct + { uint16_t tasklet, cpu; int16_t vcpu, dom; } d; @@ -752,9 +761,9 @@ static struct task_slice null_schedule(const struct scheduler *ops, * it only in cases where a pcpu has no vcpu associated (e.g., as * said above, the cpu has just joined a cpupool). */ - for_each_affinity_balance_step( bs ) + for_each_affinity_balance_step (bs) { - list_for_each_entry( wvc, &prv->waitq, waitq_elem ) + list_for_each_entry (wvc, &prv->waitq, waitq_elem) { if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(wvc->vcpu) ) @@ -769,7 +778,7 @@ static struct task_slice null_schedule(const struct scheduler *ops, } } } - unlock: + unlock: spin_unlock(&prv->waitq_lock); } @@ -782,9 +791,8 @@ static struct task_slice null_schedule(const struct scheduler *ops, static inline void dump_vcpu(struct null_private *prv, struct null_vcpu *nvc) { - printk("[%i.%i] pcpu=%d", nvc->vcpu->domain->domain_id, - nvc->vcpu->vcpu_id, list_empty(&nvc->waitq_elem) ? - nvc->vcpu->processor : -1); + printk("[%i.%i] pcpu=%d", nvc->vcpu->domain->domain_id, nvc->vcpu->vcpu_id, + list_empty(&nvc->waitq_elem) ? nvc->vcpu->processor : -1); } static void null_dump_pcpu(const struct scheduler *ops, int cpu) @@ -796,10 +804,9 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu) lock = pcpu_schedule_lock_irqsave(cpu, &flags); - printk("CPU[%02d] sibling=%*pb, core=%*pb", - cpu, - nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)), - nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu))); + printk("CPU[%02d] sibling=%*pb, core=%*pb", cpu, nr_cpu_ids, + cpumask_bits(per_cpu(cpu_sibling_mask, cpu)), nr_cpu_ids, + cpumask_bits(per_cpu(cpu_core_mask, cpu))); if ( per_cpu(npc, cpu).vcpu != NULL ) printk(", vcpu=%pv", per_cpu(npc, cpu).vcpu); printk("\n"); @@ -829,7 +836,7 @@ static void null_dump(const struct scheduler *ops) printk("Domain info:\n"); loop = 0; - list_for_each( iter, &prv->ndom ) + list_for_each (iter, &prv->ndom) { struct null_dom *ndom; struct vcpu *v; @@ -837,9 +844,9 @@ static void null_dump(const struct scheduler *ops) ndom = list_entry(iter, struct null_dom, ndom_elem); printk("\tDomain: %d\n", ndom->dom->domain_id); - for_each_vcpu( ndom->dom, v ) + for_each_vcpu (ndom->dom, v) { - struct null_vcpu * const nvc = null_vcpu(v); + struct null_vcpu *const nvc = null_vcpu(v); spinlock_t *lock; lock = vcpu_schedule_lock(nvc->vcpu); @@ -855,7 +862,7 @@ static void null_dump(const struct scheduler *ops) printk("Waitqueue: "); loop = 0; spin_lock(&prv->waitq_lock); - list_for_each( iter, &prv->waitq ) + list_for_each (iter, &prv->waitq) { struct null_vcpu *nvc = list_entry(iter, struct null_vcpu, waitq_elem); @@ -872,33 +879,33 @@ static void null_dump(const struct scheduler *ops) } const struct scheduler sched_null_def = { - .name = "null Scheduler", - .opt_name = "null", - .sched_id = XEN_SCHEDULER_NULL, - .sched_data = NULL, - - .init = null_init, - .deinit = null_deinit, - .init_pdata = null_init_pdata, - .switch_sched = null_switch_sched, - .deinit_pdata = null_deinit_pdata, - - .alloc_vdata = null_alloc_vdata, - .free_vdata = null_free_vdata, - .alloc_domdata = null_alloc_domdata, - .free_domdata = null_free_domdata, - - .insert_vcpu = null_vcpu_insert, - .remove_vcpu = null_vcpu_remove, - - .wake = null_vcpu_wake, - .sleep = null_vcpu_sleep, - .pick_cpu = null_cpu_pick, - .migrate = 
null_vcpu_migrate, - .do_schedule = null_schedule, + .name = "null Scheduler", + .opt_name = "null", + .sched_id = XEN_SCHEDULER_NULL, + .sched_data = NULL, + + .init = null_init, + .deinit = null_deinit, + .init_pdata = null_init_pdata, + .switch_sched = null_switch_sched, + .deinit_pdata = null_deinit_pdata, + + .alloc_vdata = null_alloc_vdata, + .free_vdata = null_free_vdata, + .alloc_domdata = null_alloc_domdata, + .free_domdata = null_free_domdata, + + .insert_vcpu = null_vcpu_insert, + .remove_vcpu = null_vcpu_remove, + + .wake = null_vcpu_wake, + .sleep = null_vcpu_sleep, + .pick_cpu = null_cpu_pick, + .migrate = null_vcpu_migrate, + .do_schedule = null_schedule, .dump_cpu_state = null_dump_pcpu, - .dump_settings = null_dump, + .dump_settings = null_dump, }; REGISTER_SCHEDULER(sched_null_def); diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c index f1b81f0373..36c5982ad0 100644 --- a/xen/common/sched_rt.c +++ b/xen/common/sched_rt.c @@ -85,13 +85,12 @@ * vcpu_insert, vcpu_remove, context_saved, runq_insert */ - /* * Default parameters: * Period and budget in default is 10 and 4 ms, respectively */ -#define RTDS_DEFAULT_PERIOD (MICROSECS(10000)) -#define RTDS_DEFAULT_BUDGET (MICROSECS(4000)) +#define RTDS_DEFAULT_PERIOD (MICROSECS(10000)) +#define RTDS_DEFAULT_BUDGET (MICROSECS(4000)) /* * Max period: max delta of time type, because period is added to the time @@ -99,15 +98,15 @@ * Min period: 10 us, considering the scheduling overhead (when period is * too low, scheduling is invoked too frequently, causing high overhead). */ -#define RTDS_MAX_PERIOD (STIME_DELTA_MAX) -#define RTDS_MIN_PERIOD (MICROSECS(10)) +#define RTDS_MAX_PERIOD (STIME_DELTA_MAX) +#define RTDS_MIN_PERIOD (MICROSECS(10)) /* * Min budget: 10 us, considering the scheduling overhead (when budget is * consumed too fast, scheduling is invoked too frequently, causing * high overhead). */ -#define RTDS_MIN_BUDGET (MICROSECS(10)) +#define RTDS_MIN_BUDGET (MICROSECS(10)) /* * UPDATE_LIMIT_SHIFT: a constant used in rt_update_deadline(). When finding @@ -115,7 +114,7 @@ * between cur_deadline and now is small. If the difference is bigger than * 1024 * period, use multiplication. */ -#define UPDATE_LIMIT_SHIFT 10 +#define UPDATE_LIMIT_SHIFT 10 /* * Flags @@ -130,8 +129,8 @@ * set RTDS_delayed_runq_add * + Checked to be false in runq_insert. */ -#define __RTDS_scheduled 1 -#define RTDS_scheduled (1<<__RTDS_scheduled) +#define __RTDS_scheduled 1 +#define RTDS_scheduled (1 << __RTDS_scheduled) /* * RTDS_delayed_runq_add: Do we need to add this to the RunQ/DepletedQ * once it's done being context switching out? @@ -140,8 +139,8 @@ * + Read in rt_context_saved(). If set, it adds prev to the Runqueue/DepletedQ * and clears the bit. */ -#define __RTDS_delayed_runq_add 2 -#define RTDS_delayed_runq_add (1<<__RTDS_delayed_runq_add) +#define __RTDS_delayed_runq_add 2 +#define RTDS_delayed_runq_add (1 << __RTDS_delayed_runq_add) /* * RTDS_depleted: Does this vcp run out of budget? @@ -150,27 +149,27 @@ * + cleared and checked in the repenishment handler, * for the vcpus that are being replenished. */ -#define __RTDS_depleted 3 -#define RTDS_depleted (1<<__RTDS_depleted) +#define __RTDS_depleted 3 +#define RTDS_depleted (1 << __RTDS_depleted) /* * RTDS_extratime: Can the vcpu run in the time that is * not part of any real-time reservation, and would therefore * be otherwise left idle? 
*/ -#define __RTDS_extratime 4 -#define RTDS_extratime (1<<__RTDS_extratime) +#define __RTDS_extratime 4 +#define RTDS_extratime (1 << __RTDS_extratime) /* * rt tracing events ("only" 512 available!). Check * include/public/trace.h for more details. */ -#define TRC_RTDS_TICKLE TRC_SCHED_CLASS_EVT(RTDS, 1) -#define TRC_RTDS_RUNQ_PICK TRC_SCHED_CLASS_EVT(RTDS, 2) -#define TRC_RTDS_BUDGET_BURN TRC_SCHED_CLASS_EVT(RTDS, 3) +#define TRC_RTDS_TICKLE TRC_SCHED_CLASS_EVT(RTDS, 1) +#define TRC_RTDS_RUNQ_PICK TRC_SCHED_CLASS_EVT(RTDS, 2) +#define TRC_RTDS_BUDGET_BURN TRC_SCHED_CLASS_EVT(RTDS, 3) #define TRC_RTDS_BUDGET_REPLENISH TRC_SCHED_CLASS_EVT(RTDS, 4) -#define TRC_RTDS_SCHED_TASKLET TRC_SCHED_CLASS_EVT(RTDS, 5) -#define TRC_RTDS_SCHEDULE TRC_SCHED_CLASS_EVT(RTDS, 6) +#define TRC_RTDS_SCHED_TASKLET TRC_SCHED_CLASS_EVT(RTDS, 5) +#define TRC_RTDS_SCHEDULE TRC_SCHED_CLASS_EVT(RTDS, 6) static void repl_timer_handler(void *data); @@ -179,23 +178,25 @@ static void repl_timer_handler(void *data); * Global lock is referenced by schedule_data.schedule_lock from all * physical cpus. It can be grabbed via vcpu_schedule_lock_irq() */ -struct rt_private { - spinlock_t lock; /* the global coarse-grained lock */ - struct list_head sdom; /* list of availalbe domains, used for dump */ +struct rt_private +{ + spinlock_t lock; /* the global coarse-grained lock */ + struct list_head sdom; /* list of availalbe domains, used for dump */ struct list_head runq; /* ordered list of runnable vcpus */ struct list_head depletedq; /* unordered list of depleted vcpus */ - struct timer repl_timer; /* replenishment timer */ - struct list_head replq; /* ordered list of vcpus that need replenishment */ + struct timer repl_timer; /* replenishment timer */ + struct list_head replq; /* ordered list of vcpus that need replenishment */ - cpumask_t tickled; /* cpus been tickled */ + cpumask_t tickled; /* cpus been tickled */ }; /* * Virtual CPU */ -struct rt_vcpu { +struct rt_vcpu +{ struct list_head q_elem; /* on the runq/depletedq list */ struct list_head replq_elem; /* on the replenishment events list */ @@ -204,9 +205,9 @@ struct rt_vcpu { s_time_t budget; /* VCPU current information in nanosecond */ - s_time_t cur_budget; /* current budget */ - s_time_t last_start; /* last start time */ - s_time_t cur_deadline; /* current deadline for EDF */ + s_time_t cur_budget; /* current budget */ + s_time_t last_start; /* last start time */ + s_time_t cur_deadline; /* current deadline for EDF */ /* Up-pointers */ struct rt_dom *sdom; @@ -214,13 +215,14 @@ struct rt_vcpu { unsigned priority_level; - unsigned flags; /* mark __RTDS_scheduled, etc.. */ + unsigned flags; /* mark __RTDS_scheduled, etc.. */ }; /* * Domain */ -struct rt_dom { +struct rt_dom +{ struct list_head sdom_elem; /* link list on rt_priv */ struct domain *dom; /* pointer to upper domain */ }; @@ -262,26 +264,22 @@ static inline bool has_extratime(const struct rt_vcpu *svc) * Helper functions for manipulating the runqueue, the depleted queue, * and the replenishment events queue. 
*/ -static int -vcpu_on_q(const struct rt_vcpu *svc) +static int vcpu_on_q(const struct rt_vcpu *svc) { - return !list_empty(&svc->q_elem); + return !list_empty(&svc->q_elem); } -static struct rt_vcpu * -q_elem(struct list_head *elem) +static struct rt_vcpu *q_elem(struct list_head *elem) { return list_entry(elem, struct rt_vcpu, q_elem); } -static struct rt_vcpu * -replq_elem(struct list_head *elem) +static struct rt_vcpu *replq_elem(struct list_head *elem) { return list_entry(elem, struct rt_vcpu, replq_elem); } -static int -vcpu_on_replq(const struct rt_vcpu *svc) +static int vcpu_on_replq(const struct rt_vcpu *svc) { return !list_empty(&svc->replq_elem); } @@ -290,8 +288,8 @@ vcpu_on_replq(const struct rt_vcpu *svc) * If v1 priority >= v2 priority, return value > 0 * Otherwise, return value < 0 */ -static s_time_t -compare_vcpu_priority(const struct rt_vcpu *v1, const struct rt_vcpu *v2) +static s_time_t compare_vcpu_priority(const struct rt_vcpu *v1, + const struct rt_vcpu *v2) { int prio = v2->priority_level - v1->priority_level; @@ -304,14 +302,13 @@ compare_vcpu_priority(const struct rt_vcpu *v1, const struct rt_vcpu *v2) /* * Debug related code, dump vcpu/cpu information */ -static void -rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc) +static void rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc) { cpumask_t *cpupool_mask, *mask; ASSERT(svc != NULL); /* idle vcpu */ - if( svc->sdom == NULL ) + if ( svc->sdom == NULL ) { printk("\n"); return; @@ -328,28 +325,19 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc) cpupool_mask = cpupool_domain_cpumask(svc->vcpu->domain); cpumask_and(mask, cpupool_mask, svc->vcpu->cpu_hard_affinity); - printk("[%5d.%-2u] cpu %u, (%"PRI_stime", %"PRI_stime")," - " cur_b=%"PRI_stime" cur_d=%"PRI_stime" last_start=%"PRI_stime"\n" + printk("[%5d.%-2u] cpu %u, (%" PRI_stime ", %" PRI_stime ")," + " cur_b=%" PRI_stime " cur_d=%" PRI_stime " last_start=%" PRI_stime + "\n" " \t\t priority_level=%d has_extratime=%d\n" " \t\t onQ=%d runnable=%d flags=%x effective hard_affinity=%*pbl\n", - svc->vcpu->domain->domain_id, - svc->vcpu->vcpu_id, - svc->vcpu->processor, - svc->period, - svc->budget, - svc->cur_budget, - svc->cur_deadline, - svc->last_start, - svc->priority_level, - has_extratime(svc), - vcpu_on_q(svc), - vcpu_runnable(svc->vcpu), - svc->flags, - nr_cpu_ids, cpumask_bits(mask)); + svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id, + svc->vcpu->processor, svc->period, svc->budget, svc->cur_budget, + svc->cur_deadline, svc->last_start, svc->priority_level, + has_extratime(svc), vcpu_on_q(svc), vcpu_runnable(svc->vcpu), + svc->flags, nr_cpu_ids, cpumask_bits(mask)); } -static void -rt_dump_pcpu(const struct scheduler *ops, int cpu) +static void rt_dump_pcpu(const struct scheduler *ops, int cpu) { struct rt_private *prv = rt_priv(ops); struct rt_vcpu *svc; @@ -366,8 +354,7 @@ rt_dump_pcpu(const struct scheduler *ops, int cpu) spin_unlock_irqrestore(&prv->lock, flags); } -static void -rt_dump(const struct scheduler *ops) +static void rt_dump(const struct scheduler *ops) { struct list_head *runq, *depletedq, *replq, *iter; struct rt_private *prv = rt_priv(ops); @@ -385,42 +372,42 @@ rt_dump(const struct scheduler *ops) replq = rt_replq(ops); printk("Global RunQueue info:\n"); - list_for_each ( iter, runq ) + list_for_each (iter, runq) { svc = q_elem(iter); rt_dump_vcpu(ops, svc); } printk("Global DepletedQueue info:\n"); - list_for_each ( iter, depletedq ) + list_for_each (iter, depletedq) { svc = 
q_elem(iter); rt_dump_vcpu(ops, svc); } printk("Global Replenishment Events info:\n"); - list_for_each ( iter, replq ) + list_for_each (iter, replq) { svc = replq_elem(iter); rt_dump_vcpu(ops, svc); } printk("Domain info:\n"); - list_for_each ( iter, &prv->sdom ) + list_for_each (iter, &prv->sdom) { struct vcpu *v; sdom = list_entry(iter, struct rt_dom, sdom_elem); printk("\tdomain: %d\n", sdom->dom->domain_id); - for_each_vcpu ( sdom->dom, v ) + for_each_vcpu (sdom->dom, v) { svc = rt_vcpu(v); rt_dump_vcpu(ops, svc); } } - out: +out: spin_unlock_irqrestore(&prv->lock, flags); } @@ -428,8 +415,7 @@ rt_dump(const struct scheduler *ops) * update deadline and budget when now >= cur_deadline * it needs to be updated to the deadline of the current period */ -static void -rt_update_deadline(s_time_t now, struct rt_vcpu *svc) +static void rt_update_deadline(s_time_t now, struct rt_vcpu *svc) { ASSERT(now >= svc->cur_deadline); ASSERT(svc->period != 0); @@ -457,19 +443,18 @@ rt_update_deadline(s_time_t now, struct rt_vcpu *svc) /* TRACE */ { - struct __packed { - unsigned vcpu:16, dom:16; + struct __packed + { + unsigned vcpu : 16, dom : 16; unsigned priority_level; uint64_t cur_deadline, cur_budget; } d; d.dom = svc->vcpu->domain->domain_id; d.vcpu = svc->vcpu->vcpu_id; d.priority_level = svc->priority_level; - d.cur_deadline = (uint64_t) svc->cur_deadline; - d.cur_budget = (uint64_t) svc->cur_budget; - trace_var(TRC_RTDS_BUDGET_REPLENISH, 1, - sizeof(d), - (unsigned char *) &d); + d.cur_deadline = (uint64_t)svc->cur_deadline; + d.cur_budget = (uint64_t)svc->cur_budget; + trace_var(TRC_RTDS_BUDGET_REPLENISH, 1, sizeof(d), (unsigned char *)&d); } return; @@ -487,8 +472,8 @@ rt_update_deadline(s_time_t now, struct rt_vcpu *svc) * cases, if the vcpu with the earliest deadline is what we * are dealing with). */ -static inline bool -deadline_queue_remove(struct list_head *queue, struct list_head *elem) +static inline bool deadline_queue_remove(struct list_head *queue, + struct list_head *elem) { int pos = 0; @@ -500,16 +485,16 @@ deadline_queue_remove(struct list_head *queue, struct list_head *elem) } static inline bool -deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *), +deadline_queue_insert(struct rt_vcpu *(*qelem)(struct list_head *), struct rt_vcpu *svc, struct list_head *elem, struct list_head *queue) { struct list_head *iter; int pos = 0; - list_for_each ( iter, queue ) + list_for_each (iter, queue) { - struct rt_vcpu * iter_svc = (*qelem)(iter); + struct rt_vcpu *iter_svc = (*qelem)(iter); if ( compare_vcpu_priority(svc, iter_svc) > 0 ) break; pos++; @@ -517,25 +502,23 @@ deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *), list_add_tail(elem, iter); return !pos; } -#define deadline_runq_insert(...) \ - deadline_queue_insert(&q_elem, ##__VA_ARGS__) +#define deadline_runq_insert(...) deadline_queue_insert(&q_elem, ##__VA_ARGS__) #define deadline_replq_insert(...) 
\ - deadline_queue_insert(&replq_elem, ##__VA_ARGS__) + deadline_queue_insert(&replq_elem, ##__VA_ARGS__) -static inline void -q_remove(struct rt_vcpu *svc) +static inline void q_remove(struct rt_vcpu *svc) { - ASSERT( vcpu_on_q(svc) ); + ASSERT(vcpu_on_q(svc)); list_del_init(&svc->q_elem); } -static inline void -replq_remove(const struct scheduler *ops, struct rt_vcpu *svc) +static inline void replq_remove(const struct scheduler *ops, + struct rt_vcpu *svc) { struct rt_private *prv = rt_priv(ops); struct list_head *replq = rt_replq(ops); - ASSERT( vcpu_on_replq(svc) ); + ASSERT(vcpu_on_replq(svc)); if ( deadline_queue_remove(replq, &svc->replq_elem) ) { @@ -560,31 +543,28 @@ replq_remove(const struct scheduler *ops, struct rt_vcpu *svc) * vcpus with smaller deadlines go first. * Insert svc without budget in DepletedQ unsorted; */ -static void -runq_insert(const struct scheduler *ops, struct rt_vcpu *svc) +static void runq_insert(const struct scheduler *ops, struct rt_vcpu *svc) { struct rt_private *prv = rt_priv(ops); struct list_head *runq = rt_runq(ops); - ASSERT( spin_is_locked(&prv->lock) ); - ASSERT( !vcpu_on_q(svc) ); - ASSERT( vcpu_on_replq(svc) ); + ASSERT(spin_is_locked(&prv->lock)); + ASSERT(!vcpu_on_q(svc)); + ASSERT(vcpu_on_replq(svc)); /* add svc to runq if svc still has budget or its extratime is set */ - if ( svc->cur_budget > 0 || - has_extratime(svc) ) + if ( svc->cur_budget > 0 || has_extratime(svc) ) deadline_runq_insert(svc, &svc->q_elem, runq); else list_add(&svc->q_elem, &prv->depletedq); } -static void -replq_insert(const struct scheduler *ops, struct rt_vcpu *svc) +static void replq_insert(const struct scheduler *ops, struct rt_vcpu *svc) { struct list_head *replq = rt_replq(ops); struct rt_private *prv = rt_priv(ops); - ASSERT( !vcpu_on_replq(svc) ); + ASSERT(!vcpu_on_replq(svc)); /* * The timer may be re-programmed if svc is inserted @@ -600,14 +580,13 @@ replq_insert(const struct scheduler *ops, struct rt_vcpu *svc) * deadline (and hence its replenishment time) could have * changed. */ -static void -replq_reinsert(const struct scheduler *ops, struct rt_vcpu *svc) +static void replq_reinsert(const struct scheduler *ops, struct rt_vcpu *svc) { struct list_head *replq = rt_replq(ops); struct rt_vcpu *rearm_svc = svc; bool_t rearm = 0; - ASSERT( vcpu_on_replq(svc) ); + ASSERT(vcpu_on_replq(svc)); /* * If svc was at the front of the replenishment queue, we certainly @@ -636,8 +615,7 @@ replq_reinsert(const struct scheduler *ops, struct rt_vcpu *svc) * Valid CPU of a vcpu is intesection of vcpu's affinity * and available cpus */ -static int -rt_cpu_pick(const struct scheduler *ops, struct vcpu *vc) +static int rt_cpu_pick(const struct scheduler *ops, struct vcpu *vc) { cpumask_t cpus; cpumask_t *online; @@ -647,9 +625,9 @@ rt_cpu_pick(const struct scheduler *ops, struct vcpu *vc) cpumask_and(&cpus, online, vc->cpu_hard_affinity); cpu = cpumask_test_cpu(vc->processor, &cpus) - ? vc->processor - : cpumask_cycle(vc->processor, &cpus); - ASSERT( !cpumask_empty(&cpus) && cpumask_test_cpu(cpu, &cpus) ); + ? 
vc->processor + : cpumask_cycle(vc->processor, &cpus); + ASSERT(!cpumask_empty(&cpus) && cpumask_test_cpu(cpu, &cpus)); return cpu; } @@ -657,8 +635,7 @@ rt_cpu_pick(const struct scheduler *ops, struct vcpu *vc) /* * Init/Free related code */ -static int -rt_init(struct scheduler *ops) +static int rt_init(struct scheduler *ops) { int rc = -ENOMEM; struct rt_private *prv = xzalloc(struct rt_private); @@ -679,15 +656,14 @@ rt_init(struct scheduler *ops) ops->sched_data = prv; rc = 0; - err: +err: if ( rc ) xfree(prv); return rc; } -static void -rt_deinit(struct scheduler *ops) +static void rt_deinit(struct scheduler *ops) { struct rt_private *prv = rt_priv(ops); @@ -702,8 +678,7 @@ rt_deinit(struct scheduler *ops) * Point per_cpu spinlock to the global system lock; * All cpu have same global system lock */ -static void -rt_init_pdata(const struct scheduler *ops, void *pdata, int cpu) +static void rt_init_pdata(const struct scheduler *ops, void *pdata, int cpu) { struct rt_private *prv = rt_priv(ops); spinlock_t *old_lock; @@ -729,9 +704,8 @@ rt_init_pdata(const struct scheduler *ops, void *pdata, int cpu) } /* Change the scheduler of cpu to us (RTDS). */ -static void -rt_switch_sched(struct scheduler *new_ops, unsigned int cpu, - void *pdata, void *vdata) +static void rt_switch_sched(struct scheduler *new_ops, unsigned int cpu, + void *pdata, void *vdata) { struct rt_private *prv = rt_priv(new_ops); struct rt_vcpu *svc = vdata; @@ -773,8 +747,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu, per_cpu(schedule_data, cpu).schedule_lock = &prv->lock; } -static void -rt_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu) +static void rt_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu) { unsigned long flags; struct rt_private *prv = rt_priv(ops); @@ -805,12 +778,11 @@ rt_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu) spin_unlock_irqrestore(&prv->lock, flags); } -static void * -rt_alloc_domdata(const struct scheduler *ops, struct domain *dom) +static void *rt_alloc_domdata(const struct scheduler *ops, struct domain *dom) { unsigned long flags; struct rt_dom *sdom; - struct rt_private * prv = rt_priv(ops); + struct rt_private *prv = rt_priv(ops); sdom = xzalloc(struct rt_dom); if ( sdom == NULL ) @@ -827,8 +799,7 @@ rt_alloc_domdata(const struct scheduler *ops, struct domain *dom) return sdom; } -static void -rt_free_domdata(const struct scheduler *ops, void *data) +static void rt_free_domdata(const struct scheduler *ops, void *data) { struct rt_dom *sdom = data; struct rt_private *prv = rt_priv(ops); @@ -845,8 +816,8 @@ rt_free_domdata(const struct scheduler *ops, void *data) } } -static void * -rt_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd) +static void *rt_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, + void *dd) { struct rt_vcpu *svc; @@ -873,8 +844,7 @@ rt_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd) return svc; } -static void -rt_free_vdata(const struct scheduler *ops, void *priv) +static void rt_free_vdata(const struct scheduler *ops, void *priv) { struct rt_vcpu *svc = priv; @@ -888,14 +858,13 @@ rt_free_vdata(const struct scheduler *ops, void *priv) * It inserts vcpus of moving domain to the scheduler's RunQ in * dest. cpupool. 
*/ -static void -rt_vcpu_insert(const struct scheduler *ops, struct vcpu *vc) +static void rt_vcpu_insert(const struct scheduler *ops, struct vcpu *vc) { struct rt_vcpu *svc = rt_vcpu(vc); s_time_t now; spinlock_t *lock; - BUG_ON( is_idle_vcpu(vc) ); + BUG_ON(is_idle_vcpu(vc)); /* This is safe because vc isn't yet being scheduled */ vc->processor = rt_cpu_pick(ops, vc); @@ -921,23 +890,22 @@ rt_vcpu_insert(const struct scheduler *ops, struct vcpu *vc) /* * Remove rt_vcpu svc from the old scheduler in source cpupool. */ -static void -rt_vcpu_remove(const struct scheduler *ops, struct vcpu *vc) +static void rt_vcpu_remove(const struct scheduler *ops, struct vcpu *vc) { - struct rt_vcpu * const svc = rt_vcpu(vc); - struct rt_dom * const sdom = svc->sdom; + struct rt_vcpu *const svc = rt_vcpu(vc); + struct rt_dom *const sdom = svc->sdom; spinlock_t *lock; SCHED_STAT_CRANK(vcpu_remove); - BUG_ON( sdom == NULL ); + BUG_ON(sdom == NULL); lock = vcpu_schedule_lock_irq(vc); if ( vcpu_on_q(svc) ) q_remove(svc); if ( vcpu_on_replq(svc) ) - replq_remove(ops,svc); + replq_remove(ops, svc); vcpu_schedule_unlock_irq(lock, vc); } @@ -945,8 +913,8 @@ rt_vcpu_remove(const struct scheduler *ops, struct vcpu *vc) /* * Burn budget in nanosecond granularity */ -static void -burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now) +static void burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, + s_time_t now) { s_time_t delta; @@ -962,8 +930,9 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now) */ if ( delta < 0 ) { - printk("%s, ATTENTION: now is behind last_start! delta=%"PRI_stime"\n", - __func__, delta); + printk("%s, ATTENTION: now is behind last_start! delta=%" PRI_stime + "\n", + __func__, delta); svc->last_start = now; return; } @@ -987,8 +956,9 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now) /* TRACE */ { - struct __packed { - unsigned vcpu:16, dom:16; + struct __packed + { + unsigned vcpu : 16, dom : 16; uint64_t cur_budget; int delta; unsigned priority_level; @@ -996,13 +966,11 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now) } d; d.dom = svc->vcpu->domain->domain_id; d.vcpu = svc->vcpu->vcpu_id; - d.cur_budget = (uint64_t) svc->cur_budget; + d.cur_budget = (uint64_t)svc->cur_budget; d.delta = delta; d.priority_level = svc->priority_level; d.has_extratime = svc->flags & RTDS_extratime; - trace_var(TRC_RTDS_BUDGET_BURN, 1, - sizeof(d), - (unsigned char *) &d); + trace_var(TRC_RTDS_BUDGET_BURN, 1, sizeof(d), (unsigned char *)&d); } } @@ -1010,8 +978,8 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now) * RunQ is sorted. Pick first one within cpumask. 
If no one, return NULL * lock is grabbed before calling this function */ -static struct rt_vcpu * -runq_pick(const struct scheduler *ops, const cpumask_t *mask) +static struct rt_vcpu *runq_pick(const struct scheduler *ops, + const cpumask_t *mask) { struct list_head *runq = rt_runq(ops); struct list_head *iter; @@ -1020,7 +988,7 @@ runq_pick(const struct scheduler *ops, const cpumask_t *mask) cpumask_t cpu_common; cpumask_t *online; - list_for_each ( iter, runq ) + list_for_each (iter, runq) { iter_svc = q_elem(iter); @@ -1031,7 +999,7 @@ runq_pick(const struct scheduler *ops, const cpumask_t *mask) if ( cpumask_empty(&cpu_common) ) continue; - ASSERT( iter_svc->cur_budget > 0 ); + ASSERT(iter_svc->cur_budget > 0); svc = iter_svc; break; @@ -1039,19 +1007,18 @@ runq_pick(const struct scheduler *ops, const cpumask_t *mask) /* TRACE */ { - if( svc != NULL ) + if ( svc != NULL ) { - struct __packed { - unsigned vcpu:16, dom:16; + struct __packed + { + unsigned vcpu : 16, dom : 16; uint64_t cur_deadline, cur_budget; } d; d.dom = svc->vcpu->domain->domain_id; d.vcpu = svc->vcpu->vcpu_id; - d.cur_deadline = (uint64_t) svc->cur_deadline; - d.cur_budget = (uint64_t) svc->cur_budget; - trace_var(TRC_RTDS_RUNQ_PICK, 1, - sizeof(d), - (unsigned char *) &d); + d.cur_deadline = (uint64_t)svc->cur_deadline; + d.cur_budget = (uint64_t)svc->cur_budget; + trace_var(TRC_RTDS_RUNQ_PICK, 1, sizeof(d), (unsigned char *)&d); } } @@ -1062,27 +1029,26 @@ runq_pick(const struct scheduler *ops, const cpumask_t *mask) * schedule function for rt scheduler. * The lock is already grabbed in schedule.c, no need to lock here */ -static struct task_slice -rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_scheduled) +static struct task_slice rt_schedule(const struct scheduler *ops, s_time_t now, + bool_t tasklet_work_scheduled) { const int cpu = smp_processor_id(); struct rt_private *prv = rt_priv(ops); struct rt_vcpu *const scurr = rt_vcpu(current); struct rt_vcpu *snext = NULL; - struct task_slice ret = { .migrated = 0 }; + struct task_slice ret = {.migrated = 0}; /* TRACE */ { - struct __packed { - unsigned cpu:16, tasklet:8, tickled:4, idle:4; + struct __packed + { + unsigned cpu : 16, tasklet : 8, tickled : 4, idle : 4; } d; d.cpu = cpu; d.tasklet = tasklet_work_scheduled; d.tickled = cpumask_test_cpu(cpu, &prv->tickled); d.idle = is_idle_vcpu(current); - trace_var(TRC_RTDS_SCHEDULE, 1, - sizeof(d), - (unsigned char *)&d); + trace_var(TRC_RTDS_SCHEDULE, 1, sizeof(d), (unsigned char *)&d); } /* clear ticked bit now that we've been scheduled */ @@ -1093,7 +1059,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched if ( tasklet_work_scheduled ) { - trace_var(TRC_RTDS_SCHED_TASKLET, 1, 0, NULL); + trace_var(TRC_RTDS_SCHED_TASKLET, 1, 0, NULL); snext = rt_vcpu(idle_vcpu[cpu]); } else @@ -1103,21 +1069,18 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched snext = rt_vcpu(idle_vcpu[cpu]); /* if scurr has higher priority and budget, still pick scurr */ - if ( !is_idle_vcpu(current) && - vcpu_runnable(current) && + if ( !is_idle_vcpu(current) && vcpu_runnable(current) && scurr->cur_budget > 0 && - ( is_idle_vcpu(snext->vcpu) || - compare_vcpu_priority(scurr, snext) > 0 ) ) + (is_idle_vcpu(snext->vcpu) || + compare_vcpu_priority(scurr, snext) > 0) ) snext = scurr; } - if ( snext != scurr && - !is_idle_vcpu(current) && - vcpu_runnable(current) ) + if ( snext != scurr && !is_idle_vcpu(current) && vcpu_runnable(current) ) 
__set_bit(__RTDS_delayed_runq_add, &scurr->flags); snext->last_start = now; - ret.time = -1; /* if an idle vcpu is picked */ + ret.time = -1; /* if an idle vcpu is picked */ if ( !is_idle_vcpu(snext->vcpu) ) { if ( snext != scurr ) @@ -1141,12 +1104,11 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched * Remove VCPU from RunQ * The lock is already grabbed in schedule.c, no need to lock here */ -static void -rt_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc) +static void rt_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc) { - struct rt_vcpu * const svc = rt_vcpu(vc); + struct rt_vcpu *const svc = rt_vcpu(vc); - BUG_ON( is_idle_vcpu(vc) ); + BUG_ON(is_idle_vcpu(vc)); SCHED_STAT_CRANK(vcpu_sleep); if ( curr_on_cpu(vc->processor) == vc ) @@ -1178,8 +1140,7 @@ rt_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc) * * lock is grabbed before calling this function */ -static void -runq_tickle(const struct scheduler *ops, struct rt_vcpu *new) +static void runq_tickle(const struct scheduler *ops, struct rt_vcpu *new) { struct rt_private *prv = rt_priv(ops); struct rt_vcpu *latest_deadline_vcpu = NULL; /* lowest priority */ @@ -1202,7 +1163,7 @@ runq_tickle(const struct scheduler *ops, struct rt_vcpu *new) * The same loop also find the one with lowest priority. */ cpu = cpumask_test_or_cycle(new->vcpu->processor, ¬_tickled); - while ( cpu!= nr_cpu_ids ) + while ( cpu != nr_cpu_ids ) { iter_vc = curr_on_cpu(cpu); if ( is_idle_vcpu(iter_vc) ) @@ -1232,17 +1193,16 @@ runq_tickle(const struct scheduler *ops, struct rt_vcpu *new) /* didn't tickle any cpu */ SCHED_STAT_CRANK(tickled_no_cpu); return; - out: +out: /* TRACE */ { - struct { - unsigned cpu:16, pad:16; + struct + { + unsigned cpu : 16, pad : 16; } d; d.cpu = cpu_to_tickle; d.pad = 0; - trace_var(TRC_RTDS_TICKLE, 1, - sizeof(d), - (unsigned char *)&d); + trace_var(TRC_RTDS_TICKLE, 1, sizeof(d), (unsigned char *)&d); } cpumask_set_cpu(cpu_to_tickle, &prv->tickled); @@ -1256,14 +1216,13 @@ runq_tickle(const struct scheduler *ops, struct rt_vcpu *new) * The lock is already grabbed in schedule.c, no need to lock here * TODO: what if these two vcpus belongs to the same domain? */ -static void -rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) +static void rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) { - struct rt_vcpu * const svc = rt_vcpu(vc); + struct rt_vcpu *const svc = rt_vcpu(vc); s_time_t now; bool_t missed; - BUG_ON( is_idle_vcpu(vc) ); + BUG_ON(is_idle_vcpu(vc)); if ( unlikely(curr_on_cpu(vc->processor) == vc) ) { @@ -1289,7 +1248,7 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) */ now = NOW(); - missed = ( now >= svc->cur_deadline ); + missed = (now >= svc->cur_deadline); if ( missed ) rt_update_deadline(now, svc); @@ -1310,7 +1269,7 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) * and queue a new one (to occur at our new deadline). 
*/ if ( missed ) - replq_reinsert(ops, svc); + replq_reinsert(ops, svc); return; } @@ -1326,8 +1285,7 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) * scurr has finished context switch, insert it back to the RunQ, * and then pick the highest priority vcpu from runq to run */ -static void -rt_context_saved(const struct scheduler *ops, struct vcpu *vc) +static void rt_context_saved(const struct scheduler *ops, struct vcpu *vc) { struct rt_vcpu *svc = rt_vcpu(vc); spinlock_t *lock = vcpu_schedule_lock_irq(vc); @@ -1353,11 +1311,8 @@ out: /* * set/get each vcpu info of each domain */ -static int -rt_dom_cntl( - const struct scheduler *ops, - struct domain *d, - struct xen_domctl_scheduler_op *op) +static int rt_dom_cntl(const struct scheduler *ops, struct domain *d, + struct xen_domctl_scheduler_op *op) { struct rt_private *prv = rt_priv(ops); struct rt_vcpu *svc; @@ -1368,7 +1323,7 @@ rt_dom_cntl( s_time_t period, budget; uint32_t index = 0; - switch ( op->cmd ) + switch (op->cmd) { case XEN_DOMCTL_SCHEDOP_getinfo: /* Return the default parameters. */ @@ -1382,10 +1337,11 @@ rt_dom_cntl( break; } spin_lock_irqsave(&prv->lock, flags); - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { svc = rt_vcpu(v); - svc->period = MICROSECS(op->u.rtds.period); /* transfer to nanosec */ + svc->period = + MICROSECS(op->u.rtds.period); /* transfer to nanosec */ svc->budget = MICROSECS(op->u.rtds.budget); } spin_unlock_irqrestore(&prv->lock, flags); @@ -1394,8 +1350,7 @@ rt_dom_cntl( case XEN_DOMCTL_SCHEDOP_putvcpuinfo: while ( index < op->u.v.nr_vcpus ) { - if ( copy_from_guest_offset(&local_sched, - op->u.v.vcpus, index, 1) ) + if ( copy_from_guest_offset(&local_sched, op->u.v.vcpus, index, 1) ) { rc = -EFAULT; break; @@ -1419,8 +1374,8 @@ rt_dom_cntl( local_sched.u.rtds.flags &= ~XEN_DOMCTL_SCHEDRT_extra; spin_unlock_irqrestore(&prv->lock, flags); - if ( copy_to_guest_offset(op->u.v.vcpus, index, - &local_sched, 1) ) + if ( copy_to_guest_offset(op->u.v.vcpus, index, &local_sched, + 1) ) { rc = -EFAULT; break; @@ -1464,7 +1419,8 @@ rt_dom_cntl( * The replenishment timer handler picks vcpus * from the replq and does the actual replenishment. */ -static void repl_timer_handler(void *data){ +static void repl_timer_handler(void *data) +{ s_time_t now; struct scheduler *ops = data; struct rt_private *prv = rt_priv(ops); @@ -1484,7 +1440,7 @@ static void repl_timer_handler(void *data){ * If svc is on run queue, we need to put it at * the correct place since its deadline changes. */ - list_for_each_safe ( iter, tmp, replq ) + list_for_each_safe (iter, tmp, replq) { svc = replq_elem(iter); @@ -1509,7 +1465,7 @@ static void repl_timer_handler(void *data){ * If an updated vcpu was depleted and on the runqueue, tickle it. * Finally, reinsert the vcpus back to replenishement events list. 
*/ - list_for_each_safe ( iter, tmp, &tmp_replq ) + list_for_each_safe (iter, tmp, &tmp_replq) { svc = replq_elem(iter); @@ -1541,32 +1497,32 @@ static void repl_timer_handler(void *data){ } static const struct scheduler sched_rtds_def = { - .name = "SMP RTDS Scheduler", - .opt_name = "rtds", - .sched_id = XEN_SCHEDULER_RTDS, - .sched_data = NULL, + .name = "SMP RTDS Scheduler", + .opt_name = "rtds", + .sched_id = XEN_SCHEDULER_RTDS, + .sched_data = NULL, .dump_cpu_state = rt_dump_pcpu, - .dump_settings = rt_dump, - .init = rt_init, - .deinit = rt_deinit, - .init_pdata = rt_init_pdata, - .switch_sched = rt_switch_sched, - .deinit_pdata = rt_deinit_pdata, - .alloc_domdata = rt_alloc_domdata, - .free_domdata = rt_free_domdata, - .alloc_vdata = rt_alloc_vdata, - .free_vdata = rt_free_vdata, - .insert_vcpu = rt_vcpu_insert, - .remove_vcpu = rt_vcpu_remove, - - .adjust = rt_dom_cntl, - - .pick_cpu = rt_cpu_pick, - .do_schedule = rt_schedule, - .sleep = rt_vcpu_sleep, - .wake = rt_vcpu_wake, - .context_saved = rt_context_saved, + .dump_settings = rt_dump, + .init = rt_init, + .deinit = rt_deinit, + .init_pdata = rt_init_pdata, + .switch_sched = rt_switch_sched, + .deinit_pdata = rt_deinit_pdata, + .alloc_domdata = rt_alloc_domdata, + .free_domdata = rt_free_domdata, + .alloc_vdata = rt_alloc_vdata, + .free_vdata = rt_free_vdata, + .insert_vcpu = rt_vcpu_insert, + .remove_vcpu = rt_vcpu_remove, + + .adjust = rt_dom_cntl, + + .pick_cpu = rt_cpu_pick, + .do_schedule = rt_schedule, + .sleep = rt_vcpu_sleep, + .wake = rt_vcpu_wake, + .context_saved = rt_context_saved, }; REGISTER_SCHEDULER(sched_rtds_def); diff --git a/xen/common/schedule.c b/xen/common/schedule.c index fd587622f4..e26d3f83b9 100644 --- a/xen/common/schedule.c +++ b/xen/common/schedule.c @@ -50,7 +50,8 @@ bool_t sched_smt_power_savings = 0; boolean_param("sched_smt_power_savings", sched_smt_power_savings); /* Default scheduling rate limit: 1ms - * The behavior when sched_ratelimit_us is greater than sched_credit_tslice_ms is undefined + * The behavior when sched_ratelimit_us is greater than sched_credit_tslice_ms + * is undefined * */ int sched_ratelimit_us = SCHED_DEFAULT_RATELIMIT_US; integer_param("sched_ratelimit_us", sched_ratelimit_us); @@ -67,15 +68,16 @@ DEFINE_PER_CPU(struct scheduler *, scheduler); /* Scratch space for cpumasks. */ DEFINE_PER_CPU(cpumask_t, cpumask_scratch); -extern const struct scheduler *__start_schedulers_array[], *__end_schedulers_array[]; +extern const struct scheduler *__start_schedulers_array[], + *__end_schedulers_array[]; #define NUM_SCHEDULERS (__end_schedulers_array - __start_schedulers_array) #define schedulers __start_schedulers_array static struct scheduler __read_mostly ops; -#define SCHED_OP(opsptr, fn, ...) \ - (( (opsptr)->fn != NULL ) ? (opsptr)->fn(opsptr, ##__VA_ARGS__ ) \ - : (typeof((opsptr)->fn(opsptr, ##__VA_ARGS__)))0 ) +#define SCHED_OP(opsptr, fn, ...) \ + (((opsptr)->fn != NULL) ? 
(opsptr)->fn(opsptr, ##__VA_ARGS__) \ + : (typeof((opsptr)->fn(opsptr, ##__VA_ARGS__)))0) static inline struct scheduler *dom_scheduler(const struct domain *d) { @@ -120,7 +122,10 @@ static inline struct scheduler *vcpu_scheduler(const struct vcpu *v) static inline void trace_runstate_change(struct vcpu *v, int new_state) { - struct { uint32_t vcpu:16, domain:16; } d; + struct + { + uint32_t vcpu : 16, domain : 16; + } d; uint32_t event; if ( likely(!tb_init_done) ) @@ -130,15 +135,18 @@ static inline void trace_runstate_change(struct vcpu *v, int new_state) d.domain = v->domain->domain_id; event = TRC_SCHED_RUNSTATE_CHANGE; - event |= ( v->runstate.state & 0x3 ) << 8; - event |= ( new_state & 0x3 ) << 4; + event |= (v->runstate.state & 0x3) << 8; + event |= (new_state & 0x3) << 4; - __trace_var(event, 1/*tsc*/, sizeof(d), &d); + __trace_var(event, 1 /*tsc*/, sizeof(d), &d); } static inline void trace_continue_running(struct vcpu *v) { - struct { uint32_t vcpu:16, domain:16; } d; + struct + { + uint32_t vcpu : 16, domain : 16; + } d; if ( likely(!tb_init_done) ) return; @@ -146,7 +154,7 @@ static inline void trace_continue_running(struct vcpu *v) d.vcpu = v->vcpu_id; d.domain = v->domain->domain_id; - __trace_var(TRC_SCHED_CONTINUE_RUNNING, 1/*tsc*/, sizeof(d), &d); + __trace_var(TRC_SCHED_CONTINUE_RUNNING, 1 /*tsc*/, sizeof(d), &d); } static inline void vcpu_urgent_count_update(struct vcpu *v) @@ -160,7 +168,7 @@ static inline void vcpu_urgent_count_update(struct vcpu *v) !test_bit(v->vcpu_id, v->domain->poll_mask) ) { v->is_urgent = 0; - atomic_dec(&per_cpu(schedule_data,v->processor).urgent_count); + atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count); } } else @@ -169,18 +177,18 @@ static inline void vcpu_urgent_count_update(struct vcpu *v) unlikely(test_bit(v->vcpu_id, v->domain->poll_mask)) ) { v->is_urgent = 1; - atomic_inc(&per_cpu(schedule_data,v->processor).urgent_count); + atomic_inc(&per_cpu(schedule_data, v->processor).urgent_count); } } } -static inline void vcpu_runstate_change( - struct vcpu *v, int new_state, s_time_t new_entry_time) +static inline void vcpu_runstate_change(struct vcpu *v, int new_state, + s_time_t new_entry_time) { s_time_t delta; ASSERT(v->runstate.state != new_state); - ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock)); + ASSERT(spin_is_locked(per_cpu(schedule_data, v->processor).schedule_lock)); vcpu_urgent_count_update(v); @@ -212,7 +220,7 @@ void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate) uint64_t get_cpu_idle_time(unsigned int cpu) { - struct vcpu_runstate_info state = { 0 }; + struct vcpu_runstate_info state = {0}; struct vcpu *v = idle_vcpu[cpu]; if ( cpu_online(cpu) && v ) @@ -263,15 +271,11 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor) v->processor = processor; /* Initialise the per-vcpu timers. 
*/ - init_timer(&v->periodic_timer, vcpu_periodic_timer_fn, - v, v->processor); - init_timer(&v->singleshot_timer, vcpu_singleshot_timer_fn, - v, v->processor); - init_timer(&v->poll_timer, poll_timer_fn, - v, v->processor); - - v->sched_priv = SCHED_OP(dom_scheduler(d), alloc_vdata, v, - d->sched_priv); + init_timer(&v->periodic_timer, vcpu_periodic_timer_fn, v, v->processor); + init_timer(&v->singleshot_timer, vcpu_singleshot_timer_fn, v, v->processor); + init_timer(&v->poll_timer, poll_timer_fn, v, v->processor); + + v->sched_priv = SCHED_OP(dom_scheduler(d), alloc_vdata, v, d->sched_priv); if ( v->sched_priv == NULL ) return 1; @@ -314,7 +318,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c) struct scheduler *old_ops; void *old_domdata; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { if ( v->affinity_broken ) return -EBUSY; @@ -331,12 +335,12 @@ int sched_move_domain(struct domain *d, struct cpupool *c) return -ENOMEM; } - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { vcpu_priv[v->vcpu_id] = SCHED_OP(c->sched, alloc_vdata, v, domdata); if ( vcpu_priv[v->vcpu_id] == NULL ) { - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) xfree(vcpu_priv[v->vcpu_id]); xfree(vcpu_priv); sched_free_domdata(c->sched, domdata); @@ -349,7 +353,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c) old_ops = dom_scheduler(d); old_domdata = d->sched_priv; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { SCHED_OP(old_ops, remove_vcpu, v); } @@ -358,7 +362,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c) d->sched_priv = domdata; new_p = cpumask_first(c->cpu_valid); - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { spinlock_t *lock; cpumask_t allcpus; @@ -457,7 +461,7 @@ void sched_destroy_domain(struct domain *d) void vcpu_sleep_nosync_locked(struct vcpu *v) { - ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock)); + ASSERT(spin_is_locked(per_cpu(schedule_data, v->processor).schedule_lock)); if ( likely(!vcpu_runnable(v)) ) { @@ -642,7 +646,7 @@ static void vcpu_migrate_finish(struct vcpu *v) return; old_cpu = new_cpu = v->processor; - for ( ; ; ) + for ( ;; ) { /* * We need another iteration if the pre-calculated lock addresses @@ -691,8 +695,7 @@ static void vcpu_migrate_finish(struct vcpu *v) * because they both happen in (different) spinlock regions, and those * regions are strictly serialised. */ - if ( v->is_running || - !test_and_clear_bit(_VPF_migrating, &v->pause_flags) ) + if ( v->is_running || !test_and_clear_bit(_VPF_migrating, &v->pause_flags) ) { sched_spin_unlock_double(old_lock, new_lock, flags); return; @@ -734,7 +737,7 @@ void restore_vcpu_affinity(struct domain *d) ASSERT(system_state == SYS_STATE_resume); - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { spinlock_t *lock; unsigned int old_cpu = v->processor; @@ -747,7 +750,6 @@ void restore_vcpu_affinity(struct domain *d) { sched_set_affinity(v, v->cpu_hard_affinity_saved, NULL); v->affinity_broken = 0; - } /* @@ -801,9 +803,9 @@ int cpu_disable_scheduler(unsigned int cpu) * - when we are called for CPU teardown, we're in stop-machine context, * so that's not be a problem. 
*/ - for_each_domain_in_cpupool ( d, c ) + for_each_domain_in_cpupool(d, c) { - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { unsigned long flags; spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags); @@ -822,7 +824,7 @@ int cpu_disable_scheduler(unsigned int cpu) break; } - if (system_state == SYS_STATE_suspend) + if ( system_state == SYS_STATE_suspend ) { cpumask_copy(v->cpu_hard_affinity_saved, v->cpu_hard_affinity); @@ -909,8 +911,8 @@ int cpu_disable_scheduler(unsigned int cpu) * races, and it's fine to not take the look (we're talking about * dom0_setup_vcpu() an sched_init_vcpu()). */ -void sched_set_affinity( - struct vcpu *v, const cpumask_t *hard, const cpumask_t *soft) +void sched_set_affinity(struct vcpu *v, const cpumask_t *hard, + const cpumask_t *soft) { SCHED_OP(dom_scheduler(v->domain), adjust_affinity, v, hard, soft); @@ -919,14 +921,13 @@ void sched_set_affinity( if ( soft ) cpumask_copy(v->cpu_soft_affinity, soft); - v->soft_aff_effective = !cpumask_subset(v->cpu_hard_affinity, - v->cpu_soft_affinity) && - cpumask_intersects(v->cpu_soft_affinity, - v->cpu_hard_affinity); + v->soft_aff_effective = + !cpumask_subset(v->cpu_hard_affinity, v->cpu_soft_affinity) && + cpumask_intersects(v->cpu_soft_affinity, v->cpu_hard_affinity); } -static int vcpu_set_affinity( - struct vcpu *v, const cpumask_t *affinity, const cpumask_t *which) +static int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity, + const cpumask_t *which) { spinlock_t *lock; int ret = 0; @@ -1012,11 +1013,11 @@ static void vcpu_block_enable_events(void) static long do_poll(struct sched_poll *sched_poll) { - struct vcpu *v = current; + struct vcpu *v = current; struct domain *d = v->domain; - evtchn_port_t port = 0; - long rc; - unsigned int i; + evtchn_port_t port = 0; + long rc; + unsigned int i; /* Fairly arbitrary limit. */ if ( sched_poll->nr_ports > 128 ) @@ -1041,8 +1042,7 @@ static long do_poll(struct sched_poll *sched_poll) * this point others can be guaranteed to clean up for us if they wake us. */ rc = 0; - if ( (v->poll_evtchn == 0) || - !test_bit(_VPF_blocked, &v->pause_flags) || + if ( (v->poll_evtchn == 0) || !test_bit(_VPF_blocked, &v->pause_flags) || !test_bit(v->vcpu_id, d->poll_mask) ) goto out; #endif @@ -1077,7 +1077,7 @@ static long do_poll(struct sched_poll *sched_poll) return 0; - out: +out: v->poll_evtchn = 0; clear_bit(v->vcpu_id, d->poll_mask); clear_bit(_VPF_blocked, &v->pause_flags); @@ -1087,7 +1087,7 @@ static long do_poll(struct sched_poll *sched_poll) /* Voluntarily yield the processor for this allocation. 
*/ long vcpu_yield(void) { - struct vcpu * v=current; + struct vcpu *v = current; spinlock_t *lock = vcpu_schedule_lock_irq(v); SCHED_OP(vcpu_scheduler(v), yield, v); @@ -1221,7 +1221,7 @@ ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) { ret_t ret = 0; - switch ( cmd ) + switch (cmd) { case SCHEDOP_yield: { @@ -1243,9 +1243,8 @@ ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) if ( copy_from_guest(&sched_shutdown, arg, 1) ) break; - TRACE_3D(TRC_SCHED_SHUTDOWN, - current->domain->domain_id, current->vcpu_id, - sched_shutdown.reason); + TRACE_3D(TRC_SCHED_SHUTDOWN, current->domain->domain_id, + current->vcpu_id, sched_shutdown.reason); ret = domain_shutdown(current->domain, (u8)sched_shutdown.reason); break; @@ -1260,8 +1259,8 @@ ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) if ( copy_from_guest(&sched_shutdown, arg, 1) ) break; - TRACE_3D(TRC_SCHED_SHUTDOWN_CODE, - d->domain_id, current->vcpu_id, sched_shutdown.reason); + TRACE_3D(TRC_SCHED_SHUTDOWN_CODE, d->domain_id, current->vcpu_id, + sched_shutdown.reason); spin_lock(&d->shutdown_lock); if ( d->shutdown_code == SHUTDOWN_CODE_INVALID ) @@ -1316,8 +1315,8 @@ ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) if ( copy_from_guest(&sched_watchdog, arg, 1) ) break; - ret = domain_watchdog( - current->domain, sched_watchdog.id, sched_watchdog.timeout); + ret = domain_watchdog(current->domain, sched_watchdog.id, + sched_watchdog.timeout); break; } @@ -1371,7 +1370,7 @@ long do_set_timer_op(s_time_t timeout) * timeout in this case can burn a lot of CPU. We therefore go for a * reasonable middleground of triggering a timer event in 100ms. */ - gdprintk(XENLOG_INFO, "Warning: huge timeout set: %"PRIx64"\n", + gdprintk(XENLOG_INFO, "Warning: huge timeout set: %" PRIx64 "\n", timeout); set_timer(&v->singleshot_timer, NOW() + MILLISECS(100)); } @@ -1402,7 +1401,7 @@ long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op) if ( op->sched_id != dom_scheduler(d)->sched_id ) return -EINVAL; - switch ( op->cmd ) + switch (op->cmd) { case XEN_DOMCTL_SCHEDOP_putinfo: case XEN_DOMCTL_SCHEDOP_getinfo: @@ -1439,7 +1438,8 @@ long sched_adjust_global(struct xen_sysctl_scheduler_op *op) return -ESRCH; rc = ((op->sched_id == pool->sched->sched_id) - ? SCHED_OP(pool->sched, adjust_global, op) : -EINVAL); + ? SCHED_OP(pool->sched, adjust_global, op) + : -EINVAL); cpupool_put(pool); @@ -1474,14 +1474,14 @@ static void vcpu_periodic_timer_work(struct vcpu *v) */ static void schedule(void) { - struct vcpu *prev = current, *next = NULL; - s_time_t now; - struct scheduler *sched; - unsigned long *tasklet_work = &this_cpu(tasklet_work_to_do); - bool_t tasklet_work_scheduled = 0; + struct vcpu *prev = current, *next = NULL; + s_time_t now; + struct scheduler *sched; + unsigned long *tasklet_work = &this_cpu(tasklet_work_to_do); + bool_t tasklet_work_scheduled = 0; struct schedule_data *sd; - spinlock_t *lock; - struct task_slice next_slice; + spinlock_t *lock; + struct task_slice next_slice; int cpu = smp_processor_id(); ASSERT_NOT_IN_ATOMIC(); @@ -1491,12 +1491,12 @@ static void schedule(void) sd = &this_cpu(schedule_data); /* Update tasklet scheduling status. 
*/ - switch ( *tasklet_work ) + switch (*tasklet_work) { case TASKLET_enqueued: set_bit(_TASKLET_scheduled, tasklet_work); /* fallthrough */ - case TASKLET_enqueued|TASKLET_scheduled: + case TASKLET_enqueued | TASKLET_scheduled: tasklet_work_scheduled = 1; break; case TASKLET_scheduled: @@ -1528,33 +1528,31 @@ static void schedule(void) if ( unlikely(prev == next) ) { pcpu_schedule_unlock_irq(lock, cpu); - TRACE_4D(TRC_SCHED_SWITCH_INFCONT, - next->domain->domain_id, next->vcpu_id, - now - prev->runstate.state_entry_time, + TRACE_4D(TRC_SCHED_SWITCH_INFCONT, next->domain->domain_id, + next->vcpu_id, now - prev->runstate.state_entry_time, next_slice.time); trace_continue_running(next); return continue_running(prev); } - TRACE_3D(TRC_SCHED_SWITCH_INFPREV, - prev->domain->domain_id, prev->vcpu_id, + TRACE_3D(TRC_SCHED_SWITCH_INFPREV, prev->domain->domain_id, prev->vcpu_id, now - prev->runstate.state_entry_time); - TRACE_4D(TRC_SCHED_SWITCH_INFNEXT, - next->domain->domain_id, next->vcpu_id, - (next->runstate.state == RUNSTATE_runnable) ? - (now - next->runstate.state_entry_time) : 0, + TRACE_4D(TRC_SCHED_SWITCH_INFNEXT, next->domain->domain_id, next->vcpu_id, + (next->runstate.state == RUNSTATE_runnable) + ? (now - next->runstate.state_entry_time) + : 0, next_slice.time); ASSERT(prev->runstate.state == RUNSTATE_running); - TRACE_4D(TRC_SCHED_SWITCH, - prev->domain->domain_id, prev->vcpu_id, + TRACE_4D(TRC_SCHED_SWITCH, prev->domain->domain_id, prev->vcpu_id, next->domain->domain_id, next->vcpu_id); vcpu_runstate_change( prev, - ((prev->pause_flags & VPF_blocked) ? RUNSTATE_blocked : - (vcpu_runnable(prev) ? RUNSTATE_runnable : RUNSTATE_offline)), + ((prev->pause_flags & VPF_blocked) + ? RUNSTATE_blocked + : (vcpu_runnable(prev) ? RUNSTATE_runnable : RUNSTATE_offline)), now); prev->last_run_time = now; @@ -1662,8 +1660,8 @@ static int cpu_schedule_up(unsigned int cpu) */ ASSERT(idle->sched_priv == NULL); - idle->sched_priv = SCHED_OP(&ops, alloc_vdata, idle, - idle->domain->sched_priv); + idle->sched_priv = + SCHED_OP(&ops, alloc_vdata, idle, idle->domain->sched_priv); if ( idle->sched_priv == NULL ) return -ENOMEM; } @@ -1698,8 +1696,8 @@ static void cpu_schedule_down(unsigned int cpu) kill_timer(&sd->s_timer); } -static int cpu_schedule_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_schedule_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; struct scheduler *sched = per_cpu(scheduler, cpu); @@ -1738,7 +1736,7 @@ static int cpu_schedule_callback( * - alloc_pdata-->init_pdata-->deinit_pdata-->free_pdata, or * - alloc_pdata-->free_pdata. */ - switch ( action ) + switch (action) { case CPU_STARTING: SCHED_OP(sched, init_pdata, sd->sched_priv, cpu); @@ -1759,9 +1757,8 @@ static int cpu_schedule_callback( return !rc ? NOTIFY_DONE : notifier_from_errno(rc); } -static struct notifier_block cpu_schedule_nfb = { - .notifier_call = cpu_schedule_callback -}; +static struct notifier_block cpu_schedule_nfb = {.notifier_call = + cpu_schedule_callback}; /* Initialise the data structures. 
*/ void __init scheduler_init(void) @@ -1771,7 +1768,7 @@ void __init scheduler_init(void) open_softirq(SCHEDULE_SOFTIRQ, schedule); - for ( i = 0; i < NUM_SCHEDULERS; i++) + for ( i = 0; i < NUM_SCHEDULERS; i++ ) { if ( schedulers[i]->global_init && schedulers[i]->global_init() < 0 ) schedulers[i] = NULL; @@ -1802,13 +1799,12 @@ void __init scheduler_init(void) panic("scheduler returned error on init\n"); if ( sched_ratelimit_us && - (sched_ratelimit_us > XEN_SYSCTL_SCHED_RATELIMIT_MAX - || sched_ratelimit_us < XEN_SYSCTL_SCHED_RATELIMIT_MIN) ) + (sched_ratelimit_us > XEN_SYSCTL_SCHED_RATELIMIT_MAX || + sched_ratelimit_us < XEN_SYSCTL_SCHED_RATELIMIT_MIN) ) { printk("WARNING: sched_ratelimit_us outside of valid range [%d,%d].\n" " Resetting to default %u\n", - XEN_SYSCTL_SCHED_RATELIMIT_MIN, - XEN_SYSCTL_SCHED_RATELIMIT_MAX, + XEN_SYSCTL_SCHED_RATELIMIT_MIN, XEN_SYSCTL_SCHED_RATELIMIT_MAX, SCHED_DEFAULT_RATELIMIT_US); sched_ratelimit_us = SCHED_DEFAULT_RATELIMIT_US; } @@ -1843,7 +1839,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c) struct scheduler *old_ops = per_cpu(scheduler, cpu); struct scheduler *new_ops = (c == NULL) ? &ops : c->sched; struct cpupool *old_pool = per_cpu(cpupool, cpu); - spinlock_t * old_lock; + spinlock_t *old_lock; /* * pCPUs only move from a valid cpupool to free (i.e., out of any pool), @@ -1914,7 +1910,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c) SCHED_OP(old_ops, free_vdata, vpriv_old); SCHED_OP(old_ops, free_pdata, ppriv_old, cpu); - out: +out: per_cpu(cpupool, cpu) = c; /* When a cpu is added to a pool, trigger it to go pick up some work */ if ( c != NULL ) @@ -1939,7 +1935,7 @@ struct scheduler *scheduler_alloc(unsigned int sched_id, int *perr) *perr = -ENOENT; return NULL; - found: +found: *perr = -ENOMEM; if ( (sched = xmalloc(struct scheduler)) == NULL ) return NULL; @@ -1962,9 +1958,9 @@ void scheduler_free(struct scheduler *sched) void schedule_dump(struct cpupool *c) { - unsigned int i; + unsigned int i; struct scheduler *sched; - cpumask_t *cpus; + cpumask_t *cpus; /* Locking, if necessary, must be handled withing each scheduler */ diff --git a/xen/common/shutdown.c b/xen/common/shutdown.c index 2ed4d62214..ccd1b0964d 100644 --- a/xen/common/shutdown.c +++ b/xen/common/shutdown.c @@ -31,7 +31,7 @@ static void noreturn maybe_reboot(void) void hwdom_shutdown(u8 reason) { - switch ( reason ) + switch (reason) { case SHUTDOWN_poweroff: printk("Hardware Dom%u halted: halting machine\n", @@ -71,5 +71,4 @@ void hwdom_shutdown(u8 reason) maybe_reboot(); break; /* not reached */ } -} - +} diff --git a/xen/common/smp.c b/xen/common/smp.c index 79f4ebd145..de44da88a1 100644 --- a/xen/common/smp.c +++ b/xen/common/smp.c @@ -25,17 +25,15 @@ * Structure and data for smp_call_function()/on_selected_cpus(). 
*/ static DEFINE_SPINLOCK(call_lock); -static struct call_data_struct { - void (*func) (void *info); +static struct call_data_struct +{ + void (*func)(void *info); void *info; int wait; cpumask_t selected; } call_data; -void smp_call_function( - void (*func) (void *info), - void *info, - int wait) +void smp_call_function(void (*func)(void *info), void *info, int wait) { cpumask_t allbutself; @@ -44,11 +42,8 @@ void smp_call_function( on_selected_cpus(&allbutself, func, info, wait); } -void on_selected_cpus( - const cpumask_t *selected, - void (*func) (void *info), - void *info, - int wait) +void on_selected_cpus(const cpumask_t *selected, void (*func)(void *info), + void *info, int wait) { unsigned int nr_cpus; diff --git a/xen/common/softirq.c b/xen/common/softirq.c index 83c3c09bd5..50054118d9 100644 --- a/xen/common/softirq.c +++ b/xen/common/softirq.c @@ -1,10 +1,10 @@ /****************************************************************************** * common/softirq.c - * - * Softirqs in Xen are only executed in an outermost activation (e.g., never - * within an interrupt activation). This simplifies some things and generally + * + * Softirqs in Xen are only executed in an outermost activation (e.g., never + * within an interrupt activation). This simplifies some things and generally * seems a good thing. - * + * * Copyright (c) 2003, K A Fraser * Copyright (c) 1992, Linus Torvalds */ @@ -30,7 +30,7 @@ static void __do_softirq(unsigned long ignore_mask) unsigned int i, cpu; unsigned long pending; - for ( ; ; ) + for ( ;; ) { /* * Initialise @cpu on every iteration: SCHEDULE_SOFTIRQ may move @@ -41,8 +41,8 @@ static void __do_softirq(unsigned long ignore_mask) if ( rcu_pending(cpu) ) rcu_check_callbacks(cpu); - if ( ((pending = (softirq_pending(cpu) & ~ignore_mask)) == 0) - || cpu_is_offline(cpu) ) + if ( ((pending = (softirq_pending(cpu) & ~ignore_mask)) == 0) || + cpu_is_offline(cpu) ) break; i = find_first_set_bit(pending); @@ -55,7 +55,7 @@ void process_pending_softirqs(void) { ASSERT(!in_irq() && local_irq_is_enabled()); /* Do not enter scheduler as it can preempt the calling context. */ - __do_softirq(1ul<= 0; i -= size ) { - for ( r = i; r * 2 + size < n; r = c ) + for ( r = i; r * 2 + size < n; r = c ) { c = r * 2 + size; if ( (c < n - size) && (cmp(base + c, base + c + size) < 0) ) diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c index 6bc52d70c0..5f7958dd6e 100644 --- a/xen/common/spinlock.c +++ b/xen/common/spinlock.c @@ -29,7 +29,7 @@ static void check_lock(struct lock_debug *debug) * every lock must be consistently observed else we can deadlock in * IRQ-context rendezvous functions (a rendezvous which gets every CPU * into IRQ context before any CPU is released from the rendezvous). - * + * * If we can mix IRQ-disabled and IRQ-enabled callers, the following can * happen: * * Lock is held by CPU A, with IRQs enabled @@ -37,7 +37,7 @@ static void check_lock(struct lock_debug *debug) * * Rendezvous starts -- CPU A takes interrupt and enters rendezbous spin * * DEADLOCK -- CPU B will never enter rendezvous, CPU A will never exit * the rendezvous, and will hence never release the lock. - * + * * To guard against this subtle bug we latch the IRQ safety of every * spinlock in the system, on first use. */ @@ -61,12 +61,12 @@ static void check_barrier(struct lock_debug *debug) /* * For a barrier, we have a relaxed IRQ-safety-consistency check. 
- * + * * It is always safe to spin at the barrier with IRQs enabled -- that does * not prevent us from entering an IRQ-context rendezvous, and nor are * we preventing anyone else from doing so (since we do not actually * acquire the lock during a barrier operation). - * + * * However, if we spin on an IRQ-unsafe lock with IRQs disabled then that * is clearly wrong, for the same reason outlined in check_lock() above. */ @@ -92,19 +92,19 @@ void spin_debug_disable(void) #ifdef CONFIG_LOCK_PROFILE -#define LOCK_PROFILE_REL \ - if (lock->profile) \ - { \ - lock->profile->time_hold += NOW() - lock->profile->time_locked; \ - lock->profile->lock_cnt++; \ +#define LOCK_PROFILE_REL \ + if ( lock->profile ) \ + { \ + lock->profile->time_hold += NOW() - lock->profile->time_locked; \ + lock->profile->lock_cnt++; \ } -#define LOCK_PROFILE_VAR s_time_t block = 0 -#define LOCK_PROFILE_BLOCK block = block ? : NOW(); +#define LOCK_PROFILE_VAR s_time_t block = 0 +#define LOCK_PROFILE_BLOCK block = block ?: NOW(); #define LOCK_PROFILE_GOT \ - if (lock->profile) \ + if ( lock->profile ) \ { \ lock->profile->time_locked = NOW(); \ - if (block) \ + if ( block ) \ { \ lock->profile->time_block += lock->profile->time_locked - block; \ lock->profile->block_cnt++; \ @@ -141,8 +141,8 @@ void inline _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data) LOCK_PROFILE_VAR; check_lock(&lock->debug); - tickets.head_tail = arch_fetch_and_add(&lock->tickets.head_tail, - tickets.head_tail); + tickets.head_tail = + arch_fetch_and_add(&lock->tickets.head_tail, tickets.head_tail); while ( tickets.tail != observe_head(&lock->tickets) ) { LOCK_PROFILE_BLOCK; @@ -157,7 +157,7 @@ void inline _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data) void _spin_lock(spinlock_t *lock) { - _spin_lock_cb(lock, NULL, NULL); + _spin_lock_cb(lock, NULL, NULL); } void _spin_lock_irq(spinlock_t *lock) @@ -207,8 +207,8 @@ int _spin_is_locked(spinlock_t *lock) * ASSERT()s and alike. */ return lock->recurse_cpu == SPINLOCK_NO_CPU - ? lock->tickets.head != lock->tickets.tail - : lock->recurse_cpu == smp_processor_id(); + ? 
lock->tickets.head != lock->tickets.tail + : lock->recurse_cpu == smp_processor_id(); } int _spin_trylock(spinlock_t *lock) @@ -221,11 +221,11 @@ int _spin_trylock(spinlock_t *lock) return 0; new = old; new.tail++; - if ( cmpxchg(&lock->tickets.head_tail, - old.head_tail, new.head_tail) != old.head_tail ) + if ( cmpxchg(&lock->tickets.head_tail, old.head_tail, new.head_tail) != + old.head_tail ) return 0; #ifdef CONFIG_LOCK_PROFILE - if (lock->profile) + if ( lock->profile ) lock->profile->time_locked = NOW(); #endif preempt_disable(); @@ -310,13 +310,14 @@ void _spin_unlock_recursive(spinlock_t *lock) #ifdef CONFIG_LOCK_PROFILE -struct lock_profile_anc { - struct lock_profile_qhead *head_q; /* first head of this type */ - char *name; /* descriptive string for print */ +struct lock_profile_anc +{ + struct lock_profile_qhead *head_q; /* first head of this type */ + char *name; /* descriptive string for print */ }; -typedef void lock_profile_subfunc( - struct lock_profile *, int32_t, int32_t, void *); +typedef void lock_profile_subfunc(struct lock_profile *, int32_t, int32_t, + void *); extern struct lock_profile *__lock_profile_start; extern struct lock_profile *__lock_profile_end; @@ -340,14 +341,14 @@ static void spinlock_profile_iterate(lock_profile_subfunc *sub, void *par) spin_unlock(&lock_profile_lock); } -static void spinlock_profile_print_elem(struct lock_profile *data, - int32_t type, int32_t idx, void *par) +static void spinlock_profile_print_elem(struct lock_profile *data, int32_t type, + int32_t idx, void *par) { if ( type == LOCKPROF_TYPE_GLOBAL ) printk("%s %s:\n", lock_profile_ancs[type].name, data->name); else printk("%s %d %s:\n", lock_profile_ancs[type].name, idx, data->name); - printk(" lock:%12"PRId64"(%08X:%08X), block:%12"PRId64"(%08X:%08X)\n", + printk(" lock:%12" PRId64 "(%08X:%08X), block:%12" PRId64 "(%08X:%08X)\n", data->lock_cnt, (u32)(data->time_hold >> 32), (u32)data->time_hold, data->block_cnt, (u32)(data->time_block >> 32), (u32)data->time_block); @@ -359,13 +360,14 @@ void spinlock_profile_printall(unsigned char key) s_time_t diff; diff = now - lock_profile_start; - printk("Xen lock profile info SHOW (now = %"PRI_stime" total = " - "%"PRI_stime")\n", now, diff); + printk("Xen lock profile info SHOW (now = %" PRI_stime " total = " + "%" PRI_stime ")\n", + now, diff); spinlock_profile_iterate(spinlock_profile_print_elem, NULL); } -static void spinlock_profile_reset_elem(struct lock_profile *data, - int32_t type, int32_t idx, void *par) +static void spinlock_profile_reset_elem(struct lock_profile *data, int32_t type, + int32_t idx, void *par) { data->lock_cnt = 0; data->block_cnt = 0; @@ -378,18 +380,19 @@ void spinlock_profile_reset(unsigned char key) s_time_t now = NOW(); if ( key != '\0' ) - printk("Xen lock profile info RESET (now = %"PRI_stime")\n", now); + printk("Xen lock profile info RESET (now = %" PRI_stime ")\n", now); lock_profile_start = now; spinlock_profile_iterate(spinlock_profile_reset_elem, NULL); } -typedef struct { +typedef struct +{ struct xen_sysctl_lockprof_op *pc; - int rc; + int rc; } spinlock_profile_ucopy_t; -static void spinlock_profile_ucopy_elem(struct lock_profile *data, - int32_t type, int32_t idx, void *par) +static void spinlock_profile_ucopy_elem(struct lock_profile *data, int32_t type, + int32_t idx, void *par) { spinlock_profile_ucopy_t *p = par; struct xen_sysctl_lockprof_data elem; @@ -420,7 +423,7 @@ int spinlock_profile_control(struct xen_sysctl_lockprof_op *pc) int rc = 0; spinlock_profile_ucopy_t par; - switch ( 
pc->cmd ) + switch (pc->cmd) { case XEN_SYSCTL_LOCKPROF_reset: spinlock_profile_reset('\0'); @@ -441,8 +444,9 @@ int spinlock_profile_control(struct xen_sysctl_lockprof_op *pc) return rc; } -void _lock_profile_register_struct( - int32_t type, struct lock_profile_qhead *qhead, int32_t idx, char *name) +void _lock_profile_register_struct(int32_t type, + struct lock_profile_qhead *qhead, + int32_t idx, char *name) { qhead->idx = idx; spin_lock(&lock_profile_lock); @@ -452,8 +456,8 @@ void _lock_profile_register_struct( spin_unlock(&lock_profile_lock); } -void _lock_profile_deregister_struct( - int32_t type, struct lock_profile_qhead *qhead) +void _lock_profile_deregister_struct(int32_t type, + struct lock_profile_qhead *qhead) { struct lock_profile_qhead **q; @@ -481,9 +485,8 @@ static int __init lock_prof_init(void) (*q)->lock->profile = *q; } - _lock_profile_register_struct( - LOCKPROF_TYPE_GLOBAL, &lock_profile_glb_q, - 0, "Global lock"); + _lock_profile_register_struct(LOCKPROF_TYPE_GLOBAL, &lock_profile_glb_q, 0, + "Global lock"); return 0; } diff --git a/xen/common/stop_machine.c b/xen/common/stop_machine.c index ce6f5624c4..b653c627a1 100644 --- a/xen/common/stop_machine.c +++ b/xen/common/stop_machine.c @@ -30,7 +30,8 @@ #include #include -enum stopmachine_state { +enum stopmachine_state +{ STOPMACHINE_START, STOPMACHINE_PREPARE, STOPMACHINE_DISABLE_IRQ, @@ -38,7 +39,8 @@ enum stopmachine_state { STOPMACHINE_EXIT }; -struct stopmachine_data { +struct stopmachine_data +{ unsigned int nr_cpus; enum stopmachine_state state; @@ -100,7 +102,7 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu) smp_wmb(); - for_each_cpu ( i, &allbutself ) + for_each_cpu (i, &allbutself) tasklet_schedule_on_cpu(&per_cpu(stopmachine_tasklet, i), i); stopmachine_set_state(STOPMACHINE_PREPARE); @@ -147,7 +149,7 @@ static void stopmachine_action(unsigned long cpu) cpu_relax(); state = stopmachine_data.state; - switch ( state ) + switch (state) { case STOPMACHINE_DISABLE_IRQ: local_irq_disable(); @@ -173,26 +175,24 @@ static void stopmachine_action(unsigned long cpu) local_irq_enable(); } -static int cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; if ( action == CPU_UP_PREPARE ) - tasklet_init(&per_cpu(stopmachine_tasklet, cpu), - stopmachine_action, cpu); + tasklet_init(&per_cpu(stopmachine_tasklet, cpu), stopmachine_action, + cpu); return NOTIFY_DONE; } -static struct notifier_block cpu_nfb = { - .notifier_call = cpu_callback -}; +static struct notifier_block cpu_nfb = {.notifier_call = cpu_callback}; static int __init cpu_stopmachine_init(void) { unsigned int cpu; - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) { void *hcpu = (void *)(long)cpu; cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu); diff --git a/xen/common/string.c b/xen/common/string.c index 1e122abca8..b493b773f6 100644 --- a/xen/common/string.c +++ b/xen/common/string.c @@ -17,37 +17,40 @@ */ int strnicmp(const char *s1, const char *s2, size_t len) { - /* Yes, Virginia, it had better be unsigned */ - unsigned char c1, c2; - - c1 = 0; c2 = 0; - if (len) { - do { - c1 = *s1; c2 = *s2; - s1++; s2++; - if (!c1) - break; - if (!c2) - break; - if (c1 == c2) - continue; - c1 = tolower(c1); - c2 = tolower(c2); - if (c1 != c2) - break; - } while (--len); - } - return (int)c1 - (int)c2; + /* Yes, Virginia, it had better be unsigned */ + unsigned char c1, c2; + + c1 = 0; + 
c2 = 0; + if ( len ) + { + do { + c1 = *s1; + c2 = *s2; + s1++; + s2++; + if ( !c1 ) + break; + if ( !c2 ) + break; + if ( c1 == c2 ) + continue; + c1 = tolower(c1); + c2 = tolower(c2); + if ( c1 != c2 ) + break; + } while ( --len ); + } + return (int)c1 - (int)c2; } #endif #ifndef __HAVE_ARCH_STRCASECMP -int (strcasecmp)(const char *s1, const char *s2) +int(strcasecmp)(const char *s1, const char *s2) { int c1, c2; - do - { + do { c1 = tolower(*s1++); c2 = tolower(*s2++); } while ( c1 == c2 && c1 != 0 ); @@ -70,14 +73,15 @@ int (strcasecmp)(const char *s1, const char *s2) */ size_t strlcpy(char *dest, const char *src, size_t size) { - size_t ret = strlen(src); - - if (size) { - size_t len = (ret >= size) ? size-1 : ret; - memcpy(dest, src, len); - dest[len] = '\0'; - } - return ret; + size_t ret = strlen(src); + + if ( size ) + { + size_t len = (ret >= size) ? size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; } EXPORT_SYMBOL(strlcpy); #endif @@ -95,18 +99,18 @@ EXPORT_SYMBOL(strlcpy); */ size_t strlcat(char *dest, const char *src, size_t size) { - size_t slen = strlen(src); - size_t dlen = strnlen(dest, size); - char *p = dest + dlen; + size_t slen = strlen(src); + size_t dlen = strnlen(dest, size); + char *p = dest + dlen; - while ((p - dest) < size) - if ((*p++ = *src++) == '\0') - break; + while ( (p - dest) < size ) + if ( (*p++ = *src++) == '\0' ) + break; - if (dlen < size) - *(p-1) = '\0'; + if ( dlen < size ) + *(p - 1) = '\0'; - return slen + dlen; + return slen + dlen; } EXPORT_SYMBOL(strlcat); #endif @@ -117,16 +121,17 @@ EXPORT_SYMBOL(strlcat); * @cs: One string * @ct: Another string */ -int (strcmp)(const char *cs, const char *ct) +int(strcmp)(const char *cs, const char *ct) { - register signed char __res; + register signed char __res; - while (1) { - if ((__res = *cs - *ct++) != 0 || !*cs++) - break; - } + while ( 1 ) + { + if ( (__res = *cs - *ct++) != 0 || !*cs++ ) + break; + } - return __res; + return __res; } #endif @@ -137,17 +142,18 @@ int (strcmp)(const char *cs, const char *ct) * @ct: Another string * @count: The maximum number of bytes to compare */ -int (strncmp)(const char *cs, const char *ct, size_t count) +int(strncmp)(const char *cs, const char *ct, size_t count) { - register signed char __res = 0; + register signed char __res = 0; - while (count) { - if ((__res = *cs - *ct++) != 0 || !*cs++) - break; - count--; - } + while ( count ) + { + if ( (__res = *cs - *ct++) != 0 || !*cs++ ) + break; + count--; + } - return __res; + return __res; } #endif @@ -159,10 +165,10 @@ int (strncmp)(const char *cs, const char *ct, size_t count) */ char *(strchr)(const char *s, int c) { - for(; *s != (char) c; ++s) - if (*s == '\0') - return NULL; - return (char *) s; + for ( ; *s != (char)c; ++s ) + if ( *s == '\0' ) + return NULL; + return (char *)s; } #endif @@ -174,12 +180,12 @@ char *(strchr)(const char *s, int c) */ char *(strrchr)(const char *s, int c) { - const char *p = s + strlen(s); - do { - if (*p == (char)c) - return (char *)p; - } while (--p >= s); - return NULL; + const char *p = s + strlen(s); + do { + if ( *p == (char)c ) + return (char *)p; + } while ( --p >= s ); + return NULL; } #endif @@ -188,13 +194,13 @@ char *(strrchr)(const char *s, int c) * strlen - Find the length of a string * @s: The string to be sized */ -size_t (strlen)(const char * s) +size_t(strlen)(const char *s) { - const char *sc; + const char *sc; - for (sc = s; *sc != '\0'; ++sc) - /* nothing */; - return sc - s; + for ( sc = s; *sc != '\0'; ++sc ) + /* nothing 
*/; + return sc - s; } #endif @@ -204,13 +210,13 @@ size_t (strlen)(const char * s) * @s: The string to be sized * @count: The maximum number of bytes to search */ -size_t strnlen(const char * s, size_t count) +size_t strnlen(const char *s, size_t count) { - const char *sc; + const char *sc; - for (sc = s; count-- && *sc != '\0'; ++sc) - /* nothing */; - return sc - s; + for ( sc = s; count-- && *sc != '\0'; ++sc ) + /* nothing */; + return sc - s; } #endif @@ -223,21 +229,23 @@ size_t strnlen(const char * s, size_t count) */ size_t strspn(const char *s, const char *accept) { - const char *p; - const char *a; - size_t count = 0; - - for (p = s; *p != '\0'; ++p) { - for (a = accept; *a != '\0'; ++a) { - if (*p == *a) - break; - } - if (*a == '\0') - return count; - ++count; - } - - return count; + const char *p; + const char *a; + size_t count = 0; + + for ( p = s; *p != '\0'; ++p ) + { + for ( a = accept; *a != '\0'; ++a ) + { + if ( *p == *a ) + break; + } + if ( *a == '\0' ) + return count; + ++count; + } + + return count; } #endif @@ -247,17 +255,19 @@ size_t strspn(const char *s, const char *accept) * @cs: The string to be searched * @ct: The characters to search for */ -char * strpbrk(const char * cs,const char * ct) +char *strpbrk(const char *cs, const char *ct) { - const char *sc1,*sc2; - - for( sc1 = cs; *sc1 != '\0'; ++sc1) { - for( sc2 = ct; *sc2 != '\0'; ++sc2) { - if (*sc1 == *sc2) - return (char *) sc1; - } - } - return NULL; + const char *sc1, *sc2; + + for ( sc1 = cs; *sc1 != '\0'; ++sc1 ) + { + for ( sc2 = ct; *sc2 != '\0'; ++sc2 ) + { + if ( *sc1 == *sc2 ) + return (char *)sc1; + } + } + return NULL; } #endif @@ -273,19 +283,19 @@ char * strpbrk(const char * cs,const char * ct) * of that name. In fact, it was stolen from glibc2 and de-fancy-fied. * Same semantics, slimmer shape. 
;) */ -char * strsep(char **s, const char *ct) +char *strsep(char **s, const char *ct) { - char *sbegin = *s, *end; + char *sbegin = *s, *end; - if (sbegin == NULL) - return NULL; + if ( sbegin == NULL ) + return NULL; - end = strpbrk(sbegin, ct); - if (end) - *end++ = '\0'; - *s = end; + end = strpbrk(sbegin, ct); + if ( end ) + *end++ = '\0'; + *s = end; - return sbegin; + return sbegin; } #endif @@ -300,12 +310,12 @@ char * strsep(char **s, const char *ct) */ void *(memset)(void *s, int c, size_t count) { - char *xs = (char *) s; + char *xs = (char *)s; - while (count--) - *xs++ = c; + while ( count-- ) + *xs++ = c; - return s; + return s; } #endif @@ -321,12 +331,12 @@ void *(memset)(void *s, int c, size_t count) */ void *(memcpy)(void *dest, const void *src, size_t count) { - char *tmp = (char *) dest, *s = (char *) src; + char *tmp = (char *)dest, *s = (char *)src; - while (count--) - *tmp++ = *s++; + while ( count-- ) + *tmp++ = *s++; - return dest; + return dest; } #endif @@ -341,22 +351,24 @@ void *(memcpy)(void *dest, const void *src, size_t count) */ void *(memmove)(void *dest, const void *src, size_t count) { - char *tmp, *s; - - if (dest <= src) { - tmp = (char *) dest; - s = (char *) src; - while (count--) - *tmp++ = *s++; - } - else { - tmp = (char *) dest + count; - s = (char *) src + count; - while (count--) - *--tmp = *--s; - } - - return dest; + char *tmp, *s; + + if ( dest <= src ) + { + tmp = (char *)dest; + s = (char *)src; + while ( count-- ) + *tmp++ = *s++; + } + else + { + tmp = (char *)dest + count; + s = (char *)src + count; + while ( count-- ) + *--tmp = *--s; + } + + return dest; } #endif @@ -367,15 +379,15 @@ void *(memmove)(void *dest, const void *src, size_t count) * @ct: Another area of memory * @count: The size of the area. 
*/ -int (memcmp)(const void *cs, const void *ct, size_t count) +int(memcmp)(const void *cs, const void *ct, size_t count) { - const unsigned char *su1, *su2; - int res = 0; + const unsigned char *su1, *su2; + int res = 0; - for( su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) - if ((res = *su1 - *su2) != 0) - break; - return res; + for ( su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count-- ) + if ( (res = *su1 - *su2) != 0 ) + break; + return res; } #endif @@ -389,17 +401,18 @@ int (memcmp)(const void *cs, const void *ct, size_t count) * returns the address of the first occurrence of @c, or 1 byte past * the area if @c is not found */ -void * memscan(void * addr, int c, size_t size) +void *memscan(void *addr, int c, size_t size) { - unsigned char * p = (unsigned char *) addr; - - while (size) { - if (*p == c) - return (void *) p; - p++; - size--; - } - return (void *) p; + unsigned char *p = (unsigned char *)addr; + + while ( size ) + { + if ( *p == c ) + return (void *)p; + p++; + size--; + } + return (void *)p; } #endif @@ -411,19 +424,20 @@ void * memscan(void * addr, int c, size_t size) */ char *(strstr)(const char *s1, const char *s2) { - int l1, l2; - - l2 = strlen(s2); - if (!l2) - return (char *) s1; - l1 = strlen(s1); - while (l1 >= l2) { - l1--; - if (!memcmp(s1,s2,l2)) - return (char *) s1; - s1++; - } - return NULL; + int l1, l2; + + l2 = strlen(s2); + if ( !l2 ) + return (char *)s1; + l1 = strlen(s1); + while ( l1 >= l2 ) + { + l1--; + if ( !memcmp(s1, s2, l2) ) + return (char *)s1; + s1++; + } + return NULL; } #endif @@ -439,13 +453,15 @@ char *(strstr)(const char *s1, const char *s2) */ void *(memchr)(const void *s, int c, size_t n) { - const unsigned char *p = s; - while (n-- != 0) { - if ((unsigned char)c == *p++) { - return (void *)(p-1); - } - } - return NULL; + const unsigned char *p = s; + while ( n-- != 0 ) + { + if ( (unsigned char)c == *p++ ) + { + return (void *)(p - 1); + } + } + return NULL; } #endif diff --git a/xen/common/symbols.c b/xen/common/symbols.c index 9377f41424..60ccc36285 100644 --- a/xen/common/symbols.c +++ b/xen/common/symbols.c @@ -55,16 +55,20 @@ static unsigned int symbols_expand_symbol(unsigned int off, char *result) /* for every byte on the compressed symbol data, copy the table entry for that byte */ - while(len) { - tptr = &symbols_token_table[ symbols_token_index[*data] ]; + while ( len ) + { + tptr = &symbols_token_table[symbols_token_index[*data]]; data++; len--; - while (*tptr) { - if(skipped_first) { + while ( *tptr ) + { + if ( skipped_first ) + { *result = *tptr; result++; - } else + } + else skipped_first = 1; tptr++; } @@ -85,13 +89,13 @@ static unsigned int get_symbol_offset(unsigned long pos) /* use the closest marker we have. We have markers every 256 positions, * so that should be close enough */ - name = &symbols_names[ symbols_markers[pos>>8] ]; + name = &symbols_names[symbols_markers[pos >> 8]]; /* sequentially scan all the symbols up to the point we're searching for. 
* Every symbol is stored in a [][ bytes of data] format, so we * just need to add the len to the current pointer for every symbol we * wish to skip */ - for(i = 0; i < (pos&0xFF); i++) + for ( i = 0; i < (pos & 0xFF); i++ ) name = name + (*name) + 1; return name - symbols_names; @@ -102,10 +106,8 @@ bool_t is_active_kernel_text(unsigned long addr) return !!find_text_region(addr); } -const char *symbols_lookup(unsigned long addr, - unsigned long *symbolsize, - unsigned long *offset, - char *namebuf) +const char *symbols_lookup(unsigned long addr, unsigned long *symbolsize, + unsigned long *offset, char *namebuf) { unsigned long i, low, high, mid; unsigned long symbol_end = 0; @@ -115,42 +117,47 @@ const char *symbols_lookup(unsigned long addr, namebuf[0] = 0; region = find_text_region(addr); - if (!region) + if ( !region ) return NULL; - if (region->symbols_lookup) + if ( region->symbols_lookup ) return region->symbols_lookup(addr, symbolsize, offset, namebuf); - /* do a binary search on the sorted symbols_addresses array */ + /* do a binary search on the sorted symbols_addresses array */ low = 0; high = symbols_num_syms; - while (high-low > 1) { + while ( high - low > 1 ) + { mid = (low + high) / 2; - if (symbols_address(mid) <= addr) low = mid; - else high = mid; + if ( symbols_address(mid) <= addr ) + low = mid; + else + high = mid; } /* search for the first aliased symbol. Aliased symbols are symbols with the same address */ - while (low && symbols_address(low - 1) == symbols_address(low)) + while ( low && symbols_address(low - 1) == symbols_address(low) ) --low; - /* Grab name */ + /* Grab name */ symbols_expand_symbol(get_symbol_offset(low), namebuf); /* Search for next non-aliased symbol */ - for (i = low + 1; i < symbols_num_syms; i++) { - if (symbols_address(i) > symbols_address(low)) { + for ( i = low + 1; i < symbols_num_syms; i++ ) + { + if ( symbols_address(i) > symbols_address(low) ) + { symbol_end = symbols_address(i); break; } } /* if we found no next symbol, we use the end of the section */ - if (!symbol_end) - symbol_end = is_kernel_inittext(addr) ? - (unsigned long)_einittext : (unsigned long)_etext; + if ( !symbol_end ) + symbol_end = is_kernel_inittext(addr) ? (unsigned long)_einittext + : (unsigned long)_etext; *symbolsize = symbol_end - symbols_address(low); *offset = addr - symbols_address(low); @@ -170,8 +177,8 @@ static char symbols_get_symbol_type(unsigned int off) return symbols_token_table[symbols_token_index[symbols_names[off + 1]]]; } -int xensyms_read(uint32_t *symnum, char *type, - unsigned long *address, char *name) +int xensyms_read(uint32_t *symnum, char *type, unsigned long *address, + char *name) { /* * Symbols are most likely accessed sequentially so we remember position @@ -248,7 +255,7 @@ unsigned long symbols_lookup_by_name(const char *symname) do { rc = xensyms_read(&symnum, &type, &addr, name); if ( rc ) - break; + break; if ( !strcmp(name, symname) ) return addr; diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c index c0aa6bde4e..3a17868a55 100644 --- a/xen/common/sysctl.c +++ b/xen/common/sysctl.c @@ -1,8 +1,8 @@ /****************************************************************************** * sysctl.c - * + * * System management operations. For use by node control stack. 
- * + * * Copyright (c) 2002-2006, K Fraser */ @@ -53,10 +53,10 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) */ while ( !spin_trylock(&sysctl_lock) ) if ( hypercall_preempt_check() ) - return hypercall_create_continuation( - __HYPERVISOR_sysctl, "h", u_sysctl); + return hypercall_create_continuation(__HYPERVISOR_sysctl, "h", + u_sysctl); - switch ( op->cmd ) + switch (op->cmd) { case XEN_SYSCTL_readconsole: ret = xsm_readconsole(XSM_HOOK, op->u.readconsole.clear); @@ -75,14 +75,14 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) break; case XEN_SYSCTL_getdomaininfolist: - { + { struct domain *d; - struct xen_domctl_getdomaininfo info = { 0 }; + struct xen_domctl_getdomaininfo info = {0}; u32 num_domains = 0; rcu_read_lock(&domlist_read_lock); - for_each_domain ( d ) + for_each_domain (d) { if ( d->domain_id < op->u.getdomaininfolist.first_domain ) continue; @@ -101,15 +101,15 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) ret = -EFAULT; break; } - + num_domains++; } - + rcu_read_unlock(&domlist_read_lock); - + if ( ret != 0 ) break; - + op->u.getdomaininfolist.num_domains = num_domains; } break; @@ -145,7 +145,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) case XEN_SYSCTL_getcpuinfo: { uint32_t i, nr_cpus; - struct xen_sysctl_cpuinfo cpuinfo = { 0 }; + struct xen_sysctl_cpuinfo cpuinfo = {0}; nr_cpus = min(op->u.getcpuinfo.max_cpus, nr_cpu_ids); @@ -165,13 +165,12 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) case XEN_SYSCTL_availheap: op->u.availheap.avail_bytes = avail_domheap_pages_region( - op->u.availheap.node, - op->u.availheap.min_bitwidth, + op->u.availheap.node, op->u.availheap.min_bitwidth, op->u.availheap.max_bitwidth); op->u.availheap.avail_bytes <<= PAGE_SHIFT; break; -#if defined (CONFIG_ACPI) && defined (CONFIG_HAS_CPUFREQ) +#if defined(CONFIG_ACPI) && defined(CONFIG_HAS_CPUFREQ) case XEN_SYSCTL_get_pmstat: ret = do_get_pm_info(&op->u.get_pmstat); break; @@ -192,9 +191,9 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) if ( ret ) break; - ptr = status = xmalloc_bytes( sizeof(uint32_t) * - (op->u.page_offline.end - - op->u.page_offline.start + 1)); + ptr = status = + xmalloc_bytes(sizeof(uint32_t) * (op->u.page_offline.end - + op->u.page_offline.start + 1)); if ( !status ) { dprintk(XENLOG_WARNING, "Out of memory for page offline op\n"); @@ -202,37 +201,37 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) break; } - memset(status, PG_OFFLINE_INVALID, sizeof(uint32_t) * - (op->u.page_offline.end - op->u.page_offline.start + 1)); + memset(status, PG_OFFLINE_INVALID, + sizeof(uint32_t) * + (op->u.page_offline.end - op->u.page_offline.start + 1)); - for ( pfn = op->u.page_offline.start; - pfn <= op->u.page_offline.end; - pfn ++ ) + for ( pfn = op->u.page_offline.start; pfn <= op->u.page_offline.end; + pfn++ ) { - switch ( op->u.page_offline.cmd ) + switch (op->u.page_offline.cmd) { - /* Shall revert her if failed, or leave caller do it? */ - case sysctl_page_offline: - ret = offline_page(pfn, 0, ptr++); - break; - case sysctl_page_online: - ret = online_page(pfn, ptr++); - break; - case sysctl_query_page_offline: - ret = query_page_offline(pfn, ptr++); - break; - default: - ret = -EINVAL; - break; + /* Shall revert her if failed, or leave caller do it? 
*/ + case sysctl_page_offline: + ret = offline_page(pfn, 0, ptr++); + break; + case sysctl_page_online: + ret = online_page(pfn, ptr++); + break; + case sysctl_query_page_offline: + ret = query_page_offline(pfn, ptr++); + break; + default: + ret = -EINVAL; + break; } - if (ret) + if ( ret ) break; } - if ( copy_to_guest( - op->u.page_offline.status, status, - op->u.page_offline.end - op->u.page_offline.start + 1) ) + if ( copy_to_guest(op->u.page_offline.status, status, + op->u.page_offline.end - op->u.page_offline.start + + 1) ) ret = -EFAULT; xfree(status); @@ -253,13 +252,12 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) struct xen_sysctl_physinfo *pi = &op->u.physinfo; memset(pi, 0, sizeof(*pi)); - pi->threads_per_core = - cpumask_weight(per_cpu(cpu_sibling_mask, 0)); + pi->threads_per_core = cpumask_weight(per_cpu(cpu_sibling_mask, 0)); pi->cores_per_socket = cpumask_weight(per_cpu(cpu_core_mask, 0)) / pi->threads_per_core; pi->nr_cpus = num_online_cpus(); pi->nr_nodes = num_online_nodes(); - pi->max_node_id = MAX_NUMNODES-1; + pi->max_node_id = MAX_NUMNODES - 1; pi->max_cpu_id = nr_cpu_ids - 1; pi->total_pages = total_pages; /* Protected by lock */ @@ -285,7 +283,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) if ( do_meminfo || do_distance ) { - struct xen_sysctl_meminfo meminfo = { }; + struct xen_sysctl_meminfo meminfo = {}; if ( num_nodes > ni->num_nodes ) num_nodes = ni->num_nodes; @@ -298,7 +296,8 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) if ( node_online(i) ) { meminfo.memsize = node_spanned_pages(i) << PAGE_SHIFT; - meminfo.memfree = avail_node_heap_pages(i) << PAGE_SHIFT; + meminfo.memfree = avail_node_heap_pages(i) + << PAGE_SHIFT; } else meminfo.memsize = meminfo.memfree = XEN_INVALID_MEM_SZ; @@ -334,8 +333,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) if ( !ret && (ni->num_nodes != i) ) { ni->num_nodes = i; - if ( __copy_field_to_guest(u_sysctl, op, - u.numainfo.num_nodes) ) + if ( __copy_field_to_guest(u_sysctl, op, u.numainfo.num_nodes) ) { ret = -EFAULT; break; @@ -352,7 +350,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) num_cpus = cpumask_last(&cpu_present_map) + 1; if ( !guest_handle_is_null(ti->cputopo) ) { - struct xen_sysctl_cputopo cputopo = { }; + struct xen_sysctl_cputopo cputopo = {}; if ( num_cpus > ti->num_cpus ) num_cpus = ti->num_cpus; @@ -386,8 +384,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) if ( !ret && (ti->num_cpus != i) ) { ti->num_cpus = i; - if ( __copy_field_to_guest(u_sysctl, op, - u.cputopoinfo.num_cpus) ) + if ( __copy_field_to_guest(u_sysctl, op, u.cputopoinfo.num_cpus) ) { ret = -EFAULT; break; @@ -407,8 +404,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) struct xen_sysctl_pcitopoinfo *ti = &op->u.pcitopoinfo; unsigned int i = 0; - if ( guest_handle_is_null(ti->devs) || - guest_handle_is_null(ti->nodes) ) + if ( guest_handle_is_null(ti->devs) || guest_handle_is_null(ti->nodes) ) { ret = -EINVAL; break; @@ -508,7 +504,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) break; } - out: +out: spin_unlock(&sysctl_lock); if ( copyback && (!ret || copyback > 0) && diff --git a/xen/common/tasklet.c b/xen/common/tasklet.c index d4fea3151c..3e1fd811c0 100644 --- a/xen/common/tasklet.c +++ b/xen/common/tasklet.c @@ -1,14 +1,14 @@ /****************************************************************************** * tasklet.c - * + * * Tasklets are dynamically-allocatable tasks run in either VCPU 
context * (specifically, the idle VCPU's context) or in softirq context, on at most * one CPU at a time. Softirq versus VCPU context execution is specified * during per-tasklet initialisation. - * + * * Copyright (c) 2010, Citrix Systems, Inc. * Copyright (c) 1992, Linus Torvalds - * + * * Authors: * Keir Fraser */ @@ -127,7 +127,7 @@ void do_tasklet(void) if ( list_empty(list) ) { - clear_bit(_TASKLET_enqueued, work_to_do); + clear_bit(_TASKLET_enqueued, work_to_do); raise_softirq(SCHEDULE_SOFTIRQ); } @@ -176,7 +176,7 @@ void tasklet_kill(struct tasklet *t) spin_lock_irqsave(&tasklet_lock, flags); } - unlock: +unlock: spin_unlock_irqrestore(&tasklet_lock, flags); } @@ -199,8 +199,8 @@ static void migrate_tasklets_from_cpu(unsigned int cpu, struct list_head *list) spin_unlock_irqrestore(&tasklet_lock, flags); } -void tasklet_init( - struct tasklet *t, void (*func)(unsigned long), unsigned long data) +void tasklet_init(struct tasklet *t, void (*func)(unsigned long), + unsigned long data) { memset(t, 0, sizeof(*t)); INIT_LIST_HEAD(&t->list); @@ -209,19 +209,19 @@ void tasklet_init( t->data = data; } -void softirq_tasklet_init( - struct tasklet *t, void (*func)(unsigned long), unsigned long data) +void softirq_tasklet_init(struct tasklet *t, void (*func)(unsigned long), + unsigned long data) { tasklet_init(t, func, data); t->is_softirq = 1; } -static int cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - switch ( action ) + switch (action) { case CPU_UP_PREPARE: INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu)); @@ -239,10 +239,8 @@ static int cpu_callback( return NOTIFY_DONE; } -static struct notifier_block cpu_nfb = { - .notifier_call = cpu_callback, - .priority = 99 -}; +static struct notifier_block cpu_nfb = {.notifier_call = cpu_callback, + .priority = 99}; void __init tasklet_subsys_init(void) { diff --git a/xen/common/time.c b/xen/common/time.c index a7caea99e0..a5d42f69c1 100644 --- a/xen/common/time.c +++ b/xen/common/time.c @@ -1,16 +1,16 @@ /****************************************************************************** * time.c - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; If not, see . */ @@ -25,18 +25,17 @@ /* Nonzero if YEAR is a leap year (every 4 years, except every 100th isn't, and every 400th is). */ #define __isleap(year) \ - ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0)) + ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0)) /* How many days are in each month. */ const unsigned short int __mon_lengths[2][12] = { /* Normal years. */ {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, /* Leap years. 
*/ - {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31} -}; + {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}}; #define SECS_PER_HOUR (60 * 60) -#define SECS_PER_DAY (SECS_PER_HOUR * 24) +#define SECS_PER_DAY (SECS_PER_HOUR * 24) static uint64_t wc_sec; /* UTC time at last 'time update'. */ static unsigned int wc_nsec; @@ -53,7 +52,7 @@ struct tm gmtime(unsigned long t) #if BITS_PER_LONG >= 64 /* Allow the concept of time before 1970. 64-bit only; for 32-bit * time after 2038 seems more important than time before 1970. */ - while ( t & (1UL<<39) ) + while ( t & (1UL << 39) ) { y -= 400; t += ((unsigned long)(365 * 303 + 366 * 97)) * SECS_PER_DAY; @@ -106,8 +105,8 @@ void update_domain_wallclock_time(struct domain *d) smp_wmb(); sec = wc_sec + d->time_offset_seconds; - shared_info(d, wc_sec) = sec; - shared_info(d, wc_nsec) = wc_nsec; + shared_info(d, wc_sec) = sec; + shared_info(d, wc_nsec) = wc_nsec; #ifdef CONFIG_X86 if ( likely(!has_32bit_shinfo(d)) ) d->shared_info->native.wc_sec_hi = sec >> 32; @@ -134,12 +133,12 @@ void do_settime(u64 secs, unsigned int nsecs, u64 system_time_base) y = do_div(x, 1000000000); spin_lock(&wc_lock); - wc_sec = x; + wc_sec = x; wc_nsec = y; spin_unlock(&wc_lock); rcu_read_lock(&domlist_read_lock); - for_each_domain ( d ) + for_each_domain (d) update_domain_wallclock_time(d); rcu_read_unlock(&domlist_read_lock); } @@ -147,15 +146,14 @@ void do_settime(u64 secs, unsigned int nsecs, u64 system_time_base) /* Return secs after 00:00:00 localtime, 1 January, 1970. */ unsigned long get_localtime(struct domain *d) { - return wc_sec + (wc_nsec + NOW()) / 1000000000ULL - + d->time_offset_seconds; + return wc_sec + (wc_nsec + NOW()) / 1000000000ULL + d->time_offset_seconds; } /* Return microsecs after 00:00:00 localtime, 1 January, 1970. */ uint64_t get_localtime_us(struct domain *d) { - return (SECONDS(wc_sec + d->time_offset_seconds) + wc_nsec + NOW()) - / 1000UL; + return (SECONDS(wc_sec + d->time_offset_seconds) + wc_nsec + NOW()) / + 1000UL; } unsigned long get_sec(void) @@ -168,7 +166,7 @@ struct tm wallclock_time(uint64_t *ns) uint64_t seconds, nsec; if ( !wc_sec ) - return (struct tm) { 0 }; + return (struct tm){0}; seconds = NOW() + SECONDS(wc_sec) + wc_nsec; nsec = do_div(seconds, 1000000000); diff --git a/xen/common/timer.c b/xen/common/timer.c index 376581bd54..aaaffecfcc 100644 --- a/xen/common/timer.c +++ b/xen/common/timer.c @@ -1,6 +1,6 @@ /****************************************************************************** * timer.c - * + * * Copyright (c) 2002-2003 Rolf Neugebauer * Copyright (c) 2002-2005 K A Fraser */ @@ -28,11 +28,12 @@ static unsigned int timer_slop __read_mostly = 50000; /* 50 us */ integer_param("timer_slop", timer_slop); -struct timers { - spinlock_t lock; +struct timers +{ + spinlock_t lock; struct timer **heap; - struct timer *list; - struct timer *running; + struct timer *list; + struct timer *running; struct list_head inactive; } __cacheline_aligned; @@ -47,11 +48,11 @@ DEFINE_PER_CPU(s_time_t, timer_deadline); * HEAP OPERATIONS. */ -#define GET_HEAP_SIZE(_h) ((int)(((u16 *)(_h))[0])) -#define SET_HEAP_SIZE(_h,_v) (((u16 *)(_h))[0] = (u16)(_v)) +#define GET_HEAP_SIZE(_h) ((int)(((u16 *)(_h))[0])) +#define SET_HEAP_SIZE(_h, _v) (((u16 *)(_h))[0] = (u16)(_v)) -#define GET_HEAP_LIMIT(_h) ((int)(((u16 *)(_h))[1])) -#define SET_HEAP_LIMIT(_h,_v) (((u16 *)(_h))[1] = (u16)(_v)) +#define GET_HEAP_LIMIT(_h) ((int)(((u16 *)(_h))[1])) +#define SET_HEAP_LIMIT(_h, _v) (((u16 *)(_h))[1] = (u16)(_v)) /* Sink down element @pos of @heap. 
*/ static void down_heap(struct timer **heap, int pos) @@ -61,7 +62,8 @@ static void down_heap(struct timer **heap, int pos) while ( (nxt = (pos << 1)) <= sz ) { - if ( ((nxt+1) <= sz) && (heap[nxt+1]->expires < heap[nxt]->expires) ) + if ( ((nxt + 1) <= sz) && + (heap[nxt + 1]->expires < heap[nxt]->expires) ) nxt++; if ( heap[nxt]->expires > t->expires ) break; @@ -79,9 +81,9 @@ static void up_heap(struct timer **heap, int pos) { struct timer *t = heap[pos]; - while ( (pos > 1) && (t->expires < heap[pos>>1]->expires) ) + while ( (pos > 1) && (t->expires < heap[pos >> 1]->expires) ) { - heap[pos] = heap[pos>>1]; + heap[pos] = heap[pos >> 1]; heap[pos]->heap_offset = pos; pos >>= 1; } @@ -90,7 +92,6 @@ static void up_heap(struct timer **heap, int pos) t->heap_offset = pos; } - /* Delete @t from @heap. Return TRUE if new top of heap. */ static int remove_from_heap(struct timer **heap, struct timer *t) { @@ -99,7 +100,7 @@ static int remove_from_heap(struct timer **heap, struct timer *t) if ( unlikely(pos == sz) ) { - SET_HEAP_SIZE(heap, sz-1); + SET_HEAP_SIZE(heap, sz - 1); goto out; } @@ -108,16 +109,15 @@ static int remove_from_heap(struct timer **heap, struct timer *t) SET_HEAP_SIZE(heap, --sz); - if ( (pos > 1) && (heap[pos]->expires < heap[pos>>1]->expires) ) + if ( (pos > 1) && (heap[pos]->expires < heap[pos >> 1]->expires) ) up_heap(heap, pos); else down_heap(heap, pos); - out: +out: return (pos == 1); } - /* Add new entry @t to @heap. Return TRUE if new top of heap. */ static int add_to_heap(struct timer **heap, struct timer *t) { @@ -135,7 +135,6 @@ static int add_to_heap(struct timer **heap, struct timer *t) return (t->heap_offset == 1); } - /**************************************************************************** * LINKED LIST OPERATIONS. */ @@ -165,7 +164,6 @@ static int add_to_list(struct timer **pprev, struct timer *t) return (_pprev == pprev); } - /**************************************************************************** * TIMER OPERATIONS. 
*/ @@ -175,7 +173,7 @@ static int remove_entry(struct timer *t) struct timers *timers = &per_cpu(timers, t->cpu); int rc; - switch ( t->status ) + switch (t->status) { case TIMER_STATUS_in_heap: rc = remove_from_heap(timers->heap, t); @@ -236,7 +234,7 @@ static inline bool_t timer_lock(struct timer *timer) rcu_read_lock(&timer_cpu_read_lock); - for ( ; ; ) + for ( ;; ) { cpu = read_atomic(&timer->cpu); if ( unlikely(cpu == TIMER_CPU_status_killed) ) @@ -254,24 +252,25 @@ static inline bool_t timer_lock(struct timer *timer) return 1; } -#define timer_lock_irqsave(t, flags) ({ \ - bool_t __x; \ - local_irq_save(flags); \ - if ( !(__x = timer_lock(t)) ) \ - local_irq_restore(flags); \ - __x; \ -}) +#define timer_lock_irqsave(t, flags) \ + ({ \ + bool_t __x; \ + local_irq_save(flags); \ + if ( !(__x = timer_lock(t)) ) \ + local_irq_restore(flags); \ + __x; \ + }) static inline void timer_unlock(struct timer *timer) { spin_unlock(&per_cpu(timers, timer->cpu).lock); } -#define timer_unlock_irqrestore(t, flags) ({ \ - timer_unlock(t); \ - local_irq_restore(flags); \ -}) - +#define timer_unlock_irqrestore(t, flags) \ + ({ \ + timer_unlock(t); \ + local_irq_restore(flags); \ + }) static bool_t active_timer(struct timer *timer) { @@ -280,12 +279,8 @@ static bool_t active_timer(struct timer *timer) return (timer->status >= TIMER_STATUS_in_heap); } - -void init_timer( - struct timer *timer, - void (*function)(void *), - void *data, - unsigned int cpu) +void init_timer(struct timer *timer, void (*function)(void *), void *data, + unsigned int cpu) { unsigned long flags; memset(timer, 0, sizeof(*timer)); @@ -299,7 +294,6 @@ void init_timer( timer_unlock_irqrestore(timer, flags); } - void set_timer(struct timer *timer, s_time_t expires) { unsigned long flags; @@ -317,7 +311,6 @@ void set_timer(struct timer *timer, s_time_t expires) timer_unlock_irqrestore(timer, flags); } - void stop_timer(struct timer *timer) { unsigned long flags; @@ -354,7 +347,7 @@ void migrate_timer(struct timer *timer, unsigned int new_cpu) rcu_read_lock(&timer_cpu_read_lock); - for ( ; ; ) + for ( ;; ) { old_cpu = read_atomic(&timer->cpu); if ( (old_cpu == new_cpu) || (old_cpu == TIMER_CPU_status_killed) ) @@ -375,7 +368,7 @@ void migrate_timer(struct timer *timer, unsigned int new_cpu) } if ( likely(timer->cpu == old_cpu) ) - break; + break; spin_unlock(&per_cpu(timers, old_cpu).lock); spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags); @@ -398,7 +391,6 @@ void migrate_timer(struct timer *timer, unsigned int new_cpu) spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags); } - void kill_timer(struct timer *timer) { unsigned int old_cpu, cpu; @@ -419,12 +411,11 @@ void kill_timer(struct timer *timer) spin_unlock_irqrestore(&per_cpu(timers, old_cpu).lock, flags); - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) while ( per_cpu(timers, cpu).running == timer ) cpu_relax(); } - static void execute_timer(struct timers *ts, struct timer *t) { void (*fn)(void *) = t->function; @@ -440,12 +431,11 @@ static void execute_timer(struct timers *ts, struct timer *t) ts->running = NULL; } - static void timer_softirq_action(void) { - struct timer *t, **heap, *next; + struct timer *t, **heap, *next; struct timers *ts; - s_time_t now, deadline; + s_time_t now, deadline; ts = &this_cpu(timers); heap = ts->heap; @@ -475,8 +465,7 @@ static void timer_softirq_action(void) now = NOW(); /* Execute ready heap timers. 
*/ - while ( (GET_HEAP_SIZE(heap) != 0) && - ((t = heap[1])->expires < now) ) + while ( (GET_HEAP_SIZE(heap) != 0) && ((t = heap[1])->expires < now) ) { remove_from_heap(heap, t); execute_timer(ts, t); @@ -525,21 +514,21 @@ s_time_t align_timer(s_time_t firsttick, uint64_t period) static void dump_timer(struct timer *t, s_time_t now) { - printk(" ex=%12"PRId64"us timer=%p cb=%ps(%p)\n", + printk(" ex=%12" PRId64 "us timer=%p cb=%ps(%p)\n", (t->expires - now) / 1000, t, t->function, t->data); } static void dump_timerq(unsigned char key) { - struct timer *t; + struct timer *t; struct timers *ts; - unsigned long flags; - s_time_t now = NOW(); - int i, j; + unsigned long flags; + s_time_t now = NOW(); + int i, j; printk("Dumping timer queues:\n"); - for_each_online_cpu( i ) + for_each_online_cpu (i) { ts = &per_cpu(timers, i); @@ -576,8 +565,8 @@ static void migrate_timers_from_cpu(unsigned int old_cpu) spin_lock(&old_ts->lock); } - while ( (t = GET_HEAP_SIZE(old_ts->heap) - ? old_ts->heap[1] : old_ts->list) != NULL ) + while ( (t = GET_HEAP_SIZE(old_ts->heap) ? old_ts->heap[1] + : old_ts->list) != NULL ) { remove_entry(t); write_atomic(&t->cpu, new_cpu); @@ -601,13 +590,13 @@ static void migrate_timers_from_cpu(unsigned int old_cpu) static struct timer *dummy_heap; -static int cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; struct timers *ts = &per_cpu(timers, cpu); - switch ( action ) + switch (action) { case CPU_UP_PREPARE: INIT_LIST_HEAD(&ts->inactive); @@ -625,10 +614,8 @@ static int cpu_callback( return NOTIFY_DONE; } -static struct notifier_block cpu_nfb = { - .notifier_call = cpu_callback, - .priority = 99 -}; +static struct notifier_block cpu_nfb = {.notifier_call = cpu_callback, + .priority = 99}; void __init timer_init(void) { diff --git a/xen/common/tmem.c b/xen/common/tmem.c index c077f87e77..ca922c8ec8 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -37,62 +37,67 @@ struct tmem_statistics tmem_stats = { /************ CORE DATA STRUCTURES ************************************/ -struct tmem_object_root { +struct tmem_object_root +{ struct xen_tmem_oid oid; - struct rb_node rb_tree_node; /* Protected by pool->pool_rwlock. */ - unsigned long objnode_count; /* Atomicity depends on obj_spinlock. */ - long pgp_count; /* Atomicity depends on obj_spinlock. */ + struct rb_node rb_tree_node; /* Protected by pool->pool_rwlock. */ + unsigned long objnode_count; /* Atomicity depends on obj_spinlock. */ + long pgp_count; /* Atomicity depends on obj_spinlock. */ struct radix_tree_root tree_root; /* Tree of pages within object. */ struct tmem_pool *pool; domid_t last_client; spinlock_t obj_spinlock; }; -struct tmem_object_node { +struct tmem_object_node +{ struct tmem_object_root *obj; struct radix_tree_node rtn; }; -struct tmem_page_descriptor { +struct tmem_page_descriptor +{ union { struct list_head global_eph_pages; struct list_head client_inv_pages; }; union { - struct { + struct + { union { struct list_head client_eph_pages; struct list_head pool_pers_pages; }; struct tmem_object_root *obj; } us; - struct xen_tmem_oid inv_oid; /* Used for invalid list only. */ + struct xen_tmem_oid inv_oid; /* Used for invalid list only. */ }; pagesize_t size; /* 0 == PAGE_SIZE (pfp), -1 == data invalid, else compressed data (cdata). */ uint32_t index; - bool eviction_attempted; /* CHANGE TO lifetimes? (settable). 
*/ + bool eviction_attempted; /* CHANGE TO lifetimes? (settable). */ union { - struct page_info *pfp; /* Page frame pointer. */ - char *cdata; /* Compressed data. */ + struct page_info *pfp; /* Page frame pointer. */ + char *cdata; /* Compressed data. */ struct tmem_page_content_descriptor *pcd; /* Page dedup. */ }; union { uint64_t timestamp; - uint32_t pool_id; /* Used for invalid list only. */ + uint32_t pool_id; /* Used for invalid list only. */ }; }; -#define PCD_TZE_MAX_SIZE (PAGE_SIZE - (PAGE_SIZE/64)) +#define PCD_TZE_MAX_SIZE (PAGE_SIZE - (PAGE_SIZE / 64)) -struct tmem_page_content_descriptor { +struct tmem_page_content_descriptor +{ union { - struct page_info *pfp; /* Page frame pointer. */ - char *cdata; /* If compression_enabled. */ + struct page_info *pfp; /* Page frame pointer. */ + char *cdata; /* If compression_enabled. */ }; pagesize_t size; /* If compression_enabled -> 0 *pfp. */ + * else if tze, 0<=size *pfp. */ }; static int tmem_initialized = 0; @@ -111,7 +116,7 @@ static DEFINE_SPINLOCK(pers_lists_spinlock); #define ASSERT_SPINLOCK(_l) ASSERT(spin_is_locked(_l)) #define ASSERT_WRITELOCK(_l) ASSERT(rw_is_write_locked(_l)) - atomic_t client_weight_total; +atomic_t client_weight_total; struct tmem_global tmem_global = { .ephemeral_page_list = LIST_HEAD_INIT(tmem_global.ephemeral_page_list), @@ -122,8 +127,8 @@ struct tmem_global tmem_global = { /* * There two types of memory allocation interfaces in tmem. * One is based on xmem_pool and the other is used for allocate a whole page. - * Both of them are based on the lowlevel function __tmem_alloc_page/_thispool(). - * The call trace of alloc path is like below. + * Both of them are based on the lowlevel function + * __tmem_alloc_page/_thispool(). The call trace of alloc path is like below. 
* Persistant pool: * 1.tmem_malloc() * > xmem_pool_alloc() @@ -146,14 +151,15 @@ static void *tmem_malloc(size_t size, struct tmem_pool *pool) { void *v = NULL; - if ( (pool != NULL) && is_persistent(pool) ) { + if ( (pool != NULL) && is_persistent(pool) ) + { if ( pool->client->persistent_pool ) v = xmem_pool_alloc(size, pool->client->persistent_pool); } else { - ASSERT( size < tmem_mempool_maxalloc ); - ASSERT( tmem_mempool != NULL ); + ASSERT(size < tmem_mempool_maxalloc); + ASSERT(tmem_mempool != NULL); v = xmem_pool_alloc(size, tmem_mempool); } if ( v == NULL ) @@ -165,12 +171,12 @@ static void tmem_free(void *p, struct tmem_pool *pool) { if ( pool == NULL || !is_persistent(pool) ) { - ASSERT( tmem_mempool != NULL ); + ASSERT(tmem_mempool != NULL); xmem_pool_free(p, tmem_mempool); } else { - ASSERT( pool->client->persistent_pool != NULL ); + ASSERT(pool->client->persistent_pool != NULL); xmem_pool_free(p, pool->client->persistent_pool); } } @@ -218,8 +224,9 @@ static void tmem_mempool_page_put(void *page_va) static int __init tmem_mempool_init(void) { - tmem_mempool = xmem_pool_create("tmem", tmem_mempool_page_get, - tmem_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE); + tmem_mempool = + xmem_pool_create("tmem", tmem_mempool_page_get, tmem_mempool_page_put, + PAGE_SIZE, 0, PAGE_SIZE); if ( tmem_mempool ) tmem_mempool_maxalloc = xmem_pool_maxalloc(tmem_mempool); return tmem_mempool != NULL; @@ -264,7 +271,8 @@ static struct tmem_page_descriptor *pgp_alloc(struct tmem_object_root *obj) ASSERT(obj != NULL); ASSERT(obj->pool != NULL); pool = obj->pool; - if ( (pgp = tmem_malloc(sizeof(struct tmem_page_descriptor), pool)) == NULL ) + if ( (pgp = tmem_malloc(sizeof(struct tmem_page_descriptor), pool)) == + NULL ) return NULL; pgp->us.obj = obj; INIT_LIST_HEAD(&pgp->global_eph_pages); @@ -280,7 +288,8 @@ static struct tmem_page_descriptor *pgp_alloc(struct tmem_object_root *obj) return pgp; } -static struct tmem_page_descriptor *pgp_lookup_in_obj(struct tmem_object_root *obj, uint32_t index) +static struct tmem_page_descriptor * +pgp_lookup_in_obj(struct tmem_object_root *obj, uint32_t index) { ASSERT(obj != NULL); ASSERT_SPINLOCK(&obj->obj_spinlock); @@ -288,7 +297,8 @@ static struct tmem_page_descriptor *pgp_lookup_in_obj(struct tmem_object_root *o return radix_tree_lookup(&obj->tree_root, index); } -static void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem_pool *pool) +static void pgp_free_data(struct tmem_page_descriptor *pgp, + struct tmem_pool *pool) { pagesize_t pgp_size = pgp->size; @@ -297,7 +307,7 @@ static void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem_pool *po if ( pgp_size ) tmem_free(pgp->cdata, pool); else - tmem_free_page(pgp->us.obj->pool,pgp->pfp); + tmem_free_page(pgp->us.obj->pool, pgp->pfp); if ( pool != NULL && pgp_size ) { pool->client->compressed_pages--; @@ -402,7 +412,8 @@ static void pgp_destroy(void *v) pgp_delist_free(pgp); } -static int pgp_add_to_obj(struct tmem_object_root *obj, uint32_t index, struct tmem_page_descriptor *pgp) +static int pgp_add_to_obj(struct tmem_object_root *obj, uint32_t index, + struct tmem_page_descriptor *pgp) { int ret; @@ -413,7 +424,8 @@ static int pgp_add_to_obj(struct tmem_object_root *obj, uint32_t index, struct t return ret; } -static struct tmem_page_descriptor *pgp_delete_from_obj(struct tmem_object_root *obj, uint32_t index) +static struct tmem_page_descriptor * +pgp_delete_from_obj(struct tmem_object_root *obj, uint32_t index) { struct tmem_page_descriptor *pgp; @@ -437,12 +449,12 @@ static struct 
radix_tree_node *rtn_alloc(void *arg) struct tmem_object_root *obj = (struct tmem_object_root *)arg; ASSERT(obj->pool != NULL); - objnode = tmem_malloc(sizeof(struct tmem_object_node),obj->pool); - if (objnode == NULL) + objnode = tmem_malloc(sizeof(struct tmem_object_node), obj->pool); + if ( objnode == NULL ) return NULL; objnode->obj = obj; memset(&objnode->rtn, 0, sizeof(struct radix_tree_node)); - if (++obj->pool->objnode_count > obj->pool->objnode_count_max) + if ( ++obj->pool->objnode_count > obj->pool->objnode_count_max ) obj->pool->objnode_count_max = obj->pool->objnode_count; atomic_inc_and_max(global_rtree_node_count); obj->objnode_count++; @@ -456,7 +468,7 @@ static void rtn_free(struct radix_tree_node *rtn, void *arg) struct tmem_object_node *objnode; ASSERT(rtn != NULL); - objnode = container_of(rtn,struct tmem_object_node,rtn); + objnode = container_of(rtn, struct tmem_object_node, rtn); ASSERT(objnode->obj != NULL); ASSERT_SPINLOCK(&objnode->obj->obj_spinlock); pool = objnode->obj->pool; @@ -470,8 +482,7 @@ static void rtn_free(struct radix_tree_node *rtn, void *arg) /************ POOL OBJECT COLLECTION MANIPULATION ROUTINES *******************/ -static int oid_compare(struct xen_tmem_oid *left, - struct xen_tmem_oid *right) +static int oid_compare(struct xen_tmem_oid *left, struct xen_tmem_oid *right) { if ( left->oid[2] == right->oid[2] ) { @@ -502,13 +513,14 @@ static void oid_set_invalid(struct xen_tmem_oid *oidp) static unsigned oid_hash(struct xen_tmem_oid *oidp) { - return (tmem_hash(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2], - BITS_PER_LONG) & OBJ_HASH_BUCKETS_MASK); + return ( + tmem_hash(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2], BITS_PER_LONG) & + OBJ_HASH_BUCKETS_MASK); } /* Searches for object==oid in pool, returns locked object if found. */ -static struct tmem_object_root * obj_find(struct tmem_pool *pool, - struct xen_tmem_oid *oidp) +static struct tmem_object_root *obj_find(struct tmem_pool *pool, + struct xen_tmem_oid *oidp) { struct rb_node *node; struct tmem_object_root *obj; @@ -519,21 +531,21 @@ restart_find: while ( node ) { obj = container_of(node, struct tmem_object_root, rb_tree_node); - switch ( oid_compare(&obj->oid, oidp) ) + switch (oid_compare(&obj->oid, oidp)) { - case 0: /* Equal. */ - if ( !spin_trylock(&obj->obj_spinlock) ) - { - read_unlock(&pool->pool_rwlock); - goto restart_find; - } + case 0: /* Equal. */ + if ( !spin_trylock(&obj->obj_spinlock) ) + { read_unlock(&pool->pool_rwlock); - return obj; - case -1: - node = node->rb_left; - break; - case 1: - node = node->rb_right; + goto restart_find; + } + read_unlock(&pool->pool_rwlock); + return obj; + case -1: + node = node->rb_left; + break; + case 1: + node = node->rb_right; } } read_unlock(&pool->pool_rwlock); @@ -582,16 +594,16 @@ static int obj_rb_insert(struct rb_root *root, struct tmem_object_root *obj) { this = container_of(*new, struct tmem_object_root, rb_tree_node); parent = *new; - switch ( oid_compare(&this->oid, &obj->oid) ) + switch (oid_compare(&this->oid, &obj->oid)) { - case 0: - return 0; - case -1: - new = &((*new)->rb_left); - break; - case 1: - new = &((*new)->rb_right); - break; + case 0: + return 0; + case -1: + new = &((*new)->rb_left); + break; + case 1: + new = &((*new)->rb_right); + break; } } rb_link_node(&obj->rb_tree_node, parent, new); @@ -603,8 +615,8 @@ static int obj_rb_insert(struct rb_root *root, struct tmem_object_root *obj) * Allocate, initialize, and insert an tmem_object_root * (should be called only if find failed). 
*/ -static struct tmem_object_root * obj_alloc(struct tmem_pool *pool, - struct xen_tmem_oid *oidp) +static struct tmem_object_root *obj_alloc(struct tmem_pool *pool, + struct xen_tmem_oid *oidp) { struct tmem_object_root *obj; @@ -612,7 +624,7 @@ static struct tmem_object_root * obj_alloc(struct tmem_pool *pool, if ( (obj = tmem_malloc(sizeof(struct tmem_object_root), pool)) == NULL ) return NULL; pool->obj_count++; - if (pool->obj_count > pool->obj_count_max) + if ( pool->obj_count > pool->obj_count_max ) pool->obj_count_max = pool->obj_count; atomic_inc_and_max(global_obj_count); radix_tree_init(&obj->tree_root); @@ -643,7 +655,7 @@ static void pool_destroy_objs(struct tmem_pool *pool, domid_t cli_id) write_lock(&pool->pool_rwlock); pool->is_dying = 1; - for (i = 0; i < OBJ_HASH_BUCKETS; i++) + for ( i = 0; i < OBJ_HASH_BUCKETS; i++ ) { node = rb_first(&pool->obj_rb_root[i]); while ( node != NULL ) @@ -660,17 +672,16 @@ static void pool_destroy_objs(struct tmem_pool *pool, domid_t cli_id) write_unlock(&pool->pool_rwlock); } - /************ POOL MANIPULATION ROUTINES ******************************/ -static struct tmem_pool * pool_alloc(void) +static struct tmem_pool *pool_alloc(void) { struct tmem_pool *pool; int i; if ( (pool = xzalloc(struct tmem_pool)) == NULL ) return NULL; - for (i = 0; i < OBJ_HASH_BUCKETS; i++) + for ( i = 0; i < OBJ_HASH_BUCKETS; i++ ) pool->obj_rb_root[i] = RB_ROOT; INIT_LIST_HEAD(&pool->persistent_page_list); rwlock_init(&pool->pool_rwlock); @@ -697,8 +708,8 @@ static int shared_pool_join(struct tmem_pool *pool, struct client *new_client) list_add_tail(&sl->share_list, &pool->share_list); if ( new_client->cli_id != pool->client->cli_id ) tmem_client_info("adding new %s %d to shared pool owned by %s %d\n", - tmem_client_str, new_client->cli_id, tmem_client_str, - pool->client->cli_id); + tmem_client_str, new_client->cli_id, tmem_client_str, + pool->client->cli_id); else if ( pool->shared_count ) tmem_client_info("inter-guest sharing of shared pool %s by client %d\n", tmem_client_str, pool->client->cli_id); @@ -726,8 +737,8 @@ static void shared_pool_reassign(struct tmem_pool *pool) * within an guest. 
*/ pool->client = new_client = sl->client; - for (poolid = 0; poolid < MAX_POOLS_PER_DOMAIN; poolid++) - if (new_client->pools[poolid] == pool) + for ( poolid = 0; poolid < MAX_POOLS_PER_DOMAIN; poolid++ ) + if ( new_client->pools[poolid] == pool ) break; ASSERT(poolid != MAX_POOLS_PER_DOMAIN); new_client->eph_count += _atomic_read(pool->pgp_count); @@ -735,7 +746,8 @@ static void shared_pool_reassign(struct tmem_pool *pool) list_splice_init(&old_client->ephemeral_page_list, &new_client->ephemeral_page_list); tmem_client_info("reassigned shared pool from %s=%d to %s=%d pool_id=%d\n", - tmem_cli_id_str, old_client->cli_id, tmem_cli_id_str, new_client->cli_id, poolid); + tmem_cli_id_str, old_client->cli_id, tmem_cli_id_str, + new_client->cli_id, poolid); pool->pool_id = poolid; } @@ -753,18 +765,18 @@ static int shared_pool_quit(struct tmem_pool *pool, domid_t cli_id) ASSERT_WRITELOCK(&tmem_rwlock); pool_destroy_objs(pool, cli_id); - list_for_each_entry(sl,&pool->share_list, share_list) + list_for_each_entry (sl, &pool->share_list, share_list) { - if (sl->client->cli_id != cli_id) + if ( sl->client->cli_id != cli_id ) continue; list_del(&sl->share_list); tmem_free(sl, pool); --pool->shared_count; - if (pool->client->cli_id == cli_id) + if ( pool->client->cli_id == cli_id ) shared_pool_reassign(pool); - if (pool->shared_count) + if ( pool->shared_count ) return pool->shared_count; - for (s_poolid = 0; s_poolid < MAX_GLOBAL_SHARED_POOLS; s_poolid++) + for ( s_poolid = 0; s_poolid < MAX_GLOBAL_SHARED_POOLS; s_poolid++ ) if ( (tmem_global.shared_pools[s_poolid]) == pool ) { tmem_global.shared_pools[s_poolid] = NULL; @@ -772,8 +784,8 @@ static int shared_pool_quit(struct tmem_pool *pool, domid_t cli_id) } return 0; } - tmem_client_warn("tmem: no match unsharing pool, %s=%d\n", - tmem_cli_id_str,pool->client->cli_id); + tmem_client_warn("tmem: no match unsharing pool, %s=%d\n", tmem_cli_id_str, + pool->client->cli_id); return -1; } @@ -781,20 +793,22 @@ static int shared_pool_quit(struct tmem_pool *pool, domid_t cli_id) static void pool_flush(struct tmem_pool *pool, domid_t cli_id) { ASSERT(pool != NULL); - if ( (is_shared(pool)) && (shared_pool_quit(pool,cli_id) > 0) ) + if ( (is_shared(pool)) && (shared_pool_quit(pool, cli_id) > 0) ) { - tmem_client_warn("tmem: %s=%d no longer using shared pool %d owned by %s=%d\n", - tmem_cli_id_str, cli_id, pool->pool_id, tmem_cli_id_str,pool->client->cli_id); + tmem_client_warn( + "tmem: %s=%d no longer using shared pool %d owned by %s=%d\n", + tmem_cli_id_str, cli_id, pool->pool_id, tmem_cli_id_str, + pool->client->cli_id); return; } tmem_client_info("Destroying %s-%s tmem pool %s=%d pool_id=%d\n", - is_persistent(pool) ? "persistent" : "ephemeral" , - is_shared(pool) ? "shared" : "private", - tmem_cli_id_str, pool->client->cli_id, pool->pool_id); + is_persistent(pool) ? "persistent" : "ephemeral", + is_shared(pool) ? "shared" : "private", tmem_cli_id_str, + pool->client->cli_id, pool->pool_id); if ( pool->client->info.flags.u.migrating ) { tmem_client_warn("can't destroy pool while %s is live-migrating\n", - tmem_client_str); + tmem_client_str); return; } pool_destroy_objs(pool, TMEM_CLI_ID_NULL); @@ -812,18 +826,19 @@ struct client *client_create(domid_t cli_id) struct domain *d; tmem_client_info("tmem: initializing tmem capability for %s=%d...", - tmem_cli_id_str, cli_id); + tmem_cli_id_str, cli_id); if ( client == NULL ) { tmem_client_err("failed... 
out of memory\n"); goto fail; } - for (i = 0, shift = 12; i < 4; shift -=4, i++) + for ( i = 0, shift = 12; i < 4; shift -= 4, i++ ) name[i] = (((unsigned short)cli_id >> shift) & 0xf) + '0'; name[4] = '\0'; - client->persistent_pool = xmem_pool_create(name, tmem_persistent_pool_page_get, - tmem_persistent_pool_page_put, PAGE_SIZE, 0, PAGE_SIZE); + client->persistent_pool = xmem_pool_create( + name, tmem_persistent_pool_page_get, tmem_persistent_pool_page_put, + PAGE_SIZE, 0, PAGE_SIZE); if ( client->persistent_pool == NULL ) { tmem_client_err("failed... can't alloc persistent pool\n"); @@ -831,12 +846,14 @@ struct client *client_create(domid_t cli_id) } d = rcu_lock_domain_by_id(cli_id); - if ( d == NULL ) { + if ( d == NULL ) + { tmem_client_err("failed... can't set client\n"); xmem_pool_destroy(client->persistent_pool); goto fail; } - if ( !d->is_dying ) { + if ( !d->is_dying ) + { d->tmem_client = client; client->domain = d; } @@ -846,16 +863,15 @@ struct client *client_create(domid_t cli_id) client->info.version = TMEM_SPEC_VERSION; client->info.maxpools = MAX_POOLS_PER_DOMAIN; client->info.flags.u.compress = tmem_compression_enabled(); - for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++) - client->shared_auth_uuid[i][0] = - client->shared_auth_uuid[i][1] = -1L; + for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++ ) + client->shared_auth_uuid[i][0] = client->shared_auth_uuid[i][1] = -1L; list_add_tail(&client->client_list, &tmem_global.client_list); INIT_LIST_HEAD(&client->ephemeral_page_list); INIT_LIST_HEAD(&client->persistent_invalidated_list); tmem_client_info("ok\n"); return client; - fail: +fail: xfree(client); return NULL; } @@ -873,7 +889,7 @@ static void client_flush(struct client *client) int i; struct tmem_pool *pool; - for (i = 0; i < MAX_POOLS_PER_DOMAIN; i++) + for ( i = 0; i < MAX_POOLS_PER_DOMAIN; i++ ) { if ( (pool = client->pools[i]) == NULL ) continue; @@ -890,7 +906,7 @@ static bool client_over_quota(const struct client *client) ASSERT(client != NULL); if ( (total == 0) || (client->info.weight == 0) || - (client->eph_count == 0) ) + (client->eph_count == 0) ) return false; return (((tmem_global.eph_count * 100L) / client->eph_count) > @@ -935,20 +951,22 @@ int tmem_evict(void) if ( (client != NULL) && client_over_quota(client) && !list_empty(&client->ephemeral_page_list) ) { - list_for_each_entry(pgp, &client->ephemeral_page_list, us.client_eph_pages) + list_for_each_entry (pgp, &client->ephemeral_page_list, + us.client_eph_pages) if ( tmem_try_to_evict_pgp(pgp, &hold_pool_rwlock) ) goto found; } else if ( !list_empty(&tmem_global.ephemeral_page_list) ) { - list_for_each_entry(pgp, &tmem_global.ephemeral_page_list, global_eph_pages) + list_for_each_entry (pgp, &tmem_global.ephemeral_page_list, + global_eph_pages) if ( tmem_try_to_evict_pgp(pgp, &hold_pool_rwlock) ) { client = pgp->us.obj->pool->client; goto found; } } - /* Global_ephemeral_page_list is empty, so we bail out. */ + /* Global_ephemeral_page_list is empty, so we bail out. */ spin_unlock(&eph_lists_spinlock); goto out; @@ -989,7 +1007,6 @@ out: return ret; } - /* * Under certain conditions (e.g. 
if each client is putting pages for exactly * one object), once locks are held, freeing up memory may @@ -1003,8 +1020,8 @@ static inline bool tmem_ensure_avail_pages(void) unsigned long free_mem; do { - free_mem = (tmem_page_list_pages + total_free_pages()) - >> (20 - PAGE_SHIFT); + free_mem = + (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT); if ( free_mem ) return true; if ( !tmem_evict() ) @@ -1016,8 +1033,8 @@ static inline bool tmem_ensure_avail_pages(void) /************ TMEM CORE OPERATIONS ************************************/ -static int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn, - tmem_cli_va_param_t clibuf) +static int do_tmem_put_compress(struct tmem_page_descriptor *pgp, + xen_pfn_t cmfn, tmem_cli_va_param_t clibuf) { void *dst, *p; size_t size; @@ -1034,14 +1051,19 @@ static int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn ret = tmem_compress_from_client(cmfn, &dst, &size, clibuf); if ( ret <= 0 ) goto out; - else if ( (size == 0) || (size >= tmem_mempool_maxalloc) ) { + else if ( (size == 0) || (size >= tmem_mempool_maxalloc) ) + { ret = 0; goto out; - } else if ( (p = tmem_malloc(size,pgp->us.obj->pool)) == NULL ) { + } + else if ( (p = tmem_malloc(size, pgp->us.obj->pool)) == NULL ) + { ret = -ENOMEM; goto out; - } else { - memcpy(p,dst,size); + } + else + { + memcpy(p, dst, size); pgp->cdata = p; } pgp->size = size; @@ -1054,7 +1076,7 @@ out: } static int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn, - tmem_cli_va_param_t clibuf) + tmem_cli_va_param_t clibuf) { struct tmem_pool *pool; struct tmem_object_root *obj; @@ -1090,7 +1112,7 @@ static int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn, copy_uncompressed: if ( pgp->pfp ) pgp_free_data(pgp, pool); - if ( ( pgp->pfp = tmem_alloc_page(pool) ) == NULL ) + if ( (pgp->pfp = tmem_alloc_page(pool)) == NULL ) goto failed_dup; pgp->size = 0; ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_cli_buf_null); @@ -1127,16 +1149,18 @@ cleanup: write_lock(&pool->pool_rwlock); obj_free(obj); write_unlock(&pool->pool_rwlock); - } else { + } + else + { spin_unlock(&obj->obj_spinlock); } pool->dup_puts_flushed++; return ret; } -static int do_tmem_put(struct tmem_pool *pool, - struct xen_tmem_oid *oidp, uint32_t index, - xen_pfn_t cmfn, tmem_cli_va_param_t clibuf) +static int do_tmem_put(struct tmem_pool *pool, struct xen_tmem_oid *oidp, + uint32_t index, xen_pfn_t cmfn, + tmem_cli_va_param_t clibuf) { struct tmem_object_root *obj = NULL; struct tmem_page_descriptor *pgp = NULL; @@ -1146,14 +1170,14 @@ static int do_tmem_put(struct tmem_pool *pool, ASSERT(pool != NULL); client = pool->client; ASSERT(client != NULL); - ret = client->info.flags.u.frozen ? -EFROZEN : -ENOMEM; + ret = client->info.flags.u.frozen ? -EFROZEN : -ENOMEM; pool->puts++; refind: /* Does page already exist (dup)? if so, handle specially. */ if ( (obj = obj_find(pool, oidp)) != NULL ) { - if ((pgp = pgp_lookup_in_obj(obj, index)) != NULL) + if ( (pgp = pgp_lookup_in_obj(obj, index)) != NULL ) { return do_tmem_dup_put(pgp, cmfn, clibuf); } @@ -1174,8 +1198,8 @@ refind: write_lock(&pool->pool_rwlock); /* - * Parallel callers may already allocated obj and inserted to obj_rb_root - * before us. + * Parallel callers may already allocated obj and inserted to + * obj_rb_root before us. 
*/ if ( !obj_rb_insert(&pool->obj_rb_root[oid_hash(oidp)], obj) ) { @@ -1195,7 +1219,7 @@ refind: goto unlock_obj; ret = pgp_add_to_obj(obj, index, pgp); - if ( ret == -ENOMEM ) + if ( ret == -ENOMEM ) /* Warning: may result in partially built radix tree ("stump"). */ goto free_pgp; @@ -1223,7 +1247,7 @@ refind: } copy_uncompressed: - if ( ( pgp->pfp = tmem_alloc_page(pool) ) == NULL ) + if ( (pgp->pfp = tmem_alloc_page(pool)) == NULL ) { ret = -ENOMEM; goto del_pgp_from_obj; @@ -1237,19 +1261,17 @@ insert_page: { spin_lock(&eph_lists_spinlock); list_add_tail(&pgp->global_eph_pages, &tmem_global.ephemeral_page_list); - if (++tmem_global.eph_count > tmem_stats.global_eph_count_max) + if ( ++tmem_global.eph_count > tmem_stats.global_eph_count_max ) tmem_stats.global_eph_count_max = tmem_global.eph_count; - list_add_tail(&pgp->us.client_eph_pages, - &client->ephemeral_page_list); - if (++client->eph_count > client->eph_count_max) + list_add_tail(&pgp->us.client_eph_pages, &client->ephemeral_page_list); + if ( ++client->eph_count > client->eph_count_max ) client->eph_count_max = client->eph_count; spin_unlock(&eph_lists_spinlock); } else { /* is_persistent. */ spin_lock(&pers_lists_spinlock); - list_add_tail(&pgp->us.pool_pers_pages, - &pool->persistent_page_list); + list_add_tail(&pgp->us.pool_pers_pages, &pool->persistent_page_list); spin_unlock(&pers_lists_spinlock); } @@ -1290,9 +1312,9 @@ unlock_obj: return ret; } -static int do_tmem_get(struct tmem_pool *pool, - struct xen_tmem_oid *oidp, uint32_t index, - xen_pfn_t cmfn, tmem_cli_va_param_t clibuf) +static int do_tmem_get(struct tmem_pool *pool, struct xen_tmem_oid *oidp, + uint32_t index, xen_pfn_t cmfn, + tmem_cli_va_param_t clibuf) { struct tmem_object_root *obj; struct tmem_page_descriptor *pgp; @@ -1303,12 +1325,12 @@ static int do_tmem_get(struct tmem_pool *pool, return -EEMPTY; pool->gets++; - obj = obj_find(pool,oidp); + obj = obj_find(pool, oidp); if ( obj == NULL ) return 0; ASSERT_SPINLOCK(&obj->obj_spinlock); - if (is_shared(pool) || is_persistent(pool) ) + if ( is_shared(pool) || is_persistent(pool) ) pgp = pgp_lookup_in_obj(obj, index); else pgp = pgp_delete_from_obj(obj, index); @@ -1339,12 +1361,16 @@ static int do_tmem_get(struct tmem_pool *pool, obj = NULL; write_unlock(&pool->pool_rwlock); } - } else { + } + else + { spin_lock(&eph_lists_spinlock); list_del(&pgp->global_eph_pages); - list_add_tail(&pgp->global_eph_pages,&tmem_global.ephemeral_page_list); + list_add_tail(&pgp->global_eph_pages, + &tmem_global.ephemeral_page_list); list_del(&pgp->us.client_eph_pages); - list_add_tail(&pgp->us.client_eph_pages,&client->ephemeral_page_list); + list_add_tail(&pgp->us.client_eph_pages, + &client->ephemeral_page_list); spin_unlock(&eph_lists_spinlock); obj->last_client = current->domain->domain_id; } @@ -1366,14 +1392,14 @@ bad_copy: return rc; } -static int do_tmem_flush_page(struct tmem_pool *pool, - struct xen_tmem_oid *oidp, uint32_t index) +static int do_tmem_flush_page(struct tmem_pool *pool, struct xen_tmem_oid *oidp, + uint32_t index) { struct tmem_object_root *obj; struct tmem_page_descriptor *pgp; pool->flushs++; - obj = obj_find(pool,oidp); + obj = obj_find(pool, oidp); if ( obj == NULL ) goto out; pgp = pgp_delete_from_obj(obj, index); @@ -1388,7 +1414,9 @@ static int do_tmem_flush_page(struct tmem_pool *pool, write_lock(&pool->pool_rwlock); obj_free(obj); write_unlock(&pool->pool_rwlock); - } else { + } + else + { spin_unlock(&obj->obj_spinlock); } pool->flushs_found++; @@ -1406,7 +1434,7 @@ static int 
do_tmem_flush_object(struct tmem_pool *pool, struct tmem_object_root *obj; pool->flush_objs++; - obj = obj_find(pool,oidp); + obj = obj_find(pool, oidp); if ( obj == NULL ) goto out; write_lock(&pool->pool_rwlock); @@ -1436,18 +1464,17 @@ static int do_tmem_destroy_pool(uint32_t pool_id) return 1; } -int do_tmem_new_pool(domid_t this_cli_id, - uint32_t d_poolid, uint32_t flags, +int do_tmem_new_pool(domid_t this_cli_id, uint32_t d_poolid, uint32_t flags, uint64_t uuid_lo, uint64_t uuid_hi) { struct client *client; domid_t cli_id; int persistent = flags & TMEM_POOL_PERSIST; int shared = flags & TMEM_POOL_SHARED; - int pagebits = (flags >> TMEM_POOL_PAGESIZE_SHIFT) - & TMEM_POOL_PAGESIZE_MASK; - int specversion = (flags >> TMEM_POOL_VERSION_SHIFT) - & TMEM_POOL_VERSION_MASK; + int pagebits = + (flags >> TMEM_POOL_PAGESIZE_SHIFT) & TMEM_POOL_PAGESIZE_MASK; + int specversion = + (flags >> TMEM_POOL_VERSION_SHIFT) & TMEM_POOL_VERSION_MASK; struct tmem_pool *pool, *shpool; int i, first_unused_s_poolid; @@ -1456,8 +1483,8 @@ int do_tmem_new_pool(domid_t this_cli_id, else cli_id = this_cli_id; tmem_client_info("tmem: allocating %s-%s tmem pool for %s=%d...", - persistent ? "persistent" : "ephemeral" , - shared ? "shared" : "private", tmem_cli_id_str, cli_id); + persistent ? "persistent" : "ephemeral", + shared ? "shared" : "private", tmem_cli_id_str, cli_id); if ( specversion != TMEM_SPEC_VERSION ) { tmem_client_err("failed... unsupported spec version\n"); @@ -1465,13 +1492,14 @@ int do_tmem_new_pool(domid_t this_cli_id, } if ( shared && persistent ) { - tmem_client_err("failed... unable to create a shared-persistant pool\n"); + tmem_client_err( + "failed... unable to create a shared-persistant pool\n"); return -EPERM; } if ( pagebits != (PAGE_SHIFT - 12) ) { tmem_client_err("failed... unsupported pagesize %d\n", - 1 << (pagebits + 12)); + 1 << (pagebits + 12)); return -EPERM; } if ( flags & TMEM_POOL_PRECOMPRESSED ) @@ -1486,9 +1514,9 @@ int do_tmem_new_pool(domid_t this_cli_id, } if ( this_cli_id != TMEM_CLI_ID_NULL ) { - if ( (client = tmem_client_from_cli_id(this_cli_id)) == NULL - || d_poolid >= MAX_POOLS_PER_DOMAIN - || client->pools[d_poolid] != NULL ) + if ( (client = tmem_client_from_cli_id(this_cli_id)) == NULL || + d_poolid >= MAX_POOLS_PER_DOMAIN || + client->pools[d_poolid] != NULL ) return -EPERM; } else @@ -1500,8 +1528,9 @@ int do_tmem_new_pool(domid_t this_cli_id, break; if ( d_poolid >= MAX_POOLS_PER_DOMAIN ) { - tmem_client_err("failed... no more pool slots available for this %s\n", - tmem_client_str); + tmem_client_err( + "failed... no more pool slots available for this %s\n", + tmem_client_str); return -EPERM; } } @@ -1533,13 +1562,14 @@ int do_tmem_new_pool(domid_t this_cli_id, } if ( !tmem_global.shared_auth ) { - for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++) + for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++ ) if ( (client->shared_auth_uuid[i][0] == uuid_lo) && (client->shared_auth_uuid[i][1] == uuid_hi) ) break; if ( i == MAX_GLOBAL_SHARED_POOLS ) { - tmem_client_info("Shared auth failed, create non shared pool instead!\n"); + tmem_client_info( + "Shared auth failed, create non shared pool instead!\n"); pool->shared = 0; goto out; } @@ -1557,8 +1587,9 @@ int do_tmem_new_pool(domid_t this_cli_id, if ( shpool->uuid[0] == uuid_lo && shpool->uuid[1] == uuid_hi ) { /* Succ to match a global shared pool. 
*/ - tmem_client_info("(matches shared pool uuid=%"PRIx64".%"PRIx64") pool_id=%d\n", - uuid_hi, uuid_lo, d_poolid); + tmem_client_info("(matches shared pool uuid=%" PRIx64 + ".%" PRIx64 ") pool_id=%d\n", + uuid_hi, uuid_lo, d_poolid); client->pools[d_poolid] = shpool; if ( !shared_pool_join(shpool, client) ) { @@ -1579,7 +1610,8 @@ int do_tmem_new_pool(domid_t this_cli_id, /* Failed to find a global shared pool slot. */ if ( first_unused_s_poolid == MAX_GLOBAL_SHARED_POOLS ) { - tmem_client_warn("tmem: failed... no global shared pool slots available\n"); + tmem_client_warn( + "tmem: failed... no global shared pool slots available\n"); goto fail; } /* Add pool to global shared pool. */ @@ -1605,8 +1637,8 @@ fail: /************ TMEM CONTROL OPERATIONS ************************************/ -int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo, - uint64_t uuid_hi, bool auth) +int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo, uint64_t uuid_hi, + bool auth) { struct client *client; int i, free = -1; @@ -1620,12 +1652,12 @@ int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo, if ( client == NULL ) return -EINVAL; - for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++) + for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++ ) { if ( auth == 0 ) { if ( (client->shared_auth_uuid[i][0] == uuid_lo) && - (client->shared_auth_uuid[i][1] == uuid_hi) ) + (client->shared_auth_uuid[i][1] == uuid_hi) ) { client->shared_auth_uuid[i][0] = -1L; client->shared_auth_uuid[i][1] = -1L; @@ -1635,16 +1667,16 @@ int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo, else { if ( (client->shared_auth_uuid[i][0] == -1L) && - (client->shared_auth_uuid[i][1] == -1L) ) + (client->shared_auth_uuid[i][1] == -1L) ) { free = i; break; } - } + } } if ( auth == 0 ) return 0; - else if ( free == -1) + else if ( free == -1 ) return -ENOMEM; else { @@ -1654,20 +1686,20 @@ int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo, } } -static int tmemc_save_subop(int cli_id, uint32_t pool_id, - uint32_t subop, tmem_cli_va_param_t buf, uint32_t arg) +static int tmemc_save_subop(int cli_id, uint32_t pool_id, uint32_t subop, + tmem_cli_va_param_t buf, uint32_t arg) { struct client *client = tmem_client_from_cli_id(cli_id); uint32_t p; struct tmem_page_descriptor *pgp, *pgp2; int rc = -ENOENT; - switch(subop) + switch (subop) { case XEN_SYSCTL_TMEM_OP_SAVE_BEGIN: if ( client == NULL ) break; - for (p = 0; p < MAX_POOLS_PER_DOMAIN; p++) + for ( p = 0; p < MAX_POOLS_PER_DOMAIN; p++ ) if ( client->pools[p] != NULL ) break; @@ -1691,9 +1723,9 @@ static int tmemc_save_subop(int cli_id, uint32_t pool_id, break; client->info.flags.u.migrating = 0; if ( !list_empty(&client->persistent_invalidated_list) ) - list_for_each_entry_safe(pgp,pgp2, - &client->persistent_invalidated_list, client_inv_pages) - __pgp_free(pgp, client->pools[pgp->pool_id]); + list_for_each_entry_safe( + pgp, pgp2, &client->persistent_invalidated_list, + client_inv_pages) __pgp_free(pgp, client->pools[pgp->pool_id]); client->info.flags.u.frozen = client->was_frozen; rc = 0; break; @@ -1702,11 +1734,12 @@ static int tmemc_save_subop(int cli_id, uint32_t pool_id, } static int tmemc_save_get_next_page(int cli_id, uint32_t pool_id, - tmem_cli_va_param_t buf, uint32_t bufsize) + tmem_cli_va_param_t buf, uint32_t bufsize) { struct client *client = tmem_client_from_cli_id(cli_id); struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN) - ? NULL : client->pools[pool_id]; + ? 
NULL + : client->pools[pool_id]; struct tmem_page_descriptor *pgp; struct xen_tmem_oid *oid; int ret = 0; @@ -1728,17 +1761,19 @@ static int tmemc_save_get_next_page(int cli_id, uint32_t pool_id, if ( pool->cur_pgp == NULL ) { /* Process the first one. */ - pool->cur_pgp = pgp = list_entry((&pool->persistent_page_list)->next, - struct tmem_page_descriptor,us.pool_pers_pages); - } else if ( list_is_last(&pool->cur_pgp->us.pool_pers_pages, - &pool->persistent_page_list) ) + pool->cur_pgp = pgp = + list_entry((&pool->persistent_page_list)->next, + struct tmem_page_descriptor, us.pool_pers_pages); + } + else if ( list_is_last(&pool->cur_pgp->us.pool_pers_pages, + &pool->persistent_page_list) ) { /* Already processed the last one in the list. */ ret = -1; goto out; } pgp = list_entry((&pool->cur_pgp->us.pool_pers_pages)->next, - struct tmem_page_descriptor,us.pool_pers_pages); + struct tmem_page_descriptor, us.pool_pers_pages); pool->cur_pgp = pgp; oid = &pgp->us.obj->oid; h.pool_id = pool_id; @@ -1759,7 +1794,7 @@ out: } static int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_param_t buf, - uint32_t bufsize) + uint32_t bufsize) { struct client *client = tmem_client_from_cli_id(cli_id); struct tmem_page_descriptor *pgp; @@ -1776,17 +1811,20 @@ static int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_param_t buf, if ( client->cur_pgp == NULL ) { pgp = list_entry((&client->persistent_invalidated_list)->next, - struct tmem_page_descriptor,client_inv_pages); + struct tmem_page_descriptor, client_inv_pages); client->cur_pgp = pgp; - } else if ( list_is_last(&client->cur_pgp->client_inv_pages, - &client->persistent_invalidated_list) ) + } + else if ( list_is_last(&client->cur_pgp->client_inv_pages, + &client->persistent_invalidated_list) ) { client->cur_pgp = NULL; ret = 0; goto out; - } else { + } + else + { pgp = list_entry((&client->cur_pgp->client_inv_pages)->next, - struct tmem_page_descriptor,client_inv_pages); + struct tmem_page_descriptor, client_inv_pages); client->cur_pgp = pgp; } h.pool_id = pgp->pool_id; @@ -1802,35 +1840,36 @@ out: } static int tmemc_restore_put_page(int cli_id, uint32_t pool_id, - struct xen_tmem_oid *oidp, - uint32_t index, tmem_cli_va_param_t buf, - uint32_t bufsize) + struct xen_tmem_oid *oidp, uint32_t index, + tmem_cli_va_param_t buf, uint32_t bufsize) { struct client *client = tmem_client_from_cli_id(cli_id); struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN) - ? NULL : client->pools[pool_id]; + ? NULL + : client->pools[pool_id]; if ( pool == NULL ) return -1; - if (bufsize != PAGE_SIZE) { + if ( bufsize != PAGE_SIZE ) + { tmem_client_err("tmem: %s: invalid parameter bufsize(%d) != (%ld)\n", - __func__, bufsize, PAGE_SIZE); + __func__, bufsize, PAGE_SIZE); return -EINVAL; } return do_tmem_put(pool, oidp, index, 0, buf); } static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id, - struct xen_tmem_oid *oidp, - uint32_t index) + struct xen_tmem_oid *oidp, uint32_t index) { struct client *client = tmem_client_from_cli_id(cli_id); struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN) - ? NULL : client->pools[pool_id]; + ? 
NULL + : client->pools[pool_id]; if ( pool == NULL ) return -1; - return do_tmem_flush_page(pool,oidp,index); + return do_tmem_flush_page(pool, oidp, index); } int do_tmem_control(struct xen_sysctl_tmem_op *op) @@ -1851,16 +1890,17 @@ int do_tmem_control(struct xen_sysctl_tmem_op *op) guest_handle_cast(op->u.buf, char), op->arg); break; case XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE: - ret = tmemc_save_get_next_page(op->cli_id, pool_id, - guest_handle_cast(op->u.buf, char), op->len); + ret = tmemc_save_get_next_page( + op->cli_id, pool_id, guest_handle_cast(op->u.buf, char), op->len); break; case XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV: - ret = tmemc_save_get_next_inv(op->cli_id, - guest_handle_cast(op->u.buf, char), op->len); + ret = tmemc_save_get_next_inv( + op->cli_id, guest_handle_cast(op->u.buf, char), op->len); break; case XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE: - ret = tmemc_restore_put_page(op->cli_id, pool_id, oidp, op->arg, - guest_handle_cast(op->u.buf, char), op->len); + ret = + tmemc_restore_put_page(op->cli_id, pool_id, oidp, op->arg, + guest_handle_cast(op->u.buf, char), op->len); break; case XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE: ret = tmemc_restore_flush_page(op->cli_id, pool_id, oidp, op->arg); @@ -1898,7 +1938,8 @@ long do_tmem_op(tmem_cli_op_t uops) if ( unlikely(tmem_get_tmemop_from_client(&op, uops) != 0) ) { - tmem_client_err("tmem: can't get tmem struct from %s\n", tmem_client_str); + tmem_client_err("tmem: can't get tmem struct from %s\n", + tmem_client_str); tmem_stats.errored_tmem_ops++; return -EFAULT; } @@ -1906,7 +1947,7 @@ long do_tmem_op(tmem_cli_op_t uops) /* Acquire write lock for all commands at first. */ write_lock(&tmem_rwlock); - switch ( op.cmd ) + switch (op.cmd) { case TMEM_CONTROL: case TMEM_RESTORE_NEW: @@ -1915,16 +1956,16 @@ long do_tmem_op(tmem_cli_op_t uops) break; default: - /* - * For other commands, create per-client tmem structure dynamically on - * first use by client. - */ + /* + * For other commands, create per-client tmem structure dynamically on + * first use by client. + */ if ( client == NULL ) { if ( (client = client_create(current->domain->domain_id)) == NULL ) { tmem_client_err("tmem: can't create tmem structure for %s\n", - tmem_client_str); + tmem_client_str); rc = -ENOMEM; goto out; } @@ -1934,8 +1975,8 @@ long do_tmem_op(tmem_cli_op_t uops) { if ( op.cmd == TMEM_NEW_POOL ) rc = do_tmem_new_pool(TMEM_CLI_ID_NULL, 0, op.u.creat.flags, - op.u.creat.uuid[0], op.u.creat.uuid[1]); - else + op.u.creat.uuid[0], op.u.creat.uuid[1]); + else rc = do_tmem_destroy_pool(op.pool_id); } else @@ -1943,7 +1984,8 @@ long do_tmem_op(tmem_cli_op_t uops) if ( ((uint32_t)op.pool_id >= MAX_POOLS_PER_DOMAIN) || ((pool = client->pools[op.pool_id]) == NULL) ) { - tmem_client_err("tmem: operation requested on uncreated pool\n"); + tmem_client_err( + "tmem: operation requested on uncreated pool\n"); rc = -ENODEV; goto out; } @@ -1952,22 +1994,22 @@ long do_tmem_op(tmem_cli_op_t uops) read_lock(&tmem_rwlock); oidp = &op.u.gen.oid; - switch ( op.cmd ) + switch (op.cmd) { case TMEM_NEW_POOL: case TMEM_DESTROY_POOL: BUG(); /* Done earlier. 
*/ break; case TMEM_PUT_PAGE: - if (tmem_ensure_avail_pages()) + if ( tmem_ensure_avail_pages() ) rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, - tmem_cli_buf_null); + tmem_cli_buf_null); else rc = -ENOMEM; break; case TMEM_GET_PAGE: rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn, - tmem_cli_buf_null); + tmem_cli_buf_null); break; case TMEM_FLUSH_PAGE: rc = do_tmem_flush_page(pool, oidp, op.u.gen.index); @@ -1986,7 +2028,6 @@ long do_tmem_op(tmem_cli_op_t uops) return rc; } break; - } out: write_unlock(&tmem_rwlock); @@ -2011,21 +2052,22 @@ void tmem_destroy(void *v) write_lock(&tmem_rwlock); - printk("tmem: flushing tmem pools for %s=%d\n", - tmem_cli_id_str, client->cli_id); + printk("tmem: flushing tmem pools for %s=%d\n", tmem_cli_id_str, + client->cli_id); client_flush(client); write_unlock(&tmem_rwlock); } -#define MAX_EVICTS 10 /* Should be variable or set via XEN_SYSCTL_TMEM_OP_ ?? */ +#define MAX_EVICTS 10 /* Should be variable or set via XEN_SYSCTL_TMEM_OP_ ?? \ + */ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) { struct page_info *pfp; unsigned long evicts_per_relinq = 0; int max_evictions = 10; - if (!tmem_enabled() || !tmem_freeable_pages()) + if ( !tmem_enabled() || !tmem_freeable_pages() ) return NULL; tmem_stats.relinq_attempts++; @@ -2039,7 +2081,7 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) while ( (pfp = tmem_page_list_get()) == NULL ) { - if ( (max_evictions-- <= 0) || !tmem_evict()) + if ( (max_evictions-- <= 0) || !tmem_evict() ) break; evicts_per_relinq++; } diff --git a/xen/common/tmem_control.c b/xen/common/tmem_control.c index 30bf6fb362..77a1eb0889 100644 --- a/xen/common/tmem_control.c +++ b/xen/common/tmem_control.c @@ -23,20 +23,21 @@ static int tmemc_freeze_pools(domid_t cli_id, int arg) bool destroy = arg == XEN_SYSCTL_TMEM_OP_DESTROY; char *s; - s = destroy ? "destroyed" : ( freeze ? "frozen" : "thawed" ); + s = destroy ? "destroyed" : (freeze ? "frozen" : "thawed"); if ( cli_id == TMEM_CLI_ID_NULL ) { - list_for_each_entry(client,&tmem_global.client_list,client_list) + list_for_each_entry (client, &tmem_global.client_list, client_list) client->info.flags.u.frozen = freeze; - tmem_client_info("tmem: all pools %s for all %ss\n", s, tmem_client_str); + tmem_client_info("tmem: all pools %s for all %ss\n", s, + tmem_client_str); } else { - if ( (client = tmem_client_from_cli_id(cli_id)) == NULL) + if ( (client = tmem_client_from_cli_id(cli_id)) == NULL ) return -1; client->info.flags.u.frozen = freeze; - tmem_client_info("tmem: all pools %s for %s=%d\n", - s, tmem_cli_id_str, cli_id); + tmem_client_info("tmem: all pools %s for %s=%d\n", s, tmem_cli_id_str, + cli_id); } return 0; } @@ -47,7 +48,7 @@ static unsigned long tmem_flush_npages(unsigned long n) while ( (avail_pages = tmem_page_list_pages) < n ) { - if ( !tmem_evict() ) + if ( !tmem_evict() ) break; } if ( avail_pages ) @@ -73,14 +74,15 @@ static int tmemc_flush_mem(domid_t cli_id, uint32_t kb) if ( cli_id != TMEM_CLI_ID_NULL ) { - tmem_client_warn("tmem: %s-specific flush not supported yet, use --all\n", - tmem_client_str); + tmem_client_warn( + "tmem: %s-specific flush not supported yet, use --all\n", + tmem_client_str); return -1; } /* Convert kb to pages, rounding up if necessary. 
*/ - npages = (kb + ((1 << (PAGE_SHIFT-10))-1)) >> (PAGE_SHIFT-10); + npages = (kb + ((1 << (PAGE_SHIFT - 10)) - 1)) >> (PAGE_SHIFT - 10); flushed_pages = tmem_flush_npages(npages); - flushed_kb = flushed_pages << (PAGE_SHIFT-10); + flushed_kb = flushed_pages << (PAGE_SHIFT - 10); return flushed_kb; } @@ -95,25 +97,26 @@ static int tmemc_flush_mem(domid_t cli_id, uint32_t kb) */ #define BSIZE 1024 -static int tmemc_list_client(struct client *c, tmem_cli_va_param_t buf, - int off, uint32_t len, bool use_long) +static int tmemc_list_client(struct client *c, tmem_cli_va_param_t buf, int off, + uint32_t len, bool use_long) { char info[BSIZE]; int i, n = 0, sum = 0; struct tmem_pool *p; bool s; - n = scnprintf(info,BSIZE,"C=CI:%d,ww:%d,co:%d,fr:%d," - "Tc:%"PRIu64",Ge:%ld,Pp:%ld,Gp:%ld%c", - c->cli_id, c->info.weight, c->info.flags.u.compress, c->info.flags.u.frozen, - c->total_cycles, c->succ_eph_gets, c->succ_pers_puts, c->succ_pers_gets, - use_long ? ',' : '\n'); - if (use_long) - n += scnprintf(info+n,BSIZE-n, - "Ec:%ld,Em:%ld,cp:%ld,cb:%"PRId64",cn:%ld,cm:%ld\n", - c->eph_count, c->eph_count_max, - c->compressed_pages, c->compressed_sum_size, - c->compress_poor, c->compress_nomem); + n = scnprintf(info, BSIZE, + "C=CI:%d,ww:%d,co:%d,fr:%d," + "Tc:%" PRIu64 ",Ge:%ld,Pp:%ld,Gp:%ld%c", + c->cli_id, c->info.weight, c->info.flags.u.compress, + c->info.flags.u.frozen, c->total_cycles, c->succ_eph_gets, + c->succ_pers_puts, c->succ_pers_gets, use_long ? ',' : '\n'); + if ( use_long ) + n += scnprintf(info + n, BSIZE - n, + "Ec:%ld,Em:%ld,cp:%ld,cb:%" PRId64 ",cn:%ld,cm:%ld\n", + c->eph_count, c->eph_count_max, c->compressed_pages, + c->compressed_sum_size, c->compress_poor, + c->compress_nomem); if ( !copy_to_guest_offset(buf, off + sum, info, n + 1) ) sum += n; for ( i = 0; i < MAX_POOLS_PER_DOMAIN; i++ ) @@ -121,25 +124,23 @@ static int tmemc_list_client(struct client *c, tmem_cli_va_param_t buf, if ( (p = c->pools[i]) == NULL ) continue; s = is_shared(p); - n = scnprintf(info,BSIZE,"P=CI:%d,PI:%d," - "PT:%c%c,U0:%"PRIx64",U1:%"PRIx64"%c", - c->cli_id, p->pool_id, - is_persistent(p) ? 'P' : 'E', s ? 'S' : 'P', - (uint64_t)(s ? p->uuid[0] : 0), - (uint64_t)(s ? p->uuid[1] : 0LL), - use_long ? ',' : '\n'); - if (use_long) - n += scnprintf(info+n,BSIZE-n, - "Pc:%d,Pm:%d,Oc:%ld,Om:%ld,Nc:%lu,Nm:%lu," - "ps:%lu,pt:%lu,pd:%lu,pr:%lu,px:%lu,gs:%lu,gt:%lu," - "fs:%lu,ft:%lu,os:%lu,ot:%lu\n", - _atomic_read(p->pgp_count), p->pgp_count_max, - p->obj_count, p->obj_count_max, - p->objnode_count, p->objnode_count_max, - p->good_puts, p->puts,p->dup_puts_flushed, p->dup_puts_replaced, - p->no_mem_puts, - p->found_gets, p->gets, - p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs); + n = scnprintf(info, BSIZE, + "P=CI:%d,PI:%d," + "PT:%c%c,U0:%" PRIx64 ",U1:%" PRIx64 "%c", + c->cli_id, p->pool_id, is_persistent(p) ? 'P' : 'E', + s ? 'S' : 'P', (uint64_t)(s ? p->uuid[0] : 0), + (uint64_t)(s ? p->uuid[1] : 0LL), use_long ? 
',' : '\n'); + if ( use_long ) + n += scnprintf( + info + n, BSIZE - n, + "Pc:%d,Pm:%d,Oc:%ld,Om:%ld,Nc:%lu,Nm:%lu," + "ps:%lu,pt:%lu,pd:%lu,pr:%lu,px:%lu,gs:%lu,gt:%lu," + "fs:%lu,ft:%lu,os:%lu,ot:%lu\n", + _atomic_read(p->pgp_count), p->pgp_count_max, p->obj_count, + p->obj_count_max, p->objnode_count, p->objnode_count_max, + p->good_puts, p->puts, p->dup_puts_flushed, + p->dup_puts_replaced, p->no_mem_puts, p->found_gets, p->gets, + p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs); if ( sum + n >= len ) return sum; if ( !copy_to_guest_offset(buf, off + sum, info, n + 1) ) @@ -160,25 +161,24 @@ static int tmemc_list_shared(tmem_cli_va_param_t buf, int off, uint32_t len, { if ( (p = tmem_global.shared_pools[i]) == NULL ) continue; - n = scnprintf(info+n,BSIZE-n,"S=SI:%d,PT:%c%c,U0:%"PRIx64",U1:%"PRIx64, - i, is_persistent(p) ? 'P' : 'E', - is_shared(p) ? 'S' : 'P', + n = scnprintf(info + n, BSIZE - n, + "S=SI:%d,PT:%c%c,U0:%" PRIx64 ",U1:%" PRIx64, i, + is_persistent(p) ? 'P' : 'E', is_shared(p) ? 'S' : 'P', p->uuid[0], p->uuid[1]); - list_for_each_entry(sl,&p->share_list, share_list) - n += scnprintf(info+n,BSIZE-n,",SC:%d",sl->client->cli_id); - n += scnprintf(info+n,BSIZE-n,"%c", use_long ? ',' : '\n'); - if (use_long) - n += scnprintf(info+n,BSIZE-n, - "Pc:%d,Pm:%d,Oc:%ld,Om:%ld,Nc:%lu,Nm:%lu," - "ps:%lu,pt:%lu,pd:%lu,pr:%lu,px:%lu,gs:%lu,gt:%lu," - "fs:%lu,ft:%lu,os:%lu,ot:%lu\n", - _atomic_read(p->pgp_count), p->pgp_count_max, - p->obj_count, p->obj_count_max, - p->objnode_count, p->objnode_count_max, - p->good_puts, p->puts,p->dup_puts_flushed, p->dup_puts_replaced, - p->no_mem_puts, - p->found_gets, p->gets, - p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs); + list_for_each_entry (sl, &p->share_list, share_list) + n += scnprintf(info + n, BSIZE - n, ",SC:%d", sl->client->cli_id); + n += scnprintf(info + n, BSIZE - n, "%c", use_long ? ',' : '\n'); + if ( use_long ) + n += scnprintf( + info + n, BSIZE - n, + "Pc:%d,Pm:%d,Oc:%ld,Om:%ld,Nc:%lu,Nm:%lu," + "ps:%lu,pt:%lu,pd:%lu,pr:%lu,px:%lu,gs:%lu,gt:%lu," + "fs:%lu,ft:%lu,os:%lu,ot:%lu\n", + _atomic_read(p->pgp_count), p->pgp_count_max, p->obj_count, + p->obj_count_max, p->objnode_count, p->objnode_count_max, + p->good_puts, p->puts, p->dup_puts_flushed, + p->dup_puts_replaced, p->no_mem_puts, p->found_gets, p->gets, + p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs); if ( sum + n >= len ) return sum; if ( !copy_to_guest_offset(buf, off + sum, info, n + 1) ) @@ -193,9 +193,9 @@ static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off, char info[BSIZE]; int n = 0, sum = 0; - n = scnprintf(info+n,BSIZE-n,"T="); + n = scnprintf(info + n, BSIZE - n, "T="); n--; /* Overwrite trailing comma. 
*/ - n += scnprintf(info+n,BSIZE-n,"\n"); + n += scnprintf(info + n, BSIZE - n, "\n"); if ( sum + n >= len ) return sum; if ( !copy_to_guest_offset(buf, off + sum, info, n + 1) ) @@ -209,27 +209,34 @@ static int tmemc_list_global(tmem_cli_va_param_t buf, int off, uint32_t len, char info[BSIZE]; int n = 0, sum = off; - n += scnprintf(info,BSIZE,"G=" - "Tt:%lu,Te:%lu,Cf:%lu,Af:%lu,Pf:%lu,Ta:%lu," - "Lm:%lu,Et:%lu,Ea:%lu,Rt:%lu,Ra:%lu,Rx:%lu,Fp:%lu%c", - tmem_stats.total_tmem_ops, tmem_stats.errored_tmem_ops, tmem_stats.failed_copies, - tmem_stats.alloc_failed, tmem_stats.alloc_page_failed, tmem_page_list_pages, - tmem_stats.low_on_memory, tmem_stats.evicted_pgs, - tmem_stats.evict_attempts, tmem_stats.relinq_pgs, tmem_stats.relinq_attempts, - tmem_stats.max_evicts_per_relinq, - tmem_stats.total_flush_pool, use_long ? ',' : '\n'); - if (use_long) - n += scnprintf(info+n,BSIZE-n, - "Ec:%ld,Em:%ld,Oc:%d,Om:%d,Nc:%d,Nm:%d,Pc:%d,Pm:%d," - "Fc:%d,Fm:%d,Sc:%d,Sm:%d,Ep:%lu,Gd:%lu,Zt:%lu,Gz:%lu\n", - tmem_global.eph_count, tmem_stats.global_eph_count_max, - _atomic_read(tmem_stats.global_obj_count), tmem_stats.global_obj_count_max, - _atomic_read(tmem_stats.global_rtree_node_count), tmem_stats.global_rtree_node_count_max, - _atomic_read(tmem_stats.global_pgp_count), tmem_stats.global_pgp_count_max, - _atomic_read(tmem_stats.global_page_count), tmem_stats.global_page_count_max, - _atomic_read(tmem_stats.global_pcd_count), tmem_stats.global_pcd_count_max, - tmem_stats.tot_good_eph_puts,tmem_stats.deduped_puts,tmem_stats.pcd_tot_tze_size, - tmem_stats.pcd_tot_csize); + n += scnprintf(info, BSIZE, + "G=" + "Tt:%lu,Te:%lu,Cf:%lu,Af:%lu,Pf:%lu,Ta:%lu," + "Lm:%lu,Et:%lu,Ea:%lu,Rt:%lu,Ra:%lu,Rx:%lu,Fp:%lu%c", + tmem_stats.total_tmem_ops, tmem_stats.errored_tmem_ops, + tmem_stats.failed_copies, tmem_stats.alloc_failed, + tmem_stats.alloc_page_failed, tmem_page_list_pages, + tmem_stats.low_on_memory, tmem_stats.evicted_pgs, + tmem_stats.evict_attempts, tmem_stats.relinq_pgs, + tmem_stats.relinq_attempts, tmem_stats.max_evicts_per_relinq, + tmem_stats.total_flush_pool, use_long ? 
',' : '\n'); + if ( use_long ) + n += scnprintf(info + n, BSIZE - n, + "Ec:%ld,Em:%ld,Oc:%d,Om:%d,Nc:%d,Nm:%d,Pc:%d,Pm:%d," + "Fc:%d,Fm:%d,Sc:%d,Sm:%d,Ep:%lu,Gd:%lu,Zt:%lu,Gz:%lu\n", + tmem_global.eph_count, tmem_stats.global_eph_count_max, + _atomic_read(tmem_stats.global_obj_count), + tmem_stats.global_obj_count_max, + _atomic_read(tmem_stats.global_rtree_node_count), + tmem_stats.global_rtree_node_count_max, + _atomic_read(tmem_stats.global_pgp_count), + tmem_stats.global_pgp_count_max, + _atomic_read(tmem_stats.global_page_count), + tmem_stats.global_page_count_max, + _atomic_read(tmem_stats.global_pcd_count), + tmem_stats.global_pcd_count_max, + tmem_stats.tot_good_eph_puts, tmem_stats.deduped_puts, + tmem_stats.pcd_tot_tze_size, tmem_stats.pcd_tot_csize); if ( sum + n >= len ) return sum; if ( !copy_to_guest_offset(buf, off + sum, info, n + 1) ) @@ -243,14 +250,15 @@ static int tmemc_list(domid_t cli_id, tmem_cli_va_param_t buf, uint32_t len, struct client *client; int off = 0; - if ( cli_id == TMEM_CLI_ID_NULL ) { - off = tmemc_list_global(buf,0,len,use_long); - off += tmemc_list_shared(buf,off,len-off,use_long); - list_for_each_entry(client,&tmem_global.client_list,client_list) - off += tmemc_list_client(client, buf, off, len-off, use_long); - off += tmemc_list_global_perf(buf,off,len-off,use_long); + if ( cli_id == TMEM_CLI_ID_NULL ) + { + off = tmemc_list_global(buf, 0, len, use_long); + off += tmemc_list_shared(buf, off, len - off, use_long); + list_for_each_entry (client, &tmem_global.client_list, client_list) + off += tmemc_list_client(client, buf, off, len - off, use_long); + off += tmemc_list_global_perf(buf, off, len - off, use_long); } - else if ( (client = tmem_client_from_cli_id(cli_id)) == NULL) + else if ( (client = tmem_client_from_cli_id(cli_id)) == NULL ) return -1; else off = tmemc_list_client(client, buf, 0, len, use_long); @@ -263,7 +271,7 @@ static int __tmemc_set_client_info(struct client *client, { domid_t cli_id; uint32_t old_weight; - xen_tmem_client_t info = { }; + xen_tmem_client_t info = {}; ASSERT(client); @@ -283,19 +291,18 @@ static int __tmemc_set_client_info(struct client *client, { old_weight = client->info.weight; client->info.weight = info.weight; - tmem_client_info("tmem: weight set to %d for %s=%d\n", - info.weight, tmem_cli_id_str, cli_id); - atomic_sub(old_weight,&tmem_global.client_weight_total); - atomic_add(client->info.weight,&tmem_global.client_weight_total); + tmem_client_info("tmem: weight set to %d for %s=%d\n", info.weight, + tmem_cli_id_str, cli_id); + atomic_sub(old_weight, &tmem_global.client_weight_total); + atomic_add(client->info.weight, &tmem_global.client_weight_total); } - if ( info.flags.u.compress != client->info.flags.u.compress ) { client->info.flags.u.compress = info.flags.u.compress; tmem_client_info("tmem: compression %s for %s=%d\n", info.flags.u.compress ? 
"enabled" : "disabled", - tmem_cli_id_str,cli_id); + tmem_cli_id_str, cli_id); } return 0; } @@ -308,10 +315,10 @@ static int tmemc_set_client_info(domid_t cli_id, if ( cli_id == TMEM_CLI_ID_NULL ) { - list_for_each_entry(client,&tmem_global.client_list,client_list) + list_for_each_entry (client, &tmem_global.client_list, client_list) { - ret = __tmemc_set_client_info(client, info); - if (ret) + ret = __tmemc_set_client_info(client, info); + if ( ret ) break; } } @@ -332,14 +339,12 @@ static int tmemc_get_client_info(int cli_id, if ( client ) { if ( copy_to_guest(info, &client->info, 1) ) - return -EFAULT; + return -EFAULT; } else { static const xen_tmem_client_t generic = { - .version = TMEM_SPEC_VERSION, - .maxpools = MAX_POOLS_PER_DOMAIN - }; + .version = TMEM_SPEC_VERSION, .maxpools = MAX_POOLS_PER_DOMAIN}; if ( copy_to_guest(info, &generic, 1) ) return -EFAULT; @@ -378,9 +383,9 @@ static int tmemc_get_pool(int cli_id, continue; out.flags.raw = (pool->persistent ? TMEM_POOL_PERSIST : 0) | - (pool->shared ? TMEM_POOL_SHARED : 0) | - (POOL_PAGESHIFT << TMEM_POOL_PAGESIZE_SHIFT) | - (TMEM_SPEC_VERSION << TMEM_POOL_VERSION_SHIFT); + (pool->shared ? TMEM_POOL_SHARED : 0) | + (POOL_PAGESHIFT << TMEM_POOL_PAGESIZE_SHIFT) | + (TMEM_SPEC_VERSION << TMEM_POOL_VERSION_SHIFT); out.n_pages = _atomic_read(pool->pgp_count); out.uuid[0] = pool->uuid[0]; out.uuid[1] = pool->uuid[1]; @@ -399,7 +404,7 @@ static int tmemc_get_pool(int cli_id, } /* And how many we have processed. */ - return rc ? : idx; + return rc ?: idx; } static int tmemc_set_pools(int cli_id, @@ -430,14 +435,14 @@ static int tmemc_set_pools(int cli_id, { xen_tmem_pool_info_t pool; - if ( __copy_from_guest_offset(&pool, pools, i, 1 ) ) + if ( __copy_from_guest_offset(&pool, pools, i, 1) ) return -EFAULT; if ( pool.n_pages ) return -EINVAL; - rc = do_tmem_new_pool(cli_id, pool.id, pool.flags.raw, - pool.uuid[0], pool.uuid[1]); + rc = do_tmem_new_pool(cli_id, pool.id, pool.flags.raw, pool.uuid[0], + pool.uuid[1]); if ( rc < 0 ) break; @@ -447,7 +452,7 @@ static int tmemc_set_pools(int cli_id, } /* And how many we have processed. */ - return rc ? : i; + return rc ?: i; } static int tmemc_auth_pools(int cli_id, @@ -479,7 +484,7 @@ static int tmemc_auth_pools(int cli_id, { xen_tmem_pool_info_t pool; - if ( __copy_from_guest_offset(&pool, pools, i, 1 ) ) + if ( __copy_from_guest_offset(&pool, pools, i, 1) ) return -EFAULT; if ( pool.n_pages ) @@ -490,11 +495,10 @@ static int tmemc_auth_pools(int cli_id, if ( rc < 0 ) break; - } /* And how many we have processed. */ - return rc ? 
: i; + return rc ?: i; } int tmem_control(struct xen_sysctl_tmem_op *op) @@ -518,8 +522,8 @@ int tmem_control(struct xen_sysctl_tmem_op *op) ret = tmemc_flush_mem(op->cli_id, op->arg); break; case XEN_SYSCTL_TMEM_OP_LIST: - ret = tmemc_list(op->cli_id, - guest_handle_cast(op->u.buf, char), op->len, op->arg); + ret = tmemc_list(op->cli_id, guest_handle_cast(op->u.buf, char), + op->len, op->arg); break; case XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO: ret = tmemc_set_client_info(op->cli_id, op->u.client); diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c index bf7b14f79a..e4093b6fb3 100644 --- a/xen/common/tmem_xen.c +++ b/xen/common/tmem_xen.c @@ -86,8 +86,8 @@ static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp, } #endif -int tmem_copy_from_client(struct page_info *pfp, - xen_pfn_t cmfn, tmem_cli_va_param_t clibuf) +int tmem_copy_from_client(struct page_info *pfp, xen_pfn_t cmfn, + tmem_cli_va_param_t clibuf) { mfn_t tmem_mfn, cli_mfn = INVALID_MFN; char *tmem_va, *cli_va = NULL; @@ -118,8 +118,8 @@ int tmem_copy_from_client(struct page_info *pfp, return rc; } -int tmem_compress_from_client(xen_pfn_t cmfn, - void **out_va, size_t *out_len, tmem_cli_va_param_t clibuf) +int tmem_compress_from_client(xen_pfn_t cmfn, void **out_va, size_t *out_len, + tmem_cli_va_param_t clibuf) { int ret = 0; unsigned char *dmem = this_cpu(dstmem); @@ -130,7 +130,7 @@ int tmem_compress_from_client(xen_pfn_t cmfn, void *cli_va = NULL; if ( dmem == NULL || wmem == NULL ) - return 0; /* no buffer, so can't compress */ + return 0; /* no buffer, so can't compress */ if ( guest_handle_is_null(clibuf) ) { cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 0); @@ -151,7 +151,7 @@ int tmem_compress_from_client(xen_pfn_t cmfn, } int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp, - tmem_cli_va_param_t clibuf) + tmem_cli_va_param_t clibuf) { mfn_t tmem_mfn, cli_mfn = INVALID_MFN; char *tmem_va, *cli_va = NULL; @@ -180,8 +180,8 @@ int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp, return rc; } -int tmem_decompress_to_client(xen_pfn_t cmfn, void *tmem_va, - size_t size, tmem_cli_va_param_t clibuf) +int tmem_decompress_to_client(xen_pfn_t cmfn, void *tmem_va, size_t size, + tmem_cli_va_param_t clibuf) { mfn_t cli_mfn = INVALID_MFN; struct page_info *cli_pfp = NULL; @@ -212,14 +212,15 @@ int tmem_decompress_to_client(xen_pfn_t cmfn, void *tmem_va, /****************** XEN-SPECIFIC HOST INITIALIZATION ********************/ static int dstmem_order, workmem_order; -static int cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - switch ( action ) + switch (action) + { + case CPU_UP_PREPARE: { - case CPU_UP_PREPARE: { if ( per_cpu(dstmem, cpu) == NULL ) per_cpu(dstmem, cpu) = alloc_xenheap_pages(dstmem_order, 0); if ( per_cpu(workmem, cpu) == NULL ) @@ -229,7 +230,8 @@ static int cpu_callback( break; } case CPU_DEAD: - case CPU_UP_CANCELED: { + case CPU_UP_CANCELED: + { if ( per_cpu(dstmem, cpu) != NULL ) { free_xenheap_pages(per_cpu(dstmem, cpu), dstmem_order); @@ -254,9 +256,7 @@ static int cpu_callback( return NOTIFY_DONE; } -static struct notifier_block cpu_nfb = { - .notifier_call = cpu_callback -}; +static struct notifier_block cpu_nfb = {.notifier_call = cpu_callback}; int __init tmem_init(void) { @@ -265,7 +265,7 @@ int __init tmem_init(void) dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES); workmem_order = 
get_order_from_bytes(LZO1X_1_MEM_COMPRESS); - for_each_online_cpu ( cpu ) + for_each_online_cpu (cpu) { void *hcpu = (void *)(long)cpu; cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu); diff --git a/xen/common/trace.c b/xen/common/trace.c index cc294fc384..bdccf9dfbb 100644 --- a/xen/common/trace.c +++ b/xen/common/trace.c @@ -74,12 +74,12 @@ static cpumask_t tb_cpu_mask; /* which tracing events are enabled */ static u32 tb_event_mask = TRC_ALL; -/* Return the number of elements _type necessary to store at least _x bytes of data - * i.e., sizeof(_type) * ans >= _x. */ -#define fit_to_type(_type, _x) (((_x)+sizeof(_type)-1) / sizeof(_type)) +/* Return the number of elements _type necessary to store at least _x bytes of + * data i.e., sizeof(_type) * ans >= _x. */ +#define fit_to_type(_type, _x) (((_x) + sizeof(_type) - 1) / sizeof(_type)) -static int cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; @@ -89,9 +89,7 @@ static int cpu_callback( return NOTIFY_DONE; } -static struct notifier_block cpu_nfb = { - .notifier_call = cpu_callback -}; +static struct notifier_block cpu_nfb = {.notifier_call = cpu_callback}; static uint32_t calc_tinfo_first_offset(void) { @@ -130,7 +128,8 @@ static int calculate_tbuf_size(unsigned int pages, uint16_t t_info_first_offset) /* * max mfn_offset holds up to n pages per cpu * The array of mfns for the highest cpu can start at the maximum value - * mfn_offset can hold. So reduce the number of cpus and also the mfn_offset. + * mfn_offset can hold. So reduce the number of cpus and also the + * mfn_offset. */ max_mfn_offset -= t_info_first_offset; max_cpus--; @@ -142,19 +141,19 @@ static int calculate_tbuf_size(unsigned int pages, uint16_t t_info_first_offset) if ( pages > max_pages ) { printk(XENLOG_INFO "xentrace: requested number of %u pages " - "reduced to %u\n", + "reduced to %u\n", pages, max_pages); pages = max_pages; } - /* + /* * NB this calculation is correct, because t_info_first_offset is * in words, not bytes */ t_info_words = nr_cpu_ids * pages + t_info_first_offset; t_info_pages = PFN_UP(t_info_words * sizeof(uint32_t)); printk(XENLOG_INFO "xentrace: requesting %u t_info pages " - "for %u trace pages on %u cpus\n", + "for %u trace pages on %u cpus\n", t_info_pages, pages, nr_cpu_ids); return pages; } @@ -166,7 +165,7 @@ static int calculate_tbuf_size(unsigned int pages, uint16_t t_info_first_offset) * trace buffers. The trace buffers are then available for debugging use, via * the %TRACE_xD macros exported in . * - * This function may also be called later when enabling trace buffers + * This function may also be called later when enabling trace buffers * via the SET_SIZE hypercall. */ static int alloc_trace_bufs(unsigned int pages) @@ -192,7 +191,7 @@ static int alloc_trace_bufs(unsigned int pages) if ( t_info == NULL ) goto out_fail; - memset(t_info, 0, t_info_pages*PAGE_SIZE); + memset(t_info, 0, t_info_pages * PAGE_SIZE); t_info_mfn_list = (uint32_t *)t_info; @@ -202,7 +201,7 @@ static int alloc_trace_bufs(unsigned int pages) * Allocate buffers for all of the cpus. * If any fails, deallocate what you have so far and exit. 
*/ - for_each_online_cpu(cpu) + for_each_online_cpu (cpu) { offset = t_info_first_offset + (cpu * pages); t_info->mfn_offset[cpu] = offset; @@ -213,7 +212,8 @@ static int alloc_trace_bufs(unsigned int pages) if ( !p ) { printk(XENLOG_INFO "xentrace: memory allocation failed " - "on cpu %d after %d pages\n", cpu, i); + "on cpu %d after %d pages\n", + cpu, i); t_info_mfn_list[offset + i] = 0; goto out_dealloc; } @@ -224,7 +224,7 @@ static int alloc_trace_bufs(unsigned int pages) /* * Initialize buffers for all of the cpus. */ - for_each_online_cpu(cpu) + for_each_online_cpu (cpu) { struct t_buf *buf; @@ -236,8 +236,8 @@ static int alloc_trace_bufs(unsigned int pages) per_cpu(t_bufs, cpu) = buf = mfn_to_virt(t_info_mfn_list[offset]); buf->cons = buf->prod = 0; - printk(XENLOG_INFO "xentrace: p%d mfn %x offset %u\n", - cpu, t_info_mfn_list[offset], offset); + printk(XENLOG_INFO "xentrace: p%d mfn %x offset %u\n", cpu, + t_info_mfn_list[offset], offset); /* Now share the trace pages */ for ( i = 0; i < pages; i++ ) @@ -246,11 +246,11 @@ static int alloc_trace_bufs(unsigned int pages) } /* Finally, share the t_info page */ - for(i = 0; i < t_info_pages; i++) - share_xen_page_with_privileged_guests( - virt_to_page(t_info) + i, SHARE_ro); + for ( i = 0; i < t_info_pages; i++ ) + share_xen_page_with_privileged_guests(virt_to_page(t_info) + i, + SHARE_ro); - data_size = (pages * PAGE_SIZE - sizeof(struct t_buf)); + data_size = (pages * PAGE_SIZE - sizeof(struct t_buf)); t_buf_highwater = data_size >> 1; /* 50% high water */ opt_tbuf_size = pages; @@ -261,7 +261,7 @@ static int alloc_trace_bufs(unsigned int pages) return 0; out_dealloc: - for_each_online_cpu(cpu) + for_each_online_cpu (cpu) { offset = t_info->mfn_offset[cpu]; if ( !offset ) @@ -282,7 +282,6 @@ out_fail: return -ENOMEM; } - /** * tb_set_size - handle the logic involved with dynamically allocating tbufs * @@ -298,7 +297,7 @@ static int tb_set_size(unsigned int pages) if ( opt_tbuf_size && pages != opt_tbuf_size ) { printk(XENLOG_INFO "xentrace: tb_set_size from %d to %d " - "not implemented\n", + "not implemented\n", opt_tbuf_size, pages); return -EINVAL; } @@ -322,8 +321,8 @@ int trace_will_trace_event(u32 event) return 0; /* then match subclass */ - if ( (((tb_event_mask >> TRC_SUBCLS_SHIFT) & 0xf ) - & ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 ) + if ( (((tb_event_mask >> TRC_SUBCLS_SHIFT) & 0xf) & + ((event >> TRC_SUBCLS_SHIFT) & 0xf)) == 0 ) return 0; if ( !cpumask_test_cpu(smp_processor_id(), &tb_cpu_mask) ) @@ -357,7 +356,7 @@ void __init init_trace_bufs(void) printk("xentrace: Starting tracing, enabling mask %x\n", opt_tevt_mask); tb_event_mask = opt_tevt_mask; - tb_init_done=1; + tb_init_done = 1; } } } @@ -373,10 +372,10 @@ int tb_control(struct xen_sysctl_tbuf_op *tbc) spin_lock(&lock); - switch ( tbc->cmd ) + switch (tbc->cmd) { case XEN_SYSCTL_TBUFOP_get_info: - tbc->evt_mask = tb_event_mask; + tbc->evt_mask = tb_event_mask; tbc->buffer_mfn = t_info ? virt_to_mfn(t_info) : 0; tbc->size = t_info_pages * PAGE_SIZE; break; @@ -391,7 +390,7 @@ int tb_control(struct xen_sysctl_tbuf_op *tbc) free_cpumask_var(mask); } } - break; + break; case XEN_SYSCTL_TBUFOP_set_evt_mask: tb_event_mask = tbc->evt_mask; break; @@ -400,7 +399,7 @@ int tb_control(struct xen_sysctl_tbuf_op *tbc) break; case XEN_SYSCTL_TBUFOP_enable: /* Enable trace buffers. Check buffers are already allocated. 
*/ - if ( opt_tbuf_size == 0 ) + if ( opt_tbuf_size == 0 ) rc = -EINVAL; else tb_init_done = 1; @@ -415,18 +414,19 @@ int tb_control(struct xen_sysctl_tbuf_op *tbc) tb_init_done = 0; smp_wmb(); - /* Clear any lost-record info so we don't get phantom lost records next time we - * start tracing. Grab the lock to make sure we're not racing anyone. After this - * hypercall returns, no more records should be placed into the buffers. */ - for_each_online_cpu(i) + /* Clear any lost-record info so we don't get phantom lost records next + * time we start tracing. Grab the lock to make sure we're not racing + * anyone. After this hypercall returns, no more records should be + * placed into the buffers. */ + for_each_online_cpu (i) { unsigned long flags; spin_lock_irqsave(&per_cpu(t_lock, i), flags); - per_cpu(lost_records, i)=0; + per_cpu(lost_records, i) = 0; spin_unlock_irqrestore(&per_cpu(t_lock, i), flags); } } - break; + break; default: rc = -EINVAL; break; @@ -437,7 +437,7 @@ int tb_control(struct xen_sysctl_tbuf_op *tbc) return rc; } -static inline unsigned int calc_rec_size(bool_t cycles, unsigned int extra) +static inline unsigned int calc_rec_size(bool_t cycles, unsigned int extra) { unsigned int rec_size = 4; @@ -471,7 +471,7 @@ static inline u32 calc_unconsumed_bytes(const struct t_buf *buf) x = prod - cons; if ( x < 0 ) - x += 2*data_size; + x += 2 * data_size; ASSERT(x >= 0); ASSERT(x <= data_size); @@ -504,8 +504,8 @@ static inline u32 calc_bytes_avail(const struct t_buf *buf) } static unsigned char *next_record(const struct t_buf *buf, uint32_t *next, - unsigned char **next_page, - uint32_t *offset_in_page) + unsigned char **next_page, + uint32_t *offset_in_page) { u32 x = buf->prod, cons = buf->cons; uint16_t per_cpu_mfn_offset; @@ -534,7 +534,7 @@ static unsigned char *next_record(const struct t_buf *buf, uint32_t *next, mfn_list = (uint32_t *)t_info; mfn = mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr]; this_page = mfn_to_virt(mfn); - if (per_cpu_mfn_nr + 1 >= opt_tbuf_size) + if ( per_cpu_mfn_nr + 1 >= opt_tbuf_size ) { /* reached end of buffer? 
*/ *next_page = NULL; @@ -547,10 +547,8 @@ static unsigned char *next_record(const struct t_buf *buf, uint32_t *next, return this_page; } -static inline void __insert_record(struct t_buf *buf, - unsigned long event, - unsigned int extra, - bool_t cycles, +static inline void __insert_record(struct t_buf *buf, unsigned long event, + unsigned int extra, bool_t cycles, unsigned int rec_size, const void *extra_data) { @@ -583,8 +581,10 @@ static inline void __insert_record(struct t_buf *buf, return; } rec = &split_rec; - } else { - rec = (struct t_rec*)(this_page + offset); + } + else + { + rec = (struct t_rec *)(this_page + offset); } rec->event = event; @@ -596,7 +596,7 @@ static inline void __insert_record(struct t_buf *buf, rec->u.cycles.cycles_lo = (uint32_t)tsc; rec->u.cycles.cycles_hi = (uint32_t)(tsc >> 32); dst = rec->u.cycles.extra_u32; - } + } if ( extra_data && extra ) memcpy(dst, extra_data, extra); @@ -610,14 +610,13 @@ static inline void __insert_record(struct t_buf *buf, smp_wmb(); next += rec_size; - if ( next >= 2*data_size ) - next -= 2*data_size; - ASSERT(next < 2*data_size); + if ( next >= 2 * data_size ) + next -= 2 * data_size; + ASSERT(next < 2 * data_size); buf->prod = next; } -static inline void insert_wrap_record(struct t_buf *buf, - unsigned int size) +static inline void insert_wrap_record(struct t_buf *buf, unsigned int size) { u32 space_left = calc_bytes_to_wrap(buf); unsigned int extra_space = space_left - sizeof(u32); @@ -626,22 +625,23 @@ static inline void insert_wrap_record(struct t_buf *buf, BUG_ON(space_left > size); /* We may need to add cycles to take up enough space... */ - if ( (extra_space/sizeof(u32)) > TRACE_EXTRA_MAX ) + if ( (extra_space / sizeof(u32)) > TRACE_EXTRA_MAX ) { cycles = 1; extra_space -= sizeof(u64); - ASSERT((extra_space/sizeof(u32)) <= TRACE_EXTRA_MAX); + ASSERT((extra_space / sizeof(u32)) <= TRACE_EXTRA_MAX); } - __insert_record(buf, TRC_TRACE_WRAP_BUFFER, extra_space, cycles, - space_left, NULL); + __insert_record(buf, TRC_TRACE_WRAP_BUFFER, extra_space, cycles, space_left, + NULL); } #define LOST_REC_SIZE (4 + 8 + 16) /* header + tsc + sizeof(struct ed) */ static inline void insert_lost_records(struct t_buf *buf) { - struct __packed { + struct __packed + { u32 lost_records; u16 did, vid; u64 first_tsc; @@ -666,8 +666,7 @@ static void trace_notify_dom0(unsigned long unused) { send_global_virq(VIRQ_TBUF); } -static DECLARE_SOFTIRQ_TASKLET(trace_notify_dom0_tasklet, - trace_notify_dom0, 0); +static DECLARE_SOFTIRQ_TASKLET(trace_notify_dom0_tasklet, trace_notify_dom0, 0); /** * __trace_var - Enters a trace tuple into the trace buffer for the current CPU. 
@@ -688,14 +687,14 @@ void __trace_var(u32 event, bool_t cycles, unsigned int extra, unsigned int extra_word; bool_t started_below_highwater; - if( !tb_init_done ) + if ( !tb_init_done ) return; /* Convert byte count into word count, rounding up */ extra_word = (extra / sizeof(u32)); if ( (extra % sizeof(u32)) != 0 ) extra_word++; - + ASSERT(extra_word <= TRACE_EXTRA_MAX); extra_word = min_t(int, extra_word, TRACE_EXTRA_MAX); @@ -710,8 +709,8 @@ void __trace_var(u32 event, bool_t cycles, unsigned int extra, return; /* then match subclass */ - if ( (((tb_event_mask >> TRC_SUBCLS_SHIFT) & 0xf ) - & ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 ) + if ( (((tb_event_mask >> TRC_SUBCLS_SHIFT) & 0xf) & + ((event >> TRC_SUBCLS_SHIFT) & 0xf)) == 0 ) return; if ( !cpumask_test_cpu(smp_processor_id(), &tb_cpu_mask) ) @@ -735,14 +734,14 @@ void __trace_var(u32 event, bool_t cycles, unsigned int extra, /* Calculate the record size */ rec_size = calc_rec_size(cycles, extra); - + /* How many bytes are available in the buffer? */ bytes_to_tail = calc_bytes_avail(buf); - + /* How many bytes until the next wrap-around? */ bytes_to_wrap = calc_bytes_to_wrap(buf); - - /* + + /* * Calculate expected total size to commit this record by * doing a dry-run. */ @@ -756,7 +755,7 @@ void __trace_var(u32 event, bool_t cycles, unsigned int extra, { total_size += bytes_to_wrap; bytes_to_wrap = data_size; - } + } total_size += LOST_REC_SIZE; bytes_to_wrap -= LOST_REC_SIZE; @@ -768,20 +767,20 @@ void __trace_var(u32 event, bool_t cycles, unsigned int extra, if ( rec_size > bytes_to_wrap ) { total_size += bytes_to_wrap; - } + } total_size += rec_size; /* Do we have enough space for everything? */ if ( total_size > bytes_to_tail ) { if ( ++this_cpu(lost_records) == 1 ) - this_cpu(lost_records_first_tsc)=(u64)get_cycles(); + this_cpu(lost_records_first_tsc) = (u64)get_cycles(); started_below_highwater = 0; goto unlock; } /* - * Now, actually write information + * Now, actually write information */ bytes_to_wrap = calc_bytes_to_wrap(buf); @@ -791,7 +790,7 @@ void __trace_var(u32 event, bool_t cycles, unsigned int extra, { insert_wrap_record(buf, LOST_REC_SIZE); bytes_to_wrap = data_size; - } + } insert_lost_records(buf); bytes_to_wrap -= LOST_REC_SIZE; @@ -810,16 +809,16 @@ unlock: spin_unlock_irqrestore(&this_cpu(t_lock), flags); /* Notify trace buffer consumer that we've crossed the high water mark. */ - if ( likely(buf!=NULL) - && started_below_highwater - && (calc_unconsumed_bytes(buf) >= t_buf_highwater) ) + if ( likely(buf != NULL) && started_below_highwater && + (calc_unconsumed_bytes(buf) >= t_buf_highwater) ) tasklet_schedule(&trace_notify_dom0_tasklet); } void __trace_hypercall(uint32_t event, unsigned long op, const xen_ulong_t *args) { - struct __packed { + struct __packed + { uint32_t op; uint32_t args[6]; } d; @@ -830,7 +829,7 @@ void __trace_hypercall(uint32_t event, unsigned long op, unsigned i_ = (i); \ *a++ = args[(i_)]; \ d.op |= TRC_PV_HYPERCALL_V2_ARG_32(i_); \ - } while( 0 ) + } while ( 0 ) /* * This shouldn't happen as @op should be small enough but just in @@ -841,7 +840,7 @@ void __trace_hypercall(uint32_t event, unsigned long op, d.op = op; - switch ( op ) + switch (op) { case __HYPERVISOR_mmu_update: APPEND_ARG32(1); /* count */ diff --git a/xen/common/ubsan/ubsan.c b/xen/common/ubsan/ubsan.c index 50a4e14fac..30521d2762 100644 --- a/xen/common/ubsan/ubsan.c +++ b/xen/common/ubsan/ubsan.c @@ -13,9 +13,12 @@ #include #include -#define __noreturn noreturn +#define __noreturn noreturn #define pr_err(...) 
printk(XENLOG_ERR __VA_ARGS__) -struct xen_ubsan { int in_ubsan; }; +struct xen_ubsan +{ + int in_ubsan; +}; static DEFINE_PER_CPU(struct xen_ubsan[1], in_ubsan); #undef current #define current this_cpu(in_ubsan) @@ -26,26 +29,26 @@ static DEFINE_PER_CPU(struct xen_ubsan[1], in_ubsan); #include "ubsan.h" const char *type_check_kinds[] = { - "load of", - "store to", - "reference binding to", - "member access within", - "member call on", - "constructor call on", - "downcast of", - "downcast of", - "upcast of", - "cast to virtual base of", - "_Nonnull binding to", + "load of", + "store to", + "reference binding to", + "member access within", + "member call on", + "constructor call on", + "downcast of", + "downcast of", + "upcast of", + "cast to virtual base of", + "_Nonnull binding to", }; #define REPORTED_BIT 31 -#if (BITS_PER_LONG == 64) && defined(__BIG_ENDIAN) +#if ( BITS_PER_LONG == 64 ) && defined(__BIG_ENDIAN) #define COLUMN_MASK (~(1U << REPORTED_BIT)) -#define LINE_MASK (~0U) +#define LINE_MASK (~0U) #else -#define COLUMN_MASK (~0U) +#define COLUMN_MASK (~0U) #define LINE_MASK (~(1U << REPORTED_BIT)) #endif @@ -53,468 +56,452 @@ const char *type_check_kinds[] = { static bool was_reported(struct source_location *location) { - return test_and_set_bit(REPORTED_BIT, &location->reported); + return test_and_set_bit(REPORTED_BIT, &location->reported); } static void print_source_location(const char *prefix, - struct source_location *loc) + struct source_location *loc) { - pr_err("%s %s:%d:%d\n", prefix, loc->file_name, - loc->line & LINE_MASK, loc->column & COLUMN_MASK); + pr_err("%s %s:%d:%d\n", prefix, loc->file_name, loc->line & LINE_MASK, + loc->column & COLUMN_MASK); } static bool suppress_report(struct source_location *loc) { - return current->in_ubsan || was_reported(loc); + return current->in_ubsan || was_reported(loc); } static bool type_is_int(struct type_descriptor *type) { - return type->type_kind == type_kind_int; + return type->type_kind == type_kind_int; } static bool type_is_signed(struct type_descriptor *type) { - WARN_ON(!type_is_int(type)); - return type->type_info & 1; + WARN_ON(!type_is_int(type)); + return type->type_info & 1; } static unsigned type_bit_width(struct type_descriptor *type) { - return 1 << (type->type_info >> 1); + return 1 << (type->type_info >> 1); } static bool is_inline_int(struct type_descriptor *type) { - unsigned inline_bits = sizeof(unsigned long)*8; - unsigned bits = type_bit_width(type); + unsigned inline_bits = sizeof(unsigned long) * 8; + unsigned bits = type_bit_width(type); - WARN_ON(!type_is_int(type)); + WARN_ON(!type_is_int(type)); - return bits <= inline_bits; + return bits <= inline_bits; } static s_max get_signed_val(struct type_descriptor *type, unsigned long val) { - if (is_inline_int(type)) { - unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type); - return ((s_max)val) << extra_bits >> extra_bits; - } + if ( is_inline_int(type) ) + { + unsigned extra_bits = sizeof(s_max) * 8 - type_bit_width(type); + return ((s_max)val) << extra_bits >> extra_bits; + } - if (type_bit_width(type) == 64) - return *(s64 *)val; + if ( type_bit_width(type) == 64 ) + return *(s64 *)val; - return *(s_max *)val; + return *(s_max *)val; } static bool val_is_negative(struct type_descriptor *type, unsigned long val) { - return type_is_signed(type) && get_signed_val(type, val) < 0; + return type_is_signed(type) && get_signed_val(type, val) < 0; } static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val) { - if (is_inline_int(type)) 
- return val; + if ( is_inline_int(type) ) + return val; - if (type_bit_width(type) == 64) - return *(u64 *)val; + if ( type_bit_width(type) == 64 ) + return *(u64 *)val; - return *(u_max *)val; + return *(u_max *)val; } static void val_to_string(char *str, size_t size, struct type_descriptor *type, - unsigned long value) + unsigned long value) { - if (type_is_int(type)) { - if (type_bit_width(type) == 128) { + if ( type_is_int(type) ) + { + if ( type_bit_width(type) == 128 ) + { #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) - u_max val = get_unsigned_val(type, value); + u_max val = get_unsigned_val(type, value); - scnprintf(str, size, "0x%08x%08x%08x%08x", - (u32)(val >> 96), - (u32)(val >> 64), - (u32)(val >> 32), - (u32)(val)); + scnprintf(str, size, "0x%08x%08x%08x%08x", (u32)(val >> 96), + (u32)(val >> 64), (u32)(val >> 32), (u32)(val)); #else - WARN_ON(1); + WARN_ON(1); #endif - } else if (type_is_signed(type)) { - scnprintf(str, size, "%lld", - (s64)get_signed_val(type, value)); - } else { - scnprintf(str, size, "%llu", - (u64)get_unsigned_val(type, value)); - } - } + } + else if ( type_is_signed(type) ) + { + scnprintf(str, size, "%lld", (s64)get_signed_val(type, value)); + } + else + { + scnprintf(str, size, "%llu", (u64)get_unsigned_val(type, value)); + } + } } static bool location_is_valid(struct source_location *loc) { - return loc->file_name != NULL; + return loc->file_name != NULL; } static DEFINE_SPINLOCK(report_lock); static void ubsan_prologue(struct source_location *location, - unsigned long *flags) + unsigned long *flags) { - current->in_ubsan++; - spin_lock_irqsave(&report_lock, *flags); + current->in_ubsan++; + spin_lock_irqsave(&report_lock, *flags); - pr_err("========================================" - "========================================\n"); - print_source_location("UBSAN: Undefined behaviour in", location); + pr_err("========================================" + "========================================\n"); + print_source_location("UBSAN: Undefined behaviour in", location); } static void ubsan_epilogue(unsigned long *flags) { - dump_stack(); - pr_err("========================================" - "========================================\n"); - spin_unlock_irqrestore(&report_lock, *flags); - current->in_ubsan--; + dump_stack(); + pr_err("========================================" + "========================================\n"); + spin_unlock_irqrestore(&report_lock, *flags); + current->in_ubsan--; } static void handle_overflow(struct overflow_data *data, unsigned long lhs, - unsigned long rhs, char op) + unsigned long rhs, char op) { + struct type_descriptor *type = data->type; + unsigned long flags; + char lhs_val_str[VALUE_LENGTH]; + char rhs_val_str[VALUE_LENGTH]; - struct type_descriptor *type = data->type; - unsigned long flags; - char lhs_val_str[VALUE_LENGTH]; - char rhs_val_str[VALUE_LENGTH]; + if ( suppress_report(&data->location) ) + return; - if (suppress_report(&data->location)) - return; + ubsan_prologue(&data->location, &flags); - ubsan_prologue(&data->location, &flags); + val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs); + val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs); + pr_err("%s integer overflow:\n", + type_is_signed(type) ? 
"signed" : "unsigned"); + pr_err("%s %c %s cannot be represented in type %s\n", lhs_val_str, op, + rhs_val_str, type->type_name); - val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs); - val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs); - pr_err("%s integer overflow:\n", - type_is_signed(type) ? "signed" : "unsigned"); - pr_err("%s %c %s cannot be represented in type %s\n", - lhs_val_str, - op, - rhs_val_str, - type->type_name); - - ubsan_epilogue(&flags); + ubsan_epilogue(&flags); } -void __ubsan_handle_add_overflow(struct overflow_data *data, - unsigned long lhs, - unsigned long rhs) +void __ubsan_handle_add_overflow(struct overflow_data *data, unsigned long lhs, + unsigned long rhs) { - - handle_overflow(data, lhs, rhs, '+'); + handle_overflow(data, lhs, rhs, '+'); } EXPORT_SYMBOL(__ubsan_handle_add_overflow); -void __ubsan_handle_sub_overflow(struct overflow_data *data, - unsigned long lhs, - unsigned long rhs) +void __ubsan_handle_sub_overflow(struct overflow_data *data, unsigned long lhs, + unsigned long rhs) { - handle_overflow(data, lhs, rhs, '-'); + handle_overflow(data, lhs, rhs, '-'); } EXPORT_SYMBOL(__ubsan_handle_sub_overflow); -void __ubsan_handle_mul_overflow(struct overflow_data *data, - unsigned long lhs, - unsigned long rhs) +void __ubsan_handle_mul_overflow(struct overflow_data *data, unsigned long lhs, + unsigned long rhs) { - handle_overflow(data, lhs, rhs, '*'); + handle_overflow(data, lhs, rhs, '*'); } EXPORT_SYMBOL(__ubsan_handle_mul_overflow); void __ubsan_handle_negate_overflow(struct overflow_data *data, - unsigned long old_val) + unsigned long old_val) { - unsigned long flags; - char old_val_str[VALUE_LENGTH]; + unsigned long flags; + char old_val_str[VALUE_LENGTH]; - if (suppress_report(&data->location)) - return; + if ( suppress_report(&data->location) ) + return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location, &flags); - val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val); + val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val); - pr_err("negation of %s cannot be represented in type %s:\n", - old_val_str, data->type->type_name); + pr_err("negation of %s cannot be represented in type %s:\n", old_val_str, + data->type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(&flags); } EXPORT_SYMBOL(__ubsan_handle_negate_overflow); - void __ubsan_handle_divrem_overflow(struct overflow_data *data, - unsigned long lhs, - unsigned long rhs) + unsigned long lhs, unsigned long rhs) { - unsigned long flags; - char rhs_val_str[VALUE_LENGTH]; + unsigned long flags; + char rhs_val_str[VALUE_LENGTH]; - if (suppress_report(&data->location)) - return; + if ( suppress_report(&data->location) ) + return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location, &flags); - val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs); + val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs); - if (type_is_signed(data->type) && get_signed_val(data->type, rhs) == -1) - pr_err("division of %s by -1 cannot be represented in type %s\n", - rhs_val_str, data->type->type_name); - else - pr_err("division by zero\n"); + if ( type_is_signed(data->type) && get_signed_val(data->type, rhs) == -1 ) + pr_err("division of %s by -1 cannot be represented in type %s\n", + rhs_val_str, data->type->type_name); + else + pr_err("division by zero\n"); - ubsan_epilogue(&flags); + ubsan_epilogue(&flags); } EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); static void handle_null_ptr_deref(struct 
type_mismatch_data *data) { - unsigned long flags; + unsigned long flags; - if (suppress_report(&data->location)) - return; + if ( suppress_report(&data->location) ) + return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location, &flags); - pr_err("%s null pointer of type %s\n", - type_check_kinds[data->type_check_kind], - data->type->type_name); + pr_err("%s null pointer of type %s\n", + type_check_kinds[data->type_check_kind], data->type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(&flags); } static void handle_missaligned_access(struct type_mismatch_data *data, - unsigned long ptr) + unsigned long ptr) { - unsigned long flags; + unsigned long flags; - if (suppress_report(&data->location)) - return; + if ( suppress_report(&data->location) ) + return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location, &flags); - pr_err("%s misaligned address %p for type %s\n", - type_check_kinds[data->type_check_kind], - (void *)ptr, data->type->type_name); - pr_err("which requires %ld byte alignment\n", data->alignment); + pr_err("%s misaligned address %p for type %s\n", + type_check_kinds[data->type_check_kind], (void *)ptr, + data->type->type_name); + pr_err("which requires %ld byte alignment\n", data->alignment); - ubsan_epilogue(&flags); + ubsan_epilogue(&flags); } static void handle_object_size_mismatch(struct type_mismatch_data *data, - unsigned long ptr) + unsigned long ptr) { - unsigned long flags; + unsigned long flags; - if (suppress_report(&data->location)) - return; + if ( suppress_report(&data->location) ) + return; - ubsan_prologue(&data->location, &flags); - pr_err("%s address %p with insufficient space\n", - type_check_kinds[data->type_check_kind], - (void *) ptr); - pr_err("for an object of type %s\n", data->type->type_name); - ubsan_epilogue(&flags); + ubsan_prologue(&data->location, &flags); + pr_err("%s address %p with insufficient space\n", + type_check_kinds[data->type_check_kind], (void *)ptr); + pr_err("for an object of type %s\n", data->type->type_name); + ubsan_epilogue(&flags); } void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, - unsigned long ptr) + unsigned long ptr) { - - if (!ptr) - handle_null_ptr_deref(data); - else if (data->alignment && !IS_ALIGNED(ptr, data->alignment)) - handle_missaligned_access(data, ptr); - else - handle_object_size_mismatch(data, ptr); + if ( !ptr ) + handle_null_ptr_deref(data); + else if ( data->alignment && !IS_ALIGNED(ptr, data->alignment) ) + handle_missaligned_access(data, ptr); + else + handle_object_size_mismatch(data, ptr); } EXPORT_SYMBOL(__ubsan_handle_type_mismatch); void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data, - unsigned long ptr) + unsigned long ptr) { - struct type_mismatch_data d = { - .location = data->location, - .type = data->type, - .alignment = 1ul << data->log_alignment, - .type_check_kind = data->type_check_kind, - }; + struct type_mismatch_data d = { + .location = data->location, + .type = data->type, + .alignment = 1ul << data->log_alignment, + .type_check_kind = data->type_check_kind, + }; - /* - * NB: do the check with data->location, d->location is just a local - * copy and the modifications will be lost. - */ - if (suppress_report(&data->location)) - return; + /* + * NB: do the check with data->location, d->location is just a local + * copy and the modifications will be lost. 
+ */ + if ( suppress_report(&data->location) ) + return; - __ubsan_handle_type_mismatch(&d, ptr); + __ubsan_handle_type_mismatch(&d, ptr); } void __ubsan_handle_nonnull_arg(struct nonnull_arg_data *data) { - unsigned long flags; + unsigned long flags; - if (suppress_report(&data->location)) - return; + if ( suppress_report(&data->location) ) + return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location, &flags); - pr_err("null pointer passed as argument %d, declared with nonnull attribute\n", - data->arg_index); + pr_err( + "null pointer passed as argument %d, declared with nonnull attribute\n", + data->arg_index); - if (location_is_valid(&data->attr_location)) - print_source_location("nonnull attribute declared in ", - &data->attr_location); + if ( location_is_valid(&data->attr_location) ) + print_source_location("nonnull attribute declared in ", + &data->attr_location); - ubsan_epilogue(&flags); + ubsan_epilogue(&flags); } void __ubsan_handle_nonnull_return(struct nonnull_return_data *data) { - unsigned long flags; + unsigned long flags; - if (suppress_report(&data->location)) - return; + if ( suppress_report(&data->location) ) + return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location, &flags); - pr_err("null pointer returned from function declared to never return null\n"); + pr_err( + "null pointer returned from function declared to never return null\n"); - if (location_is_valid(&data->attr_location)) - print_source_location("returns_nonnull attribute specified in", - &data->attr_location); + if ( location_is_valid(&data->attr_location) ) + print_source_location("returns_nonnull attribute specified in", + &data->attr_location); - ubsan_epilogue(&flags); + ubsan_epilogue(&flags); } EXPORT_SYMBOL(__ubsan_handle_nonnull_return); void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data, - unsigned long bound) + unsigned long bound) { - unsigned long flags; - char bound_str[VALUE_LENGTH]; + unsigned long flags; + char bound_str[VALUE_LENGTH]; - if (suppress_report(&data->location)) - return; + if ( suppress_report(&data->location) ) + return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location, &flags); - val_to_string(bound_str, sizeof(bound_str), data->type, bound); - pr_err("variable length array bound value %s <= 0\n", bound_str); + val_to_string(bound_str, sizeof(bound_str), data->type, bound); + pr_err("variable length array bound value %s <= 0\n", bound_str); - ubsan_epilogue(&flags); + ubsan_epilogue(&flags); } EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive); void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, - unsigned long index) + unsigned long index) { - unsigned long flags; - char index_str[VALUE_LENGTH]; + unsigned long flags; + char index_str[VALUE_LENGTH]; - if (suppress_report(&data->location)) - return; + if ( suppress_report(&data->location) ) + return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location, &flags); - val_to_string(index_str, sizeof(index_str), data->index_type, index); - pr_err("index %s is out of range for type %s\n", index_str, - data->array_type->type_name); - ubsan_epilogue(&flags); + val_to_string(index_str, sizeof(index_str), data->index_type, index); + pr_err("index %s is out of range for type %s\n", index_str, + data->array_type->type_name); + ubsan_epilogue(&flags); } EXPORT_SYMBOL(__ubsan_handle_out_of_bounds); void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, - unsigned long lhs, 
unsigned long rhs) -{ - unsigned long flags; - struct type_descriptor *rhs_type = data->rhs_type; - struct type_descriptor *lhs_type = data->lhs_type; - char rhs_str[VALUE_LENGTH]; - char lhs_str[VALUE_LENGTH]; - - if (suppress_report(&data->location)) - return; - - ubsan_prologue(&data->location, &flags); - - val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs); - val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs); - - if (val_is_negative(rhs_type, rhs)) - pr_err("shift exponent %s is negative\n", rhs_str); - - else if (get_unsigned_val(rhs_type, rhs) >= - type_bit_width(lhs_type)) - pr_err("shift exponent %s is too large for %u-bit type %s\n", - rhs_str, - type_bit_width(lhs_type), - lhs_type->type_name); - else if (val_is_negative(lhs_type, lhs)) - pr_err("left shift of negative value %s\n", - lhs_str); - else - pr_err("left shift of %s by %s places cannot be" - " represented in type %s\n", - lhs_str, rhs_str, - lhs_type->type_name); - - ubsan_epilogue(&flags); + unsigned long lhs, unsigned long rhs) +{ + unsigned long flags; + struct type_descriptor *rhs_type = data->rhs_type; + struct type_descriptor *lhs_type = data->lhs_type; + char rhs_str[VALUE_LENGTH]; + char lhs_str[VALUE_LENGTH]; + + if ( suppress_report(&data->location) ) + return; + + ubsan_prologue(&data->location, &flags); + + val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs); + val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs); + + if ( val_is_negative(rhs_type, rhs) ) + pr_err("shift exponent %s is negative\n", rhs_str); + + else if ( get_unsigned_val(rhs_type, rhs) >= type_bit_width(lhs_type) ) + pr_err("shift exponent %s is too large for %u-bit type %s\n", rhs_str, + type_bit_width(lhs_type), lhs_type->type_name); + else if ( val_is_negative(lhs_type, lhs) ) + pr_err("left shift of negative value %s\n", lhs_str); + else + pr_err("left shift of %s by %s places cannot be" + " represented in type %s\n", + lhs_str, rhs_str, lhs_type->type_name); + + ubsan_epilogue(&flags); } EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); - void __noreturn __ubsan_handle_builtin_unreachable(struct unreachable_data *data) { - unsigned long flags; + unsigned long flags; - ubsan_prologue(&data->location, &flags); - pr_err("calling __builtin_unreachable()\n"); - ubsan_epilogue(&flags); - panic("can't return from __builtin_unreachable()\n"); + ubsan_prologue(&data->location, &flags); + pr_err("calling __builtin_unreachable()\n"); + ubsan_epilogue(&flags); + panic("can't return from __builtin_unreachable()\n"); } EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); void __ubsan_handle_load_invalid_value(struct invalid_value_data *data, - unsigned long val) + unsigned long val) { - unsigned long flags; - char val_str[VALUE_LENGTH]; + unsigned long flags; + char val_str[VALUE_LENGTH]; - if (suppress_report(&data->location)) - return; + if ( suppress_report(&data->location) ) + return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location, &flags); - val_to_string(val_str, sizeof(val_str), data->type, val); + val_to_string(val_str, sizeof(val_str), data->type, val); - pr_err("load of value %s is not a valid value for type %s\n", - val_str, data->type->type_name); + pr_err("load of value %s is not a valid value for type %s\n", val_str, + data->type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(&flags); } EXPORT_SYMBOL(__ubsan_handle_load_invalid_value); void __ubsan_handle_pointer_overflow(struct pointer_overflow_data *data, - unsigned long base, unsigned long result) + unsigned long base, unsigned 
long result) { - unsigned long flags; + unsigned long flags; - if (suppress_report(&data->location)) - return; + if ( suppress_report(&data->location) ) + return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location, &flags); - pr_err("pointer operation %s %p to %p\n", - base > result ? "underflowed" : "overflowed", - _p(base), _p(result)); + pr_err("pointer operation %s %p to %p\n", + base > result ? "underflowed" : "overflowed", _p(base), _p(result)); - ubsan_epilogue(&flags); + ubsan_epilogue(&flags); } diff --git a/xen/common/unlz4.c b/xen/common/unlz4.c index 9dcaec7e84..2cd73d2194 100644 --- a/xen/common/unlz4.c +++ b/xen/common/unlz4.c @@ -23,145 +23,165 @@ #define ARCHIVE_MAGICNUMBER 0x184C2102 STATIC int INIT unlz4(unsigned char *input, unsigned int in_len, - int (*fill)(void *, unsigned int), - int (*flush)(void *, unsigned int), - unsigned char *output, - unsigned int *posp, - void (*error)(const char *x)) + int (*fill)(void *, unsigned int), + int (*flush)(void *, unsigned int), unsigned char *output, + unsigned int *posp, void (*error)(const char *x)) { - int ret = -1; - size_t chunksize = 0; - size_t uncomp_chunksize = LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE; - u8 *inp; - u8 *inp_start; - u8 *outp; - int size = in_len -= 4; + int ret = -1; + size_t chunksize = 0; + size_t uncomp_chunksize = LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE; + u8 *inp; + u8 *inp_start; + u8 *outp; + int size = in_len -= 4; #if defined(__XEN__) || defined(__MINIOS__) - size_t out_len = get_unaligned_le32(input + in_len); + size_t out_len = get_unaligned_le32(input + in_len); #endif - size_t dest_len; - - - if (output) { - outp = output; - } else if (!flush) { - error("NULL output pointer and no flush function provided"); - goto exit_0; - } else { - outp = large_malloc(uncomp_chunksize); - if (!outp) { - error("Could not allocate output buffer"); - goto exit_0; - } - } - - if (input && fill) { - error("Both input pointer and fill function provided,"); - goto exit_1; - } else if (input) { - inp = input; - } else if (!fill) { - error("NULL input pointer and missing fill function"); - goto exit_1; - } else { - inp = large_malloc(lz4_compressbound(uncomp_chunksize)); - if (!inp) { - error("Could not allocate input buffer"); - goto exit_1; - } - } - inp_start = inp; - - if (posp) - *posp = 0; - - if (fill) - fill(inp, 4); - - chunksize = get_unaligned_le32(inp); - if (chunksize == ARCHIVE_MAGICNUMBER) { - inp += 4; - size -= 4; - } else { - error("invalid header"); - goto exit_2; - } - - if (posp) - *posp += 4; - - for (;;) { - - if (fill) - fill(inp, 4); - - chunksize = get_unaligned_le32(inp); - if (chunksize == ARCHIVE_MAGICNUMBER) { - inp += 4; - size -= 4; - if (posp) - *posp += 4; - continue; - } - inp += 4; - size -= 4; - - if (posp) - *posp += 4; - - if (fill) { - if (chunksize > lz4_compressbound(uncomp_chunksize)) { - error("chunk length is longer than allocated"); - goto exit_2; - } - fill(inp, chunksize); - } + size_t dest_len; + + if ( output ) + { + outp = output; + } + else if ( !flush ) + { + error("NULL output pointer and no flush function provided"); + goto exit_0; + } + else + { + outp = large_malloc(uncomp_chunksize); + if ( !outp ) + { + error("Could not allocate output buffer"); + goto exit_0; + } + } + + if ( input && fill ) + { + error("Both input pointer and fill function provided,"); + goto exit_1; + } + else if ( input ) + { + inp = input; + } + else if ( !fill ) + { + error("NULL input pointer and missing fill function"); + goto exit_1; + } + else + { + inp = 
large_malloc(lz4_compressbound(uncomp_chunksize)); + if ( !inp ) + { + error("Could not allocate input buffer"); + goto exit_1; + } + } + inp_start = inp; + + if ( posp ) + *posp = 0; + + if ( fill ) + fill(inp, 4); + + chunksize = get_unaligned_le32(inp); + if ( chunksize == ARCHIVE_MAGICNUMBER ) + { + inp += 4; + size -= 4; + } + else + { + error("invalid header"); + goto exit_2; + } + + if ( posp ) + *posp += 4; + + for ( ;; ) + { + if ( fill ) + fill(inp, 4); + + chunksize = get_unaligned_le32(inp); + if ( chunksize == ARCHIVE_MAGICNUMBER ) + { + inp += 4; + size -= 4; + if ( posp ) + *posp += 4; + continue; + } + inp += 4; + size -= 4; + + if ( posp ) + *posp += 4; + + if ( fill ) + { + if ( chunksize > lz4_compressbound(uncomp_chunksize) ) + { + error("chunk length is longer than allocated"); + goto exit_2; + } + fill(inp, chunksize); + } #if defined(__XEN__) || defined(__MINIOS__) - if (out_len >= uncomp_chunksize) { - dest_len = uncomp_chunksize; - out_len -= dest_len; - } else - dest_len = out_len; - ret = lz4_decompress(inp, &chunksize, outp, dest_len); + if ( out_len >= uncomp_chunksize ) + { + dest_len = uncomp_chunksize; + out_len -= dest_len; + } + else + dest_len = out_len; + ret = lz4_decompress(inp, &chunksize, outp, dest_len); #else - dest_len = uncomp_chunksize; - ret = lz4_decompress_unknownoutputsize(inp, chunksize, outp, - &dest_len); + dest_len = uncomp_chunksize; + ret = lz4_decompress_unknownoutputsize(inp, chunksize, outp, &dest_len); #endif - if (ret < 0) { - error("Decoding failed"); - goto exit_2; - } - - ret = -1; - if (flush && flush(outp, dest_len) != dest_len) - goto exit_2; - if (output) - outp += dest_len; - if (posp) - *posp += chunksize; - - size -= chunksize; - - if (size == 0) - break; - else if (size < 0) { - error("data corrupted"); - goto exit_2; - } - - inp += chunksize; - if (fill) - inp = inp_start; - } - - ret = 0; + if ( ret < 0 ) + { + error("Decoding failed"); + goto exit_2; + } + + ret = -1; + if ( flush && flush(outp, dest_len) != dest_len ) + goto exit_2; + if ( output ) + outp += dest_len; + if ( posp ) + *posp += chunksize; + + size -= chunksize; + + if ( size == 0 ) + break; + else if ( size < 0 ) + { + error("data corrupted"); + goto exit_2; + } + + inp += chunksize; + if ( fill ) + inp = inp_start; + } + + ret = 0; exit_2: - if (!input) - large_free(inp_start); + if ( !input ) + large_free(inp_start); exit_1: - if (!output) - large_free(outp); + if ( !output ) + large_free(outp); exit_0: - return ret; + return ret; } diff --git a/xen/common/unlzma.c b/xen/common/unlzma.c index 9134277bba..61cefb1f13 100644 --- a/xen/common/unlzma.c +++ b/xen/common/unlzma.c @@ -32,17 +32,16 @@ static long long INIT read_int(unsigned char *ptr, int size) { - int i; - long long ret = 0; + int i; + long long ret = 0; - for (i = 0; i < size; i++) - ret = (ret << 8) | ptr[size-i-1]; - return ret; + for ( i = 0; i < size; i++ ) + ret = (ret << 8) | ptr[size - i - 1]; + return ret; } #define ENDIAN_CONVERT(x) \ - x = (typeof(x))read_int((unsigned char *)&x, sizeof(x)) - + x = (typeof(x))read_int((unsigned char *)&x, sizeof(x)) /* Small range coder implementation for lzma. 
* Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org > @@ -56,83 +55,82 @@ static long long INIT read_int(unsigned char *ptr, int size) #include #endif -#define LZMA_IOBUF_SIZE 0x10000 - -struct rc { - int (*fill)(void*, unsigned int); - uint8_t *ptr; - uint8_t *buffer; - uint8_t *buffer_end; - int buffer_size; - uint32_t code; - uint32_t range; - uint32_t bound; - void (*error)(const char *); -}; +#define LZMA_IOBUF_SIZE 0x10000 +struct rc +{ + int (*fill)(void *, unsigned int); + uint8_t *ptr; + uint8_t *buffer; + uint8_t *buffer_end; + int buffer_size; + uint32_t code; + uint32_t range; + uint32_t bound; + void (*error)(const char *); +}; #define RC_TOP_BITS 24 #define RC_MOVE_BITS 5 #define RC_MODEL_TOTAL_BITS 11 - static int INIT nofill(void *buffer, unsigned int len) { - return -1; + return -1; } /* Called twice: once at startup and once in rc_normalize() */ static void INIT rc_read(struct rc *rc) { - rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE); - if (rc->buffer_size <= 0) - rc->error("unexpected EOF"); - rc->ptr = rc->buffer; - rc->buffer_end = rc->buffer + rc->buffer_size; + rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE); + if ( rc->buffer_size <= 0 ) + rc->error("unexpected EOF"); + rc->ptr = rc->buffer; + rc->buffer_end = rc->buffer + rc->buffer_size; } /* Called once */ static inline void INIT rc_init(struct rc *rc, - int (*fill)(void*, unsigned int), - unsigned char *buffer, int buffer_size) + int (*fill)(void *, unsigned int), + unsigned char *buffer, int buffer_size) { - if (fill) - rc->fill = fill; - else - rc->fill = nofill; - rc->buffer = (uint8_t *)buffer; - rc->buffer_size = buffer_size; - rc->buffer_end = rc->buffer + rc->buffer_size; - rc->ptr = rc->buffer; - - rc->code = 0; - rc->range = 0xFFFFFFFF; + if ( fill ) + rc->fill = fill; + else + rc->fill = nofill; + rc->buffer = (uint8_t *)buffer; + rc->buffer_size = buffer_size; + rc->buffer_end = rc->buffer + rc->buffer_size; + rc->ptr = rc->buffer; + + rc->code = 0; + rc->range = 0xFFFFFFFF; } static inline void INIT rc_init_code(struct rc *rc) { - int i; - - for (i = 0; i < 5; i++) { - if (rc->ptr >= rc->buffer_end) - rc_read(rc); - rc->code = (rc->code << 8) | *rc->ptr++; - } + int i; + + for ( i = 0; i < 5; i++ ) + { + if ( rc->ptr >= rc->buffer_end ) + rc_read(rc); + rc->code = (rc->code << 8) | *rc->ptr++; + } } - /* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */ static void INIT rc_do_normalize(struct rc *rc) { - if (rc->ptr >= rc->buffer_end) - rc_read(rc); - rc->range <<= 8; - rc->code = (rc->code << 8) | *rc->ptr++; + if ( rc->ptr >= rc->buffer_end ) + rc_read(rc); + rc->range <<= 8; + rc->code = (rc->code << 8) | *rc->ptr++; } static inline void INIT rc_normalize(struct rc *rc) { - if (rc->range < (1 << RC_TOP_BITS)) - rc_do_normalize(rc); + if ( rc->range < (1 << RC_TOP_BITS) ) + rc_do_normalize(rc); } /* Called 9 times */ @@ -141,68 +139,71 @@ static inline void INIT rc_normalize(struct rc *rc) */ static inline uint32_t INIT rc_is_bit_0_helper(struct rc *rc, uint16_t *p) { - rc_normalize(rc); - rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS); - return rc->bound; + rc_normalize(rc); + rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS); + return rc->bound; } static inline int INIT rc_is_bit_0(struct rc *rc, uint16_t *p) { - uint32_t t = rc_is_bit_0_helper(rc, p); - return rc->code < t; + uint32_t t = rc_is_bit_0_helper(rc, p); + return rc->code < t; } /* Called ~10 times, but very small, thus inlined */ static inline void INIT 
rc_update_bit_0(struct rc *rc, uint16_t *p) { - rc->range = rc->bound; - *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS; + rc->range = rc->bound; + *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS; } static inline void rc_update_bit_1(struct rc *rc, uint16_t *p) { - rc->range -= rc->bound; - rc->code -= rc->bound; - *p -= *p >> RC_MOVE_BITS; + rc->range -= rc->bound; + rc->code -= rc->bound; + *p -= *p >> RC_MOVE_BITS; } /* Called 4 times in unlzma loop */ static int INIT rc_get_bit(struct rc *rc, uint16_t *p, int *symbol) { - if (rc_is_bit_0(rc, p)) { - rc_update_bit_0(rc, p); - *symbol *= 2; - return 0; - } else { - rc_update_bit_1(rc, p); - *symbol = *symbol * 2 + 1; - return 1; - } + if ( rc_is_bit_0(rc, p) ) + { + rc_update_bit_0(rc, p); + *symbol *= 2; + return 0; + } + else + { + rc_update_bit_1(rc, p); + *symbol = *symbol * 2 + 1; + return 1; + } } /* Called once */ static inline int INIT rc_direct_bit(struct rc *rc) { - rc_normalize(rc); - rc->range >>= 1; - if (rc->code >= rc->range) { - rc->code -= rc->range; - return 1; - } - return 0; + rc_normalize(rc); + rc->range >>= 1; + if ( rc->code >= rc->range ) + { + rc->code -= rc->range; + return 1; + } + return 0; } /* Called twice */ -static inline void INIT -rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol) +static inline void INIT rc_bit_tree_decode(struct rc *rc, uint16_t *p, + int num_levels, int *symbol) { - int i = num_levels; + int i = num_levels; - *symbol = 1; - while (i--) - rc_get_bit(rc, p + *symbol, symbol); - *symbol -= 1 << num_levels; + *symbol = 1; + while ( i-- ) + rc_get_bit(rc, p + *symbol, symbol); + *symbol -= 1 << num_levels; } - /* * Small lzma deflate implementation. * Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org > @@ -211,13 +212,12 @@ rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol) * Copyright (C) 1999-2005 Igor Pavlov */ - -struct lzma_header { - uint8_t pos; - uint32_t dict_size; - uint64_t dst_size; -} __attribute__((packed)) ; - +struct lzma_header +{ + uint8_t pos; + uint32_t dict_size; + uint64_t dst_size; +} __attribute__((packed)); #define LZMA_BASE_SIZE 1846 #define LZMA_LIT_SIZE 768 @@ -231,10 +231,10 @@ struct lzma_header { #define LZMA_LEN_CHOICE 0 #define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1) #define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1) -#define LZMA_LEN_MID (LZMA_LEN_LOW \ - + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS))) -#define LZMA_LEN_HIGH (LZMA_LEN_MID \ - +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS))) +#define LZMA_LEN_MID \ + (LZMA_LEN_LOW + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS))) +#define LZMA_LEN_HIGH \ + (LZMA_LEN_MID + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS))) #define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS)) #define LZMA_NUM_STATES 12 @@ -257,405 +257,434 @@ struct lzma_header { #define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES) #define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES) #define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES) -#define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \ - + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX)) -#define LZMA_SPEC_POS (LZMA_POS_SLOT \ - +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS)) -#define LZMA_ALIGN (LZMA_SPEC_POS \ - + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX) +#define LZMA_POS_SLOT \ + (LZMA_IS_REP_0_LONG + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX)) +#define LZMA_SPEC_POS \ + (LZMA_POS_SLOT + (LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS)) 
+#define LZMA_ALIGN \ + (LZMA_SPEC_POS + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX) #define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS)) #define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS) #define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS) - -struct writer { - uint8_t *buffer; - uint8_t previous_byte; - size_t buffer_pos; - int bufsize; - size_t global_pos; - int(*flush)(void*, unsigned int); - struct lzma_header *header; +struct writer +{ + uint8_t *buffer; + uint8_t previous_byte; + size_t buffer_pos; + int bufsize; + size_t global_pos; + int (*flush)(void *, unsigned int); + struct lzma_header *header; }; -struct cstate { - int state; - uint32_t rep0, rep1, rep2, rep3; +struct cstate +{ + int state; + uint32_t rep0, rep1, rep2, rep3; }; static inline size_t INIT get_pos(struct writer *wr) { - return - wr->global_pos + wr->buffer_pos; + return wr->global_pos + wr->buffer_pos; } -static inline uint8_t INIT peek_old_byte(struct writer *wr, - uint32_t offs) +static inline uint8_t INIT peek_old_byte(struct writer *wr, uint32_t offs) { - if (!wr->flush) { - int32_t pos; - while (offs > wr->header->dict_size) - offs -= wr->header->dict_size; - pos = wr->buffer_pos - offs; - return wr->buffer[pos]; - } else { - uint32_t pos = wr->buffer_pos - offs; - while (pos >= wr->header->dict_size) - pos += wr->header->dict_size; - return wr->buffer[pos]; - } - + if ( !wr->flush ) + { + int32_t pos; + while ( offs > wr->header->dict_size ) + offs -= wr->header->dict_size; + pos = wr->buffer_pos - offs; + return wr->buffer[pos]; + } + else + { + uint32_t pos = wr->buffer_pos - offs; + while ( pos >= wr->header->dict_size ) + pos += wr->header->dict_size; + return wr->buffer[pos]; + } } static inline int INIT write_byte(struct writer *wr, uint8_t byte) { - wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte; - if (wr->flush && wr->buffer_pos == wr->header->dict_size) { - wr->buffer_pos = 0; - wr->global_pos += wr->header->dict_size; - if (wr->flush((char *)wr->buffer, wr->header->dict_size) - != wr->header->dict_size) - return -1; - } - return 0; + wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte; + if ( wr->flush && wr->buffer_pos == wr->header->dict_size ) + { + wr->buffer_pos = 0; + wr->global_pos += wr->header->dict_size; + if ( wr->flush((char *)wr->buffer, wr->header->dict_size) != + wr->header->dict_size ) + return -1; + } + return 0; } - static inline int INIT copy_byte(struct writer *wr, uint32_t offs) { - return write_byte(wr, peek_old_byte(wr, offs)); + return write_byte(wr, peek_old_byte(wr, offs)); } -static inline int INIT copy_bytes(struct writer *wr, - uint32_t rep0, int len) +static inline int INIT copy_bytes(struct writer *wr, uint32_t rep0, int len) { - do { - if (copy_byte(wr, rep0)) - return -1; - len--; - } while (len != 0 && wr->buffer_pos < wr->header->dst_size); + do { + if ( copy_byte(wr, rep0) ) + return -1; + len--; + } while ( len != 0 && wr->buffer_pos < wr->header->dst_size ); - return len; + return len; } static inline int INIT process_bit0(struct writer *wr, struct rc *rc, - struct cstate *cst, uint16_t *p, - int pos_state, uint16_t *prob, - int lc, uint32_t literal_pos_mask) { - int mi = 1; - rc_update_bit_0(rc, prob); - prob = (p + LZMA_LITERAL + - (LZMA_LIT_SIZE - * (((get_pos(wr) & literal_pos_mask) << lc) - + (wr->previous_byte >> (8 - lc)))) - ); - - if (cst->state >= LZMA_NUM_LIT_STATES) { - int match_byte = peek_old_byte(wr, cst->rep0); - do { - int bit; - uint16_t *prob_lit; - - match_byte <<= 1; - bit = 
match_byte & 0x100; - prob_lit = prob + 0x100 + bit + mi; - if (rc_get_bit(rc, prob_lit, &mi)) { - if (!bit) - break; - } else { - if (bit) - break; - } - } while (mi < 0x100); - } - while (mi < 0x100) { - uint16_t *prob_lit = prob + mi; - rc_get_bit(rc, prob_lit, &mi); - } - if (cst->state < 4) - cst->state = 0; - else if (cst->state < 10) - cst->state -= 3; - else - cst->state -= 6; - - return write_byte(wr, mi); + struct cstate *cst, uint16_t *p, + int pos_state, uint16_t *prob, int lc, + uint32_t literal_pos_mask) +{ + int mi = 1; + rc_update_bit_0(rc, prob); + prob = (p + LZMA_LITERAL + + (LZMA_LIT_SIZE * (((get_pos(wr) & literal_pos_mask) << lc) + + (wr->previous_byte >> (8 - lc))))); + + if ( cst->state >= LZMA_NUM_LIT_STATES ) + { + int match_byte = peek_old_byte(wr, cst->rep0); + do { + int bit; + uint16_t *prob_lit; + + match_byte <<= 1; + bit = match_byte & 0x100; + prob_lit = prob + 0x100 + bit + mi; + if ( rc_get_bit(rc, prob_lit, &mi) ) + { + if ( !bit ) + break; + } + else + { + if ( bit ) + break; + } + } while ( mi < 0x100 ); + } + while ( mi < 0x100 ) + { + uint16_t *prob_lit = prob + mi; + rc_get_bit(rc, prob_lit, &mi); + } + if ( cst->state < 4 ) + cst->state = 0; + else if ( cst->state < 10 ) + cst->state -= 3; + else + cst->state -= 6; + + return write_byte(wr, mi); } static inline int INIT process_bit1(struct writer *wr, struct rc *rc, - struct cstate *cst, uint16_t *p, - int pos_state, uint16_t *prob) { - int offset; - uint16_t *prob_len; - int num_bits; - int len; - - rc_update_bit_1(rc, prob); - prob = p + LZMA_IS_REP + cst->state; - if (rc_is_bit_0(rc, prob)) { - rc_update_bit_0(rc, prob); - cst->rep3 = cst->rep2; - cst->rep2 = cst->rep1; - cst->rep1 = cst->rep0; - cst->state = cst->state < LZMA_NUM_LIT_STATES ? 0 : 3; - prob = p + LZMA_LEN_CODER; - } else { - rc_update_bit_1(rc, prob); - prob = p + LZMA_IS_REP_G0 + cst->state; - if (rc_is_bit_0(rc, prob)) { - rc_update_bit_0(rc, prob); - prob = (p + LZMA_IS_REP_0_LONG - + (cst->state << - LZMA_NUM_POS_BITS_MAX) + - pos_state); - if (rc_is_bit_0(rc, prob)) { - rc_update_bit_0(rc, prob); - - cst->state = cst->state < LZMA_NUM_LIT_STATES ? - 9 : 11; - return copy_byte(wr, cst->rep0); - } else { - rc_update_bit_1(rc, prob); - } - } else { - uint32_t distance; - - rc_update_bit_1(rc, prob); - prob = p + LZMA_IS_REP_G1 + cst->state; - if (rc_is_bit_0(rc, prob)) { - rc_update_bit_0(rc, prob); - distance = cst->rep1; - } else { - rc_update_bit_1(rc, prob); - prob = p + LZMA_IS_REP_G2 + cst->state; - if (rc_is_bit_0(rc, prob)) { - rc_update_bit_0(rc, prob); - distance = cst->rep2; - } else { - rc_update_bit_1(rc, prob); - distance = cst->rep3; - cst->rep3 = cst->rep2; - } - cst->rep2 = cst->rep1; - } - cst->rep1 = cst->rep0; - cst->rep0 = distance; - } - cst->state = cst->state < LZMA_NUM_LIT_STATES ? 
8 : 11; - prob = p + LZMA_REP_LEN_CODER; - } - - prob_len = prob + LZMA_LEN_CHOICE; - if (rc_is_bit_0(rc, prob_len)) { - rc_update_bit_0(rc, prob_len); - prob_len = (prob + LZMA_LEN_LOW - + (pos_state << - LZMA_LEN_NUM_LOW_BITS)); - offset = 0; - num_bits = LZMA_LEN_NUM_LOW_BITS; - } else { - rc_update_bit_1(rc, prob_len); - prob_len = prob + LZMA_LEN_CHOICE_2; - if (rc_is_bit_0(rc, prob_len)) { - rc_update_bit_0(rc, prob_len); - prob_len = (prob + LZMA_LEN_MID - + (pos_state << - LZMA_LEN_NUM_MID_BITS)); - offset = 1 << LZMA_LEN_NUM_LOW_BITS; - num_bits = LZMA_LEN_NUM_MID_BITS; - } else { - rc_update_bit_1(rc, prob_len); - prob_len = prob + LZMA_LEN_HIGH; - offset = ((1 << LZMA_LEN_NUM_LOW_BITS) - + (1 << LZMA_LEN_NUM_MID_BITS)); - num_bits = LZMA_LEN_NUM_HIGH_BITS; - } - } - - rc_bit_tree_decode(rc, prob_len, num_bits, &len); - len += offset; - - if (cst->state < 4) { - int pos_slot; - - cst->state += LZMA_NUM_LIT_STATES; - prob = - p + LZMA_POS_SLOT + - ((len < - LZMA_NUM_LEN_TO_POS_STATES ? len : - LZMA_NUM_LEN_TO_POS_STATES - 1) - << LZMA_NUM_POS_SLOT_BITS); - rc_bit_tree_decode(rc, prob, - LZMA_NUM_POS_SLOT_BITS, - &pos_slot); - if (pos_slot >= LZMA_START_POS_MODEL_INDEX) { - int i, mi; - num_bits = (pos_slot >> 1) - 1; - cst->rep0 = 2 | (pos_slot & 1); - if (pos_slot < LZMA_END_POS_MODEL_INDEX) { - cst->rep0 <<= num_bits; - prob = p + LZMA_SPEC_POS + - cst->rep0 - pos_slot - 1; - } else { - num_bits -= LZMA_NUM_ALIGN_BITS; - while (num_bits--) - cst->rep0 = (cst->rep0 << 1) | - rc_direct_bit(rc); - prob = p + LZMA_ALIGN; - cst->rep0 <<= LZMA_NUM_ALIGN_BITS; - num_bits = LZMA_NUM_ALIGN_BITS; - } - i = 1; - mi = 1; - while (num_bits--) { - if (rc_get_bit(rc, prob + mi, &mi)) - cst->rep0 |= i; - i <<= 1; - } - } else - cst->rep0 = pos_slot; - if (++(cst->rep0) == 0) - return 0; - if (cst->rep0 > wr->header->dict_size - || cst->rep0 > get_pos(wr)) - return -1; - } - - len += LZMA_MATCH_MIN_LEN; - - return copy_bytes(wr, cst->rep0, len); + struct cstate *cst, uint16_t *p, + int pos_state, uint16_t *prob) +{ + int offset; + uint16_t *prob_len; + int num_bits; + int len; + + rc_update_bit_1(rc, prob); + prob = p + LZMA_IS_REP + cst->state; + if ( rc_is_bit_0(rc, prob) ) + { + rc_update_bit_0(rc, prob); + cst->rep3 = cst->rep2; + cst->rep2 = cst->rep1; + cst->rep1 = cst->rep0; + cst->state = cst->state < LZMA_NUM_LIT_STATES ? 0 : 3; + prob = p + LZMA_LEN_CODER; + } + else + { + rc_update_bit_1(rc, prob); + prob = p + LZMA_IS_REP_G0 + cst->state; + if ( rc_is_bit_0(rc, prob) ) + { + rc_update_bit_0(rc, prob); + prob = (p + LZMA_IS_REP_0_LONG + + (cst->state << LZMA_NUM_POS_BITS_MAX) + pos_state); + if ( rc_is_bit_0(rc, prob) ) + { + rc_update_bit_0(rc, prob); + + cst->state = cst->state < LZMA_NUM_LIT_STATES ? 9 : 11; + return copy_byte(wr, cst->rep0); + } + else + { + rc_update_bit_1(rc, prob); + } + } + else + { + uint32_t distance; + + rc_update_bit_1(rc, prob); + prob = p + LZMA_IS_REP_G1 + cst->state; + if ( rc_is_bit_0(rc, prob) ) + { + rc_update_bit_0(rc, prob); + distance = cst->rep1; + } + else + { + rc_update_bit_1(rc, prob); + prob = p + LZMA_IS_REP_G2 + cst->state; + if ( rc_is_bit_0(rc, prob) ) + { + rc_update_bit_0(rc, prob); + distance = cst->rep2; + } + else + { + rc_update_bit_1(rc, prob); + distance = cst->rep3; + cst->rep3 = cst->rep2; + } + cst->rep2 = cst->rep1; + } + cst->rep1 = cst->rep0; + cst->rep0 = distance; + } + cst->state = cst->state < LZMA_NUM_LIT_STATES ? 
8 : 11; + prob = p + LZMA_REP_LEN_CODER; + } + + prob_len = prob + LZMA_LEN_CHOICE; + if ( rc_is_bit_0(rc, prob_len) ) + { + rc_update_bit_0(rc, prob_len); + prob_len = (prob + LZMA_LEN_LOW + (pos_state << LZMA_LEN_NUM_LOW_BITS)); + offset = 0; + num_bits = LZMA_LEN_NUM_LOW_BITS; + } + else + { + rc_update_bit_1(rc, prob_len); + prob_len = prob + LZMA_LEN_CHOICE_2; + if ( rc_is_bit_0(rc, prob_len) ) + { + rc_update_bit_0(rc, prob_len); + prob_len = + (prob + LZMA_LEN_MID + (pos_state << LZMA_LEN_NUM_MID_BITS)); + offset = 1 << LZMA_LEN_NUM_LOW_BITS; + num_bits = LZMA_LEN_NUM_MID_BITS; + } + else + { + rc_update_bit_1(rc, prob_len); + prob_len = prob + LZMA_LEN_HIGH; + offset = + ((1 << LZMA_LEN_NUM_LOW_BITS) + (1 << LZMA_LEN_NUM_MID_BITS)); + num_bits = LZMA_LEN_NUM_HIGH_BITS; + } + } + + rc_bit_tree_decode(rc, prob_len, num_bits, &len); + len += offset; + + if ( cst->state < 4 ) + { + int pos_slot; + + cst->state += LZMA_NUM_LIT_STATES; + prob = + p + LZMA_POS_SLOT + + ((len < LZMA_NUM_LEN_TO_POS_STATES ? len + : LZMA_NUM_LEN_TO_POS_STATES - 1) + << LZMA_NUM_POS_SLOT_BITS); + rc_bit_tree_decode(rc, prob, LZMA_NUM_POS_SLOT_BITS, &pos_slot); + if ( pos_slot >= LZMA_START_POS_MODEL_INDEX ) + { + int i, mi; + num_bits = (pos_slot >> 1) - 1; + cst->rep0 = 2 | (pos_slot & 1); + if ( pos_slot < LZMA_END_POS_MODEL_INDEX ) + { + cst->rep0 <<= num_bits; + prob = p + LZMA_SPEC_POS + cst->rep0 - pos_slot - 1; + } + else + { + num_bits -= LZMA_NUM_ALIGN_BITS; + while ( num_bits-- ) + cst->rep0 = (cst->rep0 << 1) | rc_direct_bit(rc); + prob = p + LZMA_ALIGN; + cst->rep0 <<= LZMA_NUM_ALIGN_BITS; + num_bits = LZMA_NUM_ALIGN_BITS; + } + i = 1; + mi = 1; + while ( num_bits-- ) + { + if ( rc_get_bit(rc, prob + mi, &mi) ) + cst->rep0 |= i; + i <<= 1; + } + } + else + cst->rep0 = pos_slot; + if ( ++(cst->rep0) == 0 ) + return 0; + if ( cst->rep0 > wr->header->dict_size || cst->rep0 > get_pos(wr) ) + return -1; + } + + len += LZMA_MATCH_MIN_LEN; + + return copy_bytes(wr, cst->rep0, len); } - - STATIC int INIT unlzma(unsigned char *buf, unsigned int in_len, - int(*fill)(void*, unsigned int), - int(*flush)(void*, unsigned int), - unsigned char *output, - unsigned int *posp, - void(*error)(const char *x) - ) + int (*fill)(void *, unsigned int), + int (*flush)(void *, unsigned int), + unsigned char *output, unsigned int *posp, + void (*error)(const char *x)) { - struct lzma_header header; - int lc, pb, lp; - uint32_t pos_state_mask; - uint32_t literal_pos_mask; - uint16_t *p; - int num_probs; - struct rc rc; - int i, mi; - struct writer wr; - struct cstate cst; - unsigned char *inbuf; - int ret = -1; - - rc.error = error; - - if (buf) - inbuf = buf; - else - inbuf = malloc(LZMA_IOBUF_SIZE); - if (!inbuf) { - error("Could not allocate input buffer"); - goto exit_0; - } - - cst.state = 0; - cst.rep0 = cst.rep1 = cst.rep2 = cst.rep3 = 1; - - wr.header = &header; - wr.flush = flush; - wr.global_pos = 0; - wr.previous_byte = 0; - wr.buffer_pos = 0; - - rc_init(&rc, fill, inbuf, in_len); - - for (i = 0; i < sizeof(header); i++) { - if (rc.ptr >= rc.buffer_end) - rc_read(&rc); - ((unsigned char *)&header)[i] = *rc.ptr++; - } - - if (header.pos >= (9 * 5 * 5)) { - error("bad header"); - goto exit_1; - } - - mi = 0; - lc = header.pos; - while (lc >= 9) { - mi++; - lc -= 9; - } - pb = 0; - lp = mi; - while (lp >= 5) { - pb++; - lp -= 5; - } - pos_state_mask = (1 << pb) - 1; - literal_pos_mask = (1 << lp) - 1; - - ENDIAN_CONVERT(header.dict_size); - ENDIAN_CONVERT(header.dst_size); - - if (header.dict_size == 0) - 
header.dict_size = 1; - - if (output) - wr.buffer = output; - else { - wr.bufsize = MIN(header.dst_size, header.dict_size); - wr.buffer = large_malloc(wr.bufsize); - } - if (wr.buffer == NULL) - goto exit_1; - - num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)); - p = (uint16_t *) large_malloc(num_probs * sizeof(*p)); - if (p == 0) - goto exit_2; - num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp)); - for (i = 0; i < num_probs; i++) - p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1; - - rc_init_code(&rc); - - while (get_pos(&wr) < header.dst_size) { - int pos_state = get_pos(&wr) & pos_state_mask; - uint16_t *prob = p + LZMA_IS_MATCH + - (cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state; - if (rc_is_bit_0(&rc, prob)) { - if (process_bit0(&wr, &rc, &cst, p, pos_state, prob, - lc, literal_pos_mask)) { - error("LZMA data is corrupt"); - goto exit_3; - } - } else { - if (process_bit1(&wr, &rc, &cst, p, pos_state, prob)) { - error("LZMA data is corrupt"); - goto exit_3; - } - if (cst.rep0 == 0) - break; - } - if (rc.buffer_size <= 0) - goto exit_3; - } - - if (posp) - *posp = rc.ptr-rc.buffer; - if (!wr.flush || wr.flush(wr.buffer, wr.buffer_pos) == wr.buffer_pos) - ret = 0; + struct lzma_header header; + int lc, pb, lp; + uint32_t pos_state_mask; + uint32_t literal_pos_mask; + uint16_t *p; + int num_probs; + struct rc rc; + int i, mi; + struct writer wr; + struct cstate cst; + unsigned char *inbuf; + int ret = -1; + + rc.error = error; + + if ( buf ) + inbuf = buf; + else + inbuf = malloc(LZMA_IOBUF_SIZE); + if ( !inbuf ) + { + error("Could not allocate input buffer"); + goto exit_0; + } + + cst.state = 0; + cst.rep0 = cst.rep1 = cst.rep2 = cst.rep3 = 1; + + wr.header = &header; + wr.flush = flush; + wr.global_pos = 0; + wr.previous_byte = 0; + wr.buffer_pos = 0; + + rc_init(&rc, fill, inbuf, in_len); + + for ( i = 0; i < sizeof(header); i++ ) + { + if ( rc.ptr >= rc.buffer_end ) + rc_read(&rc); + ((unsigned char *)&header)[i] = *rc.ptr++; + } + + if ( header.pos >= (9 * 5 * 5) ) + { + error("bad header"); + goto exit_1; + } + + mi = 0; + lc = header.pos; + while ( lc >= 9 ) + { + mi++; + lc -= 9; + } + pb = 0; + lp = mi; + while ( lp >= 5 ) + { + pb++; + lp -= 5; + } + pos_state_mask = (1 << pb) - 1; + literal_pos_mask = (1 << lp) - 1; + + ENDIAN_CONVERT(header.dict_size); + ENDIAN_CONVERT(header.dst_size); + + if ( header.dict_size == 0 ) + header.dict_size = 1; + + if ( output ) + wr.buffer = output; + else + { + wr.bufsize = MIN(header.dst_size, header.dict_size); + wr.buffer = large_malloc(wr.bufsize); + } + if ( wr.buffer == NULL ) + goto exit_1; + + num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)); + p = (uint16_t *)large_malloc(num_probs * sizeof(*p)); + if ( p == 0 ) + goto exit_2; + num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp)); + for ( i = 0; i < num_probs; i++ ) + p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1; + + rc_init_code(&rc); + + while ( get_pos(&wr) < header.dst_size ) + { + int pos_state = get_pos(&wr) & pos_state_mask; + uint16_t *prob = p + LZMA_IS_MATCH + + (cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state; + if ( rc_is_bit_0(&rc, prob) ) + { + if ( process_bit0(&wr, &rc, &cst, p, pos_state, prob, lc, + literal_pos_mask) ) + { + error("LZMA data is corrupt"); + goto exit_3; + } + } + else + { + if ( process_bit1(&wr, &rc, &cst, p, pos_state, prob) ) + { + error("LZMA data is corrupt"); + goto exit_3; + } + if ( cst.rep0 == 0 ) + break; + } + if ( rc.buffer_size <= 0 ) + goto exit_3; + } + + if ( posp ) + *posp = rc.ptr - rc.buffer; + if ( !wr.flush || 
wr.flush(wr.buffer, wr.buffer_pos) == wr.buffer_pos ) + ret = 0; exit_3: - large_free(p); + large_free(p); exit_2: - if (!output) - large_free(wr.buffer); + if ( !output ) + large_free(wr.buffer); exit_1: - if (!buf) - free(inbuf); + if ( !buf ) + free(inbuf); exit_0: - return ret; + return ret; } diff --git a/xen/common/unlzo.c b/xen/common/unlzo.c index 5ae6cf911e..460fa05ffd 100644 --- a/xen/common/unlzo.c +++ b/xen/common/unlzo.c @@ -39,227 +39,254 @@ #if 1 /* ndef CONFIG_??? */ static inline u16 INIT get_unaligned_be16(void *p) { - return be16_to_cpup(p); + return be16_to_cpup(p); } static inline u32 INIT get_unaligned_be32(void *p) { - return be32_to_cpup(p); + return be32_to_cpup(p); } #else #include static inline u16 INIT get_unaligned_be16(void *p) { - return be16_to_cpu(__get_unaligned(p, 2)); + return be16_to_cpu(__get_unaligned(p, 2)); } static inline u32 INIT get_unaligned_be32(void *p) { - return be32_to_cpu(__get_unaligned(p, 4)); + return be32_to_cpu(__get_unaligned(p, 4)); } #endif -static const unsigned char lzop_magic[] = { - 0x89, 0x4c, 0x5a, 0x4f, 0x00, 0x0d, 0x0a, 0x1a, 0x0a }; +static const unsigned char lzop_magic[] = {0x89, 0x4c, 0x5a, 0x4f, 0x00, + 0x0d, 0x0a, 0x1a, 0x0a}; -#define LZO_BLOCK_SIZE (256*1024l) -#define HEADER_HAS_FILTER 0x00000800L -#define HEADER_SIZE_MIN (9 + 7 + 4 + 8 + 1 + 4) -#define HEADER_SIZE_MAX (9 + 7 + 1 + 8 + 8 + 4 + 1 + 255 + 4) +#define LZO_BLOCK_SIZE (256 * 1024l) +#define HEADER_HAS_FILTER 0x00000800L +#define HEADER_SIZE_MIN (9 + 7 + 4 + 8 + 1 + 4) +#define HEADER_SIZE_MAX (9 + 7 + 1 + 8 + 8 + 4 + 1 + 255 + 4) static int INIT parse_header(u8 *input, int *skip, int in_len) { - int l; - u8 *parse = input; - u8 *end = input + in_len; - u8 level = 0; - u16 version; - - /* - * Check that there's enough input to possibly have a valid header. - * Then it is possible to parse several fields until the minimum - * size may have been used. - */ - if (in_len < HEADER_SIZE_MIN) - return 0; - - /* read magic: 9 first bits */ - for (l = 0; l < 9; l++) { - if (*parse++ != lzop_magic[l]) - return 0; - } - /* get version (2bytes), skip library version (2), - * 'need to be extracted' version (2) and - * method (1) */ - version = get_unaligned_be16(parse); - parse += 7; - if (version >= 0x0940) - level = *parse++; - if (get_unaligned_be32(parse) & HEADER_HAS_FILTER) - parse += 8; /* flags + filter info */ - else - parse += 4; /* flags */ - - /* - * At least mode, mtime_low, filename length, and checksum must - * be left to be parsed. If also mtime_high is present, it's OK - * because the next input buffer check is after reading the - * filename length. - */ - if (end - parse < 8 + 1 + 4) - return 0; - - /* skip mode and mtime_low */ - parse += 8; - if (version >= 0x0940) - parse += 4; /* skip mtime_high */ - - l = *parse++; - /* don't care about the file name, and skip checksum */ - if (end - parse < l + 4) - return 0; - parse += l + 4; - - *skip = parse - input; - return 1; + int l; + u8 *parse = input; + u8 *end = input + in_len; + u8 level = 0; + u16 version; + + /* + * Check that there's enough input to possibly have a valid header. + * Then it is possible to parse several fields until the minimum + * size may have been used. 
+ */ + if ( in_len < HEADER_SIZE_MIN ) + return 0; + + /* read magic: 9 first bits */ + for ( l = 0; l < 9; l++ ) + { + if ( *parse++ != lzop_magic[l] ) + return 0; + } + /* get version (2bytes), skip library version (2), + * 'need to be extracted' version (2) and + * method (1) */ + version = get_unaligned_be16(parse); + parse += 7; + if ( version >= 0x0940 ) + level = *parse++; + if ( get_unaligned_be32(parse) & HEADER_HAS_FILTER ) + parse += 8; /* flags + filter info */ + else + parse += 4; /* flags */ + + /* + * At least mode, mtime_low, filename length, and checksum must + * be left to be parsed. If also mtime_high is present, it's OK + * because the next input buffer check is after reading the + * filename length. + */ + if ( end - parse < 8 + 1 + 4 ) + return 0; + + /* skip mode and mtime_low */ + parse += 8; + if ( version >= 0x0940 ) + parse += 4; /* skip mtime_high */ + + l = *parse++; + /* don't care about the file name, and skip checksum */ + if ( end - parse < l + 4 ) + return 0; + parse += l + 4; + + *skip = parse - input; + return 1; } STATIC int INIT unlzo(u8 *input, unsigned int in_len, - int (*fill) (void *, unsigned int), - int (*flush) (void *, unsigned int), - u8 *output, unsigned int *posp, - void (*error) (const char *x)) + int (*fill)(void *, unsigned int), + int (*flush)(void *, unsigned int), u8 *output, + unsigned int *posp, void (*error)(const char *x)) { - u8 r = 0; - int skip = 0; - u32 src_len, dst_len; - size_t tmp; - u8 *in_buf, *in_buf_save, *out_buf; - int ret = -1; - - if (output) { - out_buf = output; - } else if (!flush) { - error("NULL output pointer and no flush function provided"); - goto exit; - } else { - out_buf = malloc(LZO_BLOCK_SIZE); - if (!out_buf) { - error("Could not allocate output buffer"); - goto exit; - } - } - - if (input && fill) { - error("Both input pointer and fill function provided, don't know what to do"); - goto exit_1; - } else if (input) { - in_buf = input; - } else if (!fill || !posp) { - error("NULL input pointer and missing position pointer or fill function"); - goto exit_1; - } else { - in_buf = malloc(lzo1x_worst_compress(LZO_BLOCK_SIZE)); - if (!in_buf) { - error("Could not allocate input buffer"); - goto exit_1; - } - } - in_buf_save = in_buf; - - if (posp) - *posp = 0; - - if (fill) - fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE)); - - if (!parse_header(input, &skip, in_len)) { - error("invalid header"); - goto exit_2; - } - in_buf += skip; - in_len -= skip; - - if (posp) - *posp = skip; - - for (;;) { - /* read uncompressed block size */ - if (in_len < 4) { - error("file corrupted"); - goto exit_2; - } - dst_len = get_unaligned_be32(in_buf); - in_buf += 4; - in_len -= 4; - - /* exit if last block */ - if (dst_len == 0) { - if (posp) - *posp += 4; - break; - } - - if (dst_len > LZO_BLOCK_SIZE) { - error("dest len longer than block size"); - goto exit_2; - } - - /* read compressed block size, and skip block checksum info */ - if (in_len < 8) { - error("file corrupted"); - goto exit_2; - } - src_len = get_unaligned_be32(in_buf); - in_buf += 8; - in_len -= 8; - - if (src_len <= 0 || src_len > dst_len || src_len > in_len) { - error("file corrupted"); - goto exit_2; - } - - /* decompress */ - tmp = dst_len; - - /* When the input data is not compressed at all, - * lzo1x_decompress_safe will fail, so call memcpy() - * instead */ - if (unlikely(dst_len == src_len)) - memcpy(out_buf, in_buf, src_len); - else { - r = lzo1x_decompress_safe(in_buf, src_len, - out_buf, &tmp); - - if (r != LZO_E_OK || dst_len != tmp) { - 
error("Compressed data violation"); - goto exit_2; - } - } - - if (flush && flush(out_buf, dst_len) != dst_len) - goto exit_2; - if (output) - out_buf += dst_len; - if (posp) - *posp += src_len + 12; - if (fill) { - in_buf = in_buf_save; - fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE)); - } else { - in_buf += src_len; - in_len -= src_len; - } - } - - ret = 0; + u8 r = 0; + int skip = 0; + u32 src_len, dst_len; + size_t tmp; + u8 *in_buf, *in_buf_save, *out_buf; + int ret = -1; + + if ( output ) + { + out_buf = output; + } + else if ( !flush ) + { + error("NULL output pointer and no flush function provided"); + goto exit; + } + else + { + out_buf = malloc(LZO_BLOCK_SIZE); + if ( !out_buf ) + { + error("Could not allocate output buffer"); + goto exit; + } + } + + if ( input && fill ) + { + error("Both input pointer and fill function provided, don't know what " + "to do"); + goto exit_1; + } + else if ( input ) + { + in_buf = input; + } + else if ( !fill || !posp ) + { + error( + "NULL input pointer and missing position pointer or fill function"); + goto exit_1; + } + else + { + in_buf = malloc(lzo1x_worst_compress(LZO_BLOCK_SIZE)); + if ( !in_buf ) + { + error("Could not allocate input buffer"); + goto exit_1; + } + } + in_buf_save = in_buf; + + if ( posp ) + *posp = 0; + + if ( fill ) + fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE)); + + if ( !parse_header(input, &skip, in_len) ) + { + error("invalid header"); + goto exit_2; + } + in_buf += skip; + in_len -= skip; + + if ( posp ) + *posp = skip; + + for ( ;; ) + { + /* read uncompressed block size */ + if ( in_len < 4 ) + { + error("file corrupted"); + goto exit_2; + } + dst_len = get_unaligned_be32(in_buf); + in_buf += 4; + in_len -= 4; + + /* exit if last block */ + if ( dst_len == 0 ) + { + if ( posp ) + *posp += 4; + break; + } + + if ( dst_len > LZO_BLOCK_SIZE ) + { + error("dest len longer than block size"); + goto exit_2; + } + + /* read compressed block size, and skip block checksum info */ + if ( in_len < 8 ) + { + error("file corrupted"); + goto exit_2; + } + src_len = get_unaligned_be32(in_buf); + in_buf += 8; + in_len -= 8; + + if ( src_len <= 0 || src_len > dst_len || src_len > in_len ) + { + error("file corrupted"); + goto exit_2; + } + + /* decompress */ + tmp = dst_len; + + /* When the input data is not compressed at all, + * lzo1x_decompress_safe will fail, so call memcpy() + * instead */ + if ( unlikely(dst_len == src_len) ) + memcpy(out_buf, in_buf, src_len); + else + { + r = lzo1x_decompress_safe(in_buf, src_len, out_buf, &tmp); + + if ( r != LZO_E_OK || dst_len != tmp ) + { + error("Compressed data violation"); + goto exit_2; + } + } + + if ( flush && flush(out_buf, dst_len) != dst_len ) + goto exit_2; + if ( output ) + out_buf += dst_len; + if ( posp ) + *posp += src_len + 12; + if ( fill ) + { + in_buf = in_buf_save; + fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE)); + } + else + { + in_buf += src_len; + in_len -= src_len; + } + } + + ret = 0; exit_2: - if (!input) - free(in_buf_save); + if ( !input ) + free(in_buf_save); exit_1: - if (!output) - free(out_buf); + if ( !output ) + free(out_buf); exit: - return ret; + return ret; } diff --git a/xen/common/unxz.c b/xen/common/unxz.c index cf25c9fc8e..0bd44a0cee 100644 --- a/xen/common/unxz.c +++ b/xen/common/unxz.c @@ -102,19 +102,19 @@ * architecture or none if no BCJ filter is available for the architecture. 
*/ #ifdef CONFIG_X86 -# define XZ_DEC_X86 +#define XZ_DEC_X86 #endif #ifdef CONFIG_PPC -# define XZ_DEC_POWERPC +#define XZ_DEC_POWERPC #endif #ifdef CONFIG_ARM -# define XZ_DEC_ARM +#define XZ_DEC_ARM #endif #ifdef CONFIG_IA64 -# define XZ_DEC_IA64 +#define XZ_DEC_IA64 #endif #ifdef CONFIG_SPARC -# define XZ_DEC_SPARC +#define XZ_DEC_SPARC #endif /* @@ -158,143 +158,154 @@ * fill() and flush() won't be used. */ STATIC int INIT unxz(unsigned char *in, unsigned int in_size, - int (*fill)(void *dest, unsigned int size), - int (*flush)(void *src, unsigned int size), - unsigned char *out, unsigned int *in_used, - void (*error)(const char *x)) + int (*fill)(void *dest, unsigned int size), + int (*flush)(void *src, unsigned int size), + unsigned char *out, unsigned int *in_used, + void (*error)(const char *x)) { - struct xz_buf b; - struct xz_dec *s; - enum xz_ret ret; - bool_t must_free_in = false; - - xz_crc32_init(); - - if (in_used != NULL) - *in_used = 0; - - if (fill == NULL && flush == NULL) - s = xz_dec_init(XZ_SINGLE, 0); - else - s = xz_dec_init(XZ_DYNALLOC, (uint32_t)-1); - - if (s == NULL) - goto error_alloc_state; - - if (flush == NULL) { - b.out = out; - b.out_size = (size_t)-1; - } else { - b.out_size = XZ_IOBUF_SIZE; - b.out = malloc(XZ_IOBUF_SIZE); - if (b.out == NULL) - goto error_alloc_out; - } - - if (in == NULL) { - must_free_in = true; - in = malloc(XZ_IOBUF_SIZE); - if (in == NULL) - goto error_alloc_in; - } - - b.in = in; - b.in_pos = 0; - b.in_size = in_size; - b.out_pos = 0; - - if (fill == NULL && flush == NULL) { - ret = xz_dec_run(s, &b); - } else { - do { - if (b.in_pos == b.in_size && fill != NULL) { - if (in_used != NULL) - *in_used += b.in_pos; - - b.in_pos = 0; - - in_size = fill(in, XZ_IOBUF_SIZE); - if ((int) in_size < 0) { - /* - * This isn't an optimal error code - * but it probably isn't worth making - * a new one either. - */ - ret = XZ_BUF_ERROR; - break; - } - - b.in_size = in_size; - } - - ret = xz_dec_run(s, &b); - - if (flush != NULL && (b.out_pos == b.out_size - || (ret != XZ_OK && b.out_pos > 0))) { - /* - * Setting ret here may hide an error - * returned by xz_dec_run(), but probably - * it's not too bad. - */ - if (flush(b.out, b.out_pos) != (int)b.out_pos) - ret = XZ_BUF_ERROR; - - b.out_pos = 0; - } - } while (ret == XZ_OK); - - if (must_free_in) - free(in); - - if (flush != NULL) - free(b.out); - } - - if (in_used != NULL) - *in_used += b.in_pos; - - xz_dec_end(s); - - switch (ret) { - case XZ_STREAM_END: - return 0; - - case XZ_MEM_ERROR: - /* This can occur only in multi-call mode. 
*/ - error("XZ decompressor ran out of memory"); - break; - - case XZ_FORMAT_ERROR: - error("Input is not in the XZ format (wrong magic bytes)"); - break; - - case XZ_OPTIONS_ERROR: - error("Input was encoded with settings that are not " - "supported by this XZ decoder"); - break; - - case XZ_DATA_ERROR: - case XZ_BUF_ERROR: - error("XZ-compressed data is corrupt"); - break; - - default: - error("Bug in the XZ decompressor"); - break; - } - - return -1; + struct xz_buf b; + struct xz_dec *s; + enum xz_ret ret; + bool_t must_free_in = false; + + xz_crc32_init(); + + if ( in_used != NULL ) + *in_used = 0; + + if ( fill == NULL && flush == NULL ) + s = xz_dec_init(XZ_SINGLE, 0); + else + s = xz_dec_init(XZ_DYNALLOC, (uint32_t)-1); + + if ( s == NULL ) + goto error_alloc_state; + + if ( flush == NULL ) + { + b.out = out; + b.out_size = (size_t)-1; + } + else + { + b.out_size = XZ_IOBUF_SIZE; + b.out = malloc(XZ_IOBUF_SIZE); + if ( b.out == NULL ) + goto error_alloc_out; + } + + if ( in == NULL ) + { + must_free_in = true; + in = malloc(XZ_IOBUF_SIZE); + if ( in == NULL ) + goto error_alloc_in; + } + + b.in = in; + b.in_pos = 0; + b.in_size = in_size; + b.out_pos = 0; + + if ( fill == NULL && flush == NULL ) + { + ret = xz_dec_run(s, &b); + } + else + { + do { + if ( b.in_pos == b.in_size && fill != NULL ) + { + if ( in_used != NULL ) + *in_used += b.in_pos; + + b.in_pos = 0; + + in_size = fill(in, XZ_IOBUF_SIZE); + if ( (int)in_size < 0 ) + { + /* + * This isn't an optimal error code + * but it probably isn't worth making + * a new one either. + */ + ret = XZ_BUF_ERROR; + break; + } + + b.in_size = in_size; + } + + ret = xz_dec_run(s, &b); + + if ( flush != NULL && + (b.out_pos == b.out_size || (ret != XZ_OK && b.out_pos > 0)) ) + { + /* + * Setting ret here may hide an error + * returned by xz_dec_run(), but probably + * it's not too bad. + */ + if ( flush(b.out, b.out_pos) != (int)b.out_pos ) + ret = XZ_BUF_ERROR; + + b.out_pos = 0; + } + } while ( ret == XZ_OK ); + + if ( must_free_in ) + free(in); + + if ( flush != NULL ) + free(b.out); + } + + if ( in_used != NULL ) + *in_used += b.in_pos; + + xz_dec_end(s); + + switch (ret) + { + case XZ_STREAM_END: + return 0; + + case XZ_MEM_ERROR: + /* This can occur only in multi-call mode. */ + error("XZ decompressor ran out of memory"); + break; + + case XZ_FORMAT_ERROR: + error("Input is not in the XZ format (wrong magic bytes)"); + break; + + case XZ_OPTIONS_ERROR: + error("Input was encoded with settings that are not " + "supported by this XZ decoder"); + break; + + case XZ_DATA_ERROR: + case XZ_BUF_ERROR: + error("XZ-compressed data is corrupt"); + break; + + default: + error("Bug in the XZ decompressor"); + break; + } + + return -1; error_alloc_in: - if (flush != NULL) - free(b.out); + if ( flush != NULL ) + free(b.out); error_alloc_out: - xz_dec_end(s); + xz_dec_end(s); error_alloc_state: - error("XZ decompressor ran out of memory"); - return -1; + error("XZ decompressor ran out of memory"); + return -1; } /* diff --git a/xen/common/version.c b/xen/common/version.c index 223cb52e69..d2490fc377 100644 --- a/xen/common/version.c +++ b/xen/common/version.c @@ -88,8 +88,8 @@ int xen_build_id(const void **p, unsigned int *len) /* Defined in linker script. 
*/ extern const Elf_Note __note_gnu_build_id_start[], __note_gnu_build_id_end[]; -int xen_build_id_check(const Elf_Note *n, unsigned int n_sz, - const void **p, unsigned int *len) +int xen_build_id_check(const Elf_Note *n, unsigned int n_sz, const void **p, + unsigned int *len) { /* Check if we really have a build-id. */ ASSERT(n_sz > sizeof(*n)); @@ -100,7 +100,7 @@ int xen_build_id_check(const Elf_Note *n, unsigned int n_sz, if ( n->namesz + n->descsz < n->namesz ) return -EINVAL; - if ( n->namesz < 4 /* GNU\0 */) + if ( n->namesz < 4 /* GNU\0 */ ) return -EINVAL; if ( n->namesz + n->descsz > n_sz - sizeof(*n) ) diff --git a/xen/common/virtual_region.c b/xen/common/virtual_region.c index aa23918bce..6cc9689c55 100644 --- a/xen/common/virtual_region.c +++ b/xen/common/virtual_region.c @@ -42,7 +42,7 @@ const struct virtual_region *find_text_region(unsigned long addr) const struct virtual_region *region; rcu_read_lock(&rcu_virtual_region_lock); - list_for_each_entry_rcu( region, &virtual_region_list, list ) + list_for_each_entry_rcu(region, &virtual_region_list, list) { if ( (void *)addr >= region->start && (void *)addr < region->end ) { @@ -111,8 +111,7 @@ void __init setup_virtual_regions(const struct exception_table_entry *start, #ifdef CONFIG_X86 __stop_bug_frames_3, #endif - NULL - }; + NULL}; for ( i = 1; bug_frames[i]; i++ ) { diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c index 26cfa2c605..ff515d42aa 100644 --- a/xen/common/vm_event.c +++ b/xen/common/vm_event.c @@ -19,7 +19,6 @@ * along with this program; If not, see . */ - #include #include #include @@ -31,21 +30,18 @@ #include /* for public/io/ring.h macros */ -#define xen_mb() smp_mb() -#define xen_rmb() smp_rmb() -#define xen_wmb() smp_wmb() - -#define vm_event_ring_lock_init(_ved) spin_lock_init(&(_ved)->ring_lock) -#define vm_event_ring_lock(_ved) spin_lock(&(_ved)->ring_lock) -#define vm_event_ring_unlock(_ved) spin_unlock(&(_ved)->ring_lock) - -static int vm_event_enable( - struct domain *d, - struct xen_domctl_vm_event_op *vec, - struct vm_event_domain **ved, - int pause_flag, - int param, - xen_event_channel_notification_t notification_fn) +#define xen_mb() smp_mb() +#define xen_rmb() smp_rmb() +#define xen_wmb() smp_wmb() + +#define vm_event_ring_lock_init(_ved) spin_lock_init(&(_ved)->ring_lock) +#define vm_event_ring_lock(_ved) spin_lock(&(_ved)->ring_lock) +#define vm_event_ring_unlock(_ved) spin_unlock(&(_ved)->ring_lock) + +static int vm_event_enable(struct domain *d, struct xen_domctl_vm_event_op *vec, + struct vm_event_domain **ved, int pause_flag, + int param, + xen_event_channel_notification_t notification_fn) { int rc; unsigned long ring_gfn = d->arch.hvm.params[param]; @@ -59,7 +55,8 @@ static int vm_event_enable( * the ring is in an undefined state and so is the guest. */ if ( (*ved)->ring_page ) - return -EBUSY;; + return -EBUSY; + ; /* The parameter defaults to zero, and it should be * set to something */ @@ -91,8 +88,7 @@ static int vm_event_enable( (*ved)->xen_port = vec->port = rc; /* Prepare ring buffer */ - FRONT_RING_INIT(&(*ved)->front_ring, - (vm_event_sring_t *)(*ved)->ring_page, + FRONT_RING_INIT(&(*ved)->front_ring, (vm_event_sring_t *)(*ved)->ring_page, PAGE_SIZE); /* Save the pause flag for this particular ring. 
*/ @@ -104,9 +100,8 @@ static int vm_event_enable( vm_event_ring_unlock(*ved); return 0; - err: - destroy_ring_for_helper(&(*ved)->ring_page, - (*ved)->ring_pg_struct); +err: + destroy_ring_for_helper(&(*ved)->ring_page, (*ved)->ring_pg_struct); vm_event_ring_unlock(*ved); xfree(*ved); *ved = NULL; @@ -145,7 +140,8 @@ static void vm_event_wake_blocked(struct domain *d, struct vm_event_domain *ved) { int i, j, k; - for (i = ved->last_vcpu_wake_up + 1, j = 0; j < d->max_vcpus; i++, j++) + for ( i = ved->last_vcpu_wake_up + 1, j = 0; j < d->max_vcpus; + i++, j++ ) { k = i % d->max_vcpus; v = d->vcpu[k]; @@ -153,7 +149,7 @@ static void vm_event_wake_blocked(struct domain *d, struct vm_event_domain *ved) continue; if ( !(ved->blocked) || avail_req == 0 ) - break; + break; if ( test_and_clear_bit(ved->pause_flag, &v->pause_flags) ) { @@ -188,7 +184,7 @@ static void vm_event_wake_queued(struct domain *d, struct vm_event_domain *ved) */ void vm_event_wake(struct domain *d, struct vm_event_domain *ved) { - if (!list_empty(&ved->wq.list)) + if ( !list_empty(&ved->wq.list) ) vm_event_wake_queued(d, ved); else vm_event_wake_blocked(d, ved); @@ -212,7 +208,7 @@ static int vm_event_disable(struct domain *d, struct vm_event_domain **ved) free_xen_event_channel(d, (*ved)->xen_port); /* Unblock all vCPUs */ - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { if ( test_and_clear_bit((*ved)->pause_flag, &v->pause_flags) ) { @@ -221,8 +217,7 @@ static int vm_event_disable(struct domain *d, struct vm_event_domain **ved) } } - destroy_ring_for_helper(&(*ved)->ring_page, - (*ved)->ring_pg_struct); + destroy_ring_for_helper(&(*ved)->ring_page, (*ved)->ring_pg_struct); vm_event_cleanup_domain(d); @@ -267,8 +262,7 @@ void vm_event_mark_and_pause(struct vcpu *v, struct vm_event_domain *ved) * overly full and its continued execution would cause stalling and excessive * waiting. The vCPU will be automatically unpaused when the ring clears. */ -void vm_event_put_request(struct domain *d, - struct vm_event_domain *ved, +void vm_event_put_request(struct domain *d, struct vm_event_domain *ved, vm_event_request_t *req) { vm_event_front_ring_t *front_ring; @@ -277,7 +271,7 @@ void vm_event_put_request(struct domain *d, RING_IDX req_prod; struct vcpu *curr = current; - if( !vm_event_check_ring(ved)) + if ( !vm_event_check_ring(ved) ) return; if ( curr->domain != d ) @@ -285,8 +279,8 @@ void vm_event_put_request(struct domain *d, req->flags |= VM_EVENT_FLAG_FOREIGN; #ifndef NDEBUG if ( !(req->flags & VM_EVENT_FLAG_VCPU_PAUSED) ) - gdprintk(XENLOG_G_WARNING, "d%dv%d was not paused.\n", - d->domain_id, req->vcpu_id); + gdprintk(XENLOG_G_WARNING, "d%dv%d was not paused.\n", d->domain_id, + req->vcpu_id); #endif } @@ -315,8 +309,8 @@ void vm_event_put_request(struct domain *d, * See the comments above wake_blocked() for more information * on how this mechanism works to avoid waiting. 
*/ avail_req = vm_event_ring_available(ved); - if( curr->domain == d && avail_req < d->max_vcpus && - !atomic_read(&curr->vm_event_pause_count) ) + if ( curr->domain == d && avail_req < d->max_vcpus && + !atomic_read(&curr->vm_event_pause_count) ) vm_event_mark_and_pause(curr, ved); vm_event_ring_unlock(ved); @@ -447,7 +441,7 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved) void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved) { - if( !vm_event_check_ring(ved) ) + if ( !vm_event_check_ring(ved) ) return; vm_event_ring_lock(ved); @@ -619,14 +613,14 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec, rc = -ENOSYS; - switch ( vec->mode ) + switch (vec->mode) { #ifdef CONFIG_HAS_MEM_PAGING case XEN_DOMCTL_VM_EVENT_OP_PAGING: { rc = -EINVAL; - switch( vec->op ) + switch (vec->op) { case XEN_VM_EVENT_ENABLE: { @@ -685,7 +679,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec, { rc = -EINVAL; - switch( vec->op ) + switch (vec->op) { case XEN_VM_EVENT_ENABLE: /* domain_pause() not required here, see XSA-99 */ @@ -726,7 +720,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec, { rc = -EINVAL; - switch( vec->op ) + switch (vec->op) { case XEN_VM_EVENT_ENABLE: rc = -EOPNOTSUPP; @@ -792,15 +786,14 @@ void vm_event_vcpu_unpause(struct vcpu *v) * All unpause requests as a result of toolstack responses. * Prevent underflow of the vcpu pause count. */ - do - { + do { old = prev; new = old - 1; if ( new < 0 ) { - printk(XENLOG_G_WARNING - "%pv vm_event: Too many unpause attempts\n", v); + printk(XENLOG_G_WARNING "%pv vm_event: Too many unpause attempts\n", + v); return; } diff --git a/xen/common/vmap.c b/xen/common/vmap.c index faebc1ddf1..07e0dfd387 100644 --- a/xen/common/vmap.c +++ b/xen/common/vmap.c @@ -28,11 +28,12 @@ void __init vm_init_type(enum vmap_region type, void *start, void *end) vm_base[type] = start; vm_end[type] = PFN_DOWN(end - start); - vm_low[type]= PFN_UP((vm_end[type] + 7) / 8); + vm_low[type] = PFN_UP((vm_end[type] + 7) / 8); nr = PFN_UP((vm_low[type] + 7) / 8); vm_top[type] = nr * PAGE_SIZE * 8; - for ( i = 0, va = (unsigned long)vm_bitmap(type); i < nr; ++i, va += PAGE_SIZE ) + for ( i = 0, va = (unsigned long)vm_bitmap(type); i < nr; + ++i, va += PAGE_SIZE ) { struct page_info *pg = alloc_domheap_page(NULL, 0); @@ -45,8 +46,7 @@ void __init vm_init_type(enum vmap_region type, void *start, void *end) populate_pt_range(va, vm_low[type] - nr); } -static void *vm_alloc(unsigned int nr, unsigned int align, - enum vmap_region t) +static void *vm_alloc(unsigned int nr, unsigned int align, enum vmap_region t) { unsigned int start, bit; @@ -60,7 +60,7 @@ static void *vm_alloc(unsigned int nr, unsigned int align, return NULL; spin_lock(&vm_lock); - for ( ; ; ) + for ( ;; ) { struct page_info *pg; @@ -153,8 +153,9 @@ static unsigned int vm_index(const void *va, enum vmap_region type) return 0; idx = PFN_DOWN(va - vm_base[type]); - return !test_bit(idx - 1, vm_bitmap(type)) && - test_bit(idx, vm_bitmap(type)) ? idx : 0; + return !test_bit(idx - 1, vm_bitmap(type)) && test_bit(idx, vm_bitmap(type)) + ? 
idx + : 0; } static unsigned int vm_size(const void *va, enum vmap_region type) @@ -199,9 +200,8 @@ static void vm_free(const void *va) spin_unlock(&vm_lock); } -void *__vmap(const mfn_t *mfn, unsigned int granularity, - unsigned int nr, unsigned int align, unsigned int flags, - enum vmap_region type) +void *__vmap(const mfn_t *mfn, unsigned int granularity, unsigned int nr, + unsigned int align, unsigned int flags, enum vmap_region type) { void *va = vm_alloc(nr * granularity, align, type); unsigned long cur = (unsigned long)va; @@ -268,7 +268,7 @@ static void *vmalloc_type(size_t size, enum vmap_region type) xfree(mfn); return va; - error: +error: while ( i-- ) free_domheap_page(mfn_to_page(mfn[i])); xfree(mfn); diff --git a/xen/common/vsprintf.c b/xen/common/vsprintf.c index 352d43b425..5173739a05 100644 --- a/xen/common/vsprintf.c +++ b/xen/common/vsprintf.c @@ -9,7 +9,7 @@ * Wirzenius wrote this portably, Torvalds fucked it up :-) */ -/* +/* * Fri Jul 13 2001 Crutcher Dunnavant * - changed to provide snprintf and vsnprintf functions * So Feb 1 16:51:32 CET 2004 Juergen Quade @@ -30,31 +30,38 @@ * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use */ -unsigned long simple_strtoul( - const char *cp, const char **endp, unsigned int base) +unsigned long simple_strtoul(const char *cp, const char **endp, + unsigned int base) { - unsigned long result = 0,value; + unsigned long result = 0, value; - if (!base) { + if ( !base ) + { base = 10; - if (*cp == '0') { + if ( *cp == '0' ) + { base = 8; cp++; - if ((toupper(*cp) == 'X') && isxdigit(cp[1])) { + if ( (toupper(*cp) == 'X') && isxdigit(cp[1]) ) + { cp++; base = 16; } } - } else if (base == 16) { - if (cp[0] == '0' && toupper(cp[1]) == 'X') + } + else if ( base == 16 ) + { + if ( cp[0] == '0' && toupper(cp[1]) == 'X' ) cp += 2; } - while (isxdigit(*cp) && - (value = isdigit(*cp) ? *cp-'0' : toupper(*cp)-'A'+10) < base) { - result = result*base + value; + while ( isxdigit(*cp) && + (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < + base ) + { + result = result * base + value; cp++; } - if (endp) + if ( endp ) *endp = cp; return result; } @@ -69,9 +76,9 @@ EXPORT_SYMBOL(simple_strtoul); */ long simple_strtol(const char *cp, const char **endp, unsigned int base) { - if(*cp=='-') - return -simple_strtoul(cp+1,endp,base); - return simple_strtoul(cp,endp,base); + if ( *cp == '-' ) + return -simple_strtoul(cp + 1, endp, base); + return simple_strtoul(cp, endp, base); } EXPORT_SYMBOL(simple_strtol); @@ -82,31 +89,39 @@ EXPORT_SYMBOL(simple_strtol); * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use */ -unsigned long long simple_strtoull( - const char *cp, const char **endp, unsigned int base) +unsigned long long simple_strtoull(const char *cp, const char **endp, + unsigned int base) { - unsigned long long result = 0,value; + unsigned long long result = 0, value; - if (!base) { + if ( !base ) + { base = 10; - if (*cp == '0') { + if ( *cp == '0' ) + { base = 8; cp++; - if ((toupper(*cp) == 'X') && isxdigit(cp[1])) { + if ( (toupper(*cp) == 'X') && isxdigit(cp[1]) ) + { cp++; base = 16; } } - } else if (base == 16) { - if (cp[0] == '0' && toupper(cp[1]) == 'X') + } + else if ( base == 16 ) + { + if ( cp[0] == '0' && toupper(cp[1]) == 'X' ) cp += 2; } - while (isxdigit(*cp) && (value = isdigit(*cp) ? *cp-'0' : (islower(*cp) - ? 
toupper(*cp) : *cp)-'A'+10) < base) { - result = result*base + value; + while ( isxdigit(*cp) && + (value = isdigit(*cp) ? *cp - '0' + : (islower(*cp) ? toupper(*cp) : *cp) - 'A' + + 10) < base ) + { + result = result * base + value; cp++; } - if (endp) + if ( endp ) *endp = cp; return result; } @@ -119,35 +134,34 @@ EXPORT_SYMBOL(simple_strtoull); * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use */ -long long simple_strtoll(const char *cp,const char **endp,unsigned int base) +long long simple_strtoll(const char *cp, const char **endp, unsigned int base) { - if(*cp=='-') - return -simple_strtoull(cp+1,endp,base); - return simple_strtoull(cp,endp,base); + if ( *cp == '-' ) + return -simple_strtoull(cp + 1, endp, base); + return simple_strtoull(cp, endp, base); } static int skip_atoi(const char **s) { - int i=0; + int i = 0; - while (isdigit(**s)) - i = i*10 + *((*s)++) - '0'; + while ( isdigit(**s) ) + i = i * 10 + *((*s)++) - '0'; return i; } -#define ZEROPAD 1 /* pad with zero */ -#define SIGN 2 /* unsigned/signed long */ -#define PLUS 4 /* show plus */ -#define SPACE 8 /* space if plus */ -#define LEFT 16 /* left justified */ -#define SPECIAL 32 /* 0x */ -#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ - -static char *number( - char *buf, char *end, unsigned long long num, - int base, int size, int precision, int type) +#define ZEROPAD 1 /* pad with zero */ +#define SIGN 2 /* unsigned/signed long */ +#define PLUS 4 /* show plus */ +#define SPACE 8 /* space if plus */ +#define LEFT 16 /* left justified */ +#define SPECIAL 32 /* 0x */ +#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ + +static char *number(char *buf, char *end, unsigned long long num, int base, + int size, int precision, int type) { - char c,sign,tmp[66]; + char c, sign, tmp[66]; const char *digits; static const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; static const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; @@ -156,107 +170,130 @@ static char *number( ASSERT(base >= 2 && base <= 36); digits = (type & LARGE) ? large_digits : small_digits; - if (type & LEFT) + if ( type & LEFT ) type &= ~ZEROPAD; c = (type & ZEROPAD) ? 
'0' : ' '; sign = 0; - if (type & SIGN) { - if ((signed long long) num < 0) { + if ( type & SIGN ) + { + if ( (signed long long)num < 0 ) + { sign = '-'; - num = - (signed long long) num; + num = -(signed long long)num; size--; - } else if (type & PLUS) { + } + else if ( type & PLUS ) + { sign = '+'; size--; - } else if (type & SPACE) { + } + else if ( type & SPACE ) + { sign = ' '; size--; } } - if (type & SPECIAL) { - if (num == 0) + if ( type & SPECIAL ) + { + if ( num == 0 ) type &= ~SPECIAL; - else if (base == 16) + else if ( base == 16 ) size -= 2; - else if (base == 8) + else if ( base == 8 ) size--; else type &= ~SPECIAL; } i = 0; - if (num == 0) - tmp[i++]='0'; - else while (num != 0) - tmp[i++] = digits[do_div(num,base)]; - if (i > precision) + if ( num == 0 ) + tmp[i++] = '0'; + else + while ( num != 0 ) + tmp[i++] = digits[do_div(num, base)]; + if ( i > precision ) precision = i; size -= precision; - if (!(type&(ZEROPAD+LEFT))) { - while(size-->0) { - if (buf < end) + if ( !(type & (ZEROPAD + LEFT)) ) + { + while ( size-- > 0 ) + { + if ( buf < end ) *buf = ' '; ++buf; } } - if (sign) { - if (buf < end) + if ( sign ) + { + if ( buf < end ) *buf = sign; ++buf; } - if (type & SPECIAL) { - if (buf < end) + if ( type & SPECIAL ) + { + if ( buf < end ) *buf = '0'; ++buf; - if (base == 16) { - if (buf < end) + if ( base == 16 ) + { + if ( buf < end ) *buf = digits[33]; ++buf; } } - if (!(type & LEFT)) { - while (size-- > 0) { - if (buf < end) + if ( !(type & LEFT) ) + { + while ( size-- > 0 ) + { + if ( buf < end ) *buf = c; ++buf; } } - while (i < precision--) { - if (buf < end) + while ( i < precision-- ) + { + if ( buf < end ) *buf = '0'; ++buf; } - while (i-- > 0) { - if (buf < end) + while ( i-- > 0 ) + { + if ( buf < end ) *buf = tmp[i]; ++buf; } - while (size-- > 0) { - if (buf < end) + while ( size-- > 0 ) + { + if ( buf < end ) *buf = ' '; ++buf; } return buf; } -static char *string(char *str, char *end, const char *s, - int field_width, int precision, int flags) +static char *string(char *str, char *end, const char *s, int field_width, + int precision, int flags) { int i, len = (precision < 0) ? strlen(s) : strnlen(s, precision); - if (!(flags & LEFT)) { - while (len < field_width--) { - if (str < end) + if ( !(flags & LEFT) ) + { + while ( len < field_width-- ) + { + if ( str < end ) *str = ' '; ++str; } } - for (i = 0; i < len; ++i) { - if (str < end) + for ( i = 0; i < len; ++i ) + { + if ( str < end ) *str = *s; - ++str; ++s; + ++str; + ++s; } - while (len < field_width--) { - if (str < end) + while ( len < field_width-- ) + { + if ( str < end ) *str = ' '; ++str; } @@ -265,8 +302,9 @@ static char *string(char *str, char *end, const char *s, } /* Print a bitmap as '0-3,6-15' */ -static char *print_bitmap_list( - char *str, char *end, const unsigned long *bitmap, unsigned int nr_bits) +static char *print_bitmap_list(char *str, char *end, + const unsigned long *bitmap, + unsigned int nr_bits) { /* current bit is 'cur', most recently seen range is [rbot, rtop] */ unsigned int cur, rbot, rtop; @@ -306,8 +344,9 @@ static char *print_bitmap_list( } /* Print a bitmap as a comma separated hex string. 
*/ -static char *print_bitmap_string( - char *str, char *end, const unsigned long *bitmap, unsigned int nr_bits) +static char *print_bitmap_string(char *str, char *end, + const unsigned long *bitmap, + unsigned int nr_bits) { const unsigned int CHUNKSZ = 32; unsigned int chunksz; @@ -326,9 +365,9 @@ static char *print_bitmap_string( for ( i = ROUNDUP(nr_bits, CHUNKSZ) - CHUNKSZ; i >= 0; i -= CHUNKSZ ) { unsigned int chunkmask = (1ull << chunksz) - 1; - unsigned int word = i / BITS_PER_LONG; - unsigned int offset = i % BITS_PER_LONG; - unsigned long val = (bitmap[word] >> offset) & chunkmask; + unsigned int word = i / BITS_PER_LONG; + unsigned int offset = i % BITS_PER_LONG; + unsigned long val = (bitmap[word] >> offset) & chunkmask; if ( !first ) { @@ -355,12 +394,20 @@ static char *print_domain(char *str, char *end, const struct domain *d) if ( unlikely(!d) ) return string(str, end, "NULL", -1, -1, 0); - switch ( d->domain_id ) + switch (d->domain_id) { - case DOMID_IO: name = "[IO]"; break; - case DOMID_XEN: name = "[XEN]"; break; - case DOMID_COW: name = "[COW]"; break; - case DOMID_IDLE: name = "[IDLE]"; break; + case DOMID_IO: + name = "[IO]"; + break; + case DOMID_XEN: + name = "[XEN]"; + break; + case DOMID_COW: + name = "[COW]"; + break; + case DOMID_IDLE: + name = "[IDLE]"; + break; /* * In principle, we could ASSERT_UNREACHABLE() in the default case. * However, this path is used to print out crash information, which @@ -393,13 +440,12 @@ static char *print_vcpu(char *str, char *end, const struct vcpu *v) } static char *pointer(char *str, char *end, const char **fmt_ptr, - const void *arg, int field_width, int precision, - int flags) + const void *arg, int field_width, int precision, int flags) { const char *fmt = *fmt_ptr, *s; /* Custom %p suffixes. See XEN_ROOT/docs/misc/printk-formats.txt */ - switch ( fmt[1] ) + switch (fmt[1]) { case 'b': /* Bitmap as hex, or list */ ++*fmt_ptr; @@ -439,7 +485,7 @@ static char *pointer(char *str, char *end, const char **fmt_ptr, * Peek ahead in the format string to see if a recognised separator * modifier is present. */ - switch ( fmt[2] ) + switch (fmt[2]) { case 'C': /* Colons. */ ++*fmt_ptr; @@ -457,7 +503,7 @@ static char *pointer(char *str, char *end, const char **fmt_ptr, break; } - for ( i = 0; ; ) + for ( i = 0;; ) { /* Each byte: 2 chars, 0-padded, base 16, no hex prefix. */ str = number(str, end, hex_buffer[i], 16, 2, -1, ZEROPAD); @@ -478,7 +524,7 @@ static char *pointer(char *str, char *end, const char **fmt_ptr, case 'S': /* Symbol name unconditionally with offset and size */ { unsigned long sym_size, sym_offset; - char namebuf[KSYM_NAME_LEN+1]; + char namebuf[KSYM_NAME_LEN + 1]; /* Advance parents fmt string, as we have consumed 's' or 'S' */ ++*fmt_ptr; @@ -495,7 +541,8 @@ static char *pointer(char *str, char *end, const char **fmt_ptr, if ( fmt[1] == 'S' || sym_offset != 0 ) { /* Print '+/' */ - str = number(str, end, sym_offset, 16, -1, -1, SPECIAL|SIGN|PLUS); + str = + number(str, end, sym_offset, 16, -1, -1, SPECIAL | SIGN | PLUS); if ( str < end ) *str = '/'; ++str; @@ -503,8 +550,9 @@ static char *pointer(char *str, char *end, const char **fmt_ptr, } /* - * namebuf contents and s for core hypervisor are same but for Live Patch - * payloads they differ (namebuf contains the name of the payload). + * namebuf contents and s for core hypervisor are same but for Live + * Patch payloads they differ (namebuf contains the name of the + * payload). 
*/ if ( namebuf != s ) { @@ -527,8 +575,8 @@ static char *pointer(char *str, char *end, const char **fmt_ptr, flags |= ZEROPAD; } - return number(str, end, (unsigned long)arg, - 16, field_width, precision, flags); + return number(str, end, (unsigned long)arg, 16, field_width, precision, + flags); } /** @@ -556,14 +604,14 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) char *str, *end, c; const char *s; - int flags; /* flags to number() */ + int flags; /* flags to number() */ - int field_width; /* width of output field */ - int precision; /* min. # of digits for integers; max - number of chars for from string */ - int qualifier; /* 'h', 'l', or 'L' for integer fields */ - /* 'z' support added 23/7/1999 S.H. */ - /* 'z' changed to 'Z' --davidm 1/25/99 */ + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + number of chars for from string */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. */ + /* 'z' changed to 'Z' --davidm 1/25/99 */ /* Reject out-of-range values early */ BUG_ON(((int)size < 0) || ((unsigned int)size != size)); @@ -571,14 +619,17 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) str = buf; end = buf + size; - if (end < buf) { - end = ((void *) -1); + if ( end < buf ) + { + end = ((void *)-1); size = end - buf; } - for (; *fmt ; ++fmt) { - if (*fmt != '%') { - if (str < end) + for ( ; *fmt; ++fmt ) + { + if ( *fmt != '%' ) + { + if ( str < end ) *str = *fmt; ++str; continue; @@ -587,24 +638,37 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) /* process flags */ flags = 0; repeat: - ++fmt; /* this also skips first '%' */ - switch (*fmt) { - case '-': flags |= LEFT; goto repeat; - case '+': flags |= PLUS; goto repeat; - case ' ': flags |= SPACE; goto repeat; - case '#': flags |= SPECIAL; goto repeat; - case '0': flags |= ZEROPAD; goto repeat; + ++fmt; /* this also skips first '%' */ + switch (*fmt) + { + case '-': + flags |= LEFT; + goto repeat; + case '+': + flags |= PLUS; + goto repeat; + case ' ': + flags |= SPACE; + goto repeat; + case '#': + flags |= SPECIAL; + goto repeat; + case '0': + flags |= ZEROPAD; + goto repeat; } /* get field width */ field_width = -1; - if (isdigit(*fmt)) + if ( isdigit(*fmt) ) field_width = skip_atoi(&fmt); - else if (*fmt == '*') { + else if ( *fmt == '*' ) + { ++fmt; /* it's the next argument */ field_width = va_arg(args, int); - if (field_width < 0) { + if ( field_width < 0 ) + { field_width = -field_width; flags |= LEFT; } @@ -612,26 +676,30 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) /* get the precision */ precision = -1; - if (*fmt == '.') { + if ( *fmt == '.' 
) + { ++fmt; - if (isdigit(*fmt)) + if ( isdigit(*fmt) ) precision = skip_atoi(&fmt); - else if (*fmt == '*') { + else if ( *fmt == '*' ) + { ++fmt; - /* it's the next argument */ + /* it's the next argument */ precision = va_arg(args, int); } - if (precision < 0) + if ( precision < 0 ) precision = 0; } /* get the conversion qualifier */ qualifier = -1; - if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || - *fmt =='Z' || *fmt == 'z') { + if ( *fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt == 'Z' || + *fmt == 'z' ) + { qualifier = *fmt; ++fmt; - if (qualifier == 'l' && *fmt == 'l') { + if ( qualifier == 'l' && *fmt == 'l' ) + { qualifier = 'L'; ++fmt; } @@ -640,21 +708,25 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) /* default base */ base = 10; - switch (*fmt) { + switch (*fmt) + { case 'c': - if (!(flags & LEFT)) { - while (--field_width > 0) { - if (str < end) + if ( !(flags & LEFT) ) + { + while ( --field_width > 0 ) + { + if ( str < end ) *str = ' '; ++str; } } - c = (unsigned char) va_arg(args, int); - if (str < end) + c = (unsigned char)va_arg(args, int); + if ( str < end ) *str = c; ++str; - while (--field_width > 0) { - if (str < end) + while ( --field_width > 0 ) + { + if ( str < end ) *str = ' '; ++str; } @@ -662,7 +734,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) case 's': s = va_arg(args, char *); - if ((unsigned long)s < PAGE_SIZE) + if ( (unsigned long)s < PAGE_SIZE ) s = ""; str = string(str, end, s, field_width, precision, flags); @@ -674,22 +746,26 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) field_width, precision, flags); continue; - case 'n': - if (qualifier == 'l') { - long * ip = va_arg(args, long *); + if ( qualifier == 'l' ) + { + long *ip = va_arg(args, long *); *ip = (str - buf); - } else if (qualifier == 'Z' || qualifier == 'z') { - size_t * ip = va_arg(args, size_t *); + } + else if ( qualifier == 'Z' || qualifier == 'z' ) + { + size_t *ip = va_arg(args, size_t *); *ip = (str - buf); - } else { - int * ip = va_arg(args, int *); + } + else + { + int *ip = va_arg(args, int *); *ip = (str - buf); } continue; case '%': - if (str < end) + if ( str < end ) *str = '%'; ++str; continue; @@ -712,43 +788,53 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) break; default: - if (str < end) + if ( str < end ) *str = '%'; ++str; - if (*fmt) { - if (str < end) + if ( *fmt ) + { + if ( str < end ) *str = *fmt; ++str; - } else { + } + else + { --fmt; } continue; } - if (qualifier == 'L') + if ( qualifier == 'L' ) num = va_arg(args, long long); - else if (qualifier == 'l') { + else if ( qualifier == 'l' ) + { num = va_arg(args, unsigned long); - if (flags & SIGN) - num = (signed long) num; - } else if (qualifier == 'Z' || qualifier == 'z') { + if ( flags & SIGN ) + num = (signed long)num; + } + else if ( qualifier == 'Z' || qualifier == 'z' ) + { num = va_arg(args, size_t); - } else if (qualifier == 'h') { - num = (unsigned short) va_arg(args, int); - if (flags & SIGN) - num = (signed short) num; - } else { + } + else if ( qualifier == 'h' ) + { + num = (unsigned short)va_arg(args, int); + if ( flags & SIGN ) + num = (signed short)num; + } + else + { num = va_arg(args, unsigned int); - if (flags & SIGN) - num = (signed int) num; + if ( flags & SIGN ) + num = (signed int)num; } - str = number(str, end, num, base, - field_width, precision, flags); + str = number(str, end, num, base, field_width, precision, flags); } /* don't write out a null byte if the buf size is zero */ - 
if (size > 0) { - if (str < end) + if ( size > 0 ) + { + if ( str < end ) *str = '\0'; else end[-1] = '\0'; @@ -756,7 +842,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) /* the trailing null byte doesn't count towards the total * ++str; */ - return str-buf; + return str - buf; } EXPORT_SYMBOL(vsnprintf); @@ -779,8 +865,8 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) { int i; - i = vsnprintf(buf,size,fmt,args); - if (i >= size) + i = vsnprintf(buf, size, fmt, args); + if ( i >= size ) i = size - 1; return (i > 0) ? i : 0; } @@ -799,13 +885,13 @@ EXPORT_SYMBOL(vscnprintf); * as per ISO C99. If the return is greater than or equal to * @size, the resulting string is truncated. */ -int snprintf(char * buf, size_t size, const char *fmt, ...) +int snprintf(char *buf, size_t size, const char *fmt, ...) { va_list args; int i; va_start(args, fmt); - i=vsnprintf(buf,size,fmt,args); + i = vsnprintf(buf, size, fmt, args); va_end(args); return i; } @@ -824,7 +910,7 @@ EXPORT_SYMBOL(snprintf); * greater than or equal to @size, the resulting string is truncated. */ -int scnprintf(char * buf, size_t size, const char *fmt, ...) +int scnprintf(char *buf, size_t size, const char *fmt, ...) { va_list args; int i; @@ -832,7 +918,7 @@ int scnprintf(char * buf, size_t size, const char *fmt, ...) va_start(args, fmt); i = vsnprintf(buf, size, fmt, args); va_end(args); - if (i >= size) + if ( i >= size ) i = size - 1; return (i > 0) ? i : 0; } @@ -864,7 +950,7 @@ int vasprintf(char **bufp, const char *fmt, va_list args) if ( !buf ) return -ENOMEM; - (void) vsnprintf(buf, size, fmt, args); + (void)vsnprintf(buf, size, fmt, args); *bufp = buf; return 0; @@ -887,7 +973,7 @@ int asprintf(char **bufp, const char *fmt, ...) int i; va_start(args, fmt); - i=vasprintf(bufp,fmt,args); + i = vasprintf(bufp, fmt, args); va_end(args); return i; } diff --git a/xen/common/wait.c b/xen/common/wait.c index 4f830a14e8..3a20f42c72 100644 --- a/xen/common/wait.c +++ b/xen/common/wait.c @@ -1,20 +1,20 @@ /****************************************************************************** * wait.c - * + * * Sleep in hypervisor context for some event to occur. - * + * * Copyright (c) 2010, Keir Fraser - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; If not, see . */ @@ -24,7 +24,8 @@ #include #include -struct waitqueue_vcpu { +struct waitqueue_vcpu +{ struct list_head list; struct vcpu *vcpu; #ifdef CONFIG_X86 @@ -138,41 +139,40 @@ static void __prepare_to_wait(struct waitqueue_vcpu *wqv) gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n"); domain_crash(current->domain); - for ( ; ; ) + for ( ;; ) do_softirq(); } /* Hand-rolled setjmp(). 
*/ - asm volatile ( - "push %%rax; push %%rbx; push %%rdx; push %%rbp;" - "push %%r8; push %%r9; push %%r10; push %%r11;" - "push %%r12; push %%r13; push %%r14; push %%r15;" - - "call 1f;" - "1: addq $2f-1b,(%%rsp);" - "sub %%esp,%%ecx;" - "cmp %3,%%ecx;" - "ja 3f;" - "mov %%rsp,%%rsi;" - - /* check_wakeup_from_wait() longjmp()'s to this point. */ - "2: rep movsb;" - "mov %%rsp,%%rsi;" - "3: pop %%rax;" - - "pop %%r15; pop %%r14; pop %%r13; pop %%r12;" - "pop %%r11; pop %%r10; pop %%r9; pop %%r8;" - "pop %%rbp; pop %%rdx; pop %%rbx; pop %%rax" - : "=&S" (wqv->esp), "=&c" (dummy), "=&D" (dummy) - : "i" (PAGE_SIZE), "0" (0), "1" (cpu_info), "2" (wqv->stack) - : "memory" ); + asm volatile("push %%rax; push %%rbx; push %%rdx; push %%rbp;" + "push %%r8; push %%r9; push %%r10; push %%r11;" + "push %%r12; push %%r13; push %%r14; push %%r15;" + + "call 1f;" + "1: addq $2f-1b,(%%rsp);" + "sub %%esp,%%ecx;" + "cmp %3,%%ecx;" + "ja 3f;" + "mov %%rsp,%%rsi;" + + /* check_wakeup_from_wait() longjmp()'s to this point. */ + "2: rep movsb;" + "mov %%rsp,%%rsi;" + "3: pop %%rax;" + + "pop %%r15; pop %%r14; pop %%r13; pop %%r12;" + "pop %%r11; pop %%r10; pop %%r9; pop %%r8;" + "pop %%rbp; pop %%rdx; pop %%rbx; pop %%rax" + : "=&S"(wqv->esp), "=&c"(dummy), "=&D"(dummy) + : "i"(PAGE_SIZE), "0"(0), "1"(cpu_info), "2"(wqv->stack) + : "memory"); if ( unlikely(wqv->esp == 0) ) { gdprintk(XENLOG_ERR, "Stack too large in %s\n", __func__); domain_crash(current->domain); - for ( ; ; ) + for ( ;; ) do_softirq(); } @@ -213,12 +213,12 @@ void check_wakeup_from_wait(void) * wqv->stack, and lands on a `rep movs` instruction. All other GPRs are * restored from the stack, so are available for use here. */ - asm volatile ( - "mov %1,%%"__OP"sp; INDIRECT_JMP %[ip]" - : : "S" (wqv->stack), "D" (wqv->esp), - "c" ((char *)get_cpu_info() - (char *)wqv->esp), - [ip] "r" (*(unsigned long *)wqv->stack) - : "memory" ); + asm volatile("mov %1,%%" __OP "sp; INDIRECT_JMP %[ip]" + : + : "S"(wqv->stack), "D"(wqv->esp), + "c"((char *)get_cpu_info() - (char *)wqv->esp), + [ip] "r"(*(unsigned long *)wqv->stack) + : "memory"); unreachable(); } diff --git a/xen/common/xenoprof.c b/xen/common/xenoprof.c index 8a72e382e6..5cc2e495dd 100644 --- a/xen/common/xenoprof.c +++ b/xen/common/xenoprof.c @@ -68,7 +68,7 @@ int acquire_pmu_ownership(int pmu_ownership) spin_unlock(&pmu_owner_lock); return 0; - out: +out: if ( pmu_owner == PMU_OWNER_HVM ) pmu_hvm_refcount++; spin_unlock(&pmu_owner_lock); @@ -137,8 +137,8 @@ static void xenoprof_reset_buf(struct domain *d) } } -static int -share_xenoprof_page_with_guest(struct domain *d, mfn_t mfn, int npages) +static int share_xenoprof_page_with_guest(struct domain *d, mfn_t mfn, + int npages) { int i; @@ -147,7 +147,7 @@ share_xenoprof_page_with_guest(struct domain *d, mfn_t mfn, int npages) { struct page_info *page = mfn_to_page(mfn_add(mfn, i)); - if ( (page->count_info & (PGC_allocated|PGC_count_mask)) != 0 ) + if ( (page->count_info & (PGC_allocated | PGC_count_mask)) != 0 ) { printk(XENLOG_G_INFO "dom%d mfn %#lx page->count_info %#lx\n", d->domain_id, mfn_x(mfn_add(mfn, i)), page->count_info); @@ -162,8 +162,7 @@ share_xenoprof_page_with_guest(struct domain *d, mfn_t mfn, int npages) return 0; } -static void -unshare_xenoprof_page_with_guest(struct xenoprof *x) +static void unshare_xenoprof_page_with_guest(struct xenoprof *x) { int i, npages = x->npages; mfn_t mfn = virt_to_mfn(x->rawbuf); @@ -178,9 +177,9 @@ unshare_xenoprof_page_with_guest(struct xenoprof *x) } } -static void 
-xenoprof_shared_gmfn_with_guest( - struct domain *d, unsigned long maddr, unsigned long gmaddr, int npages) +static void xenoprof_shared_gmfn_with_guest(struct domain *d, + unsigned long maddr, + unsigned long gmaddr, int npages) { int i; @@ -190,12 +189,11 @@ xenoprof_shared_gmfn_with_guest( if ( i == 0 ) gdprintk(XENLOG_WARNING, "xenoprof unsupported with autotranslated guests\n"); - } } -static int alloc_xenoprof_struct( - struct domain *d, int max_samples, int is_passive) +static int alloc_xenoprof_struct(struct domain *d, int max_samples, + int is_passive) { struct vcpu *v; int nvcpu, npages, bufsize, max_bufsize; @@ -203,7 +201,7 @@ static int alloc_xenoprof_struct( int i; nvcpu = 0; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) nvcpu++; if ( !nvcpu ) @@ -228,7 +226,8 @@ static int alloc_xenoprof_struct( bufsize = sizeof(struct xenoprof_buf); i = sizeof(struct event_log); #ifdef CONFIG_COMPAT - d->xenoprof->is_compat = is_pv_32bit_domain(is_passive ? hardware_domain : d); + d->xenoprof->is_compat = + is_pv_32bit_domain(is_passive ? hardware_domain : d); if ( XENOPROF_COMPAT(d->xenoprof) ) { bufsize = sizeof(struct compat_oprof_buf); @@ -238,7 +237,7 @@ static int alloc_xenoprof_struct( /* reduce max_samples if necessary to limit pages allocated */ max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu; - max_max_samples = ( (max_bufsize - bufsize) / i ) + 1; + max_max_samples = ((max_bufsize - bufsize) / i) + 1; if ( (unsigned)max_samples > max_max_samples ) max_samples = max_max_samples; @@ -262,10 +261,10 @@ static int alloc_xenoprof_struct( /* Update buffer pointers for active vcpus */ i = 0; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { - xenoprof_buf_t *buf = (xenoprof_buf_t *) - &d->xenoprof->rawbuf[i * bufsize]; + xenoprof_buf_t *buf = + (xenoprof_buf_t *)&d->xenoprof->rawbuf[i * bufsize]; d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples; d->xenoprof->vcpu[v->vcpu_id].buffer = buf; @@ -277,7 +276,7 @@ static int alloc_xenoprof_struct( if ( i >= nvcpu ) break; } - + return 0; } @@ -445,8 +444,7 @@ static int add_passive_list(XEN_GUEST_HANDLE_PARAM(void) arg) } ret = share_xenoprof_page_with_guest( - current->domain, virt_to_mfn(d->xenoprof->rawbuf), - d->xenoprof->npages); + current->domain, virt_to_mfn(d->xenoprof->rawbuf), d->xenoprof->npages); if ( ret < 0 ) { put_domain(d); @@ -460,24 +458,23 @@ static int add_passive_list(XEN_GUEST_HANDLE_PARAM(void) arg) passive.buf_gmaddr = __pa(d->xenoprof->rawbuf); else xenoprof_shared_gmfn_with_guest( - current->domain, __pa(d->xenoprof->rawbuf), - passive.buf_gmaddr, d->xenoprof->npages); + current->domain, __pa(d->xenoprof->rawbuf), passive.buf_gmaddr, + d->xenoprof->npages); if ( __copy_to_guest(arg, &passive, 1) ) { put_domain(d); return -EFAULT; } - + passive_domains[pdomains] = d; pdomains++; return ret; } - /* Get space in the buffer */ -static int xenoprof_buf_space(struct domain *d, xenoprof_buf_t * buf, int size) +static int xenoprof_buf_space(struct domain *d, xenoprof_buf_t *buf, int size) { int head, tail; @@ -496,7 +493,7 @@ static int xenoprof_add_sample(struct domain *d, xenoprof_buf_t *buf, head = xenoprof_buf(d, buf, event_head); tail = xenoprof_buf(d, buf, event_tail); size = xenoprof_buf(d, buf, event_size); - + /* make sure indexes in shared buffer are sane */ if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) ) { @@ -512,7 +509,7 @@ static int xenoprof_add_sample(struct domain *d, xenoprof_buf_t *buf, head++; if ( head >= size ) head = 0; - + xenoprof_buf(d, buf, event_head) = 
head; } else @@ -562,14 +559,14 @@ void xenoprof_log_event(struct vcpu *vcpu, const struct cpu_user_regs *regs, invalid_buffer_samples++; return; } - + buf = v->buffer; /* Provide backtrace if requested. */ if ( backtrace_depth > 0 ) { if ( (xenoprof_buf_space(d, buf, v->event_size) < 2) || - !xenoprof_add_sample(d, buf, XENOPROF_ESCAPE_CODE, mode, + !xenoprof_add_sample(d, buf, XENOPROF_ESCAPE_CODE, mode, XENOPROF_TRACE_BEGIN) ) { xenoprof_buf(d, buf, lost_samples)++; @@ -590,15 +587,12 @@ void xenoprof_log_event(struct vcpu *vcpu, const struct cpu_user_regs *regs, xenoprof_buf(d, buf, kernel_samples)++; else xenoprof_buf(d, buf, xen_samples)++; - } if ( backtrace_depth > 0 ) xenoprof_backtrace(vcpu, regs, backtrace_depth, mode); } - - static int xenoprof_op_init(XEN_GUEST_HANDLE_PARAM(void) arg) { struct domain *d = current->domain; @@ -618,7 +612,7 @@ static int xenoprof_op_init(XEN_GUEST_HANDLE_PARAM(void) arg) * crashes. Once a better cleanup method is present, it will be possible to * allow another domain to be the primary profiler. */ - xenoprof_init.is_primary = + xenoprof_init.is_primary = ((xenoprof_primary_profiler == d) || ((xenoprof_primary_profiler == NULL) && is_hardware_domain(d))); if ( xenoprof_init.is_primary ) @@ -651,38 +645,37 @@ static int xenoprof_op_get_buffer(XEN_GUEST_HANDLE_PARAM(void) arg) return ret; } - ret = share_xenoprof_page_with_guest( - d, virt_to_mfn(d->xenoprof->rawbuf), d->xenoprof->npages); + ret = share_xenoprof_page_with_guest(d, virt_to_mfn(d->xenoprof->rawbuf), + d->xenoprof->npages); if ( ret < 0 ) return ret; xenoprof_reset_buf(d); - d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED; + d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED; d->xenoprof->domain_ready = 0; - d->xenoprof->is_primary = (xenoprof_primary_profiler == current->domain); - + d->xenoprof->is_primary = (xenoprof_primary_profiler == current->domain); + xenoprof_get_buffer.nbuf = d->xenoprof->nbuf; xenoprof_get_buffer.bufsize = d->xenoprof->bufsize; if ( !paging_mode_translate(d) ) xenoprof_get_buffer.buf_gmaddr = __pa(d->xenoprof->rawbuf); else - xenoprof_shared_gmfn_with_guest( - d, __pa(d->xenoprof->rawbuf), xenoprof_get_buffer.buf_gmaddr, - d->xenoprof->npages); + xenoprof_shared_gmfn_with_guest(d, __pa(d->xenoprof->rawbuf), + xenoprof_get_buffer.buf_gmaddr, + d->xenoprof->npages); return __copy_to_guest(arg, &xenoprof_get_buffer, 1) ? 
-EFAULT : 0; } -#define NONPRIV_OP(op) ( (op == XENOPROF_init) \ - || (op == XENOPROF_enable_virq) \ - || (op == XENOPROF_disable_virq) \ - || (op == XENOPROF_get_buffer)) - +#define NONPRIV_OP(op) \ + ((op == XENOPROF_init) || (op == XENOPROF_enable_virq) || \ + (op == XENOPROF_disable_virq) || (op == XENOPROF_get_buffer)) + ret_t do_xenoprof_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg) { int ret = 0; - + if ( (op < 0) || (op > XENOPROF_last_op) ) { gdprintk(XENLOG_DEBUG, "invalid operation %d\n", op); @@ -700,13 +693,12 @@ ret_t do_xenoprof_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg) return ret; spin_lock(&xenoprof_lock); - - switch ( op ) + + switch (op) { case XENOPROF_init: ret = xenoprof_op_init(arg); - if ( (ret == 0) && - (current->domain == xenoprof_primary_profiler) ) + if ( (ret == 0) && (current->domain == xenoprof_primary_profiler) ) xenoprof_state = XENOPROF_INITIALIZED; break; @@ -767,8 +759,7 @@ ret_t do_xenoprof_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg) break; case XENOPROF_counter: - if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) || - (adomains == 0) ) + if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) || (adomains == 0) ) { ret = -EPERM; break; @@ -810,8 +801,7 @@ ret_t do_xenoprof_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg) case XENOPROF_start: ret = -EPERM; - if ( (xenoprof_state == XENOPROF_READY) && - (activated == adomains) ) + if ( (xenoprof_state == XENOPROF_READY) && (activated == adomains) ) ret = xenoprof_arch_start(); if ( ret == 0 ) xenoprof_state = XENOPROF_PROFILING; @@ -836,7 +826,7 @@ ret_t do_xenoprof_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg) if ( !active_ready[i] ) continue; d = active_domains[i]; - for_each_vcpu(d, v) + for_each_vcpu (d, v) send_guest_vcpu_virq(v, VIRQ_XENOPROF); } xenoprof_state = XENOPROF_READY; @@ -846,7 +836,7 @@ ret_t do_xenoprof_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg) case XENOPROF_disable_virq: { struct xenoprof *x; - if ( (xenoprof_state == XENOPROF_PROFILING) && + if ( (xenoprof_state == XENOPROF_PROFILING) && (is_active(current->domain)) ) { ret = -EPERM; @@ -878,13 +868,13 @@ ret_t do_xenoprof_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg) if ( xenoprof_state == XENOPROF_INITIALIZED ) { activated = 0; - adomains=0; + adomains = 0; xenoprof_primary_profiler = NULL; - backtrace_depth=0; + backtrace_depth = 0; ret = 0; } break; - + case XENOPROF_set_backtrace: ret = 0; if ( !xenoprof_backtrace_supported() ) @@ -894,8 +884,7 @@ ret_t do_xenoprof_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg) break; case XENOPROF_ibs_counter: - if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) || - (adomains == 0) ) + if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) || (adomains == 0) ) { ret = -EPERM; break; diff --git a/xen/common/xmalloc_tlsf.c b/xen/common/xmalloc_tlsf.c index b256dc56cf..ad41e091b5 100644 --- a/xen/common/xmalloc_tlsf.c +++ b/xen/common/xmalloc_tlsf.c @@ -12,7 +12,7 @@ * You can choose the licence that better fits your requirements. * * Released under the terms of the GNU General Public License Version 2.0 - * Released under the terms of the GNU Lesser General Public License + * Released under the terms of the GNU Lesser General Public License * Version 2.1 * * This is kernel port of TLSF allocator. 
@@ -28,52 +28,53 @@ #include #include -#define MAX_POOL_NAME_LEN 16 +#define MAX_POOL_NAME_LEN 16 /* Some IMPORTANT TLSF parameters */ -#define MEM_ALIGN (sizeof(void *) * 2) -#define MEM_ALIGN_MASK (~(MEM_ALIGN - 1)) +#define MEM_ALIGN (sizeof(void *) * 2) +#define MEM_ALIGN_MASK (~(MEM_ALIGN - 1)) -#define MAX_FLI (30) -#define MAX_LOG2_SLI (5) -#define MAX_SLI (1 << MAX_LOG2_SLI) +#define MAX_FLI (30) +#define MAX_LOG2_SLI (5) +#define MAX_SLI (1 << MAX_LOG2_SLI) -#define FLI_OFFSET (6) +#define FLI_OFFSET (6) /* tlsf structure just will manage blocks bigger than 128 bytes */ -#define SMALL_BLOCK (128) -#define REAL_FLI (MAX_FLI - FLI_OFFSET) -#define MIN_BLOCK_SIZE (sizeof(struct free_ptr)) -#define BHDR_OVERHEAD (sizeof(struct bhdr) - MIN_BLOCK_SIZE) +#define SMALL_BLOCK (128) +#define REAL_FLI (MAX_FLI - FLI_OFFSET) +#define MIN_BLOCK_SIZE (sizeof(struct free_ptr)) +#define BHDR_OVERHEAD (sizeof(struct bhdr) - MIN_BLOCK_SIZE) -#define PTR_MASK (sizeof(void *) - 1) +#define PTR_MASK (sizeof(void *) - 1) #define BLOCK_SIZE_MASK (0xFFFFFFFF - PTR_MASK) -#define GET_NEXT_BLOCK(addr, r) ((struct bhdr *) \ - ((char *)(addr) + (r))) -#define ROUNDUP_SIZE(r) (((r) + MEM_ALIGN - 1) & MEM_ALIGN_MASK) -#define ROUNDDOWN_SIZE(r) ((r) & MEM_ALIGN_MASK) -#define ROUNDUP_PAGE(r) (((r) + PAGE_SIZE - 1) & PAGE_MASK) +#define GET_NEXT_BLOCK(addr, r) ((struct bhdr *)((char *)(addr) + (r))) +#define ROUNDUP_SIZE(r) (((r) + MEM_ALIGN - 1) & MEM_ALIGN_MASK) +#define ROUNDDOWN_SIZE(r) ((r)&MEM_ALIGN_MASK) +#define ROUNDUP_PAGE(r) (((r) + PAGE_SIZE - 1) & PAGE_MASK) -#define BLOCK_STATE (0x1) -#define PREV_STATE (0x2) +#define BLOCK_STATE (0x1) +#define PREV_STATE (0x2) /* bit 0 of the block size */ -#define FREE_BLOCK (0x1) -#define USED_BLOCK (0x0) +#define FREE_BLOCK (0x1) +#define USED_BLOCK (0x0) /* bit 1 of the block size */ -#define PREV_FREE (0x2) -#define PREV_USED (0x0) +#define PREV_FREE (0x2) +#define PREV_USED (0x0) static spinlock_t pool_list_lock; static struct list_head pool_list_head; -struct free_ptr { +struct free_ptr +{ struct bhdr *prev; struct bhdr *next; }; -struct bhdr { +struct bhdr +{ /* All blocks in a region are linked in order of physical address */ struct bhdr *prev_hdr; /* @@ -89,7 +90,8 @@ struct bhdr { } ptr; }; -struct xmem_pool { +struct xmem_pool +{ /* First level bitmap (REAL_FLI bits) */ u32 fl_bitmap; @@ -200,8 +202,8 @@ static inline struct bhdr *FIND_SUITABLE_BLOCK(struct xmem_pool *p, int *fl, /** * Remove first free block(b) from free list with indexes (fl, sl). 
*/ -static inline void EXTRACT_BLOCK_HDR(struct bhdr *b, struct xmem_pool *p, int fl, - int sl) +static inline void EXTRACT_BLOCK_HDR(struct bhdr *b, struct xmem_pool *p, + int fl, int sl) { p->matrix[fl][sl] = b->ptr.free_ptr.next; if ( p->matrix[fl][sl] ) @@ -214,7 +216,7 @@ static inline void EXTRACT_BLOCK_HDR(struct bhdr *b, struct xmem_pool *p, int fl if ( !p->sl_bitmap[fl] ) clear_bit(fl, &p->fl_bitmap); } - b->ptr.free_ptr = (struct free_ptr) {NULL, NULL}; + b->ptr.free_ptr = (struct free_ptr){NULL, NULL}; } /** @@ -224,11 +226,9 @@ static inline void EXTRACT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl, int sl) { if ( b->ptr.free_ptr.next ) - b->ptr.free_ptr.next->ptr.free_ptr.prev = - b->ptr.free_ptr.prev; + b->ptr.free_ptr.next->ptr.free_ptr.prev = b->ptr.free_ptr.prev; if ( b->ptr.free_ptr.prev ) - b->ptr.free_ptr.prev->ptr.free_ptr.next = - b->ptr.free_ptr.next; + b->ptr.free_ptr.prev->ptr.free_ptr.next = b->ptr.free_ptr.next; if ( p->matrix[fl][sl] == b ) { p->matrix[fl][sl] = b->ptr.free_ptr.next; @@ -236,18 +236,19 @@ static inline void EXTRACT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl, { clear_bit(sl, &p->sl_bitmap[fl]); if ( !p->sl_bitmap[fl] ) - clear_bit (fl, &p->fl_bitmap); + clear_bit(fl, &p->fl_bitmap); } } - b->ptr.free_ptr = (struct free_ptr) {NULL, NULL}; + b->ptr.free_ptr = (struct free_ptr){NULL, NULL}; } /** * Insert block(b) in free list with indexes (fl, sl) */ -static inline void INSERT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl, int sl) +static inline void INSERT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl, + int sl) { - b->ptr.free_ptr = (struct free_ptr) {NULL, p->matrix[fl][sl]}; + b->ptr.free_ptr = (struct free_ptr){NULL, p->matrix[fl][sl]}; if ( p->matrix[fl][sl] ) p->matrix[fl][sl]->ptr.free_ptr.prev = b; p->matrix[fl][sl] = b; @@ -267,8 +268,8 @@ static inline void ADD_REGION(void *region, unsigned long region_size, b = (struct bhdr *)(region); b->prev_hdr = NULL; - b->size = ROUNDDOWN_SIZE(region_size - 2 * BHDR_OVERHEAD) - | FREE_BLOCK | PREV_USED; + b->size = ROUNDDOWN_SIZE(region_size - 2 * BHDR_OVERHEAD) | FREE_BLOCK | + PREV_USED; MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl); INSERT_BLOCK(b, pool, fl, sl); /* The sentinel block: allows us to know when we're in the last block */ @@ -283,13 +284,10 @@ static inline void ADD_REGION(void *region, unsigned long region_size, * TLSF pool-based allocator start. 
*/ -struct xmem_pool *xmem_pool_create( - const char *name, - xmem_pool_get_memory get_mem, - xmem_pool_put_memory put_mem, - unsigned long init_size, - unsigned long max_size, - unsigned long grow_size) +struct xmem_pool * +xmem_pool_create(const char *name, xmem_pool_get_memory get_mem, + xmem_pool_put_memory put_mem, unsigned long init_size, + unsigned long max_size, unsigned long grow_size) { struct xmem_pool *pool; int pool_bytes, pool_order; @@ -339,13 +337,12 @@ unsigned long xmem_pool_get_used_size(struct xmem_pool *pool) unsigned long xmem_pool_get_total_size(struct xmem_pool *pool) { unsigned long total; - total = ROUNDUP_SIZE(sizeof(*pool)) - + pool->init_size - + (pool->num_regions - 1) * pool->grow_size; + total = ROUNDUP_SIZE(sizeof(*pool)) + pool->init_size + + (pool->num_regions - 1) * pool->grow_size; return total; } -void xmem_pool_destroy(struct xmem_pool *pool) +void xmem_pool_destroy(struct xmem_pool *pool) { int pool_bytes, pool_order; @@ -371,7 +368,7 @@ void xmem_pool_destroy(struct xmem_pool *pool) pool_bytes = ROUNDUP_SIZE(sizeof(*pool)); pool_order = get_order_from_bytes(pool_bytes); - free_xenheap_pages(pool,pool_order); + free_xenheap_pages(pool, pool_order); } void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool) @@ -392,7 +389,7 @@ void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool) /* Rounding up the requested size and calculating fl and sl */ spin_lock(&pool->lock); - retry_find: +retry_find: MAPPING_SEARCH(&size, &fl, &sl); /* Searching a free block */ @@ -401,9 +398,9 @@ void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool) /* Not found */ if ( size > (pool->grow_size - 2 * BHDR_OVERHEAD) ) goto out_locked; - if ( pool->max_size && (pool->init_size + - pool->num_regions * pool->grow_size - > pool->max_size) ) + if ( pool->max_size && + (pool->init_size + pool->num_regions * pool->grow_size > + pool->max_size) ) goto out_locked; spin_unlock(&pool->lock); if ( (region = pool->get_mem(pool->grow_size)) == NULL ) @@ -445,10 +442,10 @@ void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool) return (void *)b->ptr.buffer; /* Failed alloc */ - out_locked: +out_locked: spin_unlock(&pool->lock); - out: +out: return NULL; } @@ -460,12 +457,12 @@ void xmem_pool_free(void *ptr, struct xmem_pool *pool) if ( unlikely(ptr == NULL) ) return; - b = (struct bhdr *)((char *) ptr - BHDR_OVERHEAD); + b = (struct bhdr *)((char *)ptr - BHDR_OVERHEAD); spin_lock(&pool->lock); b->size |= FREE_BLOCK; pool->used_size -= (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD; - b->ptr.free_ptr = (struct free_ptr) { NULL, NULL}; + b->ptr.free_ptr = (struct free_ptr){NULL, NULL}; tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK); if ( tmp_b->size & FREE_BLOCK ) { @@ -498,7 +495,7 @@ void xmem_pool_free(void *ptr, struct xmem_pool *pool) tmp_b->size |= PREV_FREE; tmp_b->prev_hdr = b; - out: +out: spin_unlock(&pool->lock); } @@ -553,9 +550,8 @@ static void tlsf_init(void) { INIT_LIST_HEAD(&pool_list_head); spin_lock_init(&pool_list_lock); - xenpool = xmem_pool_create( - "xmalloc", xmalloc_pool_get, xmalloc_pool_put, - PAGE_SIZE, 0, PAGE_SIZE); + xenpool = xmem_pool_create("xmalloc", xmalloc_pool_get, xmalloc_pool_put, + PAGE_SIZE, 0, PAGE_SIZE); BUG_ON(!xenpool); } @@ -628,7 +624,7 @@ void xfree(void *p) BUG_ON((unsigned long)p & ((PAGE_SIZE << order) - 1)); PFN_ORDER(virt_to_page(p)) = 0; - for ( i = 0; ; ++i ) + for ( i = 0;; ++i ) { if ( !(size & (1 << i)) ) continue; @@ -640,7 +636,7 @@ void xfree(void *p) } /* Strip alignment 
padding. */ - b = (struct bhdr *)((char *) p - BHDR_OVERHEAD); + b = (struct bhdr *)((char *)p - BHDR_OVERHEAD); if ( b->size & 1 ) { p = (char *)p - (b->size & ~1u); diff --git a/xen/common/xz/crc32.c b/xen/common/xz/crc32.c index af08ae2cf6..3c7635eb04 100644 --- a/xen/common/xz/crc32.c +++ b/xen/common/xz/crc32.c @@ -21,31 +21,33 @@ XZ_EXTERN uint32_t INITDATA xz_crc32_table[256]; XZ_EXTERN void INIT xz_crc32_init(void) { - const uint32_t poly = 0xEDB88320; + const uint32_t poly = 0xEDB88320; - uint32_t i; - uint32_t j; - uint32_t r; + uint32_t i; + uint32_t j; + uint32_t r; - for (i = 0; i < 256; ++i) { - r = i; - for (j = 0; j < 8; ++j) - r = (r >> 1) ^ (poly & ~((r & 1) - 1)); + for ( i = 0; i < 256; ++i ) + { + r = i; + for ( j = 0; j < 8; ++j ) + r = (r >> 1) ^ (poly & ~((r & 1) - 1)); - xz_crc32_table[i] = r; - } + xz_crc32_table[i] = r; + } - return; + return; } XZ_EXTERN uint32_t INIT xz_crc32(const uint8_t *buf, size_t size, uint32_t crc) { - crc = ~crc; + crc = ~crc; - while (size != 0) { - crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8); - --size; - } + while ( size != 0 ) + { + crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8); + --size; + } - return ~crc; + return ~crc; } diff --git a/xen/common/xz/dec_bcj.c b/xen/common/xz/dec_bcj.c index 86c1192199..1cdec5178d 100644 --- a/xen/common/xz/dec_bcj.c +++ b/xen/common/xz/dec_bcj.c @@ -16,63 +16,66 @@ */ #ifdef XZ_DEC_BCJ -struct xz_dec_bcj { - /* Type of the BCJ filter being used */ - enum { - BCJ_X86 = 4, /* x86 or x86-64 */ - BCJ_POWERPC = 5, /* Big endian only */ - BCJ_IA64 = 6, /* Big or little endian */ - BCJ_ARM = 7, /* Little endian only */ - BCJ_ARMTHUMB = 8, /* Little endian only */ - BCJ_SPARC = 9 /* Big or little endian */ - } type; - - /* - * Return value of the next filter in the chain. We need to preserve - * this information across calls, because we must not call the next - * filter anymore once it has returned XZ_STREAM_END. - */ - enum xz_ret ret; - - /* True if we are operating in single-call mode. */ - bool_t single_call; - - /* - * Absolute position relative to the beginning of the uncompressed - * data (in a single .xz Block). We care only about the lowest 32 - * bits so this doesn't need to be uint64_t even with big files. - */ - uint32_t pos; - - /* x86 filter state */ - uint32_t x86_prev_mask; - - /* Temporary space to hold the variables from struct xz_buf */ - uint8_t *out; - size_t out_pos; - size_t out_size; - - struct { - /* Amount of already filtered data in the beginning of buf */ - size_t filtered; - - /* Total amount of data currently stored in buf */ - size_t size; - - /* - * Buffer to hold a mix of filtered and unfiltered data. This - * needs to be big enough to hold Alignment + 2 * Look-ahead: - * - * Type Alignment Look-ahead - * x86 1 4 - * PowerPC 4 0 - * IA-64 16 0 - * ARM 4 0 - * ARM-Thumb 2 2 - * SPARC 4 0 - */ - uint8_t buf[16]; - } temp; +struct xz_dec_bcj +{ + /* Type of the BCJ filter being used */ + enum + { + BCJ_X86 = 4, /* x86 or x86-64 */ + BCJ_POWERPC = 5, /* Big endian only */ + BCJ_IA64 = 6, /* Big or little endian */ + BCJ_ARM = 7, /* Little endian only */ + BCJ_ARMTHUMB = 8, /* Little endian only */ + BCJ_SPARC = 9 /* Big or little endian */ + } type; + + /* + * Return value of the next filter in the chain. We need to preserve + * this information across calls, because we must not call the next + * filter anymore once it has returned XZ_STREAM_END. + */ + enum xz_ret ret; + + /* True if we are operating in single-call mode. 
*/ + bool_t single_call; + + /* + * Absolute position relative to the beginning of the uncompressed + * data (in a single .xz Block). We care only about the lowest 32 + * bits so this doesn't need to be uint64_t even with big files. + */ + uint32_t pos; + + /* x86 filter state */ + uint32_t x86_prev_mask; + + /* Temporary space to hold the variables from struct xz_buf */ + uint8_t *out; + size_t out_pos; + size_t out_size; + + struct + { + /* Amount of already filtered data in the beginning of buf */ + size_t filtered; + + /* Total amount of data currently stored in buf */ + size_t size; + + /* + * Buffer to hold a mix of filtered and unfiltered data. This + * needs to be big enough to hold Alignment + 2 * Look-ahead: + * + * Type Alignment Look-ahead + * x86 1 4 + * PowerPC 4 0 + * IA-64 16 0 + * ARM 4 0 + * ARM-Thumb 2 2 + * SPARC 4 0 + */ + uint8_t buf[16]; + } temp; }; #ifdef XZ_DEC_X86 @@ -82,255 +85,267 @@ struct xz_dec_bcj { */ static inline int INIT bcj_x86_test_msbyte(uint8_t b) { - return b == 0x00 || b == 0xFF; + return b == 0x00 || b == 0xFF; } static size_t INIT bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size) { - static const bool_t mask_to_allowed_status[8] - = { true, true, true, false, true, false, false, false }; - - static const uint8_t mask_to_bit_num[8] = { 0, 1, 2, 2, 3, 3, 3, 3 }; - - size_t i; - size_t prev_pos = (size_t)-1; - uint32_t prev_mask = s->x86_prev_mask; - uint32_t src; - uint32_t dest; - uint32_t j; - uint8_t b; - - if (size <= 4) - return 0; - - size -= 4; - for (i = 0; i < size; ++i) { - if ((buf[i] & 0xFE) != 0xE8) - continue; - - prev_pos = i - prev_pos; - if (prev_pos > 3) { - prev_mask = 0; - } else { - prev_mask = (prev_mask << (prev_pos - 1)) & 7; - if (prev_mask != 0) { - b = buf[i + 4 - mask_to_bit_num[prev_mask]]; - if (!mask_to_allowed_status[prev_mask] - || bcj_x86_test_msbyte(b)) { - prev_pos = i; - prev_mask = (prev_mask << 1) | 1; - continue; - } - } - } - - prev_pos = i; - - if (bcj_x86_test_msbyte(buf[i + 4])) { - src = get_unaligned_le32(buf + i + 1); - while (true) { - dest = src - (s->pos + (uint32_t)i + 5); - if (prev_mask == 0) - break; - - j = mask_to_bit_num[prev_mask] * 8; - b = (uint8_t)(dest >> (24 - j)); - if (!bcj_x86_test_msbyte(b)) - break; - - src = dest ^ (((uint32_t)1 << (32 - j)) - 1); - } - - dest &= 0x01FFFFFF; - dest |= (uint32_t)0 - (dest & 0x01000000); - put_unaligned_le32(dest, buf + i + 1); - i += 4; - } else { - prev_mask = (prev_mask << 1) | 1; - } - } - - prev_pos = i - prev_pos; - s->x86_prev_mask = prev_pos > 3 ? 
0 : prev_mask << (prev_pos - 1); - return i; + static const bool_t mask_to_allowed_status[8] = {true, true, true, false, + true, false, false, false}; + + static const uint8_t mask_to_bit_num[8] = {0, 1, 2, 2, 3, 3, 3, 3}; + + size_t i; + size_t prev_pos = (size_t)-1; + uint32_t prev_mask = s->x86_prev_mask; + uint32_t src; + uint32_t dest; + uint32_t j; + uint8_t b; + + if ( size <= 4 ) + return 0; + + size -= 4; + for ( i = 0; i < size; ++i ) + { + if ( (buf[i] & 0xFE) != 0xE8 ) + continue; + + prev_pos = i - prev_pos; + if ( prev_pos > 3 ) + { + prev_mask = 0; + } + else + { + prev_mask = (prev_mask << (prev_pos - 1)) & 7; + if ( prev_mask != 0 ) + { + b = buf[i + 4 - mask_to_bit_num[prev_mask]]; + if ( !mask_to_allowed_status[prev_mask] || + bcj_x86_test_msbyte(b) ) + { + prev_pos = i; + prev_mask = (prev_mask << 1) | 1; + continue; + } + } + } + + prev_pos = i; + + if ( bcj_x86_test_msbyte(buf[i + 4]) ) + { + src = get_unaligned_le32(buf + i + 1); + while ( true ) + { + dest = src - (s->pos + (uint32_t)i + 5); + if ( prev_mask == 0 ) + break; + + j = mask_to_bit_num[prev_mask] * 8; + b = (uint8_t)(dest >> (24 - j)); + if ( !bcj_x86_test_msbyte(b) ) + break; + + src = dest ^ (((uint32_t)1 << (32 - j)) - 1); + } + + dest &= 0x01FFFFFF; + dest |= (uint32_t)0 - (dest & 0x01000000); + put_unaligned_le32(dest, buf + i + 1); + i += 4; + } + else + { + prev_mask = (prev_mask << 1) | 1; + } + } + + prev_pos = i - prev_pos; + s->x86_prev_mask = prev_pos > 3 ? 0 : prev_mask << (prev_pos - 1); + return i; } #endif #ifdef XZ_DEC_POWERPC static size_t INIT bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size) { - size_t i; - uint32_t instr; - - for (i = 0; i + 4 <= size; i += 4) { - instr = get_unaligned_be32(buf + i); - if ((instr & 0xFC000003) == 0x48000001) { - instr &= 0x03FFFFFC; - instr -= s->pos + (uint32_t)i; - instr &= 0x03FFFFFC; - instr |= 0x48000001; - put_unaligned_be32(instr, buf + i); - } - } - - return i; + size_t i; + uint32_t instr; + + for ( i = 0; i + 4 <= size; i += 4 ) + { + instr = get_unaligned_be32(buf + i); + if ( (instr & 0xFC000003) == 0x48000001 ) + { + instr &= 0x03FFFFFC; + instr -= s->pos + (uint32_t)i; + instr &= 0x03FFFFFC; + instr |= 0x48000001; + put_unaligned_be32(instr, buf + i); + } + } + + return i; } #endif #ifdef XZ_DEC_IA64 static size_t INIT bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size) { - static const uint8_t branch_table[32] = { - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 4, 4, 6, 6, 0, 0, 7, 7, - 4, 4, 0, 0, 4, 4, 0, 0 - }; - - /* - * The local variables take a little bit stack space, but it's less - * than what LZMA2 decoder takes, so it doesn't make sense to reduce - * stack usage here without doing that for the LZMA2 decoder too. 
- */ - - /* Loop counters */ - size_t i; - size_t j; - - /* Instruction slot (0, 1, or 2) in the 128-bit instruction word */ - uint32_t slot; - - /* Bitwise offset of the instruction indicated by slot */ - uint32_t bit_pos; - - /* bit_pos split into byte and bit parts */ - uint32_t byte_pos; - uint32_t bit_res; - - /* Address part of an instruction */ - uint32_t addr; - - /* Mask used to detect which instructions to convert */ - uint32_t mask; - - /* 41-bit instruction stored somewhere in the lowest 48 bits */ - uint64_t instr; - - /* Instruction normalized with bit_res for easier manipulation */ - uint64_t norm; - - for (i = 0; i + 16 <= size; i += 16) { - mask = branch_table[buf[i] & 0x1F]; - for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) { - if (((mask >> slot) & 1) == 0) - continue; - - byte_pos = bit_pos >> 3; - bit_res = bit_pos & 7; - instr = 0; - for (j = 0; j < 6; ++j) - instr |= (uint64_t)(buf[i + j + byte_pos]) - << (8 * j); - - norm = instr >> bit_res; - - if (((norm >> 37) & 0x0F) == 0x05 - && ((norm >> 9) & 0x07) == 0) { - addr = (norm >> 13) & 0x0FFFFF; - addr |= ((uint32_t)(norm >> 36) & 1) << 20; - addr <<= 4; - addr -= s->pos + (uint32_t)i; - addr >>= 4; - - norm &= ~((uint64_t)0x8FFFFF << 13); - norm |= (uint64_t)(addr & 0x0FFFFF) << 13; - norm |= (uint64_t)(addr & 0x100000) - << (36 - 20); - - instr &= (1 << bit_res) - 1; - instr |= norm << bit_res; - - for (j = 0; j < 6; j++) - buf[i + j + byte_pos] - = (uint8_t)(instr >> (8 * j)); - } - } - } - - return i; + static const uint8_t branch_table[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 4, 4, 6, 6, 0, 0, + 7, 7, 4, 4, 0, 0, 4, 4, 0, 0}; + + /* + * The local variables take a little bit stack space, but it's less + * than what LZMA2 decoder takes, so it doesn't make sense to reduce + * stack usage here without doing that for the LZMA2 decoder too. 
+ */ + + /* Loop counters */ + size_t i; + size_t j; + + /* Instruction slot (0, 1, or 2) in the 128-bit instruction word */ + uint32_t slot; + + /* Bitwise offset of the instruction indicated by slot */ + uint32_t bit_pos; + + /* bit_pos split into byte and bit parts */ + uint32_t byte_pos; + uint32_t bit_res; + + /* Address part of an instruction */ + uint32_t addr; + + /* Mask used to detect which instructions to convert */ + uint32_t mask; + + /* 41-bit instruction stored somewhere in the lowest 48 bits */ + uint64_t instr; + + /* Instruction normalized with bit_res for easier manipulation */ + uint64_t norm; + + for ( i = 0; i + 16 <= size; i += 16 ) + { + mask = branch_table[buf[i] & 0x1F]; + for ( slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41 ) + { + if ( ((mask >> slot) & 1) == 0 ) + continue; + + byte_pos = bit_pos >> 3; + bit_res = bit_pos & 7; + instr = 0; + for ( j = 0; j < 6; ++j ) + instr |= (uint64_t)(buf[i + j + byte_pos]) << (8 * j); + + norm = instr >> bit_res; + + if ( ((norm >> 37) & 0x0F) == 0x05 && ((norm >> 9) & 0x07) == 0 ) + { + addr = (norm >> 13) & 0x0FFFFF; + addr |= ((uint32_t)(norm >> 36) & 1) << 20; + addr <<= 4; + addr -= s->pos + (uint32_t)i; + addr >>= 4; + + norm &= ~((uint64_t)0x8FFFFF << 13); + norm |= (uint64_t)(addr & 0x0FFFFF) << 13; + norm |= (uint64_t)(addr & 0x100000) << (36 - 20); + + instr &= (1 << bit_res) - 1; + instr |= norm << bit_res; + + for ( j = 0; j < 6; j++ ) + buf[i + j + byte_pos] = (uint8_t)(instr >> (8 * j)); + } + } + } + + return i; } #endif #ifdef XZ_DEC_ARM static size_t INIT bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size) { - size_t i; - uint32_t addr; - - for (i = 0; i + 4 <= size; i += 4) { - if (buf[i + 3] == 0xEB) { - addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8) - | ((uint32_t)buf[i + 2] << 16); - addr <<= 2; - addr -= s->pos + (uint32_t)i + 8; - addr >>= 2; - buf[i] = (uint8_t)addr; - buf[i + 1] = (uint8_t)(addr >> 8); - buf[i + 2] = (uint8_t)(addr >> 16); - } - } - - return i; + size_t i; + uint32_t addr; + + for ( i = 0; i + 4 <= size; i += 4 ) + { + if ( buf[i + 3] == 0xEB ) + { + addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8) | + ((uint32_t)buf[i + 2] << 16); + addr <<= 2; + addr -= s->pos + (uint32_t)i + 8; + addr >>= 2; + buf[i] = (uint8_t)addr; + buf[i + 1] = (uint8_t)(addr >> 8); + buf[i + 2] = (uint8_t)(addr >> 16); + } + } + + return i; } #endif #ifdef XZ_DEC_ARMTHUMB static size_t INIT bcj_armthumb(struct xz_dec_bcj *s, uint8_t *buf, size_t size) { - size_t i; - uint32_t addr; - - for (i = 0; i + 4 <= size; i += 2) { - if ((buf[i + 1] & 0xF8) == 0xF0 - && (buf[i + 3] & 0xF8) == 0xF8) { - addr = (((uint32_t)buf[i + 1] & 0x07) << 19) - | ((uint32_t)buf[i] << 11) - | (((uint32_t)buf[i + 3] & 0x07) << 8) - | (uint32_t)buf[i + 2]; - addr <<= 1; - addr -= s->pos + (uint32_t)i + 4; - addr >>= 1; - buf[i + 1] = (uint8_t)(0xF0 | ((addr >> 19) & 0x07)); - buf[i] = (uint8_t)(addr >> 11); - buf[i + 3] = (uint8_t)(0xF8 | ((addr >> 8) & 0x07)); - buf[i + 2] = (uint8_t)addr; - i += 2; - } - } - - return i; + size_t i; + uint32_t addr; + + for ( i = 0; i + 4 <= size; i += 2 ) + { + if ( (buf[i + 1] & 0xF8) == 0xF0 && (buf[i + 3] & 0xF8) == 0xF8 ) + { + addr = (((uint32_t)buf[i + 1] & 0x07) << 19) | + ((uint32_t)buf[i] << 11) | + (((uint32_t)buf[i + 3] & 0x07) << 8) | (uint32_t)buf[i + 2]; + addr <<= 1; + addr -= s->pos + (uint32_t)i + 4; + addr >>= 1; + buf[i + 1] = (uint8_t)(0xF0 | ((addr >> 19) & 0x07)); + buf[i] = (uint8_t)(addr >> 11); + buf[i + 3] = (uint8_t)(0xF8 | ((addr >> 8) & 0x07)); 
+ buf[i + 2] = (uint8_t)addr; + i += 2; + } + } + + return i; } #endif #ifdef XZ_DEC_SPARC static size_t INIT bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size) { - size_t i; - uint32_t instr; - - for (i = 0; i + 4 <= size; i += 4) { - instr = get_unaligned_be32(buf + i); - if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) { - instr <<= 2; - instr -= s->pos + (uint32_t)i; - instr >>= 2; - instr = ((uint32_t)0x40000000 - (instr & 0x400000)) - | 0x40000000 | (instr & 0x3FFFFF); - put_unaligned_be32(instr, buf + i); - } - } - - return i; + size_t i; + uint32_t instr; + + for ( i = 0; i + 4 <= size; i += 4 ) + { + instr = get_unaligned_be32(buf + i); + if ( (instr >> 22) == 0x100 || (instr >> 22) == 0x1FF ) + { + instr <<= 2; + instr -= s->pos + (uint32_t)i; + instr >>= 2; + instr = ((uint32_t)0x40000000 - (instr & 0x400000)) | 0x40000000 | + (instr & 0x3FFFFF); + put_unaligned_be32(instr, buf + i); + } + } + + return i; } #endif @@ -342,53 +357,54 @@ static size_t INIT bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size) * pointers, which could be problematic in the kernel boot code, which must * avoid pointers to static data (at least on x86). */ -static void INIT bcj_apply(struct xz_dec_bcj *s, - uint8_t *buf, size_t *pos, size_t size) +static void INIT bcj_apply(struct xz_dec_bcj *s, uint8_t *buf, size_t *pos, + size_t size) { - size_t filtered; + size_t filtered; - buf += *pos; - size -= *pos; + buf += *pos; + size -= *pos; - switch (s->type) { + switch (s->type) + { #ifdef XZ_DEC_X86 - case BCJ_X86: - filtered = bcj_x86(s, buf, size); - break; + case BCJ_X86: + filtered = bcj_x86(s, buf, size); + break; #endif #ifdef XZ_DEC_POWERPC - case BCJ_POWERPC: - filtered = bcj_powerpc(s, buf, size); - break; + case BCJ_POWERPC: + filtered = bcj_powerpc(s, buf, size); + break; #endif #ifdef XZ_DEC_IA64 - case BCJ_IA64: - filtered = bcj_ia64(s, buf, size); - break; + case BCJ_IA64: + filtered = bcj_ia64(s, buf, size); + break; #endif #ifdef XZ_DEC_ARM - case BCJ_ARM: - filtered = bcj_arm(s, buf, size); - break; + case BCJ_ARM: + filtered = bcj_arm(s, buf, size); + break; #endif #ifdef XZ_DEC_ARMTHUMB - case BCJ_ARMTHUMB: - filtered = bcj_armthumb(s, buf, size); - break; + case BCJ_ARMTHUMB: + filtered = bcj_armthumb(s, buf, size); + break; #endif #ifdef XZ_DEC_SPARC - case BCJ_SPARC: - filtered = bcj_sparc(s, buf, size); - break; + case BCJ_SPARC: + filtered = bcj_sparc(s, buf, size); + break; #endif - default: - /* Never reached but silence compiler warnings. */ - filtered = 0; - break; - } - - *pos += filtered; - s->pos += filtered; + default: + /* Never reached but silence compiler warnings. 
*/ + filtered = 0; + break; + } + + *pos += filtered; + s->pos += filtered; } /* @@ -398,15 +414,15 @@ static void INIT bcj_apply(struct xz_dec_bcj *s, */ static void INIT bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b) { - size_t copy_size; + size_t copy_size; - copy_size = min_t(size_t, s->temp.filtered, b->out_size - b->out_pos); - memcpy(b->out + b->out_pos, s->temp.buf, copy_size); - b->out_pos += copy_size; + copy_size = min_t(size_t, s->temp.filtered, b->out_size - b->out_pos); + memcpy(b->out + b->out_pos, s->temp.buf, copy_size); + b->out_pos += copy_size; - s->temp.filtered -= copy_size; - s->temp.size -= copy_size; - memmove(s->temp.buf, s->temp.buf + copy_size, s->temp.size); + s->temp.filtered -= copy_size; + s->temp.size -= copy_size; + memmove(s->temp.buf, s->temp.buf + copy_size, s->temp.size); } /* @@ -415,160 +431,163 @@ static void INIT bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b) * some buffering. */ XZ_EXTERN enum xz_ret INIT xz_dec_bcj_run(struct xz_dec_bcj *s, - struct xz_dec_lzma2 *lzma2, - struct xz_buf *b) + struct xz_dec_lzma2 *lzma2, + struct xz_buf *b) { - size_t out_start; - - /* - * Flush pending already filtered data to the output buffer. Return - * immediatelly if we couldn't flush everything, or if the next - * filter in the chain had already returned XZ_STREAM_END. - */ - if (s->temp.filtered > 0) { - bcj_flush(s, b); - if (s->temp.filtered > 0) - return XZ_OK; - - if (s->ret == XZ_STREAM_END) - return XZ_STREAM_END; - } - - /* - * If we have more output space than what is currently pending in - * temp, copy the unfiltered data from temp to the output buffer - * and try to fill the output buffer by decoding more data from the - * next filter in the chain. Apply the BCJ filter on the new data - * in the output buffer. If everything cannot be filtered, copy it - * to temp and rewind the output buffer position accordingly. - * - * This needs to be always run when temp.size == 0 to handle a special - * case where the output buffer is full and the next filter has no - * more output coming but hasn't returned XZ_STREAM_END yet. - */ - if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) { - out_start = b->out_pos; - memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); - b->out_pos += s->temp.size; - - s->ret = xz_dec_lzma2_run(lzma2, b); - if (s->ret != XZ_STREAM_END - && (s->ret != XZ_OK || s->single_call)) - return s->ret; - - bcj_apply(s, b->out, &out_start, b->out_pos); - - /* - * As an exception, if the next filter returned XZ_STREAM_END, - * we can do that too, since the last few bytes that remain - * unfiltered are meant to remain unfiltered. - */ - if (s->ret == XZ_STREAM_END) - return XZ_STREAM_END; - - s->temp.size = b->out_pos - out_start; - b->out_pos -= s->temp.size; - memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); - - /* - * If there wasn't enough input to the next filter to fill - * the output buffer with unfiltered data, there's no point - * to try decoding more data to temp. - */ - if (b->out_pos + s->temp.size < b->out_size) - return XZ_OK; - } - - /* - * We have unfiltered data in temp. If the output buffer isn't full - * yet, try to fill the temp buffer by decoding more data from the - * next filter. Apply the BCJ filter on temp. Then we hopefully can - * fill the actual output buffer by copying filtered data from temp. - * A mix of filtered and unfiltered data may be left in temp; it will - * be taken care on the next call to this function. 
- */ - if (b->out_pos < b->out_size) { - /* Make b->out{,_pos,_size} temporarily point to s->temp. */ - s->out = b->out; - s->out_pos = b->out_pos; - s->out_size = b->out_size; - b->out = s->temp.buf; - b->out_pos = s->temp.size; - b->out_size = sizeof(s->temp.buf); - - s->ret = xz_dec_lzma2_run(lzma2, b); - - s->temp.size = b->out_pos; - b->out = s->out; - b->out_pos = s->out_pos; - b->out_size = s->out_size; - - if (s->ret != XZ_OK && s->ret != XZ_STREAM_END) - return s->ret; - - bcj_apply(s, s->temp.buf, &s->temp.filtered, s->temp.size); - - /* - * If the next filter returned XZ_STREAM_END, we mark that - * everything is filtered, since the last unfiltered bytes - * of the stream are meant to be left as is. - */ - if (s->ret == XZ_STREAM_END) - s->temp.filtered = s->temp.size; - - bcj_flush(s, b); - if (s->temp.filtered > 0) - return XZ_OK; - } - - return s->ret; + size_t out_start; + + /* + * Flush pending already filtered data to the output buffer. Return + * immediatelly if we couldn't flush everything, or if the next + * filter in the chain had already returned XZ_STREAM_END. + */ + if ( s->temp.filtered > 0 ) + { + bcj_flush(s, b); + if ( s->temp.filtered > 0 ) + return XZ_OK; + + if ( s->ret == XZ_STREAM_END ) + return XZ_STREAM_END; + } + + /* + * If we have more output space than what is currently pending in + * temp, copy the unfiltered data from temp to the output buffer + * and try to fill the output buffer by decoding more data from the + * next filter in the chain. Apply the BCJ filter on the new data + * in the output buffer. If everything cannot be filtered, copy it + * to temp and rewind the output buffer position accordingly. + * + * This needs to be always run when temp.size == 0 to handle a special + * case where the output buffer is full and the next filter has no + * more output coming but hasn't returned XZ_STREAM_END yet. + */ + if ( s->temp.size < b->out_size - b->out_pos || s->temp.size == 0 ) + { + out_start = b->out_pos; + memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); + b->out_pos += s->temp.size; + + s->ret = xz_dec_lzma2_run(lzma2, b); + if ( s->ret != XZ_STREAM_END && (s->ret != XZ_OK || s->single_call) ) + return s->ret; + + bcj_apply(s, b->out, &out_start, b->out_pos); + + /* + * As an exception, if the next filter returned XZ_STREAM_END, + * we can do that too, since the last few bytes that remain + * unfiltered are meant to remain unfiltered. + */ + if ( s->ret == XZ_STREAM_END ) + return XZ_STREAM_END; + + s->temp.size = b->out_pos - out_start; + b->out_pos -= s->temp.size; + memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); + + /* + * If there wasn't enough input to the next filter to fill + * the output buffer with unfiltered data, there's no point + * to try decoding more data to temp. + */ + if ( b->out_pos + s->temp.size < b->out_size ) + return XZ_OK; + } + + /* + * We have unfiltered data in temp. If the output buffer isn't full + * yet, try to fill the temp buffer by decoding more data from the + * next filter. Apply the BCJ filter on temp. Then we hopefully can + * fill the actual output buffer by copying filtered data from temp. + * A mix of filtered and unfiltered data may be left in temp; it will + * be taken care on the next call to this function. + */ + if ( b->out_pos < b->out_size ) + { + /* Make b->out{,_pos,_size} temporarily point to s->temp. 
*/ + s->out = b->out; + s->out_pos = b->out_pos; + s->out_size = b->out_size; + b->out = s->temp.buf; + b->out_pos = s->temp.size; + b->out_size = sizeof(s->temp.buf); + + s->ret = xz_dec_lzma2_run(lzma2, b); + + s->temp.size = b->out_pos; + b->out = s->out; + b->out_pos = s->out_pos; + b->out_size = s->out_size; + + if ( s->ret != XZ_OK && s->ret != XZ_STREAM_END ) + return s->ret; + + bcj_apply(s, s->temp.buf, &s->temp.filtered, s->temp.size); + + /* + * If the next filter returned XZ_STREAM_END, we mark that + * everything is filtered, since the last unfiltered bytes + * of the stream are meant to be left as is. + */ + if ( s->ret == XZ_STREAM_END ) + s->temp.filtered = s->temp.size; + + bcj_flush(s, b); + if ( s->temp.filtered > 0 ) + return XZ_OK; + } + + return s->ret; } XZ_EXTERN struct xz_dec_bcj *INIT xz_dec_bcj_create(bool_t single_call) { - struct xz_dec_bcj *s = malloc(sizeof(*s)); - if (s != NULL) - s->single_call = single_call; + struct xz_dec_bcj *s = malloc(sizeof(*s)); + if ( s != NULL ) + s->single_call = single_call; - return s; + return s; } XZ_EXTERN enum xz_ret INIT xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id) { - switch (id) { + switch (id) + { #ifdef XZ_DEC_X86 - case BCJ_X86: + case BCJ_X86: #endif #ifdef XZ_DEC_POWERPC - case BCJ_POWERPC: + case BCJ_POWERPC: #endif #ifdef XZ_DEC_IA64 - case BCJ_IA64: + case BCJ_IA64: #endif #ifdef XZ_DEC_ARM - case BCJ_ARM: + case BCJ_ARM: #endif #ifdef XZ_DEC_ARMTHUMB - case BCJ_ARMTHUMB: + case BCJ_ARMTHUMB: #endif #ifdef XZ_DEC_SPARC - case BCJ_SPARC: + case BCJ_SPARC: #endif - break; + break; - default: - /* Unsupported Filter ID */ - return XZ_OPTIONS_ERROR; - } + default: + /* Unsupported Filter ID */ + return XZ_OPTIONS_ERROR; + } - s->type = id; - s->ret = XZ_OK; - s->pos = 0; - s->x86_prev_mask = 0; - s->temp.filtered = 0; - s->temp.size = 0; + s->type = id; + s->ret = XZ_OK; + s->pos = 0; + s->x86_prev_mask = 0; + s->temp.filtered = 0; + s->temp.size = 0; - return XZ_OK; + return XZ_OK; } #endif diff --git a/xen/common/xz/dec_lzma2.c b/xen/common/xz/dec_lzma2.c index 44fe79bc30..87c071951e 100644 --- a/xen/common/xz/dec_lzma2.c +++ b/xen/common/xz/dec_lzma2.c @@ -41,238 +41,246 @@ * in which the dictionary variables address the actual output * buffer directly. */ -struct dictionary { - /* Beginning of the history buffer */ - uint8_t *buf; - - /* Old position in buf (before decoding more data) */ - size_t start; - - /* Position in buf */ - size_t pos; - - /* - * How full dictionary is. This is used to detect corrupt input that - * would read beyond the beginning of the uncompressed stream. - */ - size_t full; - - /* Write limit; we don't write to buf[limit] or later bytes. */ - size_t limit; - - /* - * End of the dictionary buffer. In multi-call mode, this is - * the same as the dictionary size. In single-call mode, this - * indicates the size of the output buffer. - */ - size_t end; - - /* - * Size of the dictionary as specified in Block Header. This is used - * together with "full" to detect corrupt input that would make us - * read beyond the beginning of the uncompressed stream. - */ - uint32_t size; - - /* - * Maximum allowed dictionary size in multi-call mode. - * This is ignored in single-call mode. - */ - uint32_t size_max; - - /* - * Amount of memory currently allocated for the dictionary. - * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC, - * size_max is always the same as the allocated size.) 
- */ - uint32_t allocated; - - /* Operation mode */ - enum xz_mode mode; +struct dictionary +{ + /* Beginning of the history buffer */ + uint8_t *buf; + + /* Old position in buf (before decoding more data) */ + size_t start; + + /* Position in buf */ + size_t pos; + + /* + * How full dictionary is. This is used to detect corrupt input that + * would read beyond the beginning of the uncompressed stream. + */ + size_t full; + + /* Write limit; we don't write to buf[limit] or later bytes. */ + size_t limit; + + /* + * End of the dictionary buffer. In multi-call mode, this is + * the same as the dictionary size. In single-call mode, this + * indicates the size of the output buffer. + */ + size_t end; + + /* + * Size of the dictionary as specified in Block Header. This is used + * together with "full" to detect corrupt input that would make us + * read beyond the beginning of the uncompressed stream. + */ + uint32_t size; + + /* + * Maximum allowed dictionary size in multi-call mode. + * This is ignored in single-call mode. + */ + uint32_t size_max; + + /* + * Amount of memory currently allocated for the dictionary. + * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC, + * size_max is always the same as the allocated size.) + */ + uint32_t allocated; + + /* Operation mode */ + enum xz_mode mode; }; /* Range decoder */ -struct rc_dec { - uint32_t range; - uint32_t code; - - /* - * Number of initializing bytes remaining to be read - * by rc_read_init(). - */ - uint32_t init_bytes_left; - - /* - * Buffer from which we read our input. It can be either - * temp.buf or the caller-provided input buffer. - */ - const uint8_t *in; - size_t in_pos; - size_t in_limit; +struct rc_dec +{ + uint32_t range; + uint32_t code; + + /* + * Number of initializing bytes remaining to be read + * by rc_read_init(). + */ + uint32_t init_bytes_left; + + /* + * Buffer from which we read our input. It can be either + * temp.buf or the caller-provided input buffer. + */ + const uint8_t *in; + size_t in_pos; + size_t in_limit; }; /* Probabilities for a length decoder. */ -struct lzma_len_dec { - /* Probability of match length being at least 10 */ - uint16_t choice; +struct lzma_len_dec +{ + /* Probability of match length being at least 10 */ + uint16_t choice; - /* Probability of match length being at least 18 */ - uint16_t choice2; + /* Probability of match length being at least 18 */ + uint16_t choice2; - /* Probabilities for match lengths 2-9 */ - uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS]; + /* Probabilities for match lengths 2-9 */ + uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS]; - /* Probabilities for match lengths 10-17 */ - uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS]; + /* Probabilities for match lengths 10-17 */ + uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS]; - /* Probabilities for match lengths 18-273 */ - uint16_t high[LEN_HIGH_SYMBOLS]; + /* Probabilities for match lengths 18-273 */ + uint16_t high[LEN_HIGH_SYMBOLS]; }; -struct lzma_dec { - /* Distances of latest four matches */ - uint32_t rep0; - uint32_t rep1; - uint32_t rep2; - uint32_t rep3; - - /* Types of the most recently seen LZMA symbols */ - enum lzma_state state; - - /* - * Length of a match. This is updated so that dict_repeat can - * be called again to finish repeating the whole match. 
- */ - uint32_t len; - - /* - * LZMA properties or related bit masks (number of literal - * context bits, a mask dervied from the number of literal - * position bits, and a mask dervied from the number - * position bits) - */ - uint32_t lc; - uint32_t literal_pos_mask; /* (1 << lp) - 1 */ - uint32_t pos_mask; /* (1 << pb) - 1 */ - - /* If 1, it's a match. Otherwise it's a single 8-bit literal. */ - uint16_t is_match[STATES][POS_STATES_MAX]; - - /* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */ - uint16_t is_rep[STATES]; - - /* - * If 0, distance of a repeated match is rep0. - * Otherwise check is_rep1. - */ - uint16_t is_rep0[STATES]; - - /* - * If 0, distance of a repeated match is rep1. - * Otherwise check is_rep2. - */ - uint16_t is_rep1[STATES]; - - /* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */ - uint16_t is_rep2[STATES]; - - /* - * If 1, the repeated match has length of one byte. Otherwise - * the length is decoded from rep_len_decoder. - */ - uint16_t is_rep0_long[STATES][POS_STATES_MAX]; - - /* - * Probability tree for the highest two bits of the match - * distance. There is a separate probability tree for match - * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273]. - */ - uint16_t dist_slot[DIST_STATES][DIST_SLOTS]; - - /* - * Probility trees for additional bits for match distance - * when the distance is in the range [4, 127]. - */ - uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END]; - - /* - * Probability tree for the lowest four bits of a match - * distance that is equal to or greater than 128. - */ - uint16_t dist_align[ALIGN_SIZE]; - - /* Length of a normal match */ - struct lzma_len_dec match_len_dec; - - /* Length of a repeated match */ - struct lzma_len_dec rep_len_dec; - - /* Probabilities of literals */ - uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE]; +struct lzma_dec +{ + /* Distances of latest four matches */ + uint32_t rep0; + uint32_t rep1; + uint32_t rep2; + uint32_t rep3; + + /* Types of the most recently seen LZMA symbols */ + enum lzma_state state; + + /* + * Length of a match. This is updated so that dict_repeat can + * be called again to finish repeating the whole match. + */ + uint32_t len; + + /* + * LZMA properties or related bit masks (number of literal + * context bits, a mask dervied from the number of literal + * position bits, and a mask dervied from the number + * position bits) + */ + uint32_t lc; + uint32_t literal_pos_mask; /* (1 << lp) - 1 */ + uint32_t pos_mask; /* (1 << pb) - 1 */ + + /* If 1, it's a match. Otherwise it's a single 8-bit literal. */ + uint16_t is_match[STATES][POS_STATES_MAX]; + + /* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */ + uint16_t is_rep[STATES]; + + /* + * If 0, distance of a repeated match is rep0. + * Otherwise check is_rep1. + */ + uint16_t is_rep0[STATES]; + + /* + * If 0, distance of a repeated match is rep1. + * Otherwise check is_rep2. + */ + uint16_t is_rep1[STATES]; + + /* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */ + uint16_t is_rep2[STATES]; + + /* + * If 1, the repeated match has length of one byte. Otherwise + * the length is decoded from rep_len_decoder. + */ + uint16_t is_rep0_long[STATES][POS_STATES_MAX]; + + /* + * Probability tree for the highest two bits of the match + * distance. There is a separate probability tree for match + * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273]. 
+ */ + uint16_t dist_slot[DIST_STATES][DIST_SLOTS]; + + /* + * Probility trees for additional bits for match distance + * when the distance is in the range [4, 127]. + */ + uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END]; + + /* + * Probability tree for the lowest four bits of a match + * distance that is equal to or greater than 128. + */ + uint16_t dist_align[ALIGN_SIZE]; + + /* Length of a normal match */ + struct lzma_len_dec match_len_dec; + + /* Length of a repeated match */ + struct lzma_len_dec rep_len_dec; + + /* Probabilities of literals */ + uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE]; }; -struct lzma2_dec { - /* Position in xz_dec_lzma2_run(). */ - enum lzma2_seq { - SEQ_CONTROL, - SEQ_UNCOMPRESSED_1, - SEQ_UNCOMPRESSED_2, - SEQ_COMPRESSED_0, - SEQ_COMPRESSED_1, - SEQ_PROPERTIES, - SEQ_LZMA_PREPARE, - SEQ_LZMA_RUN, - SEQ_COPY - } sequence; - - /* Next position after decoding the compressed size of the chunk. */ - enum lzma2_seq next_sequence; - - /* Uncompressed size of LZMA chunk (2 MiB at maximum) */ - uint32_t uncompressed; - - /* - * Compressed size of LZMA chunk or compressed/uncompressed - * size of uncompressed chunk (64 KiB at maximum) - */ - uint32_t compressed; - - /* - * True if dictionary reset is needed. This is false before - * the first chunk (LZMA or uncompressed). - */ - bool_t need_dict_reset; - - /* - * True if new LZMA properties are needed. This is false - * before the first LZMA chunk. - */ - bool_t need_props; +struct lzma2_dec +{ + /* Position in xz_dec_lzma2_run(). */ + enum lzma2_seq + { + SEQ_CONTROL, + SEQ_UNCOMPRESSED_1, + SEQ_UNCOMPRESSED_2, + SEQ_COMPRESSED_0, + SEQ_COMPRESSED_1, + SEQ_PROPERTIES, + SEQ_LZMA_PREPARE, + SEQ_LZMA_RUN, + SEQ_COPY + } sequence; + + /* Next position after decoding the compressed size of the chunk. */ + enum lzma2_seq next_sequence; + + /* Uncompressed size of LZMA chunk (2 MiB at maximum) */ + uint32_t uncompressed; + + /* + * Compressed size of LZMA chunk or compressed/uncompressed + * size of uncompressed chunk (64 KiB at maximum) + */ + uint32_t compressed; + + /* + * True if dictionary reset is needed. This is false before + * the first chunk (LZMA or uncompressed). + */ + bool_t need_dict_reset; + + /* + * True if new LZMA properties are needed. This is false + * before the first LZMA chunk. + */ + bool_t need_props; }; -struct xz_dec_lzma2 { - /* - * The order below is important on x86 to reduce code size and - * it shouldn't hurt on other platforms. Everything up to and - * including lzma.pos_mask are in the first 128 bytes on x86-32, - * which allows using smaller instructions to access those - * variables. On x86-64, fewer variables fit into the first 128 - * bytes, but this is still the best order without sacrificing - * the readability by splitting the structures. - */ - struct rc_dec rc; - struct dictionary dict; - struct lzma2_dec lzma2; - struct lzma_dec lzma; - - /* - * Temporary buffer which holds small number of input bytes between - * decoder calls. See lzma2_lzma() for details. - */ - struct { - uint32_t size; - uint8_t buf[3 * LZMA_IN_REQUIRED]; - } temp; +struct xz_dec_lzma2 +{ + /* + * The order below is important on x86 to reduce code size and + * it shouldn't hurt on other platforms. Everything up to and + * including lzma.pos_mask are in the first 128 bytes on x86-32, + * which allows using smaller instructions to access those + * variables. 
On x86-64, fewer variables fit into the first 128 + * bytes, but this is still the best order without sacrificing + * the readability by splitting the structures. + */ + struct rc_dec rc; + struct dictionary dict; + struct lzma2_dec lzma2; + struct lzma_dec lzma; + + /* + * Temporary buffer which holds small number of input bytes between + * decoder calls. See lzma2_lzma() for details. + */ + struct + { + uint32_t size; + uint8_t buf[3 * LZMA_IN_REQUIRED]; + } temp; }; /************** @@ -285,30 +293,31 @@ struct xz_dec_lzma2 { */ static void INIT dict_reset(struct dictionary *dict, struct xz_buf *b) { - if (DEC_IS_SINGLE(dict->mode)) { - dict->buf = b->out + b->out_pos; - dict->end = b->out_size - b->out_pos; - } - - dict->start = 0; - dict->pos = 0; - dict->limit = 0; - dict->full = 0; + if ( DEC_IS_SINGLE(dict->mode) ) + { + dict->buf = b->out + b->out_pos; + dict->end = b->out_size - b->out_pos; + } + + dict->start = 0; + dict->pos = 0; + dict->limit = 0; + dict->full = 0; } /* Set dictionary write limit */ static void INIT dict_limit(struct dictionary *dict, size_t out_max) { - if (dict->end - dict->pos <= out_max) - dict->limit = dict->end; - else - dict->limit = dict->pos + out_max; + if ( dict->end - dict->pos <= out_max ) + dict->limit = dict->end; + else + dict->limit = dict->pos + out_max; } /* Return true if at least one byte can be written into the dictionary. */ static inline bool_t INIT dict_has_space(const struct dictionary *dict) { - return dict->pos < dict->limit; + return dict->pos < dict->limit; } /* @@ -317,14 +326,15 @@ static inline bool_t INIT dict_has_space(const struct dictionary *dict) * still empty. This special case is needed for single-call decoding to * avoid writing a '\0' to the end of the destination buffer. */ -static inline uint32_t INIT dict_get(const struct dictionary *dict, uint32_t dist) +static inline uint32_t INIT dict_get(const struct dictionary *dict, + uint32_t dist) { - size_t offset = dict->pos - dist - 1; + size_t offset = dict->pos - dist - 1; - if (dist >= dict->pos) - offset += dict->end; + if ( dist >= dict->pos ) + offset += dict->end; - return dict->full > 0 ? dict->buf[offset] : 0; + return dict->full > 0 ? dict->buf[offset] : 0; } /* @@ -332,10 +342,10 @@ static inline uint32_t INIT dict_get(const struct dictionary *dict, uint32_t dis */ static inline void INIT dict_put(struct dictionary *dict, uint8_t byte) { - dict->buf[dict->pos++] = byte; + dict->buf[dict->pos++] = byte; - if (dict->full < dict->pos) - dict->full = dict->pos; + if ( dict->full < dict->pos ) + dict->full = dict->pos; } /* @@ -343,69 +353,69 @@ static inline void INIT dict_put(struct dictionary *dict, uint8_t byte) * invalid, false is returned. On success, true is returned and *len is * updated to indicate how many bytes were left to be repeated. 
*/ -static bool_t INIT dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist) +static bool_t INIT dict_repeat(struct dictionary *dict, uint32_t *len, + uint32_t dist) { - size_t back; - uint32_t left; + size_t back; + uint32_t left; - if (dist >= dict->full || dist >= dict->size) - return false; + if ( dist >= dict->full || dist >= dict->size ) + return false; - left = min_t(size_t, dict->limit - dict->pos, *len); - *len -= left; + left = min_t(size_t, dict->limit - dict->pos, *len); + *len -= left; - back = dict->pos - dist - 1; - if (dist >= dict->pos) - back += dict->end; + back = dict->pos - dist - 1; + if ( dist >= dict->pos ) + back += dict->end; - do { - dict->buf[dict->pos++] = dict->buf[back++]; - if (back == dict->end) - back = 0; - } while (--left > 0); + do { + dict->buf[dict->pos++] = dict->buf[back++]; + if ( back == dict->end ) + back = 0; + } while ( --left > 0 ); - if (dict->full < dict->pos) - dict->full = dict->pos; + if ( dict->full < dict->pos ) + dict->full = dict->pos; - return true; + return true; } /* Copy uncompressed data as is from input to dictionary and output buffers. */ static void INIT dict_uncompressed(struct dictionary *dict, struct xz_buf *b, - uint32_t *left) + uint32_t *left) { - size_t copy_size; + size_t copy_size; - while (*left > 0 && b->in_pos < b->in_size - && b->out_pos < b->out_size) { - copy_size = min(b->in_size - b->in_pos, - b->out_size - b->out_pos); - if (copy_size > dict->end - dict->pos) - copy_size = dict->end - dict->pos; - if (copy_size > *left) - copy_size = *left; + while ( *left > 0 && b->in_pos < b->in_size && b->out_pos < b->out_size ) + { + copy_size = min(b->in_size - b->in_pos, b->out_size - b->out_pos); + if ( copy_size > dict->end - dict->pos ) + copy_size = dict->end - dict->pos; + if ( copy_size > *left ) + copy_size = *left; - *left -= copy_size; + *left -= copy_size; - memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size); - dict->pos += copy_size; + memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size); + dict->pos += copy_size; - if (dict->full < dict->pos) - dict->full = dict->pos; + if ( dict->full < dict->pos ) + dict->full = dict->pos; - if (DEC_IS_MULTI(dict->mode)) { - if (dict->pos == dict->end) - dict->pos = 0; + if ( DEC_IS_MULTI(dict->mode) ) + { + if ( dict->pos == dict->end ) + dict->pos = 0; - memcpy(b->out + b->out_pos, b->in + b->in_pos, - copy_size); - } + memcpy(b->out + b->out_pos, b->in + b->in_pos, copy_size); + } - dict->start = dict->pos; + dict->start = dict->pos; - b->out_pos += copy_size; - b->in_pos += copy_size; - } + b->out_pos += copy_size; + b->in_pos += copy_size; + } } /* @@ -415,19 +425,19 @@ static void INIT dict_uncompressed(struct dictionary *dict, struct xz_buf *b, */ static uint32_t INIT dict_flush(struct dictionary *dict, struct xz_buf *b) { - size_t copy_size = dict->pos - dict->start; + size_t copy_size = dict->pos - dict->start; - if (DEC_IS_MULTI(dict->mode)) { - if (dict->pos == dict->end) - dict->pos = 0; + if ( DEC_IS_MULTI(dict->mode) ) + { + if ( dict->pos == dict->end ) + dict->pos = 0; - memcpy(b->out + b->out_pos, dict->buf + dict->start, - copy_size); - } + memcpy(b->out + b->out_pos, dict->buf + dict->start, copy_size); + } - dict->start = dict->pos; - b->out_pos += copy_size; - return copy_size; + dict->start = dict->pos; + b->out_pos += copy_size; + return copy_size; } /***************** @@ -437,9 +447,9 @@ static uint32_t INIT dict_flush(struct dictionary *dict, struct xz_buf *b) /* Reset the range decoder. 
*/ static void INIT rc_reset(struct rc_dec *rc) { - rc->range = (uint32_t)-1; - rc->code = 0; - rc->init_bytes_left = RC_INIT_BYTES; + rc->range = (uint32_t)-1; + rc->code = 0; + rc->init_bytes_left = RC_INIT_BYTES; } /* @@ -448,21 +458,22 @@ static void INIT rc_reset(struct rc_dec *rc) */ static bool_t INIT rc_read_init(struct rc_dec *rc, struct xz_buf *b) { - while (rc->init_bytes_left > 0) { - if (b->in_pos == b->in_size) - return false; + while ( rc->init_bytes_left > 0 ) + { + if ( b->in_pos == b->in_size ) + return false; - rc->code = (rc->code << 8) + b->in[b->in_pos++]; - --rc->init_bytes_left; - } + rc->code = (rc->code << 8) + b->in[b->in_pos++]; + --rc->init_bytes_left; + } - return true; + return true; } /* Return true if there may not be enough input for the next decoding loop. */ static inline bool_t INIT rc_limit_exceeded(const struct rc_dec *rc) { - return rc->in_pos > rc->in_limit; + return rc->in_pos > rc->in_limit; } /* @@ -471,16 +482,17 @@ static inline bool_t INIT rc_limit_exceeded(const struct rc_dec *rc) */ static inline bool_t INIT rc_is_finished(const struct rc_dec *rc) { - return rc->code == 0; + return rc->code == 0; } /* Read the next input byte if needed. */ static always_inline void rc_normalize(struct rc_dec *rc) { - if (rc->range < RC_TOP_VALUE) { - rc->range <<= RC_SHIFT_BITS; - rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++]; - } + if ( rc->range < RC_TOP_VALUE ) + { + rc->range <<= RC_SHIFT_BITS; + rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++]; + } } /* @@ -496,72 +508,78 @@ static always_inline void rc_normalize(struct rc_dec *rc) */ static always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob) { - uint32_t bound; - int bit; - - rc_normalize(rc); - bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob; - if (rc->code < bound) { - rc->range = bound; - *prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS; - bit = 0; - } else { - rc->range -= bound; - rc->code -= bound; - *prob -= *prob >> RC_MOVE_BITS; - bit = 1; - } - - return bit; + uint32_t bound; + int bit; + + rc_normalize(rc); + bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob; + if ( rc->code < bound ) + { + rc->range = bound; + *prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS; + bit = 0; + } + else + { + rc->range -= bound; + rc->code -= bound; + *prob -= *prob >> RC_MOVE_BITS; + bit = 1; + } + + return bit; } /* Decode a bittree starting from the most significant bit. */ -static always_inline uint32_t rc_bittree(struct rc_dec *rc, - uint16_t *probs, uint32_t limit) +static always_inline uint32_t rc_bittree(struct rc_dec *rc, uint16_t *probs, + uint32_t limit) { - uint32_t symbol = 1; + uint32_t symbol = 1; - do { - if (rc_bit(rc, &probs[symbol])) - symbol = (symbol << 1) + 1; - else - symbol <<= 1; - } while (symbol < limit); + do { + if ( rc_bit(rc, &probs[symbol]) ) + symbol = (symbol << 1) + 1; + else + symbol <<= 1; + } while ( symbol < limit ); - return symbol; + return symbol; } /* Decode a bittree starting from the least significant bit. 
*/ -static always_inline void rc_bittree_reverse(struct rc_dec *rc, - uint16_t *probs, - uint32_t *dest, uint32_t limit) +static always_inline void rc_bittree_reverse(struct rc_dec *rc, uint16_t *probs, + uint32_t *dest, uint32_t limit) { - uint32_t symbol = 1; - uint32_t i = 0; - - do { - if (rc_bit(rc, &probs[symbol])) { - symbol = (symbol << 1) + 1; - *dest += 1 << i; - } else { - symbol <<= 1; - } - } while (++i < limit); + uint32_t symbol = 1; + uint32_t i = 0; + + do { + if ( rc_bit(rc, &probs[symbol]) ) + { + symbol = (symbol << 1) + 1; + *dest += 1 << i; + } + else + { + symbol <<= 1; + } + } while ( ++i < limit ); } /* Decode direct bits (fixed fifty-fifty probability) */ -static inline void INIT rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit) +static inline void INIT rc_direct(struct rc_dec *rc, uint32_t *dest, + uint32_t limit) { - uint32_t mask; - - do { - rc_normalize(rc); - rc->range >>= 1; - rc->code -= rc->range; - mask = (uint32_t)0 - (rc->code >> 31); - rc->code += rc->range & mask; - *dest = (*dest << 1) + (mask + 1); - } while (--limit > 0); + uint32_t mask; + + do { + rc_normalize(rc); + rc->range >>= 1; + rc->code -= rc->range; + mask = (uint32_t)0 - (rc->code >> 31); + rc->code += rc->range & mask; + *dest = (*dest << 1) + (mask + 1); + } while ( --limit > 0 ); } /******** @@ -571,114 +589,129 @@ static inline void INIT rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t li /* Get pointer to literal coder probability array. */ static uint16_t *INIT lzma_literal_probs(struct xz_dec_lzma2 *s) { - uint32_t prev_byte = dict_get(&s->dict, 0); - uint32_t low = prev_byte >> (8 - s->lzma.lc); - uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc; - return s->lzma.literal[low + high]; + uint32_t prev_byte = dict_get(&s->dict, 0); + uint32_t low = prev_byte >> (8 - s->lzma.lc); + uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc; + return s->lzma.literal[low + high]; } /* Decode a literal (one 8-bit byte) */ static void INIT lzma_literal(struct xz_dec_lzma2 *s) { - uint16_t *probs; - uint32_t symbol; - uint32_t match_byte; - uint32_t match_bit; - uint32_t offset; - uint32_t i; - - probs = lzma_literal_probs(s); - - if (lzma_state_is_literal(s->lzma.state)) { - symbol = rc_bittree(&s->rc, probs, 0x100); - } else { - symbol = 1; - match_byte = dict_get(&s->dict, s->lzma.rep0) << 1; - offset = 0x100; - - do { - match_bit = match_byte & offset; - match_byte <<= 1; - i = offset + match_bit + symbol; - - if (rc_bit(&s->rc, &probs[i])) { - symbol = (symbol << 1) + 1; - offset &= match_bit; - } else { - symbol <<= 1; - offset &= ~match_bit; - } - } while (symbol < 0x100); - } - - dict_put(&s->dict, (uint8_t)symbol); - lzma_state_literal(&s->lzma.state); + uint16_t *probs; + uint32_t symbol; + uint32_t match_byte; + uint32_t match_bit; + uint32_t offset; + uint32_t i; + + probs = lzma_literal_probs(s); + + if ( lzma_state_is_literal(s->lzma.state) ) + { + symbol = rc_bittree(&s->rc, probs, 0x100); + } + else + { + symbol = 1; + match_byte = dict_get(&s->dict, s->lzma.rep0) << 1; + offset = 0x100; + + do { + match_bit = match_byte & offset; + match_byte <<= 1; + i = offset + match_bit + symbol; + + if ( rc_bit(&s->rc, &probs[i]) ) + { + symbol = (symbol << 1) + 1; + offset &= match_bit; + } + else + { + symbol <<= 1; + offset &= ~match_bit; + } + } while ( symbol < 0x100 ); + } + + dict_put(&s->dict, (uint8_t)symbol); + lzma_state_literal(&s->lzma.state); } /* Decode the length of the match into s->lzma.len. 
*/ static void INIT lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l, - uint32_t pos_state) + uint32_t pos_state) { - uint16_t *probs; - uint32_t limit; - - if (!rc_bit(&s->rc, &l->choice)) { - probs = l->low[pos_state]; - limit = LEN_LOW_SYMBOLS; - s->lzma.len = MATCH_LEN_MIN; - } else { - if (!rc_bit(&s->rc, &l->choice2)) { - probs = l->mid[pos_state]; - limit = LEN_MID_SYMBOLS; - s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS; - } else { - probs = l->high; - limit = LEN_HIGH_SYMBOLS; - s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS - + LEN_MID_SYMBOLS; - } - } - - s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit; + uint16_t *probs; + uint32_t limit; + + if ( !rc_bit(&s->rc, &l->choice) ) + { + probs = l->low[pos_state]; + limit = LEN_LOW_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN; + } + else + { + if ( !rc_bit(&s->rc, &l->choice2) ) + { + probs = l->mid[pos_state]; + limit = LEN_MID_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS; + } + else + { + probs = l->high; + limit = LEN_HIGH_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS; + } + } + + s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit; } /* Decode a match. The distance will be stored in s->lzma.rep0. */ static void INIT lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state) { - uint16_t *probs; - uint32_t dist_slot; - uint32_t limit; - - lzma_state_match(&s->lzma.state); - - s->lzma.rep3 = s->lzma.rep2; - s->lzma.rep2 = s->lzma.rep1; - s->lzma.rep1 = s->lzma.rep0; - - lzma_len(s, &s->lzma.match_len_dec, pos_state); - - probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)]; - dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS; - - if (dist_slot < DIST_MODEL_START) { - s->lzma.rep0 = dist_slot; - } else { - limit = (dist_slot >> 1) - 1; - s->lzma.rep0 = 2 + (dist_slot & 1); - - if (dist_slot < DIST_MODEL_END) { - s->lzma.rep0 <<= limit; - probs = s->lzma.dist_special + s->lzma.rep0 - - dist_slot - 1; - rc_bittree_reverse(&s->rc, probs, - &s->lzma.rep0, limit); - } else { - rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS); - s->lzma.rep0 <<= ALIGN_BITS; - rc_bittree_reverse(&s->rc, s->lzma.dist_align, - &s->lzma.rep0, ALIGN_BITS); - } - } + uint16_t *probs; + uint32_t dist_slot; + uint32_t limit; + + lzma_state_match(&s->lzma.state); + + s->lzma.rep3 = s->lzma.rep2; + s->lzma.rep2 = s->lzma.rep1; + s->lzma.rep1 = s->lzma.rep0; + + lzma_len(s, &s->lzma.match_len_dec, pos_state); + + probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)]; + dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS; + + if ( dist_slot < DIST_MODEL_START ) + { + s->lzma.rep0 = dist_slot; + } + else + { + limit = (dist_slot >> 1) - 1; + s->lzma.rep0 = 2 + (dist_slot & 1); + + if ( dist_slot < DIST_MODEL_END ) + { + s->lzma.rep0 <<= limit; + probs = s->lzma.dist_special + s->lzma.rep0 - dist_slot - 1; + rc_bittree_reverse(&s->rc, probs, &s->lzma.rep0, limit); + } + else + { + rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS); + s->lzma.rep0 <<= ALIGN_BITS; + rc_bittree_reverse(&s->rc, s->lzma.dist_align, &s->lzma.rep0, + ALIGN_BITS); + } + } } /* @@ -687,77 +720,89 @@ static void INIT lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state) */ static void INIT lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state) { - uint32_t tmp; - - if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) { - if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[ - s->lzma.state][pos_state])) { - lzma_state_short_rep(&s->lzma.state); - s->lzma.len = 1; - return; - } - } else { - if (!rc_bit(&s->rc, 
&s->lzma.is_rep1[s->lzma.state])) { - tmp = s->lzma.rep1; - } else { - if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) { - tmp = s->lzma.rep2; - } else { - tmp = s->lzma.rep3; - s->lzma.rep3 = s->lzma.rep2; - } - - s->lzma.rep2 = s->lzma.rep1; - } - - s->lzma.rep1 = s->lzma.rep0; - s->lzma.rep0 = tmp; - } - - lzma_state_long_rep(&s->lzma.state); - lzma_len(s, &s->lzma.rep_len_dec, pos_state); + uint32_t tmp; + + if ( !rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state]) ) + { + if ( !rc_bit(&s->rc, &s->lzma.is_rep0_long[s->lzma.state][pos_state]) ) + { + lzma_state_short_rep(&s->lzma.state); + s->lzma.len = 1; + return; + } + } + else + { + if ( !rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state]) ) + { + tmp = s->lzma.rep1; + } + else + { + if ( !rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state]) ) + { + tmp = s->lzma.rep2; + } + else + { + tmp = s->lzma.rep3; + s->lzma.rep3 = s->lzma.rep2; + } + + s->lzma.rep2 = s->lzma.rep1; + } + + s->lzma.rep1 = s->lzma.rep0; + s->lzma.rep0 = tmp; + } + + lzma_state_long_rep(&s->lzma.state); + lzma_len(s, &s->lzma.rep_len_dec, pos_state); } /* LZMA decoder core */ static bool_t INIT lzma_main(struct xz_dec_lzma2 *s) { - uint32_t pos_state; - - /* - * If the dictionary was reached during the previous call, try to - * finish the possibly pending repeat in the dictionary. - */ - if (dict_has_space(&s->dict) && s->lzma.len > 0) - dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0); - - /* - * Decode more LZMA symbols. One iteration may consume up to - * LZMA_IN_REQUIRED - 1 bytes. - */ - while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) { - pos_state = s->dict.pos & s->lzma.pos_mask; - - if (!rc_bit(&s->rc, &s->lzma.is_match[ - s->lzma.state][pos_state])) { - lzma_literal(s); - } else { - if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state])) - lzma_rep_match(s, pos_state); - else - lzma_match(s, pos_state); - - if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0)) - return false; - } - } - - /* - * Having the range decoder always normalized when we are outside - * this function makes it easier to correctly handle end of the chunk. - */ - rc_normalize(&s->rc); - - return true; + uint32_t pos_state; + + /* + * If the dictionary was reached during the previous call, try to + * finish the possibly pending repeat in the dictionary. + */ + if ( dict_has_space(&s->dict) && s->lzma.len > 0 ) + dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0); + + /* + * Decode more LZMA symbols. One iteration may consume up to + * LZMA_IN_REQUIRED - 1 bytes. + */ + while ( dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc) ) + { + pos_state = s->dict.pos & s->lzma.pos_mask; + + if ( !rc_bit(&s->rc, &s->lzma.is_match[s->lzma.state][pos_state]) ) + { + lzma_literal(s); + } + else + { + if ( rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state]) ) + lzma_rep_match(s, pos_state); + else + lzma_match(s, pos_state); + + if ( !dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0) ) + return false; + } + } + + /* + * Having the range decoder always normalized when we are outside + * this function makes it easier to correctly handle end of the chunk. + */ + rc_normalize(&s->rc); + + return true; } /* @@ -766,29 +811,29 @@ static bool_t INIT lzma_main(struct xz_dec_lzma2 *s) */ static void INIT lzma_reset(struct xz_dec_lzma2 *s) { - uint16_t *probs; - size_t i; - - s->lzma.state = STATE_LIT_LIT; - s->lzma.rep0 = 0; - s->lzma.rep1 = 0; - s->lzma.rep2 = 0; - s->lzma.rep3 = 0; - - /* - * All probabilities are initialized to the same value. 
This hack - * makes the code smaller by avoiding a separate loop for each - * probability array. - * - * This could be optimized so that only that part of literal - * probabilities that are actually required. In the common case - * we would write 12 KiB less. - */ - probs = s->lzma.is_match[0]; - for (i = 0; i < PROBS_TOTAL; ++i) - probs[i] = RC_BIT_MODEL_TOTAL / 2; - - rc_reset(&s->rc); + uint16_t *probs; + size_t i; + + s->lzma.state = STATE_LIT_LIT; + s->lzma.rep0 = 0; + s->lzma.rep1 = 0; + s->lzma.rep2 = 0; + s->lzma.rep3 = 0; + + /* + * All probabilities are initialized to the same value. This hack + * makes the code smaller by avoiding a separate loop for each + * probability array. + * + * This could be optimized so that only that part of literal + * probabilities that are actually required. In the common case + * we would write 12 KiB less. + */ + probs = s->lzma.is_match[0]; + for ( i = 0; i < PROBS_TOTAL; ++i ) + probs[i] = RC_BIT_MODEL_TOTAL / 2; + + rc_reset(&s->rc); } /* @@ -798,33 +843,35 @@ static void INIT lzma_reset(struct xz_dec_lzma2 *s) */ static bool_t INIT lzma_props(struct xz_dec_lzma2 *s, uint8_t props) { - if (props > (4 * 5 + 4) * 9 + 8) - return false; + if ( props > (4 * 5 + 4) * 9 + 8 ) + return false; - s->lzma.pos_mask = 0; - while (props >= 9 * 5) { - props -= 9 * 5; - ++s->lzma.pos_mask; - } + s->lzma.pos_mask = 0; + while ( props >= 9 * 5 ) + { + props -= 9 * 5; + ++s->lzma.pos_mask; + } - s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1; + s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1; - s->lzma.literal_pos_mask = 0; - while (props >= 9) { - props -= 9; - ++s->lzma.literal_pos_mask; - } + s->lzma.literal_pos_mask = 0; + while ( props >= 9 ) + { + props -= 9; + ++s->lzma.literal_pos_mask; + } - s->lzma.lc = props; + s->lzma.lc = props; - if (s->lzma.lc + s->lzma.literal_pos_mask > 4) - return false; + if ( s->lzma.lc + s->lzma.literal_pos_mask > 4 ) + return false; - s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1; + s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1; - lzma_reset(s); + lzma_reset(s); - return true; + return true; } /********* @@ -845,83 +892,90 @@ static bool_t INIT lzma_props(struct xz_dec_lzma2 *s, uint8_t props) */ static bool_t INIT lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b) { - size_t in_avail; - uint32_t tmp; - - in_avail = b->in_size - b->in_pos; - if (s->temp.size > 0 || s->lzma2.compressed == 0) { - tmp = 2 * LZMA_IN_REQUIRED - s->temp.size; - if (tmp > s->lzma2.compressed - s->temp.size) - tmp = s->lzma2.compressed - s->temp.size; - if (tmp > in_avail) - tmp = in_avail; - - memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp); - - if (s->temp.size + tmp == s->lzma2.compressed) { - memzero(s->temp.buf + s->temp.size + tmp, - sizeof(s->temp.buf) - - s->temp.size - tmp); - s->rc.in_limit = s->temp.size + tmp; - } else if (s->temp.size + tmp < LZMA_IN_REQUIRED) { - s->temp.size += tmp; - b->in_pos += tmp; - return true; - } else { - s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED; - } - - s->rc.in = s->temp.buf; - s->rc.in_pos = 0; - - if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp) - return false; - - s->lzma2.compressed -= s->rc.in_pos; - - if (s->rc.in_pos < s->temp.size) { - s->temp.size -= s->rc.in_pos; - memmove(s->temp.buf, s->temp.buf + s->rc.in_pos, - s->temp.size); - return true; - } - - b->in_pos += s->rc.in_pos - s->temp.size; - s->temp.size = 0; - } - - in_avail = b->in_size - b->in_pos; - if (in_avail >= LZMA_IN_REQUIRED) { - s->rc.in = b->in; - s->rc.in_pos = 
b->in_pos; - - if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED) - s->rc.in_limit = b->in_pos + s->lzma2.compressed; - else - s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED; - - if (!lzma_main(s)) - return false; - - in_avail = s->rc.in_pos - b->in_pos; - if (in_avail > s->lzma2.compressed) - return false; - - s->lzma2.compressed -= in_avail; - b->in_pos = s->rc.in_pos; - } - - in_avail = b->in_size - b->in_pos; - if (in_avail < LZMA_IN_REQUIRED) { - if (in_avail > s->lzma2.compressed) - in_avail = s->lzma2.compressed; - - memcpy(s->temp.buf, b->in + b->in_pos, in_avail); - s->temp.size = in_avail; - b->in_pos += in_avail; - } - - return true; + size_t in_avail; + uint32_t tmp; + + in_avail = b->in_size - b->in_pos; + if ( s->temp.size > 0 || s->lzma2.compressed == 0 ) + { + tmp = 2 * LZMA_IN_REQUIRED - s->temp.size; + if ( tmp > s->lzma2.compressed - s->temp.size ) + tmp = s->lzma2.compressed - s->temp.size; + if ( tmp > in_avail ) + tmp = in_avail; + + memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp); + + if ( s->temp.size + tmp == s->lzma2.compressed ) + { + memzero(s->temp.buf + s->temp.size + tmp, + sizeof(s->temp.buf) - s->temp.size - tmp); + s->rc.in_limit = s->temp.size + tmp; + } + else if ( s->temp.size + tmp < LZMA_IN_REQUIRED ) + { + s->temp.size += tmp; + b->in_pos += tmp; + return true; + } + else + { + s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED; + } + + s->rc.in = s->temp.buf; + s->rc.in_pos = 0; + + if ( !lzma_main(s) || s->rc.in_pos > s->temp.size + tmp ) + return false; + + s->lzma2.compressed -= s->rc.in_pos; + + if ( s->rc.in_pos < s->temp.size ) + { + s->temp.size -= s->rc.in_pos; + memmove(s->temp.buf, s->temp.buf + s->rc.in_pos, s->temp.size); + return true; + } + + b->in_pos += s->rc.in_pos - s->temp.size; + s->temp.size = 0; + } + + in_avail = b->in_size - b->in_pos; + if ( in_avail >= LZMA_IN_REQUIRED ) + { + s->rc.in = b->in; + s->rc.in_pos = b->in_pos; + + if ( in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED ) + s->rc.in_limit = b->in_pos + s->lzma2.compressed; + else + s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED; + + if ( !lzma_main(s) ) + return false; + + in_avail = s->rc.in_pos - b->in_pos; + if ( in_avail > s->lzma2.compressed ) + return false; + + s->lzma2.compressed -= in_avail; + b->in_pos = s->rc.in_pos; + } + + in_avail = b->in_size - b->in_pos; + if ( in_avail < LZMA_IN_REQUIRED ) + { + if ( in_avail > s->lzma2.compressed ) + in_avail = s->lzma2.compressed; + + memcpy(s->temp.buf, b->in + b->in_pos, in_avail); + s->temp.size = in_avail; + b->in_pos += in_avail; + } + + return true; } /* @@ -929,247 +983,261 @@ static bool_t INIT lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b) * decoding or copying of uncompressed chunks to other functions. 
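/*
 * Illustrative sketch, separate from the hunks above: how the single LZMA
 * properties byte parsed by lzma_props() splits into lc/lp/pb using the
 * standard (pb * 5 + lp) * 9 + lc packing, and how the derived masks are
 * formed.  The function and variable names below are local to this example.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static bool decode_lzma_props(uint8_t props, unsigned *lc, unsigned *lp,
                              unsigned *pb)
{
    /* Same upper bound as lzma_props(): (4 * 5 + 4) * 9 + 8 == 224. */
    if ( props > (4 * 5 + 4) * 9 + 8 )
        return false;

    /* Division form of the subtraction loops used by lzma_props(). */
    *pb = props / (9 * 5);      /* position bits */
    props %= 9 * 5;
    *lp = props / 9;            /* literal position bits */
    *lc = props % 9;            /* literal context bits */

    /* The decoder only supports lc + lp <= 4 (same check as above). */
    return *lc + *lp <= 4;
}

int main(void)
{
    unsigned lc, lp, pb;

    /* 0x5d (93) is the common "lc=3, lp=0, pb=2" configuration. */
    if ( decode_lzma_props(0x5d, &lc, &lp, &pb) )
        printf("lc=%u lp=%u pb=%u pos_mask=%#x literal_pos_mask=%#x\n",
               lc, lp, pb, (1u << pb) - 1, (1u << lp) - 1);

    return 0;
}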
*/ XZ_EXTERN enum xz_ret INIT xz_dec_lzma2_run(struct xz_dec_lzma2 *s, - struct xz_buf *b) + struct xz_buf *b) { - uint32_t tmp; - - while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) { - switch (s->lzma2.sequence) { - case SEQ_CONTROL: - /* - * LZMA2 control byte - * - * Exact values: - * 0x00 End marker - * 0x01 Dictionary reset followed by - * an uncompressed chunk - * 0x02 Uncompressed chunk (no dictionary reset) - * - * Highest three bits (s->control & 0xE0): - * 0xE0 Dictionary reset, new properties and state - * reset, followed by LZMA compressed chunk - * 0xC0 New properties and state reset, followed - * by LZMA compressed chunk (no dictionary - * reset) - * 0xA0 State reset using old properties, - * followed by LZMA compressed chunk (no - * dictionary reset) - * 0x80 LZMA chunk (no dictionary or state reset) - * - * For LZMA compressed chunks, the lowest five bits - * (s->control & 1F) are the highest bits of the - * uncompressed size (bits 16-20). - * - * A new LZMA2 stream must begin with a dictionary - * reset. The first LZMA chunk must set new - * properties and reset the LZMA state. - * - * Values that don't match anything described above - * are invalid and we return XZ_DATA_ERROR. - */ - tmp = b->in[b->in_pos++]; - - if (tmp == 0x00) - return XZ_STREAM_END; - - if (tmp >= 0xE0 || tmp == 0x01) { - s->lzma2.need_props = true; - s->lzma2.need_dict_reset = false; - dict_reset(&s->dict, b); - } else if (s->lzma2.need_dict_reset) { - return XZ_DATA_ERROR; - } - - if (tmp >= 0x80) { - s->lzma2.uncompressed = (tmp & 0x1F) << 16; - s->lzma2.sequence = SEQ_UNCOMPRESSED_1; - - if (tmp >= 0xC0) { - /* - * When there are new properties, - * state reset is done at - * SEQ_PROPERTIES. - */ - s->lzma2.need_props = false; - s->lzma2.next_sequence - = SEQ_PROPERTIES; - - } else if (s->lzma2.need_props) { - return XZ_DATA_ERROR; - - } else { - s->lzma2.next_sequence - = SEQ_LZMA_PREPARE; - if (tmp >= 0xA0) - lzma_reset(s); - } - } else { - if (tmp > 0x02) - return XZ_DATA_ERROR; - - s->lzma2.sequence = SEQ_COMPRESSED_0; - s->lzma2.next_sequence = SEQ_COPY; - } - - break; - - case SEQ_UNCOMPRESSED_1: - s->lzma2.uncompressed - += (uint32_t)b->in[b->in_pos++] << 8; - s->lzma2.sequence = SEQ_UNCOMPRESSED_2; - break; - - case SEQ_UNCOMPRESSED_2: - s->lzma2.uncompressed - += (uint32_t)b->in[b->in_pos++] + 1; - s->lzma2.sequence = SEQ_COMPRESSED_0; - break; - - case SEQ_COMPRESSED_0: - s->lzma2.compressed - = (uint32_t)b->in[b->in_pos++] << 8; - s->lzma2.sequence = SEQ_COMPRESSED_1; - break; - - case SEQ_COMPRESSED_1: - s->lzma2.compressed - += (uint32_t)b->in[b->in_pos++] + 1; - s->lzma2.sequence = s->lzma2.next_sequence; - break; - - case SEQ_PROPERTIES: - if (!lzma_props(s, b->in[b->in_pos++])) - return XZ_DATA_ERROR; - - s->lzma2.sequence = SEQ_LZMA_PREPARE; - - /* Fall through */ - - case SEQ_LZMA_PREPARE: - if (s->lzma2.compressed < RC_INIT_BYTES) - return XZ_DATA_ERROR; - - if (!rc_read_init(&s->rc, b)) - return XZ_OK; - - s->lzma2.compressed -= RC_INIT_BYTES; - s->lzma2.sequence = SEQ_LZMA_RUN; - - /* Fall through */ - - case SEQ_LZMA_RUN: - /* - * Set dictionary limit to indicate how much we want - * to be encoded at maximum. Decode new data into the - * dictionary. Flush the new data from dictionary to - * b->out. Check if we finished decoding this chunk. - * In case the dictionary got full but we didn't fill - * the output buffer yet, we may run this loop - * multiple times without changing s->lzma2.sequence. 
- */ - dict_limit(&s->dict, min_t(size_t, - b->out_size - b->out_pos, - s->lzma2.uncompressed)); - if (!lzma2_lzma(s, b)) - return XZ_DATA_ERROR; - - s->lzma2.uncompressed -= dict_flush(&s->dict, b); - - if (s->lzma2.uncompressed == 0) { - if (s->lzma2.compressed > 0 || s->lzma.len > 0 - || !rc_is_finished(&s->rc)) - return XZ_DATA_ERROR; - - rc_reset(&s->rc); - s->lzma2.sequence = SEQ_CONTROL; - - } else if (b->out_pos == b->out_size - || (b->in_pos == b->in_size - && s->temp.size - < s->lzma2.compressed)) { - return XZ_OK; - } - - break; - - case SEQ_COPY: - dict_uncompressed(&s->dict, b, &s->lzma2.compressed); - if (s->lzma2.compressed > 0) - return XZ_OK; - - s->lzma2.sequence = SEQ_CONTROL; - break; - } - } - - return XZ_OK; + uint32_t tmp; + + while ( b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN ) + { + switch (s->lzma2.sequence) + { + case SEQ_CONTROL: + /* + * LZMA2 control byte + * + * Exact values: + * 0x00 End marker + * 0x01 Dictionary reset followed by + * an uncompressed chunk + * 0x02 Uncompressed chunk (no dictionary reset) + * + * Highest three bits (s->control & 0xE0): + * 0xE0 Dictionary reset, new properties and state + * reset, followed by LZMA compressed chunk + * 0xC0 New properties and state reset, followed + * by LZMA compressed chunk (no dictionary + * reset) + * 0xA0 State reset using old properties, + * followed by LZMA compressed chunk (no + * dictionary reset) + * 0x80 LZMA chunk (no dictionary or state reset) + * + * For LZMA compressed chunks, the lowest five bits + * (s->control & 1F) are the highest bits of the + * uncompressed size (bits 16-20). + * + * A new LZMA2 stream must begin with a dictionary + * reset. The first LZMA chunk must set new + * properties and reset the LZMA state. + * + * Values that don't match anything described above + * are invalid and we return XZ_DATA_ERROR. + */ + tmp = b->in[b->in_pos++]; + + if ( tmp == 0x00 ) + return XZ_STREAM_END; + + if ( tmp >= 0xE0 || tmp == 0x01 ) + { + s->lzma2.need_props = true; + s->lzma2.need_dict_reset = false; + dict_reset(&s->dict, b); + } + else if ( s->lzma2.need_dict_reset ) + { + return XZ_DATA_ERROR; + } + + if ( tmp >= 0x80 ) + { + s->lzma2.uncompressed = (tmp & 0x1F) << 16; + s->lzma2.sequence = SEQ_UNCOMPRESSED_1; + + if ( tmp >= 0xC0 ) + { + /* + * When there are new properties, + * state reset is done at + * SEQ_PROPERTIES. 
+ */ + s->lzma2.need_props = false; + s->lzma2.next_sequence = SEQ_PROPERTIES; + } + else if ( s->lzma2.need_props ) + { + return XZ_DATA_ERROR; + } + else + { + s->lzma2.next_sequence = SEQ_LZMA_PREPARE; + if ( tmp >= 0xA0 ) + lzma_reset(s); + } + } + else + { + if ( tmp > 0x02 ) + return XZ_DATA_ERROR; + + s->lzma2.sequence = SEQ_COMPRESSED_0; + s->lzma2.next_sequence = SEQ_COPY; + } + + break; + + case SEQ_UNCOMPRESSED_1: + s->lzma2.uncompressed += (uint32_t)b->in[b->in_pos++] << 8; + s->lzma2.sequence = SEQ_UNCOMPRESSED_2; + break; + + case SEQ_UNCOMPRESSED_2: + s->lzma2.uncompressed += (uint32_t)b->in[b->in_pos++] + 1; + s->lzma2.sequence = SEQ_COMPRESSED_0; + break; + + case SEQ_COMPRESSED_0: + s->lzma2.compressed = (uint32_t)b->in[b->in_pos++] << 8; + s->lzma2.sequence = SEQ_COMPRESSED_1; + break; + + case SEQ_COMPRESSED_1: + s->lzma2.compressed += (uint32_t)b->in[b->in_pos++] + 1; + s->lzma2.sequence = s->lzma2.next_sequence; + break; + + case SEQ_PROPERTIES: + if ( !lzma_props(s, b->in[b->in_pos++]) ) + return XZ_DATA_ERROR; + + s->lzma2.sequence = SEQ_LZMA_PREPARE; + + /* Fall through */ + + case SEQ_LZMA_PREPARE: + if ( s->lzma2.compressed < RC_INIT_BYTES ) + return XZ_DATA_ERROR; + + if ( !rc_read_init(&s->rc, b) ) + return XZ_OK; + + s->lzma2.compressed -= RC_INIT_BYTES; + s->lzma2.sequence = SEQ_LZMA_RUN; + + /* Fall through */ + + case SEQ_LZMA_RUN: + /* + * Set dictionary limit to indicate how much we want + * to be encoded at maximum. Decode new data into the + * dictionary. Flush the new data from dictionary to + * b->out. Check if we finished decoding this chunk. + * In case the dictionary got full but we didn't fill + * the output buffer yet, we may run this loop + * multiple times without changing s->lzma2.sequence. + */ + dict_limit(&s->dict, min_t(size_t, b->out_size - b->out_pos, + s->lzma2.uncompressed)); + if ( !lzma2_lzma(s, b) ) + return XZ_DATA_ERROR; + + s->lzma2.uncompressed -= dict_flush(&s->dict, b); + + if ( s->lzma2.uncompressed == 0 ) + { + if ( s->lzma2.compressed > 0 || s->lzma.len > 0 || + !rc_is_finished(&s->rc) ) + return XZ_DATA_ERROR; + + rc_reset(&s->rc); + s->lzma2.sequence = SEQ_CONTROL; + } + else if ( b->out_pos == b->out_size || + (b->in_pos == b->in_size && + s->temp.size < s->lzma2.compressed) ) + { + return XZ_OK; + } + + break; + + case SEQ_COPY: + dict_uncompressed(&s->dict, b, &s->lzma2.compressed); + if ( s->lzma2.compressed > 0 ) + return XZ_OK; + + s->lzma2.sequence = SEQ_CONTROL; + break; + } + } + + return XZ_OK; } XZ_EXTERN struct xz_dec_lzma2 *INIT xz_dec_lzma2_create(enum xz_mode mode, - uint32_t dict_max) + uint32_t dict_max) { - struct xz_dec_lzma2 *s = malloc(sizeof(*s)); - if (s == NULL) - return NULL; - - s->dict.mode = mode; - s->dict.size_max = dict_max; - - if (DEC_IS_PREALLOC(mode)) { - s->dict.buf = large_malloc(dict_max); - if (s->dict.buf == NULL) { - free(s); - return NULL; - } - } else if (DEC_IS_DYNALLOC(mode)) { - s->dict.buf = NULL; - s->dict.allocated = 0; - } - - return s; + struct xz_dec_lzma2 *s = malloc(sizeof(*s)); + if ( s == NULL ) + return NULL; + + s->dict.mode = mode; + s->dict.size_max = dict_max; + + if ( DEC_IS_PREALLOC(mode) ) + { + s->dict.buf = large_malloc(dict_max); + if ( s->dict.buf == NULL ) + { + free(s); + return NULL; + } + } + else if ( DEC_IS_DYNALLOC(mode) ) + { + s->dict.buf = NULL; + s->dict.allocated = 0; + } + + return s; } -XZ_EXTERN enum xz_ret INIT xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props) +XZ_EXTERN enum xz_ret INIT xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, + 
uint8_t props) { - /* This limits dictionary size to 3 GiB to keep parsing simpler. */ - if (props > 39) - return XZ_OPTIONS_ERROR; - - s->dict.size = 2 + (props & 1); - s->dict.size <<= (props >> 1) + 11; - - if (DEC_IS_MULTI(s->dict.mode)) { - if (s->dict.size > s->dict.size_max) - return XZ_MEMLIMIT_ERROR; - - s->dict.end = s->dict.size; - - if (DEC_IS_DYNALLOC(s->dict.mode)) { - if (s->dict.allocated < s->dict.size) { - large_free(s->dict.buf); - s->dict.buf = large_malloc(s->dict.size); - if (s->dict.buf == NULL) { - s->dict.allocated = 0; - return XZ_MEM_ERROR; - } - } - } - } - - s->lzma.len = 0; - - s->lzma2.sequence = SEQ_CONTROL; - s->lzma2.need_dict_reset = true; - - s->temp.size = 0; - - return XZ_OK; + /* This limits dictionary size to 3 GiB to keep parsing simpler. */ + if ( props > 39 ) + return XZ_OPTIONS_ERROR; + + s->dict.size = 2 + (props & 1); + s->dict.size <<= (props >> 1) + 11; + + if ( DEC_IS_MULTI(s->dict.mode) ) + { + if ( s->dict.size > s->dict.size_max ) + return XZ_MEMLIMIT_ERROR; + + s->dict.end = s->dict.size; + + if ( DEC_IS_DYNALLOC(s->dict.mode) ) + { + if ( s->dict.allocated < s->dict.size ) + { + large_free(s->dict.buf); + s->dict.buf = large_malloc(s->dict.size); + if ( s->dict.buf == NULL ) + { + s->dict.allocated = 0; + return XZ_MEM_ERROR; + } + } + } + } + + s->lzma.len = 0; + + s->lzma2.sequence = SEQ_CONTROL; + s->lzma2.need_dict_reset = true; + + s->temp.size = 0; + + return XZ_OK; } XZ_EXTERN void INIT xz_dec_lzma2_end(struct xz_dec_lzma2 *s) { - if (DEC_IS_MULTI(s->dict.mode)) - large_free(s->dict.buf); + if ( DEC_IS_MULTI(s->dict.mode) ) + large_free(s->dict.buf); - free(s); + free(s); } diff --git a/xen/common/xz/dec_stream.c b/xen/common/xz/dec_stream.c index b8b566307c..8859bbf8af 100644 --- a/xen/common/xz/dec_stream.c +++ b/xen/common/xz/dec_stream.c @@ -11,141 +11,143 @@ #include "stream.h" /* Hash used to validate the Index field */ -struct xz_dec_hash { - vli_type unpadded; - vli_type uncompressed; - uint32_t crc32; +struct xz_dec_hash +{ + vli_type unpadded; + vli_type uncompressed; + uint32_t crc32; }; -struct xz_dec { - /* Position in dec_main() */ - enum { - SEQ_STREAM_HEADER, - SEQ_BLOCK_START, - SEQ_BLOCK_HEADER, - SEQ_BLOCK_UNCOMPRESS, - SEQ_BLOCK_PADDING, - SEQ_BLOCK_CHECK, - SEQ_INDEX, - SEQ_INDEX_PADDING, - SEQ_INDEX_CRC32, - SEQ_STREAM_FOOTER - } sequence; - - /* Position in variable-length integers and Check fields */ - uint32_t pos; - - /* Variable-length integer decoded by dec_vli() */ - vli_type vli; - - /* Saved in_pos and out_pos */ - size_t in_start; - size_t out_start; - - /* CRC32 value in Block or Index */ - uint32_t crc32; - - /* Type of the integrity check calculated from uncompressed data */ - enum xz_check check_type; - - /* Operation mode */ - enum xz_mode mode; - - /* - * True if the next call to xz_dec_run() is allowed to return - * XZ_BUF_ERROR. - */ - bool_t allow_buf_error; - - /* Information stored in Block Header */ - struct { - /* - * Value stored in the Compressed Size field, or - * VLI_UNKNOWN if Compressed Size is not present. - */ - vli_type compressed; - - /* - * Value stored in the Uncompressed Size field, or - * VLI_UNKNOWN if Uncompressed Size is not present. 
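/*
 * Illustrative sketch, separate from the hunks above: the dictionary-size
 * encoding handled by xz_dec_lzma2_reset().  One byte in the range 0-39
 * selects sizes from 4 KiB up to the 3 GiB limit mentioned in its comment.
 * The helper below is local to this example.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t lzma2_dict_size(uint8_t props)
{
    uint64_t size;

    if ( props > 39 )           /* same limit as xz_dec_lzma2_reset() */
        return 0;

    size = 2 + (props & 1);
    size <<= (props >> 1) + 11;

    return size;
}

int main(void)
{
    /* props 0 -> 4 KiB, props 1 -> 6 KiB, ..., props 39 -> 3 GiB. */
    printf("%llu %llu %llu\n",
           (unsigned long long)lzma2_dict_size(0),
           (unsigned long long)lzma2_dict_size(1),
           (unsigned long long)lzma2_dict_size(39));

    return 0;
}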
- */ - vli_type uncompressed; - - /* Size of the Block Header field */ - uint32_t size; - } block_header; - - /* Information collected when decoding Blocks */ - struct { - /* Observed compressed size of the current Block */ - vli_type compressed; - - /* Observed uncompressed size of the current Block */ - vli_type uncompressed; - - /* Number of Blocks decoded so far */ - vli_type count; - - /* - * Hash calculated from the Block sizes. This is used to - * validate the Index field. - */ - struct xz_dec_hash hash; - } block; - - /* Variables needed when verifying the Index field */ - struct { - /* Position in dec_index() */ - enum { - SEQ_INDEX_COUNT, - SEQ_INDEX_UNPADDED, - SEQ_INDEX_UNCOMPRESSED - } sequence; - - /* Size of the Index in bytes */ - vli_type size; - - /* Number of Records (matches block.count in valid files) */ - vli_type count; - - /* - * Hash calculated from the Records (matches block.hash in - * valid files). - */ - struct xz_dec_hash hash; - } index; - - /* - * Temporary buffer needed to hold Stream Header, Block Header, - * and Stream Footer. The Block Header is the biggest (1 KiB) - * so we reserve space according to that. buf[] has to be aligned - * to a multiple of four bytes; the size_t variables before it - * should guarantee this. - */ - struct { - size_t pos; - size_t size; - uint8_t buf[1024]; - } temp; - - struct xz_dec_lzma2 *lzma2; +struct xz_dec +{ + /* Position in dec_main() */ + enum + { + SEQ_STREAM_HEADER, + SEQ_BLOCK_START, + SEQ_BLOCK_HEADER, + SEQ_BLOCK_UNCOMPRESS, + SEQ_BLOCK_PADDING, + SEQ_BLOCK_CHECK, + SEQ_INDEX, + SEQ_INDEX_PADDING, + SEQ_INDEX_CRC32, + SEQ_STREAM_FOOTER + } sequence; + + /* Position in variable-length integers and Check fields */ + uint32_t pos; + + /* Variable-length integer decoded by dec_vli() */ + vli_type vli; + + /* Saved in_pos and out_pos */ + size_t in_start; + size_t out_start; + + /* CRC32 value in Block or Index */ + uint32_t crc32; + + /* Type of the integrity check calculated from uncompressed data */ + enum xz_check check_type; + + /* Operation mode */ + enum xz_mode mode; + + /* + * True if the next call to xz_dec_run() is allowed to return + * XZ_BUF_ERROR. + */ + bool_t allow_buf_error; + + /* Information stored in Block Header */ + struct + { + /* + * Value stored in the Compressed Size field, or + * VLI_UNKNOWN if Compressed Size is not present. + */ + vli_type compressed; + + /* + * Value stored in the Uncompressed Size field, or + * VLI_UNKNOWN if Uncompressed Size is not present. + */ + vli_type uncompressed; + + /* Size of the Block Header field */ + uint32_t size; + } block_header; + + /* Information collected when decoding Blocks */ + struct + { + /* Observed compressed size of the current Block */ + vli_type compressed; + + /* Observed uncompressed size of the current Block */ + vli_type uncompressed; + + /* Number of Blocks decoded so far */ + vli_type count; + + /* + * Hash calculated from the Block sizes. This is used to + * validate the Index field. + */ + struct xz_dec_hash hash; + } block; + + /* Variables needed when verifying the Index field */ + struct + { + /* Position in dec_index() */ + enum + { + SEQ_INDEX_COUNT, + SEQ_INDEX_UNPADDED, + SEQ_INDEX_UNCOMPRESSED + } sequence; + + /* Size of the Index in bytes */ + vli_type size; + + /* Number of Records (matches block.count in valid files) */ + vli_type count; + + /* + * Hash calculated from the Records (matches block.hash in + * valid files). 
+ */ + struct xz_dec_hash hash; + } index; + + /* + * Temporary buffer needed to hold Stream Header, Block Header, + * and Stream Footer. The Block Header is the biggest (1 KiB) + * so we reserve space according to that. buf[] has to be aligned + * to a multiple of four bytes; the size_t variables before it + * should guarantee this. + */ + struct + { + size_t pos; + size_t size; + uint8_t buf[1024]; + } temp; + + struct xz_dec_lzma2 *lzma2; #ifdef XZ_DEC_BCJ - struct xz_dec_bcj *bcj; - bool_t bcj_active; + struct xz_dec_bcj *bcj; + bool_t bcj_active; #endif }; #ifdef XZ_DEC_ANY_CHECK /* Sizes of the Check field with different Check IDs */ -static const uint8_t check_sizes[16] = { - 0, - 4, 4, 4, - 8, 8, 8, - 16, 16, 16, - 32, 32, 32, - 64, 64, 64 -}; +static const uint8_t check_sizes[16] = {0, 4, 4, 4, 8, 8, 8, 16, + 16, 16, 32, 32, 32, 64, 64, 64}; #endif /* @@ -156,51 +158,54 @@ static const uint8_t check_sizes[16] = { */ static bool_t INIT fill_temp(struct xz_dec *s, struct xz_buf *b) { - size_t copy_size = min_t(size_t, - b->in_size - b->in_pos, s->temp.size - s->temp.pos); + size_t copy_size = + min_t(size_t, b->in_size - b->in_pos, s->temp.size - s->temp.pos); - memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size); - b->in_pos += copy_size; - s->temp.pos += copy_size; + memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size); + b->in_pos += copy_size; + s->temp.pos += copy_size; - if (s->temp.pos == s->temp.size) { - s->temp.pos = 0; - return true; - } + if ( s->temp.pos == s->temp.size ) + { + s->temp.pos = 0; + return true; + } - return false; + return false; } /* Decode a variable-length integer (little-endian base-128 encoding) */ static enum xz_ret INIT dec_vli(struct xz_dec *s, const uint8_t *in, - size_t *in_pos, size_t in_size) + size_t *in_pos, size_t in_size) { - uint8_t byte; + uint8_t byte; - if (s->pos == 0) - s->vli = 0; + if ( s->pos == 0 ) + s->vli = 0; - while (*in_pos < in_size) { - byte = in[*in_pos]; - ++*in_pos; + while ( *in_pos < in_size ) + { + byte = in[*in_pos]; + ++*in_pos; - s->vli |= (vli_type)(byte & 0x7F) << s->pos; + s->vli |= (vli_type)(byte & 0x7F) << s->pos; - if ((byte & 0x80) == 0) { - /* Don't allow non-minimal encodings. */ - if (byte == 0 && s->pos != 0) - return XZ_DATA_ERROR; + if ( (byte & 0x80) == 0 ) + { + /* Don't allow non-minimal encodings. */ + if ( byte == 0 && s->pos != 0 ) + return XZ_DATA_ERROR; - s->pos = 0; - return XZ_STREAM_END; - } + s->pos = 0; + return XZ_STREAM_END; + } - s->pos += 7; - if (s->pos == 7 * VLI_BYTES_MAX) - return XZ_DATA_ERROR; - } + s->pos += 7; + if ( s->pos == 7 * VLI_BYTES_MAX ) + return XZ_DATA_ERROR; + } - return XZ_OK; + return XZ_OK; } /* @@ -217,72 +222,69 @@ static enum xz_ret INIT dec_vli(struct xz_dec *s, const uint8_t *in, */ static enum xz_ret INIT dec_block(struct xz_dec *s, struct xz_buf *b) { - enum xz_ret ret; + enum xz_ret ret; - s->in_start = b->in_pos; - s->out_start = b->out_pos; + s->in_start = b->in_pos; + s->out_start = b->out_pos; #ifdef XZ_DEC_BCJ - if (s->bcj_active) - ret = xz_dec_bcj_run(s->bcj, s->lzma2, b); - else + if ( s->bcj_active ) + ret = xz_dec_bcj_run(s->bcj, s->lzma2, b); + else #endif - ret = xz_dec_lzma2_run(s->lzma2, b); - - s->block.compressed += b->in_pos - s->in_start; - s->block.uncompressed += b->out_pos - s->out_start; - - /* - * There is no need to separately check for VLI_UNKNOWN, since - * the observed sizes are always smaller than VLI_UNKNOWN. 
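/*
 * Illustrative sketch, separate from the hunks above: the little-endian
 * base-128 ("VLI") encoding decoded incrementally by dec_vli().  Each byte
 * carries seven payload bits and the top bit marks a continuation; this
 * stand-alone variant decodes from a complete buffer and keeps the same
 * rejection of non-minimal encodings.  All names are local to this example.
 */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static int vli_decode(const uint8_t *in, size_t in_size, uint64_t *out)
{
    uint64_t value = 0;
    unsigned shift = 0;
    size_t i;

    /* The real decoder bounds this with VLI_BYTES_MAX; 9 bytes cover 63 bits. */
    for ( i = 0; i < in_size && shift < 7 * 9; i++, shift += 7 )
    {
        value |= (uint64_t)(in[i] & 0x7F) << shift;

        if ( (in[i] & 0x80) == 0 )
        {
            /* Reject non-minimal encodings, as dec_vli() does. */
            if ( in[i] == 0 && shift != 0 )
                return -1;

            *out = value;
            return 0;
        }
    }

    return -1;                  /* truncated or over-long input */
}

int main(void)
{
    /* 0x90 0x4E encodes 0x10 + (0x4E << 7) = 10000 (decimal). */
    const uint8_t buf[] = { 0x90, 0x4E };
    uint64_t v;

    if ( vli_decode(buf, sizeof(buf), &v) == 0 )
        printf("%llu\n", (unsigned long long)v);

    return 0;
}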
- */ - if (s->block.compressed > s->block_header.compressed - || s->block.uncompressed - > s->block_header.uncompressed) - return XZ_DATA_ERROR; - - if (s->check_type == XZ_CHECK_CRC32) - s->crc32 = xz_crc32(b->out + s->out_start, - b->out_pos - s->out_start, s->crc32); - - if (ret == XZ_STREAM_END) { - if (s->block_header.compressed != VLI_UNKNOWN - && s->block_header.compressed - != s->block.compressed) - return XZ_DATA_ERROR; - - if (s->block_header.uncompressed != VLI_UNKNOWN - && s->block_header.uncompressed - != s->block.uncompressed) - return XZ_DATA_ERROR; - - s->block.hash.unpadded += s->block_header.size - + s->block.compressed; + ret = xz_dec_lzma2_run(s->lzma2, b); + + s->block.compressed += b->in_pos - s->in_start; + s->block.uncompressed += b->out_pos - s->out_start; + + /* + * There is no need to separately check for VLI_UNKNOWN, since + * the observed sizes are always smaller than VLI_UNKNOWN. + */ + if ( s->block.compressed > s->block_header.compressed || + s->block.uncompressed > s->block_header.uncompressed ) + return XZ_DATA_ERROR; + + if ( s->check_type == XZ_CHECK_CRC32 ) + s->crc32 = xz_crc32(b->out + s->out_start, b->out_pos - s->out_start, + s->crc32); + + if ( ret == XZ_STREAM_END ) + { + if ( s->block_header.compressed != VLI_UNKNOWN && + s->block_header.compressed != s->block.compressed ) + return XZ_DATA_ERROR; + + if ( s->block_header.uncompressed != VLI_UNKNOWN && + s->block_header.uncompressed != s->block.uncompressed ) + return XZ_DATA_ERROR; + + s->block.hash.unpadded += s->block_header.size + s->block.compressed; #ifdef XZ_DEC_ANY_CHECK - s->block.hash.unpadded += check_sizes[s->check_type]; + s->block.hash.unpadded += check_sizes[s->check_type]; #else - if (s->check_type == XZ_CHECK_CRC32) - s->block.hash.unpadded += 4; + if ( s->check_type == XZ_CHECK_CRC32 ) + s->block.hash.unpadded += 4; #endif - s->block.hash.uncompressed += s->block.uncompressed; - s->block.hash.crc32 = xz_crc32( - (const uint8_t *)&s->block.hash, - sizeof(s->block.hash), s->block.hash.crc32); + s->block.hash.uncompressed += s->block.uncompressed; + s->block.hash.crc32 = + xz_crc32((const uint8_t *)&s->block.hash, sizeof(s->block.hash), + s->block.hash.crc32); - ++s->block.count; - } + ++s->block.count; + } - return ret; + return ret; } /* Update the Index size and the CRC32 value. */ static void INIT index_update(struct xz_dec *s, const struct xz_buf *b) { - size_t in_used = b->in_pos - s->in_start; - s->index.size += in_used; - s->crc32 = xz_crc32(b->in + s->in_start, in_used, s->crc32); + size_t in_used = b->in_pos - s->in_start; + s->index.size += in_used; + s->crc32 = xz_crc32(b->in + s->in_start, in_used, s->crc32); } /* @@ -295,48 +297,49 @@ static void INIT index_update(struct xz_dec *s, const struct xz_buf *b) */ static enum xz_ret INIT dec_index(struct xz_dec *s, struct xz_buf *b) { - enum xz_ret ret; - - do { - ret = dec_vli(s, b->in, &b->in_pos, b->in_size); - if (ret != XZ_STREAM_END) { - index_update(s, b); - return ret; - } - - switch (s->index.sequence) { - case SEQ_INDEX_COUNT: - s->index.count = s->vli; - - /* - * Validate that the Number of Records field - * indicates the same number of Records as - * there were Blocks in the Stream. 
- */ - if (s->index.count != s->block.count) - return XZ_DATA_ERROR; - - s->index.sequence = SEQ_INDEX_UNPADDED; - break; - - case SEQ_INDEX_UNPADDED: - s->index.hash.unpadded += s->vli; - s->index.sequence = SEQ_INDEX_UNCOMPRESSED; - break; - - case SEQ_INDEX_UNCOMPRESSED: - s->index.hash.uncompressed += s->vli; - s->index.hash.crc32 = xz_crc32( - (const uint8_t *)&s->index.hash, - sizeof(s->index.hash), - s->index.hash.crc32); - --s->index.count; - s->index.sequence = SEQ_INDEX_UNPADDED; - break; - } - } while (s->index.count > 0); - - return XZ_STREAM_END; + enum xz_ret ret; + + do { + ret = dec_vli(s, b->in, &b->in_pos, b->in_size); + if ( ret != XZ_STREAM_END ) + { + index_update(s, b); + return ret; + } + + switch (s->index.sequence) + { + case SEQ_INDEX_COUNT: + s->index.count = s->vli; + + /* + * Validate that the Number of Records field + * indicates the same number of Records as + * there were Blocks in the Stream. + */ + if ( s->index.count != s->block.count ) + return XZ_DATA_ERROR; + + s->index.sequence = SEQ_INDEX_UNPADDED; + break; + + case SEQ_INDEX_UNPADDED: + s->index.hash.unpadded += s->vli; + s->index.sequence = SEQ_INDEX_UNCOMPRESSED; + break; + + case SEQ_INDEX_UNCOMPRESSED: + s->index.hash.uncompressed += s->vli; + s->index.hash.crc32 = + xz_crc32((const uint8_t *)&s->index.hash, sizeof(s->index.hash), + s->index.hash.crc32); + --s->index.count; + s->index.sequence = SEQ_INDEX_UNPADDED; + break; + } + } while ( s->index.count > 0 ); + + return XZ_STREAM_END; } /* @@ -345,21 +348,21 @@ static enum xz_ret INIT dec_index(struct xz_dec *s, struct xz_buf *b) */ static enum xz_ret INIT crc32_validate(struct xz_dec *s, struct xz_buf *b) { - do { - if (b->in_pos == b->in_size) - return XZ_OK; + do { + if ( b->in_pos == b->in_size ) + return XZ_OK; - if (((s->crc32 >> s->pos) & 0xFF) != b->in[b->in_pos++]) - return XZ_DATA_ERROR; + if ( ((s->crc32 >> s->pos) & 0xFF) != b->in[b->in_pos++] ) + return XZ_DATA_ERROR; - s->pos += 8; + s->pos += 8; - } while (s->pos < 32); + } while ( s->pos < 32 ); - s->crc32 = 0; - s->pos = 0; + s->crc32 = 0; + s->pos = 0; - return XZ_STREAM_END; + return XZ_STREAM_END; } #ifdef XZ_DEC_ANY_CHECK @@ -369,353 +372,366 @@ static enum xz_ret INIT crc32_validate(struct xz_dec *s, struct xz_buf *b) */ static bool_t INIT check_skip(struct xz_dec *s, struct xz_buf *b) { - while (s->pos < check_sizes[s->check_type]) { - if (b->in_pos == b->in_size) - return false; + while ( s->pos < check_sizes[s->check_type] ) + { + if ( b->in_pos == b->in_size ) + return false; - ++b->in_pos; - ++s->pos; - } + ++b->in_pos; + ++s->pos; + } - s->pos = 0; + s->pos = 0; - return true; + return true; } #endif /* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */ static enum xz_ret INIT dec_stream_header(struct xz_dec *s) { - if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE)) - return XZ_FORMAT_ERROR; + if ( !memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE) ) + return XZ_FORMAT_ERROR; - if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0) - != get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2)) - return XZ_DATA_ERROR; + if ( xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0) != + get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2) ) + return XZ_DATA_ERROR; - if (s->temp.buf[HEADER_MAGIC_SIZE] != 0) - return XZ_OPTIONS_ERROR; + if ( s->temp.buf[HEADER_MAGIC_SIZE] != 0 ) + return XZ_OPTIONS_ERROR; - /* - * Of integrity checks, we support only none (Check ID = 0) and - * CRC32 (Check ID = 1). 
However, if XZ_DEC_ANY_CHECK is defined, - * we will accept other check types too, but then the check won't - * be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given. - */ - s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1]; + /* + * Of integrity checks, we support only none (Check ID = 0) and + * CRC32 (Check ID = 1). However, if XZ_DEC_ANY_CHECK is defined, + * we will accept other check types too, but then the check won't + * be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given. + */ + s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1]; #ifdef XZ_DEC_ANY_CHECK - if (s->check_type > XZ_CHECK_MAX) - return XZ_OPTIONS_ERROR; + if ( s->check_type > XZ_CHECK_MAX ) + return XZ_OPTIONS_ERROR; - if (s->check_type > XZ_CHECK_CRC32) - return XZ_UNSUPPORTED_CHECK; + if ( s->check_type > XZ_CHECK_CRC32 ) + return XZ_UNSUPPORTED_CHECK; #else - if (s->check_type > XZ_CHECK_CRC32) - return XZ_OPTIONS_ERROR; + if ( s->check_type > XZ_CHECK_CRC32 ) + return XZ_OPTIONS_ERROR; #endif - return XZ_OK; + return XZ_OK; } /* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */ static enum xz_ret INIT dec_stream_footer(struct xz_dec *s) { - if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE)) - return XZ_DATA_ERROR; - - if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf)) - return XZ_DATA_ERROR; - - /* - * Validate Backward Size. Note that we never added the size of the - * Index CRC32 field to s->index.size, thus we use s->index.size / 4 - * instead of s->index.size / 4 - 1. - */ - if ((s->index.size >> 2) != get_le32(s->temp.buf + 4)) - return XZ_DATA_ERROR; - - if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type) - return XZ_DATA_ERROR; - - /* - * Use XZ_STREAM_END instead of XZ_OK to be more convenient - * for the caller. - */ - return XZ_STREAM_END; + if ( !memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE) ) + return XZ_DATA_ERROR; + + if ( xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf) ) + return XZ_DATA_ERROR; + + /* + * Validate Backward Size. Note that we never added the size of the + * Index CRC32 field to s->index.size, thus we use s->index.size / 4 + * instead of s->index.size / 4 - 1. + */ + if ( (s->index.size >> 2) != get_le32(s->temp.buf + 4) ) + return XZ_DATA_ERROR; + + if ( s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type ) + return XZ_DATA_ERROR; + + /* + * Use XZ_STREAM_END instead of XZ_OK to be more convenient + * for the caller. + */ + return XZ_STREAM_END; } /* Decode the Block Header and initialize the filter chain. */ static enum xz_ret INIT dec_block_header(struct xz_dec *s) { - enum xz_ret ret; - - /* - * Validate the CRC32. We know that the temp buffer is at least - * eight bytes so this is safe. - */ - s->temp.size -= 4; - if (xz_crc32(s->temp.buf, s->temp.size, 0) - != get_le32(s->temp.buf + s->temp.size)) - return XZ_DATA_ERROR; - - s->temp.pos = 2; - - /* - * Catch unsupported Block Flags. We support only one or two filters - * in the chain, so we catch that with the same test. - */ + enum xz_ret ret; + + /* + * Validate the CRC32. We know that the temp buffer is at least + * eight bytes so this is safe. + */ + s->temp.size -= 4; + if ( xz_crc32(s->temp.buf, s->temp.size, 0) != + get_le32(s->temp.buf + s->temp.size) ) + return XZ_DATA_ERROR; + + s->temp.pos = 2; + + /* + * Catch unsupported Block Flags. We support only one or two filters + * in the chain, so we catch that with the same test. 
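/*
 * Illustrative sketch, separate from the hunks above: the 12-byte .xz
 * Stream Header validated by dec_stream_header(): six magic bytes, two
 * stream flag bytes (the second one is the Check ID) and a little-endian
 * CRC32 of the flag bytes.  The helper names and the bitwise CRC32 below
 * are local to this example; the decoder itself uses xz_crc32().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static const uint8_t xz_magic[6] = { 0xFD, '7', 'z', 'X', 'Z', 0x00 };

static uint32_t crc32_ieee(const uint8_t *buf, size_t len)
{
    uint32_t crc = 0xFFFFFFFFu;
    size_t i;
    int bit;

    for ( i = 0; i < len; i++ )
    {
        crc ^= buf[i];
        for ( bit = 0; bit < 8; bit++ )
            crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
    }

    return ~crc;
}

static bool stream_header_ok(const uint8_t hdr[12], uint8_t *check_type)
{
    uint32_t stored_crc = hdr[8] | (hdr[9] << 8) | ((uint32_t)hdr[10] << 16) |
                          ((uint32_t)hdr[11] << 24);

    if ( memcmp(hdr, xz_magic, sizeof(xz_magic)) != 0 )
        return false;                 /* XZ_FORMAT_ERROR in the decoder */

    if ( crc32_ieee(hdr + 6, 2) != stored_crc )
        return false;                 /* XZ_DATA_ERROR */

    if ( hdr[6] != 0 )
        return false;                 /* reserved flags: XZ_OPTIONS_ERROR */

    *check_type = hdr[7];             /* 0 = none, 1 = CRC32, ... */

    return true;
}

int main(void)
{
    uint8_t hdr[12] = { 0 };
    uint32_t crc;
    uint8_t check = 0;

    /* Build a header declaring CRC32 (Check ID 1) as the integrity check. */
    memcpy(hdr, xz_magic, sizeof(xz_magic));
    hdr[6] = 0x00;
    hdr[7] = 0x01;
    crc = crc32_ieee(hdr + 6, 2);
    hdr[8] = crc & 0xFF;
    hdr[9] = (crc >> 8) & 0xFF;
    hdr[10] = (crc >> 16) & 0xFF;
    hdr[11] = (crc >> 24) & 0xFF;

    printf("valid=%d check=%d\n", stream_header_ok(hdr, &check), check);

    return 0;
}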
+ */ #ifdef XZ_DEC_BCJ - if (s->temp.buf[1] & 0x3E) + if ( s->temp.buf[1] & 0x3E ) #else - if (s->temp.buf[1] & 0x3F) + if ( s->temp.buf[1] & 0x3F ) #endif - return XZ_OPTIONS_ERROR; - - /* Compressed Size */ - if (s->temp.buf[1] & 0x40) { - if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) - != XZ_STREAM_END) - return XZ_DATA_ERROR; - - s->block_header.compressed = s->vli; - } else { - s->block_header.compressed = VLI_UNKNOWN; - } - - /* Uncompressed Size */ - if (s->temp.buf[1] & 0x80) { - if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) - != XZ_STREAM_END) - return XZ_DATA_ERROR; - - s->block_header.uncompressed = s->vli; - } else { - s->block_header.uncompressed = VLI_UNKNOWN; - } + return XZ_OPTIONS_ERROR; + + /* Compressed Size */ + if ( s->temp.buf[1] & 0x40 ) + { + if ( dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) != + XZ_STREAM_END ) + return XZ_DATA_ERROR; + + s->block_header.compressed = s->vli; + } + else + { + s->block_header.compressed = VLI_UNKNOWN; + } + + /* Uncompressed Size */ + if ( s->temp.buf[1] & 0x80 ) + { + if ( dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) != + XZ_STREAM_END ) + return XZ_DATA_ERROR; + + s->block_header.uncompressed = s->vli; + } + else + { + s->block_header.uncompressed = VLI_UNKNOWN; + } #ifdef XZ_DEC_BCJ - /* If there are two filters, the first one must be a BCJ filter. */ - s->bcj_active = s->temp.buf[1] & 0x01; - if (s->bcj_active) { - if (s->temp.size - s->temp.pos < 2) - return XZ_OPTIONS_ERROR; - - ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]); - if (ret != XZ_OK) - return ret; - - /* - * We don't support custom start offset, - * so Size of Properties must be zero. - */ - if (s->temp.buf[s->temp.pos++] != 0x00) - return XZ_OPTIONS_ERROR; - } + /* If there are two filters, the first one must be a BCJ filter. */ + s->bcj_active = s->temp.buf[1] & 0x01; + if ( s->bcj_active ) + { + if ( s->temp.size - s->temp.pos < 2 ) + return XZ_OPTIONS_ERROR; + + ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]); + if ( ret != XZ_OK ) + return ret; + + /* + * We don't support custom start offset, + * so Size of Properties must be zero. + */ + if ( s->temp.buf[s->temp.pos++] != 0x00 ) + return XZ_OPTIONS_ERROR; + } #endif - /* Valid Filter Flags always take at least two bytes. */ - if (s->temp.size - s->temp.pos < 2) - return XZ_DATA_ERROR; + /* Valid Filter Flags always take at least two bytes. */ + if ( s->temp.size - s->temp.pos < 2 ) + return XZ_DATA_ERROR; - /* Filter ID = LZMA2 */ - if (s->temp.buf[s->temp.pos++] != 0x21) - return XZ_OPTIONS_ERROR; + /* Filter ID = LZMA2 */ + if ( s->temp.buf[s->temp.pos++] != 0x21 ) + return XZ_OPTIONS_ERROR; - /* Size of Properties = 1-byte Filter Properties */ - if (s->temp.buf[s->temp.pos++] != 0x01) - return XZ_OPTIONS_ERROR; + /* Size of Properties = 1-byte Filter Properties */ + if ( s->temp.buf[s->temp.pos++] != 0x01 ) + return XZ_OPTIONS_ERROR; - /* Filter Properties contains LZMA2 dictionary size. */ - if (s->temp.size - s->temp.pos < 1) - return XZ_DATA_ERROR; + /* Filter Properties contains LZMA2 dictionary size. */ + if ( s->temp.size - s->temp.pos < 1 ) + return XZ_DATA_ERROR; - ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]); - if (ret != XZ_OK) - return ret; + ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]); + if ( ret != XZ_OK ) + return ret; - /* The rest must be Header Padding. */ - while (s->temp.pos < s->temp.size) - if (s->temp.buf[s->temp.pos++] != 0x00) - return XZ_OPTIONS_ERROR; + /* The rest must be Header Padding. 
*/ + while ( s->temp.pos < s->temp.size ) + if ( s->temp.buf[s->temp.pos++] != 0x00 ) + return XZ_OPTIONS_ERROR; - s->temp.pos = 0; - s->block.compressed = 0; - s->block.uncompressed = 0; + s->temp.pos = 0; + s->block.compressed = 0; + s->block.uncompressed = 0; - return XZ_OK; + return XZ_OK; } static enum xz_ret INIT dec_main(struct xz_dec *s, struct xz_buf *b) { - enum xz_ret ret; - - /* - * Store the start position for the case when we are in the middle - * of the Index field. - */ - s->in_start = b->in_pos; - - while (true) { - switch (s->sequence) { - case SEQ_STREAM_HEADER: - /* - * Stream Header is copied to s->temp, and then - * decoded from there. This way if the caller - * gives us only little input at a time, we can - * still keep the Stream Header decoding code - * simple. Similar approach is used in many places - * in this file. - */ - if (!fill_temp(s, b)) - return XZ_OK; - - /* - * If dec_stream_header() returns - * XZ_UNSUPPORTED_CHECK, it is still possible - * to continue decoding if working in multi-call - * mode. Thus, update s->sequence before calling - * dec_stream_header(). - */ - s->sequence = SEQ_BLOCK_START; - - ret = dec_stream_header(s); - if (ret != XZ_OK) - return ret; - - case SEQ_BLOCK_START: - /* We need one byte of input to continue. */ - if (b->in_pos == b->in_size) - return XZ_OK; - - /* See if this is the beginning of the Index field. */ - if (b->in[b->in_pos] == 0) { - s->in_start = b->in_pos++; - s->sequence = SEQ_INDEX; - break; - } - - /* - * Calculate the size of the Block Header and - * prepare to decode it. - */ - s->block_header.size - = ((uint32_t)b->in[b->in_pos] + 1) * 4; - - s->temp.size = s->block_header.size; - s->temp.pos = 0; - s->sequence = SEQ_BLOCK_HEADER; - - case SEQ_BLOCK_HEADER: - if (!fill_temp(s, b)) - return XZ_OK; - - ret = dec_block_header(s); - if (ret != XZ_OK) - return ret; - - s->sequence = SEQ_BLOCK_UNCOMPRESS; - - case SEQ_BLOCK_UNCOMPRESS: - ret = dec_block(s, b); - if (ret != XZ_STREAM_END) - return ret; - - s->sequence = SEQ_BLOCK_PADDING; - - case SEQ_BLOCK_PADDING: - /* - * Size of Compressed Data + Block Padding - * must be a multiple of four. We don't need - * s->block.compressed for anything else - * anymore, so we use it here to test the size - * of the Block Padding field. - */ - while (s->block.compressed & 3) { - if (b->in_pos == b->in_size) - return XZ_OK; - - if (b->in[b->in_pos++] != 0) - return XZ_DATA_ERROR; - - ++s->block.compressed; - } - - s->sequence = SEQ_BLOCK_CHECK; - - case SEQ_BLOCK_CHECK: - if (s->check_type == XZ_CHECK_CRC32) { - ret = crc32_validate(s, b); - if (ret != XZ_STREAM_END) - return ret; - } + enum xz_ret ret; + + /* + * Store the start position for the case when we are in the middle + * of the Index field. + */ + s->in_start = b->in_pos; + + while ( true ) + { + switch (s->sequence) + { + case SEQ_STREAM_HEADER: + /* + * Stream Header is copied to s->temp, and then + * decoded from there. This way if the caller + * gives us only little input at a time, we can + * still keep the Stream Header decoding code + * simple. Similar approach is used in many places + * in this file. + */ + if ( !fill_temp(s, b) ) + return XZ_OK; + + /* + * If dec_stream_header() returns + * XZ_UNSUPPORTED_CHECK, it is still possible + * to continue decoding if working in multi-call + * mode. Thus, update s->sequence before calling + * dec_stream_header(). 
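/*
 * Illustrative sketch, separate from the hunks above: how dec_main() sizes
 * the Block Header field from its first byte.  A zero byte is not a Block
 * Header at all but the start of the Index; otherwise the field occupies
 * (first byte + 1) * 4 bytes, which is why the 1 KiB temp buffer is enough.
 * The helper below is local to this example.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t block_header_size(uint8_t first_byte)
{
    return first_byte == 0 ? 0 : ((uint32_t)first_byte + 1) * 4;
}

int main(void)
{
    /* 0x00 -> Index marker, 0x01 -> 8-byte header, 0xFF -> 1024 bytes. */
    printf("%u %u %u\n", block_header_size(0x00), block_header_size(0x01),
           block_header_size(0xFF));

    return 0;
}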
+ */ + s->sequence = SEQ_BLOCK_START; + + ret = dec_stream_header(s); + if ( ret != XZ_OK ) + return ret; + + case SEQ_BLOCK_START: + /* We need one byte of input to continue. */ + if ( b->in_pos == b->in_size ) + return XZ_OK; + + /* See if this is the beginning of the Index field. */ + if ( b->in[b->in_pos] == 0 ) + { + s->in_start = b->in_pos++; + s->sequence = SEQ_INDEX; + break; + } + + /* + * Calculate the size of the Block Header and + * prepare to decode it. + */ + s->block_header.size = ((uint32_t)b->in[b->in_pos] + 1) * 4; + + s->temp.size = s->block_header.size; + s->temp.pos = 0; + s->sequence = SEQ_BLOCK_HEADER; + + case SEQ_BLOCK_HEADER: + if ( !fill_temp(s, b) ) + return XZ_OK; + + ret = dec_block_header(s); + if ( ret != XZ_OK ) + return ret; + + s->sequence = SEQ_BLOCK_UNCOMPRESS; + + case SEQ_BLOCK_UNCOMPRESS: + ret = dec_block(s, b); + if ( ret != XZ_STREAM_END ) + return ret; + + s->sequence = SEQ_BLOCK_PADDING; + + case SEQ_BLOCK_PADDING: + /* + * Size of Compressed Data + Block Padding + * must be a multiple of four. We don't need + * s->block.compressed for anything else + * anymore, so we use it here to test the size + * of the Block Padding field. + */ + while ( s->block.compressed & 3 ) + { + if ( b->in_pos == b->in_size ) + return XZ_OK; + + if ( b->in[b->in_pos++] != 0 ) + return XZ_DATA_ERROR; + + ++s->block.compressed; + } + + s->sequence = SEQ_BLOCK_CHECK; + + case SEQ_BLOCK_CHECK: + if ( s->check_type == XZ_CHECK_CRC32 ) + { + ret = crc32_validate(s, b); + if ( ret != XZ_STREAM_END ) + return ret; + } #ifdef XZ_DEC_ANY_CHECK - else if (!check_skip(s, b)) { - return XZ_OK; - } + else if ( !check_skip(s, b) ) + { + return XZ_OK; + } #endif - s->sequence = SEQ_BLOCK_START; - break; + s->sequence = SEQ_BLOCK_START; + break; - case SEQ_INDEX: - ret = dec_index(s, b); - if (ret != XZ_STREAM_END) - return ret; + case SEQ_INDEX: + ret = dec_index(s, b); + if ( ret != XZ_STREAM_END ) + return ret; - s->sequence = SEQ_INDEX_PADDING; + s->sequence = SEQ_INDEX_PADDING; - case SEQ_INDEX_PADDING: - while ((s->index.size + (b->in_pos - s->in_start)) - & 3) { - if (b->in_pos == b->in_size) { - index_update(s, b); - return XZ_OK; - } + case SEQ_INDEX_PADDING: + while ( (s->index.size + (b->in_pos - s->in_start)) & 3 ) + { + if ( b->in_pos == b->in_size ) + { + index_update(s, b); + return XZ_OK; + } - if (b->in[b->in_pos++] != 0) - return XZ_DATA_ERROR; - } + if ( b->in[b->in_pos++] != 0 ) + return XZ_DATA_ERROR; + } - /* Finish the CRC32 value and Index size. */ - index_update(s, b); + /* Finish the CRC32 value and Index size. */ + index_update(s, b); - /* Compare the hashes to validate the Index field. */ - if (!memeq(&s->block.hash, &s->index.hash, - sizeof(s->block.hash))) - return XZ_DATA_ERROR; + /* Compare the hashes to validate the Index field. 
*/ + if ( !memeq(&s->block.hash, &s->index.hash, sizeof(s->block.hash)) ) + return XZ_DATA_ERROR; - s->sequence = SEQ_INDEX_CRC32; + s->sequence = SEQ_INDEX_CRC32; - case SEQ_INDEX_CRC32: - ret = crc32_validate(s, b); - if (ret != XZ_STREAM_END) - return ret; + case SEQ_INDEX_CRC32: + ret = crc32_validate(s, b); + if ( ret != XZ_STREAM_END ) + return ret; - s->temp.size = STREAM_HEADER_SIZE; - s->sequence = SEQ_STREAM_FOOTER; + s->temp.size = STREAM_HEADER_SIZE; + s->sequence = SEQ_STREAM_FOOTER; - case SEQ_STREAM_FOOTER: - if (!fill_temp(s, b)) - return XZ_OK; + case SEQ_STREAM_FOOTER: + if ( !fill_temp(s, b) ) + return XZ_OK; - return dec_stream_footer(s); - } - } + return dec_stream_footer(s); + } + } - /* Never reached */ + /* Never reached */ } XZ_EXTERN void INIT xz_dec_reset(struct xz_dec *s) { - s->sequence = SEQ_STREAM_HEADER; - s->allow_buf_error = false; - s->pos = 0; - s->crc32 = 0; - memzero(&s->block, sizeof(s->block)); - memzero(&s->index, sizeof(s->index)); - s->temp.pos = 0; - s->temp.size = STREAM_HEADER_SIZE; + s->sequence = SEQ_STREAM_HEADER; + s->allow_buf_error = false; + s->pos = 0; + s->crc32 = 0; + memzero(&s->block, sizeof(s->block)); + memzero(&s->index, sizeof(s->index)); + s->temp.pos = 0; + s->temp.size = STREAM_HEADER_SIZE; } /* @@ -745,77 +761,81 @@ XZ_EXTERN void INIT xz_dec_reset(struct xz_dec *s) */ XZ_EXTERN enum xz_ret INIT xz_dec_run(struct xz_dec *s, struct xz_buf *b) { - size_t in_start; - size_t out_start; - enum xz_ret ret; - - if (DEC_IS_SINGLE(s->mode)) - xz_dec_reset(s); - - in_start = b->in_pos; - out_start = b->out_pos; - ret = dec_main(s, b); - - if (DEC_IS_SINGLE(s->mode)) { - if (ret == XZ_OK) - ret = b->in_pos == b->in_size - ? XZ_DATA_ERROR : XZ_BUF_ERROR; - - if (ret != XZ_STREAM_END) { - b->in_pos = in_start; - b->out_pos = out_start; - } - - } else if (ret == XZ_OK && in_start == b->in_pos - && out_start == b->out_pos) { - if (s->allow_buf_error) - ret = XZ_BUF_ERROR; - - s->allow_buf_error = true; - } else { - s->allow_buf_error = false; - } - - return ret; + size_t in_start; + size_t out_start; + enum xz_ret ret; + + if ( DEC_IS_SINGLE(s->mode) ) + xz_dec_reset(s); + + in_start = b->in_pos; + out_start = b->out_pos; + ret = dec_main(s, b); + + if ( DEC_IS_SINGLE(s->mode) ) + { + if ( ret == XZ_OK ) + ret = b->in_pos == b->in_size ? 
XZ_DATA_ERROR : XZ_BUF_ERROR; + + if ( ret != XZ_STREAM_END ) + { + b->in_pos = in_start; + b->out_pos = out_start; + } + } + else if ( ret == XZ_OK && in_start == b->in_pos && out_start == b->out_pos ) + { + if ( s->allow_buf_error ) + ret = XZ_BUF_ERROR; + + s->allow_buf_error = true; + } + else + { + s->allow_buf_error = false; + } + + return ret; } XZ_EXTERN struct xz_dec *INIT xz_dec_init(enum xz_mode mode, uint32_t dict_max) { - struct xz_dec *s = malloc(sizeof(*s)); - if (s == NULL) - return NULL; + struct xz_dec *s = malloc(sizeof(*s)); + if ( s == NULL ) + return NULL; - s->mode = mode; + s->mode = mode; #ifdef XZ_DEC_BCJ - s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode)); - if (s->bcj == NULL) - goto error_bcj; + s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode)); + if ( s->bcj == NULL ) + goto error_bcj; #endif - s->lzma2 = xz_dec_lzma2_create(mode, dict_max); - if (s->lzma2 == NULL) - goto error_lzma2; + s->lzma2 = xz_dec_lzma2_create(mode, dict_max); + if ( s->lzma2 == NULL ) + goto error_lzma2; - xz_dec_reset(s); - return s; + xz_dec_reset(s); + return s; error_lzma2: #ifdef XZ_DEC_BCJ - xz_dec_bcj_end(s->bcj); + xz_dec_bcj_end(s->bcj); error_bcj: #endif - free(s); - return NULL; + free(s); + return NULL; } XZ_EXTERN void INIT xz_dec_end(struct xz_dec *s) { - if (s != NULL) { - xz_dec_lzma2_end(s->lzma2); + if ( s != NULL ) + { + xz_dec_lzma2_end(s->lzma2); #ifdef XZ_DEC_BCJ - xz_dec_bcj_end(s->bcj); + xz_dec_bcj_end(s->bcj); #endif - free(s); - } + free(s); + } } diff --git a/xen/crypto/rijndael.c b/xen/crypto/rijndael.c index ac4bdd1856..d755ef7c4f 100644 --- a/xen/crypto/rijndael.c +++ b/xen/crypto/rijndael.c @@ -50,762 +50,653 @@ Td4[x] = Si[x].[01, 01, 01, 01]; */ static const u32 Te0[256] = { - 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU, - 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U, - 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU, - 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU, - 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U, - 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU, - 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU, - 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU, - 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU, - 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU, - 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U, - 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU, - 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU, - 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U, - 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU, - 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU, - 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU, - 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU, - 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU, - 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U, - 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU, - 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU, - 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU, - 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU, - 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U, - 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U, - 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U, - 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U, - 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU, - 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U, - 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U, - 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU, - 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU, - 0xbe5f5fe1U, 0x359797a2U, 
0x884444ccU, 0x2e171739U, - 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U, - 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U, - 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU, - 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U, - 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU, - 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U, - 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU, - 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U, - 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U, - 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU, - 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U, - 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U, - 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U, - 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U, - 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U, - 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U, - 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U, - 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U, - 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU, - 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U, - 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U, - 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U, - 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U, - 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U, - 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U, - 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU, - 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U, - 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U, - 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U, - 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU, + 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU, 0xfff2f20dU, + 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U, 0x60303050U, 0x02010103U, + 0xce6767a9U, 0x562b2b7dU, 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, + 0xec76769aU, 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U, + 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU, 0x41adadecU, + 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU, 0x239c9cbfU, 0x53a4a4f7U, + 0xe4727296U, 0x9bc0c05bU, 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, + 0x4c26266aU, 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU, + 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U, 0xe2717193U, + 0xabd8d873U, 0x62313153U, 0x2a15153fU, 0x0804040cU, 0x95c7c752U, + 0x46232365U, 0x9dc3c35eU, 0x30181828U, 0x379696a1U, 0x0a05050fU, + 0x2f9a9ab5U, 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU, + 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU, 0x1209091bU, + 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU, 0x361b1b2dU, 0xdc6e6eb2U, + 0xb45a5aeeU, 0x5ba0a0fbU, 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, + 0x7db3b3ceU, 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U, + 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU, 0x40202060U, + 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU, 0xd46a6abeU, 0x8dcbcb46U, + 0x67bebed9U, 0x7239394bU, 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, + 0x85cfcf4aU, 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U, + 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U, 0x8a4545cfU, + 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U, 0xa05050f0U, 0x783c3c44U, + 0x259f9fbaU, 0x4ba8a8e3U, 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, + 0x058f8f8aU, 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U, + 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U, 0x20101030U, + 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU, 0x81cdcd4cU, 0x180c0c14U, + 0x26131335U, 0xc3ecec2fU, 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, + 0x2e171739U, 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U, + 0xc86464acU, 0xba5d5de7U, 
0x3219192bU, 0xe6737395U, 0xc06060a0U, + 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU, 0x44222266U, 0x542a2a7eU, + 0x3b9090abU, 0x0b888883U, 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, + 0x2814143cU, 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U, + 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU, 0x924949dbU, + 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U, 0x9fc2c25dU, 0xbdd3d36eU, + 0x43acacefU, 0xc46262a6U, 0x399191a8U, 0x319595a4U, 0xd3e4e437U, + 0xf279798bU, 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U, + 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U, 0xd86c6cb4U, + 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U, 0xca6565afU, 0xf47a7a8eU, + 0x47aeaee9U, 0x10080818U, 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, + 0x5c2e2e72U, 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U, + 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U, 0x964b4bddU, + 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U, 0xe0707090U, 0x7c3e3e42U, + 0x71b5b5c4U, 0xcc6666aaU, 0x904848d8U, 0x06030305U, 0xf7f6f601U, + 0x1c0e0e12U, 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U, + 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U, 0xd9e1e138U, + 0xebf8f813U, 0x2b9898b3U, 0x22111133U, 0xd26969bbU, 0xa9d9d970U, + 0x078e8e89U, 0x339494a7U, 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, + 0xc9e9e920U, 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU, + 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U, 0x65bfbfdaU, + 0xd7e6e631U, 0x844242c6U, 0xd06868b8U, 0x824141c3U, 0x299999b0U, + 0x5a2d2d77U, 0x1e0f0f11U, 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, + 0x2c16163aU, }; static const u32 Te1[256] = { - 0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU, - 0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U, - 0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU, - 0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U, - 0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU, - 0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U, - 0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU, - 0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U, - 0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U, - 0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU, - 0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U, - 0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U, - 0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U, - 0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU, - 0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U, - 0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U, - 0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU, - 0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U, - 0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U, - 0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U, - 0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU, - 0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU, - 0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U, - 0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU, - 0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU, - 0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U, - 0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU, - 0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U, - 0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU, - 0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U, - 0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U, - 0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U, - 0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU, - 0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U, - 0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU, - 0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U, - 0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 
0x7fa3dcdcU, - 0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U, - 0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U, - 0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU, - 0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU, - 0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU, - 0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U, - 0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U, - 0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU, - 0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U, - 0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU, - 0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U, - 0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU, - 0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U, - 0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU, - 0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU, - 0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U, - 0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU, - 0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U, - 0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU, - 0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U, - 0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U, - 0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U, - 0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU, - 0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU, - 0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U, - 0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU, - 0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U, + 0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU, 0x0dfff2f2U, + 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U, 0x50603030U, 0x03020101U, + 0xa9ce6767U, 0x7d562b2bU, 0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, + 0x9aec7676U, 0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU, + 0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U, 0xec41adadU, + 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU, 0xbf239c9cU, 0xf753a4a4U, + 0x96e47272U, 0x5b9bc0c0U, 0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, + 0x6a4c2626U, 0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU, + 0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U, 0x93e27171U, + 0x73abd8d8U, 0x53623131U, 0x3f2a1515U, 0x0c080404U, 0x5295c7c7U, + 0x65462323U, 0x5e9dc3c3U, 0x28301818U, 0xa1379696U, 0x0f0a0505U, + 0xb52f9a9aU, 0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U, + 0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U, 0x1b120909U, + 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU, 0x2d361b1bU, 0xb2dc6e6eU, + 0xeeb45a5aU, 0xfb5ba0a0U, 0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, + 0xce7db3b3U, 0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U, + 0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU, 0x60402020U, + 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU, 0xbed46a6aU, 0x468dcbcbU, + 0xd967bebeU, 0x4b723939U, 0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, + 0x4a85cfcfU, 0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU, + 0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U, 0xcf8a4545U, + 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU, 0xf0a05050U, 0x44783c3cU, + 0xba259f9fU, 0xe34ba8a8U, 0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, + 0x8a058f8fU, 0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U, + 0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U, 0x30201010U, + 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U, 0x4c81cdcdU, 0x14180c0cU, + 0x35261313U, 0x2fc3ececU, 0xe1be5f5fU, 0xa2359797U, 0xcc884444U, + 0x392e1717U, 0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU, + 0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U, 0xa0c06060U, + 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU, 0x66442222U, 0x7e542a2aU, + 0xab3b9090U, 0x830b8888U, 0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, + 
0x3c281414U, 0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU, + 0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU, 0xdb924949U, + 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU, 0x5d9fc2c2U, 0x6ebdd3d3U, + 0xef43acacU, 0xa6c46262U, 0xa8399191U, 0xa4319595U, 0x37d3e4e4U, + 0x8bf27979U, 0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU, + 0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U, 0xb4d86c6cU, + 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU, 0xafca6565U, 0x8ef47a7aU, + 0xe947aeaeU, 0x18100808U, 0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, + 0x725c2e2eU, 0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U, + 0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU, 0xdd964b4bU, + 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU, 0x90e07070U, 0x427c3e3eU, + 0xc471b5b5U, 0xaacc6666U, 0xd8904848U, 0x05060303U, 0x01f7f6f6U, + 0x121c0e0eU, 0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U, + 0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU, 0x38d9e1e1U, + 0x13ebf8f8U, 0xb32b9898U, 0x33221111U, 0xbbd26969U, 0x70a9d9d9U, + 0x89078e8eU, 0xa7339494U, 0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, + 0x20c9e9e9U, 0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU, + 0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU, 0xda65bfbfU, + 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U, 0xc3824141U, 0xb0299999U, + 0x775a2d2dU, 0x111e0f0fU, 0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, + 0x3a2c1616U, }; static const u32 Te2[256] = { - 0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU, - 0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U, - 0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU, - 0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U, - 0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU, - 0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U, - 0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU, - 0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U, - 0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U, - 0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU, - 0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U, - 0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U, - 0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U, - 0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU, - 0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U, - 0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U, - 0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU, - 0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U, - 0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U, - 0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U, - 0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU, - 0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU, - 0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U, - 0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU, - 0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU, - 0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U, - 0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU, - 0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U, - 0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU, - 0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U, - 0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U, - 0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U, - 0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU, - 0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U, - 0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU, - 0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U, - 0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU, - 0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U, - 0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U, - 0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU, - 
0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU, - 0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU, - 0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U, - 0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U, - 0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU, - 0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U, - 0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU, - 0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U, - 0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU, - 0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U, - 0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU, - 0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU, - 0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U, - 0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU, - 0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U, - 0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU, - 0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U, - 0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U, - 0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U, - 0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU, - 0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU, - 0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U, - 0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU, - 0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U, + 0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU, 0xf20dfff2U, + 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U, 0x30506030U, 0x01030201U, + 0x67a9ce67U, 0x2b7d562bU, 0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, + 0x769aec76U, 0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU, + 0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U, 0xadec41adU, + 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU, 0x9cbf239cU, 0xa4f753a4U, + 0x7296e472U, 0xc05b9bc0U, 0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, + 0x266a4c26U, 0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU, + 0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U, 0x7193e271U, + 0xd873abd8U, 0x31536231U, 0x153f2a15U, 0x040c0804U, 0xc75295c7U, + 0x23654623U, 0xc35e9dc3U, 0x18283018U, 0x96a13796U, 0x050f0a05U, + 0x9ab52f9aU, 0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U, + 0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U, 0x091b1209U, + 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU, 0x1b2d361bU, 0x6eb2dc6eU, + 0x5aeeb45aU, 0xa0fb5ba0U, 0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, + 0xb3ce7db3U, 0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U, + 0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU, 0x20604020U, + 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU, 0x6abed46aU, 0xcb468dcbU, + 0xbed967beU, 0x394b7239U, 0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, + 0xcf4a85cfU, 0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU, + 0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U, 0x45cf8a45U, + 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU, 0x50f0a050U, 0x3c44783cU, + 0x9fba259fU, 0xa8e34ba8U, 0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, + 0x8f8a058fU, 0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U, + 0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U, 0x10302010U, + 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U, 0xcd4c81cdU, 0x0c14180cU, + 0x13352613U, 0xec2fc3ecU, 0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, + 0x17392e17U, 0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU, + 0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U, 0x60a0c060U, + 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU, 0x22664422U, 0x2a7e542aU, + 0x90ab3b90U, 0x88830b88U, 0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, + 0x143c2814U, 0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU, + 0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU, 0x49db9249U, + 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU, 
0xc25d9fc2U, 0xd36ebdd3U, + 0xacef43acU, 0x62a6c462U, 0x91a83991U, 0x95a43195U, 0xe437d3e4U, + 0x798bf279U, 0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU, + 0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U, 0x6cb4d86cU, + 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU, 0x65afca65U, 0x7a8ef47aU, + 0xaee947aeU, 0x08181008U, 0xbad56fbaU, 0x7888f078U, 0x256f4a25U, + 0x2e725c2eU, 0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U, + 0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU, 0x4bdd964bU, + 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU, 0x7090e070U, 0x3e427c3eU, + 0xb5c471b5U, 0x66aacc66U, 0x48d89048U, 0x03050603U, 0xf601f7f6U, + 0x0e121c0eU, 0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U, + 0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU, 0xe138d9e1U, + 0xf813ebf8U, 0x98b32b98U, 0x11332211U, 0x69bbd269U, 0xd970a9d9U, + 0x8e89078eU, 0x94a73394U, 0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, + 0xe920c9e9U, 0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU, + 0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU, 0xbfda65bfU, + 0xe631d7e6U, 0x42c68442U, 0x68b8d068U, 0x41c38241U, 0x99b02999U, + 0x2d775a2dU, 0x0f111e0fU, 0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, + 0x163a2c16U, }; static const u32 Te3[256] = { - 0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U, - 0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U, - 0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U, - 0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU, - 0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU, - 0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU, - 0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U, - 0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU, - 0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU, - 0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U, - 0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U, - 0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU, - 0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU, - 0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU, - 0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU, - 0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU, - 0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U, - 0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU, - 0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU, - 0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U, - 0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U, - 0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U, - 0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U, - 0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U, - 0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU, - 0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U, - 0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU, - 0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU, - 0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U, - 0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U, - 0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U, - 0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU, - 0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U, - 0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU, - 0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU, - 0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U, - 0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U, - 0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU, - 0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U, - 0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU, - 0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U, - 0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U, - 0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U, - 0x9191a839U, 
0x9595a431U, 0xe4e437d3U, 0x79798bf2U, - 0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU, - 0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U, - 0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU, - 0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U, - 0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU, - 0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U, - 0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU, - 0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU, - 0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU, - 0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU, - 0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U, - 0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U, - 0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U, - 0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U, - 0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U, - 0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U, - 0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU, - 0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U, - 0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU, - 0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU, + 0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U, 0xf2f20dffU, + 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U, 0x30305060U, 0x01010302U, + 0x6767a9ceU, 0x2b2b7d56U, 0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, + 0x76769aecU, 0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU, + 0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU, 0xadadec41U, + 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U, 0x9c9cbf23U, 0xa4a4f753U, + 0x727296e4U, 0xc0c05b9bU, 0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, + 0x26266a4cU, 0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U, + 0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U, 0x717193e2U, + 0xd8d873abU, 0x31315362U, 0x15153f2aU, 0x04040c08U, 0xc7c75295U, + 0x23236546U, 0xc3c35e9dU, 0x18182830U, 0x9696a137U, 0x05050f0aU, + 0x9a9ab52fU, 0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU, + 0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU, 0x09091b12U, + 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U, 0x1b1b2d36U, 0x6e6eb2dcU, + 0x5a5aeeb4U, 0xa0a0fb5bU, 0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, + 0xb3b3ce7dU, 0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U, + 0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U, 0x20206040U, + 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U, 0x6a6abed4U, 0xcbcb468dU, + 0xbebed967U, 0x39394b72U, 0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, + 0xcfcf4a85U, 0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU, + 0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U, 0x4545cf8aU, + 0xf9f910e9U, 0x02020604U, 0x7f7f81feU, 0x5050f0a0U, 0x3c3c4478U, + 0x9f9fba25U, 0xa8a8e34bU, 0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, + 0x8f8f8a05U, 0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U, + 0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U, 0x10103020U, + 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU, 0xcdcd4c81U, 0x0c0c1418U, + 0x13133526U, 0xecec2fc3U, 0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, + 0x1717392eU, 0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU, + 0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U, 0x6060a0c0U, + 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U, 0x22226644U, 0x2a2a7e54U, + 0x9090ab3bU, 0x8888830bU, 0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, + 0x14143c28U, 0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU, + 0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U, 0x4949db92U, + 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U, 0xc2c25d9fU, 0xd3d36ebdU, + 0xacacef43U, 0x6262a6c4U, 0x9191a839U, 0x9595a431U, 0xe4e437d3U, + 0x79798bf2U, 0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU, + 0x8d8d8c01U, 
0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U, 0x6c6cb4d8U, + 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU, 0x6565afcaU, 0x7a7a8ef4U, + 0xaeaee947U, 0x08081810U, 0xbabad56fU, 0x787888f0U, 0x25256f4aU, + 0x2e2e725cU, 0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U, + 0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU, 0x4b4bdd96U, + 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU, 0x707090e0U, 0x3e3e427cU, + 0xb5b5c471U, 0x6666aaccU, 0x4848d890U, 0x03030506U, 0xf6f601f7U, + 0x0e0e121cU, 0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U, + 0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U, 0xe1e138d9U, + 0xf8f813ebU, 0x9898b32bU, 0x11113322U, 0x6969bbd2U, 0xd9d970a9U, + 0x8e8e8907U, 0x9494a733U, 0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, + 0xe9e920c9U, 0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U, + 0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU, 0xbfbfda65U, + 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U, 0x4141c382U, 0x9999b029U, + 0x2d2d775aU, 0x0f0f111eU, 0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, + 0x16163a2cU, }; static const u32 Te4[256] = { - 0x63636363U, 0x7c7c7c7cU, 0x77777777U, 0x7b7b7b7bU, - 0xf2f2f2f2U, 0x6b6b6b6bU, 0x6f6f6f6fU, 0xc5c5c5c5U, - 0x30303030U, 0x01010101U, 0x67676767U, 0x2b2b2b2bU, - 0xfefefefeU, 0xd7d7d7d7U, 0xababababU, 0x76767676U, - 0xcacacacaU, 0x82828282U, 0xc9c9c9c9U, 0x7d7d7d7dU, - 0xfafafafaU, 0x59595959U, 0x47474747U, 0xf0f0f0f0U, - 0xadadadadU, 0xd4d4d4d4U, 0xa2a2a2a2U, 0xafafafafU, - 0x9c9c9c9cU, 0xa4a4a4a4U, 0x72727272U, 0xc0c0c0c0U, - 0xb7b7b7b7U, 0xfdfdfdfdU, 0x93939393U, 0x26262626U, - 0x36363636U, 0x3f3f3f3fU, 0xf7f7f7f7U, 0xccccccccU, - 0x34343434U, 0xa5a5a5a5U, 0xe5e5e5e5U, 0xf1f1f1f1U, - 0x71717171U, 0xd8d8d8d8U, 0x31313131U, 0x15151515U, - 0x04040404U, 0xc7c7c7c7U, 0x23232323U, 0xc3c3c3c3U, - 0x18181818U, 0x96969696U, 0x05050505U, 0x9a9a9a9aU, - 0x07070707U, 0x12121212U, 0x80808080U, 0xe2e2e2e2U, - 0xebebebebU, 0x27272727U, 0xb2b2b2b2U, 0x75757575U, - 0x09090909U, 0x83838383U, 0x2c2c2c2cU, 0x1a1a1a1aU, - 0x1b1b1b1bU, 0x6e6e6e6eU, 0x5a5a5a5aU, 0xa0a0a0a0U, - 0x52525252U, 0x3b3b3b3bU, 0xd6d6d6d6U, 0xb3b3b3b3U, - 0x29292929U, 0xe3e3e3e3U, 0x2f2f2f2fU, 0x84848484U, - 0x53535353U, 0xd1d1d1d1U, 0x00000000U, 0xededededU, - 0x20202020U, 0xfcfcfcfcU, 0xb1b1b1b1U, 0x5b5b5b5bU, - 0x6a6a6a6aU, 0xcbcbcbcbU, 0xbebebebeU, 0x39393939U, - 0x4a4a4a4aU, 0x4c4c4c4cU, 0x58585858U, 0xcfcfcfcfU, - 0xd0d0d0d0U, 0xefefefefU, 0xaaaaaaaaU, 0xfbfbfbfbU, - 0x43434343U, 0x4d4d4d4dU, 0x33333333U, 0x85858585U, - 0x45454545U, 0xf9f9f9f9U, 0x02020202U, 0x7f7f7f7fU, - 0x50505050U, 0x3c3c3c3cU, 0x9f9f9f9fU, 0xa8a8a8a8U, - 0x51515151U, 0xa3a3a3a3U, 0x40404040U, 0x8f8f8f8fU, - 0x92929292U, 0x9d9d9d9dU, 0x38383838U, 0xf5f5f5f5U, - 0xbcbcbcbcU, 0xb6b6b6b6U, 0xdadadadaU, 0x21212121U, - 0x10101010U, 0xffffffffU, 0xf3f3f3f3U, 0xd2d2d2d2U, - 0xcdcdcdcdU, 0x0c0c0c0cU, 0x13131313U, 0xececececU, - 0x5f5f5f5fU, 0x97979797U, 0x44444444U, 0x17171717U, - 0xc4c4c4c4U, 0xa7a7a7a7U, 0x7e7e7e7eU, 0x3d3d3d3dU, - 0x64646464U, 0x5d5d5d5dU, 0x19191919U, 0x73737373U, - 0x60606060U, 0x81818181U, 0x4f4f4f4fU, 0xdcdcdcdcU, - 0x22222222U, 0x2a2a2a2aU, 0x90909090U, 0x88888888U, - 0x46464646U, 0xeeeeeeeeU, 0xb8b8b8b8U, 0x14141414U, - 0xdedededeU, 0x5e5e5e5eU, 0x0b0b0b0bU, 0xdbdbdbdbU, - 0xe0e0e0e0U, 0x32323232U, 0x3a3a3a3aU, 0x0a0a0a0aU, - 0x49494949U, 0x06060606U, 0x24242424U, 0x5c5c5c5cU, - 0xc2c2c2c2U, 0xd3d3d3d3U, 0xacacacacU, 0x62626262U, - 0x91919191U, 0x95959595U, 0xe4e4e4e4U, 0x79797979U, - 0xe7e7e7e7U, 0xc8c8c8c8U, 0x37373737U, 0x6d6d6d6dU, - 0x8d8d8d8dU, 0xd5d5d5d5U, 0x4e4e4e4eU, 0xa9a9a9a9U, - 0x6c6c6c6cU, 0x56565656U, 
0xf4f4f4f4U, 0xeaeaeaeaU, - 0x65656565U, 0x7a7a7a7aU, 0xaeaeaeaeU, 0x08080808U, - 0xbabababaU, 0x78787878U, 0x25252525U, 0x2e2e2e2eU, - 0x1c1c1c1cU, 0xa6a6a6a6U, 0xb4b4b4b4U, 0xc6c6c6c6U, - 0xe8e8e8e8U, 0xddddddddU, 0x74747474U, 0x1f1f1f1fU, - 0x4b4b4b4bU, 0xbdbdbdbdU, 0x8b8b8b8bU, 0x8a8a8a8aU, - 0x70707070U, 0x3e3e3e3eU, 0xb5b5b5b5U, 0x66666666U, - 0x48484848U, 0x03030303U, 0xf6f6f6f6U, 0x0e0e0e0eU, - 0x61616161U, 0x35353535U, 0x57575757U, 0xb9b9b9b9U, - 0x86868686U, 0xc1c1c1c1U, 0x1d1d1d1dU, 0x9e9e9e9eU, - 0xe1e1e1e1U, 0xf8f8f8f8U, 0x98989898U, 0x11111111U, - 0x69696969U, 0xd9d9d9d9U, 0x8e8e8e8eU, 0x94949494U, - 0x9b9b9b9bU, 0x1e1e1e1eU, 0x87878787U, 0xe9e9e9e9U, - 0xcecececeU, 0x55555555U, 0x28282828U, 0xdfdfdfdfU, - 0x8c8c8c8cU, 0xa1a1a1a1U, 0x89898989U, 0x0d0d0d0dU, - 0xbfbfbfbfU, 0xe6e6e6e6U, 0x42424242U, 0x68686868U, - 0x41414141U, 0x99999999U, 0x2d2d2d2dU, 0x0f0f0f0fU, - 0xb0b0b0b0U, 0x54545454U, 0xbbbbbbbbU, 0x16161616U, + 0x63636363U, 0x7c7c7c7cU, 0x77777777U, 0x7b7b7b7bU, 0xf2f2f2f2U, + 0x6b6b6b6bU, 0x6f6f6f6fU, 0xc5c5c5c5U, 0x30303030U, 0x01010101U, + 0x67676767U, 0x2b2b2b2bU, 0xfefefefeU, 0xd7d7d7d7U, 0xababababU, + 0x76767676U, 0xcacacacaU, 0x82828282U, 0xc9c9c9c9U, 0x7d7d7d7dU, + 0xfafafafaU, 0x59595959U, 0x47474747U, 0xf0f0f0f0U, 0xadadadadU, + 0xd4d4d4d4U, 0xa2a2a2a2U, 0xafafafafU, 0x9c9c9c9cU, 0xa4a4a4a4U, + 0x72727272U, 0xc0c0c0c0U, 0xb7b7b7b7U, 0xfdfdfdfdU, 0x93939393U, + 0x26262626U, 0x36363636U, 0x3f3f3f3fU, 0xf7f7f7f7U, 0xccccccccU, + 0x34343434U, 0xa5a5a5a5U, 0xe5e5e5e5U, 0xf1f1f1f1U, 0x71717171U, + 0xd8d8d8d8U, 0x31313131U, 0x15151515U, 0x04040404U, 0xc7c7c7c7U, + 0x23232323U, 0xc3c3c3c3U, 0x18181818U, 0x96969696U, 0x05050505U, + 0x9a9a9a9aU, 0x07070707U, 0x12121212U, 0x80808080U, 0xe2e2e2e2U, + 0xebebebebU, 0x27272727U, 0xb2b2b2b2U, 0x75757575U, 0x09090909U, + 0x83838383U, 0x2c2c2c2cU, 0x1a1a1a1aU, 0x1b1b1b1bU, 0x6e6e6e6eU, + 0x5a5a5a5aU, 0xa0a0a0a0U, 0x52525252U, 0x3b3b3b3bU, 0xd6d6d6d6U, + 0xb3b3b3b3U, 0x29292929U, 0xe3e3e3e3U, 0x2f2f2f2fU, 0x84848484U, + 0x53535353U, 0xd1d1d1d1U, 0x00000000U, 0xededededU, 0x20202020U, + 0xfcfcfcfcU, 0xb1b1b1b1U, 0x5b5b5b5bU, 0x6a6a6a6aU, 0xcbcbcbcbU, + 0xbebebebeU, 0x39393939U, 0x4a4a4a4aU, 0x4c4c4c4cU, 0x58585858U, + 0xcfcfcfcfU, 0xd0d0d0d0U, 0xefefefefU, 0xaaaaaaaaU, 0xfbfbfbfbU, + 0x43434343U, 0x4d4d4d4dU, 0x33333333U, 0x85858585U, 0x45454545U, + 0xf9f9f9f9U, 0x02020202U, 0x7f7f7f7fU, 0x50505050U, 0x3c3c3c3cU, + 0x9f9f9f9fU, 0xa8a8a8a8U, 0x51515151U, 0xa3a3a3a3U, 0x40404040U, + 0x8f8f8f8fU, 0x92929292U, 0x9d9d9d9dU, 0x38383838U, 0xf5f5f5f5U, + 0xbcbcbcbcU, 0xb6b6b6b6U, 0xdadadadaU, 0x21212121U, 0x10101010U, + 0xffffffffU, 0xf3f3f3f3U, 0xd2d2d2d2U, 0xcdcdcdcdU, 0x0c0c0c0cU, + 0x13131313U, 0xececececU, 0x5f5f5f5fU, 0x97979797U, 0x44444444U, + 0x17171717U, 0xc4c4c4c4U, 0xa7a7a7a7U, 0x7e7e7e7eU, 0x3d3d3d3dU, + 0x64646464U, 0x5d5d5d5dU, 0x19191919U, 0x73737373U, 0x60606060U, + 0x81818181U, 0x4f4f4f4fU, 0xdcdcdcdcU, 0x22222222U, 0x2a2a2a2aU, + 0x90909090U, 0x88888888U, 0x46464646U, 0xeeeeeeeeU, 0xb8b8b8b8U, + 0x14141414U, 0xdedededeU, 0x5e5e5e5eU, 0x0b0b0b0bU, 0xdbdbdbdbU, + 0xe0e0e0e0U, 0x32323232U, 0x3a3a3a3aU, 0x0a0a0a0aU, 0x49494949U, + 0x06060606U, 0x24242424U, 0x5c5c5c5cU, 0xc2c2c2c2U, 0xd3d3d3d3U, + 0xacacacacU, 0x62626262U, 0x91919191U, 0x95959595U, 0xe4e4e4e4U, + 0x79797979U, 0xe7e7e7e7U, 0xc8c8c8c8U, 0x37373737U, 0x6d6d6d6dU, + 0x8d8d8d8dU, 0xd5d5d5d5U, 0x4e4e4e4eU, 0xa9a9a9a9U, 0x6c6c6c6cU, + 0x56565656U, 0xf4f4f4f4U, 0xeaeaeaeaU, 0x65656565U, 0x7a7a7a7aU, + 0xaeaeaeaeU, 0x08080808U, 0xbabababaU, 0x78787878U, 
0x25252525U, + 0x2e2e2e2eU, 0x1c1c1c1cU, 0xa6a6a6a6U, 0xb4b4b4b4U, 0xc6c6c6c6U, + 0xe8e8e8e8U, 0xddddddddU, 0x74747474U, 0x1f1f1f1fU, 0x4b4b4b4bU, + 0xbdbdbdbdU, 0x8b8b8b8bU, 0x8a8a8a8aU, 0x70707070U, 0x3e3e3e3eU, + 0xb5b5b5b5U, 0x66666666U, 0x48484848U, 0x03030303U, 0xf6f6f6f6U, + 0x0e0e0e0eU, 0x61616161U, 0x35353535U, 0x57575757U, 0xb9b9b9b9U, + 0x86868686U, 0xc1c1c1c1U, 0x1d1d1d1dU, 0x9e9e9e9eU, 0xe1e1e1e1U, + 0xf8f8f8f8U, 0x98989898U, 0x11111111U, 0x69696969U, 0xd9d9d9d9U, + 0x8e8e8e8eU, 0x94949494U, 0x9b9b9b9bU, 0x1e1e1e1eU, 0x87878787U, + 0xe9e9e9e9U, 0xcecececeU, 0x55555555U, 0x28282828U, 0xdfdfdfdfU, + 0x8c8c8c8cU, 0xa1a1a1a1U, 0x89898989U, 0x0d0d0d0dU, 0xbfbfbfbfU, + 0xe6e6e6e6U, 0x42424242U, 0x68686868U, 0x41414141U, 0x99999999U, + 0x2d2d2d2dU, 0x0f0f0f0fU, 0xb0b0b0b0U, 0x54545454U, 0xbbbbbbbbU, + 0x16161616U, }; #ifdef NEED_RIJNDAEL_DECRYPT static const u32 Td0[256] = { - 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U, - 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U, - 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U, - 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU, - 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U, - 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U, - 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU, - 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U, - 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU, - 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U, - 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U, - 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U, - 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U, - 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU, - 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U, - 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU, - 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U, - 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU, - 0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U, - 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U, - 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U, - 0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU, - 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U, - 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU, - 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U, - 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU, - 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U, - 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU, - 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU, - 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U, - 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU, - 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U, - 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU, - 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U, - 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U, - 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U, - 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU, - 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U, - 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U, - 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU, - 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U, - 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U, - 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U, - 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U, - 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U, - 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU, - 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U, - 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U, - 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U, - 0x31a4b2afU, 
0x2a3f2331U, 0xc6a59430U, 0x35a266c0U, - 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U, - 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU, - 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU, - 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU, - 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU, - 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U, - 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U, - 0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU, - 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU, - 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U, - 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU, - 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U, - 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U, - 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U, + 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U, 0x3bab6bcbU, + 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U, 0x2030fa55U, 0xad766df6U, + 0x88cc7691U, 0xf5024c25U, 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, + 0xb562a38fU, 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U, + 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U, 0x038f5fe7U, + 0x15929c95U, 0xbf6d7aebU, 0x955259daU, 0xd4be832dU, 0x587421d3U, + 0x49e06929U, 0x8ec9c844U, 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, + 0x27b971ddU, 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U, + 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U, 0xb16477e0U, + 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U, 0x70486858U, 0x8f45fd19U, + 0x94de6c87U, 0x527bf8b7U, 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, + 0x6655ab2aU, 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U, + 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU, 0x8acf1c2bU, + 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U, 0x65daf4cdU, 0x0605bed5U, + 0xd134621fU, 0xc4a6fe8aU, 0x342e539dU, 0xa2f355a0U, 0x058ae132U, + 0xa4f6eb75U, 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U, + 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U, 0x91548db5U, + 0x71c45d05U, 0x0406d46fU, 0x605015ffU, 0x1998fb24U, 0xd6bde997U, + 0x894043ccU, 0x67d99e77U, 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, + 0x79c8eedbU, 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U, + 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU, 0xfd0efffbU, + 0x0f853856U, 0x3daed51eU, 0x362d3927U, 0x0a0fd964U, 0x685ca621U, + 0x9b5b54d1U, 0x24362e3aU, 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, + 0x1b9b919eU, 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U, + 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU, 0x0e090d0bU, + 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U, 0x57f11985U, 0xaf75074cU, + 0xee99ddbbU, 0xa37f60fdU, 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, + 0x5bfb7e34U, 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U, + 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U, 0x854a247dU, + 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU, 0x1d9e2f4bU, 0xdcb230f3U, + 0x0d8652ecU, 0x77c1e3d0U, 0x2bb3166cU, 0xa970b999U, 0x119448faU, + 0x47e96422U, 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU, + 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U, 0xa6f581cfU, + 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U, 0x2c3a9de4U, 0x5078920dU, + 0x6a5fcc9bU, 0x547e4662U, 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, + 0x82c3aff5U, 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U, + 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU, 0xcd267809U, + 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U, 0xe6956e65U, 0xaaffe67eU, + 0x21bccf08U, 0xef15e8e6U, 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, + 0x29b07cd6U, 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U, + 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U, 0xf104984aU, + 
0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU, 0x764dd68dU, 0x43efb04dU, + 0xccaa4d54U, 0xe49604dfU, 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, + 0x4665517fU, 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU, + 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U, 0x9ad7618cU, + 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U, 0xcea927eeU, 0xb761c935U, + 0xe11ce5edU, 0x7a47b13cU, 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, + 0x73c737bfU, 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U, + 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU, 0x161dc372U, + 0xbce2250cU, 0x283c498bU, 0xff0d9541U, 0x39a80171U, 0x080cb3deU, + 0xd8b4e49cU, 0x6456c190U, 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, + 0xd0b85742U, }; static const u32 Td1[256] = { - 0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU, - 0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U, - 0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU, - 0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U, - 0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U, - 0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U, - 0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U, - 0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U, - 0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U, - 0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU, - 0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU, - 0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU, - 0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U, - 0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU, - 0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U, - 0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U, - 0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U, - 0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU, - 0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU, - 0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U, - 0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU, - 0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U, - 0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU, - 0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU, - 0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U, - 0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U, - 0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U, - 0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU, - 0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U, - 0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU, - 0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U, - 0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U, - 0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U, - 0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU, - 0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U, - 0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U, - 0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U, - 0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U, - 0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U, - 0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U, - 0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU, - 0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU, - 0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U, - 0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU, - 0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U, - 0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU, - 0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU, - 0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U, - 0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU, - 0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U, - 0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U, - 0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U, - 0x8d764dd6U, 0x4d43efb0U, 
0x54ccaa4dU, 0xdfe49604U, - 0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U, - 0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U, - 0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U, - 0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU, - 0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U, - 0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U, - 0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU, - 0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U, - 0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U, - 0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U, - 0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U, + 0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU, 0xcb3bab6bU, + 0xf11f9d45U, 0xabacfa58U, 0x934be303U, 0x552030faU, 0xf6ad766dU, + 0x9188cc76U, 0x25f5024cU, 0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, + 0x8fb562a3U, 0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U, + 0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U, 0xe7038f5fU, + 0x9515929cU, 0xebbf6d7aU, 0xda955259U, 0x2dd4be83U, 0xd3587421U, + 0x2949e069U, 0x448ec9c8U, 0x6a75c289U, 0x78f48e79U, 0x6b99583eU, + 0xdd27b971U, 0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU, + 0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU, 0xe0b16477U, + 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU, 0x58704868U, 0x198f45fdU, + 0x8794de6cU, 0xb7527bf8U, 0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, + 0x2a6655abU, 0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U, + 0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U, 0x2b8acf1cU, + 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U, 0xcd65daf4U, 0xd50605beU, + 0x1fd13462U, 0x8ac4a6feU, 0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, + 0x75a4f6ebU, 0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U, + 0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU, 0xb591548dU, + 0x0571c45dU, 0x6f0406d4U, 0xff605015U, 0x241998fbU, 0x97d6bde9U, + 0xcc894043U, 0x7767d99eU, 0xbdb0e842U, 0x8807898bU, 0x38e7195bU, + 0xdb79c8eeU, 0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U, + 0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U, 0xfbfd0effU, + 0x560f8538U, 0x1e3daed5U, 0x27362d39U, 0x640a0fd9U, 0x21685ca6U, + 0xd19b5b54U, 0x3a24362eU, 0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, + 0x9e1b9b91U, 0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU, + 0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U, 0x0b0e090dU, + 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U, 0x8557f119U, 0x4caf7507U, + 0xbbee99ddU, 0xfda37f60U, 0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, + 0x345bfb7eU, 0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U, + 0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U, 0x7d854a24U, + 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U, 0x4b1d9e2fU, 0xf3dcb230U, + 0xec0d8652U, 0xd077c1e3U, 0x6c2bb316U, 0x99a970b9U, 0xfa119448U, + 0x2247e964U, 0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U, + 0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU, 0xcfa6f581U, + 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU, 0xe42c3a9dU, 0x0d507892U, + 0x9b6a5fccU, 0x62547e46U, 0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, + 0xf582c3afU, 0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U, + 0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU, 0x09cd2678U, + 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU, 0x65e6956eU, 0x7eaaffe6U, + 0x0821bccfU, 0xe6ef15e8U, 0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, + 0xd629b07cU, 0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U, + 0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U, 0x4af10498U, + 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U, 0x8d764dd6U, 0x4d43efb0U, + 0x54ccaa4dU, 0xdfe49604U, 0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, + 0x7f466551U, 0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 
0x2efb0b41U, + 0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U, 0x8c9ad761U, + 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU, 0xeecea927U, 0x35b761c9U, + 0xede11ce5U, 0x3c7a47b1U, 0x599cd2dfU, 0x3f55f273U, 0x791814ceU, + 0xbf73c737U, 0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU, + 0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U, 0x72161dc3U, + 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U, 0x7139a801U, 0xde080cb3U, + 0x9cd8b4e4U, 0x906456c1U, 0x617bcb84U, 0x70d532b6U, 0x74486c5cU, + 0x42d0b857U, }; static const u32 Td2[256] = { - 0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U, - 0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U, - 0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U, - 0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U, - 0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU, - 0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U, - 0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U, - 0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U, - 0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U, - 0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU, - 0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U, - 0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U, - 0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU, - 0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U, - 0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U, - 0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U, - 0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U, - 0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U, - 0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U, - 0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU, - 0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U, - 0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U, - 0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U, - 0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U, - 0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U, - 0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU, - 0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU, - 0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U, - 0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU, - 0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U, - 0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU, - 0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU, - 0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU, - 0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU, - 0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U, - 0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U, - 0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U, - 0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U, - 0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U, - 0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U, - 0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U, - 0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU, - 0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU, - 0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U, - 0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U, - 0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU, - 0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU, - 0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U, - 0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U, - 0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U, - 0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U, - 0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U, - 0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U, - 0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U, - 0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU, - 0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U, 
- 0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U, - 0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U, - 0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U, - 0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U, - 0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U, - 0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU, - 0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U, - 0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U, + 0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U, 0x6bcb3babU, + 0x45f11f9dU, 0x58abacfaU, 0x03934be3U, 0xfa552030U, 0x6df6ad76U, + 0x769188ccU, 0x4c25f502U, 0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, + 0xa38fb562U, 0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU, + 0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U, 0x5fe7038fU, + 0x9c951592U, 0x7aebbf6dU, 0x59da9552U, 0x832dd4beU, 0x21d35874U, + 0x692949e0U, 0xc8448ec9U, 0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, + 0x71dd27b9U, 0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU, + 0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U, 0x77e0b164U, + 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U, 0x68587048U, 0xfd198f45U, + 0x6c8794deU, 0xf8b7527bU, 0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, + 0xab2a6655U, 0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U, + 0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U, 0x1c2b8acfU, + 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U, 0xf4cd65daU, 0xbed50605U, + 0x621fd134U, 0xfe8ac4a6U, 0x539d342eU, 0x55a0a2f3U, 0xe132058aU, + 0xeb75a4f6U, 0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU, + 0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U, 0x8db59154U, + 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U, 0xfb241998U, 0xe997d6bdU, + 0x43cc8940U, 0x9e7767d9U, 0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, + 0xeedb79c8U, 0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U, + 0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU, 0xfffbfd0eU, + 0x38560f85U, 0xd51e3daeU, 0x3927362dU, 0xd9640a0fU, 0xa621685cU, + 0x54d19b5bU, 0x2e3a2436U, 0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, + 0x919e1b9bU, 0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U, + 0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU, 0x0d0b0e09U, + 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU, 0x198557f1U, 0x074caf75U, + 0xddbbee99U, 0x60fda37fU, 0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, + 0x7e345bfbU, 0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U, + 0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U, 0x247d854aU, + 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U, 0x2f4b1d9eU, 0x30f3dcb2U, + 0x52ec0d86U, 0xe3d077c1U, 0x166c2bb3U, 0xb999a970U, 0x48fa1194U, + 0x642247e9U, 0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U, + 0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U, 0x81cfa6f5U, + 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU, 0x9de42c3aU, 0x920d5078U, + 0xcc9b6a5fU, 0x4662547eU, 0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, + 0xaff582c3U, 0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U, + 0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU, 0x7809cd26U, + 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU, 0x6e65e695U, 0xe67eaaffU, + 0xcf0821bcU, 0xe8e6ef15U, 0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, + 0x7cd629b0U, 0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U, + 0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U, 0x984af104U, + 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U, 0xd68d764dU, 0xb04d43efU, + 0x4d54ccaaU, 0x04dfe496U, 0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, + 0x517f4665U, 0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU, + 0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U, 0x618c9ad7U, + 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U, 0x27eecea9U, 0xc935b761U, + 0xe5ede11cU, 0xb13c7a47U, 0xdf599cd2U, 
0x733f55f2U, 0xce791814U, + 0x37bf73c7U, 0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U, + 0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U, 0xc372161dU, + 0x250cbce2U, 0x498b283cU, 0x9541ff0dU, 0x017139a8U, 0xb3de080cU, + 0xe49cd8b4U, 0xc1906456U, 0x84617bcbU, 0xb670d532U, 0x5c74486cU, + 0x5742d0b8U, }; static const u32 Td3[256] = { - 0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU, - 0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU, - 0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U, - 0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U, - 0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU, - 0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU, - 0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U, - 0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU, - 0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U, - 0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU, - 0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U, - 0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U, - 0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U, - 0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U, - 0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U, - 0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU, - 0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU, - 0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U, - 0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U, - 0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU, - 0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU, - 0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U, - 0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U, - 0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U, - 0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U, - 0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU, - 0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U, - 0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U, - 0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU, - 0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU, - 0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U, - 0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U, - 0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U, - 0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU, - 0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U, - 0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U, - 0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U, - 0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U, - 0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U, - 0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U, - 0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U, - 0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU, - 0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U, - 0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U, - 0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU, - 0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU, - 0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U, - 0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU, - 0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U, - 0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U, - 0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U, - 0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U, - 0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U, - 0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U, - 0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU, - 0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU, - 0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU, - 0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU, - 0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U, - 0xf7cdea53U, 
0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U, - 0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U, - 0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU, - 0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U, - 0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U, + 0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU, 0xab6bcb3bU, + 0x9d45f11fU, 0xfa58abacU, 0xe303934bU, 0x30fa5520U, 0x766df6adU, + 0xcc769188U, 0x024c25f5U, 0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, + 0x62a38fb5U, 0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU, + 0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU, 0x8f5fe703U, + 0x929c9515U, 0x6d7aebbfU, 0x5259da95U, 0xbe832dd4U, 0x7421d358U, + 0xe0692949U, 0xc9c8448eU, 0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, + 0xb971dd27U, 0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU, + 0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U, 0x6477e0b1U, + 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U, 0x48685870U, 0x45fd198fU, + 0xde6c8794U, 0x7bf8b752U, 0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, + 0x55ab2a66U, 0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U, + 0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU, 0xcf1c2b8aU, + 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU, 0xdaf4cd65U, 0x05bed506U, + 0x34621fd1U, 0xa6fe8ac4U, 0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, + 0xf6eb75a4U, 0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU, + 0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU, 0x548db591U, + 0xc45d0571U, 0x06d46f04U, 0x5015ff60U, 0x98fb2419U, 0xbde997d6U, + 0x4043cc89U, 0xd99e7767U, 0xe842bdb0U, 0x898b8807U, 0x195b38e7U, + 0xc8eedb79U, 0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U, + 0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU, 0x0efffbfdU, + 0x8538560fU, 0xaed51e3dU, 0x2d392736U, 0x0fd9640aU, 0x5ca62168U, + 0x5b54d19bU, 0x362e3a24U, 0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, + 0x9b919e1bU, 0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU, + 0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U, 0x090d0b0eU, + 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U, 0xf1198557U, 0x75074cafU, + 0x99ddbbeeU, 0x7f60fda3U, 0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, + 0xfb7e345bU, 0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U, + 0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U, 0x4a247d85U, + 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U, 0x9e2f4b1dU, 0xb230f3dcU, + 0x8652ec0dU, 0xc1e3d077U, 0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, + 0xe9642247U, 0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U, + 0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U, 0xf581cfa6U, + 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU, 0x3a9de42cU, 0x78920d50U, + 0x5fcc9b6aU, 0x7e466254U, 0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, + 0xc3aff582U, 0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU, + 0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU, 0x267809cdU, + 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U, 0x956e65e6U, 0xffe67eaaU, + 0xbccf0821U, 0x15e8e6efU, 0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, + 0xb07cd629U, 0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U, + 0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U, 0x04984af1U, + 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U, 0x4dd68d76U, 0xefb04d43U, + 0xaa4d54ccU, 0x9604dfe4U, 0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, + 0x65517f46U, 0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU, + 0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU, 0xd7618c9aU, + 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU, 0xa927eeceU, 0x61c935b7U, + 0x1ce5ede1U, 0x47b13c7aU, 0xd2df599cU, 0xf2733f55U, 0x14ce7918U, + 0xc737bf73U, 0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U, + 0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U, 0x1dc37216U, + 0xe2250cbcU, 
0x3c498b28U, 0x0d9541ffU, 0xa8017139U, 0x0cb3de08U, + 0xb4e49cd8U, 0x56c19064U, 0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, + 0xb85742d0U, }; static const u32 Td4[256] = { - 0x52525252U, 0x09090909U, 0x6a6a6a6aU, 0xd5d5d5d5U, - 0x30303030U, 0x36363636U, 0xa5a5a5a5U, 0x38383838U, - 0xbfbfbfbfU, 0x40404040U, 0xa3a3a3a3U, 0x9e9e9e9eU, - 0x81818181U, 0xf3f3f3f3U, 0xd7d7d7d7U, 0xfbfbfbfbU, - 0x7c7c7c7cU, 0xe3e3e3e3U, 0x39393939U, 0x82828282U, - 0x9b9b9b9bU, 0x2f2f2f2fU, 0xffffffffU, 0x87878787U, - 0x34343434U, 0x8e8e8e8eU, 0x43434343U, 0x44444444U, - 0xc4c4c4c4U, 0xdedededeU, 0xe9e9e9e9U, 0xcbcbcbcbU, - 0x54545454U, 0x7b7b7b7bU, 0x94949494U, 0x32323232U, - 0xa6a6a6a6U, 0xc2c2c2c2U, 0x23232323U, 0x3d3d3d3dU, - 0xeeeeeeeeU, 0x4c4c4c4cU, 0x95959595U, 0x0b0b0b0bU, - 0x42424242U, 0xfafafafaU, 0xc3c3c3c3U, 0x4e4e4e4eU, - 0x08080808U, 0x2e2e2e2eU, 0xa1a1a1a1U, 0x66666666U, - 0x28282828U, 0xd9d9d9d9U, 0x24242424U, 0xb2b2b2b2U, - 0x76767676U, 0x5b5b5b5bU, 0xa2a2a2a2U, 0x49494949U, - 0x6d6d6d6dU, 0x8b8b8b8bU, 0xd1d1d1d1U, 0x25252525U, - 0x72727272U, 0xf8f8f8f8U, 0xf6f6f6f6U, 0x64646464U, - 0x86868686U, 0x68686868U, 0x98989898U, 0x16161616U, - 0xd4d4d4d4U, 0xa4a4a4a4U, 0x5c5c5c5cU, 0xccccccccU, - 0x5d5d5d5dU, 0x65656565U, 0xb6b6b6b6U, 0x92929292U, - 0x6c6c6c6cU, 0x70707070U, 0x48484848U, 0x50505050U, - 0xfdfdfdfdU, 0xededededU, 0xb9b9b9b9U, 0xdadadadaU, - 0x5e5e5e5eU, 0x15151515U, 0x46464646U, 0x57575757U, - 0xa7a7a7a7U, 0x8d8d8d8dU, 0x9d9d9d9dU, 0x84848484U, - 0x90909090U, 0xd8d8d8d8U, 0xababababU, 0x00000000U, - 0x8c8c8c8cU, 0xbcbcbcbcU, 0xd3d3d3d3U, 0x0a0a0a0aU, - 0xf7f7f7f7U, 0xe4e4e4e4U, 0x58585858U, 0x05050505U, - 0xb8b8b8b8U, 0xb3b3b3b3U, 0x45454545U, 0x06060606U, - 0xd0d0d0d0U, 0x2c2c2c2cU, 0x1e1e1e1eU, 0x8f8f8f8fU, - 0xcacacacaU, 0x3f3f3f3fU, 0x0f0f0f0fU, 0x02020202U, - 0xc1c1c1c1U, 0xafafafafU, 0xbdbdbdbdU, 0x03030303U, - 0x01010101U, 0x13131313U, 0x8a8a8a8aU, 0x6b6b6b6bU, - 0x3a3a3a3aU, 0x91919191U, 0x11111111U, 0x41414141U, - 0x4f4f4f4fU, 0x67676767U, 0xdcdcdcdcU, 0xeaeaeaeaU, - 0x97979797U, 0xf2f2f2f2U, 0xcfcfcfcfU, 0xcecececeU, - 0xf0f0f0f0U, 0xb4b4b4b4U, 0xe6e6e6e6U, 0x73737373U, - 0x96969696U, 0xacacacacU, 0x74747474U, 0x22222222U, - 0xe7e7e7e7U, 0xadadadadU, 0x35353535U, 0x85858585U, - 0xe2e2e2e2U, 0xf9f9f9f9U, 0x37373737U, 0xe8e8e8e8U, - 0x1c1c1c1cU, 0x75757575U, 0xdfdfdfdfU, 0x6e6e6e6eU, - 0x47474747U, 0xf1f1f1f1U, 0x1a1a1a1aU, 0x71717171U, - 0x1d1d1d1dU, 0x29292929U, 0xc5c5c5c5U, 0x89898989U, - 0x6f6f6f6fU, 0xb7b7b7b7U, 0x62626262U, 0x0e0e0e0eU, - 0xaaaaaaaaU, 0x18181818U, 0xbebebebeU, 0x1b1b1b1bU, - 0xfcfcfcfcU, 0x56565656U, 0x3e3e3e3eU, 0x4b4b4b4bU, - 0xc6c6c6c6U, 0xd2d2d2d2U, 0x79797979U, 0x20202020U, - 0x9a9a9a9aU, 0xdbdbdbdbU, 0xc0c0c0c0U, 0xfefefefeU, - 0x78787878U, 0xcdcdcdcdU, 0x5a5a5a5aU, 0xf4f4f4f4U, - 0x1f1f1f1fU, 0xddddddddU, 0xa8a8a8a8U, 0x33333333U, - 0x88888888U, 0x07070707U, 0xc7c7c7c7U, 0x31313131U, - 0xb1b1b1b1U, 0x12121212U, 0x10101010U, 0x59595959U, - 0x27272727U, 0x80808080U, 0xececececU, 0x5f5f5f5fU, - 0x60606060U, 0x51515151U, 0x7f7f7f7fU, 0xa9a9a9a9U, - 0x19191919U, 0xb5b5b5b5U, 0x4a4a4a4aU, 0x0d0d0d0dU, - 0x2d2d2d2dU, 0xe5e5e5e5U, 0x7a7a7a7aU, 0x9f9f9f9fU, - 0x93939393U, 0xc9c9c9c9U, 0x9c9c9c9cU, 0xefefefefU, - 0xa0a0a0a0U, 0xe0e0e0e0U, 0x3b3b3b3bU, 0x4d4d4d4dU, - 0xaeaeaeaeU, 0x2a2a2a2aU, 0xf5f5f5f5U, 0xb0b0b0b0U, - 0xc8c8c8c8U, 0xebebebebU, 0xbbbbbbbbU, 0x3c3c3c3cU, - 0x83838383U, 0x53535353U, 0x99999999U, 0x61616161U, - 0x17171717U, 0x2b2b2b2bU, 0x04040404U, 0x7e7e7e7eU, - 0xbabababaU, 0x77777777U, 0xd6d6d6d6U, 0x26262626U, - 0xe1e1e1e1U, 0x69696969U, 
0x14141414U, 0x63636363U, - 0x55555555U, 0x21212121U, 0x0c0c0c0cU, 0x7d7d7d7dU, + 0x52525252U, 0x09090909U, 0x6a6a6a6aU, 0xd5d5d5d5U, 0x30303030U, + 0x36363636U, 0xa5a5a5a5U, 0x38383838U, 0xbfbfbfbfU, 0x40404040U, + 0xa3a3a3a3U, 0x9e9e9e9eU, 0x81818181U, 0xf3f3f3f3U, 0xd7d7d7d7U, + 0xfbfbfbfbU, 0x7c7c7c7cU, 0xe3e3e3e3U, 0x39393939U, 0x82828282U, + 0x9b9b9b9bU, 0x2f2f2f2fU, 0xffffffffU, 0x87878787U, 0x34343434U, + 0x8e8e8e8eU, 0x43434343U, 0x44444444U, 0xc4c4c4c4U, 0xdedededeU, + 0xe9e9e9e9U, 0xcbcbcbcbU, 0x54545454U, 0x7b7b7b7bU, 0x94949494U, + 0x32323232U, 0xa6a6a6a6U, 0xc2c2c2c2U, 0x23232323U, 0x3d3d3d3dU, + 0xeeeeeeeeU, 0x4c4c4c4cU, 0x95959595U, 0x0b0b0b0bU, 0x42424242U, + 0xfafafafaU, 0xc3c3c3c3U, 0x4e4e4e4eU, 0x08080808U, 0x2e2e2e2eU, + 0xa1a1a1a1U, 0x66666666U, 0x28282828U, 0xd9d9d9d9U, 0x24242424U, + 0xb2b2b2b2U, 0x76767676U, 0x5b5b5b5bU, 0xa2a2a2a2U, 0x49494949U, + 0x6d6d6d6dU, 0x8b8b8b8bU, 0xd1d1d1d1U, 0x25252525U, 0x72727272U, + 0xf8f8f8f8U, 0xf6f6f6f6U, 0x64646464U, 0x86868686U, 0x68686868U, + 0x98989898U, 0x16161616U, 0xd4d4d4d4U, 0xa4a4a4a4U, 0x5c5c5c5cU, + 0xccccccccU, 0x5d5d5d5dU, 0x65656565U, 0xb6b6b6b6U, 0x92929292U, + 0x6c6c6c6cU, 0x70707070U, 0x48484848U, 0x50505050U, 0xfdfdfdfdU, + 0xededededU, 0xb9b9b9b9U, 0xdadadadaU, 0x5e5e5e5eU, 0x15151515U, + 0x46464646U, 0x57575757U, 0xa7a7a7a7U, 0x8d8d8d8dU, 0x9d9d9d9dU, + 0x84848484U, 0x90909090U, 0xd8d8d8d8U, 0xababababU, 0x00000000U, + 0x8c8c8c8cU, 0xbcbcbcbcU, 0xd3d3d3d3U, 0x0a0a0a0aU, 0xf7f7f7f7U, + 0xe4e4e4e4U, 0x58585858U, 0x05050505U, 0xb8b8b8b8U, 0xb3b3b3b3U, + 0x45454545U, 0x06060606U, 0xd0d0d0d0U, 0x2c2c2c2cU, 0x1e1e1e1eU, + 0x8f8f8f8fU, 0xcacacacaU, 0x3f3f3f3fU, 0x0f0f0f0fU, 0x02020202U, + 0xc1c1c1c1U, 0xafafafafU, 0xbdbdbdbdU, 0x03030303U, 0x01010101U, + 0x13131313U, 0x8a8a8a8aU, 0x6b6b6b6bU, 0x3a3a3a3aU, 0x91919191U, + 0x11111111U, 0x41414141U, 0x4f4f4f4fU, 0x67676767U, 0xdcdcdcdcU, + 0xeaeaeaeaU, 0x97979797U, 0xf2f2f2f2U, 0xcfcfcfcfU, 0xcecececeU, + 0xf0f0f0f0U, 0xb4b4b4b4U, 0xe6e6e6e6U, 0x73737373U, 0x96969696U, + 0xacacacacU, 0x74747474U, 0x22222222U, 0xe7e7e7e7U, 0xadadadadU, + 0x35353535U, 0x85858585U, 0xe2e2e2e2U, 0xf9f9f9f9U, 0x37373737U, + 0xe8e8e8e8U, 0x1c1c1c1cU, 0x75757575U, 0xdfdfdfdfU, 0x6e6e6e6eU, + 0x47474747U, 0xf1f1f1f1U, 0x1a1a1a1aU, 0x71717171U, 0x1d1d1d1dU, + 0x29292929U, 0xc5c5c5c5U, 0x89898989U, 0x6f6f6f6fU, 0xb7b7b7b7U, + 0x62626262U, 0x0e0e0e0eU, 0xaaaaaaaaU, 0x18181818U, 0xbebebebeU, + 0x1b1b1b1bU, 0xfcfcfcfcU, 0x56565656U, 0x3e3e3e3eU, 0x4b4b4b4bU, + 0xc6c6c6c6U, 0xd2d2d2d2U, 0x79797979U, 0x20202020U, 0x9a9a9a9aU, + 0xdbdbdbdbU, 0xc0c0c0c0U, 0xfefefefeU, 0x78787878U, 0xcdcdcdcdU, + 0x5a5a5a5aU, 0xf4f4f4f4U, 0x1f1f1f1fU, 0xddddddddU, 0xa8a8a8a8U, + 0x33333333U, 0x88888888U, 0x07070707U, 0xc7c7c7c7U, 0x31313131U, + 0xb1b1b1b1U, 0x12121212U, 0x10101010U, 0x59595959U, 0x27272727U, + 0x80808080U, 0xececececU, 0x5f5f5f5fU, 0x60606060U, 0x51515151U, + 0x7f7f7f7fU, 0xa9a9a9a9U, 0x19191919U, 0xb5b5b5b5U, 0x4a4a4a4aU, + 0x0d0d0d0dU, 0x2d2d2d2dU, 0xe5e5e5e5U, 0x7a7a7a7aU, 0x9f9f9f9fU, + 0x93939393U, 0xc9c9c9c9U, 0x9c9c9c9cU, 0xefefefefU, 0xa0a0a0a0U, + 0xe0e0e0e0U, 0x3b3b3b3bU, 0x4d4d4d4dU, 0xaeaeaeaeU, 0x2a2a2a2aU, + 0xf5f5f5f5U, 0xb0b0b0b0U, 0xc8c8c8c8U, 0xebebebebU, 0xbbbbbbbbU, + 0x3c3c3c3cU, 0x83838383U, 0x53535353U, 0x99999999U, 0x61616161U, + 0x17171717U, 0x2b2b2b2bU, 0x04040404U, 0x7e7e7e7eU, 0xbabababaU, + 0x77777777U, 0xd6d6d6d6U, 0x26262626U, 0xe1e1e1e1U, 0x69696969U, + 0x14141414U, 0x63636363U, 0x55555555U, 0x21212121U, 0x0c0c0c0cU, + 0x7d7d7d7dU, }; #endif static const u32 rcon[] = { - 
0x01000000, 0x02000000, 0x04000000, 0x08000000, - 0x10000000, 0x20000000, 0x40000000, 0x80000000, - 0x1B000000, 0x36000000, /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */ + 0x01000000, 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000, + 0x40000000, 0x80000000, 0x1B000000, 0x36000000, /* for 128-bit blocks, + Rijndael never uses more + than 10 rcon values */ }; -#define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3])) -#define PUTU32(ct, st) { (ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); (ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); } +#define GETU32(pt) \ + (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ ((u32)(pt)[2] << 8) ^ \ + ((u32)(pt)[3])) +#define PUTU32(ct, st) \ + { \ + (ct)[0] = (u8)((st) >> 24); \ + (ct)[1] = (u8)((st) >> 16); \ + (ct)[2] = (u8)((st) >> 8); \ + (ct)[3] = (u8)(st); \ + } /** * Expand the cipher key into the encryption key schedule. * * @return the number of rounds for the given cipher key size. */ -int -rijndaelKeySetupEnc(u32 rk[/*4*(Nr + 1)*/], const u8 cipherKey[], int keyBits) +int rijndaelKeySetupEnc(u32 rk[/*4*(Nr + 1)*/], const u8 cipherKey[], + int keyBits) { - int i = 0; - u32 temp; + int i = 0; + u32 temp; - rk[0] = GETU32(cipherKey ); - rk[1] = GETU32(cipherKey + 4); - rk[2] = GETU32(cipherKey + 8); - rk[3] = GETU32(cipherKey + 12); - if (keyBits == 128) { - for (;;) { - temp = rk[3]; - rk[4] = rk[0] ^ - (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ - (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ - (Te4[(temp ) & 0xff] & 0x0000ff00) ^ - (Te4[(temp >> 24) ] & 0x000000ff) ^ - rcon[i]; - rk[5] = rk[1] ^ rk[4]; - rk[6] = rk[2] ^ rk[5]; - rk[7] = rk[3] ^ rk[6]; - if (++i == 10) { - return 10; - } - rk += 4; - } - } - rk[4] = GETU32(cipherKey + 16); - rk[5] = GETU32(cipherKey + 20); - if (keyBits == 192) { - for (;;) { - temp = rk[ 5]; - rk[ 6] = rk[ 0] ^ - (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ - (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ - (Te4[(temp ) & 0xff] & 0x0000ff00) ^ - (Te4[(temp >> 24) ] & 0x000000ff) ^ - rcon[i]; - rk[ 7] = rk[ 1] ^ rk[ 6]; - rk[ 8] = rk[ 2] ^ rk[ 7]; - rk[ 9] = rk[ 3] ^ rk[ 8]; - if (++i == 8) { - return 12; - } - rk[10] = rk[ 4] ^ rk[ 9]; - rk[11] = rk[ 5] ^ rk[10]; - rk += 6; - } - } - rk[6] = GETU32(cipherKey + 24); - rk[7] = GETU32(cipherKey + 28); - if (keyBits == 256) { - for (;;) { - temp = rk[ 7]; - rk[ 8] = rk[ 0] ^ - (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ - (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ - (Te4[(temp ) & 0xff] & 0x0000ff00) ^ - (Te4[(temp >> 24) ] & 0x000000ff) ^ - rcon[i]; - rk[ 9] = rk[ 1] ^ rk[ 8]; - rk[10] = rk[ 2] ^ rk[ 9]; - rk[11] = rk[ 3] ^ rk[10]; - if (++i == 7) { - return 14; - } - temp = rk[11]; - rk[12] = rk[ 4] ^ - (Te4[(temp >> 24) ] & 0xff000000) ^ - (Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^ - (Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^ - (Te4[(temp ) & 0xff] & 0x000000ff); - rk[13] = rk[ 5] ^ rk[12]; - rk[14] = rk[ 6] ^ rk[13]; - rk[15] = rk[ 7] ^ rk[14]; - rk += 8; - } - } - return 0; + rk[0] = GETU32(cipherKey); + rk[1] = GETU32(cipherKey + 4); + rk[2] = GETU32(cipherKey + 8); + rk[3] = GETU32(cipherKey + 12); + if ( keyBits == 128 ) + { + for ( ;; ) + { + temp = rk[3]; + rk[4] = rk[0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ + (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ + (Te4[(temp)&0xff] & 0x0000ff00) ^ + (Te4[(temp >> 24)] & 0x000000ff) ^ rcon[i]; + rk[5] = rk[1] ^ rk[4]; + rk[6] = rk[2] ^ rk[5]; + rk[7] = rk[3] ^ rk[6]; + if ( ++i == 10 ) + { + return 10; + } + rk += 4; + } + } + rk[4] = 
GETU32(cipherKey + 16); + rk[5] = GETU32(cipherKey + 20); + if ( keyBits == 192 ) + { + for ( ;; ) + { + temp = rk[5]; + rk[6] = rk[0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ + (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ + (Te4[(temp)&0xff] & 0x0000ff00) ^ + (Te4[(temp >> 24)] & 0x000000ff) ^ rcon[i]; + rk[7] = rk[1] ^ rk[6]; + rk[8] = rk[2] ^ rk[7]; + rk[9] = rk[3] ^ rk[8]; + if ( ++i == 8 ) + { + return 12; + } + rk[10] = rk[4] ^ rk[9]; + rk[11] = rk[5] ^ rk[10]; + rk += 6; + } + } + rk[6] = GETU32(cipherKey + 24); + rk[7] = GETU32(cipherKey + 28); + if ( keyBits == 256 ) + { + for ( ;; ) + { + temp = rk[7]; + rk[8] = rk[0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ + (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ + (Te4[(temp)&0xff] & 0x0000ff00) ^ + (Te4[(temp >> 24)] & 0x000000ff) ^ rcon[i]; + rk[9] = rk[1] ^ rk[8]; + rk[10] = rk[2] ^ rk[9]; + rk[11] = rk[3] ^ rk[10]; + if ( ++i == 7 ) + { + return 14; + } + temp = rk[11]; + rk[12] = rk[4] ^ (Te4[(temp >> 24)] & 0xff000000) ^ + (Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^ + (Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^ + (Te4[(temp)&0xff] & 0x000000ff); + rk[13] = rk[5] ^ rk[12]; + rk[14] = rk[6] ^ rk[13]; + rk[15] = rk[7] ^ rk[14]; + rk += 8; + } + } + return 0; } #ifdef NEED_RIJNDAEL_DECRYPT @@ -815,419 +706,456 @@ rijndaelKeySetupEnc(u32 rk[/*4*(Nr + 1)*/], const u8 cipherKey[], int keyBits) * * @return the number of rounds for the given cipher key size. */ -int -rijndaelKeySetupDec(u32 rk[/*4*(Nr + 1)*/], const u8 cipherKey[], int keyBits) +int rijndaelKeySetupDec(u32 rk[/*4*(Nr + 1)*/], const u8 cipherKey[], + int keyBits) { - int Nr, i, j; - u32 temp; + int Nr, i, j; + u32 temp; - /* expand the cipher key: */ - Nr = rijndaelKeySetupEnc(rk, cipherKey, keyBits); + /* expand the cipher key: */ + Nr = rijndaelKeySetupEnc(rk, cipherKey, keyBits); - /* invert the order of the round keys: */ - for (i = 0, j = 4*Nr; i < j; i += 4, j -= 4) { - temp = rk[i ]; rk[i ] = rk[j ]; rk[j ] = temp; - temp = rk[i + 1]; rk[i + 1] = rk[j + 1]; rk[j + 1] = temp; - temp = rk[i + 2]; rk[i + 2] = rk[j + 2]; rk[j + 2] = temp; - temp = rk[i + 3]; rk[i + 3] = rk[j + 3]; rk[j + 3] = temp; - } - /* apply the inverse MixColumn transform to all round keys but the first and the last: */ - for (i = 1; i < Nr; i++) { - rk += 4; - rk[0] = - Td0[Te4[(rk[0] >> 24) ] & 0xff] ^ - Td1[Te4[(rk[0] >> 16) & 0xff] & 0xff] ^ - Td2[Te4[(rk[0] >> 8) & 0xff] & 0xff] ^ - Td3[Te4[(rk[0] ) & 0xff] & 0xff]; - rk[1] = - Td0[Te4[(rk[1] >> 24) ] & 0xff] ^ - Td1[Te4[(rk[1] >> 16) & 0xff] & 0xff] ^ - Td2[Te4[(rk[1] >> 8) & 0xff] & 0xff] ^ - Td3[Te4[(rk[1] ) & 0xff] & 0xff]; - rk[2] = - Td0[Te4[(rk[2] >> 24) ] & 0xff] ^ - Td1[Te4[(rk[2] >> 16) & 0xff] & 0xff] ^ - Td2[Te4[(rk[2] >> 8) & 0xff] & 0xff] ^ - Td3[Te4[(rk[2] ) & 0xff] & 0xff]; - rk[3] = - Td0[Te4[(rk[3] >> 24) ] & 0xff] ^ - Td1[Te4[(rk[3] >> 16) & 0xff] & 0xff] ^ - Td2[Te4[(rk[3] >> 8) & 0xff] & 0xff] ^ - Td3[Te4[(rk[3] ) & 0xff] & 0xff]; - } - return Nr; + /* invert the order of the round keys: */ + for ( i = 0, j = 4 * Nr; i < j; i += 4, j -= 4 ) + { + temp = rk[i]; + rk[i] = rk[j]; + rk[j] = temp; + temp = rk[i + 1]; + rk[i + 1] = rk[j + 1]; + rk[j + 1] = temp; + temp = rk[i + 2]; + rk[i + 2] = rk[j + 2]; + rk[j + 2] = temp; + temp = rk[i + 3]; + rk[i + 3] = rk[j + 3]; + rk[j + 3] = temp; + } + /* apply the inverse MixColumn transform to all round keys but the first and + * the last: */ + for ( i = 1; i < Nr; i++ ) + { + rk += 4; + rk[0] = Td0[Te4[(rk[0] >> 24)] & 0xff] ^ + Td1[Te4[(rk[0] >> 16) & 0xff] & 0xff] ^ + Td2[Te4[(rk[0] >> 8) 
& 0xff] & 0xff] ^ + Td3[Te4[(rk[0]) & 0xff] & 0xff]; + rk[1] = Td0[Te4[(rk[1] >> 24)] & 0xff] ^ + Td1[Te4[(rk[1] >> 16) & 0xff] & 0xff] ^ + Td2[Te4[(rk[1] >> 8) & 0xff] & 0xff] ^ + Td3[Te4[(rk[1]) & 0xff] & 0xff]; + rk[2] = Td0[Te4[(rk[2] >> 24)] & 0xff] ^ + Td1[Te4[(rk[2] >> 16) & 0xff] & 0xff] ^ + Td2[Te4[(rk[2] >> 8) & 0xff] & 0xff] ^ + Td3[Te4[(rk[2]) & 0xff] & 0xff]; + rk[3] = Td0[Te4[(rk[3] >> 24)] & 0xff] ^ + Td1[Te4[(rk[3] >> 16) & 0xff] & 0xff] ^ + Td2[Te4[(rk[3] >> 8) & 0xff] & 0xff] ^ + Td3[Te4[(rk[3]) & 0xff] & 0xff]; + } + return Nr; } #endif /* NEED_RIJNDAEL_DECRYPT */ -void -rijndaelEncrypt(const u32 rk[/*4*(Nr + 1)*/], int Nr, const u8 pt[16], - u8 ct[16]) +void rijndaelEncrypt(const u32 rk[/*4*(Nr + 1)*/], int Nr, const u8 pt[16], + u8 ct[16]) { - u32 s0, s1, s2, s3, t0, t1, t2, t3; + u32 s0, s1, s2, s3, t0, t1, t2, t3; #ifndef FULL_UNROLL int r; #endif /* ?FULL_UNROLL */ /* - * map byte array block to cipher state - * and add initial round key: - */ - s0 = GETU32(pt ) ^ rk[0]; - s1 = GETU32(pt + 4) ^ rk[1]; - s2 = GETU32(pt + 8) ^ rk[2]; - s3 = GETU32(pt + 12) ^ rk[3]; + * map byte array block to cipher state + * and add initial round key: + */ + s0 = GETU32(pt) ^ rk[0]; + s1 = GETU32(pt + 4) ^ rk[1]; + s2 = GETU32(pt + 8) ^ rk[2]; + s3 = GETU32(pt + 12) ^ rk[3]; #ifdef FULL_UNROLL /* round 1: */ - t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[ 4]; - t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[ 5]; - t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[ 6]; - t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[ 7]; - /* round 2: */ - s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[ 8]; - s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[ 9]; - s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[10]; - s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[11]; + t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ + Te3[s3 & 0xff] ^ rk[4]; + t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ + Te3[s0 & 0xff] ^ rk[5]; + t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ + Te3[s1 & 0xff] ^ rk[6]; + t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ + Te3[s2 & 0xff] ^ rk[7]; + /* round 2: */ + s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ + Te3[t3 & 0xff] ^ rk[8]; + s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ + Te3[t0 & 0xff] ^ rk[9]; + s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ + Te3[t1 & 0xff] ^ rk[10]; + s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ + Te3[t2 & 0xff] ^ rk[11]; /* round 3: */ - t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[12]; - t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[13]; - t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[14]; - t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[15]; - /* round 4: */ - s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[16]; - s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[17]; - s2 = 
Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[18]; - s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[19]; + t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ + Te3[s3 & 0xff] ^ rk[12]; + t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ + Te3[s0 & 0xff] ^ rk[13]; + t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ + Te3[s1 & 0xff] ^ rk[14]; + t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ + Te3[s2 & 0xff] ^ rk[15]; + /* round 4: */ + s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ + Te3[t3 & 0xff] ^ rk[16]; + s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ + Te3[t0 & 0xff] ^ rk[17]; + s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ + Te3[t1 & 0xff] ^ rk[18]; + s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ + Te3[t2 & 0xff] ^ rk[19]; /* round 5: */ - t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[20]; - t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[21]; - t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[22]; - t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[23]; - /* round 6: */ - s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[24]; - s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[25]; - s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[26]; - s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[27]; + t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ + Te3[s3 & 0xff] ^ rk[20]; + t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ + Te3[s0 & 0xff] ^ rk[21]; + t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ + Te3[s1 & 0xff] ^ rk[22]; + t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ + Te3[s2 & 0xff] ^ rk[23]; + /* round 6: */ + s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ + Te3[t3 & 0xff] ^ rk[24]; + s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ + Te3[t0 & 0xff] ^ rk[25]; + s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ + Te3[t1 & 0xff] ^ rk[26]; + s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ + Te3[t2 & 0xff] ^ rk[27]; /* round 7: */ - t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[28]; - t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[29]; - t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[30]; - t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[31]; - /* round 8: */ - s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[32]; - s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[33]; - s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[34]; - s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[35]; + t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ + Te3[s3 & 0xff] ^ rk[28]; + t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 
16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ + Te3[s0 & 0xff] ^ rk[29]; + t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ + Te3[s1 & 0xff] ^ rk[30]; + t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ + Te3[s2 & 0xff] ^ rk[31]; + /* round 8: */ + s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ + Te3[t3 & 0xff] ^ rk[32]; + s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ + Te3[t0 & 0xff] ^ rk[33]; + s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ + Te3[t1 & 0xff] ^ rk[34]; + s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ + Te3[t2 & 0xff] ^ rk[35]; /* round 9: */ - t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[36]; - t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[37]; - t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[38]; - t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[39]; - if (Nr > 10) { - /* round 10: */ - s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[40]; - s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[41]; - s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[42]; - s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[43]; - /* round 11: */ - t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[44]; - t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[45]; - t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[46]; - t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[47]; - if (Nr > 12) { - /* round 12: */ - s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[48]; - s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[49]; - s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[50]; - s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[51]; - /* round 13: */ - t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[52]; - t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[53]; - t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[54]; - t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[55]; - } + t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ + Te3[s3 & 0xff] ^ rk[36]; + t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ + Te3[s0 & 0xff] ^ rk[37]; + t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ + Te3[s1 & 0xff] ^ rk[38]; + t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ + Te3[s2 & 0xff] ^ rk[39]; + if ( Nr > 10 ) + { + /* round 10: */ + s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ + Te3[t3 & 0xff] ^ rk[40]; + s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ + Te3[t0 & 0xff] ^ rk[41]; + s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ + Te3[t1 & 0xff] ^ rk[42]; + s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ + Te3[t2 
& 0xff] ^ rk[43]; + /* round 11: */ + t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ + Te3[s3 & 0xff] ^ rk[44]; + t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ + Te3[s0 & 0xff] ^ rk[45]; + t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ + Te3[s1 & 0xff] ^ rk[46]; + t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ + Te3[s2 & 0xff] ^ rk[47]; + if ( Nr > 12 ) + { + /* round 12: */ + s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ + Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[48]; + s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ + Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[49]; + s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ + Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[50]; + s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ + Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[51]; + /* round 13: */ + t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ + Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[52]; + t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ + Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[53]; + t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ + Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[54]; + t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ + Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[55]; + } } rk += Nr << 2; #else /* !FULL_UNROLL */ /* - * Nr - 1 full rounds: - */ + * Nr - 1 full rounds: + */ r = Nr >> 1; - for (;;) { - t0 = - Te0[(s0 >> 24) ] ^ - Te1[(s1 >> 16) & 0xff] ^ - Te2[(s2 >> 8) & 0xff] ^ - Te3[(s3 ) & 0xff] ^ - rk[4]; - t1 = - Te0[(s1 >> 24) ] ^ - Te1[(s2 >> 16) & 0xff] ^ - Te2[(s3 >> 8) & 0xff] ^ - Te3[(s0 ) & 0xff] ^ - rk[5]; - t2 = - Te0[(s2 >> 24) ] ^ - Te1[(s3 >> 16) & 0xff] ^ - Te2[(s0 >> 8) & 0xff] ^ - Te3[(s1 ) & 0xff] ^ - rk[6]; - t3 = - Te0[(s3 >> 24) ] ^ - Te1[(s0 >> 16) & 0xff] ^ - Te2[(s1 >> 8) & 0xff] ^ - Te3[(s2 ) & 0xff] ^ - rk[7]; + for ( ;; ) + { + t0 = Te0[(s0 >> 24)] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ + Te3[(s3)&0xff] ^ rk[4]; + t1 = Te0[(s1 >> 24)] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ + Te3[(s0)&0xff] ^ rk[5]; + t2 = Te0[(s2 >> 24)] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ + Te3[(s1)&0xff] ^ rk[6]; + t3 = Te0[(s3 >> 24)] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ + Te3[(s2)&0xff] ^ rk[7]; - rk += 8; - if (--r == 0) { - break; - } + rk += 8; + if ( --r == 0 ) + { + break; + } - s0 = - Te0[(t0 >> 24) ] ^ - Te1[(t1 >> 16) & 0xff] ^ - Te2[(t2 >> 8) & 0xff] ^ - Te3[(t3 ) & 0xff] ^ - rk[0]; - s1 = - Te0[(t1 >> 24) ] ^ - Te1[(t2 >> 16) & 0xff] ^ - Te2[(t3 >> 8) & 0xff] ^ - Te3[(t0 ) & 0xff] ^ - rk[1]; - s2 = - Te0[(t2 >> 24) ] ^ - Te1[(t3 >> 16) & 0xff] ^ - Te2[(t0 >> 8) & 0xff] ^ - Te3[(t1 ) & 0xff] ^ - rk[2]; - s3 = - Te0[(t3 >> 24) ] ^ - Te1[(t0 >> 16) & 0xff] ^ - Te2[(t1 >> 8) & 0xff] ^ - Te3[(t2 ) & 0xff] ^ - rk[3]; + s0 = Te0[(t0 >> 24)] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ + Te3[(t3)&0xff] ^ rk[0]; + s1 = Te0[(t1 >> 24)] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ + Te3[(t0)&0xff] ^ rk[1]; + s2 = Te0[(t2 >> 24)] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ + Te3[(t1)&0xff] ^ rk[2]; + s3 = Te0[(t3 >> 24)] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ + Te3[(t2)&0xff] ^ rk[3]; } #endif /* ?FULL_UNROLL */ - /* - * apply last round and - * map cipher state to byte array block: - */ - s0 = - (Te4[(t0 >> 24) ] & 0xff000000) ^ - (Te4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ - (Te4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ - (Te4[(t3 ) & 0xff] & 0x000000ff) ^ - rk[0]; - PUTU32(ct , s0); - s1 = - (Te4[(t1 >> 24) ] & 0xff000000) ^ 
- (Te4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ - (Te4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ - (Te4[(t0 ) & 0xff] & 0x000000ff) ^ - rk[1]; - PUTU32(ct + 4, s1); - s2 = - (Te4[(t2 >> 24) ] & 0xff000000) ^ - (Te4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ - (Te4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ - (Te4[(t1 ) & 0xff] & 0x000000ff) ^ - rk[2]; - PUTU32(ct + 8, s2); - s3 = - (Te4[(t3 >> 24) ] & 0xff000000) ^ - (Te4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ - (Te4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ - (Te4[(t2 ) & 0xff] & 0x000000ff) ^ - rk[3]; - PUTU32(ct + 12, s3); + /* + * apply last round and + * map cipher state to byte array block: + */ + s0 = (Te4[(t0 >> 24)] & 0xff000000) ^ + (Te4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ + (Te4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (Te4[(t3)&0xff] & 0x000000ff) ^ + rk[0]; + PUTU32(ct, s0); + s1 = (Te4[(t1 >> 24)] & 0xff000000) ^ + (Te4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ + (Te4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ (Te4[(t0)&0xff] & 0x000000ff) ^ + rk[1]; + PUTU32(ct + 4, s1); + s2 = (Te4[(t2 >> 24)] & 0xff000000) ^ + (Te4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ + (Te4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ (Te4[(t1)&0xff] & 0x000000ff) ^ + rk[2]; + PUTU32(ct + 8, s2); + s3 = (Te4[(t3 >> 24)] & 0xff000000) ^ + (Te4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ + (Te4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ (Te4[(t2)&0xff] & 0x000000ff) ^ + rk[3]; + PUTU32(ct + 12, s3); } #ifdef NEED_RIJNDAEL_DECRYPT -static void -rijndaelDecrypt(const u32 rk[/*4*(Nr + 1)*/], int Nr, const u8 ct[16], - u8 pt[16]) +static void rijndaelDecrypt(const u32 rk[/*4*(Nr + 1)*/], int Nr, + const u8 ct[16], u8 pt[16]) { - u32 s0, s1, s2, s3, t0, t1, t2, t3; + u32 s0, s1, s2, s3, t0, t1, t2, t3; #ifndef FULL_UNROLL int r; #endif /* ?FULL_UNROLL */ /* - * map byte array block to cipher state - * and add initial round key: - */ - s0 = GETU32(ct ) ^ rk[0]; - s1 = GETU32(ct + 4) ^ rk[1]; - s2 = GETU32(ct + 8) ^ rk[2]; + * map byte array block to cipher state + * and add initial round key: + */ + s0 = GETU32(ct) ^ rk[0]; + s1 = GETU32(ct + 4) ^ rk[1]; + s2 = GETU32(ct + 8) ^ rk[2]; s3 = GETU32(ct + 12) ^ rk[3]; #ifdef FULL_UNROLL /* round 1: */ - t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[ 4]; - t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[ 5]; - t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[ 6]; - t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[ 7]; + t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ + Td3[s1 & 0xff] ^ rk[4]; + t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ + Td3[s2 & 0xff] ^ rk[5]; + t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ + Td3[s3 & 0xff] ^ rk[6]; + t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ + Td3[s0 & 0xff] ^ rk[7]; /* round 2: */ - s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[ 8]; - s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[ 9]; - s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[10]; - s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[11]; + s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ + Td3[t1 & 0xff] ^ rk[8]; + s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ + Td3[t2 & 0xff] ^ rk[9]; + s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 
8) & 0xff] ^ + Td3[t3 & 0xff] ^ rk[10]; + s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ + Td3[t0 & 0xff] ^ rk[11]; /* round 3: */ - t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[12]; - t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[13]; - t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[14]; - t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[15]; + t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ + Td3[s1 & 0xff] ^ rk[12]; + t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ + Td3[s2 & 0xff] ^ rk[13]; + t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ + Td3[s3 & 0xff] ^ rk[14]; + t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ + Td3[s0 & 0xff] ^ rk[15]; /* round 4: */ - s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[16]; - s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[17]; - s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[18]; - s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[19]; + s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ + Td3[t1 & 0xff] ^ rk[16]; + s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ + Td3[t2 & 0xff] ^ rk[17]; + s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ + Td3[t3 & 0xff] ^ rk[18]; + s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ + Td3[t0 & 0xff] ^ rk[19]; /* round 5: */ - t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[20]; - t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[21]; - t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[22]; - t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[23]; + t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ + Td3[s1 & 0xff] ^ rk[20]; + t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ + Td3[s2 & 0xff] ^ rk[21]; + t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ + Td3[s3 & 0xff] ^ rk[22]; + t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ + Td3[s0 & 0xff] ^ rk[23]; /* round 6: */ - s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[24]; - s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[25]; - s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[26]; - s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[27]; + s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ + Td3[t1 & 0xff] ^ rk[24]; + s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ + Td3[t2 & 0xff] ^ rk[25]; + s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ + Td3[t3 & 0xff] ^ rk[26]; + s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ + Td3[t0 & 0xff] ^ rk[27]; /* round 7: */ - t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[28]; - t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[29]; - t2 = 
Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[30]; - t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[31]; + t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ + Td3[s1 & 0xff] ^ rk[28]; + t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ + Td3[s2 & 0xff] ^ rk[29]; + t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ + Td3[s3 & 0xff] ^ rk[30]; + t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ + Td3[s0 & 0xff] ^ rk[31]; /* round 8: */ - s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[32]; - s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[33]; - s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[34]; - s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[35]; + s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ + Td3[t1 & 0xff] ^ rk[32]; + s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ + Td3[t2 & 0xff] ^ rk[33]; + s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ + Td3[t3 & 0xff] ^ rk[34]; + s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ + Td3[t0 & 0xff] ^ rk[35]; /* round 9: */ - t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[36]; - t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[37]; - t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[38]; - t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[39]; - if (Nr > 10) { - /* round 10: */ - s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[40]; - s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[41]; - s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[42]; - s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[43]; - /* round 11: */ - t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[44]; - t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[45]; - t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[46]; - t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[47]; - if (Nr > 12) { - /* round 12: */ - s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[48]; - s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[49]; - s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[50]; - s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[51]; - /* round 13: */ - t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[52]; - t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[53]; - t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[54]; - t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[55]; - } + t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ + Td3[s1 & 0xff] ^ rk[36]; + t1 = 
Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ + Td3[s2 & 0xff] ^ rk[37]; + t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ + Td3[s3 & 0xff] ^ rk[38]; + t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ + Td3[s0 & 0xff] ^ rk[39]; + if ( Nr > 10 ) + { + /* round 10: */ + s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ + Td3[t1 & 0xff] ^ rk[40]; + s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ + Td3[t2 & 0xff] ^ rk[41]; + s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ + Td3[t3 & 0xff] ^ rk[42]; + s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ + Td3[t0 & 0xff] ^ rk[43]; + /* round 11: */ + t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ + Td3[s1 & 0xff] ^ rk[44]; + t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ + Td3[s2 & 0xff] ^ rk[45]; + t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ + Td3[s3 & 0xff] ^ rk[46]; + t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ + Td3[s0 & 0xff] ^ rk[47]; + if ( Nr > 12 ) + { + /* round 12: */ + s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ + Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[48]; + s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ + Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[49]; + s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ + Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[50]; + s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ + Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[51]; + /* round 13: */ + t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ + Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[52]; + t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ + Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[53]; + t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ + Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[54]; + t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ + Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[55]; + } } - rk += Nr << 2; + rk += Nr << 2; #else /* !FULL_UNROLL */ /* * Nr - 1 full rounds: */ r = Nr >> 1; - for (;;) { - t0 = - Td0[(s0 >> 24) ] ^ - Td1[(s3 >> 16) & 0xff] ^ - Td2[(s2 >> 8) & 0xff] ^ - Td3[(s1 ) & 0xff] ^ - rk[4]; - t1 = - Td0[(s1 >> 24) ] ^ - Td1[(s0 >> 16) & 0xff] ^ - Td2[(s3 >> 8) & 0xff] ^ - Td3[(s2 ) & 0xff] ^ - rk[5]; - t2 = - Td0[(s2 >> 24) ] ^ - Td1[(s1 >> 16) & 0xff] ^ - Td2[(s0 >> 8) & 0xff] ^ - Td3[(s3 ) & 0xff] ^ - rk[6]; - t3 = - Td0[(s3 >> 24) ] ^ - Td1[(s2 >> 16) & 0xff] ^ - Td2[(s1 >> 8) & 0xff] ^ - Td3[(s0 ) & 0xff] ^ - rk[7]; + for ( ;; ) + { + t0 = Td0[(s0 >> 24)] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ + Td3[(s1)&0xff] ^ rk[4]; + t1 = Td0[(s1 >> 24)] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ + Td3[(s2)&0xff] ^ rk[5]; + t2 = Td0[(s2 >> 24)] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ + Td3[(s3)&0xff] ^ rk[6]; + t3 = Td0[(s3 >> 24)] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ + Td3[(s0)&0xff] ^ rk[7]; - rk += 8; - if (--r == 0) { - break; - } + rk += 8; + if ( --r == 0 ) + { + break; + } - s0 = - Td0[(t0 >> 24) ] ^ - Td1[(t3 >> 16) & 0xff] ^ - Td2[(t2 >> 8) & 0xff] ^ - Td3[(t1 ) & 0xff] ^ - rk[0]; - s1 = - Td0[(t1 >> 24) ] ^ - Td1[(t0 >> 16) & 0xff] ^ - Td2[(t3 >> 8) & 0xff] ^ - Td3[(t2 ) & 0xff] ^ - rk[1]; - s2 = - Td0[(t2 >> 24) ] ^ - Td1[(t1 >> 16) & 0xff] ^ - Td2[(t0 >> 8) & 0xff] ^ - Td3[(t3 ) & 0xff] ^ - rk[2]; - s3 = - Td0[(t3 >> 24) ] ^ - Td1[(t2 >> 16) & 0xff] ^ - Td2[(t1 >> 8) & 0xff] ^ - Td3[(t0 ) & 0xff] ^ - rk[3]; + s0 = Td0[(t0 >> 24)] ^ Td1[(t3 >> 16) & 
0xff] ^ Td2[(t2 >> 8) & 0xff] ^ + Td3[(t1)&0xff] ^ rk[0]; + s1 = Td0[(t1 >> 24)] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ + Td3[(t2)&0xff] ^ rk[1]; + s2 = Td0[(t2 >> 24)] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ + Td3[(t3)&0xff] ^ rk[2]; + s3 = Td0[(t3 >> 24)] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ + Td3[(t0)&0xff] ^ rk[3]; } #endif /* ?FULL_UNROLL */ /* - * apply last round and - * map cipher state to byte array block: - */ - s0 = - (Td4[(t0 >> 24) ] & 0xff000000) ^ - (Td4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ - (Td4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ - (Td4[(t1 ) & 0xff] & 0x000000ff) ^ - rk[0]; - PUTU32(pt , s0); - s1 = - (Td4[(t1 >> 24) ] & 0xff000000) ^ - (Td4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ - (Td4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ - (Td4[(t2 ) & 0xff] & 0x000000ff) ^ - rk[1]; - PUTU32(pt + 4, s1); - s2 = - (Td4[(t2 >> 24) ] & 0xff000000) ^ - (Td4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ - (Td4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ - (Td4[(t3 ) & 0xff] & 0x000000ff) ^ - rk[2]; - PUTU32(pt + 8, s2); - s3 = - (Td4[(t3 >> 24) ] & 0xff000000) ^ - (Td4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ - (Td4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ - (Td4[(t0 ) & 0xff] & 0x000000ff) ^ - rk[3]; - PUTU32(pt + 12, s3); + * apply last round and + * map cipher state to byte array block: + */ + s0 = (Td4[(t0 >> 24)] & 0xff000000) ^ + (Td4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ + (Td4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (Td4[(t1)&0xff] & 0x000000ff) ^ + rk[0]; + PUTU32(pt, s0); + s1 = (Td4[(t1 >> 24)] & 0xff000000) ^ + (Td4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ + (Td4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ (Td4[(t2)&0xff] & 0x000000ff) ^ + rk[1]; + PUTU32(pt + 4, s1); + s2 = (Td4[(t2 >> 24)] & 0xff000000) ^ + (Td4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ + (Td4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ (Td4[(t3)&0xff] & 0x000000ff) ^ + rk[2]; + PUTU32(pt + 8, s2); + s3 = (Td4[(t3 >> 24)] & 0xff000000) ^ + (Td4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ + (Td4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ (Td4[(t0)&0xff] & 0x000000ff) ^ + rk[3]; + PUTU32(pt + 12, s3); } #endif /* NEED_RIJNDAEL_DECRYPT */ @@ -1235,49 +1163,48 @@ rijndaelDecrypt(const u32 rk[/*4*(Nr + 1)*/], int Nr, const u8 ct[16], #ifdef NEED_RIJNDAEL_WRAPPERS /* setup key context for encryption only */ -int -rijndael_set_key_enc_only(rijndael_ctx *ctx, const unsigned char *key, int bits) +int rijndael_set_key_enc_only(rijndael_ctx *ctx, const unsigned char *key, + int bits) { - int rounds; + int rounds; - rounds = rijndaelKeySetupEnc(ctx->ek, key, bits); - if (rounds == 0) - return -1; + rounds = rijndaelKeySetupEnc(ctx->ek, key, bits); + if ( rounds == 0 ) + return -1; - ctx->Nr = rounds; - ctx->enc_only = 1; + ctx->Nr = rounds; + ctx->enc_only = 1; - return 0; + return 0; } /* setup key context for both encryption and decryption */ -int -rijndael_set_key(rijndael_ctx *ctx, const unsigned char *key, int bits) +int rijndael_set_key(rijndael_ctx *ctx, const unsigned char *key, int bits) { - int rounds; + int rounds; - rounds = rijndaelKeySetupEnc(ctx->ek, key, bits); - if (rounds == 0) - return -1; - if (rijndaelKeySetupDec(ctx->dk, key, bits) != rounds) - return -1; + rounds = rijndaelKeySetupEnc(ctx->ek, key, bits); + if ( rounds == 0 ) + return -1; + if ( rijndaelKeySetupDec(ctx->dk, key, bits) != rounds ) + return -1; - ctx->Nr = rounds; - ctx->enc_only = 0; + ctx->Nr = rounds; + ctx->enc_only = 0; - return 0; + return 0; } -void -rijndael_decrypt(rijndael_ctx *ctx, const unsigned char *src, unsigned char *dst) +void rijndael_decrypt(rijndael_ctx *ctx, const unsigned char 
*src, + unsigned char *dst) { - rijndaelDecrypt(ctx->dk, ctx->Nr, src, dst); + rijndaelDecrypt(ctx->dk, ctx->Nr, src, dst); } -void -rijndael_encrypt(rijndael_ctx *ctx, const unsigned char *src, unsigned char *dst) +void rijndael_encrypt(rijndael_ctx *ctx, const unsigned char *src, + unsigned char *dst) { - rijndaelEncrypt(ctx->ek, ctx->Nr, src, dst); + rijndaelEncrypt(ctx->ek, ctx->Nr, src, dst); } #endif /* NEED_RIJNDAEL_WRAPPERS */ diff --git a/xen/crypto/vmac.c b/xen/crypto/vmac.c index 294dd16a52..f8678f5f00 100644 --- a/xen/crypto/vmac.c +++ b/xen/crypto/vmac.c @@ -11,7 +11,7 @@ #include #include #include -#define UINT64_C(x) x##ULL +#define UINT64_C(x) x##ULL /* end for Xen */ /* Enable code tuned for 64-bit registers; otherwise tuned for 32-bit */ @@ -20,26 +20,25 @@ #endif /* Enable code tuned for Intel SSE2 instruction set */ -#if ((__SSE2__ || (_M_IX86_FP >= 2)) && ( ! VMAC_ARCH_64)) -#define VMAC_USE_SSE2 1 +#if ( (__SSE2__ || (_M_IX86_FP >= 2)) && (!VMAC_ARCH_64) ) +#define VMAC_USE_SSE2 1 #include #endif /* Native word reads. Update (or define via compiler) if incorrect */ -#ifndef VMAC_ARCH_BIG_ENDIAN /* Assume big-endian unless on the list */ +#ifndef VMAC_ARCH_BIG_ENDIAN /* Assume big-endian unless on the list */ #define VMAC_ARCH_BIG_ENDIAN \ - (!(__x86_64__ || __i386__ || _M_IX86 || \ - _M_X64 || __ARMEL__ || __MIPSEL__)) + (!(__x86_64__ || __i386__ || _M_IX86 || _M_X64 || __ARMEL__ || __MIPSEL__)) #endif /* ----------------------------------------------------------------------- */ /* Constants and masks */ -const uint64_t p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */ -const uint64_t m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */ -const uint64_t m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */ -const uint64_t m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */ -const uint64_t mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ +const uint64_t p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */ +const uint64_t m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */ +const uint64_t m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */ +const uint64_t m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */ +const uint64_t mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ /* ----------------------------------------------------------------------- * * The following routines are used in this implementation. 
They are @@ -50,89 +49,101 @@ const uint64_t mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ * MUL64: 64x64->128-bit multiplication * PMUL64: assumes top bits cleared on inputs * ADD128: 128x128->128-bit addition - * GET_REVERSED_64: load and byte-reverse 64-bit word + * GET_REVERSED_64: load and byte-reverse 64-bit word * ----------------------------------------------------------------------- */ /* ----------------------------------------------------------------------- */ -#if (__GNUC__ && (__x86_64__ || __amd64__)) +#if ( __GNUC__ && (__x86_64__ || __amd64__) ) /* ----------------------------------------------------------------------- */ -#define ADD128(rh,rl,ih,il) \ - asm ("addq %3, %1 \n\t" \ - "adcq %2, %0" \ - : "+r"(rh),"+r"(rl) \ - : "r"(ih),"r"(il) : "cc"); +#define ADD128(rh, rl, ih, il) \ + asm("addq %3, %1 \n\t" \ + "adcq %2, %0" \ + : "+r"(rh), "+r"(rl) \ + : "r"(ih), "r"(il) \ + : "cc"); -#define MUL64(rh,rl,i1,i2) \ - asm ("mulq %3" : "=a"(rl), "=d"(rh) : "a"(i1), "r"(i2) : "cc") +#define MUL64(rh, rl, i1, i2) \ + asm("mulq %3" : "=a"(rl), "=d"(rh) : "a"(i1), "r"(i2) : "cc") #define PMUL64 MUL64 -#define GET_REVERSED_64(p) \ - ({uint64_t x; \ - asm ("bswapq %0" : "=r" (x) : "0"(*(uint64_t *)(p))); x;}) +#define GET_REVERSED_64(p) \ + ({ \ + uint64_t x; \ + asm("bswapq %0" : "=r"(x) : "0"(*(uint64_t *)(p))); \ + x; \ + }) /* ----------------------------------------------------------------------- */ #elif (__GNUC__ && __i386__) /* ----------------------------------------------------------------------- */ -#define GET_REVERSED_64(p) \ - ({ uint64_t x; \ - uint32_t *tp = (uint32_t *)(p); \ - asm ("bswap %%edx\n\t" \ - "bswap %%eax" \ - : "=A"(x) \ - : "a"(tp[1]), "d"(tp[0])); \ - x; }) +#define GET_REVERSED_64(p) \ + ({ \ + uint64_t x; \ + uint32_t *tp = (uint32_t *)(p); \ + asm("bswap %%edx\n\t" \ + "bswap %%eax" \ + : "=A"(x) \ + : "a"(tp[1]), "d"(tp[0])); \ + x; \ + }) /* ----------------------------------------------------------------------- */ #elif (__GNUC__ && __ppc64__) /* ----------------------------------------------------------------------- */ -#define ADD128(rh,rl,ih,il) \ - asm volatile ( "addc %1, %1, %3 \n\t" \ - "adde %0, %0, %2" \ - : "+r"(rh),"+r"(rl) \ - : "r"(ih),"r"(il)); - -#define MUL64(rh,rl,i1,i2) \ -{ uint64_t _i1 = (i1), _i2 = (i2); \ - rl = _i1 * _i2; \ - asm volatile ("mulhdu %0, %1, %2" : "=r" (rh) : "r" (_i1), "r" (_i2));\ -} +#define ADD128(rh, rl, ih, il) \ + asm volatile("addc %1, %1, %3 \n\t" \ + "adde %0, %0, %2" \ + : "+r"(rh), "+r"(rl) \ + : "r"(ih), "r"(il)); + +#define MUL64(rh, rl, i1, i2) \ + { \ + uint64_t _i1 = (i1), _i2 = (i2); \ + rl = _i1 * _i2; \ + asm volatile("mulhdu %0, %1, %2" : "=r"(rh) : "r"(_i1), "r"(_i2)); \ + } #define PMUL64 MUL64 -#define GET_REVERSED_64(p) \ - ({ uint32_t hi, lo, *_p = (uint32_t *)(p); \ - asm volatile ("lwbrx %0, %1, %2" : "=r"(lo) : "b%"(0), "r"(_p) ); \ - asm volatile ("lwbrx %0, %1, %2" : "=r"(hi) : "b%"(4), "r"(_p) ); \ - ((uint64_t)hi << 32) | (uint64_t)lo; } ) +#define GET_REVERSED_64(p) \ + ({ \ + uint32_t hi, lo, *_p = (uint32_t *)(p); \ + asm volatile("lwbrx %0, %1, %2" : "=r"(lo) : "b%"(0), "r"(_p)); \ + asm volatile("lwbrx %0, %1, %2" : "=r"(hi) : "b%"(4), "r"(_p)); \ + ((uint64_t)hi << 32) | (uint64_t)lo; \ + }) /* ----------------------------------------------------------------------- */ #elif (__GNUC__ && (__ppc__ || __PPC__)) /* ----------------------------------------------------------------------- */ -#define GET_REVERSED_64(p) \ - ({ uint32_t hi, lo, *_p = (uint32_t *)(p); 
\ - asm volatile ("lwbrx %0, %1, %2" : "=r"(lo) : "b%"(0), "r"(_p) ); \ - asm volatile ("lwbrx %0, %1, %2" : "=r"(hi) : "b%"(4), "r"(_p) ); \ - ((uint64_t)hi << 32) | (uint64_t)lo; } ) +#define GET_REVERSED_64(p) \ + ({ \ + uint32_t hi, lo, *_p = (uint32_t *)(p); \ + asm volatile("lwbrx %0, %1, %2" : "=r"(lo) : "b%"(0), "r"(_p)); \ + asm volatile("lwbrx %0, %1, %2" : "=r"(hi) : "b%"(4), "r"(_p)); \ + ((uint64_t)hi << 32) | (uint64_t)lo; \ + }) /* ----------------------------------------------------------------------- */ #elif (__GNUC__ && (__ARMEL__ || __ARM__)) /* ----------------------------------------------------------------------- */ -#define bswap32(v) \ -({ uint32_t tmp,out; \ - asm volatile( \ - "eor %1, %2, %2, ror #16\n" \ - "bic %1, %1, #0x00ff0000\n" \ - "mov %0, %2, ror #8\n" \ - "eor %0, %0, %1, lsr #8" \ - : "=r" (out), "=&r" (tmp) \ - : "r" (v)); \ - out;}) +#define bswap32(v) \ + ({ \ + uint32_t tmp, out; \ + asm volatile("eor %1, %2, %2, ror #16\n" \ + "bic %1, %1, #0x00ff0000\n" \ + "mov %0, %2, ror #8\n" \ + "eor %0, %0, %1, lsr #8" \ + : "=r"(out), "=&r"(tmp) \ + : "r"(v)); \ + out; \ + }) /* ----------------------------------------------------------------------- */ #elif _MSC_VER @@ -140,18 +151,19 @@ const uint64_t mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ #include -#if (_M_IA64 || _M_X64) && \ +#if ( _M_IA64 || _M_X64 ) && \ (!defined(__INTEL_COMPILER) || __INTEL_COMPILER >= 1000) -#define MUL64(rh,rl,i1,i2) (rl) = _umul128(i1,i2,&(rh)); +#define MUL64(rh, rl, i1, i2) (rl) = _umul128(i1, i2, &(rh)); #pragma intrinsic(_umul128) #define PMUL64 MUL64 #endif /* MSVC uses add, adc in this version */ -#define ADD128(rh,rl,ih,il) \ - { uint64_t _il = (il); \ - (rl) += (_il); \ - (rh) += (ih) + ((rl) < (_il)); \ +#define ADD128(rh, rl, ih, il) \ + { \ + uint64_t _il = (il); \ + (rl) += (_il); \ + (rh) += (ih) + ((rl) < (_il)); \ } #if _MSC_VER >= 1300 @@ -159,9 +171,8 @@ const uint64_t mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ #pragma intrinsic(_byteswap_uint64) #endif -#if _MSC_VER >= 1400 && \ - (!defined(__INTEL_COMPILER) || __INTEL_COMPILER >= 1000) -#define MUL32(i1,i2) (__emulu((uint32_t)(i1),(uint32_t)(i2))) +#if _MSC_VER >= 1400 && (!defined(__INTEL_COMPILER) || __INTEL_COMPILER >= 1000) +#define MUL32(i1, i2) (__emulu((uint32_t)(i1), (uint32_t)(i2))) #pragma intrinsic(__emulu) #endif @@ -170,11 +181,11 @@ const uint64_t mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ /* ----------------------------------------------------------------------- */ #if __GNUC__ -#define ALIGN(n) __attribute__ ((aligned(n))) -#define NOINLINE __attribute__ ((noinline)) +#define ALIGN(n) __attribute__((aligned(n))) +#define NOINLINE __attribute__((noinline)) #elif _MSC_VER -#define ALIGN(n) __declspec(align(n)) -#define NOINLINE __declspec(noinline) +#define ALIGN(n) __declspec(align(n)) +#define NOINLINE __declspec(noinline) #else #define ALIGN(n) #define NOINLINE @@ -185,75 +196,85 @@ const uint64_t mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ /* ----------------------------------------------------------------------- */ #ifndef ADD128 -#define ADD128(rh,rl,ih,il) \ - { uint64_t _il = (il); \ - (rl) += (_il); \ - if ((rl) < (_il)) (rh)++; \ - (rh) += (ih); \ +#define ADD128(rh, rl, ih, il) \ + { \ + uint64_t _il = (il); \ + (rl) += (_il); \ + if ( (rl) < (_il) ) \ + (rh)++; \ + (rh) += (ih); \ } #endif #ifndef MUL32 -#define MUL32(i1,i2) ((uint64_t)(uint32_t)(i1)*(uint32_t)(i2)) +#define MUL32(i1, i2) ((uint64_t)(uint32_t)(i1) * 
(uint32_t)(i2)) #endif -#ifndef PMUL64 /* rh may not be same as i1 or i2 */ -#define PMUL64(rh,rl,i1,i2) /* Assumes m doesn't overflow */ \ - { uint64_t _i1 = (i1), _i2 = (i2); \ - uint64_t m = MUL32(_i1,_i2>>32) + MUL32(_i1>>32,_i2); \ - rh = MUL32(_i1>>32,_i2>>32); \ - rl = MUL32(_i1,_i2); \ - ADD128(rh,rl,(m >> 32),(m << 32)); \ +#ifndef PMUL64 /* rh may not be same as i1 or i2 */ +#define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \ + { \ + uint64_t _i1 = (i1), _i2 = (i2); \ + uint64_t m = MUL32(_i1, _i2 >> 32) + MUL32(_i1 >> 32, _i2); \ + rh = MUL32(_i1 >> 32, _i2 >> 32); \ + rl = MUL32(_i1, _i2); \ + ADD128(rh, rl, (m >> 32), (m << 32)); \ } #endif #ifndef MUL64 -#define MUL64(rh,rl,i1,i2) \ - { uint64_t _i1 = (i1), _i2 = (i2); \ - uint64_t m1= MUL32(_i1,_i2>>32); \ - uint64_t m2= MUL32(_i1>>32,_i2); \ - rh = MUL32(_i1>>32,_i2>>32); \ - rl = MUL32(_i1,_i2); \ - ADD128(rh,rl,(m1 >> 32),(m1 << 32)); \ - ADD128(rh,rl,(m2 >> 32),(m2 << 32)); \ +#define MUL64(rh, rl, i1, i2) \ + { \ + uint64_t _i1 = (i1), _i2 = (i2); \ + uint64_t m1 = MUL32(_i1, _i2 >> 32); \ + uint64_t m2 = MUL32(_i1 >> 32, _i2); \ + rh = MUL32(_i1 >> 32, _i2 >> 32); \ + rl = MUL32(_i1, _i2); \ + ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \ + ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \ } #endif #ifndef GET_REVERSED_64 #ifndef bswap64 #ifndef bswap32 -#define bswap32(x) \ - ({ uint32_t bsx = (x); \ - ((((bsx) & 0xff000000u) >> 24) | (((bsx) & 0x00ff0000u) >> 8) | \ - (((bsx) & 0x0000ff00u) << 8) | (((bsx) & 0x000000ffu) << 24)); }) +#define bswap32(x) \ + ({ \ + uint32_t bsx = (x); \ + ((((bsx)&0xff000000u) >> 24) | (((bsx)&0x00ff0000u) >> 8) | \ + (((bsx)&0x0000ff00u) << 8) | (((bsx)&0x000000ffu) << 24)); \ + }) #endif -#define bswap64(x) \ - ({ union { uint64_t ll; uint32_t l[2]; } w, r; \ - w.ll = (x); \ - r.l[0] = bswap32 (w.l[1]); \ - r.l[1] = bswap32 (w.l[0]); \ - r.ll; }) +#define bswap64(x) \ + ({ \ + union { \ + uint64_t ll; \ + uint32_t l[2]; \ + } w, r; \ + w.ll = (x); \ + r.l[0] = bswap32(w.l[1]); \ + r.l[1] = bswap32(w.l[0]); \ + r.ll; \ + }) #endif -#define GET_REVERSED_64(p) bswap64(*(uint64_t *)(p)) +#define GET_REVERSED_64(p) bswap64(*(uint64_t *)(p)) #endif /* ----------------------------------------------------------------------- */ -#if (VMAC_PREFER_BIG_ENDIAN) -# define get64PE get64BE +#if ( VMAC_PREFER_BIG_ENDIAN ) +#define get64PE get64BE #else -# define get64PE get64LE +#define get64PE get64LE #endif -#if (VMAC_ARCH_BIG_ENDIAN) -# define get64BE(ptr) (*(uint64_t *)(ptr)) -# define get64LE(ptr) GET_REVERSED_64(ptr) +#if ( VMAC_ARCH_BIG_ENDIAN ) +#define get64BE(ptr) (*(uint64_t *)(ptr)) +#define get64LE(ptr) GET_REVERSED_64(ptr) #else /* assume little-endian */ -# define get64BE(ptr) GET_REVERSED_64(ptr) -# define get64LE(ptr) (*(uint64_t *)(ptr)) +#define get64BE(ptr) GET_REVERSED_64(ptr) +#define get64LE(ptr) (*(uint64_t *)(ptr)) #endif - /* --------------------------------------------------------------------- * * For highest performance the L1 NH and L2 polynomial hashes should be * carefully implemented to take advantage of one's target architechture. 
@@ -270,85 +291,114 @@ const uint64_t mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ #if VMAC_ARCH_64 /* ----------------------------------------------------------------------- */ -#define nh_16(mp, kp, nw, rh, rl) \ -{ int i; uint64_t th, tl; \ - rh = rl = 0; \ - for (i = 0; i < nw; i+= 2) { \ - MUL64(th,tl,get64PE((mp)+i )+(kp)[i ],get64PE((mp)+i+1)+(kp)[i+1]);\ - ADD128(rh,rl,th,tl); \ - } \ -} -#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \ -{ int i; uint64_t th, tl; \ - rh1 = rl1 = rh = rl = 0; \ - for (i = 0; i < nw; i+= 2) { \ - MUL64(th,tl,get64PE((mp)+i )+(kp)[i ],get64PE((mp)+i+1)+(kp)[i+1]);\ - ADD128(rh,rl,th,tl); \ - MUL64(th,tl,get64PE((mp)+i )+(kp)[i+2],get64PE((mp)+i+1)+(kp)[i+3]);\ - ADD128(rh1,rl1,th,tl); \ - } \ -} +#define nh_16(mp, kp, nw, rh, rl) \ + { \ + int i; \ + uint64_t th, tl; \ + rh = rl = 0; \ + for ( i = 0; i < nw; i += 2 ) \ + { \ + MUL64(th, tl, get64PE((mp) + i) + (kp)[i], \ + get64PE((mp) + i + 1) + (kp)[i + 1]); \ + ADD128(rh, rl, th, tl); \ + } \ + } +#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \ + { \ + int i; \ + uint64_t th, tl; \ + rh1 = rl1 = rh = rl = 0; \ + for ( i = 0; i < nw; i += 2 ) \ + { \ + MUL64(th, tl, get64PE((mp) + i) + (kp)[i], \ + get64PE((mp) + i + 1) + (kp)[i + 1]); \ + ADD128(rh, rl, th, tl); \ + MUL64(th, tl, get64PE((mp) + i) + (kp)[i + 2], \ + get64PE((mp) + i + 1) + (kp)[i + 3]); \ + ADD128(rh1, rl1, th, tl); \ + } \ + } -#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */ -#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ -{ int i; uint64_t th, tl; \ - rh = rl = 0; \ - for (i = 0; i < nw; i+= 8) { \ - MUL64(th,tl,get64PE((mp)+i )+(kp)[i ],get64PE((mp)+i+1)+(kp)[i+1]);\ - ADD128(rh,rl,th,tl); \ - MUL64(th,tl,get64PE((mp)+i+2)+(kp)[i+2],get64PE((mp)+i+3)+(kp)[i+3]);\ - ADD128(rh,rl,th,tl); \ - MUL64(th,tl,get64PE((mp)+i+4)+(kp)[i+4],get64PE((mp)+i+5)+(kp)[i+5]);\ - ADD128(rh,rl,th,tl); \ - MUL64(th,tl,get64PE((mp)+i+6)+(kp)[i+6],get64PE((mp)+i+7)+(kp)[i+7]);\ - ADD128(rh,rl,th,tl); \ - } \ -} -#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \ -{ int i; uint64_t th, tl; \ - rh1 = rl1 = rh = rl = 0; \ - for (i = 0; i < nw; i+= 8) { \ - MUL64(th,tl,get64PE((mp)+i )+(kp)[i ],get64PE((mp)+i+1)+(kp)[i+1]);\ - ADD128(rh,rl,th,tl); \ - MUL64(th,tl,get64PE((mp)+i )+(kp)[i+2],get64PE((mp)+i+1)+(kp)[i+3]);\ - ADD128(rh1,rl1,th,tl); \ - MUL64(th,tl,get64PE((mp)+i+2)+(kp)[i+2],get64PE((mp)+i+3)+(kp)[i+3]);\ - ADD128(rh,rl,th,tl); \ - MUL64(th,tl,get64PE((mp)+i+2)+(kp)[i+4],get64PE((mp)+i+3)+(kp)[i+5]);\ - ADD128(rh1,rl1,th,tl); \ - MUL64(th,tl,get64PE((mp)+i+4)+(kp)[i+4],get64PE((mp)+i+5)+(kp)[i+5]);\ - ADD128(rh,rl,th,tl); \ - MUL64(th,tl,get64PE((mp)+i+4)+(kp)[i+6],get64PE((mp)+i+5)+(kp)[i+7]);\ - ADD128(rh1,rl1,th,tl); \ - MUL64(th,tl,get64PE((mp)+i+6)+(kp)[i+6],get64PE((mp)+i+7)+(kp)[i+7]);\ - ADD128(rh,rl,th,tl); \ - MUL64(th,tl,get64PE((mp)+i+6)+(kp)[i+8],get64PE((mp)+i+7)+(kp)[i+9]);\ - ADD128(rh1,rl1,th,tl); \ - } \ -} +#if ( VMAC_NHBYTES >= 64 ) /* These versions do 64-bytes of message at a time \ + */ +#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ + { \ + int i; \ + uint64_t th, tl; \ + rh = rl = 0; \ + for ( i = 0; i < nw; i += 8 ) \ + { \ + MUL64(th, tl, get64PE((mp) + i) + (kp)[i], \ + get64PE((mp) + i + 1) + (kp)[i + 1]); \ + ADD128(rh, rl, th, tl); \ + MUL64(th, tl, get64PE((mp) + i + 2) + (kp)[i + 2], \ + get64PE((mp) + i + 3) + (kp)[i + 3]); \ + ADD128(rh, rl, th, tl); \ + MUL64(th, tl, get64PE((mp) + i + 4) + (kp)[i + 4], \ + get64PE((mp) + i + 5) + (kp)[i + 5]); \ + ADD128(rh, 
rl, th, tl); \ + MUL64(th, tl, get64PE((mp) + i + 6) + (kp)[i + 6], \ + get64PE((mp) + i + 7) + (kp)[i + 7]); \ + ADD128(rh, rl, th, tl); \ + } \ + } +#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \ + { \ + int i; \ + uint64_t th, tl; \ + rh1 = rl1 = rh = rl = 0; \ + for ( i = 0; i < nw; i += 8 ) \ + { \ + MUL64(th, tl, get64PE((mp) + i) + (kp)[i], \ + get64PE((mp) + i + 1) + (kp)[i + 1]); \ + ADD128(rh, rl, th, tl); \ + MUL64(th, tl, get64PE((mp) + i) + (kp)[i + 2], \ + get64PE((mp) + i + 1) + (kp)[i + 3]); \ + ADD128(rh1, rl1, th, tl); \ + MUL64(th, tl, get64PE((mp) + i + 2) + (kp)[i + 2], \ + get64PE((mp) + i + 3) + (kp)[i + 3]); \ + ADD128(rh, rl, th, tl); \ + MUL64(th, tl, get64PE((mp) + i + 2) + (kp)[i + 4], \ + get64PE((mp) + i + 3) + (kp)[i + 5]); \ + ADD128(rh1, rl1, th, tl); \ + MUL64(th, tl, get64PE((mp) + i + 4) + (kp)[i + 4], \ + get64PE((mp) + i + 5) + (kp)[i + 5]); \ + ADD128(rh, rl, th, tl); \ + MUL64(th, tl, get64PE((mp) + i + 4) + (kp)[i + 6], \ + get64PE((mp) + i + 5) + (kp)[i + 7]); \ + ADD128(rh1, rl1, th, tl); \ + MUL64(th, tl, get64PE((mp) + i + 6) + (kp)[i + 6], \ + get64PE((mp) + i + 7) + (kp)[i + 7]); \ + ADD128(rh, rl, th, tl); \ + MUL64(th, tl, get64PE((mp) + i + 6) + (kp)[i + 8], \ + get64PE((mp) + i + 7) + (kp)[i + 9]); \ + ADD128(rh1, rl1, th, tl); \ + } \ + } #endif -#define poly_step(ah, al, kh, kl, mh, ml) \ -{ uint64_t t1h, t1l, t2h, t2l, t3h, t3l, z=0; \ - /* compute ab*cd, put bd into result registers */ \ - PMUL64(t3h,t3l,al,kh); \ - PMUL64(t2h,t2l,ah,kl); \ - PMUL64(t1h,t1l,ah,2*kh); \ - PMUL64(ah,al,al,kl); \ - /* add 2 * ac to result */ \ - ADD128(ah,al,t1h,t1l); \ - /* add together ad + bc */ \ - ADD128(t2h,t2l,t3h,t3l); \ - /* now (ah,al), (t2l,2*t2h) need summing */ \ - /* first add the high registers, carrying into t2h */ \ - ADD128(t2h,ah,z,t2l); \ - /* double t2h and add top bit of ah */ \ - t2h = 2 * t2h + (ah >> 63); \ - ah &= m63; \ - /* now add the low registers */ \ - ADD128(ah,al,mh,ml); \ - ADD128(ah,al,z,t2h); \ -} +#define poly_step(ah, al, kh, kl, mh, ml) \ + { \ + uint64_t t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \ + /* compute ab*cd, put bd into result registers */ \ + PMUL64(t3h, t3l, al, kh); \ + PMUL64(t2h, t2l, ah, kl); \ + PMUL64(t1h, t1l, ah, 2 * kh); \ + PMUL64(ah, al, al, kl); \ + /* add 2 * ac to result */ \ + ADD128(ah, al, t1h, t1l); \ + /* add together ad + bc */ \ + ADD128(t2h, t2l, t3h, t3l); \ + /* now (ah,al), (t2l,2*t2h) need summing */ \ + /* first add the high registers, carrying into t2h */ \ + ADD128(t2h, ah, z, t2l); \ + /* double t2h and add top bit of ah */ \ + t2h = 2 * t2h + (ah >> 63); \ + ah &= m63; \ + /* now add the low registers */ \ + ADD128(ah, al, mh, ml); \ + ADD128(ah, al, z, t2h); \ + } /* ----------------------------------------------------------------------- */ #elif VMAC_USE_SSE2 @@ -356,240 +406,202 @@ const uint64_t mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ // macros from Crypto++ for sharing inline assembly code between MSVC and GNU C #if defined(__GNUC__) - // define these in two steps to allow arguments to be expanded - #define GNU_AS2(x, y) #x ", " #y ";" - #define GNU_AS3(x, y, z) #x ", " #y ", " #z ";" - #define GNU_ASL(x) "\n" #x ":" - #define GNU_ASJ(x, y, z) #x " " #y #z ";" - #define AS2(x, y) GNU_AS2(x, y) - #define AS3(x, y, z) GNU_AS3(x, y, z) - #define ASS(x, y, a, b, c, d) #x ", " #y ", " #a "*64+" #b "*16+" #c "*4+" #d ";" - #define ASL(x) GNU_ASL(x) - #define ASJ(x, y, z) GNU_ASJ(x, y, z) +// define these in two steps to allow arguments to be expanded 
+#define GNU_AS2(x, y) #x ", " #y ";" +#define GNU_AS3(x, y, z) #x ", " #y ", " #z ";" +#define GNU_ASL(x) "\n" #x ":" +#define GNU_ASJ(x, y, z) #x " " #y #z ";" +#define AS2(x, y) GNU_AS2(x, y) +#define AS3(x, y, z) GNU_AS3(x, y, z) +#define ASS(x, y, a, b, c, d) \ +#x ", " #y ", " #a "*64+" #b "*16+" #c "*4+" #d ";" +#define ASL(x) GNU_ASL(x) +#define ASJ(x, y, z) GNU_ASJ(x, y, z) #else - #define AS2(x, y) __asm {x, y} - #define AS3(x, y, z) __asm {x, y, z} - #define ASS(x, y, a, b, c, d) __asm {x, y, _MM_SHUFFLE(a, b, c, d)} - #define ASL(x) __asm {label##x:} - #define ASJ(x, y, z) __asm {x label##y} +#define AS2(x, y) __asm {x, y} +#define AS3(x, y, z) __asm {x, y, z} +#define ASS(x, y, a, b, c, d) __asm {x, y, _MM_SHUFFLE(a, b, c, d)} +#define ASL(x) __asm {label##x: } +#define ASJ(x, y, z) __asm {x label##y} #endif -static void NOINLINE nh_16_func(const uint64_t *mp, const uint64_t *kp, size_t nw, uint64_t *rh, uint64_t *rl) +static void NOINLINE nh_16_func(const uint64_t *mp, const uint64_t *kp, + size_t nw, uint64_t *rh, uint64_t *rl) { - // This assembly version, using MMX registers, is just as fast as the - // intrinsics version (which uses XMM registers) on the Intel Core 2, - // but is much faster on the Pentium 4. In order to schedule multiplies - // as early as possible, the loop interleaves operations for the current - // block and the next block. To mask out high 32-bits, we use "movd" - // to move the lower 32-bits to the stack and then back. Surprisingly, - // this is faster than any other method. + // This assembly version, using MMX registers, is just as fast as the + // intrinsics version (which uses XMM registers) on the Intel Core 2, + // but is much faster on the Pentium 4. In order to schedule multiplies + // as early as possible, the loop interleaves operations for the current + // block and the next block. To mask out high 32-bits, we use "movd" + // to move the lower 32-bits to the stack and then back. Surprisingly, + // this is faster than any other method. 
#ifdef __GNUC__ - __asm__ __volatile__ - ( - ".intel_syntax noprefix;" + __asm__ __volatile__( + ".intel_syntax noprefix;" #else - AS2( mov esi, mp) - AS2( mov edi, kp) - AS2( mov ecx, nw) - AS2( mov eax, rl) - AS2( mov edx, rh) + AS2(mov esi, mp) + AS2(mov edi, kp) + AS2(mov ecx, nw) + AS2(mov eax, rl) + AS2(mov edx, rh) #endif - AS2( sub esp, 12) - AS2( movq mm6, [esi]) - AS2( paddq mm6, [edi]) - AS2( movq mm5, [esi+8]) - AS2( paddq mm5, [edi+8]) - AS2( add esi, 16) - AS2( add edi, 16) - AS2( movq mm4, mm6) - ASS( pshufw mm2, mm6, 1, 0, 3, 2) - AS2( pmuludq mm6, mm5) - ASS( pshufw mm3, mm5, 1, 0, 3, 2) - AS2( pmuludq mm5, mm2) - AS2( pmuludq mm2, mm3) - AS2( pmuludq mm3, mm4) - AS2( pxor mm7, mm7) - AS2( movd [esp], mm6) - AS2( psrlq mm6, 32) - AS2( movd [esp+4], mm5) - AS2( psrlq mm5, 32) - AS2( sub ecx, 2) - ASJ( jz, 1, f) - ASL(0) - AS2( movq mm0, [esi]) - AS2( paddq mm0, [edi]) - AS2( movq mm1, [esi+8]) - AS2( paddq mm1, [edi+8]) - AS2( add esi, 16) - AS2( add edi, 16) - AS2( movq mm4, mm0) - AS2( paddq mm5, mm2) - ASS( pshufw mm2, mm0, 1, 0, 3, 2) - AS2( pmuludq mm0, mm1) - AS2( movd [esp+8], mm3) - AS2( psrlq mm3, 32) - AS2( paddq mm5, mm3) - ASS( pshufw mm3, mm1, 1, 0, 3, 2) - AS2( pmuludq mm1, mm2) - AS2( pmuludq mm2, mm3) - AS2( pmuludq mm3, mm4) - AS2( movd mm4, [esp]) - AS2( paddq mm7, mm4) - AS2( movd mm4, [esp+4]) - AS2( paddq mm6, mm4) - AS2( movd mm4, [esp+8]) - AS2( paddq mm6, mm4) - AS2( movd [esp], mm0) - AS2( psrlq mm0, 32) - AS2( paddq mm6, mm0) - AS2( movd [esp+4], mm1) - AS2( psrlq mm1, 32) - AS2( paddq mm5, mm1) - AS2( sub ecx, 2) - ASJ( jnz, 0, b) - ASL(1) - AS2( paddq mm5, mm2) - AS2( movd [esp+8], mm3) - AS2( psrlq mm3, 32) - AS2( paddq mm5, mm3) - AS2( movd mm4, [esp]) - AS2( paddq mm7, mm4) - AS2( movd mm4, [esp+4]) - AS2( paddq mm6, mm4) - AS2( movd mm4, [esp+8]) - AS2( paddq mm6, mm4) - - ASS( pshufw mm0, mm7, 3, 2, 1, 0) - AS2( psrlq mm7, 32) - AS2( paddq mm6, mm7) - AS2( punpckldq mm0, mm6) - AS2( psrlq mm6, 32) - AS2( paddq mm5, mm6) - AS2( movq [eax], mm0) - AS2( movq [edx], mm5) - AS2( add esp, 12) + AS2(sub esp, 12) AS2(movq mm6, [esi]) AS2(paddq mm6, [edi]) AS2( + movq mm5, [esi + 8]) AS2(paddq mm5, [edi + 8]) AS2(add esi, 16) + AS2(add edi, 16) AS2(movq mm4, mm6) ASS( + pshufw mm2, mm6, 1, 0, 3, + 2) AS2(pmuludq mm6, mm5) ASS(pshufw mm3, mm5, 1, 0, 3, + 2) AS2(pmuludq mm5, mm2) + AS2(pmuludq mm2, mm3) AS2(pmuludq mm3, mm4) AS2( + pxor mm7, + mm7) AS2(movd[esp], + mm6) AS2(psrlq mm6, + 32) AS2(movd[esp + 4], mm5) + AS2(psrlq mm5, 32) AS2(sub ecx, 2) ASJ(jz, 1, f) ASL(0) AS2( + movq mm0, [esi]) AS2(paddq mm0, + [edi]) AS2(movq mm1, [esi + 8]) + AS2(paddq mm1, [edi + 8]) AS2(add esi, 16) AS2( + add edi, 16) AS2(movq mm4, mm0) AS2(paddq mm5, mm2) + ASS(pshufw mm2, mm0, 1, 0, 3, 2) AS2( + pmuludq mm0, + mm1) AS2(movd[esp + 8], + mm3) AS2(psrlq mm3, + 32) AS2(paddq mm5, + mm3) ASS(pshufw mm3, + mm1, 1, 0, 3, 2) + AS2(pmuludq mm1, mm2) AS2(pmuludq mm2, mm3) AS2( + pmuludq + mm3, + mm4) AS2(movd mm4, [esp]) AS2(paddq mm7, + mm4) + AS2(movd mm4, [esp + 4]) AS2(paddq mm6, mm4) AS2( + movd mm4, + [esp + + 8]) AS2(paddq mm6, + mm4) AS2(movd[esp], mm0) + AS2(psrlq mm0, 32) AS2(paddq mm6, mm0) AS2( + movd[esp + 4], + mm1) AS2(psrlq mm1, + 32) AS2(paddq mm5, + mm1) AS2(sub ecx, + 2) + ASJ(jnz, 0, b) ASL(1) AS2( + paddq mm5, + mm2) AS2(movd[esp + 8], + mm3) AS2(psrlq mm3, + 32) AS2(paddq mm5, + mm3) + AS2(movd mm4, [esp]) AS2( + paddq mm7, + mm4) AS2(movd mm4, + [esp + 4]) AS2(paddq mm6, mm4) + AS2(movd mm4, + [esp + + 8]) AS2(paddq mm6, mm4) + + ASS(pshufw mm0, 
mm7, 3, + 2, 1, + 0) AS2(psrlq mm7, + 32) + AS2(paddq mm6, mm7) AS2( + punpckldq mm0, + mm6) AS2(psrlq + mm6, + 32) + AS2(paddq mm5, mm6) AS2( + movq[eax], + mm0) + AS2(movq[edx], + mm5) + AS2(add esp, + 12) #ifdef __GNUC__ - ".att_syntax prefix;" - : - : "S" (mp), "D" (kp), "c" (nw), "a" (rl), "d" (rh) - : "memory", "cc" - ); + ".a" + "tt" + "_s" + "yn" + "ta" + "x " + "pr" + "ef" + "ix" + ";" + : + : "S"(mp), "D"(kp), "c"(nw), "a"(rl), "d"(rh) + : "memory", "cc"); #endif } -#define nh_16(mp, kp, nw, rh, rl) nh_16_func(mp, kp, nw, &(rh), &(rl)); +#define nh_16(mp, kp, nw, rh, rl) nh_16_func(mp, kp, nw, &(rh), &(rl)); static void poly_step_func(uint64_t *ahi, uint64_t *alo, const uint64_t *kh, - const uint64_t *kl, const uint64_t *mh, const uint64_t *ml) + const uint64_t *kl, const uint64_t *mh, + const uint64_t *ml) { - // This code tries to schedule the multiplies as early as possible to overcome - // the long latencies on the Pentium 4. It also minimizes "movq" instructions - // which are very expensive on the P4. - -#define a0 [eax+0] -#define a1 [eax+4] -#define a2 [ebx+0] -#define a3 [ebx+4] -#define k0 [ecx+0] -#define k1 [ecx+4] -#define k2 [edx+0] -#define k3 [edx+4] + // This code tries to schedule the multiplies as early as possible to + // overcome the long latencies on the Pentium 4. It also minimizes "movq" + // instructions which are very expensive on the P4. + +#define a0 [eax + 0] +#define a1 [eax + 4] +#define a2 [ebx + 0] +#define a3 [ebx + 4] +#define k0 [ecx + 0] +#define k1 [ecx + 4] +#define k2 [edx + 0] +#define k3 [edx + 4] #ifdef __GNUC__ - uint32_t temp; - __asm__ __volatile__ - ( - "mov %%ebx, %0;" - "mov %1, %%ebx;" - ".intel_syntax noprefix;" + uint32_t temp; + __asm__ __volatile__( + "mov %%ebx, %0;" + "mov %1, %%ebx;" + ".intel_syntax noprefix;" #else - AS2( mov ebx, ahi) - AS2( mov edx, kh) - AS2( mov eax, alo) - AS2( mov ecx, kl) - AS2( mov esi, mh) - AS2( mov edi, ml) + AS2(mov ebx, ahi) + AS2(mov edx, kh) + AS2(mov eax, alo) + AS2(mov ecx, kl) + AS2(mov esi, mh) + AS2(mov edi, ml) #endif - AS2( movd mm0, a3) - AS2( movq mm4, mm0) - AS2( pmuludq mm0, k3) // a3*k3 - AS2( movd mm1, a0) - AS2( pmuludq mm1, k2) // a0*k2 - AS2( movd mm2, a1) - AS2( movd mm6, k1) - AS2( pmuludq mm2, mm6) // a1*k1 - AS2( movd mm3, a2) - AS2( movq mm5, mm3) - AS2( movd mm7, k0) - AS2( pmuludq mm3, mm7) // a2*k0 - AS2( pmuludq mm4, mm7) // a3*k0 - AS2( pmuludq mm5, mm6) // a2*k1 - AS2( psllq mm0, 1) - AS2( paddq mm0, [esi]) - AS2( paddq mm0, mm1) - AS2( movd mm1, a1) - AS2( paddq mm4, mm5) - AS2( movq mm5, mm1) - AS2( pmuludq mm1, k2) // a1*k2 - AS2( paddq mm0, mm2) - AS2( movd mm2, a0) - AS2( paddq mm0, mm3) - AS2( movq mm3, mm2) - AS2( pmuludq mm2, k3) // a0*k3 - AS2( pmuludq mm3, mm7) // a0*k0 - AS2( movd esi, mm0) - AS2( psrlq mm0, 32) - AS2( pmuludq mm7, mm5) // a1*k0 - AS2( pmuludq mm5, k3) // a1*k3 - AS2( paddq mm0, mm1) - AS2( movd mm1, a2) - AS2( pmuludq mm1, k2) // a2*k2 - AS2( paddq mm0, mm2) - AS2( paddq mm0, mm4) - AS2( movq mm4, mm0) - AS2( movd mm2, a3) - AS2( pmuludq mm2, mm6) // a3*k1 - AS2( pmuludq mm6, a0) // a0*k1 - AS2( psrlq mm0, 31) - AS2( paddq mm0, mm3) - AS2( movd mm3, [edi]) - AS2( paddq mm0, mm3) - AS2( movd mm3, a2) - AS2( pmuludq mm3, k3) // a2*k3 - AS2( paddq mm5, mm1) - AS2( movd mm1, a3) - AS2( pmuludq mm1, k2) // a3*k2 - AS2( paddq mm5, mm2) - AS2( movd mm2, [edi+4]) - AS2( psllq mm5, 1) - AS2( paddq mm0, mm5) - AS2( movq mm5, mm0) - AS2( psllq mm4, 33) - AS2( psrlq mm0, 32) - AS2( paddq mm6, mm7) - AS2( movd mm7, esi) - AS2( paddq mm0, mm6) - 
AS2( paddq mm0, mm2) - AS2( paddq mm3, mm1) - AS2( psllq mm3, 1) - AS2( paddq mm0, mm3) - AS2( psrlq mm4, 1) - AS2( punpckldq mm5, mm0) - AS2( psrlq mm0, 32) - AS2( por mm4, mm7) - AS2( paddq mm0, mm4) - AS2( movq a0, mm5) - AS2( movq a2, mm0) + AS2(movd mm0, a3) AS2(movq mm4, mm0) AS2(pmuludq mm0, k3) // a3*k3 + AS2(movd mm1, a0) AS2(pmuludq mm1, k2) // a0*k2 + AS2(movd mm2, a1) AS2(movd mm6, k1) AS2(pmuludq mm2, mm6) // a1*k1 + AS2(movd mm3, a2) AS2(movq mm5, mm3) AS2(movd mm7, k0) + AS2(pmuludq mm3, mm7) // a2*k0 + AS2(pmuludq mm4, mm7) // a3*k0 + AS2(pmuludq mm5, mm6) // a2*k1 + AS2(psllq mm0, 1) AS2(paddq mm0, [esi]) AS2(paddq mm0, mm1) + AS2(movd mm1, a1) AS2(paddq mm4, mm5) AS2(movq mm5, mm1) + AS2(pmuludq mm1, k2) // a1*k2 + AS2(paddq mm0, mm2) AS2(movd mm2, a0) AS2(paddq mm0, mm3) + AS2(movq mm3, mm2) AS2(pmuludq mm2, k3) // a0*k3 + AS2(pmuludq mm3, mm7) // a0*k0 + AS2(movd esi, mm0) AS2(psrlq mm0, 32) AS2(pmuludq mm7, mm5) // a1*k0 + AS2(pmuludq mm5, k3) // a1*k3 + AS2(paddq mm0, mm1) AS2(movd mm1, a2) AS2(pmuludq mm1, k2) // a2*k2 + AS2(paddq mm0, mm2) AS2(paddq mm0, mm4) AS2(movq mm4, mm0) + AS2(movd mm2, a3) AS2(pmuludq mm2, mm6) // a3*k1 + AS2(pmuludq mm6, a0) // a0*k1 + AS2(psrlq mm0, 31) AS2(paddq mm0, mm3) AS2(movd mm3, [edi]) + AS2(paddq mm0, mm3) AS2(movd mm3, a2) AS2(pmuludq mm3, k3) // a2*k3 + AS2(paddq mm5, mm1) AS2(movd mm1, a3) AS2(pmuludq mm1, k2) // a3*k2 + AS2(paddq mm5, mm2) AS2(movd mm2, [edi + 4]) AS2(psllq mm5, 1) + AS2(paddq mm0, mm5) AS2(movq mm5, mm0) AS2(psllq mm4, 33) + AS2(psrlq mm0, 32) AS2(paddq mm6, mm7) AS2(movd mm7, esi) + AS2(paddq mm0, mm6) AS2(paddq mm0, mm2) AS2(paddq mm3, mm1) + AS2(psllq mm3, 1) AS2(paddq mm0, mm3) AS2(psrlq mm4, 1) + AS2(punpckldq mm5, mm0) AS2(psrlq mm0, 32) + AS2(por mm4, mm7) AS2(paddq mm0, mm4) + AS2(movq a0, mm5) AS2(movq a2, mm0) #ifdef __GNUC__ - ".att_syntax prefix;" - "mov %0, %%ebx;" - : "=m" (temp) - : "m" (ahi), "D" (ml), "d" (kh), "a" (alo), "S" (mh), "c" (kl) - : "memory", "cc" - ); + ".att_syntax prefix;" + "mov %0, %%ebx;" + : "=m"(temp) + : "m"(ahi), "D"(ml), "d"(kh), "a"(alo), "S"(mh), "c"(kl) + : "memory", "cc"); #endif - #undef a0 #undef a1 #undef a2 @@ -600,33 +612,36 @@ static void poly_step_func(uint64_t *ahi, uint64_t *alo, const uint64_t *kh, #undef k3 } -#define poly_step(ah, al, kh, kl, mh, ml) \ - poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml)) +#define poly_step(ah, al, kh, kl, mh, ml) \ + poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml)) /* ----------------------------------------------------------------------- */ #else /* not VMAC_ARCH_64 and not SSE2 */ /* ----------------------------------------------------------------------- */ #ifndef nh_16 -#define nh_16(mp, kp, nw, rh, rl) \ -{ uint64_t t1,t2,m1,m2,t; \ - int i; \ - rh = rl = t = 0; \ - for (i = 0; i < nw; i+=2) { \ - t1 = get64PE(mp+i) + kp[i]; \ - t2 = get64PE(mp+i+1) + kp[i+1]; \ - m2 = MUL32(t1 >> 32, t2); \ - m1 = MUL32(t1, t2 >> 32); \ - ADD128(rh,rl,MUL32(t1 >> 32,t2 >> 32),MUL32(t1,t2)); \ - rh += (uint64_t)(uint32_t)(m1 >> 32) + (uint32_t)(m2 >> 32); \ - t += (uint64_t)(uint32_t)m1 + (uint32_t)m2; \ - } \ - ADD128(rh,rl,(t >> 32),(t << 32)); \ -} +#define nh_16(mp, kp, nw, rh, rl) \ + { \ + uint64_t t1, t2, m1, m2, t; \ + int i; \ + rh = rl = t = 0; \ + for ( i = 0; i < nw; i += 2 ) \ + { \ + t1 = get64PE(mp + i) + kp[i]; \ + t2 = get64PE(mp + i + 1) + kp[i + 1]; \ + m2 = MUL32(t1 >> 32, t2); \ + m1 = MUL32(t1, t2 >> 32); \ + ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), MUL32(t1, t2)); \ + rh += 
(uint64_t)(uint32_t)(m1 >> 32) + (uint32_t)(m2 >> 32); \ + t += (uint64_t)(uint32_t)m1 + (uint32_t)m2; \ + } \ + ADD128(rh, rl, (t >> 32), (t << 32)); \ + } #endif static void poly_step_func(uint64_t *ahi, uint64_t *alo, const uint64_t *kh, - const uint64_t *kl, const uint64_t *mh, const uint64_t *ml) + const uint64_t *kl, const uint64_t *mh, + const uint64_t *ml) { #if VMAC_ARCH_BIG_ENDIAN @@ -637,21 +652,21 @@ static void poly_step_func(uint64_t *ahi, uint64_t *alo, const uint64_t *kh, #define INDEX_LOW 0 #endif -#define a0 *(((uint32_t*)alo)+INDEX_LOW) -#define a1 *(((uint32_t*)alo)+INDEX_HIGH) -#define a2 *(((uint32_t*)ahi)+INDEX_LOW) -#define a3 *(((uint32_t*)ahi)+INDEX_HIGH) -#define k0 *(((uint32_t*)kl)+INDEX_LOW) -#define k1 *(((uint32_t*)kl)+INDEX_HIGH) -#define k2 *(((uint32_t*)kh)+INDEX_LOW) -#define k3 *(((uint32_t*)kh)+INDEX_HIGH) +#define a0 *(((uint32_t *)alo) + INDEX_LOW) +#define a1 *(((uint32_t *)alo) + INDEX_HIGH) +#define a2 *(((uint32_t *)ahi) + INDEX_LOW) +#define a3 *(((uint32_t *)ahi) + INDEX_HIGH) +#define k0 *(((uint32_t *)kl) + INDEX_LOW) +#define k1 *(((uint32_t *)kl) + INDEX_HIGH) +#define k2 *(((uint32_t *)kh) + INDEX_LOW) +#define k3 *(((uint32_t *)kh) + INDEX_HIGH) uint64_t p, q, t; uint32_t t2; p = MUL32(a3, k3); p += p; - p += *(uint64_t *)mh; + p += *(uint64_t *)mh; p += MUL32(a0, k2); p += MUL32(a1, k1); p += MUL32(a2, k0); @@ -663,19 +678,19 @@ static void poly_step_func(uint64_t *ahi, uint64_t *alo, const uint64_t *kh, p += MUL32(a3, k0); t |= ((uint64_t)((uint32_t)p & 0x7fffffff)) << 32; p >>= 31; - p += (uint64_t)(((uint32_t*)ml)[INDEX_LOW]); + p += (uint64_t)(((uint32_t *)ml)[INDEX_LOW]); p += MUL32(a0, k0); - q = MUL32(a1, k3); + q = MUL32(a1, k3); q += MUL32(a2, k2); q += MUL32(a3, k1); q += q; p += q; t2 = (uint32_t)(p); p >>= 32; - p += (uint64_t)(((uint32_t*)ml)[INDEX_HIGH]); + p += (uint64_t)(((uint32_t *)ml)[INDEX_HIGH]); p += MUL32(a0, k1); p += MUL32(a1, k0); - q = MUL32(a2, k3); + q = MUL32(a2, k3); q += MUL32(a3, k2); q += q; p += q; @@ -693,47 +708,46 @@ static void poly_step_func(uint64_t *ahi, uint64_t *alo, const uint64_t *kh, #undef k3 } -#define poly_step(ah, al, kh, kl, mh, ml) \ - poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml)) +#define poly_step(ah, al, kh, kl, mh, ml) \ + poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml)) /* ----------------------------------------------------------------------- */ -#endif /* end of specialized NH and poly definitions */ +#endif /* end of specialized NH and poly definitions */ /* ----------------------------------------------------------------------- */ /* At least nh_16 is defined. 
Defined others as needed here */ #ifndef nh_16_2 -#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \ - nh_16(mp, kp, nw, rh, rl); \ - nh_16(mp, ((kp)+2), nw, rh2, rl2); +#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \ + nh_16(mp, kp, nw, rh, rl); \ + nh_16(mp, ((kp) + 2), nw, rh2, rl2); #endif #ifndef nh_vmac_nhbytes -#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ - nh_16(mp, kp, nw, rh, rl) +#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) nh_16(mp, kp, nw, rh, rl) #endif #ifndef nh_vmac_nhbytes_2 -#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \ - nh_vmac_nhbytes(mp, kp, nw, rh, rl); \ - nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); +#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \ + nh_vmac_nhbytes(mp, kp, nw, rh, rl); \ + nh_vmac_nhbytes(mp, ((kp) + 2), nw, rh2, rl2); #endif /* ----------------------------------------------------------------------- */ static void vhash_abort(vmac_ctx_t *ctx) { - ctx->polytmp[0] = ctx->polykey[0] ; - ctx->polytmp[1] = ctx->polykey[1] ; - #if (VMAC_TAG_LEN == 128) - ctx->polytmp[2] = ctx->polykey[2] ; - ctx->polytmp[3] = ctx->polykey[3] ; - #endif + ctx->polytmp[0] = ctx->polykey[0]; + ctx->polytmp[1] = ctx->polykey[1]; +#if ( VMAC_TAG_LEN == 128 ) + ctx->polytmp[2] = ctx->polykey[2]; + ctx->polytmp[3] = ctx->polykey[3]; +#endif ctx->first_block_processed = 0; } /* ----------------------------------------------------------------------- */ -static uint64_t l3hash(uint64_t p1, uint64_t p2, - uint64_t k1, uint64_t k2, uint64_t len) +static uint64_t l3hash(uint64_t p1, uint64_t p2, uint64_t k1, uint64_t k2, + uint64_t len) { - uint64_t rh, rl, t, z=0; + uint64_t rh, rl, t, z = 0; /* fully reduce (p1,p2)+(len,0) mod p127 */ t = p1 >> 63; @@ -766,15 +780,15 @@ static uint64_t l3hash(uint64_t p1, uint64_t p2, t += t << 8; rl += t; rl += (0 - (rl < t)) & 257; - rl += (0 - (rl > p64-1)) & 257; + rl += (0 - (rl > p64 - 1)) & 257; return rl; } /* ----------------------------------------------------------------------- */ void vhash_update(unsigned char *m, - unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */ - vmac_ctx_t *ctx) + unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */ + vmac_ctx_t *ctx) { uint64_t rh, rl, *mptr; const uint64_t *kptr = (uint64_t *)ctx->nhkey; @@ -782,67 +796,67 @@ void vhash_update(unsigned char *m, uint64_t ch, cl; uint64_t pkh = ctx->polykey[0]; uint64_t pkl = ctx->polykey[1]; - #if (VMAC_TAG_LEN == 128) +#if ( VMAC_TAG_LEN == 128 ) uint64_t ch2, cl2, rh2, rl2; uint64_t pkh2 = ctx->polykey[2]; uint64_t pkl2 = ctx->polykey[3]; - #endif +#endif mptr = (uint64_t *)m; - i = mbytes / VMAC_NHBYTES; /* Must be non-zero */ + i = mbytes / VMAC_NHBYTES; /* Must be non-zero */ ch = ctx->polytmp[0]; cl = ctx->polytmp[1]; - #if (VMAC_TAG_LEN == 128) +#if ( VMAC_TAG_LEN == 128 ) ch2 = ctx->polytmp[2]; cl2 = ctx->polytmp[3]; - #endif - - if ( ! 
ctx->first_block_processed) { +#endif + + if ( !ctx->first_block_processed ) + { ctx->first_block_processed = 1; - #if (VMAC_TAG_LEN == 64) - nh_vmac_nhbytes(mptr,kptr,VMAC_NHBYTES/8,rh,rl); - #else - nh_vmac_nhbytes_2(mptr,kptr,VMAC_NHBYTES/8,rh,rl,rh2,rl2); +#if ( VMAC_TAG_LEN == 64 ) + nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES / 8, rh, rl); +#else + nh_vmac_nhbytes_2(mptr, kptr, VMAC_NHBYTES / 8, rh, rl, rh2, rl2); rh2 &= m62; - ADD128(ch2,cl2,rh2,rl2); - #endif + ADD128(ch2, cl2, rh2, rl2); +#endif rh &= m62; - ADD128(ch,cl,rh,rl); - mptr += (VMAC_NHBYTES/sizeof(uint64_t)); + ADD128(ch, cl, rh, rl); + mptr += (VMAC_NHBYTES / sizeof(uint64_t)); i--; } - while (i--) { - #if (VMAC_TAG_LEN == 64) - nh_vmac_nhbytes(mptr,kptr,VMAC_NHBYTES/8,rh,rl); - #else - nh_vmac_nhbytes_2(mptr,kptr,VMAC_NHBYTES/8,rh,rl,rh2,rl2); + while ( i-- ) + { +#if ( VMAC_TAG_LEN == 64 ) + nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES / 8, rh, rl); +#else + nh_vmac_nhbytes_2(mptr, kptr, VMAC_NHBYTES / 8, rh, rl, rh2, rl2); rh2 &= m62; - poly_step(ch2,cl2,pkh2,pkl2,rh2,rl2); - #endif + poly_step(ch2, cl2, pkh2, pkl2, rh2, rl2); +#endif rh &= m62; - poly_step(ch,cl,pkh,pkl,rh,rl); - mptr += (VMAC_NHBYTES/sizeof(uint64_t)); + poly_step(ch, cl, pkh, pkl, rh, rl); + mptr += (VMAC_NHBYTES / sizeof(uint64_t)); } ctx->polytmp[0] = ch; ctx->polytmp[1] = cl; - #if (VMAC_TAG_LEN == 128) +#if ( VMAC_TAG_LEN == 128 ) ctx->polytmp[2] = ch2; ctx->polytmp[3] = cl2; - #endif - #if VMAC_USE_SSE2 +#endif +#if VMAC_USE_SSE2 _mm_empty(); /* SSE2 version of poly_step uses mmx instructions */ - #endif +#endif } /* ----------------------------------------------------------------------- */ -uint64_t vhash(unsigned char m[], - unsigned int mbytes, - uint64_t *tagl, - vmac_ctx_t *ctx) +uint64_t vhash(unsigned char m[], unsigned int mbytes, uint64_t *tagl, + vmac_ctx_t *ctx) { uint64_t rh, rl, *mptr; const uint64_t *kptr = (uint64_t *)ctx->nhkey; @@ -850,144 +864,145 @@ uint64_t vhash(unsigned char m[], uint64_t ch, cl; uint64_t pkh = ctx->polykey[0]; uint64_t pkl = ctx->polykey[1]; - #if (VMAC_TAG_LEN == 128) - uint64_t ch2, cl2, rh2, rl2; - uint64_t pkh2 = ctx->polykey[2]; - uint64_t pkl2 = ctx->polykey[3]; - #endif +#if ( VMAC_TAG_LEN == 128 ) + uint64_t ch2, cl2, rh2, rl2; + uint64_t pkh2 = ctx->polykey[2]; + uint64_t pkl2 = ctx->polykey[3]; +#endif mptr = (uint64_t *)m; i = mbytes / VMAC_NHBYTES; remaining = mbytes % VMAC_NHBYTES; - if (ctx->first_block_processed) + if ( ctx->first_block_processed ) { ch = ctx->polytmp[0]; cl = ctx->polytmp[1]; - #if (VMAC_TAG_LEN == 128) +#if ( VMAC_TAG_LEN == 128 ) ch2 = ctx->polytmp[2]; cl2 = ctx->polytmp[3]; - #endif +#endif } - else if (i) + else if ( i ) { - #if (VMAC_TAG_LEN == 64) - nh_vmac_nhbytes(mptr,kptr,VMAC_NHBYTES/8,ch,cl); - #else - nh_vmac_nhbytes_2(mptr,kptr,VMAC_NHBYTES/8,ch,cl,ch2,cl2); +#if ( VMAC_TAG_LEN == 64 ) + nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES / 8, ch, cl); +#else + nh_vmac_nhbytes_2(mptr, kptr, VMAC_NHBYTES / 8, ch, cl, ch2, cl2); ch2 &= m62; - ADD128(ch2,cl2,pkh2,pkl2); - #endif + ADD128(ch2, cl2, pkh2, pkl2); +#endif ch &= m62; - ADD128(ch,cl,pkh,pkl); - mptr += (VMAC_NHBYTES/sizeof(uint64_t)); + ADD128(ch, cl, pkh, pkl); + mptr += (VMAC_NHBYTES / sizeof(uint64_t)); i--; } - else if (remaining) + else if ( remaining ) { - #if (VMAC_TAG_LEN == 64) - nh_16(mptr,kptr,2*((remaining+15)/16),ch,cl); - #else - nh_16_2(mptr,kptr,2*((remaining+15)/16),ch,cl,ch2,cl2); +#if ( VMAC_TAG_LEN == 64 ) + nh_16(mptr, kptr, 2 * ((remaining + 15) / 16), ch, cl); +#else + nh_16_2(mptr, kptr, 2 * 
((remaining + 15) / 16), ch, cl, ch2, cl2); ch2 &= m62; - ADD128(ch2,cl2,pkh2,pkl2); - #endif + ADD128(ch2, cl2, pkh2, pkl2); +#endif ch &= m62; - ADD128(ch,cl,pkh,pkl); - mptr += (VMAC_NHBYTES/sizeof(uint64_t)); + ADD128(ch, cl, pkh, pkl); + mptr += (VMAC_NHBYTES / sizeof(uint64_t)); goto do_l3; } else /* Empty String */ { - ch = pkh; cl = pkl; - #if (VMAC_TAG_LEN == 128) - ch2 = pkh2; cl2 = pkl2; - #endif + ch = pkh; + cl = pkl; +#if ( VMAC_TAG_LEN == 128 ) + ch2 = pkh2; + cl2 = pkl2; +#endif goto do_l3; } - while (i--) { - #if (VMAC_TAG_LEN == 64) - nh_vmac_nhbytes(mptr,kptr,VMAC_NHBYTES/8,rh,rl); - #else - nh_vmac_nhbytes_2(mptr,kptr,VMAC_NHBYTES/8,rh,rl,rh2,rl2); + while ( i-- ) + { +#if ( VMAC_TAG_LEN == 64 ) + nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES / 8, rh, rl); +#else + nh_vmac_nhbytes_2(mptr, kptr, VMAC_NHBYTES / 8, rh, rl, rh2, rl2); rh2 &= m62; - poly_step(ch2,cl2,pkh2,pkl2,rh2,rl2); - #endif + poly_step(ch2, cl2, pkh2, pkl2, rh2, rl2); +#endif rh &= m62; - poly_step(ch,cl,pkh,pkl,rh,rl); - mptr += (VMAC_NHBYTES/sizeof(uint64_t)); + poly_step(ch, cl, pkh, pkl, rh, rl); + mptr += (VMAC_NHBYTES / sizeof(uint64_t)); } - if (remaining) { - #if (VMAC_TAG_LEN == 64) - nh_16(mptr,kptr,2*((remaining+15)/16),rh,rl); - #else - nh_16_2(mptr,kptr,2*((remaining+15)/16),rh,rl,rh2,rl2); + if ( remaining ) + { +#if ( VMAC_TAG_LEN == 64 ) + nh_16(mptr, kptr, 2 * ((remaining + 15) / 16), rh, rl); +#else + nh_16_2(mptr, kptr, 2 * ((remaining + 15) / 16), rh, rl, rh2, rl2); rh2 &= m62; - poly_step(ch2,cl2,pkh2,pkl2,rh2,rl2); - #endif + poly_step(ch2, cl2, pkh2, pkl2, rh2, rl2); +#endif rh &= m62; - poly_step(ch,cl,pkh,pkl,rh,rl); + poly_step(ch, cl, pkh, pkl, rh, rl); } do_l3: - #if VMAC_USE_SSE2 +#if VMAC_USE_SSE2 _mm_empty(); /* SSE2 version of poly_step uses mmx instructions */ - #endif +#endif vhash_abort(ctx); remaining *= 8; -#if (VMAC_TAG_LEN == 128) - *tagl = l3hash(ch2, cl2, ctx->l3key[2], ctx->l3key[3],remaining); +#if ( VMAC_TAG_LEN == 128 ) + *tagl = l3hash(ch2, cl2, ctx->l3key[2], ctx->l3key[3], remaining); #endif - return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1],remaining); + return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining); } /* ----------------------------------------------------------------------- */ -uint64_t vmac(unsigned char m[], - unsigned int mbytes, - unsigned char n[16], - uint64_t *tagl, - vmac_ctx_t *ctx) +uint64_t vmac(unsigned char m[], unsigned int mbytes, unsigned char n[16], + uint64_t *tagl, vmac_ctx_t *ctx) { -#if (VMAC_TAG_LEN == 64) +#if ( VMAC_TAG_LEN == 64 ) uint64_t *in_n, *out_p; uint64_t p, h; int i; - - #if VMAC_CACHE_NONCES + +#if VMAC_CACHE_NONCES in_n = ctx->cached_nonce; out_p = ctx->cached_aes; - #else +#else uint64_t tmp[2]; in_n = out_p = tmp; - #endif +#endif i = n[15] & 1; - #if VMAC_CACHE_NONCES - if ((*(uint64_t *)(n+8) != in_n[1]) || - (*(uint64_t *)(n ) != in_n[0])) { - #endif - - in_n[0] = *(uint64_t *)(n ); - in_n[1] = *(uint64_t *)(n+8); +#if VMAC_CACHE_NONCES + if ( (*(uint64_t *)(n + 8) != in_n[1]) || (*(uint64_t *)(n) != in_n[0]) ) + { +#endif + + in_n[0] = *(uint64_t *)(n); + in_n[1] = *(uint64_t *)(n + 8); ((unsigned char *)in_n)[15] &= 0xFE; aes_encryption(in_n, out_p, &ctx->cipher_key); - #if VMAC_CACHE_NONCES - ((unsigned char *)in_n)[15] |= (unsigned char)(1-i); +#if VMAC_CACHE_NONCES + ((unsigned char *)in_n)[15] |= (unsigned char)(1 - i); } - #endif +#endif p = get64BE(out_p + i); h = vhash(m, mbytes, (uint64_t *)0, ctx); return p + h; #else uint64_t tmp[2]; - uint64_t th,tl; + uint64_t th, tl; aes_encryption(n, 
(unsigned char *)tmp, &ctx->cipher_key); th = vhash(m, mbytes, &tl, ctx); th += get64BE(tmp); - *tagl = tl + get64BE(tmp+1); + *tagl = tl + get64BE(tmp + 1); return th; #endif } @@ -999,52 +1014,54 @@ void vmac_set_key(unsigned char user_key[], vmac_ctx_t *ctx) uint64_t in[2] = {0}, out[2]; unsigned i; aes_key_setup(user_key, &ctx->cipher_key); - + /* Fill nh key */ - ((unsigned char *)in)[0] = 0x80; - for (i = 0; i < sizeof(ctx->nhkey)/8; i+=2) { + ((unsigned char *)in)[0] = 0x80; + for ( i = 0; i < sizeof(ctx->nhkey) / 8; i += 2 ) + { aes_encryption((unsigned char *)in, (unsigned char *)out, - &ctx->cipher_key); - ctx->nhkey[i ] = get64BE(out); - ctx->nhkey[i+1] = get64BE(out+1); + &ctx->cipher_key); + ctx->nhkey[i] = get64BE(out); + ctx->nhkey[i + 1] = get64BE(out + 1); ((unsigned char *)in)[15] += 1; } /* Fill poly key */ - ((unsigned char *)in)[0] = 0xC0; + ((unsigned char *)in)[0] = 0xC0; in[1] = 0; - for (i = 0; i < sizeof(ctx->polykey)/8; i+=2) { + for ( i = 0; i < sizeof(ctx->polykey) / 8; i += 2 ) + { aes_encryption((unsigned char *)in, (unsigned char *)out, - &ctx->cipher_key); - ctx->polytmp[i ] = ctx->polykey[i ] = get64BE(out) & mpoly; - ctx->polytmp[i+1] = ctx->polykey[i+1] = get64BE(out+1) & mpoly; + &ctx->cipher_key); + ctx->polytmp[i] = ctx->polykey[i] = get64BE(out) & mpoly; + ctx->polytmp[i + 1] = ctx->polykey[i + 1] = get64BE(out + 1) & mpoly; ((unsigned char *)in)[15] += 1; } /* Fill ip key */ ((unsigned char *)in)[0] = 0xE0; in[1] = 0; - for (i = 0; i < sizeof(ctx->l3key)/8; i+=2) { + for ( i = 0; i < sizeof(ctx->l3key) / 8; i += 2 ) + { do { aes_encryption((unsigned char *)in, (unsigned char *)out, - &ctx->cipher_key); - ctx->l3key[i ] = get64BE(out); - ctx->l3key[i+1] = get64BE(out+1); + &ctx->cipher_key); + ctx->l3key[i] = get64BE(out); + ctx->l3key[i + 1] = get64BE(out + 1); ((unsigned char *)in)[15] += 1; - } while (ctx->l3key[i] >= p64 || ctx->l3key[i+1] >= p64); + } while ( ctx->l3key[i] >= p64 || ctx->l3key[i + 1] >= p64 ); } - - /* Invalidate nonce/aes cache and reset other elements */ - #if (VMAC_TAG_LEN == 64) && (VMAC_CACHE_NONCES) + +/* Invalidate nonce/aes cache and reset other elements */ +#if ( VMAC_TAG_LEN == 64 ) && (VMAC_CACHE_NONCES) ctx->cached_nonce[0] = (uint64_t)-1; /* Ensure illegal nonce */ ctx->cached_nonce[1] = (uint64_t)0; /* Ensure illegal nonce */ - #endif +#endif ctx->first_block_processed = 0; } /* ----------------------------------------------------------------------- */ - #if VMAC_RUN_TESTS #include @@ -1052,20 +1069,21 @@ void vmac_set_key(unsigned char user_key[], vmac_ctx_t *ctx) #include #include -unsigned prime(void) /* Wake variable speed cpu, get rough speed estimate */ +unsigned prime(void) /* Wake variable speed cpu, get rough speed estimate */ { volatile uint64_t i; - volatile uint64_t j=1; - unsigned cnt=0; + volatile uint64_t j = 1; + unsigned cnt = 0; volatile clock_t ticks = clock(); do { - for (i = 0; i < 500000; i++) { + for ( i = 0; i < 500000; i++ ) + { uint64_t x = get64PE(&j); j = x * x + (uint64_t)ticks; } cnt++; - } while (clock() - ticks < (CLOCKS_PER_SEC/2)); - return cnt; /* cnt is millions of iterations per second */ + } while ( clock() - ticks < (CLOCKS_PER_SEC / 2) ); + return cnt; /* cnt is millions of iterations per second */ } int main(void) @@ -1076,42 +1094,43 @@ int main(void) unsigned char *m; ALIGN(4) unsigned char key[] = "abcdefghijklmnop"; ALIGN(4) unsigned char nonce[] = "\0\0\0\0\0\0\0\0bcdefghi"; - unsigned int vector_lengths[] = {0,3,48,300,3000000}; - #if (VMAC_TAG_LEN == 64) - ALIGN(4) 
char *should_be[] = {"2576BE1C56D8B81B","2D376CF5B1813CE5", - "E8421F61D573D298","4492DF6C5CAC1BBE", - "09BA597DD7601113"}; - #else - ALIGN(4) char *should_be[] = {"472766C70F74ED23481D6D7DE4E80DAC", - "4EE815A06A1D71EDD36FC75D51188A42", - "09F2C80C8E1007A0C12FAE19FE4504AE", - "66438817154850C61D8A412164803BCB", - "2B6B02288FFC461B75485DE893C629DC"}; - #endif + unsigned int vector_lengths[] = {0, 3, 48, 300, 3000000}; +#if ( VMAC_TAG_LEN == 64 ) + ALIGN(4) + char *should_be[] = {"2576BE1C56D8B81B", "2D376CF5B1813CE5", + "E8421F61D573D298", "4492DF6C5CAC1BBE", + "09BA597DD7601113"}; +#else + ALIGN(4) + char *should_be[] = { + "472766C70F74ED23481D6D7DE4E80DAC", "4EE815A06A1D71EDD36FC75D51188A42", + "09F2C80C8E1007A0C12FAE19FE4504AE", "66438817154850C61D8A412164803BCB", + "2B6B02288FFC461B75485DE893C629DC"}; +#endif unsigned speed_lengths[] = {16, 32, 64, 128, 256, 512, 1024, 2048, 4096}; unsigned i, j, *speed_iters; clock_t ticks; double cpb; const unsigned int buf_len = 3 * (1 << 20); - + j = prime(); - i = sizeof(speed_lengths)/sizeof(speed_lengths[0]); - speed_iters = (unsigned *)malloc(i*sizeof(speed_iters[0])); - speed_iters[i-1] = j * (1 << 12); - while (--i) speed_iters[i-1] = (unsigned)(1.3 * speed_iters[i]); - + i = sizeof(speed_lengths) / sizeof(speed_lengths[0]); + speed_iters = (unsigned *)malloc(i * sizeof(speed_iters[0])); + speed_iters[i - 1] = j * (1 << 12); + while ( --i ) + speed_iters[i - 1] = (unsigned)(1.3 * speed_iters[i]); + /* Initialize context and message buffer, all 16-byte aligned */ p = malloc(buf_len + 32); m = (unsigned char *)(((size_t)p + 16) & ~((size_t)15)); memset(m, 0, buf_len + 16); vmac_set_key(key, &ctx); - + /* Test incremental and all-in-one interfaces for correctness */ vmac_set_key(key, &ctx_aio); vmac_set_key(key, &ctx_inc1); vmac_set_key(key, &ctx_inc2); - - + /* for (i = 0; i <= 512; i++) { vhash_update(m,(i/VMAC_NHBYTES)*VMAC_NHBYTES,&ctx_inc1); @@ -1120,38 +1139,41 @@ int main(void) vhash_update(m,(i/VMAC_NHBYTES)*VMAC_NHBYTES,&ctx_inc1); for (j = 0; j < vector_lengths[i]; j++) m[j] = (unsigned char)('a'+j%3); - + } */ - + /* Generate vectors */ - for (i = 0; i < sizeof(vector_lengths)/sizeof(unsigned int); i++) { - for (j = 0; j < vector_lengths[i]; j++) - m[j] = (unsigned char)('a'+j%3); + for ( i = 0; i < sizeof(vector_lengths) / sizeof(unsigned int); i++ ) + { + for ( j = 0; j < vector_lengths[i]; j++ ) + m[j] = (unsigned char)('a' + j % 3); res = vmac(m, vector_lengths[i], nonce, &tagl, &ctx); - #if (VMAC_TAG_LEN == 64) - printf("\'abc\' * %7u: %016llX Should be: %s\n", - vector_lengths[i]/3,res,should_be[i]); - #else +#if ( VMAC_TAG_LEN == 64 ) + printf("\'abc\' * %7u: %016llX Should be: %s\n", vector_lengths[i] / 3, + res, should_be[i]); +#else printf("\'abc\' * %7u: %016llX%016llX\nShould be : %s\n", - vector_lengths[i]/3,res,tagl,should_be[i]); - #endif + vector_lengths[i] / 3, res, tagl, should_be[i]); +#endif } /* Speed test */ - for (i = 0; i < sizeof(speed_lengths)/sizeof(unsigned int); i++) { + for ( i = 0; i < sizeof(speed_lengths) / sizeof(unsigned int); i++ ) + { ticks = clock(); - for (j = 0; j < speed_iters[i]; j++) { - #if HASH_ONLY + for ( j = 0; j < speed_iters[i]; j++ ) + { +#if HASH_ONLY res = vhash(m, speed_lengths[i], &tagl, &ctx); - #else +#else res = vmac(m, speed_lengths[i], nonce, &tagl, &ctx); nonce[7]++; - #endif +#endif } ticks = clock() - ticks; - cpb = ((ticks*VMAC_HZ)/ - ((double)CLOCKS_PER_SEC*speed_lengths[i]*speed_iters[i])); + cpb = ((ticks * VMAC_HZ) / + ((double)CLOCKS_PER_SEC * speed_lengths[i] * 
speed_iters[i])); printf("%4u bytes, %2.2f cpb\n", speed_lengths[i], cpb); } return 1; diff --git a/xen/drivers/acpi/apei/apei-base.c b/xen/drivers/acpi/apei/apei-base.c index 6f81e7fa36..04dbbbfd8e 100644 --- a/xen/drivers/acpi/apei/apei-base.c +++ b/xen/drivers/acpi/apei/apei-base.c @@ -49,231 +49,233 @@ * INJection) interpreter framework. */ -#define APEI_EXEC_PRESERVE_REGISTER 0x1 +#define APEI_EXEC_PRESERVE_REGISTER 0x1 int apei_exec_ctx_init(struct apei_exec_context *ctx, - struct apei_exec_ins_type *ins_table, - u32 instructions, - struct acpi_whea_header *action_table, - u32 entries) + struct apei_exec_ins_type *ins_table, u32 instructions, + struct acpi_whea_header *action_table, u32 entries) { - if (!ctx) - return -EINVAL; - - ctx->ins_table = ins_table; - ctx->instructions = instructions; - ctx->action_table = action_table; - ctx->entries = entries; - return 0; + if ( !ctx ) + return -EINVAL; + + ctx->ins_table = ins_table; + ctx->instructions = instructions; + ctx->action_table = action_table; + ctx->entries = entries; + return 0; } int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val) { - int rc; + int rc; - rc = apei_read(val, &entry->register_region); - if (rc) - return rc; - *val >>= entry->register_region.bit_offset; - *val &= entry->mask; + rc = apei_read(val, &entry->register_region); + if ( rc ) + return rc; + *val >>= entry->register_region.bit_offset; + *val &= entry->mask; - return 0; + return 0; } int apei_exec_read_register(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - int rc; - u64 val = 0; + int rc; + u64 val = 0; - rc = __apei_exec_read_register(entry, &val); - if (rc) - return rc; - ctx->value = val; + rc = __apei_exec_read_register(entry, &val); + if ( rc ) + return rc; + ctx->value = val; - return 0; + return 0; } int apei_exec_read_register_value(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - int rc; + int rc; - rc = apei_exec_read_register(ctx, entry); - if (rc) - return rc; - ctx->value = (ctx->value == entry->value); + rc = apei_exec_read_register(ctx, entry); + if ( rc ) + return rc; + ctx->value = (ctx->value == entry->value); - return 0; + return 0; } int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val) { - int rc; - - val &= entry->mask; - val <<= entry->register_region.bit_offset; - if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) { - u64 valr = 0; - rc = apei_read(&valr, &entry->register_region); - if (rc) - return rc; - valr &= ~(entry->mask << entry->register_region.bit_offset); - val |= valr; - } - rc = apei_write(val, &entry->register_region); - - return rc; + int rc; + + val &= entry->mask; + val <<= entry->register_region.bit_offset; + if ( entry->flags & APEI_EXEC_PRESERVE_REGISTER ) + { + u64 valr = 0; + rc = apei_read(&valr, &entry->register_region); + if ( rc ) + return rc; + valr &= ~(entry->mask << entry->register_region.bit_offset); + val |= valr; + } + rc = apei_write(val, &entry->register_region); + + return rc; } int apei_exec_write_register(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - return __apei_exec_write_register(entry, ctx->value); + return __apei_exec_write_register(entry, ctx->value); } int apei_exec_write_register_value(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - int rc; + int rc; - ctx->value = entry->value; - rc = apei_exec_write_register(ctx, entry); + 
ctx->value = entry->value; + rc = apei_exec_write_register(ctx, entry); - return rc; + return rc; } int apei_exec_noop(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - return 0; + return 0; } /* * Interpret the specified action. Go through whole action table, * execute all instructions belong to the action. */ -int __apei_exec_run(struct apei_exec_context *ctx, u8 action, - bool_t optional) +int __apei_exec_run(struct apei_exec_context *ctx, u8 action, bool_t optional) { - int rc = -ENOENT; - u32 i, ip; - struct acpi_whea_header *entry; - apei_exec_ins_func_t run; - - ctx->ip = 0; - - /* - * "ip" is the instruction pointer of current instruction, - * "ctx->ip" specifies the next instruction to executed, - * instruction "run" function may change the "ctx->ip" to - * implement "goto" semantics. - */ + int rc = -ENOENT; + u32 i, ip; + struct acpi_whea_header *entry; + apei_exec_ins_func_t run; + + ctx->ip = 0; + + /* + * "ip" is the instruction pointer of current instruction, + * "ctx->ip" specifies the next instruction to executed, + * instruction "run" function may change the "ctx->ip" to + * implement "goto" semantics. + */ rewind: - ip = 0; - for (i = 0; i < ctx->entries; i++) { - entry = &ctx->action_table[i]; - if (entry->action != action) - continue; - if (ip == ctx->ip) { - if (entry->instruction >= ctx->instructions || - !ctx->ins_table[entry->instruction].run) { - printk(KERN_WARNING - "Invalid action table, unknown instruction " - "type: %d\n", entry->instruction); - return -EINVAL; - } - run = ctx->ins_table[entry->instruction].run; - rc = run(ctx, entry); - if (rc < 0) - return rc; - else if (rc != APEI_EXEC_SET_IP) - ctx->ip++; - } - ip++; - if (ctx->ip < ip) - goto rewind; - } - - return !optional && rc < 0 ? rc : 0; + ip = 0; + for ( i = 0; i < ctx->entries; i++ ) + { + entry = &ctx->action_table[i]; + if ( entry->action != action ) + continue; + if ( ip == ctx->ip ) + { + if ( entry->instruction >= ctx->instructions || + !ctx->ins_table[entry->instruction].run ) + { + printk(KERN_WARNING "Invalid action table, unknown instruction " + "type: %d\n", + entry->instruction); + return -EINVAL; + } + run = ctx->ins_table[entry->instruction].run; + rc = run(ctx, entry); + if ( rc < 0 ) + return rc; + else if ( rc != APEI_EXEC_SET_IP ) + ctx->ip++; + } + ip++; + if ( ctx->ip < ip ) + goto rewind; + } + + return !optional && rc < 0 ? 
rc : 0; } typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx, - struct acpi_whea_header *entry, - void *data); + struct acpi_whea_header *entry, + void *data); static int __init apei_exec_for_each_entry(struct apei_exec_context *ctx, - apei_exec_entry_func_t func, - void *data, - int *end) + apei_exec_entry_func_t func, + void *data, int *end) { - u8 ins; - int i, rc; - struct acpi_whea_header *entry; - struct apei_exec_ins_type *ins_table = ctx->ins_table; - - for (i = 0; i < ctx->entries; i++) { - entry = ctx->action_table + i; - ins = entry->instruction; - if (end) - *end = i; - if (ins >= ctx->instructions || !ins_table[ins].run) { - printk(KERN_WARNING "Invalid action table, " - "unknown instruction type: %d\n", ins); - return -EINVAL; - } - rc = func(ctx, entry, data); - if (rc) - return rc; - } - - return 0; + u8 ins; + int i, rc; + struct acpi_whea_header *entry; + struct apei_exec_ins_type *ins_table = ctx->ins_table; + + for ( i = 0; i < ctx->entries; i++ ) + { + entry = ctx->action_table + i; + ins = entry->instruction; + if ( end ) + *end = i; + if ( ins >= ctx->instructions || !ins_table[ins].run ) + { + printk(KERN_WARNING "Invalid action table, " + "unknown instruction type: %d\n", + ins); + return -EINVAL; + } + rc = func(ctx, entry, data); + if ( rc ) + return rc; + } + + return 0; } static int __init pre_map_gar_callback(struct apei_exec_context *ctx, - struct acpi_whea_header *entry, - void *data) + struct acpi_whea_header *entry, + void *data) { - u8 ins = entry->instruction; + u8 ins = entry->instruction; - if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) - return apei_pre_map_gar(&entry->register_region); + if ( ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER ) + return apei_pre_map_gar(&entry->register_region); - return 0; + return 0; } /* Pre-map all GARs in action table. */ int __init apei_exec_pre_map_gars(struct apei_exec_context *ctx) { - int rc, end; - - rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback, - NULL, &end); - if (rc) { - struct apei_exec_context ctx_unmap; - memcpy(&ctx_unmap, ctx, sizeof(*ctx)); - ctx_unmap.entries = end; - apei_exec_post_unmap_gars(&ctx_unmap); - } - - return rc; + int rc, end; + + rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback, NULL, &end); + if ( rc ) + { + struct apei_exec_context ctx_unmap; + memcpy(&ctx_unmap, ctx, sizeof(*ctx)); + ctx_unmap.entries = end; + apei_exec_post_unmap_gars(&ctx_unmap); + } + + return rc; } static int __init post_unmap_gar_callback(struct apei_exec_context *ctx, - struct acpi_whea_header *entry, - void *data) + struct acpi_whea_header *entry, + void *data) { - u8 ins = entry->instruction; + u8 ins = entry->instruction; - if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) - apei_post_unmap_gar(&entry->register_region); + if ( ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER ) + apei_post_unmap_gar(&entry->register_region); - return 0; + return 0; } /* Post-unmap all GAR in action table. 
*/ int __init apei_exec_post_unmap_gars(struct apei_exec_context *ctx) { - return apei_exec_for_each_entry(ctx, post_unmap_gar_callback, - NULL, NULL); + return apei_exec_for_each_entry(ctx, post_unmap_gar_callback, NULL, NULL); } diff --git a/xen/drivers/acpi/apei/apei-io.c b/xen/drivers/acpi/apei/apei-io.c index 89b70f45ef..36bf7e17c5 100644 --- a/xen/drivers/acpi/apei/apei-io.c +++ b/xen/drivers/acpi/apei/apei-io.c @@ -40,59 +40,60 @@ static LIST_HEAD(apei_iomaps); */ static DEFINE_SPINLOCK(apei_iomaps_lock); -struct apei_iomap { - struct list_head list; - void __iomem *vaddr; - unsigned long size; - paddr_t paddr; +struct apei_iomap +{ + struct list_head list; + void __iomem *vaddr; + unsigned long size; + paddr_t paddr; }; -static struct apei_iomap *__apei_find_iomap(paddr_t paddr, - unsigned long size) +static struct apei_iomap *__apei_find_iomap(paddr_t paddr, unsigned long size) { - struct apei_iomap *map; - - list_for_each_entry(map, &apei_iomaps, list) { - if (map->paddr + map->size >= paddr + size && - map->paddr <= paddr) - return map; - } - return NULL; + struct apei_iomap *map; + + list_for_each_entry (map, &apei_iomaps, list) + { + if ( map->paddr + map->size >= paddr + size && map->paddr <= paddr ) + return map; + } + return NULL; } -static void __iomem *__apei_ioremap_fast(paddr_t paddr, - unsigned long size) +static void __iomem *__apei_ioremap_fast(paddr_t paddr, unsigned long size) { - struct apei_iomap *map; + struct apei_iomap *map; - map = __apei_find_iomap(paddr, size); - if (map) - return map->vaddr + (paddr - map->paddr); - else - return NULL; + map = __apei_find_iomap(paddr, size); + if ( map ) + return map->vaddr + (paddr - map->paddr); + else + return NULL; } static int apei_range_nr; static void __iomem *__init apei_range_map(paddr_t paddr, unsigned long size) { - int i, pg; - int start_nr, cur_nr; - - pg = ((((paddr + size -1) & PAGE_MASK) - - (paddr & PAGE_MASK)) >> PAGE_SHIFT) + 1; - if (apei_range_nr + pg > FIX_APEI_RANGE_MAX) - return NULL; - - start_nr = apei_range_nr + pg -1; - for (i = 0; i < pg; i++) { - cur_nr = start_nr - i; - set_fixmap_nocache(FIX_APEI_RANGE_BASE + cur_nr, - paddr + (i << PAGE_SHIFT)); - apei_range_nr++; - } - - return fix_to_virt(FIX_APEI_RANGE_BASE + start_nr); + int i, pg; + int start_nr, cur_nr; + + pg = ((((paddr + size - 1) & PAGE_MASK) - (paddr & PAGE_MASK)) >> + PAGE_SHIFT) + + 1; + if ( apei_range_nr + pg > FIX_APEI_RANGE_MAX ) + return NULL; + + start_nr = apei_range_nr + pg - 1; + for ( i = 0; i < pg; i++ ) + { + cur_nr = start_nr - i; + set_fixmap_nocache(FIX_APEI_RANGE_BASE + cur_nr, + paddr + (i << PAGE_SHIFT)); + apei_range_nr++; + } + + return fix_to_virt(FIX_APEI_RANGE_BASE + start_nr); } /* @@ -102,37 +103,38 @@ static void __iomem *__init apei_range_map(paddr_t paddr, unsigned long size) */ void __iomem *__init apei_pre_map(paddr_t paddr, unsigned long size) { - void __iomem *vaddr; - struct apei_iomap *map; - unsigned long flags; - - spin_lock_irqsave(&apei_iomaps_lock, flags); - vaddr = __apei_ioremap_fast(paddr, size); - spin_unlock_irqrestore(&apei_iomaps_lock, flags); - if (vaddr) - return vaddr; - - map = xmalloc(struct apei_iomap); - if (!map) - return NULL; - - vaddr = apei_range_map(paddr, size); - if (!vaddr) { - xfree(map); - return NULL; - } - - INIT_LIST_HEAD(&map->list); - map->paddr = paddr & PAGE_MASK; - map->size = (((paddr + size + PAGE_SIZE -1) & PAGE_MASK) - - (paddr & PAGE_MASK)); - map->vaddr = vaddr; - - spin_lock_irqsave(&apei_iomaps_lock, flags); - list_add_tail(&map->list, 
&apei_iomaps); - spin_unlock_irqrestore(&apei_iomaps_lock, flags); - - return map->vaddr + (paddr - map->paddr); + void __iomem *vaddr; + struct apei_iomap *map; + unsigned long flags; + + spin_lock_irqsave(&apei_iomaps_lock, flags); + vaddr = __apei_ioremap_fast(paddr, size); + spin_unlock_irqrestore(&apei_iomaps_lock, flags); + if ( vaddr ) + return vaddr; + + map = xmalloc(struct apei_iomap); + if ( !map ) + return NULL; + + vaddr = apei_range_map(paddr, size); + if ( !vaddr ) + { + xfree(map); + return NULL; + } + + INIT_LIST_HEAD(&map->list); + map->paddr = paddr & PAGE_MASK; + map->size = + (((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - (paddr & PAGE_MASK)); + map->vaddr = vaddr; + + spin_lock_irqsave(&apei_iomaps_lock, flags); + list_add_tail(&map->list, &apei_iomaps); + spin_unlock_irqrestore(&apei_iomaps_lock, flags); + + return map->vaddr + (paddr - map->paddr); } /* @@ -140,186 +142,190 @@ void __iomem *__init apei_pre_map(paddr_t paddr, unsigned long size) */ static void __init apei_post_unmap(paddr_t paddr, unsigned long size) { - struct apei_iomap *map; - unsigned long flags; + struct apei_iomap *map; + unsigned long flags; - spin_lock_irqsave(&apei_iomaps_lock, flags); - map = __apei_find_iomap(paddr, size); - if (map) - list_del(&map->list); - spin_unlock_irqrestore(&apei_iomaps_lock, flags); + spin_lock_irqsave(&apei_iomaps_lock, flags); + map = __apei_find_iomap(paddr, size); + if ( map ) + list_del(&map->list); + spin_unlock_irqrestore(&apei_iomaps_lock, flags); - xfree(map); + xfree(map); } /* In NMI handler, should set silent = 1 */ -static int apei_check_gar(struct acpi_generic_address *reg, - u64 *paddr, int silent) +static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr, + int silent) { - u32 width, space_id; - - width = reg->bit_width; - space_id = reg->space_id; - /* Handle possible alignment issues */ - memcpy(paddr, ®->address, sizeof(*paddr)); - if (!*paddr) { - if (!silent) - printk(KERN_WARNING - "Invalid physical address in GAR\n"); - return -EINVAL; - } - - if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) { - if (!silent) - printk(KERN_WARNING - "Invalid bit width in GAR\n"); - return -EINVAL; - } - - if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY && - space_id != ACPI_ADR_SPACE_SYSTEM_IO) { - if (!silent) - printk(KERN_WARNING - "Invalid address space type in GAR\n"); - return -EINVAL; - } - - return 0; + u32 width, space_id; + + width = reg->bit_width; + space_id = reg->space_id; + /* Handle possible alignment issues */ + memcpy(paddr, ®->address, sizeof(*paddr)); + if ( !*paddr ) + { + if ( !silent ) + printk(KERN_WARNING "Invalid physical address in GAR\n"); + return -EINVAL; + } + + if ( (width != 8) && (width != 16) && (width != 32) && (width != 64) ) + { + if ( !silent ) + printk(KERN_WARNING "Invalid bit width in GAR\n"); + return -EINVAL; + } + + if ( space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY && + space_id != ACPI_ADR_SPACE_SYSTEM_IO ) + { + if ( !silent ) + printk(KERN_WARNING "Invalid address space type in GAR\n"); + return -EINVAL; + } + + return 0; } /* Pre-map, working on GAR */ int __init apei_pre_map_gar(struct acpi_generic_address *reg) { - u64 paddr; - void __iomem *vaddr; - int rc; + u64 paddr; + void __iomem *vaddr; + int rc; - if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) - return 0; + if ( reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY ) + return 0; - rc = apei_check_gar(reg, &paddr, 0); - if (rc) - return rc; + rc = apei_check_gar(reg, &paddr, 0); + if ( rc ) + return rc; - vaddr = 
apei_pre_map(paddr, reg->bit_width / 8); - if (!vaddr) - return -EIO; + vaddr = apei_pre_map(paddr, reg->bit_width / 8); + if ( !vaddr ) + return -EIO; - return 0; + return 0; } /* Post-unmap, working on GAR */ int __init apei_post_unmap_gar(struct acpi_generic_address *reg) { - u64 paddr; - int rc; + u64 paddr; + int rc; - if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) - return 0; + if ( reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY ) + return 0; - rc = apei_check_gar(reg, &paddr, 0); - if (rc) - return rc; + rc = apei_check_gar(reg, &paddr, 0); + if ( rc ) + return rc; - apei_post_unmap(paddr, reg->bit_width / 8); + apei_post_unmap(paddr, reg->bit_width / 8); - return 0; + return 0; } static int apei_read_mem(u64 paddr, u64 *val, u32 width) { - void __iomem *addr; - u64 tmpval; - - addr = __apei_ioremap_fast(paddr, width); - switch (width) { - case 8: - *val = readb(addr); - break; - case 16: - *val = readw(addr); - break; - case 32: - *val = readl(addr); - break; - case 64: - tmpval = (u64)readl(addr); - tmpval |= ((u64)readl(addr+4)) << 32; - *val = tmpval; - break; - default: - return -EINVAL; - } - - return 0; + void __iomem *addr; + u64 tmpval; + + addr = __apei_ioremap_fast(paddr, width); + switch (width) + { + case 8: + *val = readb(addr); + break; + case 16: + *val = readw(addr); + break; + case 32: + *val = readl(addr); + break; + case 64: + tmpval = (u64)readl(addr); + tmpval |= ((u64)readl(addr + 4)) << 32; + *val = tmpval; + break; + default: + return -EINVAL; + } + + return 0; } static int apei_write_mem(u64 paddr, u64 val, u32 width) { - void __iomem *addr; - u32 tmpval; - - addr = __apei_ioremap_fast(paddr, width); - switch (width) { - case 8: - writeb(val, addr); - break; - case 16: - writew(val, addr); - break; - case 32: - writel(val, addr); - break; - case 64: - tmpval = (u32)val; - writel(tmpval, addr); - tmpval = (u32)(val >> 32); - writel(tmpval, addr+4); - break; - default: - return -EINVAL; - } - - return 0; + void __iomem *addr; + u32 tmpval; + + addr = __apei_ioremap_fast(paddr, width); + switch (width) + { + case 8: + writeb(val, addr); + break; + case 16: + writew(val, addr); + break; + case 32: + writel(val, addr); + break; + case 64: + tmpval = (u32)val; + writel(tmpval, addr); + tmpval = (u32)(val >> 32); + writel(tmpval, addr + 4); + break; + default: + return -EINVAL; + } + + return 0; } int apei_read(u64 *val, struct acpi_generic_address *reg) { - u64 paddr; - int rc; - - rc = apei_check_gar(reg, &paddr, 1); - if (rc) - return rc; - - *val = 0; - - /* currently all erst implementation take bit_width as real range */ - switch (reg->space_id) { - case ACPI_ADR_SPACE_SYSTEM_MEMORY: - return apei_read_mem(paddr, val, reg->bit_width); - case ACPI_ADR_SPACE_SYSTEM_IO: - return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width); - default: - return -EINVAL; - } + u64 paddr; + int rc; + + rc = apei_check_gar(reg, &paddr, 1); + if ( rc ) + return rc; + + *val = 0; + + /* currently all erst implementation take bit_width as real range */ + switch (reg->space_id) + { + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + return apei_read_mem(paddr, val, reg->bit_width); + case ACPI_ADR_SPACE_SYSTEM_IO: + return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width); + default: + return -EINVAL; + } } int apei_write(u64 val, struct acpi_generic_address *reg) { - u64 paddr; - int rc; - - rc = apei_check_gar(reg, &paddr, 1); - if (rc) - return rc; - - switch (reg->space_id) { - case ACPI_ADR_SPACE_SYSTEM_MEMORY: - return apei_write_mem(paddr, val, reg->bit_width); - case 
ACPI_ADR_SPACE_SYSTEM_IO: - return acpi_os_write_port(paddr, val, reg->bit_width); - default: - return -EINVAL; - } + u64 paddr; + int rc; + + rc = apei_check_gar(reg, &paddr, 1); + if ( rc ) + return rc; + + switch (reg->space_id) + { + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + return apei_write_mem(paddr, val, reg->bit_width); + case ACPI_ADR_SPACE_SYSTEM_IO: + return acpi_os_write_port(paddr, val, reg->bit_width); + default: + return -EINVAL; + } } diff --git a/xen/drivers/acpi/apei/erst.c b/xen/drivers/acpi/apei/erst.c index 3a2e403173..65561d2d41 100644 --- a/xen/drivers/acpi/apei/erst.c +++ b/xen/drivers/acpi/apei/erst.c @@ -40,39 +40,40 @@ #include "apei-internal.h" /* ERST command status */ -#define ERST_STATUS_SUCCESS 0x0 -#define ERST_STATUS_NOT_ENOUGH_SPACE 0x1 -#define ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x2 -#define ERST_STATUS_FAILED 0x3 -#define ERST_STATUS_RECORD_STORE_EMPTY 0x4 -#define ERST_STATUS_RECORD_NOT_FOUND 0x5 - -#define ERST_TAB_ENTRY(tab) \ - ((struct acpi_whea_header *)((char *)(tab) + \ - sizeof(struct acpi_table_erst))) - -#define SPIN_UNIT 1 /* 1us */ +#define ERST_STATUS_SUCCESS 0x0 +#define ERST_STATUS_NOT_ENOUGH_SPACE 0x1 +#define ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x2 +#define ERST_STATUS_FAILED 0x3 +#define ERST_STATUS_RECORD_STORE_EMPTY 0x4 +#define ERST_STATUS_RECORD_NOT_FOUND 0x5 + +#define ERST_TAB_ENTRY(tab) \ + ((struct acpi_whea_header *)((char *)(tab) + \ + sizeof(struct acpi_table_erst))) + +#define SPIN_UNIT 1 /* 1us */ /* Firmware should respond within 1 miliseconds */ -#define FIRMWARE_TIMEOUT (1 * 1000) -#define FIRMWARE_MAX_STALL 50 /* 50us */ +#define FIRMWARE_TIMEOUT (1 * 1000) +#define FIRMWARE_MAX_STALL 50 /* 50us */ static struct acpi_table_erst *__read_mostly erst_tab; static bool_t __read_mostly erst_enabled; /* ERST Error Log Address Range atrributes */ -#define ERST_RANGE_RESERVED 0x0001 -#define ERST_RANGE_NVRAM 0x0002 -#define ERST_RANGE_SLOW 0x0004 +#define ERST_RANGE_RESERVED 0x0001 +#define ERST_RANGE_NVRAM 0x0002 +#define ERST_RANGE_SLOW 0x0004 /* * ERST Error Log Address Range, used as buffer for reading/writing * error records. 
*/ -static struct erst_erange { - u64 base; - u64 size; - void __iomem *vaddr; - u32 attr; +static struct erst_erange +{ + u64 base; + u64 size; + void __iomem *vaddr; + u32 attr; } erst_erange; /* @@ -87,345 +88,367 @@ static DEFINE_SPINLOCK(erst_lock); static inline int erst_errno(int command_status) { - switch (command_status) { - case ERST_STATUS_SUCCESS: - return 0; - case ERST_STATUS_HARDWARE_NOT_AVAILABLE: - return -ENODEV; - case ERST_STATUS_NOT_ENOUGH_SPACE: - return -ENOSPC; - case ERST_STATUS_RECORD_STORE_EMPTY: - case ERST_STATUS_RECORD_NOT_FOUND: - return -ENOENT; - default: - return -EINVAL; - } + switch (command_status) + { + case ERST_STATUS_SUCCESS: + return 0; + case ERST_STATUS_HARDWARE_NOT_AVAILABLE: + return -ENODEV; + case ERST_STATUS_NOT_ENOUGH_SPACE: + return -ENOSPC; + case ERST_STATUS_RECORD_STORE_EMPTY: + case ERST_STATUS_RECORD_NOT_FOUND: + return -ENOENT; + default: + return -EINVAL; + } } static int erst_timedout(u64 *t, u64 spin_unit) { - if ((s64)*t < spin_unit) { - printk(XENLOG_WARNING "Firmware does not respond in time\n"); - return 1; - } - *t -= spin_unit; - udelay(spin_unit); - return 0; + if ( (s64)*t < spin_unit ) + { + printk(XENLOG_WARNING "Firmware does not respond in time\n"); + return 1; + } + *t -= spin_unit; + udelay(spin_unit); + return 0; } static int erst_exec_load_var1(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - return __apei_exec_read_register(entry, &ctx->var1); + return __apei_exec_read_register(entry, &ctx->var1); } static int erst_exec_load_var2(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - return __apei_exec_read_register(entry, &ctx->var2); + return __apei_exec_read_register(entry, &ctx->var2); } static int erst_exec_store_var1(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - return __apei_exec_write_register(entry, ctx->var1); + return __apei_exec_write_register(entry, ctx->var1); } static int erst_exec_add(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - ctx->var1 += ctx->var2; - return 0; + ctx->var1 += ctx->var2; + return 0; } static int erst_exec_subtract(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - ctx->var1 -= ctx->var2; - return 0; + ctx->var1 -= ctx->var2; + return 0; } static int erst_exec_add_value(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - int rc; - u64 val; + int rc; + u64 val; - rc = __apei_exec_read_register(entry, &val); - if (rc) - return rc; - val += ctx->value; - rc = __apei_exec_write_register(entry, val); - return rc; + rc = __apei_exec_read_register(entry, &val); + if ( rc ) + return rc; + val += ctx->value; + rc = __apei_exec_write_register(entry, val); + return rc; } static int erst_exec_subtract_value(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - int rc; - u64 val; + int rc; + u64 val; - rc = __apei_exec_read_register(entry, &val); - if (rc) - return rc; - val -= ctx->value; - rc = __apei_exec_write_register(entry, val); - return rc; + rc = __apei_exec_read_register(entry, &val); + if ( rc ) + return rc; + val -= ctx->value; + rc = __apei_exec_write_register(entry, val); + return rc; } static int erst_exec_stall(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - 
udelay((ctx->var1 > FIRMWARE_MAX_STALL) ? - FIRMWARE_MAX_STALL : - ctx->var1); - return 0; + udelay((ctx->var1 > FIRMWARE_MAX_STALL) ? FIRMWARE_MAX_STALL : ctx->var1); + return 0; } static int erst_exec_stall_while_true(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) -{ - int rc; - u64 val; - u64 timeout = FIRMWARE_TIMEOUT; - u64 stall_time = (ctx->var1 > FIRMWARE_MAX_STALL) ? - FIRMWARE_MAX_STALL : - ctx->var1; - - for (;;) { - rc = __apei_exec_read_register(entry, &val); - if (rc) - return rc; - if (val != ctx->value) - break; - if (erst_timedout(&timeout, stall_time)) - return -EIO; - } - return 0; -} - -static int erst_exec_skip_next_instruction_if_true( - struct apei_exec_context *ctx, - struct acpi_whea_header *entry) -{ - int rc; - u64 val; - - rc = __apei_exec_read_register(entry, &val); - if (rc) - return rc; - if (val == ctx->value) { - ctx->ip += 2; - return APEI_EXEC_SET_IP; - } - - return 0; + struct acpi_whea_header *entry) +{ + int rc; + u64 val; + u64 timeout = FIRMWARE_TIMEOUT; + u64 stall_time = + (ctx->var1 > FIRMWARE_MAX_STALL) ? FIRMWARE_MAX_STALL : ctx->var1; + + for ( ;; ) + { + rc = __apei_exec_read_register(entry, &val); + if ( rc ) + return rc; + if ( val != ctx->value ) + break; + if ( erst_timedout(&timeout, stall_time) ) + return -EIO; + } + return 0; +} + +static int +erst_exec_skip_next_instruction_if_true(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + u64 val; + + rc = __apei_exec_read_register(entry, &val); + if ( rc ) + return rc; + if ( val == ctx->value ) + { + ctx->ip += 2; + return APEI_EXEC_SET_IP; + } + + return 0; } static int erst_exec_goto(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - ctx->ip = ctx->value; - return APEI_EXEC_SET_IP; + ctx->ip = ctx->value; + return APEI_EXEC_SET_IP; } static int erst_exec_set_src_address_base(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - return __apei_exec_read_register(entry, &ctx->src_base); + return __apei_exec_read_register(entry, &ctx->src_base); } static int erst_exec_set_dst_address_base(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - return __apei_exec_read_register(entry, &ctx->dst_base); + return __apei_exec_read_register(entry, &ctx->dst_base); } static int erst_exec_move_data(struct apei_exec_context *ctx, - struct acpi_whea_header *entry) + struct acpi_whea_header *entry) { - int rc; - u64 offset; - void *src, *dst; + int rc; + u64 offset; + void *src, *dst; - /* ioremap does not work in interrupt context */ - if (in_irq()) { - printk(KERN_WARNING - "MOVE_DATA cannot be used in interrupt context\n"); - return -EBUSY; - } + /* ioremap does not work in interrupt context */ + if ( in_irq() ) + { + printk(KERN_WARNING "MOVE_DATA cannot be used in interrupt context\n"); + return -EBUSY; + } - rc = __apei_exec_read_register(entry, &offset); - if (rc) - return rc; + rc = __apei_exec_read_register(entry, &offset); + if ( rc ) + return rc; - src = ioremap(ctx->src_base + offset, ctx->var2); - if (!src) - return -ENOMEM; + src = ioremap(ctx->src_base + offset, ctx->var2); + if ( !src ) + return -ENOMEM; - dst = ioremap(ctx->dst_base + offset, ctx->var2); - if (dst) { - memmove(dst, src, ctx->var2); - iounmap(dst); - } else - rc = -ENOMEM; + dst = ioremap(ctx->dst_base + offset, ctx->var2); + if ( dst ) + { + memmove(dst, src, ctx->var2); + iounmap(dst); + } + else + rc = -ENOMEM; - iounmap(src); 
+ iounmap(src); - return rc; + return rc; } static struct apei_exec_ins_type erst_ins_type[] = { - [ACPI_ERST_READ_REGISTER] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = apei_exec_read_register, - }, - [ACPI_ERST_READ_REGISTER_VALUE] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = apei_exec_read_register_value, - }, - [ACPI_ERST_WRITE_REGISTER] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = apei_exec_write_register, - }, - [ACPI_ERST_WRITE_REGISTER_VALUE] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = apei_exec_write_register_value, - }, - [ACPI_ERST_NOOP] = { - .flags = 0, - .run = apei_exec_noop, - }, - [ACPI_ERST_LOAD_VAR1] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = erst_exec_load_var1, - }, - [ACPI_ERST_LOAD_VAR2] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = erst_exec_load_var2, - }, - [ACPI_ERST_STORE_VAR1] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = erst_exec_store_var1, - }, - [ACPI_ERST_ADD] = { - .flags = 0, - .run = erst_exec_add, - }, - [ACPI_ERST_SUBTRACT] = { - .flags = 0, - .run = erst_exec_subtract, - }, - [ACPI_ERST_ADD_VALUE] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = erst_exec_add_value, - }, - [ACPI_ERST_SUBTRACT_VALUE] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = erst_exec_subtract_value, - }, - [ACPI_ERST_STALL] = { - .flags = 0, - .run = erst_exec_stall, - }, - [ACPI_ERST_STALL_WHILE_TRUE] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = erst_exec_stall_while_true, - }, - [ACPI_ERST_SKIP_NEXT_IF_TRUE] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = erst_exec_skip_next_instruction_if_true, - }, - [ACPI_ERST_GOTO] = { - .flags = 0, - .run = erst_exec_goto, - }, - [ACPI_ERST_SET_SRC_ADDRESS_BASE] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = erst_exec_set_src_address_base, - }, - [ACPI_ERST_SET_DST_ADDRESS_BASE] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = erst_exec_set_dst_address_base, - }, - [ACPI_ERST_MOVE_DATA] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = erst_exec_move_data, - }, + [ACPI_ERST_READ_REGISTER] = + { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_read_register, + }, + [ACPI_ERST_READ_REGISTER_VALUE] = + { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_read_register_value, + }, + [ACPI_ERST_WRITE_REGISTER] = + { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_write_register, + }, + [ACPI_ERST_WRITE_REGISTER_VALUE] = + { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_write_register_value, + }, + [ACPI_ERST_NOOP] = + { + .flags = 0, + .run = apei_exec_noop, + }, + [ACPI_ERST_LOAD_VAR1] = + { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_load_var1, + }, + [ACPI_ERST_LOAD_VAR2] = + { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_load_var2, + }, + [ACPI_ERST_STORE_VAR1] = + { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_store_var1, + }, + [ACPI_ERST_ADD] = + { + .flags = 0, + .run = erst_exec_add, + }, + [ACPI_ERST_SUBTRACT] = + { + .flags = 0, + .run = erst_exec_subtract, + }, + [ACPI_ERST_ADD_VALUE] = + { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_add_value, + }, + [ACPI_ERST_SUBTRACT_VALUE] = + { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_subtract_value, + }, + [ACPI_ERST_STALL] = + { + .flags = 0, + .run = erst_exec_stall, + }, + [ACPI_ERST_STALL_WHILE_TRUE] = + { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_stall_while_true, + }, + [ACPI_ERST_SKIP_NEXT_IF_TRUE] = + { + .flags 
= APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_skip_next_instruction_if_true, + }, + [ACPI_ERST_GOTO] = + { + .flags = 0, + .run = erst_exec_goto, + }, + [ACPI_ERST_SET_SRC_ADDRESS_BASE] = + { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_set_src_address_base, + }, + [ACPI_ERST_SET_DST_ADDRESS_BASE] = + { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_set_dst_address_base, + }, + [ACPI_ERST_MOVE_DATA] = + { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_move_data, + }, }; static inline void erst_exec_ctx_init(struct apei_exec_context *ctx) { - apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type), - ERST_TAB_ENTRY(erst_tab), erst_tab->entries); + apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type), + ERST_TAB_ENTRY(erst_tab), erst_tab->entries); } static int erst_get_erange(struct erst_erange *range) { - struct apei_exec_context ctx; - int rc; + struct apei_exec_context ctx; + int rc; - erst_exec_ctx_init(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE); - if (rc) - return rc; - range->base = apei_exec_ctx_get_output(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH); - if (rc) - return rc; - range->size = apei_exec_ctx_get_output(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES); - if (rc) - return rc; - range->attr = apei_exec_ctx_get_output(&ctx); + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE); + if ( rc ) + return rc; + range->base = apei_exec_ctx_get_output(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH); + if ( rc ) + return rc; + range->size = apei_exec_ctx_get_output(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES); + if ( rc ) + return rc; + range->attr = apei_exec_ctx_get_output(&ctx); - return 0; + return 0; } #ifndef NDEBUG /* currently dead code */ static ssize_t __erst_get_record_count(void) { - struct apei_exec_context ctx; - int rc; - u64 output; - ssize_t count; + struct apei_exec_context ctx; + int rc; + u64 output; + ssize_t count; - erst_exec_ctx_init(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT); - if (rc) - return rc; - count = output = apei_exec_ctx_get_output(&ctx); - return count >= 0 && count == output ? count : -ERANGE; + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT); + if ( rc ) + return rc; + count = output = apei_exec_ctx_get_output(&ctx); + return count >= 0 && count == output ? 
count : -ERANGE; } ssize_t erst_get_record_count(void) { - ssize_t count; - unsigned long flags; + ssize_t count; + unsigned long flags; - if (!erst_enabled) - return -ENODEV; + if ( !erst_enabled ) + return -ENODEV; - spin_lock_irqsave(&erst_lock, flags); - count = __erst_get_record_count(); - spin_unlock_irqrestore(&erst_lock, flags); + spin_lock_irqsave(&erst_lock, flags); + count = __erst_get_record_count(); + spin_unlock_irqrestore(&erst_lock, flags); - return count; + return count; } static int __erst_get_next_record_id(u64 *record_id) { - struct apei_exec_context ctx; - int rc; + struct apei_exec_context ctx; + int rc; - erst_exec_ctx_init(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID); - if (rc) - return rc; - *record_id = apei_exec_ctx_get_output(&ctx); + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID); + if ( rc ) + return rc; + *record_id = apei_exec_ctx_get_output(&ctx); - return 0; + return 0; } /* @@ -435,142 +458,145 @@ static int __erst_get_next_record_id(u64 *record_id) */ int erst_get_next_record_id(u64 *record_id) { - int rc; - unsigned long flags; + int rc; + unsigned long flags; - if (!erst_enabled) - return -ENODEV; + if ( !erst_enabled ) + return -ENODEV; - spin_lock_irqsave(&erst_lock, flags); - rc = __erst_get_next_record_id(record_id); - spin_unlock_irqrestore(&erst_lock, flags); + spin_lock_irqsave(&erst_lock, flags); + rc = __erst_get_next_record_id(record_id); + spin_unlock_irqrestore(&erst_lock, flags); - return rc; + return rc; } #endif /* currently dead code */ static int __erst_write_to_storage(u64 offset) { - struct apei_exec_context ctx; - u64 timeout = FIRMWARE_TIMEOUT; - u64 val; - int rc; - - erst_exec_ctx_init(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE); - if (rc) - return rc; - apei_exec_ctx_set_input(&ctx, offset); - rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET); - if (rc) - return rc; - rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); - if (rc) - return rc; - for (;;) { - rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); - if (rc) - return rc; - val = apei_exec_ctx_get_output(&ctx); - if (!val) - break; - if (erst_timedout(&timeout, SPIN_UNIT)) - return -EIO; - } - rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); - if (rc) - return rc; - val = apei_exec_ctx_get_output(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_END); - if (rc) - return rc; - - return erst_errno(val); + struct apei_exec_context ctx; + u64 timeout = FIRMWARE_TIMEOUT; + u64 val; + int rc; + + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE); + if ( rc ) + return rc; + apei_exec_ctx_set_input(&ctx, offset); + rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET); + if ( rc ) + return rc; + rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); + if ( rc ) + return rc; + for ( ;; ) + { + rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); + if ( rc ) + return rc; + val = apei_exec_ctx_get_output(&ctx); + if ( !val ) + break; + if ( erst_timedout(&timeout, SPIN_UNIT) ) + return -EIO; + } + rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); + if ( rc ) + return rc; + val = apei_exec_ctx_get_output(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_END); + if ( rc ) + return rc; + + return erst_errno(val); } #ifndef NDEBUG /* currently dead code */ static int __erst_read_from_storage(u64 record_id, u64 offset) { - struct apei_exec_context ctx; - u64 timeout = FIRMWARE_TIMEOUT; - u64 val; - int rc; - - erst_exec_ctx_init(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ); - if 
(rc) - return rc; - apei_exec_ctx_set_input(&ctx, offset); - rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET); - if (rc) - return rc; - apei_exec_ctx_set_input(&ctx, record_id); - rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID); - if (rc) - return rc; - rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); - if (rc) - return rc; - for (;;) { - rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); - if (rc) - return rc; - val = apei_exec_ctx_get_output(&ctx); - if (!val) - break; - if (erst_timedout(&timeout, SPIN_UNIT)) - return -EIO; - }; - rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); - if (rc) - return rc; - val = apei_exec_ctx_get_output(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_END); - if (rc) - return rc; - - return erst_errno(val); + struct apei_exec_context ctx; + u64 timeout = FIRMWARE_TIMEOUT; + u64 val; + int rc; + + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ); + if ( rc ) + return rc; + apei_exec_ctx_set_input(&ctx, offset); + rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET); + if ( rc ) + return rc; + apei_exec_ctx_set_input(&ctx, record_id); + rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID); + if ( rc ) + return rc; + rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); + if ( rc ) + return rc; + for ( ;; ) + { + rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); + if ( rc ) + return rc; + val = apei_exec_ctx_get_output(&ctx); + if ( !val ) + break; + if ( erst_timedout(&timeout, SPIN_UNIT) ) + return -EIO; + }; + rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); + if ( rc ) + return rc; + val = apei_exec_ctx_get_output(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_END); + if ( rc ) + return rc; + + return erst_errno(val); } static int __erst_clear_from_storage(u64 record_id) { - struct apei_exec_context ctx; - u64 timeout = FIRMWARE_TIMEOUT; - u64 val; - int rc; - - erst_exec_ctx_init(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR); - if (rc) - return rc; - apei_exec_ctx_set_input(&ctx, record_id); - rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID); - if (rc) - return rc; - rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); - if (rc) - return rc; - for (;;) { - rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); - if (rc) - return rc; - val = apei_exec_ctx_get_output(&ctx); - if (!val) - break; - if (erst_timedout(&timeout, SPIN_UNIT)) - return -EIO; - } - rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); - if (rc) - return rc; - val = apei_exec_ctx_get_output(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_END); - if (rc) - return rc; - - return erst_errno(val); + struct apei_exec_context ctx; + u64 timeout = FIRMWARE_TIMEOUT; + u64 val; + int rc; + + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR); + if ( rc ) + return rc; + apei_exec_ctx_set_input(&ctx, record_id); + rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID); + if ( rc ) + return rc; + rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); + if ( rc ) + return rc; + for ( ;; ) + { + rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); + if ( rc ) + return rc; + val = apei_exec_ctx_get_output(&ctx); + if ( !val ) + break; + if ( erst_timedout(&timeout, SPIN_UNIT) ) + return -EIO; + } + rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); + if ( rc ) + return rc; + val = apei_exec_ctx_get_output(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_END); + if ( rc ) + return rc; + + return erst_errno(val); } #endif /* currently dead code */ @@ -578,105 +604,105 @@ static int __erst_clear_from_storage(u64 
record_id) /* NVRAM ERST Error Log Address Range is not supported yet */ static int __erst_write_to_nvram(const struct cper_record_header *record) { - /* do not print message, because printk is not safe for NMI */ - return -ENOSYS; + /* do not print message, because printk is not safe for NMI */ + return -ENOSYS; } #ifndef NDEBUG /* currently dead code */ static int __erst_read_to_erange_from_nvram(u64 record_id, u64 *offset) { - printk(KERN_WARNING - "NVRAM ERST Log Address Range is not implemented yet\n"); - return -ENOSYS; + printk(KERN_WARNING + "NVRAM ERST Log Address Range is not implemented yet\n"); + return -ENOSYS; } static int __erst_clear_from_nvram(u64 record_id) { - printk(KERN_WARNING - "NVRAM ERST Log Address Range is not implemented yet\n"); - return -ENOSYS; + printk(KERN_WARNING + "NVRAM ERST Log Address Range is not implemented yet\n"); + return -ENOSYS; } #endif /* currently dead code */ int erst_write(const struct cper_record_header *record) { - int rc; - unsigned long flags; - struct cper_record_header *rcd_erange; + int rc; + unsigned long flags; + struct cper_record_header *rcd_erange; - if (!record) - return -EINVAL; + if ( !record ) + return -EINVAL; - if (!erst_enabled) - return -ENODEV; + if ( !erst_enabled ) + return -ENODEV; - if (memcmp(record->signature, CPER_SIG_RECORD, CPER_SIG_SIZE)) - return -EINVAL; + if ( memcmp(record->signature, CPER_SIG_RECORD, CPER_SIG_SIZE) ) + return -EINVAL; - if (erst_erange.attr & ERST_RANGE_NVRAM) { - if (!spin_trylock_irqsave(&erst_lock, flags)) - return -EBUSY; - rc = __erst_write_to_nvram(record); - spin_unlock_irqrestore(&erst_lock, flags); - return rc; - } + if ( erst_erange.attr & ERST_RANGE_NVRAM ) + { + if ( !spin_trylock_irqsave(&erst_lock, flags) ) + return -EBUSY; + rc = __erst_write_to_nvram(record); + spin_unlock_irqrestore(&erst_lock, flags); + return rc; + } - if (record->record_length > erst_erange.size) - return -EINVAL; + if ( record->record_length > erst_erange.size ) + return -EINVAL; - if (!spin_trylock_irqsave(&erst_lock, flags)) - return -EBUSY; - memcpy(erst_erange.vaddr, record, record->record_length); - rcd_erange = erst_erange.vaddr; - /* signature for serialization system */ - memcpy(&rcd_erange->persistence_information, "ER", 2); + if ( !spin_trylock_irqsave(&erst_lock, flags) ) + return -EBUSY; + memcpy(erst_erange.vaddr, record, record->record_length); + rcd_erange = erst_erange.vaddr; + /* signature for serialization system */ + memcpy(&rcd_erange->persistence_information, "ER", 2); - rc = __erst_write_to_storage(0); - spin_unlock_irqrestore(&erst_lock, flags); + rc = __erst_write_to_storage(0); + spin_unlock_irqrestore(&erst_lock, flags); - return rc; + return rc; } #ifndef NDEBUG /* currently dead code */ static int __erst_read_to_erange(u64 record_id, u64 *offset) { - int rc; + int rc; - if (erst_erange.attr & ERST_RANGE_NVRAM) - return __erst_read_to_erange_from_nvram( - record_id, offset); + if ( erst_erange.attr & ERST_RANGE_NVRAM ) + return __erst_read_to_erange_from_nvram(record_id, offset); - rc = __erst_read_from_storage(record_id, 0); - if (rc) - return rc; - *offset = 0; + rc = __erst_read_from_storage(record_id, 0); + if ( rc ) + return rc; + *offset = 0; - return 0; + return 0; } static ssize_t __erst_read(u64 record_id, struct cper_record_header *record, - size_t buflen) + size_t buflen) { - int rc; - u64 offset; - ssize_t len; - struct cper_record_header *rcd_tmp; + int rc; + u64 offset; + ssize_t len; + struct cper_record_header *rcd_tmp; - rc = __erst_read_to_erange(record_id, 
&offset); - if (rc) - return rc; - rcd_tmp = erst_erange.vaddr + offset; - if (rcd_tmp->record_length > buflen) - return -ENOBUFS; - len = rcd_tmp->record_length; - if (len < 0) - return -ERANGE; - memcpy(record, rcd_tmp, len); + rc = __erst_read_to_erange(record_id, &offset); + if ( rc ) + return rc; + rcd_tmp = erst_erange.vaddr + offset; + if ( rcd_tmp->record_length > buflen ) + return -ENOBUFS; + len = rcd_tmp->record_length; + if ( len < 0 ) + return -ERANGE; + memcpy(record, rcd_tmp, len); - return len; + return len; } /* @@ -685,18 +711,18 @@ static ssize_t __erst_read(u64 record_id, struct cper_record_header *record, * else everything is OK, and return value is record length */ ssize_t erst_read(u64 record_id, struct cper_record_header *record, - size_t buflen) + size_t buflen) { - ssize_t len; - unsigned long flags; + ssize_t len; + unsigned long flags; - if (!erst_enabled) - return -ENODEV; + if ( !erst_enabled ) + return -ENODEV; - spin_lock_irqsave(&erst_lock, flags); - len = __erst_read(record_id, record, buflen); - spin_unlock_irqrestore(&erst_lock, flags); - return len; + spin_lock_irqsave(&erst_lock, flags); + len = __erst_read(record_id, record, buflen); + spin_unlock_irqrestore(&erst_lock, flags); + return len; } /* @@ -707,138 +733,143 @@ ssize_t erst_read(u64 record_id, struct cper_record_header *record, */ ssize_t erst_read_next(struct cper_record_header *record, size_t buflen) { - int rc; - ssize_t len; - unsigned long flags; - u64 record_id; + int rc; + ssize_t len; + unsigned long flags; + u64 record_id; - if (!erst_enabled) - return -ENODEV; + if ( !erst_enabled ) + return -ENODEV; - spin_lock_irqsave(&erst_lock, flags); - rc = __erst_get_next_record_id(&record_id); - if (rc) { - spin_unlock_irqrestore(&erst_lock, flags); - return rc; - } - /* no more record */ - if (record_id == APEI_ERST_INVALID_RECORD_ID) { - spin_unlock_irqrestore(&erst_lock, flags); - return 0; - } + spin_lock_irqsave(&erst_lock, flags); + rc = __erst_get_next_record_id(&record_id); + if ( rc ) + { + spin_unlock_irqrestore(&erst_lock, flags); + return rc; + } + /* no more record */ + if ( record_id == APEI_ERST_INVALID_RECORD_ID ) + { + spin_unlock_irqrestore(&erst_lock, flags); + return 0; + } - len = __erst_read(record_id, record, buflen); - spin_unlock_irqrestore(&erst_lock, flags); + len = __erst_read(record_id, record, buflen); + spin_unlock_irqrestore(&erst_lock, flags); - return len; + return len; } int erst_clear(u64 record_id) { - int rc; - unsigned long flags; + int rc; + unsigned long flags; - if (!erst_enabled) - return -ENODEV; + if ( !erst_enabled ) + return -ENODEV; - spin_lock_irqsave(&erst_lock, flags); - if (erst_erange.attr & ERST_RANGE_NVRAM) - rc = __erst_clear_from_nvram(record_id); - else - rc = __erst_clear_from_storage(record_id); - spin_unlock_irqrestore(&erst_lock, flags); + spin_lock_irqsave(&erst_lock, flags); + if ( erst_erange.attr & ERST_RANGE_NVRAM ) + rc = __erst_clear_from_nvram(record_id); + else + rc = __erst_clear_from_storage(record_id); + spin_unlock_irqrestore(&erst_lock, flags); - return rc; + return rc; } #endif /* currently dead code */ static int __init erst_check_table(struct acpi_table_erst *erst_tab) { - if (erst_tab->header.length < sizeof(*erst_tab)) - return -EINVAL; + if ( erst_tab->header.length < sizeof(*erst_tab) ) + return -EINVAL; - switch (erst_tab->header_length) { - case sizeof(*erst_tab) - sizeof(erst_tab->header): - /* - * While invalid per specification, there are (early?) 
systems - * indicating the full header size here, so accept that value too. - */ - case sizeof(*erst_tab): - break; - default: - return -EINVAL; - } + switch (erst_tab->header_length) + { + case sizeof(*erst_tab) - sizeof(erst_tab->header): + /* + * While invalid per specification, there are (early?) systems + * indicating the full header size here, so accept that value too. + */ + case sizeof(*erst_tab): + break; + default: + return -EINVAL; + } - if (erst_tab->entries != - (erst_tab->header.length - sizeof(*erst_tab)) / - sizeof(struct acpi_erst_entry)) - return -EINVAL; + if ( erst_tab->entries != (erst_tab->header.length - sizeof(*erst_tab)) / + sizeof(struct acpi_erst_entry) ) + return -EINVAL; - return 0; + return 0; } int __init erst_init(void) { - int rc = 0; - acpi_status status; - acpi_physical_address erst_addr; - acpi_native_uint erst_len; - struct apei_exec_context ctx; - - if (acpi_disabled) - return -ENODEV; - - status = acpi_get_table_phys(ACPI_SIG_ERST, 0, &erst_addr, &erst_len); - if (status == AE_NOT_FOUND) { - printk(KERN_INFO "ERST table was not found\n"); - return -ENODEV; - } - if (ACPI_FAILURE(status)) { - const char *msg = acpi_format_exception(status); - printk(KERN_WARNING "Failed to get ERST table: %s\n", msg); - return -EINVAL; - } - map_pages_to_xen((unsigned long)__va(erst_addr), maddr_to_mfn(erst_addr), - PFN_UP(erst_addr + erst_len) - PFN_DOWN(erst_addr), - PAGE_HYPERVISOR); - erst_tab = __va(erst_addr); - - rc = erst_check_table(erst_tab); - if (rc) { - printk(KERN_ERR "ERST table is invalid\n"); - return rc; - } - - erst_exec_ctx_init(&ctx); - rc = apei_exec_pre_map_gars(&ctx); - if (rc) - return rc; - - rc = erst_get_erange(&erst_erange); - if (rc) { - if (rc == -ENODEV) - printk(KERN_INFO - "The corresponding hardware device or firmware " - "implementation is not available.\n"); - else - printk(KERN_ERR - "Failed to get Error Log Address Range.\n"); - goto err_unmap_reg; - } - - erst_erange.vaddr = apei_pre_map(erst_erange.base, erst_erange.size); - if (!erst_erange.vaddr) { - rc = -ENOMEM; - goto err_unmap_reg; - } - - printk(KERN_INFO "Xen ERST support is initialized.\n"); - erst_enabled = 1; - - return 0; + int rc = 0; + acpi_status status; + acpi_physical_address erst_addr; + acpi_native_uint erst_len; + struct apei_exec_context ctx; + + if ( acpi_disabled ) + return -ENODEV; + + status = acpi_get_table_phys(ACPI_SIG_ERST, 0, &erst_addr, &erst_len); + if ( status == AE_NOT_FOUND ) + { + printk(KERN_INFO "ERST table was not found\n"); + return -ENODEV; + } + if ( ACPI_FAILURE(status) ) + { + const char *msg = acpi_format_exception(status); + printk(KERN_WARNING "Failed to get ERST table: %s\n", msg); + return -EINVAL; + } + map_pages_to_xen((unsigned long)__va(erst_addr), maddr_to_mfn(erst_addr), + PFN_UP(erst_addr + erst_len) - PFN_DOWN(erst_addr), + PAGE_HYPERVISOR); + erst_tab = __va(erst_addr); + + rc = erst_check_table(erst_tab); + if ( rc ) + { + printk(KERN_ERR "ERST table is invalid\n"); + return rc; + } + + erst_exec_ctx_init(&ctx); + rc = apei_exec_pre_map_gars(&ctx); + if ( rc ) + return rc; + + rc = erst_get_erange(&erst_erange); + if ( rc ) + { + if ( rc == -ENODEV ) + printk(KERN_INFO "The corresponding hardware device or firmware " + "implementation is not available.\n"); + else + printk(KERN_ERR "Failed to get Error Log Address Range.\n"); + goto err_unmap_reg; + } + + erst_erange.vaddr = apei_pre_map(erst_erange.base, erst_erange.size); + if ( !erst_erange.vaddr ) + { + rc = -ENOMEM; + goto err_unmap_reg; + } + + printk(KERN_INFO 
"Xen ERST support is initialized.\n"); + erst_enabled = 1; + + return 0; err_unmap_reg: - apei_exec_post_unmap_gars(&ctx); - return rc; + apei_exec_post_unmap_gars(&ctx); + return rc; } diff --git a/xen/drivers/acpi/apei/hest.c b/xen/drivers/acpi/apei/hest.c index 70734ab0e2..813635fa4c 100644 --- a/xen/drivers/acpi/apei/hest.c +++ b/xen/drivers/acpi/apei/hest.c @@ -46,81 +46,83 @@ boolean_param("hest_disable", hest_disable); static struct acpi_table_hest *__read_mostly hest_tab; static const int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = { - [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */ - [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1, - [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi), - [ACPI_HEST_TYPE_AER_ROOT_PORT] = sizeof(struct acpi_hest_aer_root), - [ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer), - [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge), - [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic), + [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */ + [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1, + [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi), + [ACPI_HEST_TYPE_AER_ROOT_PORT] = sizeof(struct acpi_hest_aer_root), + [ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer), + [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge), + [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic), }; static int hest_esrc_len(const struct acpi_hest_header *hest_hdr) { - u16 hest_type = hest_hdr->type; - int len; - - if (hest_type >= ACPI_HEST_TYPE_RESERVED) - return 0; - - len = hest_esrc_len_tab[hest_type]; - - if (hest_type == ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) { - const struct acpi_hest_ia_corrected *cmc = - container_of(hest_hdr, - const struct acpi_hest_ia_corrected, - header); - - len = sizeof(*cmc) + cmc->num_hardware_banks * - sizeof(struct acpi_hest_ia_error_bank); - } else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) { - const struct acpi_hest_ia_machine_check *mc = - container_of(hest_hdr, - const struct acpi_hest_ia_machine_check, - header); - - len = sizeof(*mc) + mc->num_hardware_banks * - sizeof(struct acpi_hest_ia_error_bank); - } - BUG_ON(len == -1); - - return len; + u16 hest_type = hest_hdr->type; + int len; + + if ( hest_type >= ACPI_HEST_TYPE_RESERVED ) + return 0; + + len = hest_esrc_len_tab[hest_type]; + + if ( hest_type == ACPI_HEST_TYPE_IA32_CORRECTED_CHECK ) + { + const struct acpi_hest_ia_corrected *cmc = + container_of(hest_hdr, const struct acpi_hest_ia_corrected, header); + + len = sizeof(*cmc) + + cmc->num_hardware_banks * sizeof(struct acpi_hest_ia_error_bank); + } + else if ( hest_type == ACPI_HEST_TYPE_IA32_CHECK ) + { + const struct acpi_hest_ia_machine_check *mc = container_of( + hest_hdr, const struct acpi_hest_ia_machine_check, header); + + len = sizeof(*mc) + + mc->num_hardware_banks * sizeof(struct acpi_hest_ia_error_bank); + } + BUG_ON(len == -1); + + return len; }; int apei_hest_parse(apei_hest_func_t func, void *data) { - struct acpi_hest_header *hest_hdr; - int i, rc, len; - - if (hest_disable || !hest_tab) - return -EINVAL; - - hest_hdr = (struct acpi_hest_header *)(hest_tab + 1); - for (i = 0; i < hest_tab->error_source_count; i++) { - len = hest_esrc_len(hest_hdr); - if (!len) { - printk(XENLOG_WARNING HEST_PFX - "Unknown or unused hardware error source " - "type: %d for hardware error source: %d\n", - hest_hdr->type, hest_hdr->source_id); - return -EINVAL; - } - if ((void *)hest_hdr + len > - (void *)hest_tab + 
hest_tab->header.length) { - printk(XENLOG_WARNING HEST_PFX - "Table contents overflow for hardware error source: %d\n", - hest_hdr->source_id); - return -EINVAL; - } - - rc = func(hest_hdr, data); - if (rc) - return rc; - - hest_hdr = (void *)hest_hdr + len; - } - - return 0; + struct acpi_hest_header *hest_hdr; + int i, rc, len; + + if ( hest_disable || !hest_tab ) + return -EINVAL; + + hest_hdr = (struct acpi_hest_header *)(hest_tab + 1); + for ( i = 0; i < hest_tab->error_source_count; i++ ) + { + len = hest_esrc_len(hest_hdr); + if ( !len ) + { + printk(XENLOG_WARNING HEST_PFX + "Unknown or unused hardware error source " + "type: %d for hardware error source: %d\n", + hest_hdr->type, hest_hdr->source_id); + return -EINVAL; + } + if ( (void *)hest_hdr + len > + (void *)hest_tab + hest_tab->header.length ) + { + printk(XENLOG_WARNING HEST_PFX + "Table contents overflow for hardware error source: %d\n", + hest_hdr->source_id); + return -EINVAL; + } + + rc = func(hest_hdr, data); + if ( rc ) + return rc; + + hest_hdr = (void *)hest_hdr + len; + } + + return 0; } /* @@ -128,72 +130,75 @@ int apei_hest_parse(apei_hest_func_t func, void *data) * along with a set of MC banks which work in FF mode. */ static int __init hest_parse_cmc(const struct acpi_hest_header *hest_hdr, - void *data) + void *data) { #ifdef CONFIG_X86_MCE - unsigned int i; - const struct acpi_hest_ia_corrected *cmc; - const struct acpi_hest_ia_error_bank *mc_bank; - - if (hest_hdr->type != ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) - return 0; - - cmc = container_of(hest_hdr, const struct acpi_hest_ia_corrected, header); - if (!cmc->enabled) - return 0; - - /* - * We expect HEST to provide a list of MC banks that report errors - * in firmware first mode. Otherwise, return non-zero value to - * indicate that we are done parsing HEST. - */ - if (!(cmc->flags & ACPI_HEST_FIRMWARE_FIRST) || !cmc->num_hardware_banks) - return 1; - - printk(XENLOG_INFO HEST_PFX "Enabling Firmware First mode for corrected errors.\n"); - - mc_bank = (const struct acpi_hest_ia_error_bank *)(cmc + 1); - for (i = 0; i < cmc->num_hardware_banks; i++, mc_bank++) - mce_disable_bank(mc_bank->bank_number); + unsigned int i; + const struct acpi_hest_ia_corrected *cmc; + const struct acpi_hest_ia_error_bank *mc_bank; + + if ( hest_hdr->type != ACPI_HEST_TYPE_IA32_CORRECTED_CHECK ) + return 0; + + cmc = container_of(hest_hdr, const struct acpi_hest_ia_corrected, header); + if ( !cmc->enabled ) + return 0; + + /* + * We expect HEST to provide a list of MC banks that report errors + * in firmware first mode. Otherwise, return non-zero value to + * indicate that we are done parsing HEST. 
+ */ + if ( !(cmc->flags & ACPI_HEST_FIRMWARE_FIRST) || !cmc->num_hardware_banks ) + return 1; + + printk(XENLOG_INFO HEST_PFX + "Enabling Firmware First mode for corrected errors.\n"); + + mc_bank = (const struct acpi_hest_ia_error_bank *)(cmc + 1); + for ( i = 0; i < cmc->num_hardware_banks; i++, mc_bank++ ) + mce_disable_bank(mc_bank->bank_number); #else -# define acpi_disable_cmcff 1 +#define acpi_disable_cmcff 1 #endif - return 1; + return 1; } void __init acpi_hest_init(void) { - acpi_status status; - acpi_physical_address hest_addr; - acpi_native_uint hest_len; - - if (acpi_disabled) - return; - - if (hest_disable) { - printk(XENLOG_INFO HEST_PFX "Table parsing disabled.\n"); - return; - } - - status = acpi_get_table_phys(ACPI_SIG_HEST, 0, &hest_addr, &hest_len); - if (status == AE_NOT_FOUND) - goto err; - if (ACPI_FAILURE(status)) { - printk(XENLOG_ERR HEST_PFX "Failed to get table, %s\n", - acpi_format_exception(status)); - goto err; - } - map_pages_to_xen((unsigned long)__va(hest_addr), maddr_to_mfn(hest_addr), - PFN_UP(hest_addr + hest_len) - PFN_DOWN(hest_addr), - PAGE_HYPERVISOR); - hest_tab = __va(hest_addr); - - if (!acpi_disable_cmcff) - apei_hest_parse(hest_parse_cmc, NULL); - - printk(XENLOG_INFO HEST_PFX "Table parsing has been initialized\n"); - return; + acpi_status status; + acpi_physical_address hest_addr; + acpi_native_uint hest_len; + + if ( acpi_disabled ) + return; + + if ( hest_disable ) + { + printk(XENLOG_INFO HEST_PFX "Table parsing disabled.\n"); + return; + } + + status = acpi_get_table_phys(ACPI_SIG_HEST, 0, &hest_addr, &hest_len); + if ( status == AE_NOT_FOUND ) + goto err; + if ( ACPI_FAILURE(status) ) + { + printk(XENLOG_ERR HEST_PFX "Failed to get table, %s\n", + acpi_format_exception(status)); + goto err; + } + map_pages_to_xen((unsigned long)__va(hest_addr), maddr_to_mfn(hest_addr), + PFN_UP(hest_addr + hest_len) - PFN_DOWN(hest_addr), + PAGE_HYPERVISOR); + hest_tab = __va(hest_addr); + + if ( !acpi_disable_cmcff ) + apei_hest_parse(hest_parse_cmc, NULL); + + printk(XENLOG_INFO HEST_PFX "Table parsing has been initialized\n"); + return; err: - hest_disable = 1; + hest_disable = 1; } diff --git a/xen/drivers/acpi/hwregs.c b/xen/drivers/acpi/hwregs.c index 958d374e0c..28ac4b4497 100644 --- a/xen/drivers/acpi/hwregs.c +++ b/xen/drivers/acpi/hwregs.c @@ -49,7 +49,7 @@ #include #include -#define _COMPONENT ACPI_HARDWARE +#define _COMPONENT ACPI_HARDWARE ACPI_MODULE_NAME("hwregs") /******************************************************************************* @@ -66,15 +66,15 @@ ACPI_MODULE_NAME("hwregs") static struct acpi_bit_register_info * acpi_hw_get_bit_register_info(u32 register_id) { - ACPI_FUNCTION_ENTRY(); + ACPI_FUNCTION_ENTRY(); - if (register_id > ACPI_BITREG_MAX) { - ACPI_DEBUG_PRINT((AE_INFO, "Invalid BitRegister ID: %X", - register_id)); - return (NULL); - } + if ( register_id > ACPI_BITREG_MAX ) + { + ACPI_DEBUG_PRINT((AE_INFO, "Invalid BitRegister ID: %X", register_id)); + return (NULL); + } - return (&acpi_gbl_bit_register_info[register_id]); + return (&acpi_gbl_bit_register_info[register_id]); } /******************************************************************************* @@ -91,42 +91,41 @@ acpi_hw_get_bit_register_info(u32 register_id) * ******************************************************************************/ -acpi_status acpi_get_register(u32 register_id, u32 * return_value) +acpi_status acpi_get_register(u32 register_id, u32 *return_value) { - u32 register_value = 0; - struct acpi_bit_register_info *bit_reg_info; - 
acpi_status status; + u32 register_value = 0; + struct acpi_bit_register_info *bit_reg_info; + acpi_status status; - ACPI_FUNCTION_TRACE(acpi_get_register); + ACPI_FUNCTION_TRACE(acpi_get_register); - /* Get the info structure corresponding to the requested ACPI Register */ + /* Get the info structure corresponding to the requested ACPI Register */ - bit_reg_info = acpi_hw_get_bit_register_info(register_id); - if (!bit_reg_info) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } + bit_reg_info = acpi_hw_get_bit_register_info(register_id); + if ( !bit_reg_info ) + { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } - /* Read from the register */ + /* Read from the register */ - status = acpi_hw_register_read(bit_reg_info->parent_register, - ®ister_value); + status = + acpi_hw_register_read(bit_reg_info->parent_register, ®ister_value); - if (ACPI_SUCCESS(status)) { + if ( ACPI_SUCCESS(status) ) + { + /* Normalize the value that was read */ - /* Normalize the value that was read */ + register_value = ((register_value & bit_reg_info->access_bit_mask) >> + bit_reg_info->bit_position); - register_value = - ((register_value & bit_reg_info->access_bit_mask) - >> bit_reg_info->bit_position); + *return_value = register_value; - *return_value = register_value; + ACPI_DEBUG_PRINT((ACPI_DB_IO, "Read value %8.8X register %X\n", + register_value, bit_reg_info->parent_register)); + } - ACPI_DEBUG_PRINT((ACPI_DB_IO, "Read value %8.8X register %X\n", - register_value, - bit_reg_info->parent_register)); - } - - return_ACPI_STATUS(status); + return_ACPI_STATUS(status); } /******************************************************************************* @@ -144,135 +143,124 @@ acpi_status acpi_get_register(u32 register_id, u32 * return_value) ******************************************************************************/ acpi_status acpi_set_register(u32 register_id, u32 value) { - u32 register_value = 0; - struct acpi_bit_register_info *bit_reg_info; - acpi_status status; - - ACPI_FUNCTION_TRACE_U32(acpi_set_register, register_id); - - /* Get the info structure corresponding to the requested ACPI Register */ - - bit_reg_info = acpi_hw_get_bit_register_info(register_id); - if (!bit_reg_info) { - ACPI_DEBUG_PRINT((AE_INFO, "Bad ACPI HW RegisterId: %X", - register_id)); - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* Always do a register read first so we can insert the new bits */ - - status = acpi_hw_register_read(bit_reg_info->parent_register, - ®ister_value); - if (ACPI_FAILURE(status)) { - goto unlock_and_exit; - } - - /* - * Decode the Register ID - * Register ID = [Register block ID] | [bit ID] - * - * Check bit ID to fine locate Register offset. - * Check Mask to determine Register offset, and then read-write. - */ - switch (bit_reg_info->parent_register) { - case ACPI_REGISTER_PM1_STATUS: - - /* - * Status Registers are different from the rest. Clear by - * writing 1, and writing 0 has no effect. So, the only relevant - * information is the single bit we're interested in, all others should - * be written as 0 so they will be left unchanged. 
- */ - value = ACPI_REGISTER_PREPARE_BITS(value, - bit_reg_info->bit_position, - bit_reg_info-> - access_bit_mask); - if (value) { - status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, - (u16) value); - register_value = 0; + u32 register_value = 0; + struct acpi_bit_register_info *bit_reg_info; + acpi_status status; + + ACPI_FUNCTION_TRACE_U32(acpi_set_register, register_id); + + /* Get the info structure corresponding to the requested ACPI Register */ + + bit_reg_info = acpi_hw_get_bit_register_info(register_id); + if ( !bit_reg_info ) + { + ACPI_DEBUG_PRINT((AE_INFO, "Bad ACPI HW RegisterId: %X", register_id)); + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + /* Always do a register read first so we can insert the new bits */ + + status = + acpi_hw_register_read(bit_reg_info->parent_register, ®ister_value); + if ( ACPI_FAILURE(status) ) + { + goto unlock_and_exit; + } + + /* + * Decode the Register ID + * Register ID = [Register block ID] | [bit ID] + * + * Check bit ID to fine locate Register offset. + * Check Mask to determine Register offset, and then read-write. + */ + switch (bit_reg_info->parent_register) + { + case ACPI_REGISTER_PM1_STATUS: + + /* + * Status Registers are different from the rest. Clear by + * writing 1, and writing 0 has no effect. So, the only relevant + * information is the single bit we're interested in, all others should + * be written as 0 so they will be left unchanged. + */ + value = ACPI_REGISTER_PREPARE_BITS(value, bit_reg_info->bit_position, + bit_reg_info->access_bit_mask); + if ( value ) + { + status = + acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, (u16)value); + register_value = 0; + } + break; + + case ACPI_REGISTER_PM1_ENABLE: + + ACPI_REGISTER_INSERT_VALUE(register_value, bit_reg_info->bit_position, + bit_reg_info->access_bit_mask, value); + + status = acpi_hw_register_write(ACPI_REGISTER_PM1_ENABLE, + (u16)register_value); + break; + + case ACPI_REGISTER_PM1_CONTROL: + + /* + * Write the PM1 Control register. + * Note that at this level, the fact that there are actually TWO + * registers (A and B - and B may not exist) is abstracted. + */ + ACPI_DEBUG_PRINT( + (ACPI_DB_IO, "PM1 control: Read %X\n", register_value)); + + ACPI_REGISTER_INSERT_VALUE(register_value, bit_reg_info->bit_position, + bit_reg_info->access_bit_mask, value); + + status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL, + (u16)register_value); + break; + + case ACPI_REGISTER_PM2_CONTROL: + +#if 0 /* Redundant read in original Linux code. */ + status = acpi_hw_register_read(ACPI_REGISTER_PM2_CONTROL, + ®ister_value); + if (ACPI_FAILURE(status)) { + goto unlock_and_exit; } - break; +#endif - case ACPI_REGISTER_PM1_ENABLE: + ACPI_DEBUG_PRINT( + (ACPI_DB_IO, "PM2 control: Read %X from %8.8X%8.8X\n", + register_value, + ACPI_FORMAT_UINT64(acpi_gbl_FADT.xpm2_control_block.address))); - ACPI_REGISTER_INSERT_VALUE(register_value, - bit_reg_info->bit_position, - bit_reg_info->access_bit_mask, - value); + ACPI_REGISTER_INSERT_VALUE(register_value, bit_reg_info->bit_position, + bit_reg_info->access_bit_mask, value); - status = acpi_hw_register_write(ACPI_REGISTER_PM1_ENABLE, - (u16) register_value); - break; + ACPI_DEBUG_PRINT( + (ACPI_DB_IO, "About to write %4.4X to %8.8X%8.8X\n", register_value, + ACPI_FORMAT_UINT64(acpi_gbl_FADT.xpm2_control_block.address))); - case ACPI_REGISTER_PM1_CONTROL: + status = acpi_hw_register_write(ACPI_REGISTER_PM2_CONTROL, + (u8)(register_value)); + break; - /* - * Write the PM1 Control register. 
- * Note that at this level, the fact that there are actually TWO - * registers (A and B - and B may not exist) is abstracted. - */ - ACPI_DEBUG_PRINT((ACPI_DB_IO, "PM1 control: Read %X\n", - register_value)); + default: + break; + } - ACPI_REGISTER_INSERT_VALUE(register_value, - bit_reg_info->bit_position, - bit_reg_info->access_bit_mask, - value); +unlock_and_exit: - status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL, - (u16) register_value); - break; + /* Normalize the value that was read */ - case ACPI_REGISTER_PM2_CONTROL: + ACPI_DEBUG_EXEC(register_value = + ((register_value & bit_reg_info->access_bit_mask) >> + bit_reg_info->bit_position)); -#if 0 /* Redundant read in original Linux code. */ - status = acpi_hw_register_read(ACPI_REGISTER_PM2_CONTROL, - ®ister_value); - if (ACPI_FAILURE(status)) { - goto unlock_and_exit; - } -#endif - - ACPI_DEBUG_PRINT((ACPI_DB_IO, - "PM2 control: Read %X from %8.8X%8.8X\n", - register_value, - ACPI_FORMAT_UINT64(acpi_gbl_FADT. - xpm2_control_block. - address))); - - ACPI_REGISTER_INSERT_VALUE(register_value, - bit_reg_info->bit_position, - bit_reg_info->access_bit_mask, - value); - - ACPI_DEBUG_PRINT((ACPI_DB_IO, - "About to write %4.4X to %8.8X%8.8X\n", - register_value, - ACPI_FORMAT_UINT64(acpi_gbl_FADT. - xpm2_control_block. - address))); - - status = acpi_hw_register_write(ACPI_REGISTER_PM2_CONTROL, - (u8) (register_value)); - break; - - default: - break; - } - - unlock_and_exit: - - /* Normalize the value that was read */ - - ACPI_DEBUG_EXEC(register_value = - ((register_value & bit_reg_info->access_bit_mask) >> - bit_reg_info->bit_position)); - - ACPI_DEBUG_PRINT((ACPI_DB_IO, - "Set bits: %8.8X actual %8.8X register %X\n", value, - register_value, bit_reg_info->parent_register)); - return_ACPI_STATUS(status); + ACPI_DEBUG_PRINT((ACPI_DB_IO, "Set bits: %8.8X actual %8.8X register %X\n", + value, register_value, bit_reg_info->parent_register)); + return_ACPI_STATUS(status); } /****************************************************************************** @@ -287,104 +275,97 @@ acpi_status acpi_set_register(u32 register_id, u32 value) * DESCRIPTION: Read from the specified ACPI register * ******************************************************************************/ -acpi_status -acpi_hw_register_read(u32 register_id, u32 * return_value) +acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value) { - u32 value1 = 0; - u32 value2 = 0; - acpi_status status; + u32 value1 = 0; + u32 value2 = 0; + acpi_status status; - ACPI_FUNCTION_TRACE(hw_register_read); + ACPI_FUNCTION_TRACE(hw_register_read); - switch (register_id) { - case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */ + switch (register_id) + { + case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */ - status = - acpi_hw_low_level_read(16, &value1, - &acpi_gbl_FADT.xpm1a_event_block); - if (ACPI_FAILURE(status)) { - goto exit; - } + status = acpi_hw_low_level_read(16, &value1, + &acpi_gbl_FADT.xpm1a_event_block); + if ( ACPI_FAILURE(status) ) + { + goto exit; + } - /* PM1B is optional */ + /* PM1B is optional */ - status = - acpi_hw_low_level_read(16, &value2, - &acpi_gbl_FADT.xpm1b_event_block); - value1 |= value2; - break; + status = acpi_hw_low_level_read(16, &value2, + &acpi_gbl_FADT.xpm1b_event_block); + value1 |= value2; + break; - case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */ + case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */ - status = - acpi_hw_low_level_read(16, &value1, &acpi_gbl_xpm1a_enable); - if (ACPI_FAILURE(status)) { - goto exit; - } + status = 
acpi_hw_low_level_read(16, &value1, &acpi_gbl_xpm1a_enable); + if ( ACPI_FAILURE(status) ) + { + goto exit; + } - /* PM1B is optional */ + /* PM1B is optional */ - status = - acpi_hw_low_level_read(16, &value2, &acpi_gbl_xpm1b_enable); - value1 |= value2; - break; + status = acpi_hw_low_level_read(16, &value2, &acpi_gbl_xpm1b_enable); + value1 |= value2; + break; - case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */ + case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */ - status = - acpi_hw_low_level_read(16, &value1, - &acpi_gbl_FADT.xpm1a_control_block); - if (ACPI_FAILURE(status)) { - goto exit; - } + status = acpi_hw_low_level_read(16, &value1, + &acpi_gbl_FADT.xpm1a_control_block); + if ( ACPI_FAILURE(status) ) + { + goto exit; + } - status = - acpi_hw_low_level_read(16, &value2, - &acpi_gbl_FADT.xpm1b_control_block); - value1 |= value2; - break; + status = acpi_hw_low_level_read(16, &value2, + &acpi_gbl_FADT.xpm1b_control_block); + value1 |= value2; + break; - case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */ + case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */ - status = - acpi_hw_low_level_read(8, &value1, - &acpi_gbl_FADT.xpm2_control_block); - break; + status = acpi_hw_low_level_read(8, &value1, + &acpi_gbl_FADT.xpm2_control_block); + break; - case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ + case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ - status = - acpi_hw_low_level_read(32, &value1, - &acpi_gbl_FADT.xpm_timer_block); - break; + status = + acpi_hw_low_level_read(32, &value1, &acpi_gbl_FADT.xpm_timer_block); + break; - case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ + case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ - status = - acpi_os_read_port(acpi_gbl_FADT.smi_command, &value1, 8); - break; + status = acpi_os_read_port(acpi_gbl_FADT.smi_command, &value1, 8); + break; - case ACPI_REGISTER_SLEEP_STATUS: + case ACPI_REGISTER_SLEEP_STATUS: - status = - acpi_hw_low_level_read(acpi_gbl_FADT.sleep_status.bit_width, - &value1, - &acpi_gbl_FADT.sleep_status); - break; + status = acpi_hw_low_level_read(acpi_gbl_FADT.sleep_status.bit_width, + &value1, &acpi_gbl_FADT.sleep_status); + break; - default: - ACPI_DEBUG_PRINT((AE_INFO, "Unknown Register ID: %X", register_id)); - status = AE_BAD_PARAMETER; - break; - } + default: + ACPI_DEBUG_PRINT((AE_INFO, "Unknown Register ID: %X", register_id)); + status = AE_BAD_PARAMETER; + break; + } - exit: +exit: - if (ACPI_SUCCESS(status)) { - *return_value = value1; - } + if ( ACPI_SUCCESS(status) ) + { + *return_value = value1; + } - return_ACPI_STATUS(status); + return_ACPI_STATUS(status); } /****************************************************************************** @@ -415,138 +396,127 @@ acpi_hw_register_read(u32 register_id, u32 * return_value) acpi_status acpi_hw_register_write(u32 register_id, u32 value) { - acpi_status status; - u32 read_value; + acpi_status status; + u32 read_value; - ACPI_FUNCTION_TRACE(hw_register_write); + ACPI_FUNCTION_TRACE(hw_register_write); - switch (register_id) { - case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */ + switch (register_id) + { + case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */ - /* Perform a read first to preserve certain bits (per ACPI spec) */ + /* Perform a read first to preserve certain bits (per ACPI spec) */ - status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, - &read_value); - if (ACPI_FAILURE(status)) { - goto exit; - } + status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &read_value); + if ( ACPI_FAILURE(status) ) + { + goto exit; + } - /* Insert the 
bits to be preserved */ + /* Insert the bits to be preserved */ - ACPI_INSERT_BITS(value, ACPI_PM1_STATUS_PRESERVED_BITS, - read_value); + ACPI_INSERT_BITS(value, ACPI_PM1_STATUS_PRESERVED_BITS, read_value); - /* Now we can write the data */ + /* Now we can write the data */ - status = - acpi_hw_low_level_write(16, value, - &acpi_gbl_FADT.xpm1a_event_block); - if (ACPI_FAILURE(status)) { - goto exit; - } + status = acpi_hw_low_level_write(16, value, + &acpi_gbl_FADT.xpm1a_event_block); + if ( ACPI_FAILURE(status) ) + { + goto exit; + } - /* PM1B is optional */ + /* PM1B is optional */ - status = - acpi_hw_low_level_write(16, value, - &acpi_gbl_FADT.xpm1b_event_block); - break; + status = acpi_hw_low_level_write(16, value, + &acpi_gbl_FADT.xpm1b_event_block); + break; - case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */ + case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */ - status = - acpi_hw_low_level_write(16, value, &acpi_gbl_xpm1a_enable); - if (ACPI_FAILURE(status)) { - goto exit; - } + status = acpi_hw_low_level_write(16, value, &acpi_gbl_xpm1a_enable); + if ( ACPI_FAILURE(status) ) + { + goto exit; + } - /* PM1B is optional */ + /* PM1B is optional */ - status = - acpi_hw_low_level_write(16, value, &acpi_gbl_xpm1b_enable); - break; + status = acpi_hw_low_level_write(16, value, &acpi_gbl_xpm1b_enable); + break; - case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */ + case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */ - /* - * Perform a read first to preserve certain bits (per ACPI spec) - */ - status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, - &read_value); - if (ACPI_FAILURE(status)) { - goto exit; - } + /* + * Perform a read first to preserve certain bits (per ACPI spec) + */ + status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, &read_value); + if ( ACPI_FAILURE(status) ) + { + goto exit; + } - /* Insert the bits to be preserved */ + /* Insert the bits to be preserved */ - ACPI_INSERT_BITS(value, ACPI_PM1_CONTROL_PRESERVED_BITS, - read_value); + ACPI_INSERT_BITS(value, ACPI_PM1_CONTROL_PRESERVED_BITS, read_value); - /* Now we can write the data */ + /* Now we can write the data */ - status = - acpi_hw_low_level_write(16, value, - &acpi_gbl_FADT.xpm1a_control_block); - if (ACPI_FAILURE(status)) { - goto exit; - } + status = acpi_hw_low_level_write(16, value, + &acpi_gbl_FADT.xpm1a_control_block); + if ( ACPI_FAILURE(status) ) + { + goto exit; + } - status = - acpi_hw_low_level_write(16, value, - &acpi_gbl_FADT.xpm1b_control_block); - break; + status = acpi_hw_low_level_write(16, value, + &acpi_gbl_FADT.xpm1b_control_block); + break; - case ACPI_REGISTER_PM1A_CONTROL: /* 16-bit access */ + case ACPI_REGISTER_PM1A_CONTROL: /* 16-bit access */ - status = - acpi_hw_low_level_write(16, value, - &acpi_gbl_FADT.xpm1a_control_block); - break; + status = acpi_hw_low_level_write(16, value, + &acpi_gbl_FADT.xpm1a_control_block); + break; - case ACPI_REGISTER_PM1B_CONTROL: /* 16-bit access */ + case ACPI_REGISTER_PM1B_CONTROL: /* 16-bit access */ - status = - acpi_hw_low_level_write(16, value, - &acpi_gbl_FADT.xpm1b_control_block); - break; + status = acpi_hw_low_level_write(16, value, + &acpi_gbl_FADT.xpm1b_control_block); + break; - case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */ + case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */ - status = - acpi_hw_low_level_write(8, value, - &acpi_gbl_FADT.xpm2_control_block); - break; + status = acpi_hw_low_level_write(8, value, + &acpi_gbl_FADT.xpm2_control_block); + break; - case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ + case 
ACPI_REGISTER_PM_TIMER: /* 32-bit access */ - status = - acpi_hw_low_level_write(32, value, - &acpi_gbl_FADT.xpm_timer_block); - break; + status = + acpi_hw_low_level_write(32, value, &acpi_gbl_FADT.xpm_timer_block); + break; - case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ + case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ - /* SMI_CMD is currently always in IO space */ + /* SMI_CMD is currently always in IO space */ - status = - acpi_os_write_port(acpi_gbl_FADT.smi_command, value, 8); - break; + status = acpi_os_write_port(acpi_gbl_FADT.smi_command, value, 8); + break; - case ACPI_REGISTER_SLEEP_CONTROL: + case ACPI_REGISTER_SLEEP_CONTROL: - status = - acpi_hw_low_level_write(acpi_gbl_FADT.sleep_control.bit_width, - value, - &acpi_gbl_FADT.sleep_control); - break; + status = acpi_hw_low_level_write(acpi_gbl_FADT.sleep_control.bit_width, + value, &acpi_gbl_FADT.sleep_control); + break; - default: - status = AE_BAD_PARAMETER; - break; - } + default: + status = AE_BAD_PARAMETER; + break; + } - exit: - return_ACPI_STATUS(status); +exit: + return_ACPI_STATUS(status); } /****************************************************************************** @@ -563,60 +533,61 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value) * ******************************************************************************/ -acpi_status -acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg) +acpi_status acpi_hw_low_level_read(u32 width, u32 *value, + struct acpi_generic_address *reg) { - u64 address; - acpi_status status; - - ACPI_FUNCTION_NAME(hw_low_level_read); - - /* - * Must have a valid pointer to a GAS structure, and - * a non-zero address within. However, don't return an error - * because the PM1A/B code must not fail if B isn't present. - */ - if (!reg) { - return (AE_OK); - } - - /* Get a local copy of the address. Handles possible alignment issues */ - - ACPI_MOVE_64_TO_64(&address, ®->address); - if (!address) { - return (AE_OK); - } - *value = 0; - - /* - * Two address spaces supported: Memory or IO. - * PCI_Config is not supported here because the GAS struct is insufficient - */ - switch (reg->space_id) { - case ACPI_ADR_SPACE_SYSTEM_MEMORY: - - status = acpi_os_read_memory((acpi_physical_address) address, - value, width); - break; - - case ACPI_ADR_SPACE_SYSTEM_IO: - - status = acpi_os_read_port((acpi_io_address) address, - value, width); - break; - - default: - - return (AE_BAD_PARAMETER); - } - - ACPI_DEBUG_PRINT((ACPI_DB_IO, - "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n", - *value, width, - ACPI_FORMAT_UINT64(address), - acpi_ut_get_region_name(reg->address_space_id))); - - return (status); + u64 address; + acpi_status status; + + ACPI_FUNCTION_NAME(hw_low_level_read); + + /* + * Must have a valid pointer to a GAS structure, and + * a non-zero address within. However, don't return an error + * because the PM1A/B code must not fail if B isn't present. + */ + if ( !reg ) + { + return (AE_OK); + } + + /* Get a local copy of the address. Handles possible alignment issues */ + + ACPI_MOVE_64_TO_64(&address, ®->address); + if ( !address ) + { + return (AE_OK); + } + *value = 0; + + /* + * Two address spaces supported: Memory or IO. 
+ * PCI_Config is not supported here because the GAS struct is insufficient + */ + switch (reg->space_id) + { + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + + status = + acpi_os_read_memory((acpi_physical_address)address, value, width); + break; + + case ACPI_ADR_SPACE_SYSTEM_IO: + + status = acpi_os_read_port((acpi_io_address)address, value, width); + break; + + default: + + return (AE_BAD_PARAMETER); + } + + ACPI_DEBUG_PRINT((ACPI_DB_IO, + "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n", *value, + width, ACPI_FORMAT_UINT64(address), + acpi_ut_get_region_name(reg->address_space_id))); + + return (status); } /****************************************************************************** @@ -633,57 +604,57 @@ acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg) * ******************************************************************************/ -acpi_status -acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg) +acpi_status acpi_hw_low_level_write(u32 width, u32 value, + struct acpi_generic_address *reg) { - u64 address; - acpi_status status; - - ACPI_FUNCTION_NAME(hw_low_level_write); - - /* - * Must have a valid pointer to a GAS structure, and - * a non-zero address within. However, don't return an error - * because the PM1A/B code must not fail if B isn't present. - */ - if (!reg) { - return (AE_OK); - } - - /* Get a local copy of the address. Handles possible alignment issues */ - - ACPI_MOVE_64_TO_64(&address, ®->address); - if (!address) { - return (AE_OK); - } - - /* - * Two address spaces supported: Memory or IO. - * PCI_Config is not supported here because the GAS struct is insufficient - */ - switch (reg->space_id) { - case ACPI_ADR_SPACE_SYSTEM_MEMORY: - - status = acpi_os_write_memory((acpi_physical_address) address, - value, width); - break; - - case ACPI_ADR_SPACE_SYSTEM_IO: - - status = acpi_os_write_port((acpi_io_address) address, - value, width); - break; - - default: - return (AE_BAD_PARAMETER); - } - - ACPI_DEBUG_PRINT((ACPI_DB_IO, - "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n", - value, width, - ACPI_FORMAT_UINT64(address), - acpi_ut_get_region_name(reg->address_space_id))); - - return (status); + u64 address; + acpi_status status; + + ACPI_FUNCTION_NAME(hw_low_level_write); + + /* + * Must have a valid pointer to a GAS structure, and + * a non-zero address within. However, don't return an error + * because the PM1A/B code must not fail if B isn't present. + */ + if ( !reg ) + { + return (AE_OK); + } + + /* Get a local copy of the address. Handles possible alignment issues */ + + ACPI_MOVE_64_TO_64(&address, ®->address); + if ( !address ) + { + return (AE_OK); + } + + /* + * Two address spaces supported: Memory or IO. 
+ * PCI_Config is not supported here because the GAS struct is insufficient + */ + switch (reg->space_id) + { + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + + status = + acpi_os_write_memory((acpi_physical_address)address, value, width); + break; + + case ACPI_ADR_SPACE_SYSTEM_IO: + + status = acpi_os_write_port((acpi_io_address)address, value, width); + break; + + default: + return (AE_BAD_PARAMETER); + } + + ACPI_DEBUG_PRINT((ACPI_DB_IO, + "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n", value, + width, ACPI_FORMAT_UINT64(address), + acpi_ut_get_region_name(reg->address_space_id))); + + return (status); } - diff --git a/xen/drivers/acpi/numa.c b/xen/drivers/acpi/numa.c index 85f891757c..6ba5944dee 100644 --- a/xen/drivers/acpi/numa.c +++ b/xen/drivers/acpi/numa.c @@ -28,189 +28,180 @@ #include #include -#define ACPI_NUMA 0x80000000 -#define _COMPONENT ACPI_NUMA +#define ACPI_NUMA 0x80000000 +#define _COMPONENT ACPI_NUMA ACPI_MODULE_NAME("numa") int __initdata srat_rev; -void __init acpi_table_print_srat_entry(struct acpi_subtable_header * header) +void __init acpi_table_print_srat_entry(struct acpi_subtable_header *header) { + ACPI_FUNCTION_NAME("acpi_table_print_srat_entry"); - ACPI_FUNCTION_NAME("acpi_table_print_srat_entry"); + if ( !header ) + return; - if (!header) - return; - - switch (header->type) { - - case ACPI_SRAT_TYPE_CPU_AFFINITY: + switch (header->type) + { + case ACPI_SRAT_TYPE_CPU_AFFINITY: #ifdef ACPI_DEBUG_OUTPUT - { - struct acpi_srat_cpu_affinity *p = - container_of(header, struct acpi_srat_cpu_affinity, header); - u32 proximity_domain = p->proximity_domain_lo; - - if (srat_rev >= 2) { - proximity_domain |= p->proximity_domain_hi[0] << 8; - proximity_domain |= p->proximity_domain_hi[1] << 16; - proximity_domain |= p->proximity_domain_hi[2] << 24; - } - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n", - p->apic_id, p->local_sapic_eid, - proximity_domain, - p->flags & ACPI_SRAT_CPU_ENABLED - ? "enabled" : "disabled")); - } -#endif /* ACPI_DEBUG_OUTPUT */ - break; - - case ACPI_SRAT_TYPE_MEMORY_AFFINITY: + { + struct acpi_srat_cpu_affinity *p = + container_of(header, struct acpi_srat_cpu_affinity, header); + u32 proximity_domain = p->proximity_domain_lo; + + if ( srat_rev >= 2 ) + { + proximity_domain |= p->proximity_domain_hi[0] << 8; + proximity_domain |= p->proximity_domain_hi[1] << 16; + proximity_domain |= p->proximity_domain_hi[2] << 24; + } + ACPI_DEBUG_PRINT( + (ACPI_DB_INFO, + "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d " + "%s\n", + p->apic_id, p->local_sapic_eid, proximity_domain, + p->flags & ACPI_SRAT_CPU_ENABLED ? "enabled" : "disabled")); + } +#endif /* ACPI_DEBUG_OUTPUT */ + break; + + case ACPI_SRAT_TYPE_MEMORY_AFFINITY: #ifdef ACPI_DEBUG_OUTPUT - { - struct acpi_srat_mem_affinity *p = - container_of(header, struct acpi_srat_mem_affinity, header); - u32 proximity_domain = p->proximity_domain; - - if (srat_rev < 2) - proximity_domain &= 0xff; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "SRAT Memory (%#"PRIx64 - " length %#"PRIx64")" - " in proximity domain %d %s%s\n", - p->base_address, p->length, - proximity_domain, - p->flags & ACPI_SRAT_MEM_ENABLED - ? "enabled" : "disabled", - p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE - ? 
" hot-pluggable" : "")); - } -#endif /* ACPI_DEBUG_OUTPUT */ - break; - - case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY: + { + struct acpi_srat_mem_affinity *p = + container_of(header, struct acpi_srat_mem_affinity, header); + u32 proximity_domain = p->proximity_domain; + + if ( srat_rev < 2 ) + proximity_domain &= 0xff; + ACPI_DEBUG_PRINT( + (ACPI_DB_INFO, + "SRAT Memory (%#" PRIx64 " length %#" PRIx64 ")" + " in proximity domain %d %s%s\n", + p->base_address, p->length, proximity_domain, + p->flags & ACPI_SRAT_MEM_ENABLED ? "enabled" : "disabled", + p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE ? " hot-pluggable" : "")); + } +#endif /* ACPI_DEBUG_OUTPUT */ + break; + + case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY: #ifdef ACPI_DEBUG_OUTPUT - { - struct acpi_srat_x2apic_cpu_affinity *p = - (struct acpi_srat_x2apic_cpu_affinity *)header; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "SRAT Processor (x2apicid[0x%08x]) in" - " proximity domain %d %s\n", - p->apic_id, - p->proximity_domain, - (p->flags & ACPI_SRAT_CPU_ENABLED) ? - "enabled" : "disabled")); - } -#endif /* ACPI_DEBUG_OUTPUT */ - break; - default: - printk(KERN_WARNING PREFIX - "Found unsupported SRAT entry (type = %#x)\n", - header->type); - break; - } + { + struct acpi_srat_x2apic_cpu_affinity *p = + (struct acpi_srat_x2apic_cpu_affinity *)header; + ACPI_DEBUG_PRINT( + (ACPI_DB_INFO, + "SRAT Processor (x2apicid[0x%08x]) in" + " proximity domain %d %s\n", + p->apic_id, p->proximity_domain, + (p->flags & ACPI_SRAT_CPU_ENABLED) ? "enabled" : "disabled")); + } +#endif /* ACPI_DEBUG_OUTPUT */ + break; + default: + printk(KERN_WARNING PREFIX + "Found unsupported SRAT entry (type = %#x)\n", + header->type); + break; + } } static int __init acpi_parse_slit(struct acpi_table_header *table) { - acpi_numa_slit_init((struct acpi_table_slit *)table); + acpi_numa_slit_init((struct acpi_table_slit *)table); - return 0; + return 0; } -static int __init -acpi_parse_x2apic_affinity(struct acpi_subtable_header *header, - const unsigned long end) +static int __init acpi_parse_x2apic_affinity( + struct acpi_subtable_header *header, const unsigned long end) { - const struct acpi_srat_x2apic_cpu_affinity *processor_affinity - = container_of(header, struct acpi_srat_x2apic_cpu_affinity, - header); + const struct acpi_srat_x2apic_cpu_affinity *processor_affinity = + container_of(header, struct acpi_srat_x2apic_cpu_affinity, header); - if (!header) - return -EINVAL; + if ( !header ) + return -EINVAL; - acpi_table_print_srat_entry(header); + acpi_table_print_srat_entry(header); - /* let architecture-dependent part to do it */ - acpi_numa_x2apic_affinity_init(processor_affinity); + /* let architecture-dependent part to do it */ + acpi_numa_x2apic_affinity_init(processor_affinity); - return 0; + return 0; } -static int __init -acpi_parse_processor_affinity(struct acpi_subtable_header *header, - const unsigned long end) +static int __init acpi_parse_processor_affinity( + struct acpi_subtable_header *header, const unsigned long end) { - const struct acpi_srat_cpu_affinity *processor_affinity - = container_of(header, struct acpi_srat_cpu_affinity, header); + const struct acpi_srat_cpu_affinity *processor_affinity = + container_of(header, struct acpi_srat_cpu_affinity, header); - if (!header) - return -EINVAL; + if ( !header ) + return -EINVAL; - acpi_table_print_srat_entry(header); + acpi_table_print_srat_entry(header); - /* let architecture-dependent part to do it */ - acpi_numa_processor_affinity_init(processor_affinity); + /* let architecture-dependent part to do it */ + 
acpi_numa_processor_affinity_init(processor_affinity); - return 0; + return 0; } -static int __init -acpi_parse_memory_affinity(struct acpi_subtable_header *header, - const unsigned long end) +static int __init acpi_parse_memory_affinity( + struct acpi_subtable_header *header, const unsigned long end) { - const struct acpi_srat_mem_affinity *memory_affinity - = container_of(header, struct acpi_srat_mem_affinity, header); + const struct acpi_srat_mem_affinity *memory_affinity = + container_of(header, struct acpi_srat_mem_affinity, header); - if (!header) - return -EINVAL; + if ( !header ) + return -EINVAL; - acpi_table_print_srat_entry(header); + acpi_table_print_srat_entry(header); - /* let architecture-dependent part to do it */ - acpi_numa_memory_affinity_init(memory_affinity); + /* let architecture-dependent part to do it */ + acpi_numa_memory_affinity_init(memory_affinity); - return 0; + return 0; } int __init acpi_parse_srat(struct acpi_table_header *table) { - if (!table) - return -EINVAL; + if ( !table ) + return -EINVAL; - srat_rev = table->revision; + srat_rev = table->revision; - return 0; + return 0; } -int __init -acpi_table_parse_srat(int id, acpi_madt_entry_handler handler, - unsigned int max_entries) +int __init acpi_table_parse_srat(int id, acpi_madt_entry_handler handler, + unsigned int max_entries) { - return acpi_table_parse_entries(ACPI_SIG_SRAT, - sizeof(struct acpi_table_srat), id, - handler, max_entries); + return acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), id, handler, + max_entries); } int __init acpi_numa_init(void) { - /* SRAT: Static Resource Affinity Table */ - if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) { - acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY, - acpi_parse_x2apic_affinity, 0); - acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY, - acpi_parse_processor_affinity, 0); - acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, - acpi_parse_memory_affinity, - NR_NODE_MEMBLKS); - } - - /* SLIT: System Locality Information Table */ - acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit); - - acpi_numa_arch_fixup(); - return 0; + /* SRAT: Static Resource Affinity Table */ + if ( !acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat) ) + { + acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY, + acpi_parse_x2apic_affinity, 0); + acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY, + acpi_parse_processor_affinity, 0); + acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, + acpi_parse_memory_affinity, NR_NODE_MEMBLKS); + } + + /* SLIT: System Locality Information Table */ + acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit); + + acpi_numa_arch_fixup(); + return 0; } #if 0 diff --git a/xen/drivers/acpi/osl.c b/xen/drivers/acpi/osl.c index 4c8bb7839e..4f49842f79 100644 --- a/xen/drivers/acpi/osl.c +++ b/xen/drivers/acpi/osl.c @@ -38,7 +38,7 @@ #include #include -#define _COMPONENT ACPI_OS_SERVICES +#define _COMPONENT ACPI_OS_SERVICES ACPI_MODULE_NAME("osl") #ifdef CONFIG_ACPI_CUSTOM_DSDT @@ -47,202 +47,224 @@ ACPI_MODULE_NAME("osl") void __init acpi_os_printf(const char *fmt, ...) 
{ - va_list args; - va_start(args, fmt); - acpi_os_vprintf(fmt, args); - va_end(args); + va_list args; + va_start(args, fmt); + acpi_os_vprintf(fmt, args); + va_end(args); } void __init acpi_os_vprintf(const char *fmt, va_list args) { - static char buffer[512]; + static char buffer[512]; - vsnprintf(buffer, sizeof(buffer), fmt, args); + vsnprintf(buffer, sizeof(buffer), fmt, args); - printk("%s", buffer); + printk("%s", buffer); } acpi_physical_address __initdata rsdp_hint; acpi_physical_address __init acpi_os_get_root_pointer(void) { - if (rsdp_hint) - return rsdp_hint; - - if (efi_enabled(EFI_BOOT)) { - if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) - return efi.acpi20; - else if (efi.acpi != EFI_INVALID_TABLE_ADDR) - return efi.acpi; - else { - printk(KERN_ERR PREFIX - "System description tables not found\n"); - return 0; - } - } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) { - acpi_physical_address pa = 0; - - acpi_find_root_pointer(&pa); - return pa; - } - - return 0; + if ( rsdp_hint ) + return rsdp_hint; + + if ( efi_enabled(EFI_BOOT) ) + { + if ( efi.acpi20 != EFI_INVALID_TABLE_ADDR ) + return efi.acpi20; + else if ( efi.acpi != EFI_INVALID_TABLE_ADDR ) + return efi.acpi; + else + { + printk(KERN_ERR PREFIX "System description tables not found\n"); + return 0; + } + } + else if ( IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP) ) + { + acpi_physical_address pa = 0; + + acpi_find_root_pointer(&pa); + return pa; + } + + return 0; } -void __iomem * -acpi_os_map_memory(acpi_physical_address phys, acpi_size size) +void __iomem *acpi_os_map_memory(acpi_physical_address phys, acpi_size size) { - if (system_state >= SYS_STATE_boot) { - mfn_t mfn = _mfn(PFN_DOWN(phys)); - unsigned int offs = phys & (PAGE_SIZE - 1); - - /* The low first Mb is always mapped on x86. */ - if (IS_ENABLED(CONFIG_X86) && !((phys + size - 1) >> 20)) - return __va(phys); - return __vmap(&mfn, PFN_UP(offs + size), 1, 1, - ACPI_MAP_MEM_ATTR, VMAP_DEFAULT) + offs; - } - return __acpi_map_table(phys, size); + if ( system_state >= SYS_STATE_boot ) + { + mfn_t mfn = _mfn(PFN_DOWN(phys)); + unsigned int offs = phys & (PAGE_SIZE - 1); + + /* The low first Mb is always mapped on x86. 
*/ + if ( IS_ENABLED(CONFIG_X86) && !((phys + size - 1) >> 20) ) + return __va(phys); + return __vmap(&mfn, PFN_UP(offs + size), 1, 1, ACPI_MAP_MEM_ATTR, + VMAP_DEFAULT) + + offs; + } + return __acpi_map_table(phys, size); } -void acpi_os_unmap_memory(void __iomem * virt, acpi_size size) +void acpi_os_unmap_memory(void __iomem *virt, acpi_size size) { - if (IS_ENABLED(CONFIG_X86) && - (unsigned long)virt >= DIRECTMAP_VIRT_START && - (unsigned long)virt < DIRECTMAP_VIRT_END) { - ASSERT(!((__pa(virt) + size - 1) >> 20)); - return; - } - - if (system_state >= SYS_STATE_boot) - vunmap((void *)((unsigned long)virt & PAGE_MASK)); + if ( IS_ENABLED(CONFIG_X86) && + (unsigned long)virt >= DIRECTMAP_VIRT_START && + (unsigned long)virt < DIRECTMAP_VIRT_END ) + { + ASSERT(!((__pa(virt) + size - 1) >> 20)); + return; + } + + if ( system_state >= SYS_STATE_boot ) + vunmap((void *)((unsigned long)virt & PAGE_MASK)); } -acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width) +acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width) { - u32 dummy; - - if (!value) - value = &dummy; - - *value = 0; - if (width <= 8) { - *(u8 *) value = inb(port); - } else if (width <= 16) { - *(u16 *) value = inw(port); - } else if (width <= 32) { - *(u32 *) value = inl(port); - } else { - BUG(); - } - - return AE_OK; + u32 dummy; + + if ( !value ) + value = &dummy; + + *value = 0; + if ( width <= 8 ) + { + *(u8 *)value = inb(port); + } + else if ( width <= 16 ) + { + *(u16 *)value = inw(port); + } + else if ( width <= 32 ) + { + *(u32 *)value = inl(port); + } + else + { + BUG(); + } + + return AE_OK; } acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width) { - if (width <= 8) { - outb(value, port); - } else if (width <= 16) { - outw(value, port); - } else if (width <= 32) { - outl(value, port); - } else { - BUG(); - } - - return AE_OK; + if ( width <= 8 ) + { + outb(value, port); + } + else if ( width <= 16 ) + { + outw(value, port); + } + else if ( width <= 32 ) + { + outl(value, port); + } + else + { + BUG(); + } + + return AE_OK; } -acpi_status -acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) +acpi_status acpi_os_read_memory(acpi_physical_address phys_addr, u32 *value, + u32 width) { - u32 dummy; - void __iomem *virt_addr = acpi_os_map_memory(phys_addr, width >> 3); - - if (!virt_addr) - return AE_ERROR; - - if (!value) - value = &dummy; - - switch (width) { - case 8: - *(u8 *) value = readb(virt_addr); - break; - case 16: - *(u16 *) value = readw(virt_addr); - break; - case 32: - *(u32 *) value = readl(virt_addr); - break; - default: - BUG(); - } - - acpi_os_unmap_memory(virt_addr, width >> 3); - - return AE_OK; + u32 dummy; + void __iomem *virt_addr = acpi_os_map_memory(phys_addr, width >> 3); + + if ( !virt_addr ) + return AE_ERROR; + + if ( !value ) + value = &dummy; + + switch (width) + { + case 8: + *(u8 *)value = readb(virt_addr); + break; + case 16: + *(u16 *)value = readw(virt_addr); + break; + case 32: + *(u32 *)value = readl(virt_addr); + break; + default: + BUG(); + } + + acpi_os_unmap_memory(virt_addr, width >> 3); + + return AE_OK; } -acpi_status -acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) +acpi_status acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, + u32 width) { - void __iomem *virt_addr = acpi_os_map_memory(phys_addr, width >> 3); - - if (!virt_addr) - return AE_ERROR; - - switch (width) { - case 8: - writeb(value, virt_addr); - break; - case 16: - writew(value, 
virt_addr); - break; - case 32: - writel(value, virt_addr); - break; - default: - BUG(); - } - - acpi_os_unmap_memory(virt_addr, width >> 3); - - return AE_OK; + void __iomem *virt_addr = acpi_os_map_memory(phys_addr, width >> 3); + + if ( !virt_addr ) + return AE_ERROR; + + switch (width) + { + case 8: + writeb(value, virt_addr); + break; + case 16: + writew(value, virt_addr); + break; + case 32: + writel(value, virt_addr); + break; + default: + BUG(); + } + + acpi_os_unmap_memory(virt_addr, width >> 3); + + return AE_OK; } #define is_xmalloc_memory(ptr) ((unsigned long)(ptr) & (PAGE_SIZE - 1)) void *__init acpi_os_alloc_memory(size_t sz) { - void *ptr; + void *ptr; - if (system_state == SYS_STATE_early_boot) - return mfn_to_virt(mfn_x(alloc_boot_pages(PFN_UP(sz), 1))); + if ( system_state == SYS_STATE_early_boot ) + return mfn_to_virt(mfn_x(alloc_boot_pages(PFN_UP(sz), 1))); - ptr = xmalloc_bytes(sz); - ASSERT(!ptr || is_xmalloc_memory(ptr)); - return ptr; + ptr = xmalloc_bytes(sz); + ASSERT(!ptr || is_xmalloc_memory(ptr)); + return ptr; } void *__init acpi_os_zalloc_memory(size_t sz) { - void *ptr; - - if (system_state != SYS_STATE_early_boot) { - ptr = xzalloc_bytes(sz); - ASSERT(!ptr || is_xmalloc_memory(ptr)); - return ptr; - } - ptr = acpi_os_alloc_memory(sz); - return ptr ? memset(ptr, 0, sz) : NULL; + void *ptr; + + if ( system_state != SYS_STATE_early_boot ) + { + ptr = xzalloc_bytes(sz); + ASSERT(!ptr || is_xmalloc_memory(ptr)); + return ptr; + } + ptr = acpi_os_alloc_memory(sz); + return ptr ? memset(ptr, 0, sz) : NULL; } void __init acpi_os_free_memory(void *ptr) { - if (is_xmalloc_memory(ptr)) - xfree(ptr); - else if (ptr && system_state == SYS_STATE_early_boot) - init_boot_pages(__pa(ptr), __pa(ptr) + PAGE_SIZE); + if ( is_xmalloc_memory(ptr) ) + xfree(ptr); + else if ( ptr && system_state == SYS_STATE_early_boot ) + init_boot_pages(__pa(ptr), __pa(ptr) + PAGE_SIZE); } diff --git a/xen/drivers/acpi/pmstat.c b/xen/drivers/acpi/pmstat.c index bb80506ad6..1c31831e18 100644 --- a/xen/drivers/acpi/pmstat.c +++ b/xen/drivers/acpi/pmstat.c @@ -3,14 +3,14 @@ # # Copyright (c) 2008, Liu Jinsong # -# This program is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) # any later version. # -# This program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. 
# # You should have received a copy of the GNU General Public License along with @@ -55,7 +55,7 @@ int do_get_pm_info(struct xen_sysctl_get_pmstat *op) return -EINVAL; pmpt = processor_pminfo[op->cpuid]; - switch ( op->type & PMSTAT_CATEGORY_MASK ) + switch (op->type & PMSTAT_CATEGORY_MASK) { case PMSTAT_CX: if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_CX) ) @@ -73,7 +73,7 @@ int do_get_pm_info(struct xen_sysctl_get_pmstat *op) return -ENODEV; } - switch ( op->type ) + switch (op->type) { case PMSTAT_get_max_px: { @@ -85,8 +85,8 @@ int do_get_pm_info(struct xen_sysctl_get_pmstat *op) { uint32_t ct; struct pm_px *pxpt; - spinlock_t *cpufreq_statistic_lock = - &per_cpu(cpufreq_statistic_lock, op->cpuid); + spinlock_t *cpufreq_statistic_lock = + &per_cpu(cpufreq_statistic_lock, op->cpuid); spin_lock(cpufreq_statistic_lock); @@ -102,7 +102,7 @@ int do_get_pm_info(struct xen_sysctl_get_pmstat *op) cpufreq_residency_update(op->cpuid, pxpt->u.cur); ct = pmpt->perf.state_count; - if ( copy_to_guest(op->u.getpx.trans_pt, pxpt->u.trans_pt, ct*ct) ) + if ( copy_to_guest(op->u.getpx.trans_pt, pxpt->u.trans_pt, ct * ct) ) { spin_unlock(cpufreq_statistic_lock); ret = -EFAULT; @@ -173,14 +173,14 @@ static int read_scaling_available_governors(char *scaling_available_governors, if ( !scaling_available_governors ) return -EINVAL; - list_for_each_entry(t, &cpufreq_governor_list, governor_list) + list_for_each_entry (t, &cpufreq_governor_list, governor_list) { - i += scnprintf(&scaling_available_governors[i], - CPUFREQ_NAME_LEN, "%s ", t->name); + i += scnprintf(&scaling_available_governors[i], CPUFREQ_NAME_LEN, "%s ", + t->name); if ( i > size ) return -EINVAL; } - scaling_available_governors[i-1] = '\0'; + scaling_available_governors[i - 1] = '\0'; return 0; } @@ -193,63 +193,64 @@ static int get_cpufreq_para(struct xen_sysctl_pm_op *op) uint32_t gov_num = 0; uint32_t *affected_cpus; uint32_t *scaling_available_frequencies; - char *scaling_available_governors; + char *scaling_available_governors; struct list_head *pos; uint32_t cpu, i, j = 0; pmpt = processor_pminfo[op->cpuid]; policy = per_cpu(cpufreq_cpu_policy, op->cpuid); - if ( !pmpt || !pmpt->perf.states || - !policy || !policy->governor ) + if ( !pmpt || !pmpt->perf.states || !policy || !policy->governor ) return -EINVAL; - list_for_each(pos, &cpufreq_governor_list) + list_for_each (pos, &cpufreq_governor_list) gov_num++; - if ( (op->u.get_para.cpu_num != cpumask_weight(policy->cpus)) || - (op->u.get_para.freq_num != pmpt->perf.state_count) || - (op->u.get_para.gov_num != gov_num) ) + if ( (op->u.get_para.cpu_num != cpumask_weight(policy->cpus)) || + (op->u.get_para.freq_num != pmpt->perf.state_count) || + (op->u.get_para.gov_num != gov_num) ) { - op->u.get_para.cpu_num = cpumask_weight(policy->cpus); + op->u.get_para.cpu_num = cpumask_weight(policy->cpus); op->u.get_para.freq_num = pmpt->perf.state_count; - op->u.get_para.gov_num = gov_num; + op->u.get_para.gov_num = gov_num; return -EAGAIN; } if ( !(affected_cpus = xzalloc_array(uint32_t, op->u.get_para.cpu_num)) ) return -ENOMEM; - for_each_cpu(cpu, policy->cpus) + for_each_cpu (cpu, policy->cpus) affected_cpus[j++] = cpu; - ret = copy_to_guest(op->u.get_para.affected_cpus, - affected_cpus, op->u.get_para.cpu_num); + ret = copy_to_guest(op->u.get_para.affected_cpus, affected_cpus, + op->u.get_para.cpu_num); xfree(affected_cpus); if ( ret ) return ret; if ( !(scaling_available_frequencies = - xzalloc_array(uint32_t, op->u.get_para.freq_num)) ) + xzalloc_array(uint32_t, op->u.get_para.freq_num)) ) 
return -ENOMEM; for ( i = 0; i < op->u.get_para.freq_num; i++ ) scaling_available_frequencies[i] = - pmpt->perf.states[i].core_frequency * 1000; + pmpt->perf.states[i].core_frequency * 1000; ret = copy_to_guest(op->u.get_para.scaling_available_frequencies, - scaling_available_frequencies, op->u.get_para.freq_num); + scaling_available_frequencies, op->u.get_para.freq_num); xfree(scaling_available_frequencies); if ( ret ) return ret; if ( !(scaling_available_governors = - xzalloc_array(char, gov_num * CPUFREQ_NAME_LEN)) ) + xzalloc_array(char, gov_num *CPUFREQ_NAME_LEN)) ) return -ENOMEM; if ( (ret = read_scaling_available_governors(scaling_available_governors, - gov_num * CPUFREQ_NAME_LEN * sizeof(char))) ) + gov_num * CPUFREQ_NAME_LEN * + sizeof(char))) ) { xfree(scaling_available_governors); return ret; } - ret = copy_to_guest(op->u.get_para.scaling_available_governors, - scaling_available_governors, gov_num * CPUFREQ_NAME_LEN); + ret = + copy_to_guest(op->u.get_para.scaling_available_governors, + scaling_available_governors, gov_num * CPUFREQ_NAME_LEN); xfree(scaling_available_governors); if ( ret ) return ret; @@ -263,26 +264,26 @@ static int get_cpufreq_para(struct xen_sysctl_pm_op *op) op->u.get_para.scaling_min_freq = policy->min; if ( cpufreq_driver.name[0] ) - strlcpy(op->u.get_para.scaling_driver, - cpufreq_driver.name, CPUFREQ_NAME_LEN); + strlcpy(op->u.get_para.scaling_driver, cpufreq_driver.name, + CPUFREQ_NAME_LEN); else strlcpy(op->u.get_para.scaling_driver, "Unknown", CPUFREQ_NAME_LEN); if ( policy->governor->name[0] ) - strlcpy(op->u.get_para.scaling_governor, - policy->governor->name, CPUFREQ_NAME_LEN); + strlcpy(op->u.get_para.scaling_governor, policy->governor->name, + CPUFREQ_NAME_LEN); else strlcpy(op->u.get_para.scaling_governor, "Unknown", CPUFREQ_NAME_LEN); /* governor specific para */ - if ( !strnicmp(op->u.get_para.scaling_governor, - "userspace", CPUFREQ_NAME_LEN) ) + if ( !strnicmp(op->u.get_para.scaling_governor, "userspace", + CPUFREQ_NAME_LEN) ) { op->u.get_para.u.userspace.scaling_setspeed = policy->cur; } - if ( !strnicmp(op->u.get_para.scaling_governor, - "ondemand", CPUFREQ_NAME_LEN) ) + if ( !strnicmp(op->u.get_para.scaling_governor, "ondemand", + CPUFREQ_NAME_LEN) ) { ret = get_cpufreq_ondemand_para( &op->u.get_para.u.ondemand.sampling_rate_max, @@ -306,7 +307,7 @@ static int set_cpufreq_gov(struct xen_sysctl_pm_op *op) memcpy(&new_policy, old_policy, sizeof(struct cpufreq_policy)); new_policy.governor = __find_governor(op->u.set_gov.scaling_governor); - if (new_policy.governor == NULL) + if ( new_policy.governor == NULL ) return -EINVAL; return __cpufreq_set_policy(old_policy, &new_policy); @@ -322,7 +323,7 @@ static int set_cpufreq_para(struct xen_sysctl_pm_op *op) if ( !policy || !policy->governor ) return -EINVAL; - switch(op->u.set_para.ctrl_type) + switch (op->u.set_para.ctrl_type) { case SCALING_MAX_FREQ: { @@ -348,10 +349,9 @@ static int set_cpufreq_para(struct xen_sysctl_pm_op *op) case SCALING_SETSPEED: { - unsigned int freq =op->u.set_para.ctrl_value; + unsigned int freq = op->u.set_para.ctrl_value; - if ( !strnicmp(policy->governor->name, - "userspace", CPUFREQ_NAME_LEN) ) + if ( !strnicmp(policy->governor->name, "userspace", CPUFREQ_NAME_LEN) ) ret = write_userspace_scaling_setspeed(op->cpuid, freq); else ret = -EINVAL; @@ -363,8 +363,7 @@ static int set_cpufreq_para(struct xen_sysctl_pm_op *op) { unsigned int sampling_rate = op->u.set_para.ctrl_value; - if ( !strnicmp(policy->governor->name, - "ondemand", CPUFREQ_NAME_LEN) ) + if ( 
!strnicmp(policy->governor->name, "ondemand", CPUFREQ_NAME_LEN) ) ret = write_ondemand_sampling_rate(sampling_rate); else ret = -EINVAL; @@ -376,8 +375,7 @@ static int set_cpufreq_para(struct xen_sysctl_pm_op *op) { unsigned int up_threshold = op->u.set_para.ctrl_value; - if ( !strnicmp(policy->governor->name, - "ondemand", CPUFREQ_NAME_LEN) ) + if ( !strnicmp(policy->governor->name, "ondemand", CPUFREQ_NAME_LEN) ) ret = write_ondemand_up_threshold(up_threshold); else ret = -EINVAL; @@ -402,7 +400,7 @@ int do_pm_op(struct xen_sysctl_pm_op *op) return -EINVAL; pmpt = processor_pminfo[op->cpuid]; - switch ( op->cmd & PM_PARA_CATEGORY_MASK ) + switch (op->cmd & PM_PARA_CATEGORY_MASK) { case CPUFREQ_PARA: if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_PX) ) @@ -412,7 +410,7 @@ int do_pm_op(struct xen_sysctl_pm_op *op) break; } - switch ( op->cmd ) + switch (op->cmd) { case GET_CPUFREQ_PARA: { @@ -504,7 +502,8 @@ int acpi_set_pdc_bits(u32 acpi_id, XEN_GUEST_HANDLE_PARAM(uint32) pdc) if ( xen_processor_pmbits & XEN_PROCESSOR_PM_TX ) mask |= ACPI_PDC_T_MASK | ACPI_PDC_SMP_C1PT; bits[2] &= (ACPI_PDC_C_MASK | ACPI_PDC_P_MASK | ACPI_PDC_T_MASK | - ACPI_PDC_SMP_C1PT) & ~mask; + ACPI_PDC_SMP_C1PT) & + ~mask; ret = arch_acpi_set_pdc_bits(acpi_id, bits, mask); } if ( !ret && __copy_to_guest_offset(pdc, 2, bits + 2, 1) ) diff --git a/xen/drivers/acpi/reboot.c b/xen/drivers/acpi/reboot.c index 72d06fd8e5..97349348cb 100644 --- a/xen/drivers/acpi/reboot.c +++ b/xen/drivers/acpi/reboot.c @@ -4,35 +4,33 @@ void acpi_reboot(void) { - struct acpi_generic_address *rr; - u8 reset_value; + struct acpi_generic_address *rr; + u8 reset_value; - rr = &acpi_gbl_FADT.reset_register; + rr = &acpi_gbl_FADT.reset_register; - /* Is the reset register supported? The spec says we should be - * checking the bit width and bit offset, but Windows ignores - * these fields */ - if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)) - return; + /* Is the reset register supported? The spec says we should be + * checking the bit width and bit offset, but Windows ignores + * these fields */ + if ( !(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) ) + return; - reset_value = acpi_gbl_FADT.reset_value; + reset_value = acpi_gbl_FADT.reset_value; - /* The reset register can only exist in I/O, Memory or PCI config space - * on a device on bus 0. */ - switch (rr->space_id) { - case ACPI_ADR_SPACE_PCI_CONFIG: - printk("Resetting with ACPI PCI RESET_REG.\n"); - /* Write the value that resets us. */ - pci_conf_write8(0, 0, - (rr->address >> 32) & 31, - (rr->address >> 16) & 7, - (rr->address & 255), - reset_value); - break; - case ACPI_ADR_SPACE_SYSTEM_MEMORY: - case ACPI_ADR_SPACE_SYSTEM_IO: - printk("Resetting with ACPI MEMORY or I/O RESET_REG.\n"); - acpi_hw_low_level_write(8, reset_value, rr); - break; - } + /* The reset register can only exist in I/O, Memory or PCI config space + * on a device on bus 0. */ + switch (rr->space_id) + { + case ACPI_ADR_SPACE_PCI_CONFIG: + printk("Resetting with ACPI PCI RESET_REG.\n"); + /* Write the value that resets us. 
*/ + pci_conf_write8(0, 0, (rr->address >> 32) & 31, (rr->address >> 16) & 7, + (rr->address & 255), reset_value); + break; + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + case ACPI_ADR_SPACE_SYSTEM_IO: + printk("Resetting with ACPI MEMORY or I/O RESET_REG.\n"); + acpi_hw_low_level_write(8, reset_value, rr); + break; + } } diff --git a/xen/drivers/acpi/tables.c b/xen/drivers/acpi/tables.c index b890b73901..c3ad28b7ea 100644 --- a/xen/drivers/acpi/tables.c +++ b/xen/drivers/acpi/tables.c @@ -31,353 +31,347 @@ #include #include -#define PREFIX "ACPI: " +#define PREFIX "ACPI: " -#define ACPI_MAX_TABLES 128 +#define ACPI_MAX_TABLES 128 -static const char *__initdata -mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" }; -static const char *__initdata -mps_inti_flags_trigger[] = { "dfl", "edge", "res", "level" }; +static const char *__initdata mps_inti_flags_polarity[] = {"dfl", "high", "res", + "low"}; +static const char *__initdata mps_inti_flags_trigger[] = {"dfl", "edge", "res", + "level"}; static int acpi_apic_instance __initdata; void __init acpi_table_print_madt_entry(struct acpi_subtable_header *header) { - if (!header) - return; - - switch (header->type) { - - case ACPI_MADT_TYPE_LOCAL_APIC: - { - struct acpi_madt_local_apic *p = - (struct acpi_madt_local_apic *)header; - printk(KERN_INFO PREFIX - "LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n", - p->processor_id, p->id, - (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled"); - } - break; - - case ACPI_MADT_TYPE_LOCAL_X2APIC: - { - struct acpi_madt_local_x2apic *p = - (struct acpi_madt_local_x2apic *)header; - printk(KERN_INFO PREFIX - "X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n", - p->local_apic_id, p->uid, - (p->lapic_flags & ACPI_MADT_ENABLED) ? - "enabled" : "disabled"); - } - break; - - case ACPI_MADT_TYPE_IO_APIC: - { - struct acpi_madt_io_apic *p = - (struct acpi_madt_io_apic *)header; - printk(KERN_INFO PREFIX - "IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n", - p->id, p->address, p->global_irq_base); - } - break; - - case ACPI_MADT_TYPE_INTERRUPT_OVERRIDE: - { - struct acpi_madt_interrupt_override *p = - (struct acpi_madt_interrupt_override *)header; - printk(KERN_INFO PREFIX - "INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n", - p->bus, p->source_irq, p->global_irq, - mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK], - mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2]); - if (p->inti_flags & - ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK)) - printk(KERN_INFO PREFIX - "INT_SRC_OVR unexpected reserved flags: %#x\n", - p->inti_flags & - ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK)); - - } - break; - - case ACPI_MADT_TYPE_NMI_SOURCE: - { - struct acpi_madt_nmi_source *p = - (struct acpi_madt_nmi_source *)header; - printk(KERN_INFO PREFIX - "NMI_SRC (%s %s global_irq %d)\n", - mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK], - mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2], - p->global_irq); - } - break; - - case ACPI_MADT_TYPE_LOCAL_APIC_NMI: - { - struct acpi_madt_local_apic_nmi *p = - (struct acpi_madt_local_apic_nmi *)header; - printk(KERN_INFO PREFIX - "LAPIC_NMI (acpi_id[0x%02x] %s %s lint[%#x])\n", - p->processor_id, - mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK ], - mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2], - p->lint); - } - break; - - case ACPI_MADT_TYPE_LOCAL_X2APIC_NMI: - { - u16 polarity, trigger; - struct acpi_madt_local_x2apic_nmi *p = - (struct 
acpi_madt_local_x2apic_nmi *)header; - - polarity = p->inti_flags & ACPI_MADT_POLARITY_MASK; - trigger = (p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2; - - printk(KERN_INFO PREFIX - "X2APIC_NMI (uid[0x%02x] %s %s lint[%#x])\n", - p->uid, - mps_inti_flags_polarity[polarity], - mps_inti_flags_trigger[trigger], - p->lint); - } - break; - - case ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE: - { - struct acpi_madt_local_apic_override *p = - (struct acpi_madt_local_apic_override *)header; - printk(KERN_INFO PREFIX - "LAPIC_ADDR_OVR (address[%p])\n", - (void *)(unsigned long)p->address); - } - break; - - case ACPI_MADT_TYPE_IO_SAPIC: - { - struct acpi_madt_io_sapic *p = - (struct acpi_madt_io_sapic *)header; - printk(KERN_INFO PREFIX - "IOSAPIC (id[%#x] address[%p] gsi_base[%d])\n", - p->id, (void *)(unsigned long)p->address, - p->global_irq_base); - } - break; - - case ACPI_MADT_TYPE_LOCAL_SAPIC: - { - struct acpi_madt_local_sapic *p = - (struct acpi_madt_local_sapic *)header; - printk(KERN_INFO PREFIX - "LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n", - p->processor_id, p->id, p->eid, - (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled"); - } - break; - - case ACPI_MADT_TYPE_INTERRUPT_SOURCE: - { - struct acpi_madt_interrupt_source *p = - (struct acpi_madt_interrupt_source *)header; - printk(KERN_INFO PREFIX - "PLAT_INT_SRC (%s %s type[%#x] id[0x%04x] eid[%#x] iosapic_vector[%#x] global_irq[%#x]\n", - mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK], - mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2], - p->type, p->id, p->eid, p->io_sapic_vector, - p->global_irq); - } - break; - - case ACPI_MADT_TYPE_GENERIC_INTERRUPT: - { - struct acpi_madt_generic_interrupt *p = - container_of(header, struct acpi_madt_generic_interrupt, header); - - printk(KERN_DEBUG PREFIX - "GICC (acpi_id[0x%04x] address[0x%"PRIx64"] MPIDR[0x%"PRIx64"] %s)\n", - p->uid, p->base_address, - p->arm_mpidr, - (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled"); - - } - break; - - case ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR: - { - struct acpi_madt_generic_distributor *p = - container_of(header, struct acpi_madt_generic_distributor, header); - - printk(KERN_DEBUG PREFIX - "GIC Distributor (gic_id[0x%04x] address[0x%"PRIx64"] gsi_base[%d])\n", - p->gic_id, p->base_address, - p->global_irq_base); - } - break; - - default: - printk(KERN_WARNING PREFIX - "Found unsupported MADT entry (type = %#x)\n", - header->type); - break; - } + if ( !header ) + return; + + switch (header->type) + { + case ACPI_MADT_TYPE_LOCAL_APIC: + { + struct acpi_madt_local_apic *p = (struct acpi_madt_local_apic *)header; + printk(KERN_INFO PREFIX "LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n", + p->processor_id, p->id, + (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled"); + } + break; + + case ACPI_MADT_TYPE_LOCAL_X2APIC: + { + struct acpi_madt_local_x2apic *p = + (struct acpi_madt_local_x2apic *)header; + printk(KERN_INFO PREFIX "X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n", + p->local_apic_id, p->uid, + (p->lapic_flags & ACPI_MADT_ENABLED) ? 
"enabled" : "disabled"); + } + break; + + case ACPI_MADT_TYPE_IO_APIC: + { + struct acpi_madt_io_apic *p = (struct acpi_madt_io_apic *)header; + printk(KERN_INFO PREFIX + "IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n", + p->id, p->address, p->global_irq_base); + } + break; + + case ACPI_MADT_TYPE_INTERRUPT_OVERRIDE: + { + struct acpi_madt_interrupt_override *p = + (struct acpi_madt_interrupt_override *)header; + printk( + KERN_INFO PREFIX + "INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n", + p->bus, p->source_irq, p->global_irq, + mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK], + mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> + 2]); + if ( p->inti_flags & + ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK) ) + printk(KERN_INFO PREFIX + "INT_SRC_OVR unexpected reserved flags: %#x\n", + p->inti_flags & + ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK)); + } + break; + + case ACPI_MADT_TYPE_NMI_SOURCE: + { + struct acpi_madt_nmi_source *p = (struct acpi_madt_nmi_source *)header; + printk( + KERN_INFO PREFIX "NMI_SRC (%s %s global_irq %d)\n", + mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK], + mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> + 2], + p->global_irq); + } + break; + + case ACPI_MADT_TYPE_LOCAL_APIC_NMI: + { + struct acpi_madt_local_apic_nmi *p = + (struct acpi_madt_local_apic_nmi *)header; + printk( + KERN_INFO PREFIX "LAPIC_NMI (acpi_id[0x%02x] %s %s lint[%#x])\n", + p->processor_id, + mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK], + mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> + 2], + p->lint); + } + break; + + case ACPI_MADT_TYPE_LOCAL_X2APIC_NMI: + { + u16 polarity, trigger; + struct acpi_madt_local_x2apic_nmi *p = + (struct acpi_madt_local_x2apic_nmi *)header; + + polarity = p->inti_flags & ACPI_MADT_POLARITY_MASK; + trigger = (p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2; + + printk(KERN_INFO PREFIX "X2APIC_NMI (uid[0x%02x] %s %s lint[%#x])\n", + p->uid, mps_inti_flags_polarity[polarity], + mps_inti_flags_trigger[trigger], p->lint); + } + break; + + case ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE: + { + struct acpi_madt_local_apic_override *p = + (struct acpi_madt_local_apic_override *)header; + printk(KERN_INFO PREFIX "LAPIC_ADDR_OVR (address[%p])\n", + (void *)(unsigned long)p->address); + } + break; + + case ACPI_MADT_TYPE_IO_SAPIC: + { + struct acpi_madt_io_sapic *p = (struct acpi_madt_io_sapic *)header; + printk(KERN_INFO PREFIX "IOSAPIC (id[%#x] address[%p] gsi_base[%d])\n", + p->id, (void *)(unsigned long)p->address, p->global_irq_base); + } + break; + + case ACPI_MADT_TYPE_LOCAL_SAPIC: + { + struct acpi_madt_local_sapic *p = + (struct acpi_madt_local_sapic *)header; + printk(KERN_INFO PREFIX "LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] " + "lsapic_eid[0x%02x] %s)\n", + p->processor_id, p->id, p->eid, + (p->lapic_flags & ACPI_MADT_ENABLED) ? 
"enabled" : "disabled"); + } + break; + + case ACPI_MADT_TYPE_INTERRUPT_SOURCE: + { + struct acpi_madt_interrupt_source *p = + (struct acpi_madt_interrupt_source *)header; + printk( + KERN_INFO PREFIX "PLAT_INT_SRC (%s %s type[%#x] id[0x%04x] " + "eid[%#x] iosapic_vector[%#x] global_irq[%#x]\n", + mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK], + mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> + 2], + p->type, p->id, p->eid, p->io_sapic_vector, p->global_irq); + } + break; + + case ACPI_MADT_TYPE_GENERIC_INTERRUPT: + { + struct acpi_madt_generic_interrupt *p = + container_of(header, struct acpi_madt_generic_interrupt, header); + + printk(KERN_DEBUG PREFIX "GICC (acpi_id[0x%04x] address[0x%" PRIx64 + "] MPIDR[0x%" PRIx64 "] %s)\n", + p->uid, p->base_address, p->arm_mpidr, + (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled"); + } + break; + + case ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR: + { + struct acpi_madt_generic_distributor *p = + container_of(header, struct acpi_madt_generic_distributor, header); + + printk(KERN_DEBUG PREFIX + "GIC Distributor (gic_id[0x%04x] address[0x%" PRIx64 + "] gsi_base[%d])\n", + p->gic_id, p->base_address, p->global_irq_base); + } + break; + + default: + printk(KERN_WARNING PREFIX + "Found unsupported MADT entry (type = %#x)\n", + header->type); + break; + } } -static struct acpi_subtable_header * __init +static struct acpi_subtable_header *__init acpi_get_entry(const char *id, unsigned long table_size, - const struct acpi_table_header *table_header, - enum acpi_madt_type entry_id, unsigned int entry_index) + const struct acpi_table_header *table_header, + enum acpi_madt_type entry_id, unsigned int entry_index) { - struct acpi_subtable_header *entry; - int count = 0; - unsigned long table_end; - - if (!table_size) - return NULL; - - if (!table_header) { - printk(KERN_WARNING PREFIX "%4.4s not present\n", id); - return NULL; - } - - table_end = (unsigned long)table_header + table_header->length; - - /* Parse all entries looking for a match. */ - entry = (void *)table_header + table_size; - - while ((unsigned long)(entry + 1) < table_end) { - if (entry->length < sizeof(*entry)) { - printk(KERN_ERR PREFIX "[%4.4s:%#x] Invalid length\n", - id, entry_id); - return NULL; - } - - if (entry->type == entry_id) { - if (count == entry_index) - return entry; - count++; - } - - entry = (void *)entry + entry->length; - } - - return NULL; + struct acpi_subtable_header *entry; + int count = 0; + unsigned long table_end; + + if ( !table_size ) + return NULL; + + if ( !table_header ) + { + printk(KERN_WARNING PREFIX "%4.4s not present\n", id); + return NULL; + } + + table_end = (unsigned long)table_header + table_header->length; + + /* Parse all entries looking for a match. 
*/ + entry = (void *)table_header + table_size; + + while ( (unsigned long)(entry + 1) < table_end ) + { + if ( entry->length < sizeof(*entry) ) + { + printk(KERN_ERR PREFIX "[%4.4s:%#x] Invalid length\n", id, + entry_id); + return NULL; + } + + if ( entry->type == entry_id ) + { + if ( count == entry_index ) + return entry; + count++; + } + + entry = (void *)entry + entry->length; + } + + return NULL; } -struct acpi_subtable_header * __init -acpi_table_get_entry_madt(enum acpi_madt_type entry_id, - unsigned int entry_index) +struct acpi_subtable_header *__init acpi_table_get_entry_madt( + enum acpi_madt_type entry_id, unsigned int entry_index) { - struct acpi_table_header *table_header; - acpi_status status; - - status = acpi_get_table(ACPI_SIG_MADT, acpi_apic_instance, - &table_header); - if (ACPI_FAILURE(status)) { - printk(KERN_WARNING PREFIX "%4.4s not present\n", - ACPI_SIG_MADT); - return NULL; - } - - return acpi_get_entry(ACPI_SIG_MADT, sizeof(struct acpi_table_madt), - table_header, entry_id, entry_index); + struct acpi_table_header *table_header; + acpi_status status; + + status = acpi_get_table(ACPI_SIG_MADT, acpi_apic_instance, &table_header); + if ( ACPI_FAILURE(status) ) + { + printk(KERN_WARNING PREFIX "%4.4s not present\n", ACPI_SIG_MADT); + return NULL; + } + + return acpi_get_entry(ACPI_SIG_MADT, sizeof(struct acpi_table_madt), + table_header, entry_id, entry_index); } -int __init -acpi_parse_entries(char *id, unsigned long table_size, - acpi_table_entry_handler handler, - struct acpi_table_header *table_header, - int entry_id, unsigned int max_entries) +int __init acpi_parse_entries(char *id, unsigned long table_size, + acpi_table_entry_handler handler, + struct acpi_table_header *table_header, + int entry_id, unsigned int max_entries) { - struct acpi_subtable_header *entry; - int count = 0; - unsigned long table_end; - - if (acpi_disabled) - return -ENODEV; - - if (!id || !handler) - return -EINVAL; - - if (!table_size) - return -EINVAL; - - if (!table_header) { - printk(KERN_WARNING PREFIX "%4.4s not present\n", id); - return -ENODEV; - } - - table_end = (unsigned long)table_header + table_header->length; - - /* Parse all entries looking for a match. */ - - entry = (struct acpi_subtable_header *) - ((unsigned long)table_header + table_size); - - while (((unsigned long)entry) + sizeof(struct acpi_subtable_header) < - table_end) { - if (entry->length < sizeof(*entry)) { - printk(KERN_ERR PREFIX "[%4.4s:%#x] Invalid length\n", - id, entry_id); - return -ENODATA; - } - - if (entry->type == entry_id - && (!max_entries || count < max_entries)) { - if (handler(entry, table_end)) - return -EINVAL; - - count++; - } - - entry = (struct acpi_subtable_header *) - ((unsigned long)entry + entry->length); - } - - if (max_entries && count > max_entries) { - printk(KERN_WARNING PREFIX "[%4.4s:%#x] ignored %i entries of " - "%i found\n", id, entry_id, count - max_entries, count); - } - - return count; + struct acpi_subtable_header *entry; + int count = 0; + unsigned long table_end; + + if ( acpi_disabled ) + return -ENODEV; + + if ( !id || !handler ) + return -EINVAL; + + if ( !table_size ) + return -EINVAL; + + if ( !table_header ) + { + printk(KERN_WARNING PREFIX "%4.4s not present\n", id); + return -ENODEV; + } + + table_end = (unsigned long)table_header + table_header->length; + + /* Parse all entries looking for a match. 
*/ + + entry = (struct acpi_subtable_header *)((unsigned long)table_header + + table_size); + + while ( ((unsigned long)entry) + sizeof(struct acpi_subtable_header) < + table_end ) + { + if ( entry->length < sizeof(*entry) ) + { + printk(KERN_ERR PREFIX "[%4.4s:%#x] Invalid length\n", id, + entry_id); + return -ENODATA; + } + + if ( entry->type == entry_id && (!max_entries || count < max_entries) ) + { + if ( handler(entry, table_end) ) + return -EINVAL; + + count++; + } + + entry = (struct acpi_subtable_header *)((unsigned long)entry + + entry->length); + } + + if ( max_entries && count > max_entries ) + { + printk(KERN_WARNING PREFIX "[%4.4s:%#x] ignored %i entries of " + "%i found\n", + id, entry_id, count - max_entries, count); + } + + return count; } -int __init -acpi_table_parse_entries(char *id, - unsigned long table_size, - int entry_id, - acpi_table_entry_handler handler, - unsigned int max_entries) +int __init acpi_table_parse_entries(char *id, unsigned long table_size, + int entry_id, + acpi_table_entry_handler handler, + unsigned int max_entries) { - struct acpi_table_header *table_header = NULL; - u32 instance = 0; + struct acpi_table_header *table_header = NULL; + u32 instance = 0; - if (acpi_disabled) - return -ENODEV; + if ( acpi_disabled ) + return -ENODEV; - if (!id || !handler) - return -EINVAL; + if ( !id || !handler ) + return -EINVAL; - if (!strncmp(id, ACPI_SIG_MADT, 4)) - instance = acpi_apic_instance; + if ( !strncmp(id, ACPI_SIG_MADT, 4) ) + instance = acpi_apic_instance; - acpi_get_table(id, instance, &table_header); - if (!table_header) { - printk(KERN_WARNING PREFIX "%4.4s not present\n", id); - return -ENODEV; - } + acpi_get_table(id, instance, &table_header); + if ( !table_header ) + { + printk(KERN_WARNING PREFIX "%4.4s not present\n", id); + return -ENODEV; + } - return acpi_parse_entries(id, table_size, handler, table_header, - entry_id, max_entries); + return acpi_parse_entries(id, table_size, handler, table_header, entry_id, + max_entries); } -int __init -acpi_table_parse_madt(enum acpi_madt_type id, - acpi_table_entry_handler handler, unsigned int max_entries) +int __init acpi_table_parse_madt(enum acpi_madt_type id, + acpi_table_entry_handler handler, + unsigned int max_entries) { - return acpi_table_parse_entries(ACPI_SIG_MADT, - sizeof(struct acpi_table_madt), id, - handler, max_entries); + return acpi_table_parse_entries(ACPI_SIG_MADT, + sizeof(struct acpi_table_madt), id, handler, + max_entries); } /** @@ -391,48 +385,50 @@ acpi_table_parse_madt(enum acpi_madt_type id, */ int __init acpi_table_parse(char *id, acpi_table_handler handler) { - struct acpi_table_header *table = NULL; + struct acpi_table_header *table = NULL; - if (acpi_disabled) - return -ENODEV; + if ( acpi_disabled ) + return -ENODEV; - if (!handler) - return -EINVAL; + if ( !handler ) + return -EINVAL; - if (strncmp(id, ACPI_SIG_MADT, 4) == 0) - acpi_get_table(id, acpi_apic_instance, &table); - else - acpi_get_table(id, 0, &table); + if ( strncmp(id, ACPI_SIG_MADT, 4) == 0 ) + acpi_get_table(id, acpi_apic_instance, &table); + else + acpi_get_table(id, 0, &table); - if (table) { - return handler(table); - } else - return -ENODEV; + if ( table ) + { + return handler(table); + } + else + return -ENODEV; } -/* +/* * The BIOS is supposed to supply a single APIC/MADT, * but some report two. Provide a knob to use either. * (don't you wish instance 0 and 1 were not the same?) 
*/ static void __init check_multiple_madt(void) { - struct acpi_table_header *table = NULL; - - acpi_get_table(ACPI_SIG_MADT, 2, &table); - if (table) { - printk(KERN_WARNING PREFIX - "BIOS bug: multiple APIC/MADT found," - " using %d\n", acpi_apic_instance); - printk(KERN_WARNING PREFIX - "If \"acpi_apic_instance=%d\" works better, " - "notify linux-acpi@vger.kernel.org\n", - acpi_apic_instance ? 0 : 2); - - } else - acpi_apic_instance = 0; - - return; + struct acpi_table_header *table = NULL; + + acpi_get_table(ACPI_SIG_MADT, 2, &table); + if ( table ) + { + printk(KERN_WARNING PREFIX "BIOS bug: multiple APIC/MADT found," + " using %d\n", + acpi_apic_instance); + printk(KERN_WARNING PREFIX "If \"acpi_apic_instance=%d\" works better, " + "notify linux-acpi@vger.kernel.org\n", + acpi_apic_instance ? 0 : 2); + } + else + acpi_apic_instance = 0; + + return; } /* @@ -446,25 +442,25 @@ static void __init check_multiple_madt(void) int __init acpi_table_init(void) { - acpi_status status; + acpi_status status; - status = acpi_initialize_tables(NULL, ACPI_MAX_TABLES, 0); - if (ACPI_FAILURE(status)) - return -EINVAL; + status = acpi_initialize_tables(NULL, ACPI_MAX_TABLES, 0); + if ( ACPI_FAILURE(status) ) + return -EINVAL; - check_multiple_madt(); - return 0; + check_multiple_madt(); + return 0; } static int __init acpi_parse_apic_instance(const char *str) { - const char *q; + const char *q; - acpi_apic_instance = simple_strtoul(str, &q, 0); + acpi_apic_instance = simple_strtoul(str, &q, 0); - printk(KERN_NOTICE PREFIX "Shall use APIC/MADT table %d\n", - acpi_apic_instance); + printk(KERN_NOTICE PREFIX "Shall use APIC/MADT table %d\n", + acpi_apic_instance); - return *q ? -EINVAL : 0; + return *q ? -EINVAL : 0; } custom_param("acpi_apic_instance", acpi_parse_apic_instance); diff --git a/xen/drivers/acpi/tables/tbfadt.c b/xen/drivers/acpi/tables/tbfadt.c index f11fd5a900..3f1350124a 100644 --- a/xen/drivers/acpi/tables/tbfadt.c +++ b/xen/drivers/acpi/tables/tbfadt.c @@ -45,13 +45,12 @@ #include #include -#define _COMPONENT ACPI_TABLES +#define _COMPONENT ACPI_TABLES ACPI_MODULE_NAME("tbfadt") /* Local prototypes */ -static void inline -acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, - u8 bit_width, u64 address); +static void inline acpi_tb_init_generic_address( + struct acpi_generic_address *generic_address, u8 bit_width, u64 address); static void acpi_tb_convert_fadt(void); @@ -59,55 +58,53 @@ static void acpi_tb_validate_fadt(void); /* Table for conversion of FADT to common internal format and FADT validation */ -typedef struct acpi_fadt_info { - char *name; - u16 target; - u16 source; - u16 length; - u8 type; +typedef struct acpi_fadt_info +{ + char *name; + u16 target; + u16 source; + u16 length; + u8 type; } acpi_fadt_info; -#define ACPI_FADT_OPTIONAL 0 -#define ACPI_FADT_REQUIRED 1 -#define ACPI_FADT_SEPARATE_LENGTH 2 +#define ACPI_FADT_OPTIONAL 0 +#define ACPI_FADT_REQUIRED 1 +#define ACPI_FADT_SEPARATE_LENGTH 2 static struct acpi_fadt_info __initdata fadt_info_table[] = { - {"Pm1aEventBlock", ACPI_FADT_OFFSET(xpm1a_event_block), - ACPI_FADT_OFFSET(pm1a_event_block), - ACPI_FADT_OFFSET(pm1_event_length), ACPI_FADT_REQUIRED}, + {"Pm1aEventBlock", ACPI_FADT_OFFSET(xpm1a_event_block), + ACPI_FADT_OFFSET(pm1a_event_block), ACPI_FADT_OFFSET(pm1_event_length), + ACPI_FADT_REQUIRED}, - {"Pm1bEventBlock", ACPI_FADT_OFFSET(xpm1b_event_block), - ACPI_FADT_OFFSET(pm1b_event_block), - ACPI_FADT_OFFSET(pm1_event_length), ACPI_FADT_OPTIONAL}, + {"Pm1bEventBlock", 
ACPI_FADT_OFFSET(xpm1b_event_block), + ACPI_FADT_OFFSET(pm1b_event_block), ACPI_FADT_OFFSET(pm1_event_length), + ACPI_FADT_OPTIONAL}, - {"Pm1aControlBlock", ACPI_FADT_OFFSET(xpm1a_control_block), - ACPI_FADT_OFFSET(pm1a_control_block), - ACPI_FADT_OFFSET(pm1_control_length), ACPI_FADT_REQUIRED}, + {"Pm1aControlBlock", ACPI_FADT_OFFSET(xpm1a_control_block), + ACPI_FADT_OFFSET(pm1a_control_block), ACPI_FADT_OFFSET(pm1_control_length), + ACPI_FADT_REQUIRED}, - {"Pm1bControlBlock", ACPI_FADT_OFFSET(xpm1b_control_block), - ACPI_FADT_OFFSET(pm1b_control_block), - ACPI_FADT_OFFSET(pm1_control_length), ACPI_FADT_OPTIONAL}, + {"Pm1bControlBlock", ACPI_FADT_OFFSET(xpm1b_control_block), + ACPI_FADT_OFFSET(pm1b_control_block), ACPI_FADT_OFFSET(pm1_control_length), + ACPI_FADT_OPTIONAL}, - {"Pm2ControlBlock", ACPI_FADT_OFFSET(xpm2_control_block), - ACPI_FADT_OFFSET(pm2_control_block), - ACPI_FADT_OFFSET(pm2_control_length), ACPI_FADT_SEPARATE_LENGTH}, + {"Pm2ControlBlock", ACPI_FADT_OFFSET(xpm2_control_block), + ACPI_FADT_OFFSET(pm2_control_block), ACPI_FADT_OFFSET(pm2_control_length), + ACPI_FADT_SEPARATE_LENGTH}, - {"PmTimerBlock", ACPI_FADT_OFFSET(xpm_timer_block), - ACPI_FADT_OFFSET(pm_timer_block), - ACPI_FADT_OFFSET(pm_timer_length), - ACPI_FADT_SEPARATE_LENGTH}, /* ACPI 5.0A: Timer is optional */ + {"PmTimerBlock", ACPI_FADT_OFFSET(xpm_timer_block), + ACPI_FADT_OFFSET(pm_timer_block), ACPI_FADT_OFFSET(pm_timer_length), + ACPI_FADT_SEPARATE_LENGTH}, /* ACPI 5.0A: Timer is optional */ - {"Gpe0Block", ACPI_FADT_OFFSET(xgpe0_block), - ACPI_FADT_OFFSET(gpe0_block), - ACPI_FADT_OFFSET(gpe0_block_length), ACPI_FADT_SEPARATE_LENGTH}, + {"Gpe0Block", ACPI_FADT_OFFSET(xgpe0_block), ACPI_FADT_OFFSET(gpe0_block), + ACPI_FADT_OFFSET(gpe0_block_length), ACPI_FADT_SEPARATE_LENGTH}, - {"Gpe1Block", ACPI_FADT_OFFSET(xgpe1_block), - ACPI_FADT_OFFSET(gpe1_block), - ACPI_FADT_OFFSET(gpe1_block_length), ACPI_FADT_SEPARATE_LENGTH} -}; + {"Gpe1Block", ACPI_FADT_OFFSET(xgpe1_block), ACPI_FADT_OFFSET(gpe1_block), + ACPI_FADT_OFFSET(gpe1_block_length), ACPI_FADT_SEPARATE_LENGTH}}; -#define ACPI_FADT_INFO_ENTRIES (sizeof (fadt_info_table) / sizeof (struct acpi_fadt_info)) +#define ACPI_FADT_INFO_ENTRIES \ + (sizeof(fadt_info_table) / sizeof(struct acpi_fadt_info)) /******************************************************************************* * @@ -125,23 +122,21 @@ static struct acpi_fadt_info __initdata fadt_info_table[] = { * ******************************************************************************/ -static void inline -acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, - u8 bit_width, u64 address) +static void inline acpi_tb_init_generic_address( + struct acpi_generic_address *generic_address, u8 bit_width, u64 address) { - - /* - * The 64-bit Address field is non-aligned in the byte packed - * GAS struct. - */ - ACPI_MOVE_64_TO_64(&generic_address->address, &address); - - /* All other fields are byte-wide */ - - generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO; - generic_address->bit_width = bit_width; - generic_address->bit_offset = 0; - generic_address->access_width = 0; + /* + * The 64-bit Address field is non-aligned in the byte packed + * GAS struct. 
+ */ + ACPI_MOVE_64_TO_64(&generic_address->address, &address); + + /* All other fields are byte-wide */ + + generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO; + generic_address->bit_width = bit_width; + generic_address->bit_offset = 0; + generic_address->access_width = 0; } /******************************************************************************* @@ -160,51 +155,51 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, void __init acpi_tb_parse_fadt(acpi_native_uint table_index, u8 flags) { - u32 length; - struct acpi_table_header *table; + u32 length; + struct acpi_table_header *table; - /* - * The FADT has multiple versions with different lengths, - * and it contains pointers to both the DSDT and FACS tables. - * - * Get a local copy of the FADT and convert it to a common format - * Map entire FADT, assumed to be smaller than one page. - */ - length = acpi_gbl_root_table_list.tables[table_index].length; + /* + * The FADT has multiple versions with different lengths, + * and it contains pointers to both the DSDT and FACS tables. + * + * Get a local copy of the FADT and convert it to a common format + * Map entire FADT, assumed to be smaller than one page. + */ + length = acpi_gbl_root_table_list.tables[table_index].length; - table = - acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index]. - address, length); - if (!table) { - return; - } + table = acpi_os_map_memory( + acpi_gbl_root_table_list.tables[table_index].address, length); + if ( !table ) + { + return; + } - /* - * Validate the FADT checksum before we copy the table. Ignore - * checksum error as we want to try to get the DSDT and FACS. - */ - (void)acpi_tb_verify_checksum(table, length); + /* + * Validate the FADT checksum before we copy the table. Ignore + * checksum error as we want to try to get the DSDT and FACS. + */ + (void)acpi_tb_verify_checksum(table, length); - /* Obtain a local copy of the FADT in common ACPI 2.0+ format */ + /* Obtain a local copy of the FADT in common ACPI 2.0+ format */ - acpi_tb_create_local_fadt(table, length); + acpi_tb_create_local_fadt(table, length); - /* All done with the real FADT, unmap it */ + /* All done with the real FADT, unmap it */ - acpi_os_unmap_memory(table, length); + acpi_os_unmap_memory(table, length); - /* Obtain the DSDT and FACS tables via their addresses within the FADT */ + /* Obtain the DSDT and FACS tables via their addresses within the FADT */ - acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt, - flags, ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT); + acpi_tb_install_table((acpi_physical_address)acpi_gbl_FADT.Xdsdt, flags, + ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT); - /* If Hardware Reduced flag is set, there is no FACS */ + /* If Hardware Reduced flag is set, there is no FACS */ - if (!acpi_gbl_reduced_hardware) { - acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT. 
- Xfacs, flags, ACPI_SIG_FACS, - ACPI_TABLE_INDEX_FACS); - } + if ( !acpi_gbl_reduced_hardware ) + { + acpi_tb_install_table((acpi_physical_address)acpi_gbl_FADT.Xfacs, flags, + ACPI_SIG_FACS, ACPI_TABLE_INDEX_FACS); + } } /******************************************************************************* @@ -223,44 +218,46 @@ void __init acpi_tb_parse_fadt(acpi_native_uint table_index, u8 flags) * ******************************************************************************/ -void __init acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length) +void __init acpi_tb_create_local_fadt(struct acpi_table_header *table, + u32 length) { - - /* - * Check if the FADT is larger than the largest table that we expect - * (the ACPI 5.0 version). If so, truncate the table, and issue - * a warning. - */ - if (length > sizeof(struct acpi_table_fadt)) { - ACPI_WARNING((AE_INFO, - "FADT (revision %u) is longer than ACPI 5.0 version," - " truncating length %u to %zu", - table->revision, (unsigned)length, - sizeof(struct acpi_table_fadt))); - } - - /* Clear the entire local FADT */ - - ACPI_MEMSET(&acpi_gbl_FADT, 0, sizeof(struct acpi_table_fadt)); - - /* Copy the original FADT, up to sizeof (struct acpi_table_fadt) */ - - ACPI_MEMCPY(&acpi_gbl_FADT, table, - ACPI_MIN(length, sizeof(struct acpi_table_fadt))); - - /* Take a copy of the Hardware Reduced flag */ - - acpi_gbl_reduced_hardware = FALSE; - if (acpi_gbl_FADT.flags & ACPI_FADT_HW_REDUCED) { - acpi_gbl_reduced_hardware = TRUE; - } - - /* - * 1) Convert the local copy of the FADT to the common internal format - * 2) Validate some of the important values within the FADT - */ - acpi_tb_convert_fadt(); - acpi_tb_validate_fadt(); + /* + * Check if the FADT is larger than the largest table that we expect + * (the ACPI 5.0 version). If so, truncate the table, and issue + * a warning. 
+ */ + if ( length > sizeof(struct acpi_table_fadt) ) + { + ACPI_WARNING((AE_INFO, + "FADT (revision %u) is longer than ACPI 5.0 version," + " truncating length %u to %zu", + table->revision, (unsigned)length, + sizeof(struct acpi_table_fadt))); + } + + /* Clear the entire local FADT */ + + ACPI_MEMSET(&acpi_gbl_FADT, 0, sizeof(struct acpi_table_fadt)); + + /* Copy the original FADT, up to sizeof (struct acpi_table_fadt) */ + + ACPI_MEMCPY(&acpi_gbl_FADT, table, + ACPI_MIN(length, sizeof(struct acpi_table_fadt))); + + /* Take a copy of the Hardware Reduced flag */ + + acpi_gbl_reduced_hardware = FALSE; + if ( acpi_gbl_FADT.flags & ACPI_FADT_HW_REDUCED ) + { + acpi_gbl_reduced_hardware = TRUE; + } + + /* + * 1) Convert the local copy of the FADT to the common internal format + * 2) Validate some of the important values within the FADT + */ + acpi_tb_convert_fadt(); + acpi_tb_validate_fadt(); } /******************************************************************************* @@ -294,97 +291,93 @@ void __init acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 lengt static void __init acpi_tb_convert_fadt(void) { - u8 pm1_register_length; - struct acpi_generic_address *target; - acpi_native_uint i; - - /* Update the local FADT table header length */ - - acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt); - - /* Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary */ - - if (!acpi_gbl_FADT.Xfacs) { - acpi_gbl_FADT.Xfacs = (u64) acpi_gbl_FADT.facs; - } - - if (!acpi_gbl_FADT.Xdsdt) { - acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt; - } - - /* - * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which - * should be zero are indeed zero. This will workaround BIOSs that - * inadvertently place values in these fields. - * - * The ACPI 1.0 reserved fields that will be zeroed are the bytes located at - * offset 45, 55, 95, and the word located at offset 109, 110. - */ - if (acpi_gbl_FADT.header.revision < 3) { - acpi_gbl_FADT.preferred_profile = 0; - acpi_gbl_FADT.pstate_control = 0; - acpi_gbl_FADT.cst_control = 0; - acpi_gbl_FADT.boot_flags = 0; - } - - /* - * Expand the ACPI 1.0 32-bit V1.0 addresses to the ACPI 2.0 64-bit "X" - * generic address structures as necessary. - */ - for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) { - target = - ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT, - fadt_info_table[i].target); - - /* Expand only if the X target is null */ - - if (!target->address) { - acpi_tb_init_generic_address(target, - *ACPI_ADD_PTR(u8, - &acpi_gbl_FADT, - fadt_info_table - [i].length), - (u64) * ACPI_ADD_PTR(u32, - &acpi_gbl_FADT, - fadt_info_table - [i]. - source)); - } - } - - /* - * Calculate separate GAS structs for the PM1 Enable registers. - * These addresses do not appear (directly) in the FADT, so it is - * useful to calculate them once, here. - * - * The PM event blocks are split into two register blocks, first is the - * PM Status Register block, followed immediately by the PM Enable Register - * block. 
Each is of length (pm1_event_length/2) - */ - pm1_register_length = (u8) ACPI_DIV_2(acpi_gbl_FADT.pm1_event_length); - - /* The PM1A register block is required */ - - acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable, - pm1_register_length, - (acpi_gbl_FADT.xpm1a_event_block.address + - pm1_register_length)); - /* Don't forget to copy space_id of the GAS */ - acpi_gbl_xpm1a_enable.space_id = - acpi_gbl_FADT.xpm1a_event_block.space_id; - - /* The PM1B register block is optional, ignore if not present */ - - if (acpi_gbl_FADT.xpm1b_event_block.address) { - acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable, - pm1_register_length, - (acpi_gbl_FADT.xpm1b_event_block. - address + pm1_register_length)); - /* Don't forget to copy space_id of the GAS */ - acpi_gbl_xpm1b_enable.space_id = - acpi_gbl_FADT.xpm1a_event_block.space_id; - - } + u8 pm1_register_length; + struct acpi_generic_address *target; + acpi_native_uint i; + + /* Update the local FADT table header length */ + + acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt); + + /* Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary */ + + if ( !acpi_gbl_FADT.Xfacs ) + { + acpi_gbl_FADT.Xfacs = (u64)acpi_gbl_FADT.facs; + } + + if ( !acpi_gbl_FADT.Xdsdt ) + { + acpi_gbl_FADT.Xdsdt = (u64)acpi_gbl_FADT.dsdt; + } + + /* + * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which + * should be zero are indeed zero. This will workaround BIOSs that + * inadvertently place values in these fields. + * + * The ACPI 1.0 reserved fields that will be zeroed are the bytes located at + * offset 45, 55, 95, and the word located at offset 109, 110. + */ + if ( acpi_gbl_FADT.header.revision < 3 ) + { + acpi_gbl_FADT.preferred_profile = 0; + acpi_gbl_FADT.pstate_control = 0; + acpi_gbl_FADT.cst_control = 0; + acpi_gbl_FADT.boot_flags = 0; + } + + /* + * Expand the ACPI 1.0 32-bit V1.0 addresses to the ACPI 2.0 64-bit "X" + * generic address structures as necessary. + */ + for ( i = 0; i < ACPI_FADT_INFO_ENTRIES; i++ ) + { + target = ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT, + fadt_info_table[i].target); + + /* Expand only if the X target is null */ + + if ( !target->address ) + { + acpi_tb_init_generic_address( + target, + *ACPI_ADD_PTR(u8, &acpi_gbl_FADT, fadt_info_table[i].length), + (u64)*ACPI_ADD_PTR(u32, &acpi_gbl_FADT, + fadt_info_table[i].source)); + } + } + + /* + * Calculate separate GAS structs for the PM1 Enable registers. + * These addresses do not appear (directly) in the FADT, so it is + * useful to calculate them once, here. + * + * The PM event blocks are split into two register blocks, first is the + * PM Status Register block, followed immediately by the PM Enable Register + * block. 
Each is of length (pm1_event_length/2) + */ + pm1_register_length = (u8)ACPI_DIV_2(acpi_gbl_FADT.pm1_event_length); + + /* The PM1A register block is required */ + + acpi_tb_init_generic_address( + &acpi_gbl_xpm1a_enable, pm1_register_length, + (acpi_gbl_FADT.xpm1a_event_block.address + pm1_register_length)); + /* Don't forget to copy space_id of the GAS */ + acpi_gbl_xpm1a_enable.space_id = acpi_gbl_FADT.xpm1a_event_block.space_id; + + /* The PM1B register block is optional, ignore if not present */ + + if ( acpi_gbl_FADT.xpm1b_event_block.address ) + { + acpi_tb_init_generic_address( + &acpi_gbl_xpm1b_enable, pm1_register_length, + (acpi_gbl_FADT.xpm1b_event_block.address + pm1_register_length)); + /* Don't forget to copy space_id of the GAS */ + acpi_gbl_xpm1b_enable.space_id = + acpi_gbl_FADT.xpm1a_event_block.space_id; + } } /****************************************************************************** @@ -409,70 +402,75 @@ static void __init acpi_tb_convert_fadt(void) static void __init acpi_tb_validate_fadt(void) { - u32 *address32; - struct acpi_generic_address *address64; - u8 length; - acpi_native_uint i; - - /* If Hardware Reduced flag is set, we are all done */ - - if (acpi_gbl_reduced_hardware) { - return; - } - - /* Examine all of the 64-bit extended address fields (X fields) */ - - for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) { - - /* Generate pointers to the 32-bit and 64-bit addresses and get the length */ - - address64 = - ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT, - fadt_info_table[i].target); - address32 = - ACPI_ADD_PTR(u32, &acpi_gbl_FADT, - fadt_info_table[i].source); - length = - *ACPI_ADD_PTR(u8, &acpi_gbl_FADT, - fadt_info_table[i].length); - - if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) { - /* - * Field is required (Pm1a_event, Pm1a_control). - * Both the address and length must be non-zero. - */ - if (!address64->address || !length) { - ACPI_ERROR((AE_INFO, - "Required field \"%s\" has zero address and/or length: %8.8X%8.8X/%X", - fadt_info_table[i].name, - ACPI_FORMAT_UINT64(address64-> - address), - length)); - } - } else if (fadt_info_table[i].type & ACPI_FADT_SEPARATE_LENGTH) { - /* - * Field is optional (PM2Control, GPE0, GPE1) AND has its own - * length field. If present, both the address and length must be valid. 
- */ - if ((address64->address && !length) - || (!address64->address && length)) { - ACPI_WARNING((AE_INFO, - "Optional field \"%s\" has zero address or length: %8.8X%8.8X/%X", - fadt_info_table[i].name, - ACPI_FORMAT_UINT64(address64-> - address), - length)); - } - } - - /* If both 32- and 64-bit addresses are valid (non-zero), they must match */ - - if (address64->address && *address32 && - (address64->address != (u64) * address32)) { - ACPI_ERROR((AE_INFO, - "32/64X address mismatch in \"%s\": [%8.8X] [%8.8X%8.8X], using 64X", - fadt_info_table[i].name, *address32, - ACPI_FORMAT_UINT64(address64->address))); - } - } + u32 *address32; + struct acpi_generic_address *address64; + u8 length; + acpi_native_uint i; + + /* If Hardware Reduced flag is set, we are all done */ + + if ( acpi_gbl_reduced_hardware ) + { + return; + } + + /* Examine all of the 64-bit extended address fields (X fields) */ + + for ( i = 0; i < ACPI_FADT_INFO_ENTRIES; i++ ) + { + /* Generate pointers to the 32-bit and 64-bit addresses and get the + * length */ + + address64 = ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT, + fadt_info_table[i].target); + address32 = + ACPI_ADD_PTR(u32, &acpi_gbl_FADT, fadt_info_table[i].source); + length = *ACPI_ADD_PTR(u8, &acpi_gbl_FADT, fadt_info_table[i].length); + + if ( fadt_info_table[i].type & ACPI_FADT_REQUIRED ) + { + /* + * Field is required (Pm1a_event, Pm1a_control). + * Both the address and length must be non-zero. + */ + if ( !address64->address || !length ) + { + ACPI_ERROR((AE_INFO, + "Required field \"%s\" has zero address and/or " + "length: %8.8X%8.8X/%X", + fadt_info_table[i].name, + ACPI_FORMAT_UINT64(address64->address), length)); + } + } + else if ( fadt_info_table[i].type & ACPI_FADT_SEPARATE_LENGTH ) + { + /* + * Field is optional (PM2Control, GPE0, GPE1) AND has its own + * length field. If present, both the address and length must be + * valid. 
+ */ + if ( (address64->address && !length) || + (!address64->address && length) ) + { + ACPI_WARNING((AE_INFO, + "Optional field \"%s\" has zero address or " + "length: %8.8X%8.8X/%X", + fadt_info_table[i].name, + ACPI_FORMAT_UINT64(address64->address), length)); + } + } + + /* If both 32- and 64-bit addresses are valid (non-zero), they must + * match */ + + if ( address64->address && *address32 && + (address64->address != (u64)*address32) ) + { + ACPI_ERROR((AE_INFO, + "32/64X address mismatch in \"%s\": [%8.8X] " + "[%8.8X%8.8X], using 64X", + fadt_info_table[i].name, *address32, + ACPI_FORMAT_UINT64(address64->address))); + } + } } diff --git a/xen/drivers/acpi/tables/tbinstal.c b/xen/drivers/acpi/tables/tbinstal.c index 9ff62921b9..4718360049 100644 --- a/xen/drivers/acpi/tables/tbinstal.c +++ b/xen/drivers/acpi/tables/tbinstal.c @@ -45,7 +45,7 @@ #include #include -#define _COMPONENT ACPI_TABLES +#define _COMPONENT ACPI_TABLES ACPI_MODULE_NAME("tbinstal") /****************************************************************************** @@ -61,36 +61,37 @@ ACPI_MODULE_NAME("tbinstal") *****************************************************************************/ acpi_status __init acpi_tb_verify_table(struct acpi_table_desc *table_desc) { - acpi_status status = AE_OK; + acpi_status status = AE_OK; - ACPI_FUNCTION_TRACE(tb_verify_table); + ACPI_FUNCTION_TRACE(tb_verify_table); - /* Map the table if necessary */ + /* Map the table if necessary */ - if (!table_desc->pointer) { - if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) == - ACPI_TABLE_ORIGIN_MAPPED) { - table_desc->pointer = - acpi_os_map_memory(table_desc->address, - table_desc->length); - } - if (!table_desc->pointer) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - } + if ( !table_desc->pointer ) + { + if ( (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) == + ACPI_TABLE_ORIGIN_MAPPED ) + { + table_desc->pointer = + acpi_os_map_memory(table_desc->address, table_desc->length); + } + if ( !table_desc->pointer ) + { + return_ACPI_STATUS(AE_NO_MEMORY); + } + } - /* FACS is the odd table, has no standard ACPI header and no checksum */ + /* FACS is the odd table, has no standard ACPI header and no checksum */ - if (!ACPI_COMPARE_NAME(&table_desc->signature, ACPI_SIG_FACS)) { + if ( !ACPI_COMPARE_NAME(&table_desc->signature, ACPI_SIG_FACS) ) + { + /* Always calculate checksum, ignore bad checksum if requested */ - /* Always calculate checksum, ignore bad checksum if requested */ + status = + acpi_tb_verify_checksum(table_desc->pointer, table_desc->length); + } - status = - acpi_tb_verify_checksum(table_desc->pointer, - table_desc->length); - } - - return_ACPI_STATUS(status); + return_ACPI_STATUS(status); } /******************************************************************************* @@ -107,44 +108,46 @@ acpi_status __init acpi_tb_verify_table(struct acpi_table_desc *table_desc) acpi_status __init acpi_tb_resize_root_table_list(void) { - struct acpi_table_desc *tables; + struct acpi_table_desc *tables; - ACPI_FUNCTION_TRACE(tb_resize_root_table_list); + ACPI_FUNCTION_TRACE(tb_resize_root_table_list); - /* allow_resize flag is a parameter to acpi_initialize_tables */ + /* allow_resize flag is a parameter to acpi_initialize_tables */ - if (!(acpi_gbl_root_table_list.flags & ACPI_ROOT_ALLOW_RESIZE)) { - ACPI_ERROR((AE_INFO, - "Resize of Root Table Array is not allowed")); - return_ACPI_STATUS(AE_SUPPORT); - } + if ( !(acpi_gbl_root_table_list.flags & ACPI_ROOT_ALLOW_RESIZE) ) + { + ACPI_ERROR((AE_INFO, "Resize of Root Table Array is not 
allowed")); + return_ACPI_STATUS(AE_SUPPORT); + } - /* Increase the Table Array size */ + /* Increase the Table Array size */ - tables = ACPI_ALLOCATE_ZEROED((acpi_gbl_root_table_list.size + - ACPI_ROOT_TABLE_SIZE_INCREMENT) - * sizeof(struct acpi_table_desc)); - if (!tables) { - ACPI_ERROR((AE_INFO, - "Could not allocate new root table array")); - return_ACPI_STATUS(AE_NO_MEMORY); - } + tables = ACPI_ALLOCATE_ZEROED( + (acpi_gbl_root_table_list.size + ACPI_ROOT_TABLE_SIZE_INCREMENT) * + sizeof(struct acpi_table_desc)); + if ( !tables ) + { + ACPI_ERROR((AE_INFO, "Could not allocate new root table array")); + return_ACPI_STATUS(AE_NO_MEMORY); + } - /* Copy and free the previous table array */ + /* Copy and free the previous table array */ - if (acpi_gbl_root_table_list.tables) { - ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, - acpi_gbl_root_table_list.size * - sizeof(struct acpi_table_desc)); + if ( acpi_gbl_root_table_list.tables ) + { + ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, + acpi_gbl_root_table_list.size * + sizeof(struct acpi_table_desc)); - if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { - ACPI_FREE(acpi_gbl_root_table_list.tables); - } - } + if ( acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED ) + { + ACPI_FREE(acpi_gbl_root_table_list.tables); + } + } - acpi_gbl_root_table_list.tables = tables; - acpi_gbl_root_table_list.size += ACPI_ROOT_TABLE_SIZE_INCREMENT; - acpi_gbl_root_table_list.flags |= (u8) ACPI_ROOT_ORIGIN_ALLOCATED; + acpi_gbl_root_table_list.tables = tables; + acpi_gbl_root_table_list.size += ACPI_ROOT_TABLE_SIZE_INCREMENT; + acpi_gbl_root_table_list.flags |= (u8)ACPI_ROOT_ORIGIN_ALLOCATED; - return_ACPI_STATUS(AE_OK); + return_ACPI_STATUS(AE_OK); } diff --git a/xen/drivers/acpi/tables/tbutils.c b/xen/drivers/acpi/tables/tbutils.c index d135a50ff9..7aebfaf331 100644 --- a/xen/drivers/acpi/tables/tbutils.c +++ b/xen/drivers/acpi/tables/tbutils.c @@ -45,7 +45,7 @@ #include #include -#define _COMPONENT ACPI_TABLES +#define _COMPONENT ACPI_TABLES ACPI_MODULE_NAME("tbutils") /******************************************************************************* @@ -61,51 +61,50 @@ ACPI_MODULE_NAME("tbutils") * AE_NULL_ENTRY - XSDT has NULL entry * * DESCRIPTION: validate XSDT -******************************************************************************/ + ******************************************************************************/ -static acpi_status __init -acpi_tb_check_xsdt(acpi_physical_address address) +static acpi_status __init acpi_tb_check_xsdt(acpi_physical_address address) { - struct acpi_table_header *table; - u32 length; - u64 xsdt_entry_address; - u8 *table_entry; - u32 table_count; - int i; - - table = acpi_os_map_memory(address, sizeof(struct acpi_table_header)); - if (!table) - return AE_NO_MEMORY; - - length = table->length; - acpi_os_unmap_memory(table, sizeof(struct acpi_table_header)); - if (length < sizeof(struct acpi_table_header)) - return AE_INVALID_TABLE_LENGTH; - - table = acpi_os_map_memory(address, length); - if (!table) - return AE_NO_MEMORY; - - /* Calculate the number of tables described in XSDT */ - table_count = - (u32) ((table->length - - sizeof(struct acpi_table_header)) / sizeof(u64)); - table_entry = - ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header); - for (i = 0; i < table_count; i++) { - ACPI_MOVE_64_TO_64(&xsdt_entry_address, table_entry); - if (!xsdt_entry_address) { - /* XSDT has NULL entry */ - break; - } - table_entry += sizeof(u64); - } - acpi_os_unmap_memory(table, 
length); - - if (i < table_count) - return AE_NULL_ENTRY; - else - return AE_OK; + struct acpi_table_header *table; + u32 length; + u64 xsdt_entry_address; + u8 *table_entry; + u32 table_count; + int i; + + table = acpi_os_map_memory(address, sizeof(struct acpi_table_header)); + if ( !table ) + return AE_NO_MEMORY; + + length = table->length; + acpi_os_unmap_memory(table, sizeof(struct acpi_table_header)); + if ( length < sizeof(struct acpi_table_header) ) + return AE_INVALID_TABLE_LENGTH; + + table = acpi_os_map_memory(address, length); + if ( !table ) + return AE_NO_MEMORY; + + /* Calculate the number of tables described in XSDT */ + table_count = + (u32)((table->length - sizeof(struct acpi_table_header)) / sizeof(u64)); + table_entry = ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header); + for ( i = 0; i < table_count; i++ ) + { + ACPI_MOVE_64_TO_64(&xsdt_entry_address, table_entry); + if ( !xsdt_entry_address ) + { + /* XSDT has NULL entry */ + break; + } + table_entry += sizeof(u64); + } + acpi_os_unmap_memory(table, length); + + if ( i < table_count ) + return AE_NULL_ENTRY; + else + return AE_OK; } /******************************************************************************* @@ -121,43 +120,38 @@ acpi_tb_check_xsdt(acpi_physical_address address) * ******************************************************************************/ -void __init -acpi_tb_print_table_header(acpi_physical_address address, - struct acpi_table_header *header) +void __init acpi_tb_print_table_header(acpi_physical_address address, + struct acpi_table_header *header) { - - if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) { - - /* FACS only has signature and length fields of common table header */ - - ACPI_INFO((AE_INFO, "%4.4s %08lX, %04X", - header->signature, (unsigned long)address, - header->length)); - } else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) { - - /* RSDP has no common fields */ - - ACPI_INFO((AE_INFO, "RSDP %08lX, %04X (r%d %6.6s)", - (unsigned long)address, - (ACPI_CAST_PTR(struct acpi_table_rsdp, header)-> - revision > - 0) ? ACPI_CAST_PTR(struct acpi_table_rsdp, - header)->length : 20, - ACPI_CAST_PTR(struct acpi_table_rsdp, - header)->revision, - ACPI_CAST_PTR(struct acpi_table_rsdp, - header)->oem_id)); - } else { - /* Standard ACPI table with full common header */ - - ACPI_INFO((AE_INFO, - "%4.4s %08lX, %04X (r%d %6.6s %8.8s %8X %4.4s %8X)", - header->signature, (unsigned long)address, - header->length, header->revision, header->oem_id, - header->oem_table_id, header->oem_revision, - header->asl_compiler_id, - header->asl_compiler_revision)); - } + if ( ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS) ) + { + /* FACS only has signature and length fields of common table header */ + + ACPI_INFO((AE_INFO, "%4.4s %08lX, %04X", header->signature, + (unsigned long)address, header->length)); + } + else if ( ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP) ) + { + /* RSDP has no common fields */ + + ACPI_INFO((AE_INFO, "RSDP %08lX, %04X (r%d %6.6s)", + (unsigned long)address, + (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->revision > 0) + ? 
ACPI_CAST_PTR(struct acpi_table_rsdp, header)->length + : 20, + ACPI_CAST_PTR(struct acpi_table_rsdp, header)->revision, + ACPI_CAST_PTR(struct acpi_table_rsdp, header)->oem_id)); + } + else + { + /* Standard ACPI table with full common header */ + + ACPI_INFO((AE_INFO, "%4.4s %08lX, %04X (r%d %6.6s %8.8s %8X %4.4s %8X)", + header->signature, (unsigned long)address, header->length, + header->revision, header->oem_id, header->oem_table_id, + header->oem_revision, header->asl_compiler_id, + header->asl_compiler_revision)); + } } /******************************************************************************* @@ -174,30 +168,32 @@ acpi_tb_print_table_header(acpi_physical_address address, * ******************************************************************************/ -acpi_status __init -acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length) +acpi_status __init acpi_tb_verify_checksum(struct acpi_table_header *table, + u32 length) { - u8 checksum; + u8 checksum; - /* Compute the checksum on the table */ + /* Compute the checksum on the table */ - checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length); + checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length); - /* Checksum ok? (should be zero) */ + /* Checksum ok? (should be zero) */ - if (checksum) { - ACPI_WARNING((AE_INFO, - "Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X", - table->signature, table->checksum, - (u8) (table->checksum - checksum))); + if ( checksum ) + { + ACPI_WARNING( + (AE_INFO, + "Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X", + table->signature, table->checksum, + (u8)(table->checksum - checksum))); -#if (ACPI_CHECKSUM_ABORT) +#if ( ACPI_CHECKSUM_ABORT ) - return (AE_BAD_CHECKSUM); + return (AE_BAD_CHECKSUM); #endif - } + } - return (AE_OK); + return (AE_OK); } /******************************************************************************* @@ -213,16 +209,17 @@ acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length) * ******************************************************************************/ -u8 acpi_tb_checksum(u8 * buffer, acpi_native_uint length) +u8 acpi_tb_checksum(u8 *buffer, acpi_native_uint length) { - u8 sum = 0; - u8 *end = buffer + length; + u8 sum = 0; + u8 *end = buffer + length; - while (buffer < end) { - sum = (u8) (sum + *(buffer++)); - } + while ( buffer < end ) + { + sum = (u8)(sum + *(buffer++)); + } - return sum; + return sum; } /******************************************************************************* @@ -241,49 +238,49 @@ u8 acpi_tb_checksum(u8 * buffer, acpi_native_uint length) * ******************************************************************************/ -void __init -acpi_tb_install_table(acpi_physical_address address, - u8 flags, char *signature, acpi_native_uint table_index) +void __init acpi_tb_install_table(acpi_physical_address address, u8 flags, + char *signature, acpi_native_uint table_index) { - struct acpi_table_header *table; + struct acpi_table_header *table; - if (!address) { - ACPI_ERROR((AE_INFO, - "Null physical address for ACPI table [%s]", - signature)); - return; - } + if ( !address ) + { + ACPI_ERROR( + (AE_INFO, "Null physical address for ACPI table [%s]", signature)); + return; + } - /* Map just the table header */ + /* Map just the table header */ - table = acpi_os_map_memory(address, sizeof(struct acpi_table_header)); - if (!table) { - return; - } + table = acpi_os_map_memory(address, sizeof(struct acpi_table_header)); + if ( !table ) + { + return; + } - /* If a particular signature is 
expected, signature must match */ + /* If a particular signature is expected, signature must match */ - if (signature && !ACPI_COMPARE_NAME(table->signature, signature)) { - ACPI_ERROR((AE_INFO, - "Invalid signature 0x%X for ACPI table [%s]", - *ACPI_CAST_PTR(u32, table->signature), signature)); - goto unmap_and_exit; - } + if ( signature && !ACPI_COMPARE_NAME(table->signature, signature) ) + { + ACPI_ERROR((AE_INFO, "Invalid signature 0x%X for ACPI table [%s]", + *ACPI_CAST_PTR(u32, table->signature), signature)); + goto unmap_and_exit; + } - /* Initialize the table entry */ + /* Initialize the table entry */ - acpi_gbl_root_table_list.tables[table_index].address = address; - acpi_gbl_root_table_list.tables[table_index].length = table->length; - acpi_gbl_root_table_list.tables[table_index].flags = flags; + acpi_gbl_root_table_list.tables[table_index].address = address; + acpi_gbl_root_table_list.tables[table_index].length = table->length; + acpi_gbl_root_table_list.tables[table_index].flags = flags; - ACPI_MOVE_32_TO_32(& - (acpi_gbl_root_table_list.tables[table_index]. - signature), table->signature); + ACPI_MOVE_32_TO_32( + &(acpi_gbl_root_table_list.tables[table_index].signature), + table->signature); - acpi_tb_print_table_header(address, table); + acpi_tb_print_table_header(address, table); - unmap_and_exit: - acpi_os_unmap_memory(table, sizeof(struct acpi_table_header)); +unmap_and_exit: + acpi_os_unmap_memory(table, sizeof(struct acpi_table_header)); } /******************************************************************************* @@ -304,41 +301,44 @@ acpi_tb_install_table(acpi_physical_address address, ******************************************************************************/ static acpi_physical_address __init -acpi_tb_get_root_table_entry(u8 * table_entry, - acpi_native_uint table_entry_size) +acpi_tb_get_root_table_entry(u8 *table_entry, acpi_native_uint table_entry_size) { - u64 address64; - - /* - * Get the table physical address (32-bit for RSDT, 64-bit for XSDT): - * Note: Addresses are 32-bit aligned (not 64) in both RSDT and XSDT - */ - if (table_entry_size == sizeof(u32)) { - /* - * 32-bit platform, RSDT: Return 32-bit table entry - * 64-bit platform, RSDT: Expand 32-bit to 64-bit and return - */ - return ((acpi_physical_address) - (*ACPI_CAST_PTR(u32, table_entry))); - } else { - /* - * 32-bit platform, XSDT: Truncate 64-bit to 32-bit and return - * 64-bit platform, XSDT: Move (unaligned) 64-bit to local, return 64-bit - */ - ACPI_MOVE_64_TO_64(&address64, table_entry); + u64 address64; + + /* + * Get the table physical address (32-bit for RSDT, 64-bit for XSDT): + * Note: Addresses are 32-bit aligned (not 64) in both RSDT and XSDT + */ + if ( table_entry_size == sizeof(u32) ) + { + /* + * 32-bit platform, RSDT: Return 32-bit table entry + * 64-bit platform, RSDT: Expand 32-bit to 64-bit and return + */ + return ((acpi_physical_address)(*ACPI_CAST_PTR(u32, table_entry))); + } + else + { + /* + * 32-bit platform, XSDT: Truncate 64-bit to 32-bit and return + * 64-bit platform, XSDT: Move (unaligned) 64-bit to local, return + * 64-bit + */ + ACPI_MOVE_64_TO_64(&address64, table_entry); #if ACPI_MACHINE_WIDTH == 32 - if (address64 > ACPI_UINT32_MAX) { - - /* Will truncate 64-bit address to 32 bits, issue warning */ - - ACPI_WARNING((AE_INFO, - "64-bit Physical Address in XSDT is too large (%8.8X%8.8X), truncating", - ACPI_FORMAT_UINT64(address64))); - } + if ( address64 > ACPI_UINT32_MAX ) + { + /* Will truncate 64-bit address to 32 bits, issue warning */ + + 
ACPI_WARNING((AE_INFO, + "64-bit Physical Address in XSDT is too large " + "(%8.8X%8.8X), truncating", + ACPI_FORMAT_UINT64(address64))); + } #endif - return ((acpi_physical_address) (address64)); - } + return ((acpi_physical_address)(address64)); + } } /******************************************************************************* @@ -359,168 +359,172 @@ acpi_tb_get_root_table_entry(u8 * table_entry, * ******************************************************************************/ -acpi_status __init -acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags) +acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address, + u8 flags) { - struct acpi_table_rsdp *rsdp; - acpi_native_uint table_entry_size; - acpi_native_uint i; - u32 table_count; - struct acpi_table_header *table; - acpi_physical_address address; - acpi_physical_address rsdt_address = 0; - u32 length; - u8 *table_entry; - acpi_status status; - - ACPI_FUNCTION_TRACE(tb_parse_root_table); - - /* - * Map the entire RSDP and extract the address of the RSDT or XSDT - */ - rsdp = acpi_os_map_memory(rsdp_address, sizeof(struct acpi_table_rsdp)); - if (!rsdp) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - acpi_tb_print_table_header(rsdp_address, - ACPI_CAST_PTR(struct acpi_table_header, - rsdp)); - - /* Differentiate between RSDT and XSDT root tables */ - - if (rsdp->revision > 1 && rsdp->xsdt_physical_address) { - /* - * Root table is an XSDT (64-bit physical addresses). We must use the - * XSDT if the revision is > 1 and the XSDT pointer is present, as per - * the ACPI specification. - */ - address = (acpi_physical_address) rsdp->xsdt_physical_address; - table_entry_size = sizeof(u64); - rsdt_address = (acpi_physical_address) - rsdp->rsdt_physical_address; - } else { - /* Root table is an RSDT (32-bit physical addresses) */ - - address = (acpi_physical_address) rsdp->rsdt_physical_address; - table_entry_size = sizeof(u32); - } - - /* - * It is not possible to map more than one entry in some environments, - * so unmap the RSDP here before mapping other tables - */ - acpi_os_unmap_memory(rsdp, sizeof(struct acpi_table_rsdp)); - - if (table_entry_size == sizeof(u64)) { - if (acpi_tb_check_xsdt(address) == AE_NULL_ENTRY) { - /* XSDT has NULL entry, RSDT is used */ - address = rsdt_address; - table_entry_size = sizeof(u32); - ACPI_WARNING((AE_INFO, "BIOS XSDT has NULL entry, " - "using RSDT")); - } - } - /* Map the RSDT/XSDT table header to get the full table length */ - - table = acpi_os_map_memory(address, sizeof(struct acpi_table_header)); - if (!table) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - acpi_tb_print_table_header(address, table); - - /* Get the length of the full table, verify length and map entire table */ - - length = table->length; - acpi_os_unmap_memory(table, sizeof(struct acpi_table_header)); - - if (length < sizeof(struct acpi_table_header)) { - ACPI_ERROR((AE_INFO, "Invalid length 0x%X in RSDT/XSDT", - length)); - return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH); - } - - table = acpi_os_map_memory(address, length); - if (!table) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - /* Validate the root table checksum */ - - status = acpi_tb_verify_checksum(table, length); - if (ACPI_FAILURE(status)) { - acpi_os_unmap_memory(table, length); - return_ACPI_STATUS(status); - } - - /* Calculate the number of tables described in the root table */ - - table_count = - (u32) ((table->length - - sizeof(struct acpi_table_header)) / table_entry_size); - - /* - * First two entries in the table array are 
reserved for the DSDT and FACS, - * which are not actually present in the RSDT/XSDT - they come from the FADT - */ - table_entry = - ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header); - acpi_gbl_root_table_list.count = 2; - - /* - * Initialize the root table array from the RSDT/XSDT - */ - for (i = 0; i < table_count; i++) { - if (acpi_gbl_root_table_list.count >= - acpi_gbl_root_table_list.size) { - - /* There is no more room in the root table array, attempt resize */ - - status = acpi_tb_resize_root_table_list(); - if (ACPI_FAILURE(status)) { - ACPI_WARNING((AE_INFO, - "Truncating %u table entries!", - (unsigned) - (acpi_gbl_root_table_list.size - - acpi_gbl_root_table_list. - count))); - break; - } - } - - /* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */ - - acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count]. - address = - acpi_tb_get_root_table_entry(table_entry, table_entry_size); - - table_entry += table_entry_size; - acpi_gbl_root_table_list.count++; - } - - /* - * It is not possible to map more than one entry in some environments, - * so unmap the root table here before mapping other tables - */ - acpi_os_unmap_memory(table, length); - - /* - * Complete the initialization of the root table array by examining - * the header of each table - */ - for (i = 2; i < acpi_gbl_root_table_list.count; i++) { - acpi_tb_install_table(acpi_gbl_root_table_list.tables[i]. - address, flags, NULL, i); - - /* Special case for FADT - get the DSDT and FACS */ - - if (ACPI_COMPARE_NAME - (&acpi_gbl_root_table_list.tables[i].signature, - ACPI_SIG_FADT)) { - acpi_tb_parse_fadt(i, flags); - } - } - - return_ACPI_STATUS(AE_OK); + struct acpi_table_rsdp *rsdp; + acpi_native_uint table_entry_size; + acpi_native_uint i; + u32 table_count; + struct acpi_table_header *table; + acpi_physical_address address; + acpi_physical_address rsdt_address = 0; + u32 length; + u8 *table_entry; + acpi_status status; + + ACPI_FUNCTION_TRACE(tb_parse_root_table); + + /* + * Map the entire RSDP and extract the address of the RSDT or XSDT + */ + rsdp = acpi_os_map_memory(rsdp_address, sizeof(struct acpi_table_rsdp)); + if ( !rsdp ) + { + return_ACPI_STATUS(AE_NO_MEMORY); + } + + acpi_tb_print_table_header(rsdp_address, + ACPI_CAST_PTR(struct acpi_table_header, rsdp)); + + /* Differentiate between RSDT and XSDT root tables */ + + if ( rsdp->revision > 1 && rsdp->xsdt_physical_address ) + { + /* + * Root table is an XSDT (64-bit physical addresses). We must use the + * XSDT if the revision is > 1 and the XSDT pointer is present, as per + * the ACPI specification. 
+ */ + address = (acpi_physical_address)rsdp->xsdt_physical_address; + table_entry_size = sizeof(u64); + rsdt_address = (acpi_physical_address)rsdp->rsdt_physical_address; + } + else + { + /* Root table is an RSDT (32-bit physical addresses) */ + + address = (acpi_physical_address)rsdp->rsdt_physical_address; + table_entry_size = sizeof(u32); + } + + /* + * It is not possible to map more than one entry in some environments, + * so unmap the RSDP here before mapping other tables + */ + acpi_os_unmap_memory(rsdp, sizeof(struct acpi_table_rsdp)); + + if ( table_entry_size == sizeof(u64) ) + { + if ( acpi_tb_check_xsdt(address) == AE_NULL_ENTRY ) + { + /* XSDT has NULL entry, RSDT is used */ + address = rsdt_address; + table_entry_size = sizeof(u32); + ACPI_WARNING((AE_INFO, "BIOS XSDT has NULL entry, " + "using RSDT")); + } + } + /* Map the RSDT/XSDT table header to get the full table length */ + + table = acpi_os_map_memory(address, sizeof(struct acpi_table_header)); + if ( !table ) + { + return_ACPI_STATUS(AE_NO_MEMORY); + } + + acpi_tb_print_table_header(address, table); + + /* Get the length of the full table, verify length and map entire table */ + + length = table->length; + acpi_os_unmap_memory(table, sizeof(struct acpi_table_header)); + + if ( length < sizeof(struct acpi_table_header) ) + { + ACPI_ERROR((AE_INFO, "Invalid length 0x%X in RSDT/XSDT", length)); + return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH); + } + + table = acpi_os_map_memory(address, length); + if ( !table ) + { + return_ACPI_STATUS(AE_NO_MEMORY); + } + + /* Validate the root table checksum */ + + status = acpi_tb_verify_checksum(table, length); + if ( ACPI_FAILURE(status) ) + { + acpi_os_unmap_memory(table, length); + return_ACPI_STATUS(status); + } + + /* Calculate the number of tables described in the root table */ + + table_count = (u32)((table->length - sizeof(struct acpi_table_header)) / + table_entry_size); + + /* + * First two entries in the table array are reserved for the DSDT and FACS, + * which are not actually present in the RSDT/XSDT - they come from the FADT + */ + table_entry = ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header); + acpi_gbl_root_table_list.count = 2; + + /* + * Initialize the root table array from the RSDT/XSDT + */ + for ( i = 0; i < table_count; i++ ) + { + if ( acpi_gbl_root_table_list.count >= acpi_gbl_root_table_list.size ) + { + /* There is no more room in the root table array, attempt resize */ + + status = acpi_tb_resize_root_table_list(); + if ( ACPI_FAILURE(status) ) + { + ACPI_WARNING((AE_INFO, "Truncating %u table entries!", + (unsigned)(acpi_gbl_root_table_list.size - + acpi_gbl_root_table_list.count))); + break; + } + } + + /* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */ + + acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count] + .address = + acpi_tb_get_root_table_entry(table_entry, table_entry_size); + + table_entry += table_entry_size; + acpi_gbl_root_table_list.count++; + } + + /* + * It is not possible to map more than one entry in some environments, + * so unmap the root table here before mapping other tables + */ + acpi_os_unmap_memory(table, length); + + /* + * Complete the initialization of the root table array by examining + * the header of each table + */ + for ( i = 2; i < acpi_gbl_root_table_list.count; i++ ) + { + acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].address, flags, + NULL, i); + + /* Special case for FADT - get the DSDT and FACS */ + + if ( 
ACPI_COMPARE_NAME(&acpi_gbl_root_table_list.tables[i].signature, + ACPI_SIG_FADT) ) + { + acpi_tb_parse_fadt(i, flags); + } + } + + return_ACPI_STATUS(AE_OK); } diff --git a/xen/drivers/acpi/tables/tbxface.c b/xen/drivers/acpi/tables/tbxface.c index 21b2e5eae1..07438588ff 100644 --- a/xen/drivers/acpi/tables/tbxface.c +++ b/xen/drivers/acpi/tables/tbxface.c @@ -46,7 +46,7 @@ #include #include -#define _COMPONENT ACPI_TABLES +#define _COMPONENT ACPI_TABLES ACPI_MODULE_NAME("tbxface") /******************************************************************************* @@ -65,12 +65,11 @@ ACPI_MODULE_NAME("tbxface") acpi_status __init acpi_allocate_root_table(u32 initial_table_count) { + acpi_gbl_root_table_list.size = + initial_table_count - ACPI_ROOT_TABLE_SIZE_INCREMENT; + acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE; - acpi_gbl_root_table_list.size = initial_table_count - - ACPI_ROOT_TABLE_SIZE_INCREMENT; - acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE; - - return (acpi_tb_resize_root_table_list()); + return (acpi_tb_resize_root_table_list()); } /******************************************************************************* @@ -78,10 +77,9 @@ acpi_status __init acpi_allocate_root_table(u32 initial_table_count) * FUNCTION: acpi_initialize_tables * * PARAMETERS: initial_table_array - Pointer to an array of pre-allocated - * struct acpi_table_desc structures. If NULL, the - * array is dynamically allocated. - * initial_table_count - Size of initial_table_array, in number of - * struct acpi_table_desc structures + * struct acpi_table_desc structures. If + *NULL, the array is dynamically allocated. initial_table_count - Size of + *initial_table_array, in number of struct acpi_table_desc structures * allow_realloc - Flag to tell Table Manager if resize of * pre-allocated array is allowed. Ignored * if initial_table_array is NULL. @@ -95,59 +93,63 @@ acpi_status __init acpi_allocate_root_table(u32 initial_table_count) * such as the kernel boot sequence where it may not be available. * * If the host OS memory managers are initialized, use NULL for - * initial_table_array, and the table will be dynamically allocated. + * initial_table_array, and the table will be dynamically + *allocated. 
* ******************************************************************************/ acpi_status __init -acpi_initialize_tables(struct acpi_table_desc * initial_table_array, - u32 initial_table_count, u8 allow_resize) +acpi_initialize_tables(struct acpi_table_desc *initial_table_array, + u32 initial_table_count, u8 allow_resize) { - acpi_physical_address rsdp_address; - acpi_status status; - - ACPI_FUNCTION_TRACE(acpi_initialize_tables); - - /* - * Set up the Root Table Array - * Allocate the table array if requested - */ - if (!initial_table_array) { - status = acpi_allocate_root_table(initial_table_count); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - } else { - /* Root Table Array has been statically allocated by the host */ - - ACPI_MEMSET(initial_table_array, 0, - initial_table_count * - sizeof(struct acpi_table_desc)); - - acpi_gbl_root_table_list.tables = initial_table_array; - acpi_gbl_root_table_list.size = initial_table_count; - acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN; - if (allow_resize) { - acpi_gbl_root_table_list.flags |= - ACPI_ROOT_ALLOW_RESIZE; - } - } - - /* Get the address of the RSDP */ - - rsdp_address = acpi_os_get_root_pointer(); - if (!rsdp_address) { - return_ACPI_STATUS(AE_NOT_FOUND); - } - - /* - * Get the root table (RSDT or XSDT) and extract all entries to the local - * Root Table Array. This array contains the information of the RSDT/XSDT - * in a common, more useable format. - */ - status = - acpi_tb_parse_root_table(rsdp_address, ACPI_TABLE_ORIGIN_MAPPED); - return_ACPI_STATUS(status); + acpi_physical_address rsdp_address; + acpi_status status; + + ACPI_FUNCTION_TRACE(acpi_initialize_tables); + + /* + * Set up the Root Table Array + * Allocate the table array if requested + */ + if ( !initial_table_array ) + { + status = acpi_allocate_root_table(initial_table_count); + if ( ACPI_FAILURE(status) ) + { + return_ACPI_STATUS(status); + } + } + else + { + /* Root Table Array has been statically allocated by the host */ + + ACPI_MEMSET(initial_table_array, 0, + initial_table_count * sizeof(struct acpi_table_desc)); + + acpi_gbl_root_table_list.tables = initial_table_array; + acpi_gbl_root_table_list.size = initial_table_count; + acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN; + if ( allow_resize ) + { + acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE; + } + } + + /* Get the address of the RSDP */ + + rsdp_address = acpi_os_get_root_pointer(); + if ( !rsdp_address ) + { + return_ACPI_STATUS(AE_NOT_FOUND); + } + + /* + * Get the root table (RSDT or XSDT) and extract all entries to the local + * Root Table Array. This array contains the information of the RSDT/XSDT + * in a common, more useable format. + */ + status = acpi_tb_parse_root_table(rsdp_address, ACPI_TABLE_ORIGIN_MAPPED); + return_ACPI_STATUS(status); } /******************************************************************************* @@ -163,46 +165,48 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array, * DESCRIPTION: Finds and verifies an ACPI table. 
* *****************************************************************************/ -acpi_status __init -acpi_get_table(char *signature, - acpi_native_uint instance, struct acpi_table_header **out_table) +acpi_status __init acpi_get_table(char *signature, acpi_native_uint instance, + struct acpi_table_header **out_table) { - acpi_native_uint i; - acpi_native_uint j; - acpi_status status; - - /* Parameter validation */ - - if (!signature || !out_table) { - return (AE_BAD_PARAMETER); - } - - /* - * Walk the root table list - */ - for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) { - if (!ACPI_COMPARE_NAME - (&(acpi_gbl_root_table_list.tables[i].signature), - signature)) { - continue; - } - - if (++j < instance) { - continue; - } - - status = - acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]); - if (ACPI_SUCCESS(status)) { - *out_table = acpi_gbl_root_table_list.tables[i].pointer; - } - - acpi_gbl_root_table_list.tables[i].pointer = NULL; - - return (status); - } - - return (AE_NOT_FOUND); + acpi_native_uint i; + acpi_native_uint j; + acpi_status status; + + /* Parameter validation */ + + if ( !signature || !out_table ) + { + return (AE_BAD_PARAMETER); + } + + /* + * Walk the root table list + */ + for ( i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++ ) + { + if ( !ACPI_COMPARE_NAME(&(acpi_gbl_root_table_list.tables[i].signature), + signature) ) + { + continue; + } + + if ( ++j < instance ) + { + continue; + } + + status = acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]); + if ( ACPI_SUCCESS(status) ) + { + *out_table = acpi_gbl_root_table_list.tables[i].pointer; + } + + acpi_gbl_root_table_list.tables[i].pointer = NULL; + + return (status); + } + + return (AE_NOT_FOUND); } /****************************************************************************** @@ -219,36 +223,37 @@ acpi_get_table(char *signature, * DESCRIPTION: Finds physical address and length of ACPI table * *****************************************************************************/ -acpi_status __init -acpi_get_table_phys(acpi_string signature, acpi_native_uint instance, - acpi_physical_address *addr, acpi_native_uint *len) +acpi_status __init acpi_get_table_phys(acpi_string signature, + acpi_native_uint instance, + acpi_physical_address *addr, + acpi_native_uint *len) { - acpi_native_uint i, j; - acpi_status status; + acpi_native_uint i, j; + acpi_status status; - if (!signature || !addr || !len) - return AE_BAD_PARAMETER; + if ( !signature || !addr || !len ) + return AE_BAD_PARAMETER; - for (i = j = 0; i < acpi_gbl_root_table_list.count; i++) { - if (!ACPI_COMPARE_NAME( - &acpi_gbl_root_table_list.tables[i].signature, - signature)) - continue; + for ( i = j = 0; i < acpi_gbl_root_table_list.count; i++ ) + { + if ( !ACPI_COMPARE_NAME(&acpi_gbl_root_table_list.tables[i].signature, + signature) ) + continue; - if (++j < instance) - continue; + if ( ++j < instance ) + continue; - status = - acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]); - if (ACPI_SUCCESS(status)) { - *addr = acpi_gbl_root_table_list.tables[i].address; - *len = acpi_gbl_root_table_list.tables[i].length; - } + status = acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]); + if ( ACPI_SUCCESS(status) ) + { + *addr = acpi_gbl_root_table_list.tables[i].address; + *len = acpi_gbl_root_table_list.tables[i].length; + } - acpi_gbl_root_table_list.tables[i].pointer = NULL; + acpi_gbl_root_table_list.tables[i].pointer = NULL; - return status; - } + return status; + } - return AE_NOT_FOUND; + return AE_NOT_FOUND; } diff --git 
a/xen/drivers/acpi/tables/tbxfroot.c b/xen/drivers/acpi/tables/tbxfroot.c index 18e5ad6e5a..54de322812 100644 --- a/xen/drivers/acpi/tables/tbxfroot.c +++ b/xen/drivers/acpi/tables/tbxfroot.c @@ -45,11 +45,11 @@ #include #include -#define _COMPONENT ACPI_TABLES +#define _COMPONENT ACPI_TABLES ACPI_MODULE_NAME("tbxfroot") /* Local prototypes */ -static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length); +static u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length); /******************************************************************************* * @@ -65,36 +65,38 @@ static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length); static acpi_status __init acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) { - ACPI_FUNCTION_ENTRY(); - - /* - * The signature and checksum must both be correct - * - * Note: Sometimes there exists more than one RSDP in memory; the valid - * RSDP has a valid checksum, all others have an invalid checksum. - */ - if (ACPI_STRNCMP((char *)rsdp, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1) - != 0) { - - /* Nope, BAD Signature */ - - return (AE_BAD_SIGNATURE); - } - - /* Check the standard checksum */ - - if (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) { - return (AE_BAD_CHECKSUM); - } - - /* Check extended checksum if table version >= 2 */ - - if ((rsdp->revision >= 2) && - (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0)) { - return (AE_BAD_CHECKSUM); - } - - return (AE_OK); + ACPI_FUNCTION_ENTRY(); + + /* + * The signature and checksum must both be correct + * + * Note: Sometimes there exists more than one RSDP in memory; the valid + * RSDP has a valid checksum, all others have an invalid checksum. + */ + if ( ACPI_STRNCMP((char *)rsdp, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1) != + 0 ) + { + /* Nope, BAD Signature */ + + return (AE_BAD_SIGNATURE); + } + + /* Check the standard checksum */ + + if ( acpi_tb_checksum((u8 *)rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0 ) + { + return (AE_BAD_CHECKSUM); + } + + /* Check extended checksum if table version >= 2 */ + + if ( (rsdp->revision >= 2) && + (acpi_tb_checksum((u8 *)rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0) ) + { + return (AE_BAD_CHECKSUM); + } + + return (AE_OK); } /******************************************************************************* @@ -117,105 +119,100 @@ static acpi_status __init acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) * ******************************************************************************/ -acpi_status __init acpi_find_root_pointer(acpi_native_uint * table_address) +acpi_status __init acpi_find_root_pointer(acpi_native_uint *table_address) { - u8 *table_ptr; - u8 *mem_rover; - u32 physical_address; - - ACPI_FUNCTION_TRACE(acpi_find_root_pointer); + u8 *table_ptr; + u8 *mem_rover; + u32 physical_address; - /* 1a) Get the location of the Extended BIOS Data Area (EBDA) */ + ACPI_FUNCTION_TRACE(acpi_find_root_pointer); - table_ptr = acpi_os_map_memory((acpi_physical_address) - ACPI_EBDA_PTR_LOCATION, - ACPI_EBDA_PTR_LENGTH); - if (!table_ptr) { - ACPI_ERROR((AE_INFO, - "Could not map memory at %8.8X for length %X", - ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH)); + /* 1a) Get the location of the Extended BIOS Data Area (EBDA) */ - return_ACPI_STATUS(AE_NO_MEMORY); - } + table_ptr = acpi_os_map_memory( + (acpi_physical_address)ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH); + if ( !table_ptr ) + { + ACPI_ERROR((AE_INFO, "Could not map memory at %8.8X for length %X", + ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH)); - 
ACPI_MOVE_16_TO_32(&physical_address, table_ptr); + return_ACPI_STATUS(AE_NO_MEMORY); + } - /* Convert segment part to physical address */ + ACPI_MOVE_16_TO_32(&physical_address, table_ptr); - physical_address <<= 4; - acpi_os_unmap_memory(table_ptr, ACPI_EBDA_PTR_LENGTH); + /* Convert segment part to physical address */ - /* EBDA present? */ + physical_address <<= 4; + acpi_os_unmap_memory(table_ptr, ACPI_EBDA_PTR_LENGTH); - if (physical_address > 0x400) { - /* - * 1b) Search EBDA paragraphs (EBDA is required to be a - * minimum of 1_k length) - */ - table_ptr = acpi_os_map_memory((acpi_native_uint) - physical_address, - ACPI_EBDA_WINDOW_SIZE); - if (!table_ptr) { - ACPI_ERROR((AE_INFO, - "Could not map memory at %8.8X for length %X", - physical_address, ACPI_EBDA_WINDOW_SIZE)); + /* EBDA present? */ - return_ACPI_STATUS(AE_NO_MEMORY); - } + if ( physical_address > 0x400 ) + { + /* + * 1b) Search EBDA paragraphs (EBDA is required to be a + * minimum of 1_k length) + */ + table_ptr = acpi_os_map_memory((acpi_native_uint)physical_address, + ACPI_EBDA_WINDOW_SIZE); + if ( !table_ptr ) + { + ACPI_ERROR((AE_INFO, "Could not map memory at %8.8X for length %X", + physical_address, ACPI_EBDA_WINDOW_SIZE)); - mem_rover = - acpi_tb_scan_memory_for_rsdp(table_ptr, - ACPI_EBDA_WINDOW_SIZE); - acpi_os_unmap_memory(table_ptr, ACPI_EBDA_WINDOW_SIZE); + return_ACPI_STATUS(AE_NO_MEMORY); + } - if (mem_rover) { + mem_rover = + acpi_tb_scan_memory_for_rsdp(table_ptr, ACPI_EBDA_WINDOW_SIZE); + acpi_os_unmap_memory(table_ptr, ACPI_EBDA_WINDOW_SIZE); - /* Return the physical address */ + if ( mem_rover ) + { + /* Return the physical address */ - physical_address += - (u32) ACPI_PTR_DIFF(mem_rover, table_ptr); + physical_address += (u32)ACPI_PTR_DIFF(mem_rover, table_ptr); - *table_address = physical_address; - return_ACPI_STATUS(AE_OK); - } - } + *table_address = physical_address; + return_ACPI_STATUS(AE_OK); + } + } - /* - * 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh - */ - table_ptr = acpi_os_map_memory((acpi_physical_address) - ACPI_HI_RSDP_WINDOW_BASE, - ACPI_HI_RSDP_WINDOW_SIZE); + /* + * 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh + */ + table_ptr = + acpi_os_map_memory((acpi_physical_address)ACPI_HI_RSDP_WINDOW_BASE, + ACPI_HI_RSDP_WINDOW_SIZE); - if (!table_ptr) { - ACPI_ERROR((AE_INFO, - "Could not map memory at %8.8X for length %X", - ACPI_HI_RSDP_WINDOW_BASE, - ACPI_HI_RSDP_WINDOW_SIZE)); + if ( !table_ptr ) + { + ACPI_ERROR((AE_INFO, "Could not map memory at %8.8X for length %X", + ACPI_HI_RSDP_WINDOW_BASE, ACPI_HI_RSDP_WINDOW_SIZE)); - return_ACPI_STATUS(AE_NO_MEMORY); - } + return_ACPI_STATUS(AE_NO_MEMORY); + } - mem_rover = - acpi_tb_scan_memory_for_rsdp(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE); - acpi_os_unmap_memory(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE); + mem_rover = + acpi_tb_scan_memory_for_rsdp(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE); + acpi_os_unmap_memory(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE); - if (mem_rover) { + if ( mem_rover ) + { + /* Return the physical address */ - /* Return the physical address */ + physical_address = (u32)(ACPI_HI_RSDP_WINDOW_BASE + + ACPI_PTR_DIFF(mem_rover, table_ptr)); - physical_address = (u32) - (ACPI_HI_RSDP_WINDOW_BASE + - ACPI_PTR_DIFF(mem_rover, table_ptr)); + *table_address = physical_address; + return_ACPI_STATUS(AE_OK); + } - *table_address = physical_address; - return_ACPI_STATUS(AE_OK); - } + /* A valid RSDP was not found */ - /* A valid RSDP was not found */ - - ACPI_ERROR((AE_INFO, "A valid RSDP was not found")); - 
return_ACPI_STATUS(AE_NOT_FOUND); + ACPI_ERROR((AE_INFO, "A valid RSDP was not found")); + return_ACPI_STATUS(AE_NOT_FOUND); } /******************************************************************************* @@ -230,43 +227,43 @@ acpi_status __init acpi_find_root_pointer(acpi_native_uint * table_address) * DESCRIPTION: Search a block of memory for the RSDP signature * ******************************************************************************/ -static u8 *__init acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length) +static u8 *__init acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length) { - acpi_status status; - u8 *mem_rover; - u8 *end_address; - - ACPI_FUNCTION_TRACE(tb_scan_memory_for_rsdp); - - end_address = start_address + length; + acpi_status status; + u8 *mem_rover; + u8 *end_address; - /* Search from given start address for the requested length */ + ACPI_FUNCTION_TRACE(tb_scan_memory_for_rsdp); - for (mem_rover = start_address; mem_rover < end_address; - mem_rover += ACPI_RSDP_SCAN_STEP) { + end_address = start_address + length; - /* The RSDP signature and checksum must both be correct */ + /* Search from given start address for the requested length */ - status = - acpi_tb_validate_rsdp(ACPI_CAST_PTR - (struct acpi_table_rsdp, mem_rover)); - if (ACPI_SUCCESS(status)) { + for ( mem_rover = start_address; mem_rover < end_address; + mem_rover += ACPI_RSDP_SCAN_STEP ) + { + /* The RSDP signature and checksum must both be correct */ - /* Sig and checksum valid, we have found a real RSDP */ + status = acpi_tb_validate_rsdp( + ACPI_CAST_PTR(struct acpi_table_rsdp, mem_rover)); + if ( ACPI_SUCCESS(status) ) + { + /* Sig and checksum valid, we have found a real RSDP */ - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "RSDP located at physical address %p\n", - mem_rover)); - return_PTR(mem_rover); - } + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "RSDP located at physical address %p\n", + mem_rover)); + return_PTR(mem_rover); + } - /* No sig match or bad checksum, keep searching */ - } + /* No sig match or bad checksum, keep searching */ + } - /* Searched entire block, no RSDP was found */ + /* Searched entire block, no RSDP was found */ - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Searched entire block from %p, valid RSDP was not found\n", - start_address)); - return_PTR(NULL); + ACPI_DEBUG_PRINT( + (ACPI_DB_INFO, + "Searched entire block from %p, valid RSDP was not found\n", + start_address)); + return_PTR(NULL); } diff --git a/xen/drivers/acpi/utilities/utglobal.c b/xen/drivers/acpi/utilities/utglobal.c index 68232f67f4..47c1c2993d 100644 --- a/xen/drivers/acpi/utilities/utglobal.c +++ b/xen/drivers/acpi/utilities/utglobal.c @@ -49,8 +49,8 @@ #include ACPI_EXPORT_SYMBOL(acpi_gbl_FADT) -#define _COMPONENT ACPI_UTILITIES - ACPI_MODULE_NAME("utglobal") +#define _COMPONENT ACPI_UTILITIES +ACPI_MODULE_NAME("utglobal") /******************************************************************************* * @@ -68,23 +68,22 @@ ACPI_EXPORT_SYMBOL(acpi_gbl_FADT) const char *__init acpi_format_exception(acpi_status status) { - const char *exception = NULL; + const char *exception = NULL; - ACPI_FUNCTION_ENTRY(); + ACPI_FUNCTION_ENTRY(); - exception = acpi_ut_validate_exception(status); - if (!exception) { + exception = acpi_ut_validate_exception(status); + if ( !exception ) + { + /* Exception code was not recognized */ - /* Exception code was not recognized */ + ACPI_ERROR((AE_INFO, "Unknown exception code: %#X", status)); - ACPI_ERROR((AE_INFO, - "Unknown exception code: %#X", status)); + exception = 
"UNKNOWN_STATUS_CODE"; + dump_execution_state(); + } - exception = "UNKNOWN_STATUS_CODE"; - dump_execution_state(); - } - - return (ACPI_CAST_PTR(const char, exception)); + return (ACPI_CAST_PTR(const char, exception)); } /****************************************************************************** @@ -94,76 +93,76 @@ const char *__init acpi_format_exception(acpi_status status) ******************************************************************************/ struct acpi_bit_register_info acpi_gbl_bit_register_info[ACPI_NUM_BITREG] = { - /* Name Parent Register Register Bit Position Register Bit Mask */ - - /* ACPI_BITREG_TIMER_STATUS */ {ACPI_REGISTER_PM1_STATUS, - ACPI_BITPOSITION_TIMER_STATUS, - ACPI_BITMASK_TIMER_STATUS}, - /* ACPI_BITREG_BUS_MASTER_STATUS */ {ACPI_REGISTER_PM1_STATUS, - ACPI_BITPOSITION_BUS_MASTER_STATUS, - ACPI_BITMASK_BUS_MASTER_STATUS}, - /* ACPI_BITREG_GLOBAL_LOCK_STATUS */ {ACPI_REGISTER_PM1_STATUS, - ACPI_BITPOSITION_GLOBAL_LOCK_STATUS, - ACPI_BITMASK_GLOBAL_LOCK_STATUS}, - /* ACPI_BITREG_POWER_BUTTON_STATUS */ {ACPI_REGISTER_PM1_STATUS, - ACPI_BITPOSITION_POWER_BUTTON_STATUS, - ACPI_BITMASK_POWER_BUTTON_STATUS}, - /* ACPI_BITREG_SLEEP_BUTTON_STATUS */ {ACPI_REGISTER_PM1_STATUS, - ACPI_BITPOSITION_SLEEP_BUTTON_STATUS, - ACPI_BITMASK_SLEEP_BUTTON_STATUS}, - /* ACPI_BITREG_RT_CLOCK_STATUS */ {ACPI_REGISTER_PM1_STATUS, - ACPI_BITPOSITION_RT_CLOCK_STATUS, - ACPI_BITMASK_RT_CLOCK_STATUS}, - /* ACPI_BITREG_WAKE_STATUS */ {ACPI_REGISTER_PM1_STATUS, - ACPI_BITPOSITION_WAKE_STATUS, - ACPI_BITMASK_WAKE_STATUS}, - /* ACPI_BITREG_PCIEXP_WAKE_STATUS */ {ACPI_REGISTER_PM1_STATUS, - ACPI_BITPOSITION_PCIEXP_WAKE_STATUS, - ACPI_BITMASK_PCIEXP_WAKE_STATUS}, - - /* ACPI_BITREG_TIMER_ENABLE */ {ACPI_REGISTER_PM1_ENABLE, - ACPI_BITPOSITION_TIMER_ENABLE, - ACPI_BITMASK_TIMER_ENABLE}, - /* ACPI_BITREG_GLOBAL_LOCK_ENABLE */ {ACPI_REGISTER_PM1_ENABLE, - ACPI_BITPOSITION_GLOBAL_LOCK_ENABLE, - ACPI_BITMASK_GLOBAL_LOCK_ENABLE}, - /* ACPI_BITREG_POWER_BUTTON_ENABLE */ {ACPI_REGISTER_PM1_ENABLE, - ACPI_BITPOSITION_POWER_BUTTON_ENABLE, - ACPI_BITMASK_POWER_BUTTON_ENABLE}, - /* ACPI_BITREG_SLEEP_BUTTON_ENABLE */ {ACPI_REGISTER_PM1_ENABLE, - ACPI_BITPOSITION_SLEEP_BUTTON_ENABLE, - ACPI_BITMASK_SLEEP_BUTTON_ENABLE}, - /* ACPI_BITREG_RT_CLOCK_ENABLE */ {ACPI_REGISTER_PM1_ENABLE, - ACPI_BITPOSITION_RT_CLOCK_ENABLE, - ACPI_BITMASK_RT_CLOCK_ENABLE}, - /* ACPI_BITREG_WAKE_ENABLE */ {ACPI_REGISTER_PM1_ENABLE, 0, 0}, - /* ACPI_BITREG_PCIEXP_WAKE_DISABLE */ {ACPI_REGISTER_PM1_ENABLE, - ACPI_BITPOSITION_PCIEXP_WAKE_DISABLE, - ACPI_BITMASK_PCIEXP_WAKE_DISABLE}, - - /* ACPI_BITREG_SCI_ENABLE */ {ACPI_REGISTER_PM1_CONTROL, - ACPI_BITPOSITION_SCI_ENABLE, - ACPI_BITMASK_SCI_ENABLE}, - /* ACPI_BITREG_BUS_MASTER_RLD */ {ACPI_REGISTER_PM1_CONTROL, - ACPI_BITPOSITION_BUS_MASTER_RLD, - ACPI_BITMASK_BUS_MASTER_RLD}, - /* ACPI_BITREG_GLOBAL_LOCK_RELEASE */ {ACPI_REGISTER_PM1_CONTROL, - ACPI_BITPOSITION_GLOBAL_LOCK_RELEASE, - ACPI_BITMASK_GLOBAL_LOCK_RELEASE}, - /* ACPI_BITREG_SLEEP_TYPE_A */ {ACPI_REGISTER_PM1_CONTROL, - ACPI_BITPOSITION_SLEEP_TYPE_X, - ACPI_BITMASK_SLEEP_TYPE_X}, - /* ACPI_BITREG_SLEEP_TYPE_B */ {ACPI_REGISTER_PM1_CONTROL, - ACPI_BITPOSITION_SLEEP_TYPE_X, - ACPI_BITMASK_SLEEP_TYPE_X}, - /* ACPI_BITREG_SLEEP_ENABLE */ {ACPI_REGISTER_PM1_CONTROL, - ACPI_BITPOSITION_SLEEP_ENABLE, - ACPI_BITMASK_SLEEP_ENABLE}, - - /* ACPI_BITREG_ARB_DIS */ {ACPI_REGISTER_PM2_CONTROL, - ACPI_BITPOSITION_ARB_DISABLE, - ACPI_BITMASK_ARB_DISABLE} -}; + /* Name Parent Register Register Bit + Position Register Bit Mask */ + + 
/* ACPI_BITREG_TIMER_STATUS */ {ACPI_REGISTER_PM1_STATUS, + ACPI_BITPOSITION_TIMER_STATUS, + ACPI_BITMASK_TIMER_STATUS}, + /* ACPI_BITREG_BUS_MASTER_STATUS */ + {ACPI_REGISTER_PM1_STATUS, ACPI_BITPOSITION_BUS_MASTER_STATUS, + ACPI_BITMASK_BUS_MASTER_STATUS}, + /* ACPI_BITREG_GLOBAL_LOCK_STATUS */ + {ACPI_REGISTER_PM1_STATUS, ACPI_BITPOSITION_GLOBAL_LOCK_STATUS, + ACPI_BITMASK_GLOBAL_LOCK_STATUS}, + /* ACPI_BITREG_POWER_BUTTON_STATUS */ + {ACPI_REGISTER_PM1_STATUS, ACPI_BITPOSITION_POWER_BUTTON_STATUS, + ACPI_BITMASK_POWER_BUTTON_STATUS}, + /* ACPI_BITREG_SLEEP_BUTTON_STATUS */ + {ACPI_REGISTER_PM1_STATUS, ACPI_BITPOSITION_SLEEP_BUTTON_STATUS, + ACPI_BITMASK_SLEEP_BUTTON_STATUS}, + /* ACPI_BITREG_RT_CLOCK_STATUS */ + {ACPI_REGISTER_PM1_STATUS, ACPI_BITPOSITION_RT_CLOCK_STATUS, + ACPI_BITMASK_RT_CLOCK_STATUS}, + /* ACPI_BITREG_WAKE_STATUS */ + {ACPI_REGISTER_PM1_STATUS, ACPI_BITPOSITION_WAKE_STATUS, + ACPI_BITMASK_WAKE_STATUS}, + /* ACPI_BITREG_PCIEXP_WAKE_STATUS */ + {ACPI_REGISTER_PM1_STATUS, ACPI_BITPOSITION_PCIEXP_WAKE_STATUS, + ACPI_BITMASK_PCIEXP_WAKE_STATUS}, + + /* ACPI_BITREG_TIMER_ENABLE */ + {ACPI_REGISTER_PM1_ENABLE, ACPI_BITPOSITION_TIMER_ENABLE, + ACPI_BITMASK_TIMER_ENABLE}, + /* ACPI_BITREG_GLOBAL_LOCK_ENABLE */ + {ACPI_REGISTER_PM1_ENABLE, ACPI_BITPOSITION_GLOBAL_LOCK_ENABLE, + ACPI_BITMASK_GLOBAL_LOCK_ENABLE}, + /* ACPI_BITREG_POWER_BUTTON_ENABLE */ + {ACPI_REGISTER_PM1_ENABLE, ACPI_BITPOSITION_POWER_BUTTON_ENABLE, + ACPI_BITMASK_POWER_BUTTON_ENABLE}, + /* ACPI_BITREG_SLEEP_BUTTON_ENABLE */ + {ACPI_REGISTER_PM1_ENABLE, ACPI_BITPOSITION_SLEEP_BUTTON_ENABLE, + ACPI_BITMASK_SLEEP_BUTTON_ENABLE}, + /* ACPI_BITREG_RT_CLOCK_ENABLE */ + {ACPI_REGISTER_PM1_ENABLE, ACPI_BITPOSITION_RT_CLOCK_ENABLE, + ACPI_BITMASK_RT_CLOCK_ENABLE}, + /* ACPI_BITREG_WAKE_ENABLE */ {ACPI_REGISTER_PM1_ENABLE, 0, 0}, + /* ACPI_BITREG_PCIEXP_WAKE_DISABLE */ + {ACPI_REGISTER_PM1_ENABLE, ACPI_BITPOSITION_PCIEXP_WAKE_DISABLE, + ACPI_BITMASK_PCIEXP_WAKE_DISABLE}, + + /* ACPI_BITREG_SCI_ENABLE */ + {ACPI_REGISTER_PM1_CONTROL, ACPI_BITPOSITION_SCI_ENABLE, + ACPI_BITMASK_SCI_ENABLE}, + /* ACPI_BITREG_BUS_MASTER_RLD */ + {ACPI_REGISTER_PM1_CONTROL, ACPI_BITPOSITION_BUS_MASTER_RLD, + ACPI_BITMASK_BUS_MASTER_RLD}, + /* ACPI_BITREG_GLOBAL_LOCK_RELEASE */ + {ACPI_REGISTER_PM1_CONTROL, ACPI_BITPOSITION_GLOBAL_LOCK_RELEASE, + ACPI_BITMASK_GLOBAL_LOCK_RELEASE}, + /* ACPI_BITREG_SLEEP_TYPE_A */ + {ACPI_REGISTER_PM1_CONTROL, ACPI_BITPOSITION_SLEEP_TYPE_X, + ACPI_BITMASK_SLEEP_TYPE_X}, + /* ACPI_BITREG_SLEEP_TYPE_B */ + {ACPI_REGISTER_PM1_CONTROL, ACPI_BITPOSITION_SLEEP_TYPE_X, + ACPI_BITMASK_SLEEP_TYPE_X}, + /* ACPI_BITREG_SLEEP_ENABLE */ + {ACPI_REGISTER_PM1_CONTROL, ACPI_BITPOSITION_SLEEP_ENABLE, + ACPI_BITMASK_SLEEP_ENABLE}, + + /* ACPI_BITREG_ARB_DIS */ + {ACPI_REGISTER_PM2_CONTROL, ACPI_BITPOSITION_ARB_DISABLE, + ACPI_BITMASK_ARB_DISABLE}}; #ifdef ACPI_DEBUG_OUTPUT @@ -182,26 +181,21 @@ struct acpi_bit_register_info acpi_gbl_bit_register_info[ACPI_NUM_BITREG] = { /* Region type decoding */ static const char *const acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = { - "SystemMemory", - "SystemIO", - "PCI_Config", - "EmbeddedControl", - "SMBus", - "CMOS", - "PCIBARTarget", - "DataTable" -}; + "SystemMemory", "SystemIO", "PCI_Config", "EmbeddedControl", + "SMBus", "CMOS", "PCIBARTarget", "DataTable"}; const char *acpi_ut_get_region_name(u8 space_id) { - - if (space_id >= ACPI_USER_REGION_BEGIN) { - return ("UserDefinedRegion"); - } else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) { - return ("InvalidSpaceId"); - } - 
- return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id])); + if ( space_id >= ACPI_USER_REGION_BEGIN ) + { + return ("UserDefinedRegion"); + } + else if ( space_id >= ACPI_NUM_PREDEFINED_REGIONS ) + { + return ("InvalidSpaceId"); + } + + return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id])); } #endif diff --git a/xen/drivers/acpi/utilities/utmisc.c b/xen/drivers/acpi/utilities/utmisc.c index 4e1497ad0f..4c667e2a52 100644 --- a/xen/drivers/acpi/utilities/utmisc.c +++ b/xen/drivers/acpi/utilities/utmisc.c @@ -44,7 +44,7 @@ #include #include -#define _COMPONENT ACPI_UTILITIES +#define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utmisc") /******************************************************************************* @@ -62,61 +62,63 @@ ACPI_MODULE_NAME("utmisc") ******************************************************************************/ const char *__init acpi_ut_validate_exception(acpi_status status) { - acpi_status sub_status; - const char *exception = NULL; + acpi_status sub_status; + const char *exception = NULL; - ACPI_FUNCTION_ENTRY(); + ACPI_FUNCTION_ENTRY(); - /* - * Status is composed of two parts, a "type" and an actual code - */ - sub_status = (status & ~AE_CODE_MASK); + /* + * Status is composed of two parts, a "type" and an actual code + */ + sub_status = (status & ~AE_CODE_MASK); - switch (status & AE_CODE_MASK) { - case AE_CODE_ENVIRONMENTAL: + switch (status & AE_CODE_MASK) + { + case AE_CODE_ENVIRONMENTAL: - if (sub_status <= AE_CODE_ENV_MAX) { - exception = acpi_gbl_exception_names_env[sub_status]; - } - break; + if ( sub_status <= AE_CODE_ENV_MAX ) + { + exception = acpi_gbl_exception_names_env[sub_status]; + } + break; - case AE_CODE_PROGRAMMER: + case AE_CODE_PROGRAMMER: - if (sub_status <= AE_CODE_PGM_MAX) { - exception = - acpi_gbl_exception_names_pgm[sub_status - 1]; - } - break; + if ( sub_status <= AE_CODE_PGM_MAX ) + { + exception = acpi_gbl_exception_names_pgm[sub_status - 1]; + } + break; - case AE_CODE_ACPI_TABLES: + case AE_CODE_ACPI_TABLES: - if (sub_status <= AE_CODE_TBL_MAX) { - exception = - acpi_gbl_exception_names_tbl[sub_status - 1]; - } - break; + if ( sub_status <= AE_CODE_TBL_MAX ) + { + exception = acpi_gbl_exception_names_tbl[sub_status - 1]; + } + break; - case AE_CODE_AML: + case AE_CODE_AML: - if (sub_status <= AE_CODE_AML_MAX) { - exception = - acpi_gbl_exception_names_aml[sub_status - 1]; - } - break; + if ( sub_status <= AE_CODE_AML_MAX ) + { + exception = acpi_gbl_exception_names_aml[sub_status - 1]; + } + break; - case AE_CODE_CONTROL: + case AE_CODE_CONTROL: - if (sub_status <= AE_CODE_CTRL_MAX) { - exception = - acpi_gbl_exception_names_ctrl[sub_status - 1]; - } - break; + if ( sub_status <= AE_CODE_CTRL_MAX ) + { + exception = acpi_gbl_exception_names_ctrl[sub_status - 1]; + } + break; - default: - break; - } + default: + break; + } - return (ACPI_CAST_PTR(const char, exception)); + return (ACPI_CAST_PTR(const char, exception)); } /******************************************************************************* @@ -133,46 +135,49 @@ const char *__init acpi_ut_validate_exception(acpi_status status) * ******************************************************************************/ -void ACPI_INTERNAL_VAR_XFACE __init -acpi_ut_error(const char *module_name, u32 line_number, char *format, ...) +void ACPI_INTERNAL_VAR_XFACE __init acpi_ut_error(const char *module_name, + u32 line_number, char *format, + ...) 
{ - va_list args; + va_list args; - acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number); + acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number); - va_start(args, format); - acpi_os_vprintf(format, args); - acpi_os_printf(" [%X]\n", ACPI_CA_VERSION); - va_end(args); + va_start(args, format); + acpi_os_vprintf(format, args); + acpi_os_printf(" [%X]\n", ACPI_CA_VERSION); + va_end(args); } -void ACPI_INTERNAL_VAR_XFACE __init -acpi_ut_warning(const char *module_name, u32 line_number, char *format, ...) +void ACPI_INTERNAL_VAR_XFACE __init acpi_ut_warning(const char *module_name, + u32 line_number, + char *format, ...) { - va_list args; + va_list args; - acpi_os_printf("ACPI Warning (%s-%04d): ", module_name, line_number); + acpi_os_printf("ACPI Warning (%s-%04d): ", module_name, line_number); - va_start(args, format); - acpi_os_vprintf(format, args); - acpi_os_printf(" [%X]\n", ACPI_CA_VERSION); - va_end(args); - va_end(args); + va_start(args, format); + acpi_os_vprintf(format, args); + acpi_os_printf(" [%X]\n", ACPI_CA_VERSION); + va_end(args); + va_end(args); } -void ACPI_INTERNAL_VAR_XFACE __init -acpi_ut_info(const char *module_name, u32 line_number, char *format, ...) +void ACPI_INTERNAL_VAR_XFACE __init acpi_ut_info(const char *module_name, + u32 line_number, char *format, + ...) { - va_list args; - - /* - * Removed module_name, line_number, and acpica version, not needed - * for info output - */ - acpi_os_printf("ACPI: "); - - va_start(args, format); - acpi_os_vprintf(format, args); - acpi_os_printf("\n"); - va_end(args); + va_list args; + + /* + * Removed module_name, line_number, and acpica version, not needed + * for info output + */ + acpi_os_printf("ACPI: "); + + va_start(args, format); + acpi_os_vprintf(format, args); + acpi_os_printf("\n"); + va_end(args); } diff --git a/xen/drivers/char/arm-uart.c b/xen/drivers/char/arm-uart.c index 627746ba89..397be82e00 100644 --- a/xen/drivers/char/arm-uart.c +++ b/xen/drivers/char/arm-uart.c @@ -57,10 +57,12 @@ static void __init dt_uart_init(void) ret = dt_property_read_string(chosen, "stdout-path", &stdout); if ( ret >= 0 ) { - printk("Taking dtuart configuration from /chosen/stdout-path\n"); - if ( strlcpy(opt_dtuart, stdout, sizeof(opt_dtuart)) - >= sizeof(opt_dtuart) ) - printk("WARNING: /chosen/stdout-path too long, truncated\n"); + printk( + "Taking dtuart configuration from /chosen/stdout-path\n"); + if ( strlcpy(opt_dtuart, stdout, sizeof(opt_dtuart)) >= + sizeof(opt_dtuart) ) + printk( + "WARNING: /chosen/stdout-path too long, truncated\n"); } else if ( ret != -EINVAL /* Not present */ ) printk("Failed to read /chosen/stdout-path (%d)\n", ret); @@ -118,7 +120,9 @@ static void __init acpi_uart_init(void) } } #else -static void __init acpi_uart_init(void) { } +static void __init acpi_uart_init(void) +{ +} #endif void __init arm_uart_init(void) diff --git a/xen/drivers/char/cadence-uart.c b/xen/drivers/char/cadence-uart.c index 22905ba66c..0f94ada954 100644 --- a/xen/drivers/char/cadence-uart.c +++ b/xen/drivers/char/cadence-uart.c @@ -29,7 +29,8 @@ #include #include -static struct cuart { +static struct cuart +{ unsigned int irq; void __iomem *regs; /* UART with IRQ line: interrupt-driven I/O. 
*/ @@ -37,8 +38,8 @@ static struct cuart { struct vuart_info vuart; } cuart_com = {0}; -#define cuart_read(uart, off) readl((uart)->regs + (off)) -#define cuart_write(uart, off,val) writel((val), (uart)->regs + (off)) +#define cuart_read(uart, off) readl((uart)->regs + (off)) +#define cuart_write(uart, off, val) writel((val), (uart)->regs + (off)) static void cuart_interrupt(int irq, void *data, struct cpu_user_regs *regs) { @@ -63,8 +64,9 @@ static void __init cuart_init_preirq(struct serial_port *port) cuart_write(uart, R_UART_MR, UART_MR_NO_PARITY); /* Enable and Reset both the RX and TX paths. */ - cuart_write(uart, R_UART_CR, UART_CR_RX_RST | UART_CR_TX_RST | - UART_CR_RX_ENABLE | UART_CR_TX_ENABLE); + cuart_write(uart, R_UART_CR, + UART_CR_RX_RST | UART_CR_TX_RST | UART_CR_RX_ENABLE | + UART_CR_TX_ENABLE); } static void __init cuart_init_postirq(struct serial_port *port) @@ -75,10 +77,11 @@ static void __init cuart_init_postirq(struct serial_port *port) if ( uart->irq > 0 ) { uart->irqaction.handler = cuart_interrupt; - uart->irqaction.name = "cadence-uart"; - uart->irqaction.dev_id = port; + uart->irqaction.name = "cadence-uart"; + uart->irqaction.dev_id = port; if ( (rc = setup_irq(uart->irq, 0, &uart->irqaction)) != 0 ) - printk("ERROR: Failed to allocate cadence-uart IRQ %d\n", uart->irq); + printk("ERROR: Failed to allocate cadence-uart IRQ %d\n", + uart->irq); } /* Clear pending error interrupts */ @@ -105,7 +108,7 @@ static int cuart_tx_ready(struct serial_port *port) struct cuart *uart = port->uart; unsigned int status = cuart_read(uart, R_UART_SR); - return !( status & UART_SR_INTR_TFUL ); + return !(status & UART_SR_INTR_TFUL); } static void cuart_putc(struct serial_port *port, char c) @@ -130,7 +133,7 @@ static int __init cuart_irq(struct serial_port *port) { struct cuart *uart = port->uart; - return ( (uart->irq > 0) ? uart->irq : -1 ); + return ((uart->irq > 0) ? 
uart->irq : -1); } static const struct vuart_info *cuart_vuart(struct serial_port *port) @@ -141,16 +144,16 @@ static const struct vuart_info *cuart_vuart(struct serial_port *port) } static struct uart_driver __read_mostly cuart_driver = { - .init_preirq = cuart_init_preirq, + .init_preirq = cuart_init_preirq, .init_postirq = cuart_init_postirq, - .endboot = NULL, - .suspend = cuart_suspend, - .resume = cuart_resume, - .tx_ready = cuart_tx_ready, - .putc = cuart_putc, - .getc = cuart_getc, - .irq = cuart_irq, - .vuart_info = cuart_vuart, + .endboot = NULL, + .suspend = cuart_suspend, + .resume = cuart_resume, + .tx_ready = cuart_tx_ready, + .putc = cuart_putc, + .getc = cuart_getc, + .irq = cuart_irq, + .vuart_info = cuart_vuart, }; static int __init cuart_init(struct dt_device_node *dev, const void *data) @@ -202,23 +205,21 @@ static int __init cuart_init(struct dt_device_node *dev, const void *data) return 0; } -static const struct dt_device_match cuart_dt_match[] __initconst = -{ +static const struct dt_device_match cuart_dt_match[] __initconst = { DT_MATCH_COMPATIBLE("cdns,uart-r1p8"), DT_MATCH_COMPATIBLE("cdns,uart-r1p12"), - { /* sentinel */ }, + {/* sentinel */}, }; -DT_DEVICE_START(cuart, "Cadence UART", DEVICE_SERIAL) - .dt_match = cuart_dt_match, - .init = cuart_init, -DT_DEVICE_END - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ +DT_DEVICE_START(cuart, "Cadence UART", DEVICE_SERIAL).dt_match = cuart_dt_match, + .init = cuart_init, + DT_DEVICE_END + + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c index 4315588f05..580f91683a 100644 --- a/xen/drivers/char/console.c +++ b/xen/drivers/char/console.c @@ -1,8 +1,8 @@ /****************************************************************************** * console.c - * + * * Emergency console I/O for Xen and the domain-0 guest OS. - * + * * Copyright (c) 2002-2004, K A Fraser. * * Added printf_ratelimit @@ -68,11 +68,11 @@ boolean_param("console_to_ring", opt_console_to_ring); /* console_timestamps: include a timestamp prefix on every Xen console line. */ enum con_timestamp_mode { - TSM_NONE, /* No timestamps */ - TSM_DATE, /* [YYYY-MM-DD HH:MM:SS] */ - TSM_DATE_MS, /* [YYYY-MM-DD HH:MM:SS.mmm] */ - TSM_BOOT, /* [SSSSSS.uuuuuu] */ - TSM_RAW, /* [XXXXXXXXXXXXXXXX] */ + TSM_NONE, /* No timestamps */ + TSM_DATE, /* [YYYY-MM-DD HH:MM:SS] */ + TSM_DATE_MS, /* [YYYY-MM-DD HH:MM:SS.mmm] */ + TSM_BOOT, /* [SSSSSS.uuuuuu] */ + TSM_RAW, /* [XXXXXXXXXXXXXXXX] */ }; static enum con_timestamp_mode __read_mostly opt_con_timestamp_mode = TSM_NONE; @@ -85,7 +85,7 @@ static uint32_t __initdata opt_conring_size; size_param("conring_size", opt_conring_size); #define _CONRING_SIZE 16384 -#define CONRING_IDX_MASK(i) ((i)&(conring_size-1)) +#define CONRING_IDX_MASK(i) ((i) & (conring_size - 1)) static char __initdata _conring[_CONRING_SIZE]; static char *__read_mostly conring = _conring; static uint32_t __read_mostly conring_size = _CONRING_SIZE; @@ -114,13 +114,13 @@ static DEFINE_SPINLOCK(console_lock); * the lower threshold equal to the upper. 
*/ #ifdef NDEBUG -#define XENLOG_UPPER_THRESHOLD 2 /* Do not print INFO and DEBUG */ -#define XENLOG_LOWER_THRESHOLD 2 /* Always print ERR and WARNING */ +#define XENLOG_UPPER_THRESHOLD 2 /* Do not print INFO and DEBUG */ +#define XENLOG_LOWER_THRESHOLD 2 /* Always print ERR and WARNING */ #define XENLOG_GUEST_UPPER_THRESHOLD 2 /* Do not print INFO and DEBUG */ #define XENLOG_GUEST_LOWER_THRESHOLD 0 /* Rate-limit ERR and WARNING */ #else -#define XENLOG_UPPER_THRESHOLD 4 /* Do not discard anything */ -#define XENLOG_LOWER_THRESHOLD 4 /* Print everything */ +#define XENLOG_UPPER_THRESHOLD 4 /* Do not discard anything */ +#define XENLOG_LOWER_THRESHOLD 4 /* Print everything */ #define XENLOG_GUEST_UPPER_THRESHOLD 4 /* Do not discard anything */ #define XENLOG_GUEST_LOWER_THRESHOLD 4 /* Print everything */ #endif @@ -128,7 +128,7 @@ static DEFINE_SPINLOCK(console_lock); * The XENLOG_DEFAULT is the default given to printks that * do not have any print level associated with them. */ -#define XENLOG_DEFAULT 1 /* XENLOG_WARNING */ +#define XENLOG_DEFAULT 1 /* XENLOG_WARNING */ #define XENLOG_GUEST_DEFAULT 1 /* XENLOG_WARNING */ static int __read_mostly xenlog_upper_thresh = XENLOG_UPPER_THRESHOLD; @@ -154,20 +154,21 @@ custom_runtime_param("guest_loglvl", parse_guest_loglvl); static atomic_t print_everything = ATOMIC_INIT(0); -#define ___parse_loglvl(s, ps, lvlstr, lvlnum) \ - if ( !strncmp((s), (lvlstr), strlen(lvlstr)) ) { \ - *(ps) = (s) + strlen(lvlstr); \ - return (lvlnum); \ +#define ___parse_loglvl(s, ps, lvlstr, lvlnum) \ + if ( !strncmp((s), (lvlstr), strlen(lvlstr)) ) \ + { \ + *(ps) = (s) + strlen(lvlstr); \ + return (lvlnum); \ } static int __parse_loglvl(const char *s, const char **ps) { - ___parse_loglvl(s, ps, "none", 0); - ___parse_loglvl(s, ps, "error", 1); + ___parse_loglvl(s, ps, "none", 0); + ___parse_loglvl(s, ps, "error", 1); ___parse_loglvl(s, ps, "warning", 2); - ___parse_loglvl(s, ps, "info", 3); - ___parse_loglvl(s, ps, "debug", 4); - ___parse_loglvl(s, ps, "all", 4); + ___parse_loglvl(s, ps, "info", 3); + ___parse_loglvl(s, ps, "debug", 4); + ___parse_loglvl(s, ps, "all", 4); return 2; /* sane fallback */ } @@ -175,7 +176,7 @@ static int _parse_loglvl(const char *s, int *lower, int *upper) { *lower = *upper = __parse_loglvl(s, &s); if ( *s == '/' ) - *upper = __parse_loglvl(s+1, &s); + *upper = __parse_loglvl(s + 1, &s); if ( *upper < *lower ) *upper = *lower; @@ -195,13 +196,18 @@ static int parse_guest_loglvl(const char *s) static char *loglvl_str(int lvl) { - switch ( lvl ) + switch (lvl) { - case 0: return "Nothing"; - case 1: return "Errors"; - case 2: return "Errors and warnings"; - case 3: return "Errors, warnings and info"; - case 4: return "All"; + case 0: + return "Nothing"; + case 1: + return "Errors"; + case 2: + return "Errors and warnings"; + case 3: + return "Errors, warnings and info"; + case 4: + return "All"; } return "???"; } @@ -224,16 +230,16 @@ static void do_toggle_guest(unsigned char key, struct cpu_user_regs *regs) lower_thresh_adj = &xenlog_lower_thresh; thresh_adj = "standard"; } - printk("'%c' pressed -> %s log level adjustments enabled\n", - key, thresh_adj); + printk("'%c' pressed -> %s log level adjustments enabled\n", key, + thresh_adj); } static void do_adj_thresh(unsigned char key) { if ( *upper_thresh_adj < *lower_thresh_adj ) *upper_thresh_adj = *lower_thresh_adj; - printk("'%c' pressed -> %s log level: %s (rate limited %s)\n", - key, thresh_adj, loglvl_str(*lower_thresh_adj), + printk("'%c' pressed -> %s log level: %s (rate limited 
%s)\n", key, + thresh_adj, loglvl_str(*lower_thresh_adj), loglvl_str(*upper_thresh_adj)); } @@ -274,15 +280,13 @@ long read_console_ring(struct xen_sysctl_readconsole *op) XEN_GUEST_HANDLE_PARAM(char) str; uint32_t idx, len, max, sofar, c, p; - str = guest_handle_cast(op->buffer, char), - max = op->count; + str = guest_handle_cast(op->buffer, char), max = op->count; sofar = 0; c = read_atomic(&conringc); p = read_atomic(&conringp); - if ( op->incremental && - (c <= p ? c < op->index && op->index <= p - : c < op->index || op->index <= p) ) + if ( op->incremental && (c <= p ? c < op->index && op->index <= p + : c < op->index || op->index <= p) ) c = op->index; while ( (c != p) && (sofar < max) ) @@ -312,7 +316,6 @@ long read_console_ring(struct xen_sysctl_readconsole *op) return 0; } - /* * ******************************************************* * *************** ACCESS TO SERIAL LINE ***************** @@ -321,7 +324,7 @@ long read_console_ring(struct xen_sysctl_readconsole *op) /* Characters received over the serial line are buffered for domain 0. */ #define SERIAL_RX_SIZE 128 -#define SERIAL_RX_MASK(_i) ((_i)&(SERIAL_RX_SIZE-1)) +#define SERIAL_RX_MASK(_i) ((_i) & (SERIAL_RX_SIZE - 1)) static char serial_rx_ring[SERIAL_RX_SIZE]; static unsigned int serial_rx_cons, serial_rx_prod; @@ -398,7 +401,7 @@ static void dump_console_ring_key(unsigned char key) * CTRL- changes input direction, rotating among Xen, Dom0, * and the DomUs started from Xen at boot. */ -#define switch_code (opt_conswitch[0]-'a'+1) +#define switch_code (opt_conswitch[0] - 'a' + 1) /* * console_rx=0 => input to xen * console_rx=1 => input to dom0 @@ -410,7 +413,7 @@ static unsigned int __read_mostly console_rx = 0; struct domain *console_input_domain(void) { if ( console_rx == 0 ) - return NULL; + return NULL; return rcu_lock_domain_by_id(console_rx - 1); } @@ -435,7 +438,7 @@ static void switch_serial_input(void) static void __serial_rx(char c, struct cpu_user_regs *regs) { - switch ( console_rx ) + switch (console_rx) { case 0: return handle_keypress(c, regs); @@ -465,8 +468,7 @@ static void __serial_rx(char c, struct cpu_user_regs *regs) * domain, without a full PV ring to Dom0 (in that case input * comes from the PV ring), then send the character to it. 
*/ - if ( d != NULL && - !d->arch.vpl011.backend_in_domain && + if ( d != NULL && !d->arch.vpl011.backend_in_domain && d->arch.vpl011.backend.xen != NULL ) vpl011_rx_char_xen(d, c); else @@ -518,9 +520,9 @@ static DECLARE_SOFTIRQ_TASKLET(notify_dom0_con_ring_tasklet, static inline void xen_console_write_debug_port(const char *buf, size_t len) { unsigned long tmp; - asm volatile ( "rep outsb;" - : "=&S" (tmp), "=&c" (tmp) - : "0" (buf), "1" (len), "d" (XEN_HVM_DEBUGCONS_IOPORT) ); + asm volatile("rep outsb;" + : "=&S"(tmp), "=&c"(tmp) + : "0"(buf), "1"(len), "d"(XEN_HVM_DEBUGCONS_IOPORT)); } #endif @@ -534,10 +536,9 @@ static long guest_console_write(XEN_GUEST_HANDLE_PARAM(char) buffer, int count) { if ( kcount && hypercall_preempt_check() ) return hypercall_create_continuation( - __HYPERVISOR_console_io, "iih", - CONSOLEIO_write, count, buffer); + __HYPERVISOR_console_io, "iih", CONSOLEIO_write, count, buffer); - kcount = min_t(int, count, sizeof(kbuf)-1); + kcount = min_t(int, count, sizeof(kbuf) - 1); if ( copy_from_guest(kbuf, buffer, kcount) ) return -EFAULT; kbuf[kcount] = '\0'; @@ -575,7 +576,7 @@ static long guest_console_write(XEN_GUEST_HANDLE_PARAM(char) buffer, int count) char *kin = kbuf, *kout = kbuf, c; /* Strip non-printable characters */ - for ( ; ; ) + for ( ;; ) { c = *kin++; if ( c == '\0' || c == '\n' ) @@ -623,7 +624,7 @@ long do_console_io(int cmd, int count, XEN_GUEST_HANDLE_PARAM(char) buffer) if ( rc ) return rc; - switch ( cmd ) + switch (cmd) { case CONSOLEIO_write: rc = guest_console_write(buffer, count); @@ -655,7 +656,6 @@ long do_console_io(int cmd, int count, XEN_GUEST_HANDLE_PARAM(char) buffer) return rc; } - /* * ***************************************************** * *************** GENERIC CONSOLE I/O ***************** @@ -697,7 +697,7 @@ static int printk_prefix_check(char *p, char **pp) while ( (p[0] == '<') && (p[1] != '\0') && (p[2] == '>') ) { - switch ( p[1] ) + switch (p[1]) { case 'G': upper_thresh = ACCESS_ONCE(xenlog_guest_upper_thresh); @@ -717,14 +717,13 @@ static int printk_prefix_check(char *p, char **pp) *pp = p; - return ((atomic_read(&print_everything) != 0) || - (loglvl < lower_thresh) || + return ((atomic_read(&print_everything) != 0) || (loglvl < lower_thresh) || ((loglvl < upper_thresh) && printk_ratelimit())); -} +} static int parse_console_timestamps(const char *s) { - switch ( parse_bool(s, NULL) ) + switch (parse_bool(s, NULL)) { case 0: opt_con_timestamp_mode = TSM_NONE; @@ -759,7 +758,7 @@ static void printk_start_of_line(const char *prefix) __putstr(prefix); - switch ( mode ) + switch (mode) { case TSM_DATE: case TSM_DATE_MS: @@ -770,16 +769,16 @@ static void printk_start_of_line(const char *prefix) else if ( mode == TSM_DATE ) { snprintf(tstr, sizeof(tstr), "[%04u-%02u-%02u %02u:%02u:%02u] ", - 1900 + tm.tm_year, tm.tm_mon + 1, tm.tm_mday, - tm.tm_hour, tm.tm_min, tm.tm_sec); + 1900 + tm.tm_year, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, + tm.tm_min, tm.tm_sec); break; } else { snprintf(tstr, sizeof(tstr), - "[%04u-%02u-%02u %02u:%02u:%02u.%03"PRIu64"] ", - 1900 + tm.tm_year, tm.tm_mon + 1, tm.tm_mday, - tm.tm_hour, tm.tm_min, tm.tm_sec, nsec / 1000000); + "[%04u-%02u-%02u %02u:%02u:%02u.%03" PRIu64 "] ", + 1900 + tm.tm_year, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, + tm.tm_min, tm.tm_sec, nsec / 1000000); break; } /* fall through */ @@ -789,13 +788,13 @@ static void printk_start_of_line(const char *prefix) if ( sec | nsec ) { - snprintf(tstr, sizeof(tstr), "[%5"PRIu64".%06"PRIu64"] ", - sec, nsec / 1000); + snprintf(tstr, 
sizeof(tstr), "[%5" PRIu64 ".%06" PRIu64 "] ", sec, + nsec / 1000); break; } /* fall through */ case TSM_RAW: - snprintf(tstr, sizeof(tstr), "[%016"PRIx64"] ", get_cycles()); + snprintf(tstr, sizeof(tstr), "[%016" PRIx64 "] ", get_cycles()); break; case TSM_NONE: @@ -808,12 +807,13 @@ static void printk_start_of_line(const char *prefix) static void vprintk_common(const char *prefix, const char *fmt, va_list args) { - struct vps { + struct vps + { bool_t continued, do_print; - } *state; + } * state; static DEFINE_PER_CPU(struct vps, state); - static char buf[1024]; - char *p, *q; + static char buf[1024]; + char *p, *q; unsigned long flags; /* console_lock can be acquired recursively from __printk_ratelimit(). */ @@ -930,8 +930,8 @@ void __init console_init_preirq(void) spin_unlock(&console_lock); printk("Xen version %d.%d%s (%s@%s) (%s) debug=%c " gcov_string " %s\n", xen_major_version(), xen_minor_version(), xen_extra_version(), - xen_compile_by(), xen_compile_domain(), - xen_compiler(), debug_build() ? 'y' : 'n', xen_compile_date()); + xen_compile_by(), xen_compile_domain(), xen_compiler(), + debug_build() ? 'y' : 'n', xen_compile_date()); printk("Latest ChangeSet: %s\n", xen_changeset()); if ( opt_sync_console ) @@ -962,7 +962,7 @@ void __init console_init_ring(void) opt_conring_size = PAGE_SIZE << order; spin_lock_irqsave(&console_lock, flags); - for ( i = conringc ; i != conringp; i++ ) + for ( i = conringc; i != conringp; i++ ) ring[i & (opt_conring_size - 1)] = conring[i & (conring_size - 1)]; conring = ring; smp_wmb(); /* Allow users of console_force_unlock() to see larger buffer. */ @@ -1015,10 +1015,10 @@ void __init console_endboot(void) register_keyhandler('w', dump_console_ring_key, "synchronously dump console ring buffer (dmesg)", 0); - register_irq_keyhandler('+', &do_inc_thresh, - "increase log level threshold", 0); - register_irq_keyhandler('-', &do_dec_thresh, - "decrease log level threshold", 0); + register_irq_keyhandler('+', &do_inc_thresh, "increase log level threshold", + 0); + register_irq_keyhandler('-', &do_dec_thresh, "decrease log level threshold", + 0); register_irq_keyhandler('G', &do_toggle_guest, "toggle host/guest log level adjustment", 0); @@ -1112,7 +1112,7 @@ int __printk_ratelimit(int ratelimit_ms, int ratelimit_burst) spin_lock_irqsave(&ratelimit_lock, flags); toks += ms - last_msg; last_msg = ms; - if ( toks > (ratelimit_burst * ratelimit_ms)) + if ( toks > (ratelimit_burst * ratelimit_ms) ) toks = ratelimit_burst * ratelimit_ms; if ( toks >= ratelimit_ms ) { @@ -1162,7 +1162,7 @@ int printk_ratelimit(void) /* Send output direct to console, or buffer it? */ static volatile int debugtrace_send_to_console; -static char *debugtrace_buf; /* Debug-trace buffer */ +static char *debugtrace_buf; /* Debug-trace buffer */ static unsigned int debugtrace_prd; /* Producer index */ static unsigned int debugtrace_kilobytes = 128, debugtrace_bytes; static unsigned int debugtrace_used; @@ -1201,7 +1201,7 @@ static void debugtrace_toggle(void) * buffer itself causes more printk() invocations. */ printk("debugtrace_printk now writing to %s.\n", - !debugtrace_send_to_console ? "console": "buffer"); + !debugtrace_send_to_console ? "console" : "buffer"); if ( !debugtrace_send_to_console ) debugtrace_dump_worker(); @@ -1209,7 +1209,6 @@ static void debugtrace_toggle(void) spin_unlock_irqrestore(&debugtrace_lock, flags); watchdog_enable(); - } void debugtrace_dump(void) @@ -1227,11 +1226,11 @@ void debugtrace_dump(void) void debugtrace_printk(const char *fmt, ...) 
{ - static char buf[1024]; + static char buf[1024]; static u32 count; - va_list args; - char *p; + va_list args; + char *p; unsigned long flags; if ( debugtrace_bytes == 0 ) @@ -1257,7 +1256,7 @@ void debugtrace_printk(const char *fmt, ...) { for ( p = buf; *p != '\0'; p++ ) { - debugtrace_buf[debugtrace_prd++] = *p; + debugtrace_buf[debugtrace_prd++] = *p; /* Always leave a nul byte at the end of the buffer. */ if ( debugtrace_prd == (debugtrace_bytes - 1) ) debugtrace_prd = 0; @@ -1278,7 +1277,8 @@ static int __init debugtrace_init(void) unsigned int kbytes, bytes; /* Round size down to next power of two. */ - while ( (kbytes = (debugtrace_kilobytes & (debugtrace_kilobytes-1))) != 0 ) + while ( (kbytes = (debugtrace_kilobytes & (debugtrace_kilobytes - 1))) != + 0 ) debugtrace_kilobytes = kbytes; bytes = debugtrace_kilobytes << 10; @@ -1302,7 +1302,6 @@ __initcall(debugtrace_init); #endif /* !NDEBUG */ - /* * ************************************************************** * *************** Debugging/tracing/error-report *************** @@ -1315,7 +1314,7 @@ void panic(const char *fmt, ...) unsigned long flags; static DEFINE_SPINLOCK(lock); static char buf[128]; - + debugtrace_dump(); /* Protects buf[] and ensure multi-line message prints atomically. */ @@ -1357,7 +1356,9 @@ void panic(const char *fmt, ...) * ************************************************************** */ -static void suspend_steal_fn(const char *str) { } +static void suspend_steal_fn(const char *str) +{ +} static int suspend_steal_id; int console_suspend(void) @@ -1383,4 +1384,3 @@ int console_resume(void) * indent-tabs-mode: nil * End: */ - diff --git a/xen/drivers/char/consoled.c b/xen/drivers/char/consoled.c index 552abf5766..02c478dd97 100644 --- a/xen/drivers/char/consoled.c +++ b/xen/drivers/char/consoled.c @@ -93,7 +93,7 @@ size_t consoled_guest_rx(void) ACCESS_ONCE(cons_ring->out_cons) = cons; pv_shim_inject_evtchn(pv_console_evtchn()); - out: +out: spin_unlock(&rx_lock); return recv; @@ -130,7 +130,7 @@ size_t consoled_guest_tx(char c) smp_wmb(); ACCESS_ONCE(cons_ring->in_prod) = prod; - notify: +notify: /* Always notify the guest: prevents receive path from getting stuck. */ pv_shim_inject_evtchn(pv_console_evtchn()); diff --git a/xen/drivers/char/ehci-dbgp.c b/xen/drivers/char/ehci-dbgp.c index 475dc41767..4c392a1114 100644 --- a/xen/drivers/char/ehci-dbgp.c +++ b/xen/drivers/char/ehci-dbgp.c @@ -20,7 +20,8 @@ /* EHCI register interface, corresponds to EHCI Revision 0.95 specification */ /* Section 2.2 Host Controller Capability Registers */ -struct ehci_caps { +struct ehci_caps +{ /* * These fields are specified as 8 and 16 bit registers, * but some hosts can't perform 8 or 16 bit PCI accesses. @@ -29,150 +30,153 @@ struct ehci_caps { * affects the memory map for big endian controllers. */ u32 hc_capbase; -#define HC_LENGTH(p) (0x00ff & (p)) /* bits 7:0 / offset 0x00 */ -#define HC_VERSION(p) (0xffff & ((p) >> 16)) /* bits 31:16 / offset 0x02 */ +#define HC_LENGTH(p) (0x00ff & (p)) /* bits 7:0 / offset 0x00 */ +#define HC_VERSION(p) (0xffff & ((p) >> 16)) /* bits 31:16 / offset 0x02 */ - u32 hcs_params; /* HCSPARAMS - offset 0x04 */ + u32 hcs_params; /* HCSPARAMS - offset 0x04 */ #define HCS_DEBUG_PORT(p) (((p) >> 20) & 0xf) /* bits 23:20, debug port? 
*/ -#define HCS_INDICATOR(p) ((p) & (1 << 16)) /* true: has port indicators */ -#define HCS_N_CC(p) (((p) >> 12) & 0xf) /* bits 15:12, #companion HCs */ -#define HCS_N_PCC(p) (((p) >> 8) & 0xf) /* bits 11:8, ports per CC */ +#define HCS_INDICATOR(p) ((p) & (1 << 16)) /* true: has port indicators */ +#define HCS_N_CC(p) (((p) >> 12) & 0xf) /* bits 15:12, #companion HCs */ +#define HCS_N_PCC(p) (((p) >> 8) & 0xf) /* bits 11:8, ports per CC */ #define HCS_PORTROUTED(p) ((p) & (1 << 7)) /* true: port routing */ -#define HCS_PPC(p) ((p) & (1 << 4)) /* true: port power control */ -#define HCS_N_PORTS(p) (((p) >> 0) & 0xf) /* bits 3:0, ports on HC */ +#define HCS_PPC(p) ((p) & (1 << 4)) /* true: port power control */ +#define HCS_N_PORTS(p) (((p) >> 0) & 0xf) /* bits 3:0, ports on HC */ - u32 hcc_params; /* HCCPARAMS - offset 0x08 */ + u32 hcc_params; /* HCCPARAMS - offset 0x08 */ /* EHCI 1.1 addendum */ #define HCC_32FRAME_PERIODIC_LIST(p) ((p) & (1 << 19)) #define HCC_PER_PORT_CHANGE_EVENT(p) ((p) & (1 << 18)) -#define HCC_LPM(p) ((p) & (1 << 17)) +#define HCC_LPM(p) ((p) & (1 << 17)) #define HCC_HW_PREFETCH(p) ((p) & (1 << 16)) -#define HCC_EXT_CAPS(p) (((p) >> 8) & 0xff) /* for pci extended caps */ -#define HCC_ISOC_CACHE(p) ((p) & (1 << 7)) /* true: can cache isoc frame */ -#define HCC_ISOC_THRES(p) (((p) >> 4) & 0x7) /* bits 6:4, uframes cached */ -#define HCC_CANPARK(p) ((p) & (1 << 2)) /* true: can park on async qh */ -#define HCC_PGM_FRAMELISTLEN(p) ((p) & (1 << 1)) /* true: periodic_size changes */ -#define HCC_64BIT_ADDR(p) ((p) & 1) /* true: can use 64-bit addr */ - - u8 portroute[8]; /* nibbles for routing - offset 0x0C */ +#define HCC_EXT_CAPS(p) (((p) >> 8) & 0xff) /* for pci extended caps */ +#define HCC_ISOC_CACHE(p) ((p) & (1 << 7)) /* true: can cache isoc frame */ +#define HCC_ISOC_THRES(p) (((p) >> 4) & 0x7) /* bits 6:4, uframes cached */ +#define HCC_CANPARK(p) ((p) & (1 << 2)) /* true: can park on async qh */ +#define HCC_PGM_FRAMELISTLEN(p) \ + ((p) & (1 << 1)) /* true: periodic_size changes */ +#define HCC_64BIT_ADDR(p) ((p)&1) /* true: can use 64-bit addr */ + + u8 portroute[8]; /* nibbles for routing - offset 0x0C */ }; /* Section 2.3 Host Controller Operational Registers */ -struct ehci_regs { +struct ehci_regs +{ /* USBCMD: offset 0x00 */ u32 command; /* EHCI 1.1 addendum */ -#define CMD_HIRD (0xf << 24) /* host initiated resume duration */ -#define CMD_PPCEE (1 << 15) /* per port change event enable */ -#define CMD_FSP (1 << 14) /* fully synchronized prefetch */ -#define CMD_ASPE (1 << 13) /* async schedule prefetch enable */ -#define CMD_PSPE (1 << 12) /* periodic schedule prefetch enable */ +#define CMD_HIRD (0xf << 24) /* host initiated resume duration */ +#define CMD_PPCEE (1 << 15) /* per port change event enable */ +#define CMD_FSP (1 << 14) /* fully synchronized prefetch */ +#define CMD_ASPE (1 << 13) /* async schedule prefetch enable */ +#define CMD_PSPE (1 << 12) /* periodic schedule prefetch enable */ /* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */ -#define CMD_PARK (1 << 11) /* enable "park" on async qh */ +#define CMD_PARK (1 << 11) /* enable "park" on async qh */ #define CMD_PARK_CNT(c) (((c) >> 8) & 3) /* how many transfers to park for */ -#define CMD_LRESET (1 << 7) /* partial reset (no ports, etc) */ -#define CMD_IAAD (1 << 6) /* "doorbell" interrupt async advance */ -#define CMD_ASE (1 << 5) /* async schedule enable */ -#define CMD_PSE (1 << 4) /* periodic schedule enable */ +#define CMD_LRESET (1 << 7) /* partial reset (no ports, 
etc) */ +#define CMD_IAAD (1 << 6) /* "doorbell" interrupt async advance */ +#define CMD_ASE (1 << 5) /* async schedule enable */ +#define CMD_PSE (1 << 4) /* periodic schedule enable */ /* 3:2 is periodic frame list size */ -#define CMD_RESET (1 << 1) /* reset HC not bus */ -#define CMD_RUN (1 << 0) /* start/stop HC */ +#define CMD_RESET (1 << 1) /* reset HC not bus */ +#define CMD_RUN (1 << 0) /* start/stop HC */ /* USBSTS: offset 0x04 */ u32 status; -#define STS_PPCE_MASK (0xff << 16) /* Per-Port change event 1-16 */ -#define STS_ASS (1 << 15) /* Async Schedule Status */ -#define STS_PSS (1 << 14) /* Periodic Schedule Status */ -#define STS_RECL (1 << 13) /* Reclamation */ -#define STS_HALT (1 << 12) /* Not running (any reason) */ -/* some bits reserved */ +#define STS_PPCE_MASK (0xff << 16) /* Per-Port change event 1-16 */ +#define STS_ASS (1 << 15) /* Async Schedule Status */ +#define STS_PSS (1 << 14) /* Periodic Schedule Status */ +#define STS_RECL (1 << 13) /* Reclamation */ +#define STS_HALT (1 << 12) /* Not running (any reason) */ + /* some bits reserved */ /* these STS_* flags are also intr_enable bits (USBINTR) */ -#define STS_IAA (1 << 5) /* Interrupted on async advance */ -#define STS_FATAL (1 << 4) /* such as some PCI access errors */ -#define STS_FLR (1 << 3) /* frame list rolled over */ -#define STS_PCD (1 << 2) /* port change detect */ -#define STS_ERR (1 << 1) /* "error" completion (overflow, ...) */ -#define STS_INT (1 << 0) /* "normal" completion (short, ...) */ +#define STS_IAA (1 << 5) /* Interrupted on async advance */ +#define STS_FATAL (1 << 4) /* such as some PCI access errors */ +#define STS_FLR (1 << 3) /* frame list rolled over */ +#define STS_PCD (1 << 2) /* port change detect */ +#define STS_ERR (1 << 1) /* "error" completion (overflow, ...) */ +#define STS_INT (1 << 0) /* "normal" completion (short, ...) 
*/ /* USBINTR: offset 0x08 */ u32 intr_enable; /* FRINDEX: offset 0x0C */ - u32 frame_index; /* current microframe number */ + u32 frame_index; /* current microframe number */ /* CTRLDSSEGMENT: offset 0x10 */ - u32 segment; /* address bits 63:32 if needed */ + u32 segment; /* address bits 63:32 if needed */ /* PERIODICLISTBASE: offset 0x14 */ - u32 frame_list; /* points to periodic list */ + u32 frame_list; /* points to periodic list */ /* ASYNCLISTADDR: offset 0x18 */ - u32 async_next; /* address of next async queue head */ + u32 async_next; /* address of next async queue head */ u32 reserved[9]; /* CONFIGFLAG: offset 0x40 */ u32 configured_flag; -#define FLAG_CF (1 << 0) /* true: we'll support "high speed" */ +#define FLAG_CF (1 << 0) /* true: we'll support "high speed" */ /* PORTSC: offset 0x44 */ - u32 port_status[0]; /* up to N_PORTS */ + u32 port_status[0]; /* up to N_PORTS */ /* EHCI 1.1 addendum */ -#define PORTSC_SUSPEND_STS_ACK 0 -#define PORTSC_SUSPEND_STS_NYET 1 +#define PORTSC_SUSPEND_STS_ACK 0 +#define PORTSC_SUSPEND_STS_NYET 1 #define PORTSC_SUSPEND_STS_STALL 2 -#define PORTSC_SUSPEND_STS_ERR 3 +#define PORTSC_SUSPEND_STS_ERR 3 -#define PORT_DEV_ADDR (0x7f << 25) /* device address */ -#define PORT_SSTS (0x3 << 23) /* suspend status */ +#define PORT_DEV_ADDR (0x7f << 25) /* device address */ +#define PORT_SSTS (0x3 << 23) /* suspend status */ /* 31:23 reserved */ -#define PORT_WKOC_E (1 << 22) /* wake on overcurrent (enable) */ -#define PORT_WKDISC_E (1 << 21) /* wake on disconnect (enable) */ -#define PORT_WKCONN_E (1 << 20) /* wake on connect (enable) */ +#define PORT_WKOC_E (1 << 22) /* wake on overcurrent (enable) */ +#define PORT_WKDISC_E (1 << 21) /* wake on disconnect (enable) */ +#define PORT_WKCONN_E (1 << 20) /* wake on connect (enable) */ /* 19:16 for port testing */ -#define PORT_TEST(x) (((x) & 0xf) << 16) /* Port Test Control */ -#define PORT_TEST_PKT PORT_TEST(0x4) /* Port Test Control - packet test */ +#define PORT_TEST(x) (((x)&0xf) << 16) /* Port Test Control */ +#define PORT_TEST_PKT PORT_TEST(0x4) /* Port Test Control - packet test */ #define PORT_TEST_FORCE PORT_TEST(0x5) /* Port Test Control - force enable */ -#define PORT_LED_OFF (0 << 14) -#define PORT_LED_AMBER (1 << 14) -#define PORT_LED_GREEN (2 << 14) -#define PORT_LED_MASK (3 << 14) -#define PORT_OWNER (1 << 13) /* true: companion hc owns this port */ -#define PORT_POWER (1 << 12) /* true: has power (see PPC) */ -#define PORT_USB11(x) (((x) & (3 << 10)) == (1 << 10)) /* USB 1.1 device */ +#define PORT_LED_OFF (0 << 14) +#define PORT_LED_AMBER (1 << 14) +#define PORT_LED_GREEN (2 << 14) +#define PORT_LED_MASK (3 << 14) +#define PORT_OWNER (1 << 13) /* true: companion hc owns this port */ +#define PORT_POWER (1 << 12) /* true: has power (see PPC) */ +#define PORT_USB11(x) (((x) & (3 << 10)) == (1 << 10)) /* USB 1.1 device */ /* 11:10 for detecting lowspeed devices (reset vs release ownership) */ /* 9 reserved */ -#define PORT_LPM (1 << 9) /* LPM transaction */ -#define PORT_RESET (1 << 8) /* reset port */ -#define PORT_SUSPEND (1 << 7) /* suspend port */ -#define PORT_RESUME (1 << 6) /* resume it */ -#define PORT_OCC (1 << 5) /* over current change */ -#define PORT_OC (1 << 4) /* over current active */ -#define PORT_PEC (1 << 3) /* port enable change */ -#define PORT_PE (1 << 2) /* port enable */ -#define PORT_CSC (1 << 1) /* connect status change */ -#define PORT_CONNECT (1 << 0) /* device connected */ -#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC) +#define PORT_LPM (1 << 9) /* LPM 
transaction */ +#define PORT_RESET (1 << 8) /* reset port */ +#define PORT_SUSPEND (1 << 7) /* suspend port */ +#define PORT_RESUME (1 << 6) /* resume it */ +#define PORT_OCC (1 << 5) /* over current change */ +#define PORT_OC (1 << 4) /* over current active */ +#define PORT_PEC (1 << 3) /* port enable change */ +#define PORT_PE (1 << 2) /* port enable */ +#define PORT_CSC (1 << 1) /* connect status change */ +#define PORT_CONNECT (1 << 0) /* device connected */ +#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC) }; /* * Appendix C, Debug port ... intended for use with special "debug devices" * that can help if there's no serial console. (nonstandard enumeration.) */ -struct ehci_dbg_port { +struct ehci_dbg_port +{ u32 control; -#define DBGP_OWNER (1 << 30) -#define DBGP_ENABLED (1 << 28) -#define DBGP_DONE (1 << 16) -#define DBGP_INUSE (1 << 10) +#define DBGP_OWNER (1 << 30) +#define DBGP_ENABLED (1 << 28) +#define DBGP_DONE (1 << 16) +#define DBGP_INUSE (1 << 10) #define DBGP_ERRCODE(x) (((x) >> 7) & 0x07) -# define DBGP_ERR_BAD 1 -# define DBGP_ERR_SIGNAL 2 -#define DBGP_ERROR (1 << 6) -#define DBGP_GO (1 << 5) -#define DBGP_OUT (1 << 4) -#define DBGP_LEN (0xf << 0) -#define DBGP_CLAIM (DBGP_OWNER | DBGP_ENABLED | DBGP_INUSE) +#define DBGP_ERR_BAD 1 +#define DBGP_ERR_SIGNAL 2 +#define DBGP_ERROR (1 << 6) +#define DBGP_GO (1 << 5) +#define DBGP_OUT (1 << 4) +#define DBGP_LEN (0xf << 0) +#define DBGP_CLAIM (DBGP_OWNER | DBGP_ENABLED | DBGP_INUSE) u32 pids; -#define DBGP_PID_GET(x) (((x) >> 16) & 0xff) +#define DBGP_PID_GET(x) (((x) >> 16) & 0xff) #define DBGP_PID_SET(data, tok) (((data) << 8) | (tok)) u32 data03; u32 data47; @@ -188,29 +192,29 @@ struct ehci_dbg_port { * This bit flag is used in endpoint descriptors' bEndpointAddress field. * It's also one of three fields in control requests bRequestType. */ -#define USB_DIR_OUT 0 /* to device */ -#define USB_DIR_IN 0x80 /* to host */ +#define USB_DIR_OUT 0 /* to device */ +#define USB_DIR_IN 0x80 /* to host */ /* * USB types, the second of three bRequestType fields */ -#define USB_TYPE_MASK (0x03 << 5) +#define USB_TYPE_MASK (0x03 << 5) #define USB_TYPE_STANDARD (0x00 << 5) -#define USB_TYPE_CLASS (0x01 << 5) -#define USB_TYPE_VENDOR (0x02 << 5) +#define USB_TYPE_CLASS (0x01 << 5) +#define USB_TYPE_VENDOR (0x02 << 5) #define USB_TYPE_RESERVED (0x03 << 5) /* * USB recipients, the third of three bRequestType fields */ -#define USB_RECIP_MASK 0x1f -#define USB_RECIP_DEVICE 0x00 +#define USB_RECIP_MASK 0x1f +#define USB_RECIP_DEVICE 0x00 #define USB_RECIP_INTERFACE 0x01 -#define USB_RECIP_ENDPOINT 0x02 -#define USB_RECIP_OTHER 0x03 +#define USB_RECIP_ENDPOINT 0x02 +#define USB_RECIP_OTHER 0x03 /* From Wireless USB 1.0 */ -#define USB_RECIP_PORT 0x04 -#define USB_RECIP_RPIPE 0x05 +#define USB_RECIP_PORT 0x04 +#define USB_RECIP_RPIPE 0x05 /* * Standard requests, for the bRequest field of a SETUP packet. @@ -219,19 +223,19 @@ struct ehci_dbg_port { * TYPE_CLASS or TYPE_VENDOR specific feature flags could be retrieved * by a GET_STATUS request. 
*/ -#define USB_REQ_GET_STATUS 0x00 -#define USB_REQ_CLEAR_FEATURE 0x01 -#define USB_REQ_SET_FEATURE 0x03 -#define USB_REQ_SET_ADDRESS 0x05 -#define USB_REQ_GET_DESCRIPTOR 0x06 -#define USB_REQ_SET_DESCRIPTOR 0x07 +#define USB_REQ_GET_STATUS 0x00 +#define USB_REQ_CLEAR_FEATURE 0x01 +#define USB_REQ_SET_FEATURE 0x03 +#define USB_REQ_SET_ADDRESS 0x05 +#define USB_REQ_GET_DESCRIPTOR 0x06 +#define USB_REQ_SET_DESCRIPTOR 0x07 #define USB_REQ_GET_CONFIGURATION 0x08 #define USB_REQ_SET_CONFIGURATION 0x09 -#define USB_REQ_GET_INTERFACE 0x0A -#define USB_REQ_SET_INTERFACE 0x0B -#define USB_REQ_SYNCH_FRAME 0x0C +#define USB_REQ_GET_INTERFACE 0x0A +#define USB_REQ_SET_INTERFACE 0x0B +#define USB_REQ_SYNCH_FRAME 0x0C -#define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */ +#define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */ /** * struct usb_ctrlrequest - SETUP data for a USB device control request @@ -250,7 +254,8 @@ struct ehci_dbg_port { * For most devices, interfaces don't coordinate with each other, so * such requests may be made at any time. */ -struct __packed usb_ctrlrequest { +struct __packed usb_ctrlrequest +{ u8 bRequestType; u8 bRequest; __le16 wValue; @@ -260,9 +265,10 @@ struct __packed usb_ctrlrequest { /* USB_DT_DEBUG: for special highspeed devices, replacing serial console */ -#define USB_DT_DEBUG 0x0a +#define USB_DT_DEBUG 0x0a -struct __packed usb_debug_descriptor { +struct __packed usb_debug_descriptor +{ u8 bLength; u8 bDescriptorType; /* bulk endpoints with 8 byte maxpacket */ @@ -277,45 +283,47 @@ struct __packed usb_debug_descriptor { */ /* token */ -#define USB_PID_OUT 0xe1 -#define USB_PID_IN 0x69 -#define USB_PID_SOF 0xa5 -#define USB_PID_SETUP 0x2d +#define USB_PID_OUT 0xe1 +#define USB_PID_IN 0x69 +#define USB_PID_SOF 0xa5 +#define USB_PID_SETUP 0x2d /* handshake */ -#define USB_PID_ACK 0xd2 -#define USB_PID_NAK 0x5a -#define USB_PID_STALL 0x1e -#define USB_PID_NYET 0x96 +#define USB_PID_ACK 0xd2 +#define USB_PID_NAK 0x5a +#define USB_PID_STALL 0x1e +#define USB_PID_NYET 0x96 /* data */ -#define USB_PID_DATA0 0xc3 -#define USB_PID_DATA1 0x4b -#define USB_PID_DATA2 0x87 -#define USB_PID_MDATA 0x0f +#define USB_PID_DATA0 0xc3 +#define USB_PID_DATA1 0x4b +#define USB_PID_DATA2 0x87 +#define USB_PID_MDATA 0x0f /* Special */ -#define USB_PID_PREAMBLE 0x3c -#define USB_PID_ERR 0x3c -#define USB_PID_SPLIT 0x78 -#define USB_PID_PING 0xb4 -#define USB_PID_UNDEF_0 0xf0 +#define USB_PID_PREAMBLE 0x3c +#define USB_PID_ERR 0x3c +#define USB_PID_SPLIT 0x78 +#define USB_PID_PING 0xb4 +#define USB_PID_UNDEF_0 0xf0 #define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320 -#define PCI_CAP_ID_EHCI_DEBUG 0x0a +#define PCI_CAP_ID_EHCI_DEBUG 0x0a -#define HUB_ROOT_RESET_TIME 50 /* times are in msec */ -#define HUB_SHORT_RESET_TIME 10 -#define HUB_LONG_RESET_TIME 200 -#define HUB_RESET_TIMEOUT 500 +#define HUB_ROOT_RESET_TIME 50 /* times are in msec */ +#define HUB_SHORT_RESET_TIME 10 +#define HUB_LONG_RESET_TIME 200 +#define HUB_RESET_TIMEOUT 500 -#define DBGP_MAX_PACKET 8 -#define DBGP_LOOPS 1000 -#define DBGP_TIMEOUT (250 * 1000) /* us */ -#define DBGP_CHECK_INTERVAL 100 /* us */ +#define DBGP_MAX_PACKET 8 +#define DBGP_LOOPS 1000 +#define DBGP_TIMEOUT (250 * 1000) /* us */ +#define DBGP_CHECK_INTERVAL 100 /* us */ /* This one can be set arbitrarily - only affects input responsiveness: */ -#define DBGP_IDLE_INTERVAL 100 /* ms */ +#define DBGP_IDLE_INTERVAL 100 /* ms */ -struct ehci_dbgp { +struct ehci_dbgp +{ struct ehci_dbg_port __iomem *ehci_debug; - enum dbgp_state { + enum dbgp_state + { 
dbgp_idle, dbgp_out, dbgp_in, @@ -323,7 +331,8 @@ struct ehci_dbgp { dbgp_unsafe /* cannot use debug device during EHCI reset */ } state; unsigned int phys_port; - struct { + struct + { unsigned int endpoint; unsigned int chunk; char buf[DBGP_MAX_PACKET]; @@ -360,8 +369,10 @@ static void ehci_dbgp_status(struct ehci_dbgp *dbgp, const char *str) } #ifndef DBGP_DEBUG -static inline __attribute__ ((format (printf, 1, 2))) void -dbgp_printk(const char *fmt, ...) { } +static inline __attribute__((format(printf, 1, 2))) void +dbgp_printk(const char *fmt, ...) +{ +} #endif static inline u32 dbgp_len_update(u32 x, u32 len) @@ -551,7 +562,7 @@ static int dbgp_wait_until_done(struct ehci_dbgp *dbgp, u32 ctrl, dbgp->timeout = 0; - for ( ; ; writel(ctrl | DBGP_GO, &dbgp->ehci_debug->control) ) + for ( ;; writel(ctrl | DBGP_GO, &dbgp->ehci_debug->control) ) { u8 pid; @@ -588,9 +599,9 @@ static int dbgp_wait_until_done(struct ehci_dbgp *dbgp, u32 ctrl, return ret; } -static int dbgp_bulk_write(struct ehci_dbgp *dbgp, - unsigned int devnum, unsigned int endpoint, - const void *bytes, unsigned int size, u32 *pctrl) +static int dbgp_bulk_write(struct ehci_dbgp *dbgp, unsigned int devnum, + unsigned int endpoint, const void *bytes, + unsigned int size, u32 *pctrl) { u32 addr, pids, ctrl; @@ -611,9 +622,8 @@ static int dbgp_bulk_write(struct ehci_dbgp *dbgp, return 0; } -static int dbgp_bulk_read(struct ehci_dbgp *dbgp, - unsigned int devnum, unsigned int endpoint, - unsigned int size, u32 *pctrl) +static int dbgp_bulk_read(struct ehci_dbgp *dbgp, unsigned int devnum, + unsigned int endpoint, unsigned int size, u32 *pctrl) { u32 addr, pids, ctrl; @@ -635,8 +645,8 @@ static int dbgp_bulk_read(struct ehci_dbgp *dbgp, } static int dbgp_control_msg(struct ehci_dbgp *dbgp, unsigned int devnum, - int requesttype, int request, int value, - int index, void *data, unsigned int size) + int requesttype, int request, int value, int index, + void *data, unsigned int size) { u32 addr, pids, ctrl; struct usb_ctrlrequest req; @@ -713,8 +723,9 @@ static unsigned int __init find_dbgp(struct ehci_dbgp *dbgp, cap = __find_dbgp(bus, slot, func); if ( !cap || ehci_num-- ) { - if ( !func && !(pci_conf_read8(0, bus, slot, func, - PCI_HEADER_TYPE) & 0x80) ) + if ( !func && + !(pci_conf_read8(0, bus, slot, func, PCI_HEADER_TYPE) & + 0x80) ) break; continue; } @@ -806,12 +817,11 @@ static int ehci_reset_port(struct ehci_dbgp *dbgp, unsigned int port) writel(portsc, &dbgp->ehci_regs->port_status[port - 1]); delay = HUB_ROOT_RESET_TIME; - for ( delay_time = 0; delay_time < HUB_RESET_TIMEOUT; - delay_time += delay ) + for ( delay_time = 0; delay_time < HUB_RESET_TIMEOUT; delay_time += delay ) { dbgp_mdelay(delay); portsc = readl(&dbgp->ehci_regs->port_status[port - 1]); - if (!(portsc & PORT_RESET)) + if ( !(portsc & PORT_RESET) ) break; } @@ -824,7 +834,7 @@ static int ehci_reset_port(struct ehci_dbgp *dbgp, unsigned int port) &dbgp->ehci_regs->port_status[port - 1]); do { udelay(1); - portsc = readl(&dbgp->ehci_regs->port_status[port-1]); + portsc = readl(&dbgp->ehci_regs->port_status[port - 1]); } while ( (portsc & PORT_RESET) && --loop ); } @@ -933,10 +943,10 @@ try_again: /* Find the debug device and make it device number 127 */ for ( devnum = 0; devnum <= 127; devnum++ ) { - ret = dbgp_control_msg(dbgp, devnum, - USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - USB_REQ_GET_DESCRIPTOR, (USB_DT_DEBUG << 8), 0, - &dbgp_desc, sizeof(dbgp_desc)); + ret = dbgp_control_msg( + dbgp, devnum, USB_DIR_IN | USB_TYPE_STANDARD | 
USB_RECIP_DEVICE, + USB_REQ_GET_DESCRIPTOR, (USB_DT_DEBUG << 8), 0, &dbgp_desc, + sizeof(dbgp_desc)); if ( ret > 0 ) break; } @@ -951,9 +961,9 @@ try_again: /* Move the device to 127 if it isn't already there. */ if ( devnum != USB_DEBUG_DEVNUM ) { - ret = dbgp_control_msg(dbgp, devnum, - USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - USB_REQ_SET_ADDRESS, USB_DEBUG_DEVNUM, 0, NULL, 0); + ret = dbgp_control_msg( + dbgp, devnum, USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, + USB_REQ_SET_ADDRESS, USB_DEBUG_DEVNUM, 0, NULL, 0); if ( ret < 0 ) { dbgp_printk("could not move attached device to %d\n", @@ -967,8 +977,8 @@ try_again: /* Enable the debug interface */ ret = dbgp_control_msg(dbgp, USB_DEBUG_DEVNUM, USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - USB_REQ_SET_FEATURE, USB_DEVICE_DEBUG_MODE, - 0, NULL, 0); + USB_REQ_SET_FEATURE, USB_DEVICE_DEBUG_MODE, 0, NULL, + 0); if ( ret < 0 ) { dbgp_printk("could not enable the debug device\n"); @@ -977,8 +987,8 @@ try_again: dbgp_printk("debug interface enabled\n"); /* Perform a small write to get the even/odd data state in sync. */ - ret = dbgp_bulk_write(dbgp, USB_DEBUG_DEVNUM, dbgp->out.endpoint, - "\n", 1, &ctrl); + ret = dbgp_bulk_write(dbgp, USB_DEBUG_DEVNUM, dbgp->out.endpoint, "\n", 1, + &ctrl); if ( !ret ) ret = dbgp_wait_until_done(dbgp, ctrl, DBGP_LOOPS); if ( ret < 0 ) @@ -1016,8 +1026,8 @@ static void nvidia_set_debug_port(struct ehci_dbgp *dbgp, unsigned int port) static void __init detect_set_debug_port(struct ehci_dbgp *dbgp) { - if ( pci_conf_read16(0, dbgp->bus, dbgp->slot, dbgp->func, - PCI_VENDOR_ID) == 0x10de ) + if ( pci_conf_read16(0, dbgp->bus, dbgp->slot, dbgp->func, PCI_VENDOR_ID) == + 0x10de ) { dbgp_printk("using nvidia set_debug_port\n"); set_debug_port = nvidia_set_debug_port; @@ -1028,8 +1038,8 @@ static void __init detect_set_debug_port(struct ehci_dbgp *dbgp) * The code in ehci_dbgp_bios_handoff() is derived from the USB PCI * quirk initialization in Linux. 
*/ -#define EHCI_USBLEGSUP_BIOS (1 << 16) /* BIOS semaphore */ -#define EHCI_USBLEGCTLSTS 4 /* legacy control/status */ +#define EHCI_USBLEGSUP_BIOS (1 << 16) /* BIOS semaphore */ +#define EHCI_USBLEGCTLSTS 4 /* legacy control/status */ static void ehci_dbgp_bios_handoff(struct ehci_dbgp *dbgp, u32 hcc_params) { u32 cap; @@ -1098,7 +1108,7 @@ try_next_port: for ( i = 1; i <= n_ports; i++ ) { - portsc = readl(&dbgp->ehci_regs->port_status[i-1]); + portsc = readl(&dbgp->ehci_regs->port_status[i - 1]); dbgp_printk("portstatus%d: %08x\n", i, portsc); } @@ -1120,7 +1130,7 @@ try_next_port: return -1; ret = ehci_dbgp_external_startup(dbgp); - if (ret == -EIO) + if ( ret == -EIO ) goto next_debug_port; if ( ret < 0 ) @@ -1185,7 +1195,7 @@ static void ehci_dbgp_flush(struct serial_port *port) goal = NOW() + MICROSECS(DBGP_CHECK_INTERVAL); if ( dbgp->timer.expires > goal ) - set_timer(&dbgp->timer, goal); + set_timer(&dbgp->timer, goal); } static void ehci_dbgp_putc(struct serial_port *port, char c) @@ -1267,8 +1277,8 @@ static void _ehci_dbgp_poll(struct cpu_user_regs *regs) if ( spin_trylock_irqsave(&port->tx_lock, flags) ) { - if ( dbgp->state == dbgp_idle && !dbgp->in.chunk && - !dbgp->out.chunk && port->txbufp == port->txbufc ) + if ( dbgp->state == dbgp_idle && !dbgp->in.chunk && !dbgp->out.chunk && + port->txbufp == port->txbufc ) { if ( dbgp_bulk_read(dbgp, USB_DEBUG_DEVNUM, dbgp->in.endpoint, DBGP_MAX_PACKET, NULL) ) @@ -1307,13 +1317,13 @@ static void __init ehci_dbgp_init_preirq(struct serial_port *port) u32 debug_port, offset; void __iomem *ehci_bar; - debug_port = pci_conf_read32(0, dbgp->bus, dbgp->slot, dbgp->func, - dbgp->cap); + debug_port = + pci_conf_read32(0, dbgp->bus, dbgp->slot, dbgp->func, dbgp->cap); offset = (debug_port >> 16) & 0xfff; /* double check if the mem space is enabled */ - dbgp->pci_cr = pci_conf_read8(0, dbgp->bus, dbgp->slot, dbgp->func, - PCI_COMMAND); + dbgp->pci_cr = + pci_conf_read8(0, dbgp->bus, dbgp->slot, dbgp->func, PCI_COMMAND); if ( !(dbgp->pci_cr & PCI_COMMAND_MEMORY) ) { dbgp->pci_cr |= PCI_COMMAND_MEMORY; @@ -1332,8 +1342,7 @@ static void __init ehci_dbgp_init_preirq(struct serial_port *port) dbgp_printk("ehci_bar: %p\n", ehci_bar); dbgp->ehci_caps = ehci_bar; - dbgp->ehci_regs = ehci_bar + - HC_LENGTH(readl(&dbgp->ehci_caps->hc_capbase)); + dbgp->ehci_regs = ehci_bar + HC_LENGTH(readl(&dbgp->ehci_caps->hc_capbase)); dbgp->ehci_debug = ehci_bar + offset; detect_set_debug_port(dbgp); @@ -1383,8 +1392,8 @@ static int ehci_dbgp_check_release(struct ehci_dbgp *dbgp) * so as to allow for reuse of the USB device, which means it is time * to shutdown the USB debug port. 
*/ - printk(XENLOG_INFO "Releasing EHCI debug port at %02x:%02x.%u\n", - dbgp->bus, dbgp->slot, dbgp->func); + printk(XENLOG_INFO "Releasing EHCI debug port at %02x:%02x.%u\n", dbgp->bus, + dbgp->slot, dbgp->func); if ( dbgp->timer.function ) kill_timer(&dbgp->timer); @@ -1415,8 +1424,8 @@ static void ehci_dbgp_suspend(struct serial_port *port) stop_timer(&dbgp->timer); dbgp->timer.expires = 0; - dbgp->pci_cr = pci_conf_read16(0, dbgp->bus, dbgp->slot, dbgp->func, - PCI_COMMAND); + dbgp->pci_cr = + pci_conf_read16(0, dbgp->bus, dbgp->slot, dbgp->func, PCI_COMMAND); dbgp->state = dbgp_unsafe; } @@ -1430,26 +1439,25 @@ static void ehci_dbgp_resume(struct serial_port *port) pci_conf_write32(0, dbgp->bus, dbgp->slot, dbgp->func, dbgp->bar, dbgp->bar_val); - pci_conf_write16(0, dbgp->bus, dbgp->slot, dbgp->func, - PCI_COMMAND, dbgp->pci_cr); + pci_conf_write16(0, dbgp->bus, dbgp->slot, dbgp->func, PCI_COMMAND, + dbgp->pci_cr); ehci_dbgp_setup_preirq(dbgp); ehci_dbgp_setup_postirq(dbgp); } static struct uart_driver __read_mostly ehci_dbgp_driver = { - .init_preirq = ehci_dbgp_init_preirq, + .init_preirq = ehci_dbgp_init_preirq, .init_postirq = ehci_dbgp_init_postirq, - .endboot = ehci_dbgp_endboot, - .suspend = ehci_dbgp_suspend, - .resume = ehci_dbgp_resume, - .tx_ready = ehci_dbgp_tx_ready, - .putc = ehci_dbgp_putc, - .flush = ehci_dbgp_flush, - .getc = ehci_dbgp_getc -}; + .endboot = ehci_dbgp_endboot, + .suspend = ehci_dbgp_suspend, + .resume = ehci_dbgp_resume, + .tx_ready = ehci_dbgp_tx_ready, + .putc = ehci_dbgp_putc, + .flush = ehci_dbgp_flush, + .getc = ehci_dbgp_getc}; -static struct ehci_dbgp ehci_dbgp = { .state = dbgp_unsafe, .phys_port = 1 }; +static struct ehci_dbgp ehci_dbgp = {.state = dbgp_unsafe, .phys_port = 1}; static char __initdata opt_dbgp[30]; string_param("dbgp", opt_dbgp); @@ -1474,8 +1482,8 @@ void __init ehci_dbgp_init(void) if ( !dbgp->cap ) return; - dbgp_printk("Found EHCI debug port on %02x:%02x.%u\n", - dbgp->bus, dbgp->slot, dbgp->func); + dbgp_printk("Found EHCI debug port on %02x:%02x.%u\n", dbgp->bus, + dbgp->slot, dbgp->func); } else if ( strncmp(opt_dbgp + 4, "@pci", 4) == 0 ) { @@ -1496,14 +1504,13 @@ void __init ehci_dbgp_init(void) if ( !dbgp->cap ) return; - dbgp_printk("Using EHCI debug port on %02x:%02x.%u\n", - bus, slot, func); + dbgp_printk("Using EHCI debug port on %02x:%02x.%u\n", bus, slot, func); } else return; - debug_port = pci_conf_read32(0, dbgp->bus, dbgp->slot, dbgp->func, - dbgp->cap); + debug_port = + pci_conf_read32(0, dbgp->bus, dbgp->slot, dbgp->func, dbgp->cap); dbgp->bar = (debug_port >> 29) & 0x7; dbgp->bar = ((dbgp->bar - 1) * 4) + PCI_BASE_ADDRESS_0; offset = (debug_port >> 16) & 0xfff; @@ -1514,8 +1521,8 @@ void __init ehci_dbgp_init(void) return; } - dbgp->bar_val = bar_val = pci_conf_read32(0, dbgp->bus, dbgp->slot, - dbgp->func, dbgp->bar); + dbgp->bar_val = bar_val = + pci_conf_read32(0, dbgp->bus, dbgp->slot, dbgp->func, dbgp->bar); dbgp_printk("bar_val: %08x\n", bar_val); if ( bar_val & ~PCI_BASE_ADDRESS_MEM_MASK ) { @@ -1537,19 +1544,19 @@ int dbgp_op(const struct physdev_dbgp_op *op) if ( !ehci_dbgp.ehci_debug ) return 0; - switch ( op->bus ) + switch (op->bus) { case PHYSDEVOP_DBGP_BUS_UNKNOWN: break; case PHYSDEVOP_DBGP_BUS_PCI: if ( op->u.pci.seg || ehci_dbgp.bus != op->u.pci.bus || - PCI_DEVFN(ehci_dbgp.slot, ehci_dbgp.func) != op->u.pci.devfn ) - default: + PCI_DEVFN(ehci_dbgp.slot, ehci_dbgp.func) != op->u.pci.devfn ) + default: return 0; break; } - switch ( op->op ) + switch (op->op) { case 
PHYSDEVOP_DBGP_RESET_PREPARE: spin_lock_irq(ehci_dbgp.lock); diff --git a/xen/drivers/char/exynos4210-uart.c b/xen/drivers/char/exynos4210-uart.c index fa7dbc0391..135e55c9d9 100644 --- a/xen/drivers/char/exynos4210-uart.c +++ b/xen/drivers/char/exynos4210-uart.c @@ -27,7 +27,8 @@ #include #include -static struct exynos4210_uart { +static struct exynos4210_uart +{ unsigned int baud, clock_hz, data_bits, parity, stop_bits; unsigned int irq; void *regs; @@ -36,16 +37,17 @@ static struct exynos4210_uart { } exynos4210_com = {0}; /* These parity settings can be ORed directly into the ULCON. */ -#define PARITY_NONE (0) -#define PARITY_ODD (0x4) -#define PARITY_EVEN (0x5) +#define PARITY_NONE (0) +#define PARITY_ODD (0x4) +#define PARITY_EVEN (0x5) #define FORCED_CHECKED_AS_ONE (0x6) #define FORCED_CHECKED_AS_ZERO (0x7) -#define exynos4210_read(uart, off) readl((uart)->regs + off) -#define exynos4210_write(uart, off, val) writel(val, (uart->regs) + off) +#define exynos4210_read(uart, off) readl((uart)->regs + off) +#define exynos4210_write(uart, off, val) writel(val, (uart->regs) + off) -static void exynos4210_uart_interrupt(int irq, void *data, struct cpu_user_regs *regs) +static void exynos4210_uart_interrupt(int irq, void *data, + struct cpu_user_regs *regs) { struct serial_port *port = data; struct exynos4210_uart *uart = port->uart; @@ -77,7 +79,6 @@ static void exynos4210_uart_interrupt(int irq, void *data, struct cpu_user_regs exynos4210_write(uart, UINTP, UINTM_ERROR); } - if ( status & (UINTM_RXD | UINTM_ERROR) ) { /* uart->regs[UINTM] |= RXD|ERROR; */ @@ -147,7 +148,6 @@ static void __init exynos4210_uart_init_preirq(struct serial_port *port) ASSERT(uart->stop_bits >= 1 && uart->stop_bits <= 2); ulcon |= (uart->stop_bits - 1) << ULCON_STOPB_SHIFT; - /* Parity */ ulcon |= uart->parity << ULCON_PARITY_SHIFT; @@ -184,7 +184,7 @@ static void __init exynos4210_uart_init_preirq(struct serial_port *port) */ exynos4210_write(uart, UCON, UCON_RX_IRQ_LEVEL | UCON_TX_IRQ_LEVEL | UCON_RX_IRQ | - UCON_TX_IRQ | UCON_RX_TIMEOUT); + UCON_TX_IRQ | UCON_RX_TIMEOUT); } static void __init exynos4210_uart_init_postirq(struct serial_port *port) @@ -193,8 +193,8 @@ static void __init exynos4210_uart_init_postirq(struct serial_port *port) int rc; uart->irqaction.handler = exynos4210_uart_interrupt; - uart->irqaction.name = "exynos4210_uart"; - uart->irqaction.dev_id = port; + uart->irqaction.name = "exynos4210_uart"; + uart->irqaction.dev_id = port; if ( (rc = setup_irq(uart->irq, 0, &uart->irqaction)) != 0 ) dprintk(XENLOG_ERR, "Failed to allocated exynos4210_uart IRQ %d\n", @@ -282,16 +282,16 @@ static const struct vuart_info *exynos4210_vuart_info(struct serial_port *port) } static struct uart_driver __read_mostly exynos4210_uart_driver = { - .init_preirq = exynos4210_uart_init_preirq, + .init_preirq = exynos4210_uart_init_preirq, .init_postirq = exynos4210_uart_init_postirq, - .endboot = NULL, - .suspend = exynos4210_uart_suspend, - .resume = exynos4210_uart_resume, - .tx_ready = exynos4210_uart_tx_ready, - .putc = exynos4210_uart_putc, - .getc = exynos4210_uart_getc, - .irq = exynos4210_uart_irq, - .vuart_info = exynos4210_vuart_info, + .endboot = NULL, + .suspend = exynos4210_uart_suspend, + .resume = exynos4210_uart_resume, + .tx_ready = exynos4210_uart_tx_ready, + .putc = exynos4210_uart_putc, + .getc = exynos4210_uart_getc, + .irq = exynos4210_uart_irq, + .vuart_info = exynos4210_vuart_info, }; /* TODO: Parse UART config from the command line */ @@ -309,9 +309,9 @@ static int __init 
exynos4210_uart_init(struct dt_device_node *dev, uart = &exynos4210_com; /* uart->clock_hz = 0x16e3600; */ - uart->baud = BAUD_AUTO; + uart->baud = BAUD_AUTO; uart->data_bits = 8; - uart->parity = PARITY_NONE; + uart->parity = PARITY_NONE; uart->stop_bits = 1; res = dt_device_get_address(dev, 0, &addr, &size); @@ -351,22 +351,21 @@ static int __init exynos4210_uart_init(struct dt_device_node *dev, return 0; } -static const struct dt_device_match exynos4210_dt_match[] __initconst = -{ +static const struct dt_device_match exynos4210_dt_match[] __initconst = { DT_MATCH_COMPATIBLE("samsung,exynos4210-uart"), - { /* sentinel */ }, + {/* sentinel */}, }; -DT_DEVICE_START(exynos4210, "Exynos 4210 UART", DEVICE_SERIAL) - .dt_match = exynos4210_dt_match, - .init = exynos4210_uart_init, -DT_DEVICE_END +DT_DEVICE_START(exynos4210, "Exynos 4210 UART", DEVICE_SERIAL).dt_match = + exynos4210_dt_match, + .init = exynos4210_uart_init, + DT_DEVICE_END -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/drivers/char/mvebu-uart.c b/xen/drivers/char/mvebu-uart.c index b72db9542e..1dde7ed1f0 100644 --- a/xen/drivers/char/mvebu-uart.c +++ b/xen/drivers/char/mvebu-uart.c @@ -24,46 +24,47 @@ #include /* Register offsets */ -#define UART_RX_REG 0x00 - -#define UART_TX_REG 0x04 - -#define UART_CTRL_REG 0x08 -#define CTRL_TXFIFO_RST BIT(15) -#define CTRL_RXFIFO_RST BIT(14) -#define CTRL_TX_RDY_INT BIT(5) -#define CTRL_RX_RDY_INT BIT(4) -#define CTRL_BRK_DET_INT BIT(3) -#define CTRL_FRM_ERR_INT BIT(2) -#define CTRL_PAR_ERR_INT BIT(1) -#define CTRL_OVR_ERR_INT BIT(0) -#define CTRL_ERR_INT (CTRL_BRK_DET_INT | CTRL_FRM_ERR_INT | \ - CTRL_PAR_ERR_INT | CTRL_OVR_ERR_INT) - -#define UART_STATUS_REG 0x0c -#define STATUS_TXFIFO_EMP BIT(13) -#define STATUS_TXFIFO_FUL BIT(11) -#define STATUS_TXFIFO_HFL BIT(10) -#define STATUS_TX_RDY BIT(5) -#define STATUS_RX_RDY BIT(4) -#define STATUS_BRK_DET BIT(3) -#define STATUS_FRM_ERR BIT(2) -#define STATUS_PAR_ERR BIT(1) -#define STATUS_OVR_ERR BIT(0) -#define STATUS_BRK_ERR (STATUS_BRK_DET | STATUS_FRM_ERR | \ - STATUS_PAR_ERR | STATUS_OVR_ERR) - -#define TX_FIFO_SIZE 32 - -static struct mvebu3700_uart { +#define UART_RX_REG 0x00 + +#define UART_TX_REG 0x04 + +#define UART_CTRL_REG 0x08 +#define CTRL_TXFIFO_RST BIT(15) +#define CTRL_RXFIFO_RST BIT(14) +#define CTRL_TX_RDY_INT BIT(5) +#define CTRL_RX_RDY_INT BIT(4) +#define CTRL_BRK_DET_INT BIT(3) +#define CTRL_FRM_ERR_INT BIT(2) +#define CTRL_PAR_ERR_INT BIT(1) +#define CTRL_OVR_ERR_INT BIT(0) +#define CTRL_ERR_INT \ + (CTRL_BRK_DET_INT | CTRL_FRM_ERR_INT | CTRL_PAR_ERR_INT | CTRL_OVR_ERR_INT) + +#define UART_STATUS_REG 0x0c +#define STATUS_TXFIFO_EMP BIT(13) +#define STATUS_TXFIFO_FUL BIT(11) +#define STATUS_TXFIFO_HFL BIT(10) +#define STATUS_TX_RDY BIT(5) +#define STATUS_RX_RDY BIT(4) +#define STATUS_BRK_DET BIT(3) +#define STATUS_FRM_ERR BIT(2) +#define STATUS_PAR_ERR BIT(1) +#define STATUS_OVR_ERR BIT(0) +#define STATUS_BRK_ERR \ + (STATUS_BRK_DET | STATUS_FRM_ERR | STATUS_PAR_ERR | STATUS_OVR_ERR) + +#define TX_FIFO_SIZE 32 + +static struct mvebu3700_uart +{ unsigned int irq; void __iomem *regs; struct irqaction irqaction; struct vuart_info vuart; } mvebu3700_com = {0}; -#define mvebu3700_read(uart, off) readl((uart)->regs + off) -#define mvebu3700_write(uart, off, val) writel(val, (uart->regs) + off) +#define 
mvebu3700_read(uart, off) readl((uart)->regs + off) +#define mvebu3700_write(uart, off, val) writel(val, (uart->regs) + off) static void mvebu3700_uart_interrupt(int irq, void *data, struct cpu_user_regs *regs) @@ -72,8 +73,8 @@ static void mvebu3700_uart_interrupt(int irq, void *data, struct mvebu3700_uart *uart = port->uart; uint32_t st = mvebu3700_read(uart, UART_STATUS_REG); - if ( st & (STATUS_RX_RDY | STATUS_OVR_ERR | STATUS_FRM_ERR | - STATUS_BRK_DET) ) + if ( st & + (STATUS_RX_RDY | STATUS_OVR_ERR | STATUS_FRM_ERR | STATUS_BRK_DET) ) serial_rx_interrupt(port, regs); if ( st & STATUS_TX_RDY ) @@ -109,8 +110,8 @@ static void __init mvebu3700_uart_init_postirq(struct serial_port *port) uint32_t reg; uart->irqaction.handler = mvebu3700_uart_interrupt; - uart->irqaction.name = "mvebu3700_uart"; - uart->irqaction.dev_id = port; + uart->irqaction.name = "mvebu3700_uart"; + uart->irqaction.dev_id = port; if ( setup_irq(uart->irq, 0, &uart->irqaction) != 0 ) { @@ -210,18 +211,18 @@ static int mvebu3700_uart_tx_ready(struct serial_port *port) } static struct uart_driver __read_mostly mvebu3700_uart_driver = { - .init_preirq = mvebu3700_uart_init_preirq, + .init_preirq = mvebu3700_uart_init_preirq, .init_postirq = mvebu3700_uart_init_postirq, - .endboot = NULL, - .suspend = mvebu3700_uart_suspend, - .resume = mvebu3700_uart_resume, - .putc = mvebu3700_uart_putc, - .getc = mvebu3700_uart_getc, - .tx_ready = mvebu3700_uart_tx_ready, - .stop_tx = mvebu3700_uart_stop_tx, - .start_tx = mvebu3700_uart_start_tx, - .irq = mvebu3700_irq, - .vuart_info = mvebu3700_vuart_info, + .endboot = NULL, + .suspend = mvebu3700_uart_suspend, + .resume = mvebu3700_uart_resume, + .putc = mvebu3700_uart_putc, + .getc = mvebu3700_uart_getc, + .tx_ready = mvebu3700_uart_tx_ready, + .stop_tx = mvebu3700_uart_stop_tx, + .start_tx = mvebu3700_uart_start_tx, + .irq = mvebu3700_irq, + .vuart_info = mvebu3700_vuart_info, }; static int __init mvebu_uart_init(struct dt_device_node *dev, const void *data) @@ -250,7 +251,7 @@ static int __init mvebu_uart_init(struct dt_device_node *dev, const void *data) return -EINVAL; } - uart->irq = res; + uart->irq = res; uart->regs = ioremap_nocache(addr, size); if ( !uart->regs ) @@ -273,22 +274,21 @@ static int __init mvebu_uart_init(struct dt_device_node *dev, const void *data) return 0; } -static const struct dt_device_match mvebu_dt_match[] __initconst = -{ +static const struct dt_device_match mvebu_dt_match[] __initconst = { DT_MATCH_COMPATIBLE("marvell,armada-3700-uart"), - { /* sentinel */ }, + {/* sentinel */}, }; -DT_DEVICE_START(mvebu, "Marvell Armada-3700 UART", DEVICE_SERIAL) - .dt_match = mvebu_dt_match, - .init = mvebu_uart_init, -DT_DEVICE_END +DT_DEVICE_START(mvebu, "Marvell Armada-3700 UART", DEVICE_SERIAL).dt_match = + mvebu_dt_match, + .init = mvebu_uart_init, + DT_DEVICE_END -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/drivers/char/ns16550.c b/xen/drivers/char/ns16550.c index 189e121b7e..80933bb4ec 100644 --- a/xen/drivers/char/ns16550.c +++ b/xen/drivers/char/ns16550.c @@ -1,10 +1,10 @@ /****************************************************************************** * ns16550.c - * + * * Driver for 16550-series UARTs. 
This driver is to be kept within Xen as * it permits debugging of seriously-toasted machines (e.g., in situations * where a device driver within a guest OS would be inaccessible). - * + * * Copyright (c) 2003-2005, K A Fraser */ @@ -43,7 +43,8 @@ static char __initdata opt_com2[128] = ""; string_param("com1", opt_com1); string_param("com2", opt_com2); -enum serial_param_type { +enum serial_param_type +{ baud, clock_hz, data_bits, @@ -62,14 +63,15 @@ enum serial_param_type { num_serial_params }; -static struct ns16550 { +static struct ns16550 +{ int baud, clock_hz, data_bits, parity, stop_bits, fifo_size, irq; - u64 io_base; /* I/O port or memory-mapped I/O address. */ + u64 io_base; /* I/O port or memory-mapped I/O address. */ u64 io_size; - int reg_shift; /* Bits to shift register offset by */ - int reg_width; /* Size of access to use, the registers - * themselves are still bytes */ - char __iomem *remapped_io_base; /* Remapped virtual address of MMIO. */ + int reg_shift; /* Bits to shift register offset by */ + int reg_width; /* Size of access to use, the registers + * themselves are still bytes */ + char __iomem *remapped_io_base; /* Remapped virtual address of MMIO. */ /* UART with IRQ line: interrupt-driven I/O. */ struct irqaction irqaction; u8 lsr_mask; @@ -95,9 +97,10 @@ static struct ns16550 { bool msi; const struct ns16550_config_param *param; /* Points into .init.*! */ #endif -} ns16550_com[2] = { { 0 } }; +} ns16550_com[2] = {{0}}; -struct serial_param_var { +struct serial_param_var +{ char name[12]; enum serial_param_type type; }; @@ -124,10 +127,12 @@ static const struct serial_param_var __initconst sp_vars[] = { }; #ifdef CONFIG_HAS_PCI -struct ns16550_config { +struct ns16550_config +{ u16 vendor_id; u16 dev_id; - enum { + enum + { param_default, /* Must not be referenced by any table entry. */ param_trumanage, param_oxford, @@ -140,7 +145,8 @@ struct ns16550_config { }; /* Defining uart config options for MMIO devices */ -struct ns16550_config_param { +struct ns16550_config_param +{ unsigned int reg_shift; unsigned int reg_width; unsigned int fifo_size; @@ -159,70 +165,78 @@ struct ns16550_config_param { * driver does nothing for MMIO based devices. 
*/ static const struct ns16550_config_param __initconst uart_param[] = { - [param_default] = { - .reg_width = 1, - .lsr_mask = UART_LSR_THRE, - .max_ports = 1, - }, - [param_trumanage] = { - .reg_shift = 2, - .reg_width = 1, - .fifo_size = 16, - .lsr_mask = (UART_LSR_THRE | UART_LSR_TEMT), - .mmio = 1, - .max_ports = 1, - }, - [param_oxford] = { - .base_baud = 4000000, - .uart_offset = 0x200, - .first_offset = 0x1000, - .reg_width = 1, - .fifo_size = 16, - .lsr_mask = UART_LSR_THRE, - .mmio = 1, - .max_ports = 1, /* It can do more, but we would need more custom code.*/ - }, - [param_oxford_2port] = { - .base_baud = 4000000, - .uart_offset = 0x200, - .first_offset = 0x1000, - .reg_width = 1, - .fifo_size = 16, - .lsr_mask = UART_LSR_THRE, - .mmio = 1, - .max_ports = 2, - }, - [param_pericom_1port] = { - .base_baud = 921600, - .uart_offset = 8, - .reg_width = 1, - .fifo_size = 16, - .lsr_mask = UART_LSR_THRE, - .bar0 = 1, - .max_ports = 1, - }, - [param_pericom_2port] = { - .base_baud = 921600, - .uart_offset = 8, - .reg_width = 1, - .fifo_size = 16, - .lsr_mask = UART_LSR_THRE, - .bar0 = 1, - .max_ports = 2, - }, + [param_default] = + { + .reg_width = 1, + .lsr_mask = UART_LSR_THRE, + .max_ports = 1, + }, + [param_trumanage] = + { + .reg_shift = 2, + .reg_width = 1, + .fifo_size = 16, + .lsr_mask = (UART_LSR_THRE | UART_LSR_TEMT), + .mmio = 1, + .max_ports = 1, + }, + [param_oxford] = + { + .base_baud = 4000000, + .uart_offset = 0x200, + .first_offset = 0x1000, + .reg_width = 1, + .fifo_size = 16, + .lsr_mask = UART_LSR_THRE, + .mmio = 1, + .max_ports = + 1, /* It can do more, but we would need more custom code.*/ + }, + [param_oxford_2port] = + { + .base_baud = 4000000, + .uart_offset = 0x200, + .first_offset = 0x1000, + .reg_width = 1, + .fifo_size = 16, + .lsr_mask = UART_LSR_THRE, + .mmio = 1, + .max_ports = 2, + }, + [param_pericom_1port] = + { + .base_baud = 921600, + .uart_offset = 8, + .reg_width = 1, + .fifo_size = 16, + .lsr_mask = UART_LSR_THRE, + .bar0 = 1, + .max_ports = 1, + }, + [param_pericom_2port] = + { + .base_baud = 921600, + .uart_offset = 8, + .reg_width = 1, + .fifo_size = 16, + .lsr_mask = UART_LSR_THRE, + .bar0 = 1, + .max_ports = 2, + }, /* * Of the two following ones, we can't really use all of their ports, * unless ns16550_com[] would get grown. 
*/ - [param_pericom_4port] = { - .base_baud = 921600, - .uart_offset = 8, - .reg_width = 1, - .fifo_size = 16, - .lsr_mask = UART_LSR_THRE, - .bar0 = 1, - .max_ports = 4, - }, + [param_pericom_4port] = + { + .base_baud = 921600, + .uart_offset = 8, + .reg_width = 1, + .fifo_size = 16, + .lsr_mask = UART_LSR_THRE, + .bar0 = 1, + .max_ports = 4, + }, [param_pericom_8port] = { .base_baud = 921600, .uart_offset = 8, @@ -231,10 +245,8 @@ static const struct ns16550_config_param __initconst uart_param[] = { .lsr_mask = UART_LSR_THRE, .bar0 = 1, .max_ports = 8, - } -}; -static const struct ns16550_config __initconst uart_config[] = -{ + }}; +static const struct ns16550_config __initconst uart_config[] = { /* Broadcom TruManage device */ { .vendor_id = PCI_VENDOR_ID_BROADCOM, @@ -434,30 +446,21 @@ static const struct ns16550_config __initconst uart_config[] = .param = param_oxford, }, /* Pericom PI7C9X7951 Uno UART */ - { - .vendor_id = PCI_VENDOR_ID_PERICOM, - .dev_id = 0x7951, - .param = param_pericom_1port - }, + {.vendor_id = PCI_VENDOR_ID_PERICOM, + .dev_id = 0x7951, + .param = param_pericom_1port}, /* Pericom PI7C9X7952 Duo UART */ - { - .vendor_id = PCI_VENDOR_ID_PERICOM, - .dev_id = 0x7952, - .param = param_pericom_2port - }, + {.vendor_id = PCI_VENDOR_ID_PERICOM, + .dev_id = 0x7952, + .param = param_pericom_2port}, /* Pericom PI7C9X7954 Quad UART */ - { - .vendor_id = PCI_VENDOR_ID_PERICOM, - .dev_id = 0x7954, - .param = param_pericom_4port - }, + {.vendor_id = PCI_VENDOR_ID_PERICOM, + .dev_id = 0x7954, + .param = param_pericom_4port}, /* Pericom PI7C9X7958 Octal UART */ - { - .vendor_id = PCI_VENDOR_ID_PERICOM, - .dev_id = 0x7958, - .param = param_pericom_8port - } -}; + {.vendor_id = PCI_VENDOR_ID_PERICOM, + .dev_id = 0x7958, + .param = param_pericom_8port}}; #endif static void ns16550_delayed_resume(void *data); @@ -469,7 +472,7 @@ static u8 ns_read_reg(struct ns16550 *uart, unsigned int reg) if ( uart->remapped_io_base == NULL ) return inb(uart->io_base + reg); #endif - switch ( uart->reg_width ) + switch (uart->reg_width) { case 1: return readb(addr); @@ -487,7 +490,7 @@ static void ns_write_reg(struct ns16550 *uart, unsigned int reg, u8 c) if ( uart->remapped_io_base == NULL ) return outb(c, uart->io_base + reg); #endif - switch ( uart->reg_width ) + switch (uart->reg_width) { case 1: writeb(c, addr); @@ -523,8 +526,7 @@ static void handle_dw_usr_busy_quirk(struct ns16550 *uart) } } -static void ns16550_interrupt( - int irq, void *dev_id, struct cpu_user_regs *regs) +static void ns16550_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs) { struct serial_port *port = dev_id; struct ns16550 *uart = port->uart; @@ -571,7 +573,7 @@ static void __ns16550_poll(struct cpu_user_regs *regs) serial_rx_interrupt(port, regs); } - if ( ( ns_read_reg(uart, UART_LSR) & uart->lsr_mask ) == uart->lsr_mask ) + if ( (ns_read_reg(uart, UART_LSR) & uart->lsr_mask) == uart->lsr_mask ) serial_tx_interrupt(port, regs); out: @@ -595,8 +597,9 @@ static int ns16550_tx_ready(struct serial_port *port) if ( ns16550_ioport_invalid(uart) ) return -EIO; - return ( (ns_read_reg(uart, UART_LSR) & - uart->lsr_mask ) == uart->lsr_mask ) ? uart->fifo_size : 0; + return ((ns_read_reg(uart, UART_LSR) & uart->lsr_mask) == uart->lsr_mask) + ? 
uart->fifo_size + : 0; } static void ns16550_putc(struct serial_port *port, char c) @@ -610,7 +613,7 @@ static int ns16550_getc(struct serial_port *port, char *pc) struct ns16550 *uart = port->uart; if ( ns16550_ioport_invalid(uart) || - !(ns_read_reg(uart, UART_LSR) & UART_LSR_DR) ) + !(ns_read_reg(uart, UART_LSR) & UART_LSR_DR) ) return 0; *pc = ns_read_reg(uart, UART_RBR); @@ -624,10 +627,9 @@ static void pci_serial_early_init(struct ns16550 *uart) return; if ( uart->pb_bdf_enable ) - pci_conf_write16(0, uart->pb_bdf[0], uart->pb_bdf[1], uart->pb_bdf[2], - PCI_IO_BASE, - (uart->io_base & 0xF000) | - ((uart->io_base & 0xF000) >> 8)); + pci_conf_write16( + 0, uart->pb_bdf[0], uart->pb_bdf[1], uart->pb_bdf[2], PCI_IO_BASE, + (uart->io_base & 0xF000) | ((uart->io_base & 0xF000) >> 8)); pci_conf_write32(0, uart->ps_bdf[0], uart->ps_bdf[1], uart->ps_bdf[2], PCI_BASE_ADDRESS_0, @@ -640,7 +642,7 @@ static void pci_serial_early_init(struct ns16550 *uart) static void ns16550_setup_preirq(struct ns16550 *uart) { unsigned char lcr; - unsigned int divisor; + unsigned int divisor; uart->intr_works = 0; @@ -666,14 +668,13 @@ static void ns16550_setup_preirq(struct ns16550 *uart) else { /* Baud rate already set: read it out from the divisor latch. */ - divisor = ns_read_reg(uart, UART_DLL); + divisor = ns_read_reg(uart, UART_DLL); divisor |= ns_read_reg(uart, UART_DLM) << 8; if ( divisor ) uart->baud = uart->clock_hz / (divisor << 4); else - printk(XENLOG_ERR - "Automatic baud rate determination was requested," - " but a baud rate was not set up\n"); + printk(XENLOG_ERR "Automatic baud rate determination was requested," + " but a baud rate was not set up\n"); } ns_write_reg(uart, UART_LCR, lcr); @@ -682,7 +683,8 @@ static void ns16550_setup_preirq(struct ns16550 *uart) /* Enable and clear the FIFOs. Set a large trigger threshold. */ ns_write_reg(uart, UART_FCR, - UART_FCR_ENABLE | UART_FCR_CLRX | UART_FCR_CLTX | UART_FCR_TRG14); + UART_FCR_ENABLE | UART_FCR_CLRX | UART_FCR_CLTX | + UART_FCR_TRG14); } static void __init ns16550_init_preirq(struct serial_port *port) @@ -728,8 +730,8 @@ static void ns16550_setup_postirq(struct ns16550 *uart) if ( uart->irq > 0 ) { /* Master interrupt enable; also keep DTR/RTS asserted. */ - ns_write_reg(uart, - UART_MCR, UART_MCR_OUT2 | UART_MCR_DTR | UART_MCR_RTS); + ns_write_reg(uart, UART_MCR, + UART_MCR_OUT2 | UART_MCR_DTR | UART_MCR_RTS); /* Enable receive interrupts. */ ns_write_reg(uart, UART_IER, UART_IER_ERDAI); @@ -754,28 +756,29 @@ static void __init ns16550_init_postirq(struct serial_port *port) /* Calculate time to fill RX FIFO and/or empty TX FIFO for polling. 
*/ bits = uart->data_bits + uart->stop_bits + !!uart->parity; - uart->timeout_ms = max_t( - unsigned int, 1, (bits * uart->fifo_size * 1000) / uart->baud); + uart->timeout_ms = + max_t(unsigned int, 1, (bits * uart->fifo_size * 1000) / uart->baud); #ifdef CONFIG_HAS_PCI if ( uart->bar || uart->ps_bdf_enable ) { if ( !uart->param ) - pci_hide_device(0, uart->ps_bdf[0], PCI_DEVFN(uart->ps_bdf[1], - uart->ps_bdf[2])); + pci_hide_device(0, uart->ps_bdf[0], + PCI_DEVFN(uart->ps_bdf[1], uart->ps_bdf[2])); else { if ( uart->param->mmio && - rangeset_add_range(mmio_ro_ranges, - uart->io_base, + rangeset_add_range(mmio_ro_ranges, uart->io_base, uart->io_base + uart->io_size - 1) ) - printk(XENLOG_INFO "Error while adding MMIO range of device to mmio_ro_ranges\n"); + printk(XENLOG_INFO "Error while adding MMIO range of device to " + "mmio_ro_ranges\n"); if ( pci_ro_device(0, uart->ps_bdf[0], PCI_DEVFN(uart->ps_bdf[1], uart->ps_bdf[2])) ) - printk(XENLOG_INFO "Could not mark config space of %02x:%02x.%u read-only.\n", - uart->ps_bdf[0], uart->ps_bdf[1], - uart->ps_bdf[2]); + printk( + XENLOG_INFO + "Could not mark config space of %02x:%02x.%u read-only.\n", + uart->ps_bdf[0], uart->ps_bdf[1], uart->ps_bdf[2]); } if ( uart->msi ) @@ -784,8 +787,7 @@ static void __init ns16550_init_postirq(struct serial_port *port) .bus = uart->ps_bdf[0], .devfn = PCI_DEVFN(uart->ps_bdf[1], uart->ps_bdf[2]), .irq = rc = uart->irq, - .entry_nr = 1 - }; + .entry_nr = 1}; if ( rc > 0 ) { @@ -829,8 +831,8 @@ static void __init ns16550_init_postirq(struct serial_port *port) if ( uart->irq > 0 ) { uart->irqaction.handler = ns16550_interrupt; - uart->irqaction.name = "ns16550"; - uart->irqaction.dev_id = port; + uart->irqaction.name = "ns16550"; + uart->irqaction.dev_id = port; if ( (rc = setup_irq(uart->irq, 0, &uart->irqaction)) != 0 ) printk("ERROR: Failed to allocate ns16550 IRQ %d\n", uart->irq); } @@ -846,8 +848,8 @@ static void ns16550_suspend(struct serial_port *port) #ifdef CONFIG_HAS_PCI if ( uart->bar ) - uart->cr = pci_conf_read16(0, uart->ps_bdf[0], uart->ps_bdf[1], - uart->ps_bdf[2], PCI_COMMAND); + uart->cr = pci_conf_read16(0, uart->ps_bdf[0], uart->ps_bdf[1], + uart->ps_bdf[2], PCI_COMMAND); #endif } @@ -858,17 +860,17 @@ static void _ns16550_resume(struct serial_port *port) if ( uart->bar ) { - pci_conf_write32(0, uart->ps_bdf[0], uart->ps_bdf[1], uart->ps_bdf[2], - PCI_BASE_ADDRESS_0 + uart->bar_idx*4, uart->bar); + pci_conf_write32(0, uart->ps_bdf[0], uart->ps_bdf[1], uart->ps_bdf[2], + PCI_BASE_ADDRESS_0 + uart->bar_idx * 4, uart->bar); /* If 64 bit BAR, write higher 32 bits to BAR+4 */ if ( uart->bar & PCI_BASE_ADDRESS_MEM_TYPE_64 ) - pci_conf_write32(0, uart->ps_bdf[0], - uart->ps_bdf[1], uart->ps_bdf[2], - PCI_BASE_ADDRESS_0 + (uart->bar_idx+1)*4, uart->bar64); + pci_conf_write32( + 0, uart->ps_bdf[0], uart->ps_bdf[1], uart->ps_bdf[2], + PCI_BASE_ADDRESS_0 + (uart->bar_idx + 1) * 4, uart->bar64); - pci_conf_write16(0, uart->ps_bdf[0], uart->ps_bdf[1], uart->ps_bdf[2], - PCI_COMMAND, uart->cr); + pci_conf_write16(0, uart->ps_bdf[0], uart->ps_bdf[1], uart->ps_bdf[2], + PCI_COMMAND, uart->cr); } #endif @@ -949,7 +951,8 @@ static void ns16550_stop_tx(struct serial_port *port) struct ns16550 *uart = port->uart; u8 ier = ns_read_reg(uart, UART_IER); - /* Mask off transmit holding register empty interrupt if currently unmasked. */ + /* Mask off transmit holding register empty interrupt if currently unmasked. 
+ */ if ( ier & UART_IER_ETHREI ) ns_write_reg(uart, UART_IER, ier & ~UART_IER_ETHREI); } @@ -964,36 +967,36 @@ static const struct vuart_info *ns16550_vuart_info(struct serial_port *port) #endif static struct uart_driver __read_mostly ns16550_driver = { - .init_preirq = ns16550_init_preirq, - .init_irq = ns16550_init_irq, + .init_preirq = ns16550_init_preirq, + .init_irq = ns16550_init_irq, .init_postirq = ns16550_init_postirq, - .endboot = ns16550_endboot, - .suspend = ns16550_suspend, - .resume = ns16550_resume, - .tx_ready = ns16550_tx_ready, - .putc = ns16550_putc, - .getc = ns16550_getc, - .irq = ns16550_irq, - .start_tx = ns16550_start_tx, - .stop_tx = ns16550_stop_tx, + .endboot = ns16550_endboot, + .suspend = ns16550_suspend, + .resume = ns16550_resume, + .tx_ready = ns16550_tx_ready, + .putc = ns16550_putc, + .getc = ns16550_getc, + .irq = ns16550_irq, + .start_tx = ns16550_start_tx, + .stop_tx = ns16550_stop_tx, #ifdef CONFIG_ARM - .vuart_info = ns16550_vuart_info, + .vuart_info = ns16550_vuart_info, #endif }; static int __init parse_parity_char(int c) { - switch ( c ) + switch (c) { case 'n': return UART_PARITY_NONE; - case 'o': + case 'o': return UART_PARITY_ODD; - case 'e': + case 'e': return UART_PARITY_EVEN; - case 'm': + case 'm': return UART_PARITY_MARK; - case 's': + case 's': return UART_PARITY_SPACE; } return 0; @@ -1030,7 +1033,7 @@ static int __init check_existence(struct ns16550 *uart) * 16C754B) allow only to modify them if an EFR bit is set. */ scratch2 = ns_read_reg(uart, UART_IER) & 0x0f; - ns_write_reg(uart,UART_IER, 0x0F); + ns_write_reg(uart, UART_IER, 0x0F); scratch3 = ns_read_reg(uart, UART_IER) & 0x0f; ns_write_reg(uart, UART_IER, scratch); if ( (scratch2 != 0) || (scratch3 != 0x0F) ) @@ -1046,8 +1049,8 @@ static int __init check_existence(struct ns16550 *uart) } #ifdef CONFIG_HAS_PCI -static int __init -pci_uart_config(struct ns16550 *uart, bool_t skip_amt, unsigned int idx) +static int __init pci_uart_config(struct ns16550 *uart, bool_t skip_amt, + unsigned int idx) { u64 orig_base = uart->io_base; unsigned int b, d, f, nextf, i; @@ -1064,10 +1067,12 @@ pci_uart_config(struct ns16550 *uart, bool_t skip_amt, unsigned int idx) u64 size = 0; const struct ns16550_config_param *param = uart_param; - nextf = (f || (pci_conf_read16(0, b, d, f, PCI_HEADER_TYPE) & - 0x80)) ? f + 1 : 8; + nextf = + (f || (pci_conf_read16(0, b, d, f, PCI_HEADER_TYPE) & 0x80)) + ? 
f + 1 + : 8; - switch ( pci_conf_read16(0, b, d, f, PCI_CLASS_DEVICE) ) + switch (pci_conf_read16(0, b, d, f, PCI_CLASS_DEVICE)) { case 0x0700: /* single port serial */ case 0x0702: /* multi port serial */ @@ -1109,45 +1114,48 @@ pci_uart_config(struct ns16550 *uart, bool_t skip_amt, unsigned int idx) uart->io_base = 0; bar = pci_conf_read32(0, b, d, f, - PCI_BASE_ADDRESS_0 + bar_idx*4); + PCI_BASE_ADDRESS_0 + bar_idx * 4); /* MMIO based */ if ( param->mmio && !(bar & PCI_BASE_ADDRESS_SPACE_IO) ) { pci_conf_write32(0, b, d, f, - PCI_BASE_ADDRESS_0 + bar_idx*4, ~0u); - len = pci_conf_read32(0, b, d, f, PCI_BASE_ADDRESS_0 + bar_idx*4); + PCI_BASE_ADDRESS_0 + bar_idx * 4, ~0u); + len = pci_conf_read32(0, b, d, f, + PCI_BASE_ADDRESS_0 + bar_idx * 4); pci_conf_write32(0, b, d, f, - PCI_BASE_ADDRESS_0 + bar_idx*4, bar); + PCI_BASE_ADDRESS_0 + bar_idx * 4, bar); /* Handle 64 bit BAR if found */ if ( bar & PCI_BASE_ADDRESS_MEM_TYPE_64 ) { - bar_64 = pci_conf_read32(0, b, d, f, - PCI_BASE_ADDRESS_0 + (bar_idx+1)*4); + bar_64 = pci_conf_read32( + 0, b, d, f, PCI_BASE_ADDRESS_0 + (bar_idx + 1) * 4); pci_conf_write32(0, b, d, f, - PCI_BASE_ADDRESS_0 + (bar_idx+1)*4, ~0u); - len_64 = pci_conf_read32(0, b, d, f, - PCI_BASE_ADDRESS_0 + (bar_idx+1)*4); + PCI_BASE_ADDRESS_0 + (bar_idx + 1) * 4, + ~0u); + len_64 = pci_conf_read32( + 0, b, d, f, PCI_BASE_ADDRESS_0 + (bar_idx + 1) * 4); pci_conf_write32(0, b, d, f, - PCI_BASE_ADDRESS_0 + (bar_idx+1)*4, bar_64); - size = ((u64)~0 << 32) | PCI_BASE_ADDRESS_MEM_MASK; + PCI_BASE_ADDRESS_0 + (bar_idx + 1) * 4, + bar_64); + size = ((u64)~0 << 32) | PCI_BASE_ADDRESS_MEM_MASK; size &= ((u64)len_64 << 32) | len; } else size = len & PCI_BASE_ADDRESS_MEM_MASK; - uart->io_base = ((u64)bar_64 << 32) | - (bar & PCI_BASE_ADDRESS_MEM_MASK); + uart->io_base = + ((u64)bar_64 << 32) | (bar & PCI_BASE_ADDRESS_MEM_MASK); } /* IO based */ else if ( !param->mmio && (bar & PCI_BASE_ADDRESS_SPACE_IO) ) { pci_conf_write32(0, b, d, f, - PCI_BASE_ADDRESS_0 + bar_idx*4, ~0u); + PCI_BASE_ADDRESS_0 + bar_idx * 4, ~0u); len = pci_conf_read32(0, b, d, f, PCI_BASE_ADDRESS_0); pci_conf_write32(0, b, d, f, - PCI_BASE_ADDRESS_0 + bar_idx*4, bar); + PCI_BASE_ADDRESS_0 + bar_idx * 4, bar); size = len & PCI_BASE_ADDRESS_IO_MASK; uart->io_base = bar & ~PCI_BASE_ADDRESS_SPACE_IO; @@ -1164,8 +1172,8 @@ pci_uart_config(struct ns16550 *uart, bool_t skip_amt, unsigned int idx) * 8 bytes times (1 << reg_shift). */ if ( size < param->first_offset + - port_idx * param->uart_offset + - (8 << param->reg_shift) ) + port_idx * param->uart_offset + + (8 << param->reg_shift) ) continue; uart->param = param; @@ -1173,8 +1181,8 @@ pci_uart_config(struct ns16550 *uart, bool_t skip_amt, unsigned int idx) uart->reg_shift = param->reg_shift; uart->reg_width = param->reg_width; uart->lsr_mask = param->lsr_mask; - uart->io_base += param->first_offset + - port_idx * param->uart_offset; + uart->io_base += + param->first_offset + port_idx * param->uart_offset; if ( param->base_baud ) uart->clock_hz = param->base_baud * 16; if ( param->fifo_size ) @@ -1186,10 +1194,10 @@ pci_uart_config(struct ns16550 *uart, bool_t skip_amt, unsigned int idx) uart->bar_idx = bar_idx; uart->bar = bar; uart->bar64 = bar_64; - uart->io_size = max(8U << param->reg_shift, - param->uart_offset); - uart->irq = pci_conf_read8(0, b, d, f, PCI_INTERRUPT_PIN) ? - pci_conf_read8(0, b, d, f, PCI_INTERRUPT_LINE) : 0; + uart->io_size = max(8U << param->reg_shift, param->uart_offset); + uart->irq = pci_conf_read8(0, b, d, f, PCI_INTERRUPT_PIN) + ? 
pci_conf_read8(0, b, d, f, PCI_INTERRUPT_LINE) + : 0; return 0; } @@ -1232,19 +1240,18 @@ static enum __init serial_param_type get_token(char *token, char **value) return num_serial_params; } -#define PARSE_ERR(_f, _a...) \ - do { \ - printk( "ERROR: " _f "\n" , ## _a ); \ - return; \ +#define PARSE_ERR(_f, _a...) \ + do { \ + printk("ERROR: " _f "\n", ##_a); \ + return; \ } while ( 0 ) -#define PARSE_ERR_RET(_f, _a...) \ - do { \ - printk( "ERROR: " _f "\n" , ## _a ); \ - return false; \ +#define PARSE_ERR_RET(_f, _a...) \ + do { \ + printk("ERROR: " _f "\n", ##_a); \ + return false; \ } while ( 0 ) - static bool __init parse_positional(struct ns16550 *uart, char **str) { int baud; @@ -1301,7 +1308,7 @@ static bool __init parse_positional(struct ns16550 *uart, char **str) #ifdef CONFIG_HAS_PCI if ( strncmp(conf, "pci", 3) == 0 ) { - if ( pci_uart_config(uart, 1/* skip AMT */, uart - ns16550_com) ) + if ( pci_uart_config(uart, 1 /* skip AMT */, uart - ns16550_com) ) return true; conf += 3; } @@ -1335,8 +1342,8 @@ static bool __init parse_positional(struct ns16550 *uart, char **str) #ifdef CONFIG_HAS_PCI if ( *conf == ',' && *++conf != ',' ) { - conf = parse_pci(conf, NULL, &uart->ps_bdf[0], - &uart->ps_bdf[1], &uart->ps_bdf[2]); + conf = parse_pci(conf, NULL, &uart->ps_bdf[0], &uart->ps_bdf[1], + &uart->ps_bdf[2]); if ( !conf ) PARSE_ERR_RET("Bad port PCI coordinates"); uart->ps_bdf_enable = true; @@ -1344,8 +1351,8 @@ static bool __init parse_positional(struct ns16550 *uart, char **str) if ( *conf == ',' && *++conf != ',' ) { - if ( !parse_pci(conf, NULL, &uart->pb_bdf[0], - &uart->pb_bdf[1], &uart->pb_bdf[2]) ) + if ( !parse_pci(conf, NULL, &uart->pb_bdf[0], &uart->pb_bdf[1], + &uart->pb_bdf[2]) ) PARSE_ERR_RET("Bad bridge PCI coordinates"); uart->pb_bdf_enable = true; } @@ -1363,12 +1370,11 @@ static bool __init parse_namevalue_pairs(char *str, struct ns16550 *uart) if ( (str == NULL) || (*str == '\0') ) return true; - do - { + do { /* When no tokens are found, start will be NULL */ token = strsep(&start, ","); - switch ( get_token(token, ¶m_value) ) + switch (get_token(token, ¶m_value)) { case baud: uart->baud = simple_strtoul(param_value, NULL, 0); @@ -1423,7 +1429,7 @@ static bool __init parse_namevalue_pairs(char *str, struct ns16550 *uart) case device: if ( strncmp(param_value, "pci", 3) == 0 ) { - pci_uart_config(uart, 1/* skip AMT */, uart - ns16550_com); + pci_uart_config(uart, 1 /* skip AMT */, uart - ns16550_com); dev_set = true; } else if ( strncmp(param_value, "amt", 3) == 0 ) @@ -1449,8 +1455,8 @@ static bool __init parse_namevalue_pairs(char *str, struct ns16550 *uart) return true; } -static void __init ns16550_parse_port_config( - struct ns16550 *uart, const char *conf) +static void __init ns16550_parse_port_config(struct ns16550 *uart, + const char *conf) { char com_console_options[128]; char *str; @@ -1474,7 +1480,7 @@ static void __init ns16550_parse_port_config( if ( !parse_namevalue_pairs(str, uart) ) return; - config_parsed: +config_parsed: /* Sanity checks. */ if ( (uart->baud != BAUD_AUTO) && ((uart->baud < 1200) || (uart->baud > 115200)) ) @@ -1496,13 +1502,13 @@ static void __init ns16550_parse_port_config( static void ns16550_init_common(struct ns16550 *uart) { - uart->clock_hz = UART_CLOCK_HZ; + uart->clock_hz = UART_CLOCK_HZ; /* Default is no transmit FIFO. 
*/ uart->fifo_size = 1; /* Default lsr_mask = UART_LSR_THRE */ - uart->lsr_mask = UART_LSR_THRE; + uart->lsr_mask = UART_LSR_THRE; } void __init ns16550_init(int index, struct ns16550_defaults *defaults) @@ -1516,15 +1522,15 @@ void __init ns16550_init(int index, struct ns16550_defaults *defaults) ns16550_init_common(uart); - uart->baud = (defaults->baud ? : - console_has((index == 0) ? "com1" : "com2") - ? BAUD_AUTO : 0); + uart->baud = + (defaults->baud + ?: console_has((index == 0) ? "com1" : "com2") ? BAUD_AUTO : 0); uart->data_bits = defaults->data_bits; - uart->parity = parse_parity_char(defaults->parity); + uart->parity = parse_parity_char(defaults->parity); uart->stop_bits = defaults->stop_bits; - uart->irq = defaults->irq; - uart->io_base = defaults->io_base; - uart->io_size = 8; + uart->irq = defaults->irq; + uart->io_base = defaults->io_base; + uart->io_size = 8; uart->reg_width = 1; uart->reg_shift = 0; @@ -1544,9 +1550,9 @@ static int __init ns16550_uart_dt_init(struct dt_device_node *dev, ns16550_init_common(uart); - uart->baud = BAUD_AUTO; + uart->baud = BAUD_AUTO; uart->data_bits = 8; - uart->parity = UART_PARITY_NONE; + uart->parity = UART_PARITY_NONE; uart->stop_bits = 1; res = dt_device_get_address(dev, 0, &uart->io_base, &io_size); @@ -1573,7 +1579,7 @@ static int __init ns16550_uart_dt_init(struct dt_device_node *dev, return -EINVAL; res = platform_get_irq(dev, 0); - if ( ! res ) + if ( !res ) return -EINVAL; uart->irq = res; @@ -1581,9 +1587,9 @@ static int __init ns16550_uart_dt_init(struct dt_device_node *dev, uart->vuart.base_addr = uart->io_base; uart->vuart.size = uart->io_size; - uart->vuart.data_off = UART_THR <<uart->reg_shift; - uart->vuart.status_off = UART_LSR<<uart->reg_shift; - uart->vuart.status = UART_LSR_THRE|UART_LSR_TEMT; + uart->vuart.data_off = UART_THR << uart->reg_shift; + uart->vuart.status_off = UART_LSR << uart->reg_shift; + uart->vuart.status = UART_LSR_THRE | UART_LSR_TEMT; /* Register with generic serial driver. 
*/ serial_register_uart(uart - ns16550_com, &ns16550_driver, uart); @@ -1593,26 +1599,25 @@ static int __init ns16550_uart_dt_init(struct dt_device_node *dev, return 0; } -static const struct dt_device_match ns16550_dt_match[] __initconst = -{ +static const struct dt_device_match ns16550_dt_match[] __initconst = { DT_MATCH_COMPATIBLE("ns16550"), DT_MATCH_COMPATIBLE("ns16550a"), DT_MATCH_COMPATIBLE("snps,dw-apb-uart"), - { /* sentinel */ }, + {/* sentinel */}, }; -DT_DEVICE_START(ns16550, "NS16550 UART", DEVICE_SERIAL) - .dt_match = ns16550_dt_match, - .init = ns16550_uart_dt_init, -DT_DEVICE_END +DT_DEVICE_START(ns16550, "NS16550 UART", DEVICE_SERIAL).dt_match = + ns16550_dt_match, + .init = ns16550_uart_dt_init, + DT_DEVICE_END #endif /* HAS_DEVICE_TREE */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/drivers/char/omap-uart.c b/xen/drivers/char/omap-uart.c index d6a5d59aa2..f43e00f3b4 100644 --- a/xen/drivers/char/omap-uart.c +++ b/xen/drivers/char/omap-uart.c @@ -25,33 +25,35 @@ #define REG_SHIFT 2 /* Register offsets */ -#define UART_OMAP_EFR 0x02 /* Enhanced feature register */ -#define UART_OMAP_MDR1 0x08 /* Mode definition register 1 */ -#define UART_OMAP_SCR 0x10 /* Supplementary control register */ -#define UART_OMAP_SSR 0x11 /* Supplementary status register */ -#define UART_OMAP_SYSC 0x15 /* System configuration register */ -#define UART_OMAP_TXFIFO_LVL 0x1A /* TX FIFO level register */ +#define UART_OMAP_EFR 0x02 /* Enhanced feature register */ +#define UART_OMAP_MDR1 0x08 /* Mode definition register 1 */ +#define UART_OMAP_SCR 0x10 /* Supplementary control register */ +#define UART_OMAP_SSR 0x11 /* Supplementary status register */ +#define UART_OMAP_SYSC 0x15 /* System configuration register */ +#define UART_OMAP_TXFIFO_LVL 0x1A /* TX FIFO level register */ /* Enhanced feature register */ -#define UART_OMAP_EFR_ECB 0x10 /* Enhanced control bit */ +#define UART_OMAP_EFR_ECB 0x10 /* Enhanced control bit */ /* Mode definition register 1 */ -#define UART_OMAP_MDR1_16X_MODE 0x00 /* UART 16x mode */ -#define UART_OMAP_MDR1_DISABLE 0x07 /* Disable (default state) */ +#define UART_OMAP_MDR1_16X_MODE 0x00 /* UART 16x mode */ +#define UART_OMAP_MDR1_DISABLE 0x07 /* Disable (default state) */ /* Supplementary control register bitmasks */ -#define UART_OMAP_SCR_RX_TRIG_GRANU1_MASK (1 << 7) +#define UART_OMAP_SCR_RX_TRIG_GRANU1_MASK (1 << 7) /* Supplementary status register bitmasks */ -#define UART_OMAP_SSR_TX_FIFO_FULL_MASK (1 << 0) +#define UART_OMAP_SSR_TX_FIFO_FULL_MASK (1 << 0) /* System configuration register */ -#define UART_OMAP_SYSC_DEF_CONF 0x0d /* autoidle mode, wakeup is enabled */ +#define UART_OMAP_SYSC_DEF_CONF 0x0d /* autoidle mode, wakeup is enabled */ -#define omap_read(uart, off) readl((uart)->regs + (off<<REG_SHIFT)) -#define omap_write(uart, off, val) writel((val), (uart)->regs + (off<<REG_SHIFT)) +#define omap_read(uart, off) readl((uart)->regs + (off << REG_SHIFT)) +#define omap_write(uart, off, val) \ + writel((val), (uart)->regs + (off << REG_SHIFT)) -static struct omap_uart { +static struct omap_uart +{ u32 baud, clock_hz, data_bits, parity, stop_bits, fifo_size; unsigned int irq; char __iomem *regs; @@ -69,12 +71,13 @@ static void omap_uart_interrupt(int irq, void *data, struct cpu_user_regs *regs) { while ( !(omap_read(uart, UART_IIR) & UART_IIR_NOINT) ) { lsr = omap_read(uart, UART_LSR) & 0xff; - if ( lsr & UART_LSR_THRE ) + if ( lsr & 
UART_LSR_THRE ) serial_tx_interrupt(port, regs); - if ( lsr & UART_LSR_DR ) + if ( lsr & UART_LSR_DR ) serial_rx_interrupt(port, regs); - if ( port->txbufc == port->txbufp ) { + if ( port->txbufc == port->txbufp ) + { reg = omap_read(uart, UART_IER); omap_write(uart, UART_IER, reg & (~UART_IER_ETHREI)); } @@ -99,7 +102,7 @@ static void baud_protocol_setup(struct omap_uart *uart) * Enable access to the UART_IER[7:4] bit field. */ efr = omap_read(uart, UART_OMAP_EFR); - omap_write(uart, UART_OMAP_EFR, efr|UART_OMAP_EFR_ECB); + omap_write(uart, UART_OMAP_EFR, efr | UART_OMAP_EFR_ECB); /* * Switch to register operation mode to access the UART_IER register. */ @@ -128,8 +131,9 @@ static void baud_protocol_setup(struct omap_uart *uart) * Load the new protocol formatting (parity, stop-bit, character length) * and switch to register operational mode. */ - omap_write(uart, UART_LCR, (uart->data_bits - 5) | - ((uart->stop_bits - 1) << 2) | uart->parity); + omap_write(uart, UART_LCR, + (uart->data_bits - 5) | ((uart->stop_bits - 1) << 2) | + uart->parity); } static void fifo_setup(struct omap_uart *uart) @@ -145,7 +149,7 @@ static void fifo_setup(struct omap_uart *uart) * Enable register submode TCR_TLR to access the UART_OMAP_TLR register. */ efr = omap_read(uart, UART_OMAP_EFR); - omap_write(uart, UART_OMAP_EFR, efr|UART_OMAP_EFR_ECB); + omap_write(uart, UART_OMAP_EFR, efr | UART_OMAP_EFR_ECB); /* * Switch to register configuration mode A to access the UART_MCR * register. @@ -155,12 +159,12 @@ static void fifo_setup(struct omap_uart *uart) * Enable register submode TCR_TLR to access the UART_OMAP_TLR register */ mcr = omap_read(uart, UART_MCR); - omap_write(uart, UART_MCR, mcr|UART_MCR_TCRTLR); + omap_write(uart, UART_MCR, mcr | UART_MCR_TCRTLR); /* * Enable the FIFO; load the new FIFO trigger and the new DMA mode. */ - omap_write(uart, UART_FCR, UART_FCR_R_TRIG_01| - UART_FCR_T_TRIG_10|UART_FCR_ENABLE); + omap_write(uart, UART_FCR, + UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_10 | UART_FCR_ENABLE); /* * Switch to register configuration mode B to access the UART_EFR * register. @@ -199,7 +203,7 @@ static void __init omap_uart_init_preirq(struct serial_port *port) * Clear the FIFO buffers. 
*/ omap_write(uart, UART_FCR, UART_FCR_ENABLE); - omap_write(uart, UART_FCR, UART_FCR_ENABLE|UART_FCR_CLRX|UART_FCR_CLTX); + omap_write(uart, UART_FCR, UART_FCR_ENABLE | UART_FCR_CLRX | UART_FCR_CLTX); omap_write(uart, UART_FCR, 0); /* @@ -215,7 +219,7 @@ static void __init omap_uart_init_preirq(struct serial_port *port) fifo_setup(uart); /* No flow control */ - omap_write(uart, UART_MCR, UART_MCR_DTR|UART_MCR_RTS); + omap_write(uart, UART_MCR, UART_MCR_DTR | UART_MCR_RTS); omap_write(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE); @@ -239,7 +243,8 @@ static void __init omap_uart_init_postirq(struct serial_port *port) } /* Enable interrupts */ - omap_write(uart, UART_IER, UART_IER_ERDAI|UART_IER_ETHREI|UART_IER_ELSI); + omap_write(uart, UART_IER, + UART_IER_ERDAI | UART_IER_ETHREI | UART_IER_ELSI); } static void omap_uart_suspend(struct serial_port *port) @@ -267,7 +272,7 @@ static int omap_uart_tx_ready(struct serial_port *port) /* Check number of data bytes stored in TX FIFO */ cnt = omap_read(uart, UART_OMAP_TXFIFO_LVL); - ASSERT( cnt >= 0 && cnt <= uart->fifo_size ); + ASSERT(cnt >= 0 && cnt <= uart->fifo_size); return (uart->fifo_size - cnt); } @@ -284,7 +289,7 @@ static int omap_uart_getc(struct serial_port *port, char *pc) struct omap_uart *uart = port->uart; if ( !(omap_read(uart, UART_LSR) & UART_LSR_DR) ) - return 0; + return 0; *pc = omap_read(uart, UART_RBR) & 0xff; return 1; @@ -317,8 +322,7 @@ static struct uart_driver __read_mostly omap_uart_driver = { .vuart_info = omap_vuart_info, }; -static int __init omap_uart_init(struct dt_device_node *dev, - const void *data) +static int __init omap_uart_init(struct dt_device_node *dev, const void *data) { const char *config = data; struct omap_uart *uart; @@ -367,7 +371,6 @@ static int __init omap_uart_init(struct dt_device_node *dev, return -ENOMEM; } - uart->vuart.base_addr = addr; uart->vuart.size = size; uart->vuart.data_off = UART_THR; @@ -382,22 +385,21 @@ static int __init omap_uart_init(struct dt_device_node *dev, return 0; } -static const struct dt_device_match omap_uart_dt_match[] __initconst = -{ +static const struct dt_device_match omap_uart_dt_match[] __initconst = { DT_MATCH_COMPATIBLE("ti,omap4-uart"), - { /* sentinel */ }, + {/* sentinel */}, }; -DT_DEVICE_START(omap_uart, "OMAP UART", DEVICE_SERIAL) - .dt_match = omap_uart_dt_match, - .init = omap_uart_init, -DT_DEVICE_END +DT_DEVICE_START(omap_uart, "OMAP UART", DEVICE_SERIAL).dt_match = + omap_uart_dt_match, + .init = omap_uart_init, + DT_DEVICE_END -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/drivers/char/pl011.c b/xen/drivers/char/pl011.c index be67242bc0..04485fe1bc 100644 --- a/xen/drivers/char/pl011.c +++ b/xen/drivers/char/pl011.c @@ -29,7 +29,8 @@ #include #include -static struct pl011 { +static struct pl011 +{ unsigned int data_bits, parity, stop_bits; unsigned int irq; void __iomem *regs; @@ -40,19 +41,19 @@ static struct pl011 { /* struct timer timer; */ /* unsigned int timeout_ms; */ /* bool_t probing, intr_works; */ - bool sbsa; /* ARM SBSA generic interface */ + bool sbsa; /* ARM SBSA generic interface */ } pl011_com = {0}; /* These parity settings can be ORed directly into the LCR. 
*/ -#define PARITY_NONE (0) -#define PARITY_ODD (PEN) -#define PARITY_EVEN (PEN|EPS) -#define PARITY_MARK (PEN|SPS) -#define PARITY_SPACE (PEN|EPS|SPS) +#define PARITY_NONE (0) +#define PARITY_ODD (PEN) +#define PARITY_EVEN (PEN | EPS) +#define PARITY_MARK (PEN | SPS) +#define PARITY_SPACE (PEN | EPS | SPS) /* SBSA v2.x document requires, all reads/writes must be 32-bit accesses */ -#define pl011_read(uart, off) readl((uart)->regs + (off)) -#define pl011_write(uart, off,val) writel((val), (uart)->regs + (off)) +#define pl011_read(uart, off) readl((uart)->regs + (off)) +#define pl011_write(uart, off, val) writel((val), (uart)->regs + (off)) static unsigned int pl011_intr_status(struct pl011 *uart) { @@ -68,11 +69,10 @@ static void pl011_interrupt(int irq, void *data, struct cpu_user_regs *regs) if ( status ) { - do - { - pl011_write(uart, ICR, status & ~(TXI|RTI|RXI)); + do { + pl011_write(uart, ICR, status & ~(TXI | RTI | RXI)); - if ( status & (RTI|RXI) ) + if ( status & (RTI | RXI) ) serial_rx_interrupt(port, regs); /* TODO @@ -84,7 +84,7 @@ static void pl011_interrupt(int irq, void *data, struct cpu_user_regs *regs) serial_tx_interrupt(port, regs); status = pl011_intr_status(uart); - } while (status != 0); + } while ( status != 0 ); } } @@ -102,10 +102,9 @@ static void __init pl011_init_preirq(struct serial_port *port) pl011_write(uart, DMACR, 0x0); /* This write must follow FBRD and IBRD writes. */ - pl011_write(uart, LCR_H, (uart->data_bits - 5) << 5 - | FEN - | ((uart->stop_bits - 1) << 3) - | uart->parity); + pl011_write(uart, LCR_H, + (uart->data_bits - 5) << 5 | FEN | + ((uart->stop_bits - 1) << 3) | uart->parity); } /* Clear errors */ pl011_write(uart, RSR, 0); @@ -131,17 +130,17 @@ static void __init pl011_init_postirq(struct serial_port *port) if ( uart->irq > 0 ) { uart->irqaction.handler = pl011_interrupt; - uart->irqaction.name = "pl011"; - uart->irqaction.dev_id = port; + uart->irqaction.name = "pl011"; + uart->irqaction.dev_id = port; if ( (rc = setup_irq(uart->irq, 0, &uart->irqaction)) != 0 ) printk("ERROR: Failed to allocate pl011 IRQ %d\n", uart->irq); } /* Clear pending error interrupts */ - pl011_write(uart, ICR, OEI|BEI|PEI|FEI); + pl011_write(uart, ICR, OEI | BEI | PEI | FEI); /* Unmask interrupts */ - pl011_write(uart, IMSC, RTI|OEI|BEI|PEI|FEI|TXI|RXI); + pl011_write(uart, IMSC, RTI | OEI | BEI | PEI | FEI | TXI | RXI); } static void pl011_suspend(struct serial_port *port) @@ -208,18 +207,18 @@ static void pl011_tx_start(struct serial_port *port) } static struct uart_driver __read_mostly pl011_driver = { - .init_preirq = pl011_init_preirq, + .init_preirq = pl011_init_preirq, .init_postirq = pl011_init_postirq, - .endboot = NULL, - .suspend = pl011_suspend, - .resume = pl011_resume, - .tx_ready = pl011_tx_ready, - .putc = pl011_putc, - .getc = pl011_getc, - .irq = pl011_irq, - .start_tx = pl011_tx_start, - .stop_tx = pl011_tx_stop, - .vuart_info = pl011_vuart, + .endboot = NULL, + .suspend = pl011_suspend, + .resume = pl011_resume, + .tx_ready = pl011_tx_ready, + .putc = pl011_putc, + .getc = pl011_getc, + .irq = pl011_irq, + .start_tx = pl011_tx_start, + .stop_tx = pl011_tx_stop, + .vuart_info = pl011_vuart, }; static int __init pl011_uart_init(int irq, u64 addr, u64 size, bool sbsa) @@ -227,11 +226,11 @@ static int __init pl011_uart_init(int irq, u64 addr, u64 size, bool sbsa) struct pl011 *uart; uart = &pl011_com; - uart->irq = irq; + uart->irq = irq; uart->data_bits = 8; - uart->parity = PARITY_NONE; + uart->parity = PARITY_NONE; uart->stop_bits = 1; - 
uart->sbsa = sbsa; + uart->sbsa = sbsa; uart->regs = ioremap_nocache(addr, size); if ( !uart->regs ) @@ -292,29 +291,27 @@ static int __init pl011_dt_uart_init(struct dt_device_node *dev, return 0; } -static const struct dt_device_match pl011_dt_match[] __initconst = -{ +static const struct dt_device_match pl011_dt_match[] __initconst = { DT_MATCH_COMPATIBLE("arm,pl011"), - { /* sentinel */ }, + {/* sentinel */}, }; -DT_DEVICE_START(pl011, "PL011 UART", DEVICE_SERIAL) - .dt_match = pl011_dt_match, - .init = pl011_dt_uart_init, -DT_DEVICE_END +DT_DEVICE_START(pl011, "PL011 UART", DEVICE_SERIAL).dt_match = pl011_dt_match, + .init = pl011_dt_uart_init, + DT_DEVICE_END #ifdef CONFIG_ACPI #include -static int __init pl011_acpi_uart_init(const void *data) + static int __init pl011_acpi_uart_init(const void *data) { acpi_status status; struct acpi_table_spcr *spcr = NULL; int res; bool sbsa; - status = acpi_get_table(ACPI_SIG_SPCR, 0, - (struct acpi_table_header **)&spcr); + status = + acpi_get_table(ACPI_SIG_SPCR, 0, (struct acpi_table_header **)&spcr); if ( ACPI_FAILURE(status) ) { @@ -328,8 +325,8 @@ static int __init pl011_acpi_uart_init(const void *data) /* trigger/polarity information is not available in spcr */ irq_set_type(spcr->interrupt, IRQ_TYPE_LEVEL_HIGH); - res = pl011_uart_init(spcr->interrupt, spcr->serial_port.address, - PAGE_SIZE, sbsa); + res = pl011_uart_init(spcr->interrupt, spcr->serial_port.address, PAGE_SIZE, + sbsa); if ( res < 0 ) { printk("pl011: Unable to initialize\n"); @@ -340,27 +337,27 @@ static int __init pl011_acpi_uart_init(const void *data) } ACPI_DEVICE_START(apl011, "PL011 UART", DEVICE_SERIAL) - .class_type = ACPI_DBG2_PL011, - .init = pl011_acpi_uart_init, -ACPI_DEVICE_END + .class_type = ACPI_DBG2_PL011, + .init = pl011_acpi_uart_init, + ACPI_DEVICE_END -ACPI_DEVICE_START(asbsa_uart, "SBSA UART", DEVICE_SERIAL) - .class_type = ACPI_DBG2_SBSA, - .init = pl011_acpi_uart_init, -ACPI_DEVICE_END + ACPI_DEVICE_START(asbsa_uart, "SBSA UART", DEVICE_SERIAL) + .class_type = ACPI_DBG2_SBSA, + .init = pl011_acpi_uart_init, + ACPI_DEVICE_END -ACPI_DEVICE_START(asbsa32_uart, "SBSA32 UART", DEVICE_SERIAL) - .class_type = ACPI_DBG2_SBSA_32, - .init = pl011_acpi_uart_init, -ACPI_DEVICE_END + ACPI_DEVICE_START(asbsa32_uart, "SBSA32 UART", DEVICE_SERIAL) + .class_type = ACPI_DBG2_SBSA_32, + .init = pl011_acpi_uart_init, + ACPI_DEVICE_END #endif -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/drivers/char/scif-uart.c b/xen/drivers/char/scif-uart.c index 465fb3457f..d55e2df2b7 100644 --- a/xen/drivers/char/scif-uart.c +++ b/xen/drivers/char/scif-uart.c @@ -29,13 +29,14 @@ #include #include -#define scif_readb(uart, off) readb((uart)->regs + (off)) -#define scif_writeb(uart, off, val) writeb((val), (uart)->regs + (off)) +#define scif_readb(uart, off) readb((uart)->regs + (off)) +#define scif_writeb(uart, off, val) writeb((val), (uart)->regs + (off)) -#define scif_readw(uart, off) readw((uart)->regs + (off)) -#define scif_writew(uart, off, val) writew((val), (uart)->regs + (off)) +#define scif_readw(uart, off) readw((uart)->regs + (off)) +#define scif_writew(uart, off, val) writew((val), (uart)->regs + (off)) -static struct scif_uart { +static struct scif_uart +{ unsigned int irq; char __iomem *regs; struct irqaction irqaction; @@ -86,7 +87,8 @@ static void __init 
scif_uart_init_preirq(struct serial_port *port) * Wait until last bit has been transmitted. This is needed for a smooth * transition when we come from early printk */ - while ( !(scif_readw(uart, SCIF_SCFSR) & SCFSR_TEND) ); + while ( !(scif_readw(uart, SCIF_SCFSR) & SCFSR_TEND) ) + ; /* Disable TX/RX parts and all interrupts */ scif_writew(uart, SCIF_SCSCR, 0); @@ -104,8 +106,8 @@ static void __init scif_uart_init_preirq(struct serial_port *port) scif_writew(uart, SCIF_SCFCR, SCFCR_RTRG11 | SCFCR_TTRG11); /* Enable TX/RX parts */ - scif_writew(uart, SCIF_SCSCR, scif_readw(uart, SCIF_SCSCR) | - SCSCR_TE | SCSCR_RE); + scif_writew(uart, SCIF_SCSCR, + scif_readw(uart, SCIF_SCSCR) | SCSCR_TE | SCSCR_RE); } static void __init scif_uart_init_postirq(struct serial_port *port) @@ -114,8 +116,8 @@ static void __init scif_uart_init_postirq(struct serial_port *port) int rc; uart->irqaction.handler = scif_uart_interrupt; - uart->irqaction.name = "scif_uart"; - uart->irqaction.dev_id = port; + uart->irqaction.name = "scif_uart"; + uart->irqaction.dev_id = port; if ( (rc = setup_irq(uart->irq, 0, &uart->irqaction)) != 0 ) dprintk(XENLOG_ERR, "Failed to allocated scif_uart IRQ %d\n", @@ -128,8 +130,9 @@ static void __init scif_uart_init_postirq(struct serial_port *port) scif_writew(uart, SCIF_SCLSR, 0); /* Enable TX/RX and Error Interrupts */ - scif_writew(uart, SCIF_SCSCR, scif_readw(uart, SCIF_SCSCR) | - SCSCR_TIE | SCSCR_RIE | SCSCR_REIE); + scif_writew(uart, SCIF_SCSCR, + scif_readw(uart, SCIF_SCSCR) | SCSCR_TIE | SCSCR_RIE | + SCSCR_REIE); } static void scif_uart_suspend(struct serial_port *port) @@ -151,9 +154,9 @@ static int scif_uart_tx_ready(struct serial_port *port) if ( !(scif_readw(uart, SCIF_SCFSR) & SCFSR_TDFE) ) return 0; - /* Check number of data bytes stored in TX FIFO */ + /* Check number of data bytes stored in TX FIFO */ cnt = scif_readw(uart, SCIF_SCFDR) >> 8; - ASSERT( cnt >= 0 && cnt <= SCIF_FIFO_MAX_SIZE ); + ASSERT(cnt >= 0 && cnt <= SCIF_FIFO_MAX_SIZE); return (SCIF_FIFO_MAX_SIZE - cnt); } @@ -164,8 +167,8 @@ static void scif_uart_putc(struct serial_port *port, char c) scif_writeb(uart, SCIF_SCFTDR, c); /* Clear required TX flags */ - scif_writew(uart, SCIF_SCFSR, scif_readw(uart, SCIF_SCFSR) & - ~(SCFSR_TEND | SCFSR_TDFE)); + scif_writew(uart, SCIF_SCFSR, + scif_readw(uart, SCIF_SCFSR) & ~(SCFSR_TEND | SCFSR_TDFE)); } static int scif_uart_getc(struct serial_port *port, char *pc) @@ -215,22 +218,21 @@ static void scif_uart_stop_tx(struct serial_port *port) } static struct uart_driver __read_mostly scif_uart_driver = { - .init_preirq = scif_uart_init_preirq, + .init_preirq = scif_uart_init_preirq, .init_postirq = scif_uart_init_postirq, - .endboot = NULL, - .suspend = scif_uart_suspend, - .resume = scif_uart_resume, - .tx_ready = scif_uart_tx_ready, - .putc = scif_uart_putc, - .getc = scif_uart_getc, - .irq = scif_uart_irq, - .start_tx = scif_uart_start_tx, - .stop_tx = scif_uart_stop_tx, - .vuart_info = scif_vuart_info, + .endboot = NULL, + .suspend = scif_uart_suspend, + .resume = scif_uart_resume, + .tx_ready = scif_uart_tx_ready, + .putc = scif_uart_putc, + .getc = scif_uart_getc, + .irq = scif_uart_irq, + .start_tx = scif_uart_start_tx, + .stop_tx = scif_uart_stop_tx, + .vuart_info = scif_vuart_info, }; -static int __init scif_uart_init(struct dt_device_node *dev, - const void *data) +static int __init scif_uart_init(struct dt_device_node *dev, const void *data) { const char *config = data; struct scif_uart *uart; @@ -246,7 +248,7 @@ static int __init scif_uart_init(struct 
dt_device_node *dev, if ( res ) { printk("scif-uart: Unable to retrieve the base" - " address of the UART\n"); + " address of the UART\n"); return res; } @@ -265,11 +267,11 @@ static int __init scif_uart_init(struct dt_device_node *dev, return -ENOMEM; } - uart->vuart.base_addr = addr; - uart->vuart.size = size; - uart->vuart.data_off = SCIF_SCFTDR; + uart->vuart.base_addr = addr; + uart->vuart.size = size; + uart->vuart.data_off = SCIF_SCFTDR; uart->vuart.status_off = SCIF_SCFSR; - uart->vuart.status = SCFSR_TDFE; + uart->vuart.status = SCFSR_TDFE; /* Register with generic serial driver */ serial_register_uart(SERHND_DTUART, &scif_uart_driver, uart); @@ -279,22 +281,21 @@ static int __init scif_uart_init(struct dt_device_node *dev, return 0; } -static const struct dt_device_match scif_uart_dt_match[] __initconst = -{ +static const struct dt_device_match scif_uart_dt_match[] __initconst = { DT_MATCH_COMPATIBLE("renesas,scif"), - { /* sentinel */ }, + {/* sentinel */}, }; -DT_DEVICE_START(scif_uart, "SCIF UART", DEVICE_SERIAL) - .dt_match = scif_uart_dt_match, - .init = scif_uart_init, -DT_DEVICE_END +DT_DEVICE_START(scif_uart, "SCIF UART", DEVICE_SERIAL).dt_match = + scif_uart_dt_match, + .init = scif_uart_init, + DT_DEVICE_END -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ + /* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/drivers/char/serial.c b/xen/drivers/char/serial.c index 221a14c092..6262aaae5f 100644 --- a/xen/drivers/char/serial.c +++ b/xen/drivers/char/serial.c @@ -1,8 +1,8 @@ /****************************************************************************** * serial.c - * + * * Framework for serial device drivers. - * + * * Copyright (c) 2003-2008, K A Fraser */ @@ -18,15 +18,12 @@ unsigned int __read_mostly serial_txbufsz = 16384; size_param("serial_tx_buffer", serial_txbufsz); -#define mask_serial_rxbuf_idx(_i) ((_i)&(serial_rxbufsz-1)) -#define mask_serial_txbuf_idx(_i) ((_i)&(serial_txbufsz-1)) +#define mask_serial_rxbuf_idx(_i) ((_i) & (serial_rxbufsz - 1)) +#define mask_serial_txbuf_idx(_i) ((_i) & (serial_txbufsz - 1)) static struct serial_port com[SERHND_IDX + 1] = { - [0 ... SERHND_IDX] = { - .rx_lock = SPIN_LOCK_UNLOCKED, - .tx_lock = SPIN_LOCK_UNLOCKED - } -}; + [0 ... 
SERHND_IDX] = {.rx_lock = SPIN_LOCK_UNLOCKED, + .tx_lock = SPIN_LOCK_UNLOCKED}}; static bool_t __read_mostly post_irq; @@ -59,7 +56,7 @@ void serial_rx_interrupt(struct serial_port *port, struct cpu_user_regs *regs) else if ( !(c & 0x80) && (port->rx_lo != NULL) ) fn = port->rx_lo; else if ( (port->rxbufp - port->rxbufc) != serial_rxbufsz ) - port->rxbuf[mask_serial_rxbuf_idx(port->rxbufp++)] = c; + port->rxbuf[mask_serial_rxbuf_idx(port->rxbufp++)] = c; } spin_unlock_irqrestore(&port->rx_lock, flags); @@ -103,15 +100,15 @@ void serial_tx_interrupt(struct serial_port *port, struct cpu_user_regs *regs) { if ( port->txbufc == port->txbufp ) break; - port->driver->putc( - port, port->txbuf[mask_serial_txbuf_idx(port->txbufc++)]); + port->driver->putc(port, + port->txbuf[mask_serial_txbuf_idx(port->txbufc++)]); } if ( i && port->driver->flush ) port->driver->flush(port); spin_unlock(&port->tx_lock); - out: +out: local_irq_restore(flags); } @@ -271,17 +268,17 @@ char serial_getc(int handle) return '\0'; do { - for ( ; ; ) + for ( ;; ) { spin_lock_irqsave(&port->rx_lock, flags); - + if ( port->rxbufp != port->rxbufc ) { c = port->rxbuf[mask_serial_rxbuf_idx(port->rxbufc++)]; spin_unlock_irqrestore(&port->rx_lock, flags); break; } - + if ( port->driver->getc(port, &c) ) { spin_unlock_irqrestore(&port->rx_lock, flags); @@ -293,9 +290,9 @@ char serial_getc(int handle) cpu_relax(); udelay(100); } - } while ( ((handle & SERHND_LO) && (c & 0x80)) || + } while ( ((handle & SERHND_LO) && (c & 0x80)) || ((handle & SERHND_HI) && !(c & 0x80)) ); - + return c & 0x7f; } @@ -318,7 +315,7 @@ int __init serial_parse_handle(char *conf) if ( strncmp(conf, "com", 3) ) goto fail; - switch ( conf[3] ) + switch (conf[3]) { case '1': handle = SERHND_COM1; @@ -335,7 +332,7 @@ int __init serial_parse_handle(char *conf) else if ( conf[4] == 'L' ) flags |= SERHND_LO; - common: +common: if ( !com[handle].driver ) goto fail; @@ -350,7 +347,7 @@ int __init serial_parse_handle(char *conf) return handle | flags | SERHND_COOKED; - fail: +fail: return -1; } @@ -373,7 +370,7 @@ void __init serial_set_rx_handler(int handle, serial_rx_fn fn) { if ( port->rx_lo != NULL ) goto fail; - port->rx_lo = fn; + port->rx_lo = fn; } else if ( handle & SERHND_HI ) { @@ -391,9 +388,9 @@ void __init serial_set_rx_handler(int handle, serial_rx_fn fn) spin_unlock_irqrestore(&port->rx_lock, flags); return; - fail: +fail: spin_unlock_irqrestore(&port->rx_lock, flags); - printk("ERROR: Conflicting receive handlers for COM%d\n", + printk("ERROR: Conflicting receive handlers for COM%d\n", handle & SERHND_IDX); } @@ -419,7 +416,7 @@ void serial_start_sync(int handle) if ( handle == -1 ) return; - + port = &com[handle & SERHND_IDX]; spin_lock_irqsave(&port->tx_lock, flags); @@ -454,7 +451,7 @@ void serial_end_sync(int handle) if ( handle == -1 ) return; - + port = &com[handle & SERHND_IDX]; spin_lock_irqsave(&port->tx_lock, flags); @@ -471,7 +468,7 @@ void serial_start_log_everything(int handle) if ( handle == -1 ) return; - + port = &com[handle & SERHND_IDX]; spin_lock_irqsave(&port->tx_lock, flags); @@ -487,7 +484,7 @@ void serial_end_log_everything(int handle) if ( handle == -1 ) return; - + port = &com[handle & SERHND_IDX]; spin_lock_irqsave(&port->tx_lock, flags); @@ -535,8 +532,8 @@ void __init serial_endboot(void) int __init serial_irq(int idx) { - if ( (idx >= 0) && (idx < ARRAY_SIZE(com)) && - com[idx].driver && com[idx].driver->irq ) + if ( (idx >= 0) && (idx < ARRAY_SIZE(com)) && com[idx].driver && + com[idx].driver->irq ) return 
com[idx].driver->irq(&com[idx]); return -1; @@ -544,8 +541,8 @@ int __init serial_irq(int idx) const struct vuart_info *serial_vuart_info(int idx) { - if ( (idx >= 0) && (idx < ARRAY_SIZE(com)) && - com[idx].driver && com[idx].driver->vuart_info ) + if ( (idx >= 0) && (idx < ARRAY_SIZE(com)) && com[idx].driver && + com[idx].driver->vuart_info ) return com[idx].driver->vuart_info(&com[idx]); return NULL; @@ -572,7 +569,7 @@ void __init serial_register_uart(int idx, struct uart_driver *driver, { /* Store UART-specific info. */ com[idx].driver = driver; - com[idx].uart = uart; + com[idx].uart = uart; } void __init serial_async_transmit(struct serial_port *port) @@ -584,8 +581,7 @@ void __init serial_async_transmit(struct serial_port *port) serial_txbufsz = PAGE_SIZE; while ( serial_txbufsz & (serial_txbufsz - 1) ) serial_txbufsz &= serial_txbufsz - 1; - port->txbuf = alloc_xenheap_pages( - get_order_from_bytes(serial_txbufsz), 0); + port->txbuf = alloc_xenheap_pages(get_order_from_bytes(serial_txbufsz), 0); } /* diff --git a/xen/drivers/char/xen_pv_console.c b/xen/drivers/char/xen_pv_console.c index cc1c1d743f..06a8db2150 100644 --- a/xen/drivers/char/xen_pv_console.c +++ b/xen/drivers/char/xen_pv_console.c @@ -61,12 +61,12 @@ void pv_console_init(void) cons_evtchn = raw_evtchn; printk("Initialised PV console at 0x%p with pfn %#lx and evtchn %#x\n", - cons_ring, raw_pfn, cons_evtchn); + cons_ring, raw_pfn, cons_evtchn); pv_console = true; return; - error: +error: printk("Couldn't initialise PV console\n"); } diff --git a/xen/drivers/cpufreq/cpufreq.c b/xen/drivers/cpufreq/cpufreq.c index ba9897a22b..dada99aba5 100644 --- a/xen/drivers/cpufreq/cpufreq.c +++ b/xen/drivers/cpufreq/cpufreq.c @@ -49,10 +49,11 @@ static unsigned int __read_mostly usr_min_freq; static unsigned int __read_mostly usr_max_freq; static void cpufreq_cmdline_common_para(struct cpufreq_policy *new_policy); -struct cpufreq_dom { - unsigned int dom; - cpumask_var_t map; - struct list_head node; +struct cpufreq_dom +{ + unsigned int dom; + cpumask_var_t map; + struct list_head node; }; static LIST_HEAD_READ_MOSTLY(cpufreq_dom_list_head); @@ -106,11 +107,11 @@ struct cpufreq_governor *__find_governor(const char *governor) { struct cpufreq_governor *t; - if (!governor) + if ( !governor ) return NULL; - list_for_each_entry(t, &cpufreq_governor_list, governor_list) - if (!strnicmp(governor, t->name, CPUFREQ_NAME_LEN)) + list_for_each_entry (t, &cpufreq_governor_list, governor_list) + if ( !strnicmp(governor, t->name, CPUFREQ_NAME_LEN) ) return t; return NULL; @@ -118,10 +119,10 @@ struct cpufreq_governor *__find_governor(const char *governor) int __init cpufreq_register_governor(struct cpufreq_governor *governor) { - if (!governor) + if ( !governor ) return -EINVAL; - if (__find_governor(governor->name) != NULL) + if ( __find_governor(governor->name) != NULL ) return -EEXIST; list_add(&governor->governor_list, &cpufreq_governor_list); @@ -134,19 +135,18 @@ int cpufreq_limit_change(unsigned int cpu) struct cpufreq_policy *data; struct cpufreq_policy policy; - if (!cpu_online(cpu) || !(data = per_cpu(cpufreq_cpu_policy, cpu)) || - !processor_pminfo[cpu]) + if ( !cpu_online(cpu) || !(data = per_cpu(cpufreq_cpu_policy, cpu)) || + !processor_pminfo[cpu] ) return -ENODEV; perf = &processor_pminfo[cpu]->perf; - if (perf->platform_limit >= perf->state_count) + if ( perf->platform_limit >= perf->state_count ) return -EINVAL; - memcpy(&policy, data, sizeof(struct cpufreq_policy)); + memcpy(&policy, data, sizeof(struct cpufreq_policy)); - 
policy.max = - perf->states[perf->platform_limit].core_frequency * 1000; + policy.max = perf->states[perf->platform_limit].core_frequency * 1000; return __cpufreq_set_policy(data, &policy); } @@ -172,65 +172,73 @@ int cpufreq_add_cpu(unsigned int cpu) if ( !(perf->init & XEN_PX_INIT) ) return -EINVAL; - if (!cpufreq_driver.init) + if ( !cpufreq_driver.init ) return 0; - if (per_cpu(cpufreq_cpu_policy, cpu)) + if ( per_cpu(cpufreq_cpu_policy, cpu) ) return 0; - if (perf->shared_type == CPUFREQ_SHARED_TYPE_HW) + if ( perf->shared_type == CPUFREQ_SHARED_TYPE_HW ) hw_all = 1; dom = perf->domain_info.domain; - list_for_each(pos, &cpufreq_dom_list_head) { + list_for_each (pos, &cpufreq_dom_list_head) + { cpufreq_dom = list_entry(pos, struct cpufreq_dom, node); - if (dom == cpufreq_dom->dom) { + if ( dom == cpufreq_dom->dom ) + { domexist = 1; break; } } - if (!domexist) { + if ( !domexist ) + { cpufreq_dom = xzalloc(struct cpufreq_dom); - if (!cpufreq_dom) + if ( !cpufreq_dom ) return -ENOMEM; - if (!zalloc_cpumask_var(&cpufreq_dom->map)) { + if ( !zalloc_cpumask_var(&cpufreq_dom->map) ) + { xfree(cpufreq_dom); return -ENOMEM; } cpufreq_dom->dom = dom; list_add(&cpufreq_dom->node, &cpufreq_dom_list_head); - } else { + } + else + { /* domain sanity check under whatever coordination type */ firstcpu = cpumask_first(cpufreq_dom->map); - if ((perf->domain_info.coord_type != - processor_pminfo[firstcpu]->perf.domain_info.coord_type) || - (perf->domain_info.num_processors != - processor_pminfo[firstcpu]->perf.domain_info.num_processors)) { - + if ( (perf->domain_info.coord_type != + processor_pminfo[firstcpu]->perf.domain_info.coord_type) || + (perf->domain_info.num_processors != + processor_pminfo[firstcpu]->perf.domain_info.num_processors) ) + { printk(KERN_WARNING "cpufreq fail to add CPU%d:" - "incorrect _PSD(%"PRIu64":%"PRIu64"), " - "expect(%"PRIu64"/%"PRIu64")\n", + "incorrect _PSD(%" PRIu64 ":%" PRIu64 "), " + "expect(%" PRIu64 "/%" PRIu64 ")\n", cpu, perf->domain_info.coord_type, perf->domain_info.num_processors, processor_pminfo[firstcpu]->perf.domain_info.coord_type, - processor_pminfo[firstcpu]->perf.domain_info.num_processors - ); + processor_pminfo[firstcpu]->perf.domain_info.num_processors); return -EINVAL; } } - if (!domexist || hw_all) { + if ( !domexist || hw_all ) + { policy = xzalloc(struct cpufreq_policy); - if (!policy) { + if ( !policy ) + { ret = -ENOMEM; goto err0; } - if (!zalloc_cpumask_var(&policy->cpus)) { + if ( !zalloc_cpumask_var(&policy->cpus) ) + { xfree(policy); ret = -ENOMEM; goto err0; @@ -240,20 +248,23 @@ int cpufreq_add_cpu(unsigned int cpu) per_cpu(cpufreq_cpu_policy, cpu) = policy; ret = cpufreq_driver.init(policy); - if (ret) { + if ( ret ) + { free_cpumask_var(policy->cpus); xfree(policy); per_cpu(cpufreq_cpu_policy, cpu) = NULL; goto err0; } - if (cpufreq_verbose) + if ( cpufreq_verbose ) printk("CPU %u initialization completed\n", cpu); - } else { + } + else + { firstcpu = cpumask_first(cpufreq_dom->map); policy = per_cpu(cpufreq_cpu_policy, firstcpu); per_cpu(cpufreq_cpu_policy, cpu) = policy; - if (cpufreq_verbose) + if ( cpufreq_verbose ) printk("adding CPU %u\n", cpu); } @@ -261,28 +272,31 @@ int cpufreq_add_cpu(unsigned int cpu) cpumask_set_cpu(cpu, cpufreq_dom->map); ret = cpufreq_statistic_init(cpu); - if (ret) + if ( ret ) goto err1; - if (hw_all || (cpumask_weight(cpufreq_dom->map) == - perf->domain_info.num_processors)) { + if ( hw_all || (cpumask_weight(cpufreq_dom->map) == + perf->domain_info.num_processors) ) + { memcpy(&new_policy, policy, 
sizeof(struct cpufreq_policy)); policy->governor = NULL; cpufreq_cmdline_common_para(&new_policy); ret = __cpufreq_set_policy(policy, &new_policy); - if (ret) { - if (new_policy.governor == CPUFREQ_DEFAULT_GOVERNOR) + if ( ret ) + { + if ( new_policy.governor == CPUFREQ_DEFAULT_GOVERNOR ) /* if default governor fail, cpufreq really meet troubles */ goto err2; - else { + else + { /* grub option governor fail */ /* give one more chance to default gov */ memcpy(&new_policy, policy, sizeof(struct cpufreq_policy)); new_policy.governor = CPUFREQ_DEFAULT_GOVERNOR; ret = __cpufreq_set_policy(policy, &new_policy); - if (ret) + if ( ret ) goto err2; } } @@ -297,13 +311,15 @@ err1: cpumask_clear_cpu(cpu, policy->cpus); cpumask_clear_cpu(cpu, cpufreq_dom->map); - if (cpumask_empty(policy->cpus)) { + if ( cpumask_empty(policy->cpus) ) + { cpufreq_driver.exit(policy); free_cpumask_var(policy->cpus); xfree(policy); } err0: - if (cpumask_empty(cpufreq_dom->map)) { + if ( cpumask_empty(cpufreq_dom->map) ) + { list_del(&cpufreq_dom->node); free_cpumask_var(cpufreq_dom->map); xfree(cpufreq_dom); @@ -330,30 +346,32 @@ int cpufreq_del_cpu(unsigned int cpu) if ( !(perf->init & XEN_PX_INIT) ) return -EINVAL; - if (!per_cpu(cpufreq_cpu_policy, cpu)) + if ( !per_cpu(cpufreq_cpu_policy, cpu) ) return 0; - if (perf->shared_type == CPUFREQ_SHARED_TYPE_HW) + if ( perf->shared_type == CPUFREQ_SHARED_TYPE_HW ) hw_all = 1; dom = perf->domain_info.domain; policy = per_cpu(cpufreq_cpu_policy, cpu); - list_for_each(pos, &cpufreq_dom_list_head) { + list_for_each (pos, &cpufreq_dom_list_head) + { cpufreq_dom = list_entry(pos, struct cpufreq_dom, node); - if (dom == cpufreq_dom->dom) { + if ( dom == cpufreq_dom->dom ) + { domexist = 1; break; } } - if (!domexist) + if ( !domexist ) return -EINVAL; /* for HW_ALL, stop gov for each core of the _PSD domain */ /* for SW_ALL & SW_ANY, stop gov for the 1st core of the _PSD domain */ - if (hw_all || (cpumask_weight(cpufreq_dom->map) == - perf->domain_info.num_processors)) + if ( hw_all || (cpumask_weight(cpufreq_dom->map) == + perf->domain_info.num_processors) ) __cpufreq_governor(policy, CPUFREQ_GOV_STOP); cpufreq_statistic_exit(cpu); @@ -361,7 +379,8 @@ int cpufreq_del_cpu(unsigned int cpu) cpumask_clear_cpu(cpu, policy->cpus); cpumask_clear_cpu(cpu, cpufreq_dom->map); - if (cpumask_empty(policy->cpus)) { + if ( cpumask_empty(policy->cpus) ) + { cpufreq_driver.exit(policy); free_cpumask_var(policy->cpus); xfree(policy); @@ -369,13 +388,14 @@ int cpufreq_del_cpu(unsigned int cpu) /* for the last cpu of the domain, clean room */ /* It's safe here to free freq_table, drv_data and policy */ - if (cpumask_empty(cpufreq_dom->map)) { + if ( cpumask_empty(cpufreq_dom->map) ) + { list_del(&cpufreq_dom->node); free_cpumask_var(cpufreq_dom->map); xfree(cpufreq_dom); } - if (cpufreq_verbose) + if ( cpufreq_verbose ) printk("deleting CPU %u\n", cpu); return 0; } @@ -383,7 +403,7 @@ int cpufreq_del_cpu(unsigned int cpu) static void print_PCT(struct xen_pct_register *ptr) { printk("\t_PCT: descriptor=%d, length=%d, space_id=%d, " - "bit_width=%d, bit_offset=%d, reserved=%d, address=%"PRId64"\n", + "bit_width=%d, bit_offset=%d, reserved=%d, address=%" PRId64 "\n", ptr->descriptor, ptr->length, ptr->space_id, ptr->bit_width, ptr->bit_offset, ptr->reserved, ptr->address); } @@ -392,23 +412,20 @@ static void print_PSS(struct xen_processor_px *ptr, int count) { int i; printk("\t_PSS: state_count=%d\n", count); - for (i=0; inum_entries, ptr->revision, ptr->domain, ptr->coord_type, ptr->num_processors); 
} @@ -418,21 +435,22 @@ static void print_PPC(unsigned int platform_limit) printk("\t_PPC: %d\n", platform_limit); } -int set_px_pminfo(uint32_t acpi_id, struct xen_processor_performance *dom0_px_info) +int set_px_pminfo(uint32_t acpi_id, + struct xen_processor_performance *dom0_px_info) { - int ret=0, cpuid; + int ret = 0, cpuid; struct processor_pminfo *pmpt; struct processor_performance *pxpt; cpuid = get_cpu_id(acpi_id); - if ( cpuid < 0 || !dom0_px_info) + if ( cpuid < 0 || !dom0_px_info ) { ret = -EINVAL; goto out; } if ( cpufreq_verbose ) - printk("Set CPU acpi_id(%d) cpuid(%d) Px State info:\n", - acpi_id, cpuid); + printk("Set CPU acpi_id(%d) cpuid(%d) Px State info:\n", acpi_id, + cpuid); pmpt = processor_pminfo[cpuid]; if ( !pmpt ) @@ -452,19 +470,19 @@ int set_px_pminfo(uint32_t acpi_id, struct xen_processor_performance *dom0_px_in if ( dom0_px_info->flags & XEN_PX_PCT ) { /* space_id check */ - if (dom0_px_info->control_register.space_id != - dom0_px_info->status_register.space_id) + if ( dom0_px_info->control_register.space_id != + dom0_px_info->status_register.space_id ) { ret = -EINVAL; goto out; } - memcpy ((void *)&pxpt->control_register, - (void *)&dom0_px_info->control_register, - sizeof(struct xen_pct_register)); - memcpy ((void *)&pxpt->status_register, - (void *)&dom0_px_info->status_register, - sizeof(struct xen_pct_register)); + memcpy((void *)&pxpt->control_register, + (void *)&dom0_px_info->control_register, + sizeof(struct xen_pct_register)); + memcpy((void *)&pxpt->status_register, + (void *)&dom0_px_info->status_register, + sizeof(struct xen_pct_register)); if ( cpufreq_verbose ) { @@ -473,17 +491,17 @@ int set_px_pminfo(uint32_t acpi_id, struct xen_processor_performance *dom0_px_in } } - if ( dom0_px_info->flags & XEN_PX_PSS ) + if ( dom0_px_info->flags & XEN_PX_PSS ) { /* capability check */ - if (dom0_px_info->state_count <= 1) + if ( dom0_px_info->state_count <= 1 ) { ret = -EINVAL; goto out; } if ( !(pxpt->states = xmalloc_array(struct xen_processor_px, - dom0_px_info->state_count)) ) + dom0_px_info->state_count)) ) { ret = -ENOMEM; goto out; @@ -497,24 +515,23 @@ int set_px_pminfo(uint32_t acpi_id, struct xen_processor_performance *dom0_px_in pxpt->state_count = dom0_px_info->state_count; if ( cpufreq_verbose ) - print_PSS(pxpt->states,pxpt->state_count); + print_PSS(pxpt->states, pxpt->state_count); } if ( dom0_px_info->flags & XEN_PX_PSD ) { /* check domain coordination */ - if (dom0_px_info->shared_type != CPUFREQ_SHARED_TYPE_ALL && - dom0_px_info->shared_type != CPUFREQ_SHARED_TYPE_ANY && - dom0_px_info->shared_type != CPUFREQ_SHARED_TYPE_HW) + if ( dom0_px_info->shared_type != CPUFREQ_SHARED_TYPE_ALL && + dom0_px_info->shared_type != CPUFREQ_SHARED_TYPE_ANY && + dom0_px_info->shared_type != CPUFREQ_SHARED_TYPE_HW ) { ret = -EINVAL; goto out; } pxpt->shared_type = dom0_px_info->shared_type; - memcpy ((void *)&pxpt->domain_info, - (void *)&dom0_px_info->domain_info, - sizeof(struct xen_psd_package)); + memcpy((void *)&pxpt->domain_info, (void *)&dom0_px_info->domain_info, + sizeof(struct xen_psd_package)); if ( cpufreq_verbose ) print_PSD(&pxpt->domain_info); @@ -529,13 +546,13 @@ int set_px_pminfo(uint32_t acpi_id, struct xen_processor_performance *dom0_px_in if ( pxpt->init == XEN_PX_INIT ) { - ret = cpufreq_limit_change(cpuid); + ret = cpufreq_limit_change(cpuid); goto out; } } - if ( dom0_px_info->flags == ( XEN_PX_PCT | XEN_PX_PSS | - XEN_PX_PSD | XEN_PX_PPC ) ) + if ( dom0_px_info->flags == + (XEN_PX_PCT | XEN_PX_PSS | XEN_PX_PSD | XEN_PX_PPC) ) 
{ pxpt->init = XEN_PX_INIT; @@ -549,25 +566,29 @@ out: static void cpufreq_cmdline_common_para(struct cpufreq_policy *new_policy) { - if (usr_max_freq) + if ( usr_max_freq ) new_policy->max = usr_max_freq; - if (usr_min_freq) + if ( usr_min_freq ) new_policy->min = usr_min_freq; } -static int __init cpufreq_handle_common_option(const char *name, const char *val) +static int __init cpufreq_handle_common_option(const char *name, + const char *val) { - if (!strcmp(name, "maxfreq") && val) { + if ( !strcmp(name, "maxfreq") && val ) + { usr_max_freq = simple_strtoul(val, NULL, 0); return 1; } - if (!strcmp(name, "minfreq") && val) { + if ( !strcmp(name, "minfreq") && val ) + { usr_min_freq = simple_strtoul(val, NULL, 0); return 1; } - if (!strcmp(name, "verbose")) { + if ( !strcmp(name, "verbose") ) + { cpufreq_verbose = !val || !!simple_strtoul(val, NULL, 0); return 1; } @@ -577,14 +598,9 @@ static int __init cpufreq_handle_common_option(const char *name, const char *val static int __init cpufreq_cmdline_parse(const char *s) { - static struct cpufreq_governor *__initdata cpufreq_governors[] = - { - CPUFREQ_DEFAULT_GOVERNOR, - &cpufreq_gov_userspace, - &cpufreq_gov_dbs, - &cpufreq_gov_performance, - &cpufreq_gov_powersave - }; + static struct cpufreq_governor *__initdata cpufreq_governors[] = { + CPUFREQ_DEFAULT_GOVERNOR, &cpufreq_gov_userspace, &cpufreq_gov_dbs, + &cpufreq_gov_performance, &cpufreq_gov_powersave}; static char __initdata buf[128]; char *str = buf; unsigned int gov_index = 0; @@ -595,30 +611,36 @@ static int __init cpufreq_cmdline_parse(const char *s) char *val, *end = strchr(str, ','); unsigned int i; - if (end) + if ( end ) *end++ = '\0'; val = strchr(str, '='); - if (val) + if ( val ) *val++ = '\0'; - if (!cpufreq_opt_governor) { - if (!val) { - for (i = 0; i < ARRAY_SIZE(cpufreq_governors); ++i) { - if (!strcmp(str, cpufreq_governors[i]->name)) { + if ( !cpufreq_opt_governor ) + { + if ( !val ) + { + for ( i = 0; i < ARRAY_SIZE(cpufreq_governors); ++i ) + { + if ( !strcmp(str, cpufreq_governors[i]->name) ) + { cpufreq_opt_governor = cpufreq_governors[i]; gov_index = i; str = NULL; break; } } - } else { + } + else + { cpufreq_opt_governor = CPUFREQ_DEFAULT_GOVERNOR; } } - if (str && !cpufreq_handle_common_option(str, val) && - (!cpufreq_governors[gov_index]->handle_option || - !cpufreq_governors[gov_index]->handle_option(str, val))) + if ( str && !cpufreq_handle_common_option(str, val) && + (!cpufreq_governors[gov_index]->handle_option || + !cpufreq_governors[gov_index]->handle_option(str, val)) ) { printk(XENLOG_WARNING "cpufreq/%s: option '%s' not recognized\n", cpufreq_governors[gov_index]->name, str); @@ -626,17 +648,17 @@ static int __init cpufreq_cmdline_parse(const char *s) } str = end; - } while (str); + } while ( str ); return rc; } -static int cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - switch ( action ) + switch (action) { case CPU_DOWN_FAILED: case CPU_ONLINE: @@ -652,9 +674,7 @@ static int cpu_callback( return NOTIFY_DONE; } -static struct notifier_block cpu_nfb = { - .notifier_call = cpu_callback -}; +static struct notifier_block cpu_nfb = {.notifier_call = cpu_callback}; static int __init cpufreq_presmp_init(void) { @@ -665,9 +685,9 @@ presmp_initcall(cpufreq_presmp_init); int __init cpufreq_register_driver(const struct cpufreq_driver *driver_data) { - if ( !driver_data || !driver_data->init || - 
!driver_data->verify || !driver_data->exit || - (!driver_data->target == !driver_data->setpolicy) ) + if ( !driver_data || !driver_data->init || !driver_data->verify || + !driver_data->exit || + (!driver_data->target == !driver_data->setpolicy) ) return -EINVAL; if ( cpufreq_driver.init ) diff --git a/xen/drivers/cpufreq/cpufreq_misc_governors.c b/xen/drivers/cpufreq/cpufreq_misc_governors.c index 746bbcd5ff..75e1f67194 100644 --- a/xen/drivers/cpufreq/cpufreq_misc_governors.c +++ b/xen/drivers/cpufreq/cpufreq_misc_governors.c @@ -5,7 +5,7 @@ * (C) 2002 - 2004 Dominik Brodowski * * Nov 2008 Liu Jinsong - * Porting cpufreq_userspace.c, cpufreq_performance.c, and + * Porting cpufreq_userspace.c, cpufreq_performance.c, and * cpufreq_powersave.c from Liunx 2.6.23 to Xen hypervisor * * This program is free software; you can redistribute it and/or modify @@ -32,28 +32,28 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, int ret = 0; unsigned int cpu; - if (unlikely(!policy) || - unlikely(!cpu_online(cpu = policy->cpu))) + if ( unlikely(!policy) || unlikely(!cpu_online(cpu = policy->cpu)) ) return -EINVAL; - switch (event) { + switch (event) + { case CPUFREQ_GOV_START: - if (!per_cpu(cpu_set_freq, cpu)) + if ( !per_cpu(cpu_set_freq, cpu) ) per_cpu(cpu_set_freq, cpu) = policy->cur; break; case CPUFREQ_GOV_STOP: per_cpu(cpu_set_freq, cpu) = 0; break; case CPUFREQ_GOV_LIMITS: - if (policy->max < per_cpu(cpu_set_freq, cpu)) + if ( policy->max < per_cpu(cpu_set_freq, cpu) ) ret = __cpufreq_driver_target(policy, policy->max, - CPUFREQ_RELATION_H); - else if (policy->min > per_cpu(cpu_set_freq, cpu)) + CPUFREQ_RELATION_H); + else if ( policy->min > per_cpu(cpu_set_freq, cpu) ) ret = __cpufreq_driver_target(policy, policy->min, - CPUFREQ_RELATION_L); + CPUFREQ_RELATION_L); else ret = __cpufreq_driver_target(policy, per_cpu(cpu_set_freq, cpu), - CPUFREQ_RELATION_L); + CPUFREQ_RELATION_L); break; default: @@ -68,31 +68,32 @@ int write_userspace_scaling_setspeed(unsigned int cpu, unsigned int freq) { struct cpufreq_policy *policy; - if (!cpu_online(cpu) || !(policy = per_cpu(cpufreq_cpu_policy, cpu))) + if ( !cpu_online(cpu) || !(policy = per_cpu(cpufreq_cpu_policy, cpu)) ) return -EINVAL; per_cpu(cpu_set_freq, cpu) = freq; - if (freq < policy->min) + if ( freq < policy->min ) freq = policy->min; - if (freq > policy->max) + if ( freq > policy->max ) freq = policy->max; return __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L); } -static bool_t __init -cpufreq_userspace_handle_option(const char *name, const char *val) +static bool_t __init cpufreq_userspace_handle_option(const char *name, + const char *val) { - if (!strcmp(name, "speed") && val) { + if ( !strcmp(name, "speed") && val ) + { userspace_cmdline_freq = simple_strtoul(val, NULL, 0); return 1; } return 0; } -static int cpufreq_userspace_cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpufreq_userspace_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; @@ -107,45 +108,42 @@ static int cpufreq_userspace_cpu_callback( } static struct notifier_block cpufreq_userspace_cpu_nfb = { - .notifier_call = cpufreq_userspace_cpu_callback -}; + .notifier_call = cpufreq_userspace_cpu_callback}; struct cpufreq_governor cpufreq_gov_userspace = { .name = "userspace", .governor = cpufreq_governor_userspace, - .handle_option = cpufreq_userspace_handle_option -}; + .handle_option = cpufreq_userspace_handle_option}; static int __init 
cpufreq_gov_userspace_init(void) { unsigned int cpu; - for_each_online_cpu(cpu) + for_each_online_cpu (cpu) per_cpu(cpu_set_freq, cpu) = userspace_cmdline_freq; register_cpu_notifier(&cpufreq_userspace_cpu_nfb); return cpufreq_register_governor(&cpufreq_gov_userspace); } __initcall(cpufreq_gov_userspace_init); - /* * cpufreq performance governor */ static int cpufreq_governor_performance(struct cpufreq_policy *policy, - unsigned int event) + unsigned int event) { int ret = 0; - if (!policy) + if ( !policy ) return -EINVAL; - switch (event) { + switch (event) + { case CPUFREQ_GOV_START: case CPUFREQ_GOV_STOP: break; case CPUFREQ_GOV_LIMITS: - ret = __cpufreq_driver_target(policy, policy->max, - CPUFREQ_RELATION_H); + ret = __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); break; default: ret = -EINVAL; @@ -166,7 +164,6 @@ static int __init cpufreq_gov_performance_init(void) } __initcall(cpufreq_gov_performance_init); - /* * cpufreq powersave governor */ @@ -175,16 +172,16 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy, { int ret = 0; - if (!policy) + if ( !policy ) return -EINVAL; - switch (event) { + switch (event) + { case CPUFREQ_GOV_START: case CPUFREQ_GOV_STOP: break; case CPUFREQ_GOV_LIMITS: - ret = __cpufreq_driver_target(policy, policy->min, - CPUFREQ_RELATION_L); + ret = __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); break; default: ret = -EINVAL; diff --git a/xen/drivers/cpufreq/cpufreq_ondemand.c b/xen/drivers/cpufreq/cpufreq_ondemand.c index 6b905d7cfc..fe8e0b06de 100644 --- a/xen/drivers/cpufreq/cpufreq_ondemand.c +++ b/xen/drivers/cpufreq/cpufreq_ondemand.c @@ -5,7 +5,7 @@ * (C) 2003 Venkatesh Pallipadi . * Jun Nakajima * Feb 2008 Liu Jinsong - * Porting cpufreq_ondemand.c from Liunx 2.6.23 to Xen hypervisor + * Porting cpufreq_ondemand.c from Liunx 2.6.23 to Xen hypervisor * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -20,33 +20,36 @@ #include #include -#define DEF_FREQUENCY_UP_THRESHOLD (80) -#define MIN_FREQUENCY_UP_THRESHOLD (11) -#define MAX_FREQUENCY_UP_THRESHOLD (100) - -#define MIN_DBS_INTERVAL (MICROSECS(100)) -#define MIN_SAMPLING_RATE_RATIO (2) -#define MIN_SAMPLING_MILLISECS (MIN_SAMPLING_RATE_RATIO * 10) -#define MIN_STAT_SAMPLING_RATE \ - (MIN_SAMPLING_MILLISECS * MILLISECS(1)) -#define MIN_SAMPLING_RATE \ - (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) -#define MAX_SAMPLING_RATE (500 * def_sampling_rate) -#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 ) +#define DEF_FREQUENCY_UP_THRESHOLD (80) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) + +#define MIN_DBS_INTERVAL (MICROSECS(100)) +#define MIN_SAMPLING_RATE_RATIO (2) +#define MIN_SAMPLING_MILLISECS (MIN_SAMPLING_RATE_RATIO * 10) +#define MIN_STAT_SAMPLING_RATE (MIN_SAMPLING_MILLISECS * MILLISECS(1)) +#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) +#define MAX_SAMPLING_RATE (500 * def_sampling_rate) +#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) +#define TRANSITION_LATENCY_LIMIT (10 * 1000) static uint64_t def_sampling_rate; static uint64_t usr_sampling_rate; /* Sampling types */ -enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; +enum +{ + DBS_NORMAL_SAMPLE, + DBS_SUB_SAMPLE +}; static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); -static unsigned int dbs_enable; /* number of CPUs using this policy */ +static unsigned int dbs_enable; /* 
number of CPUs using this policy */ -static struct dbs_tuners { - uint64_t sampling_rate; +static struct dbs_tuners +{ + uint64_t sampling_rate; unsigned int up_threshold; unsigned int powersave_bias; } dbs_tuners_ins = { @@ -79,15 +82,14 @@ int write_ondemand_up_threshold(unsigned int up_threshold) int get_cpufreq_ondemand_para(uint32_t *sampling_rate_max, uint32_t *sampling_rate_min, - uint32_t *sampling_rate, - uint32_t *up_threshold) + uint32_t *sampling_rate, uint32_t *up_threshold) { - if (!sampling_rate_max || !sampling_rate_min || - !sampling_rate || !up_threshold) + if ( !sampling_rate_max || !sampling_rate_min || !sampling_rate || + !up_threshold ) return -EINVAL; - *sampling_rate_max = MAX_SAMPLING_RATE/MICROSECS(1); - *sampling_rate_min = MIN_SAMPLING_RATE/MICROSECS(1); + *sampling_rate_max = MAX_SAMPLING_RATE / MICROSECS(1); + *sampling_rate_min = MIN_SAMPLING_RATE / MICROSECS(1); *sampling_rate = dbs_tuners_ins.sampling_rate / MICROSECS(1); *up_threshold = dbs_tuners_ins.up_threshold; @@ -102,14 +104,15 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) unsigned int max; unsigned int j; - if (!this_dbs_info->enable) + if ( !this_dbs_info->enable ) return; policy = this_dbs_info->cur_policy; max = policy->max; - if (unlikely(policy->resume)) { - __cpufreq_driver_target(policy, max,CPUFREQ_RELATION_H); + if ( unlikely(policy->resume) ) + { + __cpufreq_driver_target(policy, max, CPUFREQ_RELATION_H); return; } @@ -117,11 +120,12 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) total_ns = cur_ns - this_dbs_info->prev_cpu_wall; this_dbs_info->prev_cpu_wall = NOW(); - if (total_ns < MIN_DBS_INTERVAL) + if ( total_ns < MIN_DBS_INTERVAL ) return; /* Get Idle Time */ - for_each_cpu(j, policy->cpus) { + for_each_cpu (j, policy->cpus) + { uint64_t idle_ns, total_idle_ns; uint64_t load, load_freq, freq_avg; struct cpu_dbs_info_s *j_dbs_info; @@ -131,7 +135,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) idle_ns = total_idle_ns - j_dbs_info->prev_cpu_idle; j_dbs_info->prev_cpu_idle = total_idle_ns; - if (unlikely(total_ns < idle_ns)) + if ( unlikely(total_ns < idle_ns) ) continue; load = 100 * (total_ns - idle_ns) / total_ns; @@ -139,14 +143,15 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) freq_avg = cpufreq_driver_getavg(j, GOV_GETAVG); load_freq = load * freq_avg; - if (load_freq > max_load_freq) + if ( load_freq > max_load_freq ) max_load_freq = load_freq; } /* Check for frequency increase */ - if (max_load_freq > (uint64_t) dbs_tuners_ins.up_threshold * policy->cur) { + if ( max_load_freq > (uint64_t)dbs_tuners_ins.up_threshold * policy->cur ) + { /* if we are already at full speed then break out early */ - if (policy->cur == max) + if ( policy->cur == max ) return; __cpufreq_driver_target(policy, max, CPUFREQ_RELATION_H); return; @@ -154,7 +159,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) /* Check for frequency decrease */ /* if we cannot reduce the frequency anymore, break out early */ - if (policy->cur == policy->min) + if ( policy->cur == policy->min ) return; /* @@ -162,8 +167,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) * can support the current CPU usage without triggering the up * policy. To be safe, we focus 10 points under the threshold. 
*/ - if (max_load_freq - < (uint64_t) (dbs_tuners_ins.up_threshold - 10) * policy->cur) { + if ( max_load_freq < + (uint64_t)(dbs_tuners_ins.up_threshold - 10) * policy->cur ) + { uint64_t freq_next; freq_next = max_load_freq / (dbs_tuners_ins.up_threshold - 10); @@ -176,13 +182,13 @@ static void do_dbs_timer(void *dbs) { struct cpu_dbs_info_s *dbs_info = (struct cpu_dbs_info_s *)dbs; - if (!dbs_info->enable) + if ( !dbs_info->enable ) return; dbs_check_cpu(dbs_info); set_timer(&per_cpu(dbs_timer, dbs_info->cpu), - align_timer(NOW() , dbs_tuners_ins.sampling_rate)); + align_timer(NOW(), dbs_tuners_ins.sampling_rate)); } static void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) @@ -190,12 +196,13 @@ static void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) dbs_info->enable = 1; init_timer(&per_cpu(dbs_timer, dbs_info->cpu), do_dbs_timer, - (void *)dbs_info, dbs_info->cpu); + (void *)dbs_info, dbs_info->cpu); - set_timer(&per_cpu(dbs_timer, dbs_info->cpu), NOW()+dbs_tuners_ins.sampling_rate); + set_timer(&per_cpu(dbs_timer, dbs_info->cpu), + NOW() + dbs_tuners_ins.sampling_rate); - if ( processor_pminfo[dbs_info->cpu]->perf.shared_type - == CPUFREQ_SHARED_TYPE_HW ) + if ( processor_pminfo[dbs_info->cpu]->perf.shared_type == + CPUFREQ_SHARED_TYPE_HW ) { dbs_info->stoppable = 1; } @@ -223,24 +230,27 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event) this_dbs_info = &per_cpu(cpu_dbs_info, cpu); - switch (event) { + switch (event) + { case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) + if ( (!cpu_online(cpu)) || (!policy->cur) ) return -EINVAL; - if (policy->cpuinfo.transition_latency > - (TRANSITION_LATENCY_LIMIT * 1000)) { + if ( policy->cpuinfo.transition_latency > + (TRANSITION_LATENCY_LIMIT * 1000) ) + { printk(KERN_WARNING "ondemand governor failed to load " - "due to too long transition latency\n"); + "due to too long transition latency\n"); return -EINVAL; } - if (this_dbs_info->enable) + if ( this_dbs_info->enable ) /* Already enabled */ break; dbs_enable++; - for_each_cpu(j, policy->cpus) { + for_each_cpu (j, policy->cpus) + { struct cpu_dbs_info_s *j_dbs_info; j_dbs_info = &per_cpu(cpu_dbs_info, j); j_dbs_info->cur_policy = policy; @@ -253,26 +263,33 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event) * Start the timerschedule work, when this governor * is used for first time */ - if ((dbs_enable == 1) && !dbs_tuners_ins.sampling_rate) { - def_sampling_rate = (uint64_t) policy->cpuinfo.transition_latency * - DEF_SAMPLING_RATE_LATENCY_MULTIPLIER; + if ( (dbs_enable == 1) && !dbs_tuners_ins.sampling_rate ) + { + def_sampling_rate = (uint64_t)policy->cpuinfo.transition_latency * + DEF_SAMPLING_RATE_LATENCY_MULTIPLIER; - if (def_sampling_rate < MIN_STAT_SAMPLING_RATE) + if ( def_sampling_rate < MIN_STAT_SAMPLING_RATE ) def_sampling_rate = MIN_STAT_SAMPLING_RATE; - if (!usr_sampling_rate) + if ( !usr_sampling_rate ) dbs_tuners_ins.sampling_rate = def_sampling_rate; - else if (usr_sampling_rate < MIN_SAMPLING_RATE) { - printk(KERN_WARNING "cpufreq/ondemand: " - "specified sampling rate too low, using %"PRIu64"\n", + else if ( usr_sampling_rate < MIN_SAMPLING_RATE ) + { + printk(KERN_WARNING + "cpufreq/ondemand: " + "specified sampling rate too low, using %" PRIu64 "\n", MIN_SAMPLING_RATE); dbs_tuners_ins.sampling_rate = MIN_SAMPLING_RATE; - } else if (usr_sampling_rate > MAX_SAMPLING_RATE) { - printk(KERN_WARNING "cpufreq/ondemand: " - "specified sampling rate too high, using %"PRIu64"\n", + } + else if ( 
usr_sampling_rate > MAX_SAMPLING_RATE ) + { + printk(KERN_WARNING + "cpufreq/ondemand: " + "specified sampling rate too high, using %" PRIu64 "\n", MAX_SAMPLING_RATE); dbs_tuners_ins.sampling_rate = MAX_SAMPLING_RATE; - } else + } + else dbs_tuners_ins.sampling_rate = usr_sampling_rate; } dbs_timer_init(this_dbs_info); @@ -293,21 +310,23 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event) if ( this_dbs_info->cur_policy == NULL ) { printk(KERN_WARNING "CPU%d ondemand governor not started yet," - "unable to GOV_LIMIT\n", cpu); + "unable to GOV_LIMIT\n", + cpu); return -EINVAL; } - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); + if ( policy->max < this_dbs_info->cur_policy->cur ) + __cpufreq_driver_target(this_dbs_info->cur_policy, policy->max, + CPUFREQ_RELATION_H); + else if ( policy->min > this_dbs_info->cur_policy->cur ) + __cpufreq_driver_target(this_dbs_info->cur_policy, policy->min, + CPUFREQ_RELATION_L); break; } return 0; } -static bool_t __init cpufreq_dbs_handle_option(const char *name, const char *val) +static bool_t __init cpufreq_dbs_handle_option(const char *name, + const char *val) { if ( !strcmp(name, "rate") && val ) { @@ -320,14 +339,14 @@ static bool_t __init cpufreq_dbs_handle_option(const char *name, const char *val if ( tmp < MIN_FREQUENCY_UP_THRESHOLD ) { printk(XENLOG_WARNING "cpufreq/ondemand: " - "specified threshold too low, using %d\n", + "specified threshold too low, using %d\n", MIN_FREQUENCY_UP_THRESHOLD); tmp = MIN_FREQUENCY_UP_THRESHOLD; } else if ( tmp > MAX_FREQUENCY_UP_THRESHOLD ) { printk(XENLOG_WARNING "cpufreq/ondemand: " - "specified threshold too high, using %d\n", + "specified threshold too high, using %d\n", MAX_FREQUENCY_UP_THRESHOLD); tmp = MAX_FREQUENCY_UP_THRESHOLD; } @@ -340,7 +359,7 @@ static bool_t __init cpufreq_dbs_handle_option(const char *name, const char *val if ( tmp > 1000 ) { printk(XENLOG_WARNING "cpufreq/ondemand: " - "specified bias too high, using 1000\n"); + "specified bias too high, using 1000\n"); tmp = 1000; } dbs_tuners_ins.powersave_bias = tmp; @@ -350,11 +369,10 @@ static bool_t __init cpufreq_dbs_handle_option(const char *name, const char *val return 1; } -struct cpufreq_governor cpufreq_gov_dbs = { - .name = "ondemand", - .governor = cpufreq_governor_dbs, - .handle_option = cpufreq_dbs_handle_option -}; +struct cpufreq_governor cpufreq_gov_dbs = {.name = "ondemand", + .governor = cpufreq_governor_dbs, + .handle_option = + cpufreq_dbs_handle_option}; static int __init cpufreq_gov_dbs_init(void) { @@ -368,9 +386,9 @@ void cpufreq_dbs_timer_suspend(void) cpu = smp_processor_id(); - if ( per_cpu(cpu_dbs_info,cpu).stoppable ) + if ( per_cpu(cpu_dbs_info, cpu).stoppable ) { - stop_timer( &per_cpu(dbs_timer, cpu) ); + stop_timer(&per_cpu(dbs_timer, cpu)); } } diff --git a/xen/drivers/cpufreq/utility.c b/xen/drivers/cpufreq/utility.c index 15e15fd458..3ae97b5947 100644 --- a/xen/drivers/cpufreq/utility.c +++ b/xen/drivers/cpufreq/utility.c @@ -50,8 +50,8 @@ void cpufreq_residency_update(unsigned int cpu, uint8_t state) total_idle_ns = get_cpu_idle_time(cpu); now = NOW(); - delta = (now - pxpt->prev_state_wall) - - (total_idle_ns - pxpt->prev_idle_wall); + delta = + (now - pxpt->prev_state_wall) - (total_idle_ns - pxpt->prev_idle_wall); if ( likely(delta >= 0) ) 
pxpt->u.pt[state].residency += delta; @@ -64,13 +64,13 @@ void cpufreq_statistic_update(unsigned int cpu, uint8_t from, uint8_t to) { struct pm_px *pxpt; struct processor_pminfo *pmpt = processor_pminfo[cpu]; - spinlock_t *cpufreq_statistic_lock = - &per_cpu(cpufreq_statistic_lock, cpu); + spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu); spin_lock(cpufreq_statistic_lock); pxpt = per_cpu(cpufreq_statistic_data, cpu); - if ( !pxpt || !pmpt ) { + if ( !pxpt || !pmpt ) + { spin_unlock(cpufreq_statistic_lock); return; } @@ -91,8 +91,8 @@ int cpufreq_statistic_init(unsigned int cpuid) uint32_t i, count; struct pm_px *pxpt; const struct processor_pminfo *pmpt = processor_pminfo[cpuid]; - spinlock_t *cpufreq_statistic_lock = - &per_cpu(cpufreq_statistic_lock, cpuid); + spinlock_t *cpufreq_statistic_lock = + &per_cpu(cpufreq_statistic_lock, cpuid); spin_lock_init(cpufreq_statistic_lock); @@ -102,7 +102,8 @@ int cpufreq_statistic_init(unsigned int cpuid) spin_lock(cpufreq_statistic_lock); pxpt = per_cpu(cpufreq_statistic_data, cpuid); - if ( pxpt ) { + if ( pxpt ) + { spin_unlock(cpufreq_statistic_lock); return 0; } @@ -110,21 +111,24 @@ int cpufreq_statistic_init(unsigned int cpuid) count = pmpt->perf.state_count; pxpt = xzalloc(struct pm_px); - if ( !pxpt ) { + if ( !pxpt ) + { spin_unlock(cpufreq_statistic_lock); return -ENOMEM; } per_cpu(cpufreq_statistic_data, cpuid) = pxpt; pxpt->u.trans_pt = xzalloc_array(uint64_t, count * count); - if (!pxpt->u.trans_pt) { + if ( !pxpt->u.trans_pt ) + { xfree(pxpt); spin_unlock(cpufreq_statistic_lock); return -ENOMEM; } pxpt->u.pt = xzalloc_array(struct pm_px_val, count); - if (!pxpt->u.pt) { + if ( !pxpt->u.pt ) + { xfree(pxpt->u.trans_pt); xfree(pxpt); spin_unlock(cpufreq_statistic_lock); @@ -134,7 +138,7 @@ int cpufreq_statistic_init(unsigned int cpuid) pxpt->u.total = pmpt->perf.state_count; pxpt->u.usable = pmpt->perf.state_count - pmpt->perf.platform_limit; - for (i=0; i < pmpt->perf.state_count; i++) + for ( i = 0; i < pmpt->perf.state_count; i++ ) pxpt->u.pt[i].freq = pmpt->perf.states[i].core_frequency; pxpt->prev_state_wall = NOW(); @@ -148,13 +152,14 @@ int cpufreq_statistic_init(unsigned int cpuid) void cpufreq_statistic_exit(unsigned int cpuid) { struct pm_px *pxpt; - spinlock_t *cpufreq_statistic_lock = - &per_cpu(cpufreq_statistic_lock, cpuid); + spinlock_t *cpufreq_statistic_lock = + &per_cpu(cpufreq_statistic_lock, cpuid); spin_lock(cpufreq_statistic_lock); pxpt = per_cpu(cpufreq_statistic_data, cpuid); - if (!pxpt) { + if ( !pxpt ) + { spin_unlock(cpufreq_statistic_lock); return; } @@ -172,25 +177,27 @@ void cpufreq_statistic_reset(unsigned int cpuid) uint32_t i, j, count; struct pm_px *pxpt; const struct processor_pminfo *pmpt = processor_pminfo[cpuid]; - spinlock_t *cpufreq_statistic_lock = - &per_cpu(cpufreq_statistic_lock, cpuid); + spinlock_t *cpufreq_statistic_lock = + &per_cpu(cpufreq_statistic_lock, cpuid); spin_lock(cpufreq_statistic_lock); pxpt = per_cpu(cpufreq_statistic_data, cpuid); - if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt ) { + if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt ) + { spin_unlock(cpufreq_statistic_lock); return; } count = pmpt->perf.state_count; - for (i=0; i < count; i++) { + for ( i = 0; i < count; i++ ) + { pxpt->u.pt[i].residency = 0; pxpt->u.pt[i].count = 0; - for (j=0; j < count; j++) - *(pxpt->u.trans_pt + i*count + j) = 0; + for ( j = 0; j < count; j++ ) + *(pxpt->u.trans_pt + i * count + j) = 0; } pxpt->prev_state_wall = NOW(); @@ -199,7 +206,6 @@ void 
cpufreq_statistic_reset(unsigned int cpuid) spin_unlock(cpufreq_statistic_lock); } - /********************************************************************* * FREQUENCY TABLE HELPERS * *********************************************************************/ @@ -212,33 +218,35 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, unsigned int second_max_freq = 0; unsigned int i; - for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { + for ( i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++ ) + { unsigned int freq = table[i].frequency; - if (freq == CPUFREQ_ENTRY_INVALID) + if ( freq == CPUFREQ_ENTRY_INVALID ) continue; - if (freq < min_freq) + if ( freq < min_freq ) min_freq = freq; - if (freq > max_freq) + if ( freq > max_freq ) max_freq = freq; } - for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { + for ( i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++ ) + { unsigned int freq = table[i].frequency; - if (freq == CPUFREQ_ENTRY_INVALID || freq == max_freq) + if ( freq == CPUFREQ_ENTRY_INVALID || freq == max_freq ) continue; - if (freq > second_max_freq) + if ( freq > second_max_freq ) second_max_freq = freq; } - if (second_max_freq == 0) + if ( second_max_freq == 0 ) second_max_freq = max_freq; - if (cpufreq_verbose) - printk("max_freq: %u second_max_freq: %u\n", - max_freq, second_max_freq); + if ( cpufreq_verbose ) + printk("max_freq: %u second_max_freq: %u\n", max_freq, + second_max_freq); policy->min = policy->cpuinfo.min_freq = min_freq; policy->max = policy->cpuinfo.max_freq = max_freq; policy->cpuinfo.second_max_freq = second_max_freq; - if (policy->min == ~0) + if ( policy->min == ~0 ) return -EINVAL; else return 0; @@ -251,23 +259,24 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, unsigned int i; unsigned int count = 0; - if (!cpu_online(policy->cpu)) + if ( !cpu_online(policy->cpu) ) return -EINVAL; cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); - for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { + for ( i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++ ) + { unsigned int freq = table[i].frequency; - if (freq == CPUFREQ_ENTRY_INVALID) + if ( freq == CPUFREQ_ENTRY_INVALID ) continue; - if ((freq >= policy->min) && (freq <= policy->max)) + if ( (freq >= policy->min) && (freq <= policy->max) ) count++; - else if ((next_larger > freq) && (freq > policy->max)) + else if ( (next_larger > freq) && (freq > policy->max) ) next_larger = freq; } - if (!count) + if ( !count ) policy->max = next_larger; cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, @@ -279,8 +288,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, int cpufreq_frequency_table_target(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table, unsigned int target_freq, - unsigned int relation, - unsigned int *index) + unsigned int relation, unsigned int *index) { struct cpufreq_frequency_table optimal = { .index = ~0, @@ -292,7 +300,8 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, }; unsigned int i; - switch (relation) { + switch (relation) + { case CPUFREQ_RELATION_H: suboptimal.frequency = ~0; break; @@ -301,37 +310,49 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, break; } - if (!cpu_online(policy->cpu)) + if ( !cpu_online(policy->cpu) ) return -EINVAL; - for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { + for ( i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++ ) + { unsigned int freq = table[i].frequency; - 
if (freq == CPUFREQ_ENTRY_INVALID) + if ( freq == CPUFREQ_ENTRY_INVALID ) continue; - if ((freq < policy->min) || (freq > policy->max)) + if ( (freq < policy->min) || (freq > policy->max) ) continue; - switch(relation) { + switch (relation) + { case CPUFREQ_RELATION_H: - if (freq <= target_freq) { - if (freq >= optimal.frequency) { + if ( freq <= target_freq ) + { + if ( freq >= optimal.frequency ) + { optimal.frequency = freq; optimal.index = i; } - } else { - if (freq <= suboptimal.frequency) { + } + else + { + if ( freq <= suboptimal.frequency ) + { suboptimal.frequency = freq; suboptimal.index = i; } } break; case CPUFREQ_RELATION_L: - if (freq >= target_freq) { - if (freq <= optimal.frequency) { + if ( freq >= target_freq ) + { + if ( freq <= optimal.frequency ) + { optimal.frequency = freq; optimal.index = i; } - } else { - if (freq >= suboptimal.frequency) { + } + else + { + if ( freq >= suboptimal.frequency ) + { suboptimal.frequency = freq; suboptimal.index = i; } @@ -339,34 +360,34 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, break; } } - if (optimal.index > i) { - if (suboptimal.index > i) + if ( optimal.index > i ) + { + if ( suboptimal.index > i ) return -EINVAL; *index = suboptimal.index; - } else + } + else *index = optimal.index; return 0; } - /********************************************************************* * GOVERNORS * *********************************************************************/ int __cpufreq_driver_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int target_freq, unsigned int relation) { int retval = -EINVAL; - if (cpu_online(policy->cpu) && cpufreq_driver.target) + if ( cpu_online(policy->cpu) && cpufreq_driver.target ) { unsigned int prev_freq = policy->cur; retval = cpufreq_driver.target(policy, target_freq, relation); if ( retval == 0 ) - TRACE_2D(TRC_PM_FREQ_CHANGE, prev_freq/1000, policy->cur/1000); + TRACE_2D(TRC_PM_FREQ_CHANGE, prev_freq / 1000, policy->cur / 1000); } return retval; @@ -377,13 +398,13 @@ int cpufreq_driver_getavg(unsigned int cpu, unsigned int flag) struct cpufreq_policy *policy; int freq_avg; - if (!cpu_online(cpu) || !(policy = per_cpu(cpufreq_cpu_policy, cpu))) + if ( !cpu_online(cpu) || !(policy = per_cpu(cpufreq_cpu_policy, cpu)) ) return 0; - if (cpufreq_driver.getavg) + if ( cpufreq_driver.getavg ) { freq_avg = cpufreq_driver.getavg(cpu, flag); - if (freq_avg > 0) + if ( freq_avg > 0 ) return freq_avg; } @@ -396,33 +417,32 @@ int cpufreq_update_turbo(int cpuid, int new_state) int curr_state; int ret = 0; - if (new_state != CPUFREQ_TURBO_ENABLED && - new_state != CPUFREQ_TURBO_DISABLED) + if ( new_state != CPUFREQ_TURBO_ENABLED && + new_state != CPUFREQ_TURBO_DISABLED ) return -EINVAL; policy = per_cpu(cpufreq_cpu_policy, cpuid); - if (!policy) + if ( !policy ) return -EACCES; - if (policy->turbo == CPUFREQ_TURBO_UNSUPPORTED) + if ( policy->turbo == CPUFREQ_TURBO_UNSUPPORTED ) return -EOPNOTSUPP; curr_state = policy->turbo; - if (curr_state == new_state) + if ( curr_state == new_state ) return 0; policy->turbo = new_state; - if (cpufreq_driver.update) + if ( cpufreq_driver.update ) { ret = cpufreq_driver.update(cpuid, policy); - if (ret) + if ( ret ) policy->turbo = curr_state; } return ret; } - int cpufreq_get_turbo_status(int cpuid) { struct cpufreq_policy *policy; @@ -440,46 +460,49 @@ int cpufreq_get_turbo_status(int cpuid) * policy : policy to be set. 
*/ int __cpufreq_set_policy(struct cpufreq_policy *data, - struct cpufreq_policy *policy) + struct cpufreq_policy *policy) { int ret = 0; memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo)); - if (policy->min > data->min && policy->min > policy->max) + if ( policy->min > data->min && policy->min > policy->max ) return -EINVAL; /* verify the cpu speed can be set within this limit */ ret = cpufreq_driver.verify(policy); - if (ret) + if ( ret ) return ret; data->min = policy->min; data->max = policy->max; data->limits = policy->limits; - if (cpufreq_driver.setpolicy) + if ( cpufreq_driver.setpolicy ) return cpufreq_driver.setpolicy(data); - if (policy->governor != data->governor) { + if ( policy->governor != data->governor ) + { /* save old, working values */ struct cpufreq_governor *old_gov = data->governor; /* end old governor */ - if (data->governor) + if ( data->governor ) __cpufreq_governor(data, CPUFREQ_GOV_STOP); /* start new governor */ data->governor = policy->governor; - if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { + if ( __cpufreq_governor(data, CPUFREQ_GOV_START) ) + { printk(KERN_WARNING "Fail change to %s governor\n", - data->governor->name); + data->governor->name); /* new governor failed, so re-start old one */ data->governor = old_gov; - if (old_gov) { + if ( old_gov ) + { __cpufreq_governor(data, CPUFREQ_GOV_START); printk(KERN_WARNING "Still stay at %s governor\n", - data->governor->name); + data->governor->name); } return -EINVAL; } diff --git a/xen/drivers/passthrough/amd/iommu_acpi.c b/xen/drivers/passthrough/amd/iommu_acpi.c index 64d10481d7..871e6953bb 100644 --- a/xen/drivers/passthrough/amd/iommu_acpi.c +++ b/xen/drivers/passthrough/amd/iommu_acpi.c @@ -26,38 +26,41 @@ /* Some helper structures, particularly to deal with ranges. 
*/ -struct acpi_ivhd_device_range { - struct acpi_ivrs_device4 start; - struct acpi_ivrs_device4 end; +struct acpi_ivhd_device_range +{ + struct acpi_ivrs_device4 start; + struct acpi_ivrs_device4 end; }; -struct acpi_ivhd_device_alias_range { - struct acpi_ivrs_device8a alias; - struct acpi_ivrs_device4 end; +struct acpi_ivhd_device_alias_range +{ + struct acpi_ivrs_device8a alias; + struct acpi_ivrs_device4 end; }; -struct acpi_ivhd_device_extended_range { - struct acpi_ivrs_device8b extended; - struct acpi_ivrs_device4 end; +struct acpi_ivhd_device_extended_range +{ + struct acpi_ivrs_device8b extended; + struct acpi_ivrs_device4 end; }; union acpi_ivhd_device { - struct acpi_ivrs_de_header header; - struct acpi_ivrs_device4 select; - struct acpi_ivhd_device_range range; - struct acpi_ivrs_device8a alias; - struct acpi_ivhd_device_alias_range alias_range; - struct acpi_ivrs_device8b extended; - struct acpi_ivhd_device_extended_range extended_range; - struct acpi_ivrs_device8c special; + struct acpi_ivrs_de_header header; + struct acpi_ivrs_device4 select; + struct acpi_ivhd_device_range range; + struct acpi_ivrs_device8a alias; + struct acpi_ivhd_device_alias_range alias_range; + struct acpi_ivrs_device8b extended; + struct acpi_ivhd_device_extended_range extended_range; + struct acpi_ivrs_device8c special; }; -static void __init add_ivrs_mapping_entry( - u16 bdf, u16 alias_id, u8 flags, struct amd_iommu *iommu) +static void __init add_ivrs_mapping_entry(u16 bdf, u16 alias_id, u8 flags, + struct amd_iommu *iommu) { struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(iommu->seg); - ASSERT( ivrs_mappings != NULL ); + ASSERT(ivrs_mappings != NULL); /* setup requestor id */ ivrs_mappings[bdf].dte_requestor_id = alias_id; @@ -65,32 +68,32 @@ static void __init add_ivrs_mapping_entry( /* override flags for range of devices */ ivrs_mappings[bdf].device_flags = flags; - if (ivrs_mappings[alias_id].intremap_table == NULL ) + if ( ivrs_mappings[alias_id].intremap_table == NULL ) { - /* allocate per-device interrupt remapping table */ - if ( amd_iommu_perdev_intremap ) - ivrs_mappings[alias_id].intremap_table = + /* allocate per-device interrupt remapping table */ + if ( amd_iommu_perdev_intremap ) + ivrs_mappings[alias_id].intremap_table = amd_iommu_alloc_intremap_table( &ivrs_mappings[alias_id].intremap_inuse); - else - { - if ( shared_intremap_table == NULL ) - shared_intremap_table = amd_iommu_alloc_intremap_table( - &shared_intremap_inuse); - ivrs_mappings[alias_id].intremap_table = shared_intremap_table; - ivrs_mappings[alias_id].intremap_inuse = shared_intremap_inuse; - } + else + { + if ( shared_intremap_table == NULL ) + shared_intremap_table = + amd_iommu_alloc_intremap_table(&shared_intremap_inuse); + ivrs_mappings[alias_id].intremap_table = shared_intremap_table; + ivrs_mappings[alias_id].intremap_inuse = shared_intremap_inuse; + } } /* assgin iommu hardware */ ivrs_mappings[bdf].iommu = iommu; } -static struct amd_iommu * __init find_iommu_from_bdf_cap( - u16 seg, u16 bdf, u16 cap_offset) +static struct amd_iommu *__init find_iommu_from_bdf_cap(u16 seg, u16 bdf, + u16 cap_offset) { struct amd_iommu *iommu; - for_each_amd_iommu ( iommu ) + for_each_amd_iommu (iommu) if ( (iommu->seg == seg) && (iommu->bdf == bdf) && (iommu->cap_offset == cap_offset) ) return iommu; @@ -98,8 +101,8 @@ static struct amd_iommu * __init find_iommu_from_bdf_cap( return NULL; } -static void __init reserve_iommu_exclusion_range( - struct amd_iommu *iommu, uint64_t base, uint64_t limit) +static void __init 
reserve_iommu_exclusion_range(struct amd_iommu *iommu, + uint64_t base, uint64_t limit) { /* need to extend exclusion range? */ if ( iommu->exclusion_enable ) @@ -115,17 +118,18 @@ static void __init reserve_iommu_exclusion_range( iommu->exclusion_limit = limit; } -static void __init reserve_iommu_exclusion_range_all( - struct amd_iommu *iommu, - unsigned long base, unsigned long limit) +static void __init reserve_iommu_exclusion_range_all(struct amd_iommu *iommu, + unsigned long base, + unsigned long limit) { reserve_iommu_exclusion_range(iommu, base, limit); iommu->exclusion_allow_all = IOMMU_CONTROL_ENABLED; } -static void __init reserve_unity_map_for_device( - u16 seg, u16 bdf, unsigned long base, - unsigned long length, u8 iw, u8 ir) +static void __init reserve_unity_map_for_device(u16 seg, u16 bdf, + unsigned long base, + unsigned long length, u8 iw, + u8 ir) { struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg); unsigned long old_top, new_top; @@ -134,7 +138,7 @@ static void __init reserve_unity_map_for_device( if ( ivrs_mappings[bdf].unity_map_enable ) { old_top = ivrs_mappings[bdf].addr_range_start + - ivrs_mappings[bdf].addr_range_length; + ivrs_mappings[bdf].addr_range_length; new_top = base + length; if ( old_top > new_top ) new_top = old_top; @@ -151,8 +155,9 @@ static void __init reserve_unity_map_for_device( ivrs_mappings[bdf].addr_range_length = length; } -static int __init register_exclusion_range_for_all_devices( - unsigned long base, unsigned long limit, u8 iw, u8 ir) +static int __init register_exclusion_range_for_all_devices(unsigned long base, + unsigned long limit, + u8 iw, u8 ir) { int seg = 0; /* XXX */ unsigned long range_top, iommu_top, length; @@ -178,15 +183,17 @@ static int __init register_exclusion_range_for_all_devices( /* register IOMMU exclusion range settings */ if ( limit >= iommu_top ) { - for_each_amd_iommu( iommu ) + for_each_amd_iommu (iommu) reserve_iommu_exclusion_range_all(iommu, base, limit); } return 0; } -static int __init register_exclusion_range_for_device( - u16 bdf, unsigned long base, unsigned long limit, u8 iw, u8 ir) +static int __init register_exclusion_range_for_device(u16 bdf, + unsigned long base, + unsigned long limit, + u8 iw, u8 ir) { int seg = 0; /* XXX */ struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg); @@ -220,7 +227,7 @@ static int __init register_exclusion_range_for_device( } /* register IOMMU exclusion range settings for device */ - if ( limit >= iommu_top ) + if ( limit >= iommu_top ) { reserve_iommu_exclusion_range(iommu, base, limit); ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_ENABLED; @@ -231,8 +238,8 @@ static int __init register_exclusion_range_for_device( } static int __init register_exclusion_range_for_iommu_devices( - struct amd_iommu *iommu, - unsigned long base, unsigned long limit, u8 iw, u8 ir) + struct amd_iommu *iommu, unsigned long base, unsigned long limit, u8 iw, + u8 ir) { unsigned long range_top, iommu_top, length; unsigned int bdf; @@ -253,11 +260,11 @@ static int __init register_exclusion_range_for_iommu_devices( { if ( iommu == find_iommu_for_device(iommu->seg, bdf) ) { - reserve_unity_map_for_device(iommu->seg, bdf, base, length, - iw, ir); + reserve_unity_map_for_device(iommu->seg, bdf, base, length, iw, + ir); req = get_ivrs_mappings(iommu->seg)[bdf].dte_requestor_id; - reserve_unity_map_for_device(iommu->seg, req, base, length, - iw, ir); + reserve_unity_map_for_device(iommu->seg, req, base, length, iw, + ir); } } @@ -271,9 +278,9 @@ static int __init 
register_exclusion_range_for_iommu_devices( return 0; } -static int __init parse_ivmd_device_select( - const struct acpi_ivrs_memory *ivmd_block, - unsigned long base, unsigned long limit, u8 iw, u8 ir) +static int __init +parse_ivmd_device_select(const struct acpi_ivrs_memory *ivmd_block, + unsigned long base, unsigned long limit, u8 iw, u8 ir) { u16 bdf; @@ -287,9 +294,9 @@ static int __init parse_ivmd_device_select( return register_exclusion_range_for_device(bdf, base, limit, iw, ir); } -static int __init parse_ivmd_device_range( - const struct acpi_ivrs_memory *ivmd_block, - unsigned long base, unsigned long limit, u8 iw, u8 ir) +static int __init +parse_ivmd_device_range(const struct acpi_ivrs_memory *ivmd_block, + unsigned long base, unsigned long limit, u8 iw, u8 ir) { unsigned int first_bdf, last_bdf, bdf; int error; @@ -298,7 +305,8 @@ static int __init parse_ivmd_device_range( if ( first_bdf >= ivrs_bdf_entries ) { AMD_IOMMU_DEBUG("IVMD Error: " - "Invalid Range_First Dev_Id %#x\n", first_bdf); + "Invalid Range_First Dev_Id %#x\n", + first_bdf); return -ENODEV; } @@ -306,20 +314,20 @@ static int __init parse_ivmd_device_range( if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) ) { AMD_IOMMU_DEBUG("IVMD Error: " - "Invalid Range_Last Dev_Id %#x\n", last_bdf); + "Invalid Range_Last Dev_Id %#x\n", + last_bdf); return -ENODEV; } for ( bdf = first_bdf, error = 0; (bdf <= last_bdf) && !error; bdf++ ) - error = register_exclusion_range_for_device( - bdf, base, limit, iw, ir); + error = register_exclusion_range_for_device(bdf, base, limit, iw, ir); return error; } -static int __init parse_ivmd_device_iommu( - const struct acpi_ivrs_memory *ivmd_block, - unsigned long base, unsigned long limit, u8 iw, u8 ir) +static int __init +parse_ivmd_device_iommu(const struct acpi_ivrs_memory *ivmd_block, + unsigned long base, unsigned long limit, u8 iw, u8 ir) { int seg = 0; /* XXX */ struct amd_iommu *iommu; @@ -334,8 +342,8 @@ static int __init parse_ivmd_device_iommu( return -ENODEV; } - return register_exclusion_range_for_iommu_devices( - iommu, base, limit, iw, ir); + return register_exclusion_range_for_iommu_devices(iommu, base, limit, iw, + ir); } static int __init parse_ivmd_block(const struct acpi_ivrs_memory *ivmd_block) @@ -361,10 +369,11 @@ static int __init parse_ivmd_block(const struct acpi_ivrs_memory *ivmd_block) iw = ir = IOMMU_CONTROL_ENABLED; else if ( ivmd_block->header.flags & ACPI_IVMD_UNITY ) { - iw = ivmd_block->header.flags & ACPI_IVMD_READ ? - IOMMU_CONTROL_ENABLED : IOMMU_CONTROL_DISABLED; - ir = ivmd_block->header.flags & ACPI_IVMD_WRITE ? - IOMMU_CONTROL_ENABLED : IOMMU_CONTROL_DISABLED; + iw = ivmd_block->header.flags & ACPI_IVMD_READ ? IOMMU_CONTROL_ENABLED + : IOMMU_CONTROL_DISABLED; + ir = ivmd_block->header.flags & ACPI_IVMD_WRITE + ? 
IOMMU_CONTROL_ENABLED + : IOMMU_CONTROL_DISABLED; } else { @@ -372,23 +381,19 @@ static int __init parse_ivmd_block(const struct acpi_ivrs_memory *ivmd_block) return -ENODEV; } - switch( ivmd_block->header.type ) + switch (ivmd_block->header.type) { case ACPI_IVRS_TYPE_MEMORY_ALL: - return register_exclusion_range_for_all_devices( - base, limit, iw, ir); + return register_exclusion_range_for_all_devices(base, limit, iw, ir); case ACPI_IVRS_TYPE_MEMORY_ONE: - return parse_ivmd_device_select(ivmd_block, - base, limit, iw, ir); + return parse_ivmd_device_select(ivmd_block, base, limit, iw, ir); case ACPI_IVRS_TYPE_MEMORY_RANGE: - return parse_ivmd_device_range(ivmd_block, - base, limit, iw, ir); + return parse_ivmd_device_range(ivmd_block, base, limit, iw, ir); case ACPI_IVRS_TYPE_MEMORY_IOMMU: - return parse_ivmd_device_iommu(ivmd_block, - base, limit, iw, ir); + return parse_ivmd_device_iommu(ivmd_block, base, limit, iw, ir); default: AMD_IOMMU_DEBUG("IVMD Error: Invalid Block Type!\n"); @@ -396,8 +401,8 @@ static int __init parse_ivmd_block(const struct acpi_ivrs_memory *ivmd_block) } } -static u16 __init parse_ivhd_device_padding( - u16 pad_length, u16 header_length, u16 block_length) +static u16 __init parse_ivhd_device_padding(u16 pad_length, u16 header_length, + u16 block_length) { if ( header_length < (block_length + pad_length) ) { @@ -426,8 +431,8 @@ static u16 __init parse_ivhd_device_select( } static u16 __init parse_ivhd_device_range( - const struct acpi_ivhd_device_range *range, - u16 header_length, u16 block_length, struct amd_iommu *iommu) + const struct acpi_ivhd_device_range *range, u16 header_length, + u16 block_length, struct amd_iommu *iommu) { unsigned int dev_length, first_bdf, last_bdf, bdf; @@ -450,7 +455,8 @@ static u16 __init parse_ivhd_device_range( if ( first_bdf >= ivrs_bdf_entries ) { AMD_IOMMU_DEBUG("IVHD Error: " - "Invalid Range: First Dev_Id %#x\n", first_bdf); + "Invalid Range: First Dev_Id %#x\n", + first_bdf); return 0; } @@ -458,7 +464,8 @@ static u16 __init parse_ivhd_device_range( if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) ) { AMD_IOMMU_DEBUG("IVHD Error: " - "Invalid Range: Last Dev_Id %#x\n", last_bdf); + "Invalid Range: Last Dev_Id %#x\n", + last_bdf); return 0; } @@ -472,8 +479,8 @@ static u16 __init parse_ivhd_device_range( } static u16 __init parse_ivhd_device_alias( - const struct acpi_ivrs_device8a *alias, - u16 header_length, u16 block_length, struct amd_iommu *iommu) + const struct acpi_ivrs_device8a *alias, u16 header_length, u16 block_length, + struct amd_iommu *iommu) { u16 dev_length, alias_id, bdf; @@ -506,10 +513,9 @@ static u16 __init parse_ivhd_device_alias( } static u16 __init parse_ivhd_device_alias_range( - const struct acpi_ivhd_device_alias_range *range, - u16 header_length, u16 block_length, struct amd_iommu *iommu) + const struct acpi_ivhd_device_alias_range *range, u16 header_length, + u16 block_length, struct amd_iommu *iommu) { - unsigned int dev_length, first_bdf, last_bdf, alias_id, bdf; dev_length = sizeof(*range); @@ -531,15 +537,16 @@ static u16 __init parse_ivhd_device_alias_range( if ( first_bdf >= ivrs_bdf_entries ) { AMD_IOMMU_DEBUG("IVHD Error: " - "Invalid Range: First Dev_Id %#x\n", first_bdf); + "Invalid Range: First Dev_Id %#x\n", + first_bdf); return 0; } last_bdf = range->end.header.id; if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf ) { - AMD_IOMMU_DEBUG( - "IVHD Error: Invalid Range: Last Dev_Id %#x\n", last_bdf); + AMD_IOMMU_DEBUG("IVHD Error: Invalid Range: Last Dev_Id 
%#x\n", + last_bdf); return 0; } @@ -550,8 +557,8 @@ static u16 __init parse_ivhd_device_alias_range( return 0; } - AMD_IOMMU_DEBUG(" Dev_Id Range: %#x -> %#x alias %#x\n", - first_bdf, last_bdf, alias_id); + AMD_IOMMU_DEBUG(" Dev_Id Range: %#x -> %#x alias %#x\n", first_bdf, + last_bdf, alias_id); for ( bdf = first_bdf; bdf <= last_bdf; bdf++ ) add_ivrs_mapping_entry(bdf, alias_id, range->alias.header.data_setting, @@ -561,8 +568,8 @@ static u16 __init parse_ivhd_device_alias_range( } static u16 __init parse_ivhd_device_extended( - const struct acpi_ivrs_device8b *ext, - u16 header_length, u16 block_length, struct amd_iommu *iommu) + const struct acpi_ivrs_device8b *ext, u16 header_length, u16 block_length, + struct amd_iommu *iommu) { u16 dev_length, bdf; @@ -586,8 +593,8 @@ static u16 __init parse_ivhd_device_extended( } static u16 __init parse_ivhd_device_extended_range( - const struct acpi_ivhd_device_extended_range *range, - u16 header_length, u16 block_length, struct amd_iommu *iommu) + const struct acpi_ivhd_device_extended_range *range, u16 header_length, + u16 block_length, struct amd_iommu *iommu) { unsigned int dev_length, first_bdf, last_bdf, bdf; @@ -610,7 +617,8 @@ static u16 __init parse_ivhd_device_extended_range( if ( first_bdf >= ivrs_bdf_entries ) { AMD_IOMMU_DEBUG("IVHD Error: " - "Invalid Range: First Dev_Id %#x\n", first_bdf); + "Invalid Range: First Dev_Id %#x\n", + first_bdf); return 0; } @@ -618,12 +626,12 @@ static u16 __init parse_ivhd_device_extended_range( if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) ) { AMD_IOMMU_DEBUG("IVHD Error: " - "Invalid Range: Last Dev_Id %#x\n", last_bdf); + "Invalid Range: Last Dev_Id %#x\n", + last_bdf); return 0; } - AMD_IOMMU_DEBUG(" Dev_Id Range: %#x -> %#x\n", - first_bdf, last_bdf); + AMD_IOMMU_DEBUG(" Dev_Id Range: %#x -> %#x\n", first_bdf, last_bdf); for ( bdf = first_bdf; bdf <= last_bdf; bdf++ ) add_ivrs_mapping_entry(bdf, bdf, range->extended.header.data_setting, @@ -697,8 +705,8 @@ static int __init parse_ivrs_hpet(const char *str) custom_param("ivrs_hpet[", parse_ivrs_hpet); static u16 __init parse_ivhd_device_special( - const struct acpi_ivrs_device8c *special, u16 seg, - u16 header_length, u16 block_length, struct amd_iommu *iommu) + const struct acpi_ivrs_device8c *special, u16 seg, u16 header_length, + u16 block_length, struct amd_iommu *iommu) { u16 dev_length, bdf; unsigned int apic, idx; @@ -722,7 +730,7 @@ static u16 __init parse_ivhd_device_special( special->variety, special->handle); add_ivrs_mapping_entry(bdf, bdf, special->header.data_setting, iommu); - switch ( special->variety ) + switch (special->variety) { case ACPI_IVHD_IOAPIC: if ( !iommu_intremap ) @@ -734,17 +742,17 @@ static u16 __init parse_ivhd_device_special( */ for ( idx = 0; idx < nr_ioapic_sbdf; idx++ ) { - if ( ioapic_sbdf[idx].bdf == bdf && - ioapic_sbdf[idx].seg == seg && + if ( ioapic_sbdf[idx].bdf == bdf && ioapic_sbdf[idx].seg == seg && ioapic_sbdf[idx].cmdline ) break; } if ( idx < nr_ioapic_sbdf ) { - AMD_IOMMU_DEBUG("IVHD: Command line override present for IO-APIC %#x" - "(IVRS: %#x devID %04x:%02x:%02x.%u)\n", - ioapic_sbdf[idx].id, special->handle, seg, - PCI_BUS(bdf), PCI_SLOT(bdf), PCI_FUNC(bdf)); + AMD_IOMMU_DEBUG( + "IVHD: Command line override present for IO-APIC %#x" + "(IVRS: %#x devID %04x:%02x:%02x.%u)\n", + ioapic_sbdf[idx].id, special->handle, seg, PCI_BUS(bdf), + PCI_SLOT(bdf), PCI_FUNC(bdf)); break; } @@ -755,17 +763,20 @@ static u16 __init parse_ivhd_device_special( idx = 
ioapic_id_to_index(special->handle); if ( idx != MAX_IO_APICS && ioapic_sbdf[idx].cmdline ) - AMD_IOMMU_DEBUG("IVHD: Command line override present for IO-APIC %#x\n", - special->handle); + AMD_IOMMU_DEBUG( + "IVHD: Command line override present for IO-APIC %#x\n", + special->handle); else if ( idx != MAX_IO_APICS && ioapic_sbdf[idx].pin_2_idx ) { if ( ioapic_sbdf[idx].bdf == bdf && ioapic_sbdf[idx].seg == seg ) - AMD_IOMMU_DEBUG("IVHD Warning: Duplicate IO-APIC %#x entries\n", - special->handle); + AMD_IOMMU_DEBUG( + "IVHD Warning: Duplicate IO-APIC %#x entries\n", + special->handle); else { - printk(XENLOG_ERR "IVHD Error: Conflicting IO-APIC %#x entries\n", + printk(XENLOG_ERR + "IVHD Error: Conflicting IO-APIC %#x entries\n", special->handle); if ( amd_iommu_perdev_intremap ) return 0; @@ -785,17 +796,16 @@ static u16 __init parse_ivhd_device_special( ioapic_sbdf[idx].seg = seg; ioapic_sbdf[idx].id = special->handle; - ioapic_sbdf[idx].pin_2_idx = xmalloc_array( - u16, nr_ioapic_entries[apic]); - if ( nr_ioapic_entries[apic] && - !ioapic_sbdf[idx].pin_2_idx ) + ioapic_sbdf[idx].pin_2_idx = + xmalloc_array(u16, nr_ioapic_entries[apic]); + if ( nr_ioapic_entries[apic] && !ioapic_sbdf[idx].pin_2_idx ) { printk(XENLOG_ERR "IVHD Error: Out of memory\n"); return 0; } memset(ioapic_sbdf[idx].pin_2_idx, -1, nr_ioapic_entries[apic] * - sizeof(*ioapic_sbdf->pin_2_idx)); + sizeof(*ioapic_sbdf->pin_2_idx)); } break; } @@ -842,7 +852,7 @@ static u16 __init parse_ivhd_device_special( static inline size_t get_ivhd_header_size(const struct acpi_ivrs_hardware *ivhd_block) { - switch ( ivhd_block->header.type ) + switch (ivhd_block->header.type) { case ACPI_IVRS_TYPE_HARDWARE: return offsetof(struct acpi_ivrs_hardware, efr_image); @@ -856,7 +866,7 @@ static int __init parse_ivhd_block(const struct acpi_ivrs_hardware *ivhd_block) { const union acpi_ivhd_device *ivhd_device; u16 block_length, dev_length; - size_t hdr_size = get_ivhd_header_size(ivhd_block) ; + size_t hdr_size = get_ivhd_header_size(ivhd_block); struct amd_iommu *iommu; if ( ivhd_block->header.length < hdr_size ) @@ -887,45 +897,43 @@ static int __init parse_ivhd_block(const struct acpi_ivrs_hardware *ivhd_block) ivhd_device->header.type, ivhd_device->header.id, ivhd_device->header.data_setting); - switch ( ivhd_device->header.type ) + switch (ivhd_device->header.type) { case ACPI_IVRS_TYPE_PAD4: dev_length = parse_ivhd_device_padding( - sizeof(u32), - ivhd_block->header.length, block_length); + sizeof(u32), ivhd_block->header.length, block_length); break; case ACPI_IVRS_TYPE_PAD8: dev_length = parse_ivhd_device_padding( - sizeof(u64), - ivhd_block->header.length, block_length); + sizeof(u64), ivhd_block->header.length, block_length); break; case ACPI_IVRS_TYPE_SELECT: dev_length = parse_ivhd_device_select(&ivhd_device->select, iommu); break; case ACPI_IVRS_TYPE_START: - dev_length = parse_ivhd_device_range( - &ivhd_device->range, - ivhd_block->header.length, block_length, iommu); + dev_length = parse_ivhd_device_range(&ivhd_device->range, + ivhd_block->header.length, + block_length, iommu); break; case ACPI_IVRS_TYPE_ALIAS_SELECT: - dev_length = parse_ivhd_device_alias( - &ivhd_device->alias, - ivhd_block->header.length, block_length, iommu); + dev_length = parse_ivhd_device_alias(&ivhd_device->alias, + ivhd_block->header.length, + block_length, iommu); break; case ACPI_IVRS_TYPE_ALIAS_START: dev_length = parse_ivhd_device_alias_range( - &ivhd_device->alias_range, - ivhd_block->header.length, block_length, iommu); + 
&ivhd_device->alias_range, ivhd_block->header.length, + block_length, iommu); break; case ACPI_IVRS_TYPE_EXT_SELECT: - dev_length = parse_ivhd_device_extended( - &ivhd_device->extended, - ivhd_block->header.length, block_length, iommu); + dev_length = parse_ivhd_device_extended(&ivhd_device->extended, + ivhd_block->header.length, + block_length, iommu); break; case ACPI_IVRS_TYPE_EXT_START: dev_length = parse_ivhd_device_extended_range( - &ivhd_device->extended_range, - ivhd_block->header.length, block_length, iommu); + &ivhd_device->extended_range, ivhd_block->header.length, + block_length, iommu); break; case ACPI_IVRS_TYPE_SPECIAL: dev_length = parse_ivhd_device_special( @@ -977,9 +985,7 @@ static void __init dump_acpi_table_header(struct acpi_table_header *table) printk("%c", table->asl_compiler_id[i]); printk("\n"); - AMD_IOMMU_DEBUG(" Creator_Revision %#x\n", - table->asl_compiler_revision); - + AMD_IOMMU_DEBUG(" Creator_Revision %#x\n", table->asl_compiler_revision); } #define to_ivhd_block(hdr) \ @@ -1021,21 +1027,20 @@ static int __init parse_ivrs_table(struct acpi_table_header *table) ivrs_block = (struct acpi_ivrs_header *)((u8 *)table + length); AMD_IOMMU_DEBUG("IVRS Block: type %#x flags %#x len %#x id %#x\n", - ivrs_block->type, ivrs_block->flags, - ivrs_block->length, ivrs_block->device_id); + ivrs_block->type, ivrs_block->flags, ivrs_block->length, + ivrs_block->device_id); if ( table->length < (length + ivrs_block->length) ) { AMD_IOMMU_DEBUG("IVRS Error: " "Table Length Exceeded: %#x -> %#lx\n", - table->length, - (length + ivrs_block->length)); + table->length, (length + ivrs_block->length)); return -ENODEV; } if ( ivrs_block->type == ivhd_type ) error = parse_ivhd_block(to_ivhd_block(ivrs_block)); - else if ( is_ivmd_block (ivrs_block->type) ) + else if ( is_ivmd_block(ivrs_block->type) ) error = parse_ivmd_block(to_ivmd_block(ivrs_block)); length += ivrs_block->length; } @@ -1065,8 +1070,8 @@ static int __init parse_ivrs_table(struct acpi_table_header *table) if ( ioapic_sbdf[idx].pin_2_idx ) continue; - ioapic_sbdf[idx].pin_2_idx = xmalloc_array( - u16, nr_ioapic_entries[apic]); + ioapic_sbdf[idx].pin_2_idx = + xmalloc_array(u16, nr_ioapic_entries[apic]); if ( ioapic_sbdf[idx].pin_2_idx ) memset(ioapic_sbdf[idx].pin_2_idx, -1, nr_ioapic_entries[apic] * sizeof(*ioapic_sbdf->pin_2_idx)); @@ -1119,13 +1124,13 @@ static int __init detect_iommu_acpi(struct acpi_table_header *table) return 0; } -#define UPDATE_LAST_BDF(x) do {\ - if ((x) > last_bdf) \ - last_bdf = (x); \ - } while(0); +#define UPDATE_LAST_BDF(x) \ + do { \ + if ( (x) > last_bdf ) \ + last_bdf = (x); \ + } while ( 0 ); -static int __init get_last_bdf_ivhd( - const struct acpi_ivrs_hardware *ivhd_block) +static int __init get_last_bdf_ivhd(const struct acpi_ivrs_hardware *ivhd_block) { const union acpi_ivhd_device *ivhd_device; u16 block_length, dev_length; @@ -1144,7 +1149,7 @@ static int __init get_last_bdf_ivhd( { ivhd_device = (const void *)((u8 *)ivhd_block + block_length); - switch ( ivhd_device->header.type ) + switch (ivhd_device->header.type) { case ACPI_IVRS_TYPE_PAD4: dev_length = sizeof(u32); @@ -1236,8 +1241,7 @@ int __init amd_iommu_update_ivrs_mapping_acpi(void) return acpi_table_parse(ACPI_SIG_IVRS, parse_ivrs_table); } -static int __init -get_supported_ivhd_type(struct acpi_table_header *table) +static int __init get_supported_ivhd_type(struct acpi_table_header *table) { size_t length = sizeof(struct acpi_table_ivrs); const struct acpi_ivrs_header *ivrs_block, *blk = NULL; @@ -1250,17 
+1254,17 @@ get_supported_ivhd_type(struct acpi_table_header *table) { AMD_IOMMU_DEBUG("IVRS Error: " "Table Length Exceeded: %#x -> %#lx\n", - table->length, - (length + ivrs_block->length)); + table->length, (length + ivrs_block->length)); return -ENODEV; } if ( is_ivhd_block(ivrs_block->type) && - (!blk || blk->type < ivrs_block->type) ) + (!blk || blk->type < ivrs_block->type) ) { - AMD_IOMMU_DEBUG("IVRS Block: Found type %#x flags %#x len %#x id %#x\n", - ivrs_block->type, ivrs_block->flags, - ivrs_block->length, ivrs_block->device_id); + AMD_IOMMU_DEBUG( + "IVRS Block: Found type %#x flags %#x len %#x id %#x\n", + ivrs_block->type, ivrs_block->flags, ivrs_block->length, + ivrs_block->device_id); blk = ivrs_block; } length += ivrs_block->length; diff --git a/xen/drivers/passthrough/amd/iommu_cmd.c b/xen/drivers/passthrough/amd/iommu_cmd.c index af3a1fb865..523b2b9abd 100644 --- a/xen/drivers/passthrough/amd/iommu_cmd.c +++ b/xen/drivers/passthrough/amd/iommu_cmd.c @@ -30,12 +30,12 @@ static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[]) if ( ++tail == iommu->cmd_buffer.entries ) tail = 0; - head = iommu_get_rb_pointer(readl(iommu->mmio_base + - IOMMU_CMD_BUFFER_HEAD_OFFSET)); + head = iommu_get_rb_pointer( + readl(iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET)); if ( head != tail ) { memcpy(iommu->cmd_buffer.buffer + - (iommu->cmd_buffer.tail * IOMMU_CMD_BUFFER_ENTRY_SIZE), + (iommu->cmd_buffer.tail * IOMMU_CMD_BUFFER_ENTRY_SIZE), cmd, IOMMU_CMD_BUFFER_ENTRY_SIZE); iommu->cmd_buffer.tail = tail; @@ -50,7 +50,7 @@ static void commit_iommu_command_buffer(struct amd_iommu *iommu) u32 tail = 0; iommu_set_rb_pointer(&tail, iommu->cmd_buffer.tail); - writel(tail, iommu->mmio_base+IOMMU_CMD_BUFFER_TAIL_OFFSET); + writel(tail, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET); } int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]) @@ -75,11 +75,9 @@ static void flush_command_buffer(struct amd_iommu *iommu) /* send an empty COMPLETION_WAIT command to flush command buffer */ cmd[3] = cmd[2] = 0; - set_field_in_reg_u32(IOMMU_CMD_COMPLETION_WAIT, 0, - IOMMU_CMD_OPCODE_MASK, + set_field_in_reg_u32(IOMMU_CMD_COMPLETION_WAIT, 0, IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, &cmd[1]); - set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0, - IOMMU_COMP_WAIT_I_FLAG_MASK, + set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0, IOMMU_COMP_WAIT_I_FLAG_MASK, IOMMU_COMP_WAIT_I_FLAG_SHIFT, &cmd[0]); send_iommu_command(iommu, cmd); @@ -87,9 +85,9 @@ static void flush_command_buffer(struct amd_iommu *iommu) loop_count = 1000; do { status = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); - comp_wait = get_field_from_reg_u32(status, - IOMMU_STATUS_COMP_WAIT_INT_MASK, - IOMMU_STATUS_COMP_WAIT_INT_SHIFT); + comp_wait = + get_field_from_reg_u32(status, IOMMU_STATUS_COMP_WAIT_INT_MASK, + IOMMU_STATUS_COMP_WAIT_INT_SHIFT); --loop_count; } while ( !comp_wait && loop_count ); @@ -104,17 +102,17 @@ static void flush_command_buffer(struct amd_iommu *iommu) } /* Build low level iommu command messages */ -static void invalidate_iommu_pages(struct amd_iommu *iommu, - u64 io_addr, u16 domain_id, u16 order) +static void invalidate_iommu_pages(struct amd_iommu *iommu, u64 io_addr, + u16 domain_id, u16 order) { u64 addr_lo, addr_hi; u32 cmd[4], entry; int sflag = 0, pde = 0; - ASSERT ( order == 0 || order == 9 || order == 18 ); + ASSERT(order == 0 || order == 9 || order == 18); /* All pages associated with the domainID are invalidated */ - if ( order || (io_addr == INV_IOMMU_ALL_PAGES_ADDRESS ) ) + if ( order || 
(io_addr == INV_IOMMU_ALL_PAGES_ADDRESS) ) { sflag = 1; pde = 1; @@ -132,27 +130,22 @@ static void invalidate_iommu_pages(struct amd_iommu *iommu, addr_lo = io_addr & DMA_32BIT_MASK; addr_hi = io_addr >> 32; - set_field_in_reg_u32(domain_id, 0, - IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK, + set_field_in_reg_u32(domain_id, 0, IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK, IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry); set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry, - IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, - &entry); + IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, &entry); cmd[1] = entry; - set_field_in_reg_u32(sflag, 0, - IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK, + set_field_in_reg_u32(sflag, 0, IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK, IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry); - set_field_in_reg_u32(pde, entry, - IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK, + set_field_in_reg_u32(pde, entry, IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK, IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry); set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry, IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK, IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry); cmd[2] = entry; - set_field_in_reg_u32((u32)addr_hi, 0, - IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK, + set_field_in_reg_u32((u32)addr_hi, 0, IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK, IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry); cmd[3] = entry; @@ -160,17 +153,17 @@ static void invalidate_iommu_pages(struct amd_iommu *iommu, send_iommu_command(iommu, cmd); } -static void invalidate_iotlb_pages(struct amd_iommu *iommu, - u16 maxpend, u32 pasid, u16 queueid, - u64 io_addr, u16 dev_id, u16 order) +static void invalidate_iotlb_pages(struct amd_iommu *iommu, u16 maxpend, + u32 pasid, u16 queueid, u64 io_addr, + u16 dev_id, u16 order) { u64 addr_lo, addr_hi; u32 cmd[4], entry; int sflag = 0; - ASSERT ( order == 0 || order == 9 || order == 18 ); + ASSERT(order == 0 || order == 9 || order == 18); - if ( order || (io_addr == INV_IOMMU_ALL_PAGES_ADDRESS ) ) + if ( order || (io_addr == INV_IOMMU_ALL_PAGES_ADDRESS) ) sflag = 1; /* If sflag == 1, the size of the invalidate command is determined @@ -185,36 +178,27 @@ static void invalidate_iotlb_pages(struct amd_iommu *iommu, addr_lo = io_addr & DMA_32BIT_MASK; addr_hi = io_addr >> 32; - set_field_in_reg_u32(dev_id, 0, - IOMMU_INV_IOTLB_PAGES_DEVICE_ID_MASK, + set_field_in_reg_u32(dev_id, 0, IOMMU_INV_IOTLB_PAGES_DEVICE_ID_MASK, IOMMU_INV_IOTLB_PAGES_DEVICE_ID_SHIFT, &entry); - set_field_in_reg_u32(maxpend, entry, - IOMMU_INV_IOTLB_PAGES_MAXPEND_MASK, + set_field_in_reg_u32(maxpend, entry, IOMMU_INV_IOTLB_PAGES_MAXPEND_MASK, IOMMU_INV_IOTLB_PAGES_MAXPEND_SHIFT, &entry); - set_field_in_reg_u32(pasid & 0xff, entry, - IOMMU_INV_IOTLB_PAGES_PASID1_MASK, + set_field_in_reg_u32(pasid & 0xff, entry, IOMMU_INV_IOTLB_PAGES_PASID1_MASK, IOMMU_INV_IOTLB_PAGES_PASID1_SHIFT, &entry); cmd[0] = entry; set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOTLB_PAGES, 0, - IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, - &entry); - - set_field_in_reg_u32(pasid >> 8, entry, - IOMMU_INV_IOTLB_PAGES_PASID2_MASK, - IOMMU_INV_IOTLB_PAGES_PASID2_SHIFT, - &entry); - - set_field_in_reg_u32(queueid, entry, - IOMMU_INV_IOTLB_PAGES_QUEUEID_MASK, - IOMMU_INV_IOTLB_PAGES_QUEUEID_SHIFT, - &entry); + IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, &entry); + + set_field_in_reg_u32(pasid >> 8, entry, IOMMU_INV_IOTLB_PAGES_PASID2_MASK, + IOMMU_INV_IOTLB_PAGES_PASID2_SHIFT, &entry); + + set_field_in_reg_u32(queueid, entry, IOMMU_INV_IOTLB_PAGES_QUEUEID_MASK, + IOMMU_INV_IOTLB_PAGES_QUEUEID_SHIFT, &entry); 
cmd[1] = entry; - set_field_in_reg_u32(sflag, 0, - IOMMU_INV_IOTLB_PAGES_S_FLAG_MASK, + set_field_in_reg_u32(sflag, 0, IOMMU_INV_IOTLB_PAGES_S_FLAG_MASK, IOMMU_INV_IOTLB_PAGES_S_FLAG_MASK, &entry); set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry, @@ -222,28 +206,24 @@ static void invalidate_iotlb_pages(struct amd_iommu *iommu, IOMMU_INV_IOTLB_PAGES_ADDR_LOW_SHIFT, &entry); cmd[2] = entry; - set_field_in_reg_u32((u32)addr_hi, 0, - IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_MASK, + set_field_in_reg_u32((u32)addr_hi, 0, IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_MASK, IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_SHIFT, &entry); cmd[3] = entry; send_iommu_command(iommu, cmd); } -static void invalidate_dev_table_entry(struct amd_iommu *iommu, - u16 device_id) +static void invalidate_dev_table_entry(struct amd_iommu *iommu, u16 device_id) { u32 cmd[4], entry; cmd[3] = cmd[2] = 0; - set_field_in_reg_u32(device_id, 0, - IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK, + set_field_in_reg_u32(device_id, 0, IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK, IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT, &entry); cmd[0] = entry; set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY, 0, - IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, - &entry); + IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, &entry); cmd[1] = entry; send_iommu_command(iommu, cmd); @@ -254,13 +234,11 @@ static void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id) u32 cmd[4], entry; cmd[3] = cmd[2] = 0; - set_field_in_reg_u32(device_id, 0, - IOMMU_INV_INT_TABLE_DEVICE_ID_MASK, + set_field_in_reg_u32(device_id, 0, IOMMU_INV_INT_TABLE_DEVICE_ID_MASK, IOMMU_INV_INT_TABLE_DEVICE_ID_SHIFT, &entry); cmd[0] = entry; set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_INT_TABLE, 0, - IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, - &entry); + IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, &entry); cmd[1] = entry; send_iommu_command(iommu, cmd); } @@ -272,15 +250,14 @@ void invalidate_iommu_all(struct amd_iommu *iommu) cmd[3] = cmd[2] = cmd[0] = 0; set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_ALL, 0, - IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, - &entry); + IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, &entry); cmd[1] = entry; send_iommu_command(iommu, cmd); } -void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev, - daddr_t daddr, unsigned int order) +void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev, daddr_t daddr, + unsigned int order) { unsigned long flags; struct amd_iommu *iommu; @@ -297,8 +274,8 @@ void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev, if ( !iommu ) { AMD_IOMMU_DEBUG("%s: Can't find iommu for %04x:%02x:%02x.%u\n", - __func__, pdev->seg, pdev->bus, - PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + __func__, pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); return; } @@ -324,7 +301,7 @@ static void amd_iommu_flush_all_iotlbs(struct domain *d, daddr_t daddr, if ( !ats_enabled ) return; - for_each_pdev( d, pdev ) + for_each_pdev (d, pdev) { u8 devfn = pdev->devfn; @@ -337,15 +314,15 @@ static void amd_iommu_flush_all_iotlbs(struct domain *d, daddr_t daddr, } /* Flush iommu cache after p2m changes. 
*/ -static void _amd_iommu_flush_pages(struct domain *d, - daddr_t daddr, unsigned int order) +static void _amd_iommu_flush_pages(struct domain *d, daddr_t daddr, + unsigned int order) { unsigned long flags; struct amd_iommu *iommu; unsigned int dom_id = d->domain_id; /* send INVALIDATE_IOMMU_PAGES command */ - for_each_amd_iommu ( iommu ) + for_each_amd_iommu (iommu) { spin_lock_irqsave(&iommu->lock, flags); invalidate_iommu_pages(iommu, daddr, dom_id, order); @@ -362,15 +339,15 @@ void amd_iommu_flush_all_pages(struct domain *d) _amd_iommu_flush_pages(d, INV_IOMMU_ALL_PAGES_ADDRESS, 0); } -void amd_iommu_flush_pages(struct domain *d, - unsigned long dfn, unsigned int order) +void amd_iommu_flush_pages(struct domain *d, unsigned long dfn, + unsigned int order) { _amd_iommu_flush_pages(d, __dfn_to_daddr(dfn), order); } void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf) { - ASSERT( spin_is_locked(&iommu->lock) ); + ASSERT(spin_is_locked(&iommu->lock)); invalidate_dev_table_entry(iommu, bdf); flush_command_buffer(iommu); @@ -378,7 +355,7 @@ void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf) void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf) { - ASSERT( spin_is_locked(&iommu->lock) ); + ASSERT(spin_is_locked(&iommu->lock)); invalidate_interrupt_table(iommu, bdf); flush_command_buffer(iommu); @@ -386,7 +363,7 @@ void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf) void amd_iommu_flush_all_caches(struct amd_iommu *iommu) { - ASSERT( spin_is_locked(&iommu->lock) ); + ASSERT(spin_is_locked(&iommu->lock)); invalidate_iommu_all(iommu); flush_command_buffer(iommu); diff --git a/xen/drivers/passthrough/amd/iommu_detect.c b/xen/drivers/passthrough/amd/iommu_detect.c index 3c5d4de1a3..2ca050d671 100644 --- a/xen/drivers/passthrough/amd/iommu_detect.c +++ b/xen/drivers/passthrough/amd/iommu_detect.c @@ -25,8 +25,8 @@ #include #include -static int __init get_iommu_msi_capabilities( - u16 seg, u8 bus, u8 dev, u8 func, struct amd_iommu *iommu) +static int __init get_iommu_msi_capabilities(u16 seg, u8 bus, u8 dev, u8 func, + struct amd_iommu *iommu) { int pos; @@ -43,8 +43,8 @@ static int __init get_iommu_msi_capabilities( return 0; } -static int __init get_iommu_capabilities( - u16 seg, u8 bus, u8 dev, u8 func, u16 cap_ptr, struct amd_iommu *iommu) +static int __init get_iommu_capabilities(u16 seg, u8 bus, u8 dev, u8 func, + u16 cap_ptr, struct amd_iommu *iommu) { u8 type; @@ -61,22 +61,21 @@ static int __init get_iommu_capabilities( void __init get_iommu_features(struct amd_iommu *iommu) { u32 low, high; - int i = 0 ; + int i = 0; static const char *__initdata feature_str[] = { - "- Prefetch Pages Command", - "- Peripheral Page Service Request", - "- X2APIC Supported", - "- NX bit Supported", - "- Guest Translation", + "- Prefetch Pages Command", + "- Peripheral Page Service Request", + "- X2APIC Supported", + "- NX bit Supported", + "- Guest Translation", "- Reserved bit [5]", - "- Invalidate All Command", - "- Guest APIC supported", - "- Hardware Error Registers", - "- Performance Counters", - NULL - }; + "- Invalidate All Command", + "- Guest APIC supported", + "- Hardware Error Registers", + "- Performance Counters", + NULL}; - ASSERT( iommu->mmio_base ); + ASSERT(iommu->mmio_base); if ( !iommu_has_cap(iommu, PCI_CAP_EFRSUP_SHIFT) ) { @@ -94,13 +93,13 @@ void __init get_iommu_features(struct amd_iommu *iommu) while ( feature_str[i] ) { if ( amd_iommu_has_feature(iommu, i) ) - printk( " %s\n", feature_str[i]); + printk(" %s\n", 
feature_str[i]); i++; } } -int __init amd_iommu_detect_one_acpi( - const struct acpi_ivrs_hardware *ivhd_block) +int __init +amd_iommu_detect_one_acpi(const struct acpi_ivrs_hardware *ivhd_block) { struct amd_iommu *iommu; u8 bus, dev, func; @@ -112,8 +111,8 @@ int __init amd_iommu_detect_one_acpi( return -ENODEV; } - if ( !ivhd_block->header.device_id || - !ivhd_block->capability_offset || !ivhd_block->base_address) + if ( !ivhd_block->header.device_id || !ivhd_block->capability_offset || + !ivhd_block->base_address ) { AMD_IOMMU_DEBUG("Invalid IVHD Block!\n"); return -ENODEV; @@ -141,8 +140,8 @@ int __init amd_iommu_detect_one_acpi( dev = PCI_SLOT(iommu->bdf); func = PCI_FUNC(iommu->bdf); - rt = get_iommu_capabilities(iommu->seg, bus, dev, func, - iommu->cap_offset, iommu); + rt = get_iommu_capabilities(iommu->seg, bus, dev, func, iommu->cap_offset, + iommu); if ( rt ) goto out; @@ -152,14 +151,15 @@ int __init amd_iommu_detect_one_acpi( rt = pci_ro_device(iommu->seg, bus, PCI_DEVFN(dev, func)); if ( rt ) - printk(XENLOG_ERR - "Could not mark config space of %04x:%02x:%02x.%u read-only (%d)\n", - iommu->seg, bus, dev, func, rt); + printk( + XENLOG_ERR + "Could not mark config space of %04x:%02x:%02x.%u read-only (%d)\n", + iommu->seg, bus, dev, func, rt); list_add_tail(&iommu->list, &amd_iommu_head); rt = 0; - out: +out: if ( rt ) xfree(iommu); diff --git a/xen/drivers/passthrough/amd/iommu_guest.c b/xen/drivers/passthrough/amd/iommu_guest.c index 96175bb9ac..69f26e8280 100644 --- a/xen/drivers/passthrough/amd/iommu_guest.c +++ b/xen/drivers/passthrough/amd/iommu_guest.c @@ -21,30 +21,28 @@ #include #include +#define IOMMU_MMIO_SIZE 0x8000 +#define IOMMU_MMIO_PAGE_NR 0x8 +#define RING_BF_LENGTH_MASK 0x0F000000 +#define RING_BF_LENGTH_SHIFT 24 -#define IOMMU_MMIO_SIZE 0x8000 -#define IOMMU_MMIO_PAGE_NR 0x8 -#define RING_BF_LENGTH_MASK 0x0F000000 -#define RING_BF_LENGTH_SHIFT 24 - -#define PASMAX_9_bit 0x8 -#define GUEST_CR3_1_LEVEL 0x0 -#define GUEST_ADDRESS_SIZE_6_LEVEL 0x2 -#define HOST_ADDRESS_SIZE_6_LEVEL 0x2 +#define PASMAX_9_bit 0x8 +#define GUEST_CR3_1_LEVEL 0x0 +#define GUEST_ADDRESS_SIZE_6_LEVEL 0x2 +#define HOST_ADDRESS_SIZE_6_LEVEL 0x2 #define guest_iommu_set_status(iommu, bit) \ - iommu_set_bit(&((iommu)->reg_status.lo), bit) + iommu_set_bit(&((iommu)->reg_status.lo), bit) #define guest_iommu_clear_status(iommu, bit) \ - iommu_clear_bit(&((iommu)->reg_status.lo), bit) + iommu_clear_bit(&((iommu)->reg_status.lo), bit) -#define reg_to_u64(reg) (((uint64_t)reg.hi << 32) | reg.lo ) -#define u64_to_reg(reg, val) \ - do \ - { \ - (reg)->lo = (u32)(val); \ +#define reg_to_u64(reg) (((uint64_t)reg.hi << 32) | reg.lo) +#define u64_to_reg(reg, val) \ + do { \ + (reg)->lo = (u32)(val); \ (reg)->hi = (val) >> 32; \ - } while (0) + } while ( 0 ) static unsigned int machine_bdf(struct domain *d, uint16_t guest_bdf) { @@ -80,17 +78,14 @@ static uint64_t get_guest_cr3_from_dte(dev_entry_t *dte) { uint64_t gcr3_1, gcr3_2, gcr3_3; - gcr3_1 = get_field_from_reg_u32(dte->data[1], - IOMMU_DEV_TABLE_GCR3_1_MASK, + gcr3_1 = get_field_from_reg_u32(dte->data[1], IOMMU_DEV_TABLE_GCR3_1_MASK, IOMMU_DEV_TABLE_GCR3_1_SHIFT); - gcr3_2 = get_field_from_reg_u32(dte->data[2], - IOMMU_DEV_TABLE_GCR3_2_MASK, + gcr3_2 = get_field_from_reg_u32(dte->data[2], IOMMU_DEV_TABLE_GCR3_2_MASK, IOMMU_DEV_TABLE_GCR3_2_SHIFT); - gcr3_3 = get_field_from_reg_u32(dte->data[3], - IOMMU_DEV_TABLE_GCR3_3_MASK, + gcr3_3 = get_field_from_reg_u32(dte->data[3], IOMMU_DEV_TABLE_GCR3_3_MASK, IOMMU_DEV_TABLE_GCR3_3_SHIFT); - return 
((gcr3_3 << 31) | (gcr3_2 << 15 ) | (gcr3_1 << 12)) >> PAGE_SHIFT; + return ((gcr3_3 << 31) | (gcr3_2 << 15) | (gcr3_1 << 12)) >> PAGE_SHIFT; } static uint16_t get_domid_from_dte(dev_entry_t *dte) @@ -107,7 +102,7 @@ static uint16_t get_glx_from_dte(dev_entry_t *dte) static uint16_t get_gv_from_dte(dev_entry_t *dte) { - return get_field_from_reg_u32(dte->data[1],IOMMU_DEV_TABLE_GV_MASK, + return get_field_from_reg_u32(dte->data[1], IOMMU_DEV_TABLE_GV_MASK, IOMMU_DEV_TABLE_GV_SHIFT); } @@ -120,7 +115,7 @@ static unsigned int host_domid(struct domain *d, uint64_t g_domid) static unsigned long get_gfn_from_base_reg(uint64_t base_raw) { base_raw &= PADDR_MASK; - ASSERT ( base_raw != 0 ); + ASSERT(base_raw != 0); return base_raw >> PAGE_SHIFT; } @@ -167,9 +162,8 @@ static void guest_iommu_enable_ring_buffer(struct guest_iommu *iommu, struct guest_buffer *buffer, uint32_t entry_size) { - uint32_t length_raw = get_field_from_reg_u32(buffer->reg_base.hi, - RING_BF_LENGTH_MASK, - RING_BF_LENGTH_SHIFT); + uint32_t length_raw = get_field_from_reg_u32( + buffer->reg_base.hi, RING_BF_LENGTH_MASK, RING_BF_LENGTH_SHIFT); buffer->entries = 1 << length_raw; } @@ -281,8 +275,8 @@ static int do_complete_ppr_request(struct domain *d, cmd_entry_t *cmd) if ( !iommu ) { - AMD_IOMMU_DEBUG("%s: Fail to find iommu for bdf %x\n", - __func__, dev_id); + AMD_IOMMU_DEBUG("%s: Fail to find iommu for bdf %x\n", __func__, + dev_id); return -ENODEV; } @@ -299,15 +293,15 @@ static int do_invalidate_pages(struct domain *d, cmd_entry_t *cmd) struct amd_iommu *iommu = NULL; gdom_id = get_field_from_reg_u32(cmd->data[1], - IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK, - IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT); + IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK, + IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT); hdom_id = host_domid(d, gdom_id); set_field_in_reg_u32(hdom_id, cmd->data[1], IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK, IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &cmd->data[1]); - for_each_amd_iommu ( iommu ) + for_each_amd_iommu (iommu) amd_iommu_send_guest_cmd(iommu, cmd->data); return 0; @@ -317,7 +311,7 @@ static int do_invalidate_all(struct domain *d, cmd_entry_t *cmd) { struct amd_iommu *iommu = NULL; - for_each_amd_iommu ( iommu ) + for_each_amd_iommu (iommu) amd_iommu_flush_all_pages(d); return 0; @@ -333,8 +327,8 @@ static int do_invalidate_iotlb_pages(struct domain *d, cmd_entry_t *cmd) iommu = find_iommu_for_device(0, dev_id); if ( !iommu ) { - AMD_IOMMU_DEBUG("%s: Fail to find iommu for bdf %x\n", - __func__, dev_id); + AMD_IOMMU_DEBUG("%s: Fail to find iommu for bdf %x\n", __func__, + dev_id); return -ENODEV; } @@ -365,28 +359,28 @@ static int do_completion_wait(struct domain *d, cmd_entry_t *cmd) void *vaddr; data = (uint64_t)cmd->data[3] << 32 | cmd->data[2]; - gaddr_lo = get_field_from_reg_u32(cmd->data[0], - IOMMU_COMP_WAIT_ADDR_LOW_MASK, - IOMMU_COMP_WAIT_ADDR_LOW_SHIFT); - gaddr_hi = get_field_from_reg_u32(cmd->data[1], - IOMMU_COMP_WAIT_ADDR_HIGH_MASK, - IOMMU_COMP_WAIT_ADDR_HIGH_SHIFT); + gaddr_lo = + get_field_from_reg_u32(cmd->data[0], IOMMU_COMP_WAIT_ADDR_LOW_MASK, + IOMMU_COMP_WAIT_ADDR_LOW_SHIFT); + gaddr_hi = + get_field_from_reg_u32(cmd->data[1], IOMMU_COMP_WAIT_ADDR_HIGH_MASK, + IOMMU_COMP_WAIT_ADDR_HIGH_SHIFT); gaddr_64 = (gaddr_hi << 32) | (gaddr_lo << 3); gfn = gaddr_64 >> PAGE_SHIFT; - vaddr = map_domain_page(get_gfn(d, gfn ,&p2mt)); + vaddr = map_domain_page(get_gfn(d, gfn, &p2mt)); put_gfn(d, gfn); - write_u64_atomic((uint64_t *)(vaddr + (gaddr_64 & (PAGE_SIZE-1))), + write_u64_atomic((uint64_t *)(vaddr + (gaddr_64 & 
(PAGE_SIZE - 1))), data); unmap_domain_page(vaddr); } - com_wait_int_en = iommu_get_bit(iommu->reg_ctrl.lo, - IOMMU_CONTROL_COMP_WAIT_INT_SHIFT); - com_wait_int = iommu_get_bit(iommu->reg_status.lo, - IOMMU_STATUS_COMP_WAIT_INT_SHIFT); + com_wait_int_en = + iommu_get_bit(iommu->reg_ctrl.lo, IOMMU_CONTROL_COMP_WAIT_INT_SHIFT); + com_wait_int = + iommu_get_bit(iommu->reg_status.lo, IOMMU_STATUS_COMP_WAIT_INT_SHIFT); if ( com_wait_int_en && com_wait_int ) guest_iommu_deliver_msi(d); @@ -417,9 +411,8 @@ static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd) if ( (gbdf * sizeof(dev_entry_t)) > g_iommu->dev_table.size ) return 0; - dte_mfn = guest_iommu_get_table_mfn(d, - reg_to_u64(g_iommu->dev_table.reg_base), - sizeof(dev_entry_t), gbdf); + dte_mfn = guest_iommu_get_table_mfn( + d, reg_to_u64(g_iommu->dev_table.reg_base), sizeof(dev_entry_t), gbdf); ASSERT(mfn_valid(_mfn(dte_mfn))); /* Read guest dte information */ @@ -427,10 +420,10 @@ static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd) gdte = dte_base + gbdf % (PAGE_SIZE / sizeof(dev_entry_t)); - gdom_id = get_domid_from_dte(gdte); + gdom_id = get_domid_from_dte(gdte); gcr3_gfn = get_guest_cr3_from_dte(gdte); - glx = get_glx_from_dte(gdte); - gv = get_gv_from_dte(gdte); + glx = get_glx_from_dte(gdte); + gv = get_gv_from_dte(gdte); unmap_domain_page(dte_base); @@ -446,8 +439,7 @@ static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd) iommu = find_iommu_for_device(0, mbdf); if ( !iommu ) { - AMD_IOMMU_DEBUG("%s: Fail to find iommu for bdf %x!\n", - __func__, mbdf); + AMD_IOMMU_DEBUG("%s: Fail to find iommu for bdf %x!\n", __func__, mbdf); return -ENODEV; } @@ -457,8 +449,8 @@ static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd) mdte = iommu->dev_table.buffer + (req_id * sizeof(dev_entry_t)); spin_lock_irqsave(&iommu->lock, flags); - iommu_dte_set_guest_cr3((u32 *)mdte, hdom_id, - gcr3_mfn << PAGE_SHIFT, gv, glx); + iommu_dte_set_guest_cr3((u32 *)mdte, hdom_id, gcr3_mfn << PAGE_SHIFT, gv, + glx); amd_iommu_flush_device(iommu, req_id); spin_unlock_irqrestore(&iommu->lock, flags); @@ -499,18 +491,17 @@ static void guest_iommu_process_command(unsigned long _d) { int ret = 0; - cmd_mfn = guest_iommu_get_table_mfn(d, - reg_to_u64(iommu->cmd_buffer.reg_base), - sizeof(cmd_entry_t), head); + cmd_mfn = + guest_iommu_get_table_mfn(d, reg_to_u64(iommu->cmd_buffer.reg_base), + sizeof(cmd_entry_t), head); ASSERT(mfn_valid(_mfn(cmd_mfn))); cmd_base = map_domain_page(_mfn(cmd_mfn)); cmd = cmd_base + head % entries_per_page; - opcode = get_field_from_reg_u32(cmd->data[1], - IOMMU_CMD_OPCODE_MASK, + opcode = get_field_from_reg_u32(cmd->data[1], IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT); - switch ( opcode ) + switch (opcode) { case IOMMU_CMD_COMPLETION_WAIT: ret = do_completion_wait(d, cmd); @@ -534,7 +525,8 @@ static void guest_iommu_process_command(unsigned long _d) break; default: AMD_IOMMU_DEBUG("CMD: Unknown command cmd_type = %lx " - "head = %ld\n", opcode, head); + "head = %ld\n", + opcode, head); break; } @@ -556,26 +548,21 @@ static int guest_iommu_write_ctrl(struct guest_iommu *iommu, uint64_t newctrl) bool_t cmd_en_old, event_en_old, iommu_en_old; bool_t cmd_run; - iommu_en = iommu_get_bit(newctrl, - IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT); + iommu_en = iommu_get_bit(newctrl, IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT); iommu_en_old = iommu_get_bit(iommu->reg_ctrl.lo, IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT); - cmd_en = iommu_get_bit(newctrl, - IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT); + cmd_en = 
iommu_get_bit(newctrl, IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT); cmd_en_old = iommu_get_bit(iommu->reg_ctrl.lo, IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT); - cmd_run = iommu_get_bit(iommu->reg_status.lo, - IOMMU_STATUS_CMD_BUFFER_RUN_SHIFT); - event_en = iommu_get_bit(newctrl, - IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT); - event_en_old = iommu_get_bit(iommu->reg_ctrl.lo, - IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT); - - ppr_en = iommu_get_bit(newctrl, - IOMMU_CONTROL_PPR_ENABLE_SHIFT); - ppr_log_en = iommu_get_bit(newctrl, - IOMMU_CONTROL_PPR_LOG_ENABLE_SHIFT); + cmd_run = + iommu_get_bit(iommu->reg_status.lo, IOMMU_STATUS_CMD_BUFFER_RUN_SHIFT); + event_en = iommu_get_bit(newctrl, IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT); + event_en_old = + iommu_get_bit(iommu->reg_ctrl.lo, IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT); + + ppr_en = iommu_get_bit(newctrl, IOMMU_CONTROL_PPR_ENABLE_SHIFT); + ppr_log_en = iommu_get_bit(newctrl, IOMMU_CONTROL_PPR_LOG_ENABLE_SHIFT); if ( iommu_en ) { @@ -628,7 +615,7 @@ static uint64_t iommu_mmio_read64(struct guest_iommu *iommu, { uint64_t val; - switch ( offset ) + switch (offset) { case IOMMU_DEV_TABLE_BASE_LOW_OFFSET: val = reg_to_u64(iommu->dev_table.reg_base); @@ -690,10 +677,11 @@ static int guest_iommu_mmio_read(struct vcpu *v, unsigned long addr, offset = addr - iommu->mmio_base; - if ( unlikely((offset & (len - 1 )) || (len > 8)) ) + if ( unlikely((offset & (len - 1)) || (len > 8)) ) { AMD_IOMMU_DEBUG("iommu mmio read access is not aligned:" - " offset = %lx, len = %x\n", offset, len); + " offset = %lx, len = %x\n", + offset, len); return X86EMUL_UNHANDLEABLE; } @@ -707,15 +695,15 @@ static int guest_iommu_mmio_read(struct vcpu *v, unsigned long addr, val = iommu_mmio_read64(iommu, mmio); spin_unlock(&iommu->lock); - *pval = (val >> shift ) & mask; + *pval = (val >> shift) & mask; return X86EMUL_OKAY; } static void guest_iommu_mmio_write64(struct guest_iommu *iommu, - unsigned long offset, uint64_t val) + unsigned long offset, uint64_t val) { - switch ( offset ) + switch (offset) { case IOMMU_DEV_TABLE_BASE_LOW_OFFSET: u64_to_reg(&iommu->dev_table.reg_base, val); @@ -752,19 +740,19 @@ static void guest_iommu_mmio_write64(struct guest_iommu *iommu, u64_to_reg(&iommu->ppr_log.reg_tail, val); break; case IOMMU_STATUS_MMIO_OFFSET: - val &= IOMMU_STATUS_EVENT_OVERFLOW_MASK | - IOMMU_STATUS_EVENT_LOG_INT_MASK | - IOMMU_STATUS_COMP_WAIT_INT_MASK | - IOMMU_STATUS_PPR_LOG_OVERFLOW_MASK | - IOMMU_STATUS_PPR_LOG_INT_MASK | - IOMMU_STATUS_GAPIC_LOG_OVERFLOW_MASK | - IOMMU_STATUS_GAPIC_LOG_INT_MASK; + val &= + IOMMU_STATUS_EVENT_OVERFLOW_MASK | IOMMU_STATUS_EVENT_LOG_INT_MASK | + IOMMU_STATUS_COMP_WAIT_INT_MASK | + IOMMU_STATUS_PPR_LOG_OVERFLOW_MASK | IOMMU_STATUS_PPR_LOG_INT_MASK | + IOMMU_STATUS_GAPIC_LOG_OVERFLOW_MASK | + IOMMU_STATUS_GAPIC_LOG_INT_MASK; u64_to_reg(&iommu->reg_status, reg_to_u64(iommu->reg_status) & ~val); break; default: AMD_IOMMU_DEBUG("guest writes unknown mmio offset = %lx," - " val = %" PRIx64 "\n", offset, val); + " val = %" PRIx64 "\n", + offset, val); break; } } @@ -783,7 +771,8 @@ static int guest_iommu_mmio_write(struct vcpu *v, unsigned long addr, if ( unlikely((offset & (len - 1)) || (len > 8)) ) { AMD_IOMMU_DEBUG("iommu mmio write access is not aligned:" - " offset = %lx, len = %x\n", offset, len); + " offset = %lx, len = %x\n", + offset, len); return X86EMUL_UNHANDLEABLE; } @@ -835,31 +824,28 @@ static void guest_iommu_reg_init(struct guest_iommu *iommu) lower = upper = 0; /* Support prefetch */ - 
iommu_set_bit(&lower,IOMMU_EXT_FEATURE_PREFSUP_SHIFT); + iommu_set_bit(&lower, IOMMU_EXT_FEATURE_PREFSUP_SHIFT); /* Support PPR log */ - iommu_set_bit(&lower,IOMMU_EXT_FEATURE_PPRSUP_SHIFT); + iommu_set_bit(&lower, IOMMU_EXT_FEATURE_PPRSUP_SHIFT); /* Support guest translation */ - iommu_set_bit(&lower,IOMMU_EXT_FEATURE_GTSUP_SHIFT); + iommu_set_bit(&lower, IOMMU_EXT_FEATURE_GTSUP_SHIFT); /* Support invalidate all command */ - iommu_set_bit(&lower,IOMMU_EXT_FEATURE_IASUP_SHIFT); + iommu_set_bit(&lower, IOMMU_EXT_FEATURE_IASUP_SHIFT); /* Host translation size has 6 levels */ set_field_in_reg_u32(HOST_ADDRESS_SIZE_6_LEVEL, lower, IOMMU_EXT_FEATURE_HATS_MASK, - IOMMU_EXT_FEATURE_HATS_SHIFT, - &lower); + IOMMU_EXT_FEATURE_HATS_SHIFT, &lower); /* Guest translation size has 6 levels */ set_field_in_reg_u32(GUEST_ADDRESS_SIZE_6_LEVEL, lower, IOMMU_EXT_FEATURE_GATS_MASK, - IOMMU_EXT_FEATURE_GATS_SHIFT, - &lower); + IOMMU_EXT_FEATURE_GATS_SHIFT, &lower); /* Single level gCR3 */ set_field_in_reg_u32(GUEST_CR3_1_LEVEL, lower, IOMMU_EXT_FEATURE_GLXSUP_MASK, IOMMU_EXT_FEATURE_GLXSUP_SHIFT, &lower); /* 9 bit PASID */ - set_field_in_reg_u32(PASMAX_9_bit, upper, - IOMMU_EXT_FEATURE_PASMAX_MASK, + set_field_in_reg_u32(PASMAX_9_bit, upper, IOMMU_EXT_FEATURE_PASMAX_MASK, IOMMU_EXT_FEATURE_PASMAX_SHIFT, &upper); iommu->reg_ext_feature.lo = lower; @@ -877,11 +863,10 @@ static int guest_iommu_mmio_range(struct vcpu *v, unsigned long addr) static const struct hvm_mmio_ops iommu_mmio_ops = { .check = guest_iommu_mmio_range, .read = guest_iommu_mmio_read, - .write = guest_iommu_mmio_write -}; + .write = guest_iommu_mmio_write}; /* Domain specific initialization */ -int guest_iommu_init(struct domain* d) +int guest_iommu_init(struct domain *d) { struct guest_iommu *iommu; struct domain_iommu *hd = dom_iommu(d); @@ -902,8 +887,8 @@ int guest_iommu_init(struct domain* d) iommu->domain = d; hd->arch.g_iommu = iommu; - tasklet_init(&iommu->cmd_buffer_tasklet, - guest_iommu_process_command, (unsigned long)d); + tasklet_init(&iommu->cmd_buffer_tasklet, guest_iommu_process_command, + (unsigned long)d); spin_lock_init(&iommu->lock); diff --git a/xen/drivers/passthrough/amd/iommu_init.c b/xen/drivers/passthrough/amd/iommu_init.c index 17f39552a9..b5e15dc577 100644 --- a/xen/drivers/passthrough/amd/iommu_init.c +++ b/xen/drivers/passthrough/amd/iommu_init.c @@ -47,8 +47,7 @@ static int iommu_has_ht_flag(struct amd_iommu *iommu, u8 mask) static int __init map_iommu_mmio_region(struct amd_iommu *iommu) { - iommu->mmio_base = ioremap(iommu->mmio_base_phys, - IOMMU_MMIO_REGION_LENGTH); + iommu->mmio_base = ioremap(iommu->mmio_base_phys, IOMMU_MMIO_REGION_LENGTH); if ( !iommu->mmio_base ) return -ENOMEM; @@ -73,26 +72,27 @@ static void set_iommu_ht_flags(struct amd_iommu *iommu) /* Setup HT flags */ if ( iommu_has_cap(iommu, PCI_CAP_HT_TUNNEL_SHIFT) ) - iommu_has_ht_flag(iommu, ACPI_IVHD_TT_ENABLE) ? - iommu_set_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT) : - iommu_clear_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT); + iommu_has_ht_flag(iommu, ACPI_IVHD_TT_ENABLE) + ? iommu_set_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT) + : iommu_clear_bit(&entry, + IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT); - iommu_has_ht_flag(iommu, ACPI_IVHD_RES_PASS_PW) ? - iommu_set_bit(&entry, IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT): - iommu_clear_bit(&entry, IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT); + iommu_has_ht_flag(iommu, ACPI_IVHD_RES_PASS_PW) + ? 
iommu_set_bit(&entry, IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT) + : iommu_clear_bit(&entry, IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT); - iommu_has_ht_flag(iommu, ACPI_IVHD_ISOC) ? - iommu_set_bit(&entry, IOMMU_CONTROL_ISOCHRONOUS_SHIFT): - iommu_clear_bit(&entry, IOMMU_CONTROL_ISOCHRONOUS_SHIFT); + iommu_has_ht_flag(iommu, ACPI_IVHD_ISOC) + ? iommu_set_bit(&entry, IOMMU_CONTROL_ISOCHRONOUS_SHIFT) + : iommu_clear_bit(&entry, IOMMU_CONTROL_ISOCHRONOUS_SHIFT); - iommu_has_ht_flag(iommu, ACPI_IVHD_PASS_PW) ? - iommu_set_bit(&entry, IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT): - iommu_clear_bit(&entry, IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT); + iommu_has_ht_flag(iommu, ACPI_IVHD_PASS_PW) + ? iommu_set_bit(&entry, IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT) + : iommu_clear_bit(&entry, IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT); /* Force coherent */ iommu_set_bit(&entry, IOMMU_CONTROL_COHERENT_SHIFT); - writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET); + writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); } static void register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu) @@ -100,7 +100,7 @@ static void register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu) u64 addr_64, addr_lo, addr_hi; u32 entry; - ASSERT( iommu->dev_table.buffer ); + ASSERT(iommu->dev_table.buffer); addr_64 = (u64)virt_to_maddr(iommu->dev_table.buffer); addr_lo = addr_64 & DMA_32BIT_MASK; @@ -108,9 +108,9 @@ static void register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu) entry = 0; iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT); - set_field_in_reg_u32((iommu->dev_table.alloc_size / PAGE_SIZE) - 1, - entry, IOMMU_DEV_TABLE_SIZE_MASK, - IOMMU_DEV_TABLE_SIZE_SHIFT, &entry); + set_field_in_reg_u32((iommu->dev_table.alloc_size / PAGE_SIZE) - 1, entry, + IOMMU_DEV_TABLE_SIZE_MASK, IOMMU_DEV_TABLE_SIZE_SHIFT, + &entry); writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_LOW_OFFSET); entry = 0; @@ -125,7 +125,7 @@ static void register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu) u32 power_of2_entries; u32 entry; - ASSERT( iommu->cmd_buffer.buffer ); + ASSERT(iommu->cmd_buffer.buffer); addr_64 = virt_to_maddr(iommu->cmd_buffer.buffer); addr_lo = addr_64; @@ -136,14 +136,13 @@ static void register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu) writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET); power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) + - IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE; + IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE; entry = 0; iommu_set_addr_hi_to_reg(&entry, addr_hi); - set_field_in_reg_u32(power_of2_entries, entry, - IOMMU_CMD_BUFFER_LENGTH_MASK, + set_field_in_reg_u32(power_of2_entries, entry, IOMMU_CMD_BUFFER_LENGTH_MASK, IOMMU_CMD_BUFFER_LENGTH_SHIFT, &entry); - writel(entry, iommu->mmio_base+IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET); + writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET); } static void register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu) @@ -153,7 +152,7 @@ static void register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu) u32 power_of2_entries; u32 entry; - ASSERT( iommu->event_log.buffer ); + ASSERT(iommu->event_log.buffer); addr_64 = virt_to_maddr(iommu->event_log.buffer); addr_lo = addr_64; @@ -168,10 +167,9 @@ static void register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu) entry = 0; iommu_set_addr_hi_to_reg(&entry, addr_hi); - set_field_in_reg_u32(power_of2_entries, entry, - IOMMU_EVENT_LOG_LENGTH_MASK, - IOMMU_EVENT_LOG_LENGTH_SHIFT, 
&entry); - writel(entry, iommu->mmio_base+IOMMU_EVENT_LOG_BASE_HIGH_OFFSET); + set_field_in_reg_u32(power_of2_entries, entry, IOMMU_EVENT_LOG_LENGTH_MASK, + IOMMU_EVENT_LOG_LENGTH_SHIFT, &entry); + writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_HIGH_OFFSET); } static void register_iommu_ppr_log_in_mmio_space(struct amd_iommu *iommu) @@ -181,7 +179,7 @@ static void register_iommu_ppr_log_in_mmio_space(struct amd_iommu *iommu) u32 power_of2_entries; u32 entry; - ASSERT ( iommu->ppr_log.buffer ); + ASSERT(iommu->ppr_log.buffer); addr_64 = virt_to_maddr(iommu->ppr_log.buffer); addr_lo = addr_64; @@ -196,25 +194,21 @@ static void register_iommu_ppr_log_in_mmio_space(struct amd_iommu *iommu) entry = 0; iommu_set_addr_hi_to_reg(&entry, addr_hi); - set_field_in_reg_u32(power_of2_entries, entry, - IOMMU_PPR_LOG_LENGTH_MASK, - IOMMU_PPR_LOG_LENGTH_SHIFT, &entry); + set_field_in_reg_u32(power_of2_entries, entry, IOMMU_PPR_LOG_LENGTH_MASK, + IOMMU_PPR_LOG_LENGTH_SHIFT, &entry); writel(entry, iommu->mmio_base + IOMMU_PPR_LOG_BASE_HIGH_OFFSET); } - -static void set_iommu_translation_control(struct amd_iommu *iommu, - int enable) +static void set_iommu_translation_control(struct amd_iommu *iommu, int enable) { u32 entry; entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); - enable ? - iommu_set_bit(&entry, IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT) : - iommu_clear_bit(&entry, IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT); + enable ? iommu_set_bit(&entry, IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT) + : iommu_clear_bit(&entry, IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT); - writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET); + writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); } static void set_iommu_guest_translation_control(struct amd_iommu *iommu, @@ -224,18 +218,17 @@ static void set_iommu_guest_translation_control(struct amd_iommu *iommu, entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); - enable ? - iommu_set_bit(&entry, IOMMU_CONTROL_GT_ENABLE_SHIFT) : - iommu_clear_bit(&entry, IOMMU_CONTROL_GT_ENABLE_SHIFT); + enable ? 
iommu_set_bit(&entry, IOMMU_CONTROL_GT_ENABLE_SHIFT) + : iommu_clear_bit(&entry, IOMMU_CONTROL_GT_ENABLE_SHIFT); - writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET); + writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); if ( enable ) AMD_IOMMU_DEBUG("Guest Translation Enabled.\n"); } static void set_iommu_command_buffer_control(struct amd_iommu *iommu, - int enable) + int enable) { u32 entry; @@ -252,7 +245,7 @@ static void set_iommu_command_buffer_control(struct amd_iommu *iommu, else iommu_clear_bit(&entry, IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT); - writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET); + writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); } static void register_iommu_exclusion_range(struct amd_iommu *iommu) @@ -263,22 +256,21 @@ static void register_iommu_exclusion_range(struct amd_iommu *iommu) addr_lo = iommu->exclusion_limit; addr_hi = iommu->exclusion_limit >> 32; - set_field_in_reg_u32((u32)addr_hi, 0, - IOMMU_EXCLUSION_LIMIT_HIGH_MASK, + set_field_in_reg_u32((u32)addr_hi, 0, IOMMU_EXCLUSION_LIMIT_HIGH_MASK, IOMMU_EXCLUSION_LIMIT_HIGH_SHIFT, &entry); - writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_LIMIT_HIGH_OFFSET); + writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_LIMIT_HIGH_OFFSET); set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0, IOMMU_EXCLUSION_LIMIT_LOW_MASK, IOMMU_EXCLUSION_LIMIT_LOW_SHIFT, &entry); - writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_LIMIT_LOW_OFFSET); + writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_LIMIT_LOW_OFFSET); addr_lo = iommu->exclusion_base & DMA_32BIT_MASK; addr_hi = iommu->exclusion_base >> 32; entry = 0; iommu_set_addr_hi_to_reg(&entry, addr_hi); - writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_HIGH_OFFSET); + writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_BASE_HIGH_OFFSET); entry = 0; iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT); @@ -290,11 +282,10 @@ static void register_iommu_exclusion_range(struct amd_iommu *iommu) set_field_in_reg_u32(iommu->exclusion_enable, entry, IOMMU_EXCLUSION_RANGE_ENABLE_MASK, IOMMU_EXCLUSION_RANGE_ENABLE_SHIFT, &entry); - writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_LOW_OFFSET); + writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_BASE_LOW_OFFSET); } -static void set_iommu_event_log_control(struct amd_iommu *iommu, - int enable) +static void set_iommu_event_log_control(struct amd_iommu *iommu, int enable) { u32 entry; @@ -320,8 +311,7 @@ static void set_iommu_event_log_control(struct amd_iommu *iommu, writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); } -static void set_iommu_ppr_log_control(struct amd_iommu *iommu, - int enable) +static void set_iommu_ppr_log_control(struct amd_iommu *iommu, int enable) { u32 entry; @@ -350,25 +340,22 @@ static void set_iommu_ppr_log_control(struct amd_iommu *iommu, } /* read event log or ppr log from iommu ring buffer */ -static int iommu_read_log(struct amd_iommu *iommu, - struct ring_buffer *log, +static int iommu_read_log(struct amd_iommu *iommu, struct ring_buffer *log, unsigned int entry_size, void (*parse_func)(struct amd_iommu *, u32 *)) { u32 tail, head, *entry, tail_offest, head_offset; BUG_ON(!iommu || ((log != &iommu->event_log) && (log != &iommu->ppr_log))); - + spin_lock(&log->lock); /* make sure there's an entry in the log */ - tail_offest = ( log == &iommu->event_log ) ? - IOMMU_EVENT_LOG_TAIL_OFFSET : - IOMMU_PPR_LOG_TAIL_OFFSET; + tail_offest = (log == &iommu->event_log) ? IOMMU_EVENT_LOG_TAIL_OFFSET + : IOMMU_PPR_LOG_TAIL_OFFSET; - head_offset = ( log == &iommu->event_log ) ? 
- IOMMU_EVENT_LOG_HEAD_OFFSET : - IOMMU_PPR_LOG_HEAD_OFFSET; + head_offset = (log == &iommu->event_log) ? IOMMU_EVENT_LOG_HEAD_OFFSET + : IOMMU_PPR_LOG_HEAD_OFFSET; tail = readl(iommu->mmio_base + tail_offest); tail = iommu_get_rb_pointer(tail); @@ -390,13 +377,12 @@ static int iommu_read_log(struct amd_iommu *iommu, } spin_unlock(&log->lock); - + return 0; } /* reset event log or ppr log when overflow */ -static void iommu_reset_log(struct amd_iommu *iommu, - struct ring_buffer *log, +static void iommu_reset_log(struct amd_iommu *iommu, struct ring_buffer *log, void (*ctrl_func)(struct amd_iommu *iommu, int)) { u32 entry; @@ -405,9 +391,8 @@ static void iommu_reset_log(struct amd_iommu *iommu, BUG_ON(!iommu || ((log != &iommu->event_log) && (log != &iommu->ppr_log))); - run_bit = ( log == &iommu->event_log ) ? - IOMMU_STATUS_EVENT_LOG_RUN_SHIFT : - IOMMU_STATUS_PPR_LOG_RUN_SHIFT; + run_bit = (log == &iommu->event_log) ? IOMMU_STATUS_EVENT_LOG_RUN_SHIFT + : IOMMU_STATUS_PPR_LOG_RUN_SHIFT; /* wait until EventLogRun bit = 0 */ do { @@ -419,7 +404,8 @@ static void iommu_reset_log(struct amd_iommu *iommu, if ( log_run ) { AMD_IOMMU_DEBUG("Warning: Log Run bit %d is not cleared" - "before reset!\n", run_bit); + "before reset!\n", + run_bit); return; } @@ -478,7 +464,6 @@ static void iommu_msi_end(struct irq_desc *desc, u8 vector) ack_APIC_irq(); } - static hw_irq_controller iommu_msi_type = { .typename = "AMD-IOMMU-MSI", .startup = iommu_msi_startup, @@ -542,7 +527,7 @@ static void parse_event_log_entry(struct amd_iommu *iommu, u32 entry[]) }; code = get_field_from_reg_u32(entry[1], IOMMU_EVENT_CODE_MASK, - IOMMU_EVENT_CODE_SHIFT); + IOMMU_EVENT_CODE_SHIFT); /* * Workaround for erratum 732: @@ -566,17 +551,15 @@ static void parse_event_log_entry(struct amd_iommu *iommu, u32 entry[]) if ( code == IOMMU_EVENT_IO_PAGE_FAULT ) { device_id = iommu_get_devid_from_event(entry[0]); - domain_id = get_field_from_reg_u32(entry[1], - IOMMU_EVENT_DOMAIN_ID_MASK, + domain_id = get_field_from_reg_u32(entry[1], IOMMU_EVENT_DOMAIN_ID_MASK, IOMMU_EVENT_DOMAIN_ID_SHIFT); - flags = get_field_from_reg_u32(entry[1], - IOMMU_EVENT_FLAGS_MASK, + flags = get_field_from_reg_u32(entry[1], IOMMU_EVENT_FLAGS_MASK, IOMMU_EVENT_FLAGS_SHIFT); - addr= (u64*) (entry + 2); + addr = (u64 *)(entry + 2); printk(XENLOG_ERR "AMD-Vi: " - "%s: domain = %d, device id = %#x, " - "fault address = %#"PRIx64", flags = %#x\n", - event_str[code-1], domain_id, device_id, *addr, flags); + "%s: domain = %d, device id = %#x, " + "fault address = %#" PRIx64 ", flags = %#x\n", + event_str[code - 1], domain_id, device_id, *addr, flags); for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ ) if ( get_dma_requestor_id(iommu->seg, bdf) == device_id ) @@ -603,11 +586,11 @@ static void iommu_check_event_log(struct amd_iommu *iommu) writel(IOMMU_STATUS_EVENT_LOG_INT_MASK, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); - iommu_read_log(iommu, &iommu->event_log, - sizeof(event_entry_t), parse_event_log_entry); + iommu_read_log(iommu, &iommu->event_log, sizeof(event_entry_t), + parse_event_log_entry); spin_lock_irqsave(&iommu->lock, flags); - + /* Check event overflow. 
*/ entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); if ( iommu_get_bit(entry, IOMMU_STATUS_EVENT_OVERFLOW_SHIFT) ) @@ -640,7 +623,6 @@ static void iommu_check_event_log(struct amd_iommu *iommu) void parse_ppr_log_entry(struct amd_iommu *iommu, u32 entry[]) { - u16 device_id; u8 bus, devfn, code; struct pci_dev *pdev; @@ -692,9 +674,9 @@ static void iommu_check_ppr_log(struct amd_iommu *iommu) writel(IOMMU_STATUS_PPR_LOG_INT_MASK, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); - iommu_read_log(iommu, &iommu->ppr_log, - sizeof(ppr_entry_t), parse_ppr_log_entry); - + iommu_read_log(iommu, &iommu->ppr_log, sizeof(ppr_entry_t), + parse_ppr_log_entry); + spin_lock_irqsave(&iommu->lock, flags); /* Check event overflow. */ @@ -742,7 +724,8 @@ static void do_amd_iommu_irq(unsigned long data) * IOMMUs present in the system. This allows for having just one * tasklet (instead of one per each IOMMUs). */ - for_each_amd_iommu ( iommu ) { + for_each_amd_iommu (iommu) + { iommu_check_event_log(iommu); if ( iommu->ppr_log.buffer != NULL ) @@ -788,14 +771,14 @@ static bool_t __init set_iommu_interrupt_handler(struct amd_iommu *iommu) } pcidevs_lock(); - iommu->msi.dev = pci_get_pdev(iommu->seg, PCI_BUS(iommu->bdf), - PCI_DEVFN2(iommu->bdf)); + iommu->msi.dev = + pci_get_pdev(iommu->seg, PCI_BUS(iommu->bdf), PCI_DEVFN2(iommu->bdf)); pcidevs_unlock(); if ( !iommu->msi.dev ) { - AMD_IOMMU_DEBUG("IOMMU: no pdev for %04x:%02x:%02x.%u\n", - iommu->seg, PCI_BUS(iommu->bdf), - PCI_SLOT(iommu->bdf), PCI_FUNC(iommu->bdf)); + AMD_IOMMU_DEBUG("IOMMU: no pdev for %04x:%02x:%02x.%u\n", iommu->seg, + PCI_BUS(iommu->bdf), PCI_SLOT(iommu->bdf), + PCI_FUNC(iommu->bdf)); return 0; } control = pci_conf_read16(iommu->seg, PCI_BUS(iommu->bdf), @@ -839,8 +822,7 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) u8 dev = PCI_SLOT(iommu->bdf); u8 func = PCI_FUNC(iommu->bdf); - if ( (boot_cpu_data.x86 != 0x15) || - (boot_cpu_data.x86_model < 0x10) || + if ( (boot_cpu_data.x86 != 0x15) || (boot_cpu_data.x86_model < 0x10) || (boot_cpu_data.x86_model > 0x1f) ) return; @@ -854,8 +836,8 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) pci_conf_write32(iommu->seg, bus, dev, func, 0xf0, 0x90 | (1 << 8)); pci_conf_write32(iommu->seg, bus, dev, func, 0xf4, value | (1 << 2)); - printk(XENLOG_INFO - "AMD-Vi: Applying erratum 746 workaround for IOMMU at %04x:%02x:%02x.%u\n", + printk(XENLOG_INFO "AMD-Vi: Applying erratum 746 workaround for IOMMU at " + "%04x:%02x:%02x.%u\n", iommu->seg, bus, dev, func); /* Clear the enable writing bit */ @@ -871,7 +853,7 @@ static void enable_iommu(struct amd_iommu *iommu) if ( iommu->enabled ) { - spin_unlock_irqrestore(&iommu->lock, flags); + spin_unlock_irqrestore(&iommu->lock, flags); return; } @@ -909,7 +891,6 @@ static void enable_iommu(struct amd_iommu *iommu) iommu->enabled = 1; spin_unlock_irqrestore(&iommu->lock, flags); - } static void __init deallocate_buffer(void *buf, uint32_t sz) @@ -936,9 +917,9 @@ static void __init deallocate_ring_buffer(struct ring_buffer *ring_buf) ring_buf->tail = 0; } -static void * __init allocate_buffer(uint32_t alloc_size, const char *name) +static void *__init allocate_buffer(uint32_t alloc_size, const char *name) { - void * buffer; + void *buffer; int order = get_order_from_bytes(alloc_size); buffer = __alloc_amd_iommu_tables(order); @@ -953,23 +934,23 @@ static void * __init allocate_buffer(uint32_t alloc_size, const char *name) return buffer; } -static void * __init allocate_ring_buffer(struct ring_buffer 
*ring_buf, - uint32_t entry_size, - uint64_t entries, const char *name) +static void *__init allocate_ring_buffer(struct ring_buffer *ring_buf, + uint32_t entry_size, uint64_t entries, + const char *name) { ring_buf->head = 0; ring_buf->tail = 0; spin_lock_init(&ring_buf->lock); - - ring_buf->alloc_size = PAGE_SIZE << get_order_from_bytes(entries * - entry_size); + + ring_buf->alloc_size = PAGE_SIZE + << get_order_from_bytes(entries * entry_size); ring_buf->entries = ring_buf->alloc_size / entry_size; ring_buf->buffer = allocate_buffer(ring_buf->alloc_size, name); return ring_buf->buffer; } -static void * __init allocate_cmd_buffer(struct amd_iommu *iommu) +static void *__init allocate_cmd_buffer(struct amd_iommu *iommu) { /* allocate 'command buffer' in power of 2 increments of 4K */ return allocate_ring_buffer(&iommu->cmd_buffer, sizeof(cmd_entry_t), @@ -977,14 +958,14 @@ static void * __init allocate_cmd_buffer(struct amd_iommu *iommu) "Command Buffer"); } -static void * __init allocate_event_log(struct amd_iommu *iommu) +static void *__init allocate_event_log(struct amd_iommu *iommu) { /* allocate 'event log' in power of 2 increments of 4K */ return allocate_ring_buffer(&iommu->event_log, sizeof(event_entry_t), IOMMU_EVENT_LOG_DEFAULT_ENTRIES, "Event Log"); } -static void * __init allocate_ppr_log(struct amd_iommu *iommu) +static void *__init allocate_ppr_log(struct amd_iommu *iommu) { /* allocate 'ppr log' in power of 2 increments of 4K */ return allocate_ring_buffer(&iommu->ppr_log, sizeof(ppr_entry_t), @@ -1025,7 +1006,7 @@ static int __init amd_iommu_init_one(struct amd_iommu *iommu) iommu->dev_table.buffer = device_table.buffer; enable_iommu(iommu); - printk("AMD-Vi: IOMMU %d Enabled.\n", nr_amd_iommus ); + printk("AMD-Vi: IOMMU %d Enabled.\n", nr_amd_iommus); nr_amd_iommus++; softirq_tasklet_init(&amd_iommu_irq_tasklet, do_amd_iommu_irq, 0); @@ -1041,7 +1022,7 @@ static void __init amd_iommu_init_cleanup(void) struct amd_iommu *iommu, *next; /* free amd iommu list */ - list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list ) + list_for_each_entry_safe(iommu, next, &amd_iommu_head, list) { list_del(&iommu->list); if ( iommu->enabled ) @@ -1121,7 +1102,7 @@ static int __init alloc_ivrs_mappings(u16 seg) struct ivrs_mappings *ivrs_mappings; unsigned int bdf; - BUG_ON( !ivrs_bdf_entries ); + BUG_ON(!ivrs_bdf_entries); if ( get_ivrs_mappings(seg) ) return 0; @@ -1154,25 +1135,23 @@ static int __init alloc_ivrs_mappings(u16 seg) return 0; } -static int __init amd_iommu_setup_device_table( - u16 seg, struct ivrs_mappings *ivrs_mappings) +static int __init +amd_iommu_setup_device_table(u16 seg, struct ivrs_mappings *ivrs_mappings) { unsigned int bdf; void *intr_tb, *dte; - BUG_ON( (ivrs_bdf_entries == 0) ); + BUG_ON((ivrs_bdf_entries == 0)); /* allocate 'device table' on a 4K boundary */ - device_table.alloc_size = PAGE_SIZE << - get_order_from_bytes( - PAGE_ALIGN(ivrs_bdf_entries * - IOMMU_DEV_TABLE_ENTRY_SIZE)); - device_table.entries = device_table.alloc_size / - IOMMU_DEV_TABLE_ENTRY_SIZE; - - device_table.buffer = allocate_buffer(device_table.alloc_size, - "Device Table"); - if ( device_table.buffer == NULL ) + device_table.alloc_size = + PAGE_SIZE << get_order_from_bytes( + PAGE_ALIGN(ivrs_bdf_entries * IOMMU_DEV_TABLE_ENTRY_SIZE)); + device_table.entries = device_table.alloc_size / IOMMU_DEV_TABLE_ENTRY_SIZE; + + device_table.buffer = + allocate_buffer(device_table.alloc_size, "Device Table"); + if ( device_table.buffer == NULL ) return -ENOMEM; /* Add device table entries 
*/ @@ -1186,8 +1165,8 @@ static int __init amd_iommu_setup_device_table( dte = device_table.buffer + (bdf * IOMMU_DEV_TABLE_ENTRY_SIZE); iommu_dte_add_device_entry(dte, &ivrs_mappings[bdf]); - amd_iommu_set_intremap_table( - dte, (u64)virt_to_maddr(intr_tb), iommu_intremap); + amd_iommu_set_intremap_table(dte, (u64)virt_to_maddr(intr_tb), + iommu_intremap); } } @@ -1201,7 +1180,7 @@ static bool_t __init amd_sp5100_erratum28(void) u16 vendor_id, dev_id; u8 byte; - for (bus = 0; bus < 256; bus++) + for ( bus = 0; bus < 256; bus++ ) { id = pci_conf_read32(0, bus, 0x14, 0, PCI_VENDOR_ID); @@ -1209,14 +1188,16 @@ static bool_t __init amd_sp5100_erratum28(void) dev_id = (id >> 16) & 0xffff; /* SP5100 SMBus module sets Combined mode on */ - if (vendor_id != 0x1002 || dev_id != 0x4385) + if ( vendor_id != 0x1002 || dev_id != 0x4385 ) continue; byte = pci_conf_read8(0, bus, 0x14, 0, 0xad); if ( (byte >> 3) & 1 ) { - printk(XENLOG_WARNING "AMD-Vi: SP5100 erratum 28 detected, disabling IOMMU.\n" - "If possible, disable SATA Combined mode in BIOS or contact your vendor for BIOS update.\n"); + printk(XENLOG_WARNING + "AMD-Vi: SP5100 erratum 28 detected, disabling IOMMU.\n" + "If possible, disable SATA Combined mode in BIOS or contact " + "your vendor for BIOS update.\n"); return 1; } } @@ -1229,10 +1210,9 @@ int __init amd_iommu_init(void) struct amd_iommu *iommu; int rc = -ENODEV; - BUG_ON( !iommu_found() ); + BUG_ON(!iommu_found()); - if ( iommu_intremap && amd_iommu_perdev_intremap && - amd_sp5100_erratum28() ) + if ( iommu_intremap && amd_iommu_perdev_intremap && amd_sp5100_erratum28() ) goto error_out; /* We implies no IOMMU if ACPI indicates no MSI. */ @@ -1252,7 +1232,7 @@ int __init amd_iommu_init(void) ivrs_bdf_entries = rc; radix_tree_init(&ivrs_maps); - for_each_amd_iommu ( iommu ) + for_each_amd_iommu (iommu) { rc = alloc_ivrs_mappings(iommu->seg); if ( rc ) @@ -1283,7 +1263,7 @@ int __init amd_iommu_init(void) printk(XENLOG_DEBUG "AMD-Vi: Disabled HAP memory map sharing with IOMMU\n"); /* per iommu initialization */ - for_each_amd_iommu ( iommu ) + for_each_amd_iommu (iommu) { rc = amd_iommu_init_one(iommu); if ( rc ) @@ -1305,7 +1285,7 @@ static void disable_iommu(struct amd_iommu *iommu) if ( !iommu->enabled ) { - spin_unlock_irqrestore(&iommu->lock, flags); + spin_unlock_irqrestore(&iommu->lock, flags); return; } @@ -1324,20 +1304,18 @@ static void disable_iommu(struct amd_iommu *iommu) iommu->enabled = 0; spin_unlock_irqrestore(&iommu->lock, flags); - } static void invalidate_all_domain_pages(void) { struct domain *d; - for_each_domain( d ) + for_each_domain (d) amd_iommu_flush_all_pages(d); } -static int _invalidate_all_devices( - u16 seg, struct ivrs_mappings *ivrs_mappings) +static int _invalidate_all_devices(u16 seg, struct ivrs_mappings *ivrs_mappings) { - unsigned int bdf; + unsigned int bdf; u16 req_id; unsigned long flags; struct amd_iommu *iommu; @@ -1374,7 +1352,7 @@ void amd_iommu_crash_shutdown(void) { struct amd_iommu *iommu; - for_each_amd_iommu ( iommu ) + for_each_amd_iommu (iommu) disable_iommu(iommu); } @@ -1382,12 +1360,12 @@ void amd_iommu_resume(void) { struct amd_iommu *iommu; - for_each_amd_iommu ( iommu ) + for_each_amd_iommu (iommu) { - /* - * To make sure that iommus have not been touched - * before re-enablement - */ + /* + * To make sure that iommus have not been touched + * before re-enablement + */ disable_iommu(iommu); enable_iommu(iommu); } diff --git a/xen/drivers/passthrough/amd/iommu_intr.c b/xen/drivers/passthrough/amd/iommu_intr.c index 
dad2d1e5ab..d4d650ec33 100644 --- a/xen/drivers/passthrough/amd/iommu_intr.c +++ b/xen/drivers/passthrough/amd/iommu_intr.c @@ -23,7 +23,7 @@ #include #include -#define INTREMAP_TABLE_ORDER 1 +#define INTREMAP_TABLE_ORDER 1 #define INTREMAP_LENGTH 0xB #define INTREMAP_ENTRIES (1 << INTREMAP_LENGTH) @@ -40,7 +40,7 @@ unsigned int ioapic_id_to_index(unsigned int apic_id) { unsigned int idx; - for ( idx = 0 ; idx < nr_ioapic_sbdf; idx++ ) + for ( idx = 0; idx < nr_ioapic_sbdf; idx++ ) if ( ioapic_sbdf[idx].id == apic_id ) break; @@ -58,16 +58,16 @@ unsigned int __init get_next_ioapic_sbdf_index(void) return MAX_IO_APICS; } -static spinlock_t* get_intremap_lock(int seg, int req_id) +static spinlock_t *get_intremap_lock(int seg, int req_id) { - return (amd_iommu_perdev_intremap ? - &get_ivrs_mappings(seg)[req_id].intremap_lock: - &shared_intremap_lock); + return (amd_iommu_perdev_intremap + ? &get_ivrs_mappings(seg)[req_id].intremap_lock + : &shared_intremap_lock); } static int get_intremap_requestor_id(int seg, int bdf) { - ASSERT( bdf < ivrs_bdf_entries ); + ASSERT(bdf < ivrs_bdf_entries); return get_ivrs_mappings(seg)[bdf].dte_requestor_id; } @@ -76,7 +76,7 @@ static unsigned int alloc_intremap_entry(int seg, int bdf, unsigned int nr) unsigned long *inuse = get_ivrs_mappings(seg)[bdf].intremap_inuse; unsigned int slot = find_first_zero_bit(inuse, INTREMAP_ENTRIES); - for ( ; ; ) + for ( ;; ) { unsigned int end; @@ -105,7 +105,7 @@ static u32 *get_intremap_entry(int seg, int bdf, int offset) { u32 *table = get_ivrs_mappings(seg)[bdf].intremap_table; - ASSERT( (table != NULL) && (offset < INTREMAP_ENTRIES) ); + ASSERT((table != NULL) && (offset < INTREMAP_ENTRIES)); return table + offset; } @@ -118,30 +118,25 @@ static void free_intremap_entry(int seg, int bdf, int offset) __clear_bit(offset, get_ivrs_mappings(seg)[bdf].intremap_inuse); } -static void update_intremap_entry(u32* entry, u8 vector, u8 int_type, - u8 dest_mode, u8 dest) +static void update_intremap_entry(u32 *entry, u8 vector, u8 int_type, + u8 dest_mode, u8 dest) { - set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0, - INT_REMAP_ENTRY_REMAPEN_MASK, - INT_REMAP_ENTRY_REMAPEN_SHIFT, entry); + set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0, INT_REMAP_ENTRY_REMAPEN_MASK, + INT_REMAP_ENTRY_REMAPEN_SHIFT, entry); set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, *entry, - INT_REMAP_ENTRY_SUPIOPF_MASK, - INT_REMAP_ENTRY_SUPIOPF_SHIFT, entry); - set_field_in_reg_u32(int_type, *entry, - INT_REMAP_ENTRY_INTTYPE_MASK, - INT_REMAP_ENTRY_INTTYPE_SHIFT, entry); + INT_REMAP_ENTRY_SUPIOPF_MASK, + INT_REMAP_ENTRY_SUPIOPF_SHIFT, entry); + set_field_in_reg_u32(int_type, *entry, INT_REMAP_ENTRY_INTTYPE_MASK, + INT_REMAP_ENTRY_INTTYPE_SHIFT, entry); set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, *entry, - INT_REMAP_ENTRY_REQEOI_MASK, - INT_REMAP_ENTRY_REQEOI_SHIFT, entry); - set_field_in_reg_u32((u32)dest_mode, *entry, - INT_REMAP_ENTRY_DM_MASK, - INT_REMAP_ENTRY_DM_SHIFT, entry); - set_field_in_reg_u32((u32)dest, *entry, - INT_REMAP_ENTRY_DEST_MAST, - INT_REMAP_ENTRY_DEST_SHIFT, entry); - set_field_in_reg_u32((u32)vector, *entry, - INT_REMAP_ENTRY_VECTOR_MASK, - INT_REMAP_ENTRY_VECTOR_SHIFT, entry); + INT_REMAP_ENTRY_REQEOI_MASK, + INT_REMAP_ENTRY_REQEOI_SHIFT, entry); + set_field_in_reg_u32((u32)dest_mode, *entry, INT_REMAP_ENTRY_DM_MASK, + INT_REMAP_ENTRY_DM_SHIFT, entry); + set_field_in_reg_u32((u32)dest, *entry, INT_REMAP_ENTRY_DEST_MAST, + INT_REMAP_ENTRY_DEST_SHIFT, entry); + set_field_in_reg_u32((u32)vector, *entry, INT_REMAP_ENTRY_VECTOR_MASK, + 
INT_REMAP_ENTRY_VECTOR_SHIFT, entry); } static inline int get_rte_index(const struct IO_APIC_route_entry *rte) @@ -155,15 +150,12 @@ static inline void set_rte_index(struct IO_APIC_route_entry *rte, int offset) rte->delivery_mode = offset >> 8; } -static int update_intremap_entry_from_ioapic( - int bdf, - struct amd_iommu *iommu, - struct IO_APIC_route_entry *rte, - bool_t lo_update, - u16 *index) +static int update_intremap_entry_from_ioapic(int bdf, struct amd_iommu *iommu, + struct IO_APIC_route_entry *rte, + bool_t lo_update, u16 *index) { unsigned long flags; - u32* entry; + u32 *entry; u8 delivery_mode, dest, vector, dest_mode; int req_id; spinlock_t *lock; @@ -201,12 +193,11 @@ static int update_intremap_entry_from_ioapic( * so need to recover vector and delivery mode from IRTE. */ ASSERT(get_rte_index(rte) == offset); - vector = get_field_from_reg_u32(*entry, - INT_REMAP_ENTRY_VECTOR_MASK, + vector = get_field_from_reg_u32(*entry, INT_REMAP_ENTRY_VECTOR_MASK, INT_REMAP_ENTRY_VECTOR_SHIFT); - delivery_mode = get_field_from_reg_u32(*entry, - INT_REMAP_ENTRY_INTTYPE_MASK, - INT_REMAP_ENTRY_INTTYPE_SHIFT); + delivery_mode = + get_field_from_reg_u32(*entry, INT_REMAP_ENTRY_INTTYPE_MASK, + INT_REMAP_ENTRY_INTTYPE_SHIFT); } update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest); @@ -228,7 +219,7 @@ int __init amd_iommu_setup_ioapic_remapping(void) { struct IO_APIC_route_entry rte; unsigned long flags; - u32* entry; + u32 *entry; int apic, pin; u8 delivery_mode, dest, vector, dest_mode; u16 seg, bdf, req_id; @@ -258,7 +249,8 @@ int __init amd_iommu_setup_ioapic_remapping(void) if ( !iommu ) { AMD_IOMMU_DEBUG("Fail to find iommu for ioapic " - "device id = %04x:%04x\n", seg, bdf); + "device id = %04x:%04x\n", + seg, bdf); continue; } @@ -274,8 +266,8 @@ int __init amd_iommu_setup_ioapic_remapping(void) offset = alloc_intremap_entry(seg, req_id, 1); BUG_ON(offset >= INTREMAP_ENTRIES); entry = get_intremap_entry(iommu->seg, req_id, offset); - update_intremap_entry(entry, vector, - delivery_mode, dest_mode, dest); + update_intremap_entry(entry, vector, delivery_mode, dest_mode, + dest); spin_unlock_irqrestore(lock, flags); set_rte_index(&rte, offset); @@ -297,11 +289,11 @@ int __init amd_iommu_setup_ioapic_remapping(void) return 0; } -void amd_iommu_ioapic_update_ire( - unsigned int apic, unsigned int reg, unsigned int value) +void amd_iommu_ioapic_update_ire(unsigned int apic, unsigned int reg, + unsigned int value) { - struct IO_APIC_route_entry old_rte = { 0 }; - struct IO_APIC_route_entry new_rte = { 0 }; + struct IO_APIC_route_entry old_rte = {0}; + struct IO_APIC_route_entry new_rte = {0}; unsigned int rte_lo = (reg & 1) ? 
reg - 1 : reg; unsigned int pin = (reg - 0x10) / 2; int saved_mask, seg, bdf, rc; @@ -325,13 +317,14 @@ void amd_iommu_ioapic_update_ire( if ( !iommu ) { AMD_IOMMU_DEBUG("Fail to find iommu for ioapic device id =" - " %04x:%04x\n", seg, bdf); + " %04x:%04x\n", + seg, bdf); __io_apic_write(apic, reg, value); return; } /* save io-apic rte lower 32 bits */ - *((u32 *)&old_rte) = __io_apic_read(apic, rte_lo); + *((u32 *)&old_rte) = __io_apic_read(apic, rte_lo); saved_mask = old_rte.mask; if ( reg == rte_lo ) @@ -346,8 +339,7 @@ void amd_iommu_ioapic_update_ire( *(((u32 *)&new_rte) + 1) = value; } - if ( new_rte.mask && - ioapic_sbdf[idx].pin_2_idx[pin] >= INTREMAP_ENTRIES ) + if ( new_rte.mask && ioapic_sbdf[idx].pin_2_idx[pin] >= INTREMAP_ENTRIES ) { ASSERT(saved_mask); __io_apic_write(apic, reg, value); @@ -362,9 +354,8 @@ void amd_iommu_ioapic_update_ire( } /* Update interrupt remapping entry */ - rc = update_intremap_entry_from_ioapic( - bdf, iommu, &new_rte, reg == rte_lo, - &ioapic_sbdf[idx].pin_2_idx[pin]); + rc = update_intremap_entry_from_ioapic(bdf, iommu, &new_rte, reg == rte_lo, + &ioapic_sbdf[idx].pin_2_idx[pin]); __io_apic_write(apic, reg, ((u32 *)&new_rte)[reg != rte_lo]); @@ -388,8 +379,7 @@ void amd_iommu_ioapic_update_ire( } } -unsigned int amd_iommu_read_ioapic_from_ire( - unsigned int apic, unsigned int reg) +unsigned int amd_iommu_read_ioapic_from_ire(unsigned int apic, unsigned int reg) { unsigned int idx; unsigned int offset; @@ -411,23 +401,23 @@ unsigned int amd_iommu_read_ioapic_from_ire( ASSERT(offset == (val & (INTREMAP_ENTRIES - 1))); val &= ~(INTREMAP_ENTRIES - 1); - val |= get_field_from_reg_u32(*entry, - INT_REMAP_ENTRY_INTTYPE_MASK, - INT_REMAP_ENTRY_INTTYPE_SHIFT) << 8; - val |= get_field_from_reg_u32(*entry, - INT_REMAP_ENTRY_VECTOR_MASK, + val |= get_field_from_reg_u32(*entry, INT_REMAP_ENTRY_INTTYPE_MASK, + INT_REMAP_ENTRY_INTTYPE_SHIFT) + << 8; + val |= get_field_from_reg_u32(*entry, INT_REMAP_ENTRY_VECTOR_MASK, INT_REMAP_ENTRY_VECTOR_SHIFT); } return val; } -static int update_intremap_entry_from_msi_msg( - struct amd_iommu *iommu, u16 bdf, unsigned int nr, - int *remap_index, const struct msi_msg *msg, u32 *data) +static int update_intremap_entry_from_msi_msg(struct amd_iommu *iommu, u16 bdf, + unsigned int nr, int *remap_index, + const struct msi_msg *msg, + u32 *data) { unsigned long flags; - u32* entry; + u32 *entry; u16 req_id, alias_id; u8 delivery_mode, dest, vector, dest_mode; spinlock_t *lock; @@ -479,7 +469,7 @@ static int update_intremap_entry_from_msi_msg( * devices. 
*/ - if ( ( req_id != alias_id ) && + if ( (req_id != alias_id) && get_ivrs_mappings(iommu->seg)[alias_id].intremap_table != NULL ) { BUG_ON(get_ivrs_mappings(iommu->seg)[req_id].intremap_table != @@ -503,7 +493,7 @@ static struct amd_iommu *_find_iommu_for_device(int seg, int bdf) { struct amd_iommu *iommu; - list_for_each_entry ( iommu, &amd_iommu_head, list ) + list_for_each_entry (iommu, &amd_iommu_head, list) if ( iommu->seg == seg && iommu->bdf == bdf ) return NULL; @@ -511,13 +501,12 @@ static struct amd_iommu *_find_iommu_for_device(int seg, int bdf) if ( iommu ) return iommu; - AMD_IOMMU_DEBUG("No IOMMU for MSI dev = %04x:%02x:%02x.%u\n", - seg, PCI_BUS(bdf), PCI_SLOT(bdf), PCI_FUNC(bdf)); + AMD_IOMMU_DEBUG("No IOMMU for MSI dev = %04x:%02x:%02x.%u\n", seg, + PCI_BUS(bdf), PCI_SLOT(bdf), PCI_FUNC(bdf)); return ERR_PTR(-EINVAL); } -int amd_iommu_msi_msg_update_ire( - struct msi_desc *msi_desc, struct msi_msg *msg) +int amd_iommu_msi_msg_update_ire(struct msi_desc *msi_desc, struct msi_msg *msg) { struct pci_dev *pdev = msi_desc->dev; int bdf, seg, rc; @@ -538,9 +527,8 @@ int amd_iommu_msi_msg_update_ire( if ( msi_desc->remap_index >= 0 && !msg ) { do { - update_intremap_entry_from_msi_msg(iommu, bdf, nr, - &msi_desc->remap_index, - NULL, NULL); + update_intremap_entry_from_msi_msg( + iommu, bdf, nr, &msi_desc->remap_index, NULL, NULL); if ( !pdev || !pdev->phantom_stride ) break; bdf += pdev->phantom_stride; @@ -556,9 +544,8 @@ int amd_iommu_msi_msg_update_ire( return 0; do { - rc = update_intremap_entry_from_msi_msg(iommu, bdf, nr, - &msi_desc->remap_index, - msg, &data); + rc = update_intremap_entry_from_msi_msg( + iommu, bdf, nr, &msi_desc->remap_index, msg, &data); if ( rc || !pdev || !pdev->phantom_stride ) break; bdf += pdev->phantom_stride; @@ -574,8 +561,7 @@ int amd_iommu_msi_msg_update_ire( return rc; } -void amd_iommu_read_msi_from_ire( - struct msi_desc *msi_desc, struct msi_msg *msg) +void amd_iommu_read_msi_from_ire(struct msi_desc *msi_desc, struct msi_msg *msg) { unsigned int offset = msg->data & (INTREMAP_ENTRIES - 1); const struct pci_dev *pdev = msi_desc->dev; @@ -597,16 +583,15 @@ void amd_iommu_read_msi_from_ire( } msg->data &= ~(INTREMAP_ENTRIES - 1); - msg->data |= get_field_from_reg_u32(*entry, - INT_REMAP_ENTRY_INTTYPE_MASK, - INT_REMAP_ENTRY_INTTYPE_SHIFT) << 8; - msg->data |= get_field_from_reg_u32(*entry, - INT_REMAP_ENTRY_VECTOR_MASK, + msg->data |= get_field_from_reg_u32(*entry, INT_REMAP_ENTRY_INTTYPE_MASK, + INT_REMAP_ENTRY_INTTYPE_SHIFT) + << 8; + msg->data |= get_field_from_reg_u32(*entry, INT_REMAP_ENTRY_VECTOR_MASK, INT_REMAP_ENTRY_VECTOR_SHIFT); } -int __init amd_iommu_free_intremap_table( - u16 seg, struct ivrs_mappings *ivrs_mapping) +int __init amd_iommu_free_intremap_table(u16 seg, + struct ivrs_mappings *ivrs_mapping) { void *tb = ivrs_mapping->intremap_table; @@ -619,7 +604,7 @@ int __init amd_iommu_free_intremap_table( return 0; } -void* __init amd_iommu_alloc_intremap_table(unsigned long **inuse_map) +void *__init amd_iommu_alloc_intremap_table(unsigned long **inuse_map) { void *tb; tb = __alloc_amd_iommu_tables(INTREMAP_TABLE_ORDER); @@ -652,8 +637,8 @@ int __init amd_setup_hpet_msi(struct msi_desc *msi_desc) lock = get_intremap_lock(hpet_sbdf.seg, hpet_sbdf.bdf); spin_lock_irqsave(lock, flags); - msi_desc->remap_index = alloc_intremap_entry(hpet_sbdf.seg, - hpet_sbdf.bdf, 1); + msi_desc->remap_index = + alloc_intremap_entry(hpet_sbdf.seg, hpet_sbdf.bdf, 1); if ( msi_desc->remap_index >= INTREMAP_ENTRIES ) { msi_desc->remap_index = -1; 
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c index 67329b0c95..e07e50da6e 100644 --- a/xen/drivers/passthrough/amd/iommu_map.c +++ b/xen/drivers/passthrough/amd/iommu_map.c @@ -46,8 +46,9 @@ static unsigned int clear_iommu_pte_present(unsigned long l1_mfn, pte = (table + pfn_to_pde_idx(dfn, 1)); flush_flags = get_field_from_reg_u32(*pte, IOMMU_PTE_PRESENT_MASK, - IOMMU_PTE_PRESENT_SHIFT) ? - IOMMU_FLUSHF_modified : 0; + IOMMU_PTE_PRESENT_SHIFT) + ? IOMMU_FLUSHF_modified + : 0; *pte = 0; unmap_domain_page(table); @@ -55,8 +56,7 @@ static unsigned int clear_iommu_pte_present(unsigned long l1_mfn, return flush_flags; } -static unsigned int set_iommu_pde_present(uint32_t *pde, - unsigned long next_mfn, +static unsigned int set_iommu_pde_present(uint32_t *pde, unsigned long next_mfn, unsigned int next_level, bool iw, bool ir) { @@ -75,24 +75,21 @@ static unsigned int set_iommu_pde_present(uint32_t *pde, unsigned int old_level; uint64_t maddr_old; - addr_hi = get_field_from_reg_u32(pde[1], - IOMMU_PTE_ADDR_HIGH_MASK, + addr_hi = get_field_from_reg_u32(pde[1], IOMMU_PTE_ADDR_HIGH_MASK, IOMMU_PTE_ADDR_HIGH_SHIFT); - addr_lo = get_field_from_reg_u32(pde[0], - IOMMU_PTE_ADDR_LOW_MASK, + addr_lo = get_field_from_reg_u32(pde[0], IOMMU_PTE_ADDR_LOW_MASK, IOMMU_PTE_ADDR_LOW_SHIFT); - old_level = get_field_from_reg_u32(pde[0], - IOMMU_PDE_NEXT_LEVEL_MASK, + old_level = get_field_from_reg_u32(pde[0], IOMMU_PDE_NEXT_LEVEL_MASK, IOMMU_PDE_NEXT_LEVEL_SHIFT); - old_w = get_field_from_reg_u32(pde[1], - IOMMU_PTE_IO_WRITE_PERMISSION_MASK, - IOMMU_PTE_IO_WRITE_PERMISSION_SHIFT); - old_r = get_field_from_reg_u32(pde[1], - IOMMU_PTE_IO_READ_PERMISSION_MASK, - IOMMU_PTE_IO_READ_PERMISSION_SHIFT); + old_w = + get_field_from_reg_u32(pde[1], IOMMU_PTE_IO_WRITE_PERMISSION_MASK, + IOMMU_PTE_IO_WRITE_PERMISSION_SHIFT); + old_r = + get_field_from_reg_u32(pde[1], IOMMU_PTE_IO_READ_PERMISSION_MASK, + IOMMU_PTE_IO_READ_PERMISSION_SHIFT); - maddr_old = ((uint64_t)addr_hi << 32) | - ((uint64_t)addr_lo << PAGE_SHIFT); + maddr_old = + ((uint64_t)addr_hi << 32) | ((uint64_t)addr_lo << PAGE_SHIFT); if ( maddr_old != maddr_next || iw != old_w || ir != old_r || old_level != next_level ) @@ -103,33 +100,27 @@ static unsigned int set_iommu_pde_present(uint32_t *pde, addr_hi = maddr_next >> 32; /* enable read/write permissions,which will be enforced at the PTE */ - set_field_in_reg_u32(addr_hi, 0, - IOMMU_PDE_ADDR_HIGH_MASK, + set_field_in_reg_u32(addr_hi, 0, IOMMU_PDE_ADDR_HIGH_MASK, IOMMU_PDE_ADDR_HIGH_SHIFT, &entry); - set_field_in_reg_u32(iw, entry, - IOMMU_PDE_IO_WRITE_PERMISSION_MASK, + set_field_in_reg_u32(iw, entry, IOMMU_PDE_IO_WRITE_PERMISSION_MASK, IOMMU_PDE_IO_WRITE_PERMISSION_SHIFT, &entry); - set_field_in_reg_u32(ir, entry, - IOMMU_PDE_IO_READ_PERMISSION_MASK, + set_field_in_reg_u32(ir, entry, IOMMU_PDE_IO_READ_PERMISSION_MASK, IOMMU_PDE_IO_READ_PERMISSION_SHIFT, &entry); /* FC bit should be enabled in PTE, this helps to solve potential * issues with ATS devices */ if ( next_level == 0 ) - set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry, - IOMMU_PTE_FC_MASK, IOMMU_PTE_FC_SHIFT, &entry); + set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry, IOMMU_PTE_FC_MASK, + IOMMU_PTE_FC_SHIFT, &entry); pde[1] = entry; /* mark next level as 'present' */ - set_field_in_reg_u32(addr_lo >> PAGE_SHIFT, 0, - IOMMU_PDE_ADDR_LOW_MASK, + set_field_in_reg_u32(addr_lo >> PAGE_SHIFT, 0, IOMMU_PDE_ADDR_LOW_MASK, IOMMU_PDE_ADDR_LOW_SHIFT, &entry); - set_field_in_reg_u32(next_level, entry, - 
IOMMU_PDE_NEXT_LEVEL_MASK, + set_field_in_reg_u32(next_level, entry, IOMMU_PDE_NEXT_LEVEL_MASK, IOMMU_PDE_NEXT_LEVEL_SHIFT, &entry); - set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry, - IOMMU_PDE_PRESENT_MASK, + set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry, IOMMU_PDE_PRESENT_MASK, IOMMU_PDE_PRESENT_SHIFT, &entry); pde[0] = entry; @@ -138,8 +129,7 @@ static unsigned int set_iommu_pde_present(uint32_t *pde, static unsigned int set_iommu_pte_present(unsigned long pt_mfn, unsigned long dfn, - unsigned long next_mfn, - int pde_level, + unsigned long next_mfn, int pde_level, bool iw, bool ir) { uint64_t *table; @@ -161,16 +151,14 @@ void amd_iommu_set_root_page_table(uint32_t *dte, uint64_t root_ptr, uint8_t valid) { uint32_t addr_hi, addr_lo, entry; - set_field_in_reg_u32(domain_id, 0, - IOMMU_DEV_TABLE_DOMAIN_ID_MASK, + set_field_in_reg_u32(domain_id, 0, IOMMU_DEV_TABLE_DOMAIN_ID_MASK, IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT, &entry); dte[2] = entry; addr_lo = root_ptr & DMA_32BIT_MASK; addr_hi = root_ptr >> 32; - set_field_in_reg_u32(addr_hi, 0, - IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK, + set_field_in_reg_u32(addr_hi, 0, IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK, IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT, &entry); set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry, IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_MASK, @@ -183,15 +171,13 @@ void amd_iommu_set_root_page_table(uint32_t *dte, uint64_t root_ptr, set_field_in_reg_u32(addr_lo >> PAGE_SHIFT, 0, IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK, IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT, &entry); - set_field_in_reg_u32(paging_mode, entry, - IOMMU_DEV_TABLE_PAGING_MODE_MASK, + set_field_in_reg_u32(paging_mode, entry, IOMMU_DEV_TABLE_PAGING_MODE_MASK, IOMMU_DEV_TABLE_PAGING_MODE_SHIFT, &entry); set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry, IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK, IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT, &entry); - set_field_in_reg_u32(valid ? IOMMU_CONTROL_ENABLED : - IOMMU_CONTROL_DISABLED, entry, - IOMMU_DEV_TABLE_VALID_MASK, + set_field_in_reg_u32(valid ? 
IOMMU_CONTROL_ENABLED : IOMMU_CONTROL_DISABLED, + entry, IOMMU_DEV_TABLE_VALID_MASK, IOMMU_DEV_TABLE_VALID_SHIFT, &entry); dte[0] = entry; } @@ -201,14 +187,13 @@ void iommu_dte_set_iotlb(uint32_t *dte, uint8_t i) uint32_t entry; entry = dte[3]; - set_field_in_reg_u32(!!i, entry, - IOMMU_DEV_TABLE_IOTLB_SUPPORT_MASK, + set_field_in_reg_u32(!!i, entry, IOMMU_DEV_TABLE_IOTLB_SUPPORT_MASK, IOMMU_DEV_TABLE_IOTLB_SUPPORT_SHIFT, &entry); dte[3] = entry; } -void __init amd_iommu_set_intremap_table( - uint32_t *dte, uint64_t intremap_ptr, uint8_t int_valid) +void __init amd_iommu_set_intremap_table(uint32_t *dte, uint64_t intremap_ptr, + uint8_t int_valid) { uint32_t addr_hi, addr_lo, entry; @@ -220,8 +205,7 @@ void __init amd_iommu_set_intremap_table( IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_MASK, IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_SHIFT, &entry); /* Fixed and arbitrated interrupts remapepd */ - set_field_in_reg_u32(2, entry, - IOMMU_DEV_TABLE_INT_CONTROL_MASK, + set_field_in_reg_u32(2, entry, IOMMU_DEV_TABLE_INT_CONTROL_MASK, IOMMU_DEV_TABLE_INT_CONTROL_SHIFT, &entry); dte[5] = entry; @@ -229,17 +213,16 @@ void __init amd_iommu_set_intremap_table( IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_MASK, IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_SHIFT, &entry); /* 2048 entries */ - set_field_in_reg_u32(0xB, entry, - IOMMU_DEV_TABLE_INT_TABLE_LENGTH_MASK, + set_field_in_reg_u32(0xB, entry, IOMMU_DEV_TABLE_INT_TABLE_LENGTH_MASK, IOMMU_DEV_TABLE_INT_TABLE_LENGTH_SHIFT, &entry); /* unmapped interrupt results io page faults*/ set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry, IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_MASK, IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_SHIFT, &entry); - set_field_in_reg_u32(int_valid ? IOMMU_CONTROL_ENABLED : - IOMMU_CONTROL_DISABLED, entry, - IOMMU_DEV_TABLE_INT_VALID_MASK, + set_field_in_reg_u32(int_valid ? 
IOMMU_CONTROL_ENABLED + : IOMMU_CONTROL_DISABLED, + entry, IOMMU_DEV_TABLE_INT_VALID_MASK, IOMMU_DEV_TABLE_INT_VALID_SHIFT, &entry); dte[4] = entry; } @@ -258,16 +241,13 @@ void __init iommu_dte_add_device_entry(uint32_t *dte, dev_ex = ivrs_dev->dte_allow_exclusion; flags &= mask; - set_field_in_reg_u32(flags, 0, - IOMMU_DEV_TABLE_IVHD_FLAGS_MASK, + set_field_in_reg_u32(flags, 0, IOMMU_DEV_TABLE_IVHD_FLAGS_MASK, IOMMU_DEV_TABLE_IVHD_FLAGS_SHIFT, &entry); dte[5] = entry; - set_field_in_reg_u32(sys_mgt, 0, - IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK, + set_field_in_reg_u32(sys_mgt, 0, IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK, IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT, &entry); - set_field_in_reg_u32(dev_ex, entry, - IOMMU_DEV_TABLE_ALLOW_EXCLUSION_MASK, + set_field_in_reg_u32(dev_ex, entry, IOMMU_DEV_TABLE_ALLOW_EXCLUSION_MASK, IOMMU_DEV_TABLE_ALLOW_EXCLUSION_SHIFT, &entry); dte[3] = entry; } @@ -287,34 +267,28 @@ void iommu_dte_set_guest_cr3(uint32_t *dte, uint16_t dom_id, uint64_t gcr3, IOMMU_DEV_TABLE_IOTLB_SUPPORT_MASK, IOMMU_DEV_TABLE_IOTLB_SUPPORT_SHIFT, &entry); /* update gcr3 */ - set_field_in_reg_u32(gcr3_3, entry, - IOMMU_DEV_TABLE_GCR3_3_MASK, + set_field_in_reg_u32(gcr3_3, entry, IOMMU_DEV_TABLE_GCR3_3_MASK, IOMMU_DEV_TABLE_GCR3_3_SHIFT, &entry); dte[3] = entry; - set_field_in_reg_u32(dom_id, entry, - IOMMU_DEV_TABLE_DOMAIN_ID_MASK, + set_field_in_reg_u32(dom_id, entry, IOMMU_DEV_TABLE_DOMAIN_ID_MASK, IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT, &entry); /* update gcr3 */ entry = dte[2]; - set_field_in_reg_u32(gcr3_2, entry, - IOMMU_DEV_TABLE_GCR3_2_MASK, + set_field_in_reg_u32(gcr3_2, entry, IOMMU_DEV_TABLE_GCR3_2_MASK, IOMMU_DEV_TABLE_GCR3_2_SHIFT, &entry); dte[2] = entry; entry = dte[1]; /* Enable GV bit */ - set_field_in_reg_u32(!!gv, entry, - IOMMU_DEV_TABLE_GV_MASK, + set_field_in_reg_u32(!!gv, entry, IOMMU_DEV_TABLE_GV_MASK, IOMMU_DEV_TABLE_GV_SHIFT, &entry); /* 1 level guest cr3 table */ - set_field_in_reg_u32(glx, entry, - IOMMU_DEV_TABLE_GLX_MASK, + set_field_in_reg_u32(glx, entry, IOMMU_DEV_TABLE_GLX_MASK, IOMMU_DEV_TABLE_GLX_SHIFT, &entry); /* update gcr3 */ - set_field_in_reg_u32(gcr3_1, entry, - IOMMU_DEV_TABLE_GCR3_1_MASK, + set_field_in_reg_u32(gcr3_1, entry, IOMMU_DEV_TABLE_GCR3_1_MASK, IOMMU_DEV_TABLE_GCR3_1_SHIFT, &entry); dte[1] = entry; } @@ -325,16 +299,13 @@ uint64_t amd_iommu_get_address_from_pte(void *pte) uint32_t addr_lo, addr_hi; uint64_t ptr; - addr_lo = get_field_from_reg_u32(entry[0], - IOMMU_PTE_ADDR_LOW_MASK, + addr_lo = get_field_from_reg_u32(entry[0], IOMMU_PTE_ADDR_LOW_MASK, IOMMU_PTE_ADDR_LOW_SHIFT); - addr_hi = get_field_from_reg_u32(entry[1], - IOMMU_PTE_ADDR_HIGH_MASK, + addr_hi = get_field_from_reg_u32(entry[1], IOMMU_PTE_ADDR_HIGH_MASK, IOMMU_PTE_ADDR_HIGH_SHIFT); - ptr = ((uint64_t)addr_hi << 32) | - ((uint64_t)addr_lo << PAGE_SHIFT); + ptr = ((uint64_t)addr_hi << 32) | ((uint64_t)addr_lo << PAGE_SHIFT); return ptr; } @@ -346,7 +317,7 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn, unsigned long pt_mfn[]) { uint64_t *pde, *next_table_vaddr; - unsigned long next_table_mfn; + unsigned long next_table_mfn; unsigned int level; struct page_info *table; const struct domain_iommu *hd = dom_iommu(d); @@ -354,7 +325,7 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn, table = hd->arch.root_table; level = hd->arch.paging_mode; - BUG_ON( table == NULL || level < 1 || level > 6 ); + BUG_ON(table == NULL || level < 1 || level > 6); next_table_mfn = mfn_x(page_to_mfn(table)); @@ -377,15 +348,14 @@ static int 
iommu_pde_from_dfn(struct domain *d, unsigned long dfn, /* Split super page frame into smaller pieces.*/ if ( iommu_is_pte_present((uint32_t *)pde) && - (iommu_next_level((uint32_t *)pde) == 0) && - next_table_mfn != 0 ) + (iommu_next_level((uint32_t *)pde) == 0) && next_table_mfn != 0 ) { int i; unsigned long mfn, pfn; unsigned int page_sz; page_sz = 1 << (PTE_PER_TABLE_SHIFT * (next_level - 1)); - pfn = dfn & ~((1 << (PTE_PER_TABLE_SHIFT * next_level)) - 1); + pfn = dfn & ~((1 << (PTE_PER_TABLE_SHIFT * next_level)) - 1); mfn = next_table_mfn; /* allocate lower level page table */ @@ -407,7 +377,7 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn, !!IOMMUF_writable, !!IOMMUF_readable); mfn += page_sz; pfn += page_sz; - } + } amd_iommu_flush_all_pages(d); } @@ -476,8 +446,7 @@ static int update_paging_mode(struct domain *d, unsigned long dfn) new_root = alloc_amd_iommu_pgtable(); if ( new_root == NULL ) { - AMD_IOMMU_DEBUG("%s Cannot allocate I/O page table\n", - __func__); + AMD_IOMMU_DEBUG("%s Cannot allocate I/O page table\n", __func__); return -ENOMEM; } @@ -498,10 +467,11 @@ static int update_paging_mode(struct domain *d, unsigned long dfn) if ( !pcidevs_locked() ) AMD_IOMMU_DEBUG("%s Try to access pdev_list " - "without aquiring pcidevs_lock.\n", __func__); + "without aquiring pcidevs_lock.\n", + __func__); /* Update device table entries using new root table and paging mode */ - for_each_pdev( d, pdev ) + for_each_pdev (d, pdev) { if ( pdev->type == DEV_TYPE_PCI_HOST_BRIDGE ) continue; @@ -521,10 +491,10 @@ static int update_paging_mode(struct domain *d, unsigned long dfn) (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE); /* valid = 0 only works for dom0 passthrough mode */ - amd_iommu_set_root_page_table((uint32_t *)device_entry, - page_to_maddr(hd->arch.root_table), - d->domain_id, - hd->arch.paging_mode, 1); + amd_iommu_set_root_page_table( + (uint32_t *)device_entry, + page_to_maddr(hd->arch.root_table), d->domain_id, + hd->arch.paging_mode, 1); amd_iommu_flush_device(iommu, req_id); bdf += pdev->phantom_stride; @@ -557,7 +527,7 @@ int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn, if ( rc ) { spin_unlock(&hd->arch.mapping_lock); - AMD_IOMMU_DEBUG("Root table alloc failed, dfn = %"PRI_dfn"\n", + AMD_IOMMU_DEBUG("Root table alloc failed, dfn = %" PRI_dfn "\n", dfn_x(dfn)); domain_crash(d); return rc; @@ -570,7 +540,7 @@ int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn, if ( update_paging_mode(d, dfn_x(dfn)) ) { spin_unlock(&hd->arch.mapping_lock); - AMD_IOMMU_DEBUG("Update page mode failed dfn = %"PRI_dfn"\n", + AMD_IOMMU_DEBUG("Update page mode failed dfn = %" PRI_dfn "\n", dfn_x(dfn)); domain_crash(d); return -EFAULT; @@ -580,15 +550,15 @@ int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn, if ( iommu_pde_from_dfn(d, dfn_x(dfn), pt_mfn) || (pt_mfn[1] == 0) ) { spin_unlock(&hd->arch.mapping_lock); - AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %"PRI_dfn"\n", + AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %" PRI_dfn "\n", dfn_x(dfn)); domain_crash(d); return -EFAULT; } /* Install 4k mapping */ - *flush_flags |= set_iommu_pte_present(pt_mfn[1], dfn_x(dfn), mfn_x(mfn), - 1, (flags & IOMMUF_writable), + *flush_flags |= set_iommu_pte_present(pt_mfn[1], dfn_x(dfn), mfn_x(mfn), 1, + (flags & IOMMUF_writable), (flags & IOMMUF_readable)); spin_unlock(&hd->arch.mapping_lock); @@ -596,8 +566,7 @@ int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn, return 0; } -int amd_iommu_unmap_page(struct domain *d, dfn_t dfn, - unsigned 
int *flush_flags) +int amd_iommu_unmap_page(struct domain *d, dfn_t dfn, unsigned int *flush_flags) { unsigned long pt_mfn[7]; struct domain_iommu *hd = dom_iommu(d); @@ -624,7 +593,7 @@ int amd_iommu_unmap_page(struct domain *d, dfn_t dfn, if ( rc ) { spin_unlock(&hd->arch.mapping_lock); - AMD_IOMMU_DEBUG("Update page mode failed dfn = %"PRI_dfn"\n", + AMD_IOMMU_DEBUG("Update page mode failed dfn = %" PRI_dfn "\n", dfn_x(dfn)); if ( rc != -EADDRNOTAVAIL ) domain_crash(d); @@ -635,7 +604,7 @@ int amd_iommu_unmap_page(struct domain *d, dfn_t dfn, if ( iommu_pde_from_dfn(d, dfn_x(dfn), pt_mfn) || (pt_mfn[1] == 0) ) { spin_unlock(&hd->arch.mapping_lock); - AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %"PRI_dfn"\n", + AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %" PRI_dfn "\n", dfn_x(dfn)); domain_crash(d); return -EFAULT; @@ -706,8 +675,7 @@ int amd_iommu_flush_iotlb_all(struct domain *d) return 0; } -int amd_iommu_reserve_domain_unity_map(struct domain *domain, - paddr_t phys_addr, +int amd_iommu_reserve_domain_unity_map(struct domain *domain, paddr_t phys_addr, unsigned long size, int iw, int ir) { unsigned long npages, i; @@ -732,9 +700,8 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain, } /* Use while-break to avoid compiler warning */ - while ( flush_flags && - amd_iommu_flush_iotlb_pages(domain, _dfn(gfn), - npages, flush_flags) ) + while ( flush_flags && amd_iommu_flush_iotlb_pages(domain, _dfn(gfn), + npages, flush_flags) ) break; return rt; diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c index 1dc02602f1..d71d31160a 100644 --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c @@ -52,7 +52,7 @@ struct amd_iommu *find_iommu_for_device(int seg, int bdf) ivrs_mappings[bdf] = tmp; printk(XENLOG_WARNING "%04x:%02x:%02x.%u not found in ACPI tables;" - " using same IOMMU as function 0\n", + " using same IOMMU as function 0\n", seg, PCI_BUS(bdf), PCI_SLOT(bdf), PCI_FUNC(bdf)); /* write iommu field last */ @@ -76,7 +76,7 @@ int get_dma_requestor_id(u16 seg, u16 bdf) struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg); int req_id; - BUG_ON ( bdf >= ivrs_bdf_entries ); + BUG_ON(bdf >= ivrs_bdf_entries); req_id = ivrs_mappings[bdf].dte_requestor_id; if ( (ivrs_mappings[bdf].intremap_table != NULL) && (ivrs_mappings[req_id].intremap_table != NULL) ) @@ -87,8 +87,7 @@ int get_dma_requestor_id(u16 seg, u16 bdf) static int is_translation_valid(u32 *entry) { - return (get_field_from_reg_u32(entry[0], - IOMMU_DEV_TABLE_VALID_MASK, + return (get_field_from_reg_u32(entry[0], IOMMU_DEV_TABLE_VALID_MASK, IOMMU_DEV_TABLE_VALID_SHIFT) && get_field_from_reg_u32(entry[0], IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK, @@ -109,9 +108,9 @@ static void disable_translation(u32 *dte) dte[0] = entry; } -static void amd_iommu_setup_domain_device( - struct domain *domain, struct amd_iommu *iommu, - u8 devfn, struct pci_dev *pdev) +static void amd_iommu_setup_domain_device(struct domain *domain, + struct amd_iommu *iommu, u8 devfn, + struct pci_dev *pdev) { void *dte; unsigned long flags; @@ -120,8 +119,8 @@ static void amd_iommu_setup_domain_device( u8 bus = pdev->bus; const struct domain_iommu *hd = dom_iommu(domain); - BUG_ON( !hd->arch.root_table || !hd->arch.paging_mode || - !iommu->dev_table.buffer ); + BUG_ON(!hd->arch.root_table || !hd->arch.paging_mode || + !iommu->dev_table.buffer); if ( iommu_hwdom_passthrough && is_hardware_domain(domain) ) valid = 0; @@ -149,10 +148,9 @@ static 
void amd_iommu_setup_domain_device( amd_iommu_flush_device(iommu, req_id); AMD_IOMMU_DEBUG("Setup I/O page table: device id = %#x, type = %#x, " - "root table = %#"PRIx64", " + "root table = %#" PRIx64 ", " "domain = %d, paging mode = %d\n", - req_id, pdev->type, - page_to_maddr(hd->arch.root_table), + req_id, pdev->type, page_to_maddr(hd->arch.root_table), domain->domain_id, hd->arch.paging_mode); } @@ -177,7 +175,7 @@ int __init amd_iov_detect(void) if ( !iommu_enable && !iommu_intremap ) return 0; - if ( (amd_iommu_detect_acpi() !=0) || (iommu_found() == 0) ) + if ( (amd_iommu_detect_acpi() != 0) || (iommu_found() == 0) ) { printk("AMD-Vi: IOMMU not found!\n"); iommu_intremap = 0; @@ -195,7 +193,8 @@ int __init amd_iov_detect(void) init_done = 1; if ( !amd_iommu_perdev_intremap ) - printk(XENLOG_WARNING "AMD-Vi: Using global interrupt remap table is not recommended (see XSA-36)!\n"); + printk(XENLOG_WARNING "AMD-Vi: Using global interrupt remap table is " + "not recommended (see XSA-36)!\n"); return scan_pci_devices(); } @@ -226,7 +225,7 @@ static int get_paging_mode(unsigned long entries) { int level = 1; - BUG_ON( !entries ); + BUG_ON(!entries); while ( entries > PTE_PER_TABLE_SIZE ) { @@ -257,7 +256,7 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d) if ( allocate_domain_resources(dom_iommu(d)) ) BUG(); - for_each_amd_iommu ( iommu ) + for_each_amd_iommu (iommu) if ( iomem_deny_access(d, PFN_DOWN(iommu->mmio_base_phys), PFN_DOWN(iommu->mmio_base_phys + IOMMU_MMIO_REGION_LENGTH - 1)) ) @@ -269,15 +268,15 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d) } void amd_iommu_disable_domain_device(struct domain *domain, - struct amd_iommu *iommu, - u8 devfn, struct pci_dev *pdev) + struct amd_iommu *iommu, u8 devfn, + struct pci_dev *pdev) { void *dte; unsigned long flags; int req_id; u8 bus = pdev->bus; - BUG_ON ( iommu->dev_table.buffer == NULL ); + BUG_ON(iommu->dev_table.buffer == NULL); req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(bus, devfn)); dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE); @@ -294,15 +293,14 @@ void amd_iommu_disable_domain_device(struct domain *domain, AMD_IOMMU_DEBUG("Disable: device id = %#x, " "domain = %d, paging mode = %d\n", - req_id, domain->domain_id, + req_id, domain->domain_id, dom_iommu(domain)->arch.paging_mode); } spin_unlock_irqrestore(&iommu->lock, flags); ASSERT(pcidevs_locked()); - if ( devfn == pdev->devfn && - pci_ats_device(iommu->seg, bus, devfn) && + if ( devfn == pdev->devfn && pci_ats_device(iommu->seg, bus, devfn) && pci_ats_enabled(iommu->seg, bus, devfn) ) disable_ats_device(pdev); } @@ -346,8 +344,7 @@ static int reassign_device(struct domain *source, struct domain *target, } static int amd_iommu_assign_device(struct domain *d, u8 devfn, - struct pci_dev *pdev, - u32 flag) + struct pci_dev *pdev, u32 flag) { struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(pdev->seg); int bdf = PCI_BDF2(pdev->bus, devfn); @@ -356,8 +353,7 @@ static int amd_iommu_assign_device(struct domain *d, u8 devfn, if ( ivrs_mappings[req_id].unity_map_enable ) { amd_iommu_reserve_domain_unity_map( - d, - ivrs_mappings[req_id].addr_range_start, + d, ivrs_mappings[req_id].addr_range_start, ivrs_mappings[req_id].addr_range_length, ivrs_mappings[req_id].write_permission, ivrs_mappings[req_id].read_permission); @@ -401,7 +397,7 @@ static void deallocate_page_table(struct page_info *pg) { /* We do not support skip levels yet */ ASSERT(next_level == level - 1); - 
deallocate_next_page_table(maddr_to_page(next_table_maddr), + deallocate_next_page_table(maddr_to_page(next_table_maddr), next_level); } } @@ -426,7 +422,6 @@ static void deallocate_iommu_page_tables(struct domain *d) spin_unlock(&hd->arch.mapping_lock); } - static void amd_iommu_domain_destroy(struct domain *d) { deallocate_iommu_page_tables(d); @@ -455,9 +450,10 @@ static int amd_iommu_add_device(u8 devfn, struct pci_dev *pdev) return 0; } - AMD_IOMMU_DEBUG("No iommu for %04x:%02x:%02x.%u; cannot be handed to d%d\n", - pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn), - pdev->domain->domain_id); + AMD_IOMMU_DEBUG( + "No iommu for %04x:%02x:%02x.%u; cannot be handed to d%d\n", + pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn), + pdev->domain->domain_id); return -ENODEV; } @@ -496,7 +492,7 @@ static int amd_iommu_group_id(u16 seg, u8 bus, u8 devfn) #include -static void amd_dump_p2m_table_level(struct page_info* pg, int level, +static void amd_dump_p2m_table_level(struct page_info *pg, int level, paddr_t gpa, int indent) { paddr_t address; @@ -511,8 +507,8 @@ static void amd_dump_p2m_table_level(struct page_info* pg, int level, table_vaddr = __map_domain_page(pg); if ( table_vaddr == NULL ) { - printk("Failed to map IOMMU domain page %"PRIpaddr"\n", - page_to_maddr(pg)); + printk("Failed to map IOMMU domain page %" PRIpaddr "\n", + page_to_maddr(pg)); return; } @@ -525,15 +521,13 @@ static void amd_dump_p2m_table_level(struct page_info* pg, int level, next_table_maddr = amd_iommu_get_address_from_pte(pde); entry = pde; - present = get_field_from_reg_u32(entry[0], - IOMMU_PDE_PRESENT_MASK, + present = get_field_from_reg_u32(entry[0], IOMMU_PDE_PRESENT_MASK, IOMMU_PDE_PRESENT_SHIFT); if ( !present ) continue; - next_level = get_field_from_reg_u32(entry[0], - IOMMU_PDE_NEXT_LEVEL_MASK, + next_level = get_field_from_reg_u32(entry[0], IOMMU_PDE_NEXT_LEVEL_MASK, IOMMU_PDE_NEXT_LEVEL_SHIFT); if ( next_level && (next_level != (level - 1)) ) @@ -546,12 +540,10 @@ static void amd_dump_p2m_table_level(struct page_info* pg, int level, address = gpa + amd_offset_level_address(index, level); if ( next_level >= 1 ) - amd_dump_p2m_table_level( - maddr_to_page(next_table_maddr), next_level, - address, indent + 1); + amd_dump_p2m_table_level(maddr_to_page(next_table_maddr), + next_level, address, indent + 1); else - printk("%*sdfn: %08lx mfn: %08lx\n", - indent, "", + printk("%*sdfn: %08lx mfn: %08lx\n", indent, "", (unsigned long)PFN_DOWN(address), (unsigned long)PFN_DOWN(next_table_maddr)); } @@ -575,7 +567,7 @@ static const struct iommu_ops __initconstrel amd_iommu_ops = { .hwdom_init = amd_iommu_hwdom_init, .add_device = amd_iommu_add_device, .remove_device = amd_iommu_remove_device, - .assign_device = amd_iommu_assign_device, + .assign_device = amd_iommu_assign_device, .teardown = amd_iommu_domain_destroy, .map_page = amd_iommu_map_page, .unmap_page = amd_iommu_unmap_page, diff --git a/xen/drivers/passthrough/arm/io-pgtable-arm.c b/xen/drivers/passthrough/arm/io-pgtable-arm.c index ca69d094b5..ca4cde65d9 100644 --- a/xen/drivers/passthrough/arm/io-pgtable-arm.c +++ b/xen/drivers/passthrough/arm/io-pgtable-arm.c @@ -49,23 +49,24 @@ /***** Start of Xen specific code *****/ -#define IOMMU_READ (1 << 0) -#define IOMMU_WRITE (1 << 1) -#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ -#define IOMMU_NOEXEC (1 << 3) -#define IOMMU_MMIO (1 << 4) /* e.g. 
things like MSI doorbells */ +#define IOMMU_READ (1 << 0) +#define IOMMU_WRITE (1 << 1) +#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ +#define IOMMU_NOEXEC (1 << 3) +#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */ #define kfree xfree -#define kmalloc(size, flags) _xmalloc(size, sizeof(void *)) -#define kzalloc(size, flags) _xzalloc(size, sizeof(void *)) -#define devm_kzalloc(dev, size, flags) _xzalloc(size, sizeof(void *)) -#define kmalloc_array(size, n, flags) _xmalloc_array(size, sizeof(void *), n) - -typedef enum { - GFP_KERNEL, - GFP_ATOMIC, - __GFP_HIGHMEM, - __GFP_HIGH +#define kmalloc(size, flags) _xmalloc(size, sizeof(void *)) +#define kzalloc(size, flags) _xzalloc(size, sizeof(void *)) +#define devm_kzalloc(dev, size, flags) _xzalloc(size, sizeof(void *)) +#define kmalloc_array(size, n, flags) _xmalloc_array(size, sizeof(void *), n) + +typedef enum +{ + GFP_KERNEL, + GFP_ATOMIC, + __GFP_HIGHMEM, + __GFP_HIGH } gfp_t; #define __fls(x) (fls(x) - 1) @@ -78,186 +79,187 @@ typedef enum { * return error; */ #undef WARN_ON -#define WARN_ON(condition) ({ \ - int __ret_warn_on = !!(condition); \ - if (unlikely(__ret_warn_on)) \ - WARN(); \ - unlikely(__ret_warn_on); \ -}) +#define WARN_ON(condition) \ + ({ \ + int __ret_warn_on = !!(condition); \ + if ( unlikely(__ret_warn_on) ) \ + WARN(); \ + unlikely(__ret_warn_on); \ + }) /***** Start of Linux allocator code *****/ -#define ARM_LPAE_MAX_ADDR_BITS 48 -#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16 -#define ARM_LPAE_MAX_LEVELS 4 +#define ARM_LPAE_MAX_ADDR_BITS 48 +#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16 +#define ARM_LPAE_MAX_LEVELS 4 /* Struct accessors */ -#define io_pgtable_to_data(x) \ - container_of((x), struct arm_lpae_io_pgtable, iop) +#define io_pgtable_to_data(x) container_of((x), struct arm_lpae_io_pgtable, iop) -#define io_pgtable_ops_to_data(x) \ - io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) +#define io_pgtable_ops_to_data(x) \ + io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) /* * For consistency with the architecture, we always consider * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0 */ -#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels) +#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels) /* * Calculate the right shift amount to get to the portion describing level l * in a virtual address mapped by the pagetable in d. */ -#define ARM_LPAE_LVL_SHIFT(l,d) \ - ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ - * (d)->bits_per_level) + (d)->pg_shift) +#define ARM_LPAE_LVL_SHIFT(l, d) \ + ((((d)->levels - ((l)-ARM_LPAE_START_LVL(d) + 1)) * (d)->bits_per_level) + \ + (d)->pg_shift) -#define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift) +#define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift) -#define ARM_LPAE_PAGES_PER_PGD(d) \ - DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d)) +#define ARM_LPAE_PAGES_PER_PGD(d) \ + DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d)) /* * Calculate the index at level l used to map virtual address a using the * pagetable in d. */ -#define ARM_LPAE_PGD_IDX(l,d) \ - ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) +#define ARM_LPAE_PGD_IDX(l, d) \ + ((l) == ARM_LPAE_START_LVL(d) ? 
ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) -#define ARM_LPAE_LVL_IDX(a,l,d) \ - (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ - ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) +#define ARM_LPAE_LVL_IDX(a, l, d) \ + (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l, d)) & \ + ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l, d))) - 1)) /* Calculate the block/page mapping size at level l for pagetable in d. */ -#define ARM_LPAE_BLOCK_SIZE(l,d) \ - (1 << (ilog2(sizeof(arm_lpae_iopte)) + \ - ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level))) +#define ARM_LPAE_BLOCK_SIZE(l, d) \ + (1 << (ilog2(sizeof(arm_lpae_iopte)) + \ + ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level))) /* Page table bits */ -#define ARM_LPAE_PTE_TYPE_SHIFT 0 -#define ARM_LPAE_PTE_TYPE_MASK 0x3 - -#define ARM_LPAE_PTE_TYPE_BLOCK 1 -#define ARM_LPAE_PTE_TYPE_TABLE 3 -#define ARM_LPAE_PTE_TYPE_PAGE 3 - -#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63) -#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53) -#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10) -#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8) -#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8) -#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8) -#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5) -#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0) - -#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2) +#define ARM_LPAE_PTE_TYPE_SHIFT 0 +#define ARM_LPAE_PTE_TYPE_MASK 0x3 + +#define ARM_LPAE_PTE_TYPE_BLOCK 1 +#define ARM_LPAE_PTE_TYPE_TABLE 3 +#define ARM_LPAE_PTE_TYPE_PAGE 3 + +#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63) +#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53) +#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10) +#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8) +#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8) +#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8) +#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5) +#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0) + +#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2) /* Ignore the contiguous bit for block splitting */ -#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52) -#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \ - ARM_LPAE_PTE_ATTR_HI_MASK) +#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52) +#define ARM_LPAE_PTE_ATTR_MASK \ + (ARM_LPAE_PTE_ATTR_LO_MASK | ARM_LPAE_PTE_ATTR_HI_MASK) /* Stage-1 PTE */ -#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6) -#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6) -#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2 -#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11) +#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6) +#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6) +#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2 +#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11) /* Stage-2 PTE */ -#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6) -#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6) -#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6) -#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2) -#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2) -#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2) +#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6) +#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6) +#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6) +#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2) +#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2) +#define 
ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2) /* Register bits */ -#define ARM_32_LPAE_TCR_EAE (1 << 31) -#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31) - -#define ARM_LPAE_TCR_EPD1 (1 << 23) - -#define ARM_LPAE_TCR_TG0_4K (0 << 14) -#define ARM_LPAE_TCR_TG0_64K (1 << 14) -#define ARM_LPAE_TCR_TG0_16K (2 << 14) - -#define ARM_LPAE_TCR_SH0_SHIFT 12 -#define ARM_LPAE_TCR_SH0_MASK 0x3 -#define ARM_LPAE_TCR_SH_NS 0 -#define ARM_LPAE_TCR_SH_OS 2 -#define ARM_LPAE_TCR_SH_IS 3 - -#define ARM_LPAE_TCR_ORGN0_SHIFT 10 -#define ARM_LPAE_TCR_IRGN0_SHIFT 8 -#define ARM_LPAE_TCR_RGN_MASK 0x3 -#define ARM_LPAE_TCR_RGN_NC 0 -#define ARM_LPAE_TCR_RGN_WBWA 1 -#define ARM_LPAE_TCR_RGN_WT 2 -#define ARM_LPAE_TCR_RGN_WB 3 - -#define ARM_LPAE_TCR_SL0_SHIFT 6 -#define ARM_LPAE_TCR_SL0_MASK 0x3 - -#define ARM_LPAE_TCR_T0SZ_SHIFT 0 -#define ARM_LPAE_TCR_SZ_MASK 0xf - -#define ARM_LPAE_TCR_PS_SHIFT 16 -#define ARM_LPAE_TCR_PS_MASK 0x7 - -#define ARM_LPAE_TCR_IPS_SHIFT 32 -#define ARM_LPAE_TCR_IPS_MASK 0x7 - -#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL -#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL -#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL -#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL -#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL -#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL - -#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3) -#define ARM_LPAE_MAIR_ATTR_MASK 0xff -#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04 -#define ARM_LPAE_MAIR_ATTR_NC 0x44 -#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff -#define ARM_LPAE_MAIR_ATTR_IDX_NC 0 -#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1 -#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2 +#define ARM_32_LPAE_TCR_EAE (1 << 31) +#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31) + +#define ARM_LPAE_TCR_EPD1 (1 << 23) + +#define ARM_LPAE_TCR_TG0_4K (0 << 14) +#define ARM_LPAE_TCR_TG0_64K (1 << 14) +#define ARM_LPAE_TCR_TG0_16K (2 << 14) + +#define ARM_LPAE_TCR_SH0_SHIFT 12 +#define ARM_LPAE_TCR_SH0_MASK 0x3 +#define ARM_LPAE_TCR_SH_NS 0 +#define ARM_LPAE_TCR_SH_OS 2 +#define ARM_LPAE_TCR_SH_IS 3 + +#define ARM_LPAE_TCR_ORGN0_SHIFT 10 +#define ARM_LPAE_TCR_IRGN0_SHIFT 8 +#define ARM_LPAE_TCR_RGN_MASK 0x3 +#define ARM_LPAE_TCR_RGN_NC 0 +#define ARM_LPAE_TCR_RGN_WBWA 1 +#define ARM_LPAE_TCR_RGN_WT 2 +#define ARM_LPAE_TCR_RGN_WB 3 + +#define ARM_LPAE_TCR_SL0_SHIFT 6 +#define ARM_LPAE_TCR_SL0_MASK 0x3 + +#define ARM_LPAE_TCR_T0SZ_SHIFT 0 +#define ARM_LPAE_TCR_SZ_MASK 0xf + +#define ARM_LPAE_TCR_PS_SHIFT 16 +#define ARM_LPAE_TCR_PS_MASK 0x7 + +#define ARM_LPAE_TCR_IPS_SHIFT 32 +#define ARM_LPAE_TCR_IPS_MASK 0x7 + +#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL +#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL +#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL +#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL +#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL +#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL + +#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3) +#define ARM_LPAE_MAIR_ATTR_MASK 0xff +#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04 +#define ARM_LPAE_MAIR_ATTR_NC 0x44 +#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff +#define ARM_LPAE_MAIR_ATTR_IDX_NC 0 +#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1 +#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2 /* Xen: __va is not suitable here use maddr_to_page instead. 
*/ /* IOPTE accessors */ -#define iopte_deref(pte,d) \ - (maddr_to_page((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \ - & ~(ARM_LPAE_GRANULE(d) - 1ULL))) - -#define iopte_type(pte,l) \ - (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK) +#define iopte_deref(pte, d) \ + (maddr_to_page((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) & \ + ~(ARM_LPAE_GRANULE(d) - 1ULL))) -#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK) +#define iopte_type(pte, l) \ + (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK) -#define iopte_leaf(pte,l) \ - (l == (ARM_LPAE_MAX_LEVELS - 1) ? \ - (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \ - (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK)) +#define iopte_prot(pte) ((pte)&ARM_LPAE_PTE_ATTR_MASK) -#define iopte_to_pfn(pte,d) \ - (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift) +#define iopte_leaf(pte, l) \ + (l == (ARM_LPAE_MAX_LEVELS - 1) \ + ? (iopte_type(pte, l) == ARM_LPAE_PTE_TYPE_PAGE) \ + : (iopte_type(pte, l) == ARM_LPAE_PTE_TYPE_BLOCK)) -#define pfn_to_iopte(pfn,d) \ - (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) +#define iopte_to_pfn(pte, d) \ + (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift) -struct arm_lpae_io_pgtable { - struct io_pgtable iop; +#define pfn_to_iopte(pfn, d) \ + (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) - int levels; - size_t pgd_size; - unsigned long pg_shift; - unsigned long bits_per_level; - - /* Xen: We deal with domain pages. */ - struct page_info *pgd; - /* Xen: To indicate that deallocation sequence is in progress. */ - bool_t cleanup; - /* Xen: To count allocated domain pages. */ - unsigned int page_count; +struct arm_lpae_io_pgtable +{ + struct io_pgtable iop; + + int levels; + size_t pgd_size; + unsigned long pg_shift; + unsigned long bits_per_level; + + /* Xen: We deal with domain pages. */ + struct page_info *pgd; + /* Xen: To indicate that deallocation sequence is in progress. */ + bool_t cleanup; + /* Xen: To count allocated domain pages. */ + unsigned int page_count; }; typedef u64 arm_lpae_iopte; @@ -265,8 +267,8 @@ typedef u64 arm_lpae_iopte; /* * Xen: Overwrite Linux functions that are in charge of memory * allocation/deallocation by Xen ones. The main reason is that we want to - * operate with domain pages and as the result we have to use Xen's API for this. - * Taking into account that Xen's API deals with struct page_info *page + * operate with domain pages and as the result we have to use Xen's API for + * this. Taking into account that Xen's API deals with struct page_info *page * modify all depended code. Also keep in mind that the domain pages must be * mapped just before using it and unmapped right after we completed. 
*/ @@ -332,212 +334,219 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, } #endif -static struct page_info *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, - struct arm_lpae_io_pgtable *data) +static struct page_info * +__arm_lpae_alloc_pages(size_t size, gfp_t gfp, struct arm_lpae_io_pgtable *data) { - struct page_info *pages; - unsigned int order = get_order_from_bytes(size); - int i; + struct page_info *pages; + unsigned int order = get_order_from_bytes(size); + int i; - pages = alloc_domheap_pages(NULL, order, 0); - if (pages == NULL) - return NULL; + pages = alloc_domheap_pages(NULL, order, 0); + if ( pages == NULL ) + return NULL; - for (i = 0; i < (1 << order); i ++) - clear_and_clean_page(pages + i); + for ( i = 0; i < (1 << order); i++ ) + clear_and_clean_page(pages + i); - data->page_count += (1<page_count += (1 << order); - return pages; + return pages; } static void __arm_lpae_free_pages(struct page_info *pages, size_t size, - struct arm_lpae_io_pgtable *data) + struct arm_lpae_io_pgtable *data) { - unsigned int order = get_order_from_bytes(size); + unsigned int order = get_order_from_bytes(size); - BUG_ON((int)data->page_count <= 0); + BUG_ON((int)data->page_count <= 0); - free_domheap_pages(pages, order); + free_domheap_pages(pages, order); - data->page_count -= (1<page_count -= (1 << order); } static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, - struct io_pgtable_cfg *cfg) + struct io_pgtable_cfg *cfg) { - smp_mb(); - *ptep = pte; - smp_mb(); - clean_dcache(*ptep); + smp_mb(); + *ptep = pte; + smp_mb(); + clean_dcache(*ptep); } static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, - unsigned long iova, size_t size, int lvl, - arm_lpae_iopte *ptep); + unsigned long iova, size_t size, int lvl, + arm_lpae_iopte *ptep); static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, - unsigned long iova, phys_addr_t paddr, - arm_lpae_iopte prot, int lvl, - arm_lpae_iopte *ptep) + unsigned long iova, phys_addr_t paddr, + arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep) { - arm_lpae_iopte pte = prot; - struct io_pgtable_cfg *cfg = &data->iop.cfg; + arm_lpae_iopte pte = prot; + struct io_pgtable_cfg *cfg = &data->iop.cfg; - if (iopte_leaf(*ptep, lvl)) { - /* We require an unmap first */ + if ( iopte_leaf(*ptep, lvl) ) + { + /* We require an unmap first */ #if 0 /* Xen: Not needed */ WARN_ON(!selftest_running); #endif - return -EEXIST; - } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) { - /* - * We need to unmap and free the old table before - * overwriting it with a block entry. - */ - arm_lpae_iopte *tblp; - size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data); - - tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); - if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz)) - return -EINVAL; - } - - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) - pte |= ARM_LPAE_PTE_NS; - - if (lvl == ARM_LPAE_MAX_LEVELS - 1) - pte |= ARM_LPAE_PTE_TYPE_PAGE; - else - pte |= ARM_LPAE_PTE_TYPE_BLOCK; - - pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; - pte |= pfn_to_iopte(paddr >> data->pg_shift, data); - - __arm_lpae_set_pte(ptep, pte, cfg); - return 0; + return -EEXIST; + } + else if ( iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE ) + { + /* + * We need to unmap and free the old table before + * overwriting it with a block entry. 
+ */ + arm_lpae_iopte *tblp; + size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data); + + tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); + if ( WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz) ) + return -EINVAL; + } + + if ( cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS ) + pte |= ARM_LPAE_PTE_NS; + + if ( lvl == ARM_LPAE_MAX_LEVELS - 1 ) + pte |= ARM_LPAE_PTE_TYPE_PAGE; + else + pte |= ARM_LPAE_PTE_TYPE_BLOCK; + + pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; + pte |= pfn_to_iopte(paddr >> data->pg_shift, data); + + __arm_lpae_set_pte(ptep, pte, cfg); + return 0; } /* Xen: We deal with domain pages. */ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, - phys_addr_t paddr, size_t size, arm_lpae_iopte prot, - int lvl, arm_lpae_iopte *ptep) + phys_addr_t paddr, size_t size, arm_lpae_iopte prot, + int lvl, arm_lpae_iopte *ptep) { - arm_lpae_iopte *cptep, pte; - size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data); - struct io_pgtable_cfg *cfg = &data->iop.cfg; - struct page_info *page; - int ret; - - /* Find our entry at the current level */ - ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); - - /* If we can install a leaf entry at this level, then do so */ - if (size == block_size && (size & cfg->pgsize_bitmap)) - return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); - - /* We can't allocate tables at the final level */ - if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1)) - return -EINVAL; - - /* Grab a pointer to the next level */ - pte = *ptep; - if (!pte) { - page = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data), - GFP_ATOMIC, data); - if (!page) - return -ENOMEM; - - /* Xen: __pa is not suitable here use page_to_maddr instead. */ - pte = page_to_maddr(page) | ARM_LPAE_PTE_TYPE_TABLE; - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) - pte |= ARM_LPAE_PTE_NSTABLE; - __arm_lpae_set_pte(ptep, pte, cfg); - /* Xen: Sync with my fix for Linux */ - } else if (!iopte_leaf(pte, lvl)) { - page = iopte_deref(pte, data); - } else { - /* We require an unmap first */ + arm_lpae_iopte *cptep, pte; + size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data); + struct io_pgtable_cfg *cfg = &data->iop.cfg; + struct page_info *page; + int ret; + + /* Find our entry at the current level */ + ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); + + /* If we can install a leaf entry at this level, then do so */ + if ( size == block_size && (size & cfg->pgsize_bitmap) ) + return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); + + /* We can't allocate tables at the final level */ + if ( WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1) ) + return -EINVAL; + + /* Grab a pointer to the next level */ + pte = *ptep; + if ( !pte ) + { + page = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data), GFP_ATOMIC, data); + if ( !page ) + return -ENOMEM; + + /* Xen: __pa is not suitable here use page_to_maddr instead. 
*/ + pte = page_to_maddr(page) | ARM_LPAE_PTE_TYPE_TABLE; + if ( cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS ) + pte |= ARM_LPAE_PTE_NSTABLE; + __arm_lpae_set_pte(ptep, pte, cfg); + /* Xen: Sync with my fix for Linux */ + } + else if ( !iopte_leaf(pte, lvl) ) + { + page = iopte_deref(pte, data); + } + else + { + /* We require an unmap first */ #if 0 /* Xen: Not needed */ WARN_ON(!selftest_running); #endif - return -EEXIST; - } - - /* Rinse, repeat */ - cptep = __map_domain_page(page); - ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep); - unmap_domain_page(cptep); - return ret; + return -EEXIST; + } + + /* Rinse, repeat */ + cptep = __map_domain_page(page); + ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep); + unmap_domain_page(cptep); + return ret; } static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, - int prot) + int prot) { - arm_lpae_iopte pte; - - if (data->iop.fmt == ARM_64_LPAE_S1 || - data->iop.fmt == ARM_32_LPAE_S1) { - pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG; - - if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) - pte |= ARM_LPAE_PTE_AP_RDONLY; - - if (prot & IOMMU_MMIO) - pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV - << ARM_LPAE_PTE_ATTRINDX_SHIFT); - else if (prot & IOMMU_CACHE) - pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE - << ARM_LPAE_PTE_ATTRINDX_SHIFT); - } else { - pte = ARM_LPAE_PTE_HAP_FAULT; - if (prot & IOMMU_READ) - pte |= ARM_LPAE_PTE_HAP_READ; - if (prot & IOMMU_WRITE) - pte |= ARM_LPAE_PTE_HAP_WRITE; - if (prot & IOMMU_MMIO) - pte |= ARM_LPAE_PTE_MEMATTR_DEV; - else if (prot & IOMMU_CACHE) - pte |= ARM_LPAE_PTE_MEMATTR_OIWB; - else - pte |= ARM_LPAE_PTE_MEMATTR_NC; - } - - if (prot & IOMMU_NOEXEC) - pte |= ARM_LPAE_PTE_XN; - - return pte; + arm_lpae_iopte pte; + + if ( data->iop.fmt == ARM_64_LPAE_S1 || data->iop.fmt == ARM_32_LPAE_S1 ) + { + pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG; + + if ( !(prot & IOMMU_WRITE) && (prot & IOMMU_READ) ) + pte |= ARM_LPAE_PTE_AP_RDONLY; + + if ( prot & IOMMU_MMIO ) + pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV << ARM_LPAE_PTE_ATTRINDX_SHIFT); + else if ( prot & IOMMU_CACHE ) + pte |= + (ARM_LPAE_MAIR_ATTR_IDX_CACHE << ARM_LPAE_PTE_ATTRINDX_SHIFT); + } + else + { + pte = ARM_LPAE_PTE_HAP_FAULT; + if ( prot & IOMMU_READ ) + pte |= ARM_LPAE_PTE_HAP_READ; + if ( prot & IOMMU_WRITE ) + pte |= ARM_LPAE_PTE_HAP_WRITE; + if ( prot & IOMMU_MMIO ) + pte |= ARM_LPAE_PTE_MEMATTR_DEV; + else if ( prot & IOMMU_CACHE ) + pte |= ARM_LPAE_PTE_MEMATTR_OIWB; + else + pte |= ARM_LPAE_PTE_MEMATTR_NC; + } + + if ( prot & IOMMU_NOEXEC ) + pte |= ARM_LPAE_PTE_XN; + + return pte; } /* Xen: We deal with domain pages. */ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int iommu_prot) + phys_addr_t paddr, size_t size, int iommu_prot) { - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); - arm_lpae_iopte *ptep; - int ret, lvl = ARM_LPAE_START_LVL(data); - arm_lpae_iopte prot; - - /* If no access, then nothing to do */ - if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) - return 0; - - prot = arm_lpae_prot_to_pte(data, iommu_prot); - ptep = __map_domain_page(data->pgd); - ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); - unmap_domain_page(ptep); - - /* - * Synchronise all PTE updates for the new mapping before there's - * a chance for anything to kick off a table walk for the new iova. 
- */ - smp_wmb(); - - return ret; + struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + arm_lpae_iopte *ptep; + int ret, lvl = ARM_LPAE_START_LVL(data); + arm_lpae_iopte prot; + + /* If no access, then nothing to do */ + if ( !(iommu_prot & (IOMMU_READ | IOMMU_WRITE)) ) + return 0; + + prot = arm_lpae_prot_to_pte(data, iommu_prot); + ptep = __map_domain_page(data->pgd); + ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); + unmap_domain_page(ptep); + + /* + * Synchronise all PTE updates for the new mapping before there's + * a chance for anything to kick off a table walk for the new iova. + */ + smp_wmb(); + + return ret; } static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, - struct page_info *page); + struct page_info *page); /* * TODO: We have reused unused at the moment "page->pad" variable for @@ -553,313 +562,331 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, * corresponding #define-s. */ static void __arm_lpae_free_next_pgtable(struct arm_lpae_io_pgtable *data, - int lvl, struct page_info *page) + int lvl, struct page_info *page) { - if (!data->cleanup) { - /* - * We are here during normal page table maintenance. Just call - * __arm_lpae_free_pgtable(), what we actually had to call. - */ - __arm_lpae_free_pgtable(data, lvl, page); - } else { - /* - * The page table deallocation sequence is in progress. Use some fields - * in struct page_info to pass arguments we will need during handling - * this page back. Queue page to list. - */ - PFN_ORDER(page) = lvl; - page->pad = (u64)&data->iop.ops; - - spin_lock(&iommu_pt_cleanup_lock); - page_list_add_tail(page, &iommu_pt_cleanup_list); - spin_unlock(&iommu_pt_cleanup_lock); - } + if ( !data->cleanup ) + { + /* + * We are here during normal page table maintenance. Just call + * __arm_lpae_free_pgtable(), what we actually had to call. + */ + __arm_lpae_free_pgtable(data, lvl, page); + } + else + { + /* + * The page table deallocation sequence is in progress. Use some fields + * in struct page_info to pass arguments we will need during handling + * this page back. Queue page to list. + */ + PFN_ORDER(page) = lvl; + page->pad = (u64)&data->iop.ops; + + spin_lock(&iommu_pt_cleanup_lock); + page_list_add_tail(page, &iommu_pt_cleanup_list); + spin_unlock(&iommu_pt_cleanup_lock); + } } /* Xen: We deal with domain pages. 
*/ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, - struct page_info *page) + struct page_info *page) { - arm_lpae_iopte *start, *end; - unsigned long table_size; - arm_lpae_iopte *ptep = __map_domain_page(page); + arm_lpae_iopte *start, *end; + unsigned long table_size; + arm_lpae_iopte *ptep = __map_domain_page(page); - if (lvl == ARM_LPAE_START_LVL(data)) - table_size = data->pgd_size; - else - table_size = ARM_LPAE_GRANULE(data); + if ( lvl == ARM_LPAE_START_LVL(data) ) + table_size = data->pgd_size; + else + table_size = ARM_LPAE_GRANULE(data); - start = ptep; + start = ptep; - /* Only leaf entries at the last level */ - if (lvl == ARM_LPAE_MAX_LEVELS - 1) - end = ptep; - else - end = (void *)ptep + table_size; + /* Only leaf entries at the last level */ + if ( lvl == ARM_LPAE_MAX_LEVELS - 1 ) + end = ptep; + else + end = (void *)ptep + table_size; - while (ptep != end) { - arm_lpae_iopte pte = *ptep++; + while ( ptep != end ) + { + arm_lpae_iopte pte = *ptep++; - if (!pte || iopte_leaf(pte, lvl)) - continue; + if ( !pte || iopte_leaf(pte, lvl) ) + continue; - __arm_lpae_free_next_pgtable(data, lvl + 1, iopte_deref(pte, data)); - } + __arm_lpae_free_next_pgtable(data, lvl + 1, iopte_deref(pte, data)); + } - unmap_domain_page(start); - __arm_lpae_free_pages(page, table_size, data); + unmap_domain_page(start); + __arm_lpae_free_pages(page, table_size, data); } /* * We added extra "page" argument since we want to know what page is processed * at the moment and should be freed. * */ -static void arm_lpae_free_pgtable(struct io_pgtable *iop, struct page_info *page) +static void arm_lpae_free_pgtable(struct io_pgtable *iop, + struct page_info *page) { - struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); - int lvl; - - if (!data->cleanup) { - /* Start page table deallocation sequence from the first level. */ - data->cleanup = true; - lvl = ARM_LPAE_START_LVL(data); - } else { - /* Retrieve the level to continue deallocation sequence from. */ - lvl = PFN_ORDER(page); - PFN_ORDER(page) = 0; - page->pad = 0; - } - - __arm_lpae_free_pgtable(data, lvl, page); - - /* - * Seems, we have already deallocated all pages, so it is time - * to release unfreed resource. - */ - if (!data->page_count) - kfree(data); + struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); + int lvl; + + if ( !data->cleanup ) + { + /* Start page table deallocation sequence from the first level. */ + data->cleanup = true; + lvl = ARM_LPAE_START_LVL(data); + } + else + { + /* Retrieve the level to continue deallocation sequence from. */ + lvl = PFN_ORDER(page); + PFN_ORDER(page) = 0; + page->pad = 0; + } + + __arm_lpae_free_pgtable(data, lvl, page); + + /* + * Seems, we have already deallocated all pages, so it is time + * to release unfreed resource. + */ + if ( !data->page_count ) + kfree(data); } /* Xen: We deal with domain pages. */ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, - unsigned long iova, size_t size, - arm_lpae_iopte prot, int lvl, - arm_lpae_iopte *ptep, size_t blk_size) + unsigned long iova, size_t size, + arm_lpae_iopte prot, int lvl, + arm_lpae_iopte *ptep, size_t blk_size) { - unsigned long blk_start, blk_end; - phys_addr_t blk_paddr; - arm_lpae_iopte table = 0; - - blk_start = iova & ~(blk_size - 1); - blk_end = blk_start + blk_size; - blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift; - - for (; blk_start < blk_end; blk_start += size, blk_paddr += size) { - arm_lpae_iopte *tablep; - - /* Unmap! 
*/ - if (blk_start == iova) - continue; - - /* __arm_lpae_map expects a pointer to the start of the table */ - tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data); - if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl, - tablep) < 0) { - if (table) { - /* Free the table we allocated */ - /* - * Xen: iopte_deref returns struct page_info *, - * it is exactly what we need. Pass it directly to function - * instead of adding new variable. - */ - __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(table, data)); - } - return 0; /* Bytes unmapped */ - } - } - - __arm_lpae_set_pte(ptep, table, &data->iop.cfg); - iova &= ~(blk_size - 1); - io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true); - return size; + unsigned long blk_start, blk_end; + phys_addr_t blk_paddr; + arm_lpae_iopte table = 0; + + blk_start = iova & ~(blk_size - 1); + blk_end = blk_start + blk_size; + blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift; + + for ( ; blk_start < blk_end; blk_start += size, blk_paddr += size ) + { + arm_lpae_iopte *tablep; + + /* Unmap! */ + if ( blk_start == iova ) + continue; + + /* __arm_lpae_map expects a pointer to the start of the table */ + tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data); + if ( __arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl, + tablep) < 0 ) + { + if ( table ) + { + /* Free the table we allocated */ + /* + * Xen: iopte_deref returns struct page_info *, + * it is exactly what we need. Pass it directly to function + * instead of adding new variable. + */ + __arm_lpae_free_pgtable(data, lvl + 1, + iopte_deref(table, data)); + } + return 0; /* Bytes unmapped */ + } + } + + __arm_lpae_set_pte(ptep, table, &data->iop.cfg); + iova &= ~(blk_size - 1); + io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true); + return size; } /* Xen: We deal with domain pages. */ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, - unsigned long iova, size_t size, int lvl, - arm_lpae_iopte *ptep) + unsigned long iova, size_t size, int lvl, + arm_lpae_iopte *ptep) { - arm_lpae_iopte pte; - struct io_pgtable *iop = &data->iop; - size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data); - int ret; - - /* Something went horribly wrong and we ran out of page table */ - if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) - return 0; - - ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); - pte = *ptep; - /* - * Xen: TODO: Sometimes we catch this since p2m tries to unmap - * the same page twice. - */ - if (WARN_ON(!pte)) - return 0; - - /* If the size matches this level, we're in the right place */ - if (size == blk_size) { - __arm_lpae_set_pte(ptep, 0, &iop->cfg); - - if (!iopte_leaf(pte, lvl)) { - /* Also flush any partial walks */ - io_pgtable_tlb_add_flush(iop, iova, size, - ARM_LPAE_GRANULE(data), false); - io_pgtable_tlb_sync(iop); - /* - * Xen: iopte_deref returns struct page_info *, - * it is exactly what we need. Pass it directly to function - * instead of adding new variable. 
- */ - __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data)); - } else { - io_pgtable_tlb_add_flush(iop, iova, size, size, true); - } - - return size; - } else if (iopte_leaf(pte, lvl)) { - /* - * Insert a table at the next level to map the old region, - * minus the part we want to unmap - */ - return arm_lpae_split_blk_unmap(data, iova, size, - iopte_prot(pte), lvl, ptep, - blk_size); - } - - /* Keep on walkin' */ - ptep = __map_domain_page(iopte_deref(pte, data)); - ret = __arm_lpae_unmap(data, iova, size, lvl + 1, ptep); - unmap_domain_page(ptep); - return ret; + arm_lpae_iopte pte; + struct io_pgtable *iop = &data->iop; + size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data); + int ret; + + /* Something went horribly wrong and we ran out of page table */ + if ( WARN_ON(lvl == ARM_LPAE_MAX_LEVELS) ) + return 0; + + ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); + pte = *ptep; + /* + * Xen: TODO: Sometimes we catch this since p2m tries to unmap + * the same page twice. + */ + if ( WARN_ON(!pte) ) + return 0; + + /* If the size matches this level, we're in the right place */ + if ( size == blk_size ) + { + __arm_lpae_set_pte(ptep, 0, &iop->cfg); + + if ( !iopte_leaf(pte, lvl) ) + { + /* Also flush any partial walks */ + io_pgtable_tlb_add_flush(iop, iova, size, ARM_LPAE_GRANULE(data), + false); + io_pgtable_tlb_sync(iop); + /* + * Xen: iopte_deref returns struct page_info *, + * it is exactly what we need. Pass it directly to function + * instead of adding new variable. + */ + __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data)); + } + else + { + io_pgtable_tlb_add_flush(iop, iova, size, size, true); + } + + return size; + } + else if ( iopte_leaf(pte, lvl) ) + { + /* + * Insert a table at the next level to map the old region, + * minus the part we want to unmap + */ + return arm_lpae_split_blk_unmap(data, iova, size, iopte_prot(pte), lvl, + ptep, blk_size); + } + + /* Keep on walkin' */ + ptep = __map_domain_page(iopte_deref(pte, data)); + ret = __arm_lpae_unmap(data, iova, size, lvl + 1, ptep); + unmap_domain_page(ptep); + return ret; } /* Xen: We deal with domain pages. */ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, - size_t size) + size_t size) { - size_t unmapped; - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); - arm_lpae_iopte *ptep = __map_domain_page(data->pgd); - int lvl = ARM_LPAE_START_LVL(data); + size_t unmapped; + struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + arm_lpae_iopte *ptep = __map_domain_page(data->pgd); + int lvl = ARM_LPAE_START_LVL(data); - unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep); - if (unmapped) - io_pgtable_tlb_sync(&data->iop); - unmap_domain_page(ptep); + unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep); + if ( unmapped ) + io_pgtable_tlb_sync(&data->iop); + unmap_domain_page(ptep); - /* Xen: Add barrier here to synchronise all PTE updates. */ - smp_wmb(); + /* Xen: Add barrier here to synchronise all PTE updates. */ + smp_wmb(); - return unmapped; + return unmapped; } /* Xen: We deal with domain pages. 
*/ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, - unsigned long iova) + unsigned long iova) { - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); - arm_lpae_iopte pte, *ptep = __map_domain_page(data->pgd); - int lvl = ARM_LPAE_START_LVL(data); + struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + arm_lpae_iopte pte, *ptep = __map_domain_page(data->pgd); + int lvl = ARM_LPAE_START_LVL(data); - do { - /* Valid IOPTE pointer? */ - if (!ptep) - break; + do { + /* Valid IOPTE pointer? */ + if ( !ptep ) + break; - /* Grab the IOPTE we're interested in */ - pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data)); - unmap_domain_page(ptep); + /* Grab the IOPTE we're interested in */ + pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data)); + unmap_domain_page(ptep); - /* Valid entry? */ - if (!pte) - return 0; + /* Valid entry? */ + if ( !pte ) + return 0; - /* Leaf entry? */ - if (iopte_leaf(pte,lvl)) - goto found_translation; + /* Leaf entry? */ + if ( iopte_leaf(pte, lvl) ) + goto found_translation; - /* Take it to the next level */ - ptep = __map_domain_page(iopte_deref(pte, data)); - } while (++lvl < ARM_LPAE_MAX_LEVELS); + /* Take it to the next level */ + ptep = __map_domain_page(iopte_deref(pte, data)); + } while ( ++lvl < ARM_LPAE_MAX_LEVELS ); - unmap_domain_page(ptep); - /* Ran out of page tables to walk */ - return 0; + unmap_domain_page(ptep); + /* Ran out of page tables to walk */ + return 0; found_translation: - iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1); - return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova; + iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1); + return ((phys_addr_t)iopte_to_pfn(pte, data) << data->pg_shift) | iova; } static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) { - unsigned long granule; - - /* - * We need to restrict the supported page sizes to match the - * translation regime for a particular granule. Aim to match - * the CPU page size if possible, otherwise prefer smaller sizes. - * While we're at it, restrict the block sizes to match the - * chosen granule. - */ - if (cfg->pgsize_bitmap & PAGE_SIZE) - granule = PAGE_SIZE; - else if (cfg->pgsize_bitmap & ~PAGE_MASK) - granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK); - else if (cfg->pgsize_bitmap & PAGE_MASK) - granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK); - else - granule = 0; - - switch (granule) { - case SZ_4K: - cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); - break; - case SZ_16K: - cfg->pgsize_bitmap &= (SZ_16K | SZ_32M); - break; - case SZ_64K: - cfg->pgsize_bitmap &= (SZ_64K | SZ_512M); - break; - default: - cfg->pgsize_bitmap = 0; - } + unsigned long granule; + + /* + * We need to restrict the supported page sizes to match the + * translation regime for a particular granule. Aim to match + * the CPU page size if possible, otherwise prefer smaller sizes. + * While we're at it, restrict the block sizes to match the + * chosen granule. 
+ */ + if ( cfg->pgsize_bitmap & PAGE_SIZE ) + granule = PAGE_SIZE; + else if ( cfg->pgsize_bitmap & ~PAGE_MASK ) + granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK); + else if ( cfg->pgsize_bitmap & PAGE_MASK ) + granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK); + else + granule = 0; + + switch (granule) + { + case SZ_4K: + cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); + break; + case SZ_16K: + cfg->pgsize_bitmap &= (SZ_16K | SZ_32M); + break; + case SZ_64K: + cfg->pgsize_bitmap &= (SZ_64K | SZ_512M); + break; + default: + cfg->pgsize_bitmap = 0; + } } static struct arm_lpae_io_pgtable * arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) { - unsigned long va_bits, pgd_bits; - struct arm_lpae_io_pgtable *data; + unsigned long va_bits, pgd_bits; + struct arm_lpae_io_pgtable *data; - arm_lpae_restrict_pgsizes(cfg); + arm_lpae_restrict_pgsizes(cfg); - if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K))) - return NULL; + if ( !(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)) ) + return NULL; - /* - * Xen: Just to be sure that minimum page supported by the IOMMU - * is not bigger than PAGE_SIZE. - */ - if (PAGE_SIZE & ((1 << __ffs(cfg->pgsize_bitmap)) - 1)) - return NULL; + /* + * Xen: Just to be sure that minimum page supported by the IOMMU + * is not bigger than PAGE_SIZE. + */ + if ( PAGE_SIZE & ((1 << __ffs(cfg->pgsize_bitmap)) - 1) ) + return NULL; - if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS) - return NULL; + if ( cfg->ias > ARM_LPAE_MAX_ADDR_BITS ) + return NULL; - if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) - return NULL; + if ( cfg->oas > ARM_LPAE_MAX_ADDR_BITS ) + return NULL; #if 0 /* Xen: Not needed */ if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) { @@ -868,120 +895,122 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) } #endif - data = kmalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return NULL; + data = kmalloc(sizeof(*data), GFP_KERNEL); + if ( !data ) + return NULL; - data->pg_shift = __ffs(cfg->pgsize_bitmap); - data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte)); + data->pg_shift = __ffs(cfg->pgsize_bitmap); + data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte)); - va_bits = cfg->ias - data->pg_shift; - data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level); + va_bits = cfg->ias - data->pg_shift; + data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level); - /* Calculate the actual size of our pgd (without concatenation) */ - pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1)); - data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte))); + /* Calculate the actual size of our pgd (without concatenation) */ + pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1)); + data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte))); - data->iop.ops = (struct io_pgtable_ops) { - .map = arm_lpae_map, - .unmap = arm_lpae_unmap, - .iova_to_phys = arm_lpae_iova_to_phys, - }; + data->iop.ops = (struct io_pgtable_ops){ + .map = arm_lpae_map, + .unmap = arm_lpae_unmap, + .iova_to_phys = arm_lpae_iova_to_phys, + }; - return data; + return data; } static struct io_pgtable * arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) { - u64 reg; - struct arm_lpae_io_pgtable *data; - - if (cfg->quirks & ~IO_PGTABLE_QUIRK_ARM_NS) - return NULL; - - data = arm_lpae_alloc_pgtable(cfg); - if (!data) - return NULL; - - /* TCR */ - reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | - (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | - (ARM_LPAE_TCR_RGN_WBWA << 
ARM_LPAE_TCR_ORGN0_SHIFT); - - switch (ARM_LPAE_GRANULE(data)) { - case SZ_4K: - reg |= ARM_LPAE_TCR_TG0_4K; - break; - case SZ_16K: - reg |= ARM_LPAE_TCR_TG0_16K; - break; - case SZ_64K: - reg |= ARM_LPAE_TCR_TG0_64K; - break; - } - - switch (cfg->oas) { - case 32: - reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT); - break; - case 36: - reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT); - break; - case 40: - reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT); - break; - case 42: - reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT); - break; - case 44: - reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT); - break; - case 48: - reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT); - break; - default: - goto out_free_data; - } - - reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; - - /* Disable speculative walks through TTBR1 */ - reg |= ARM_LPAE_TCR_EPD1; - cfg->arm_lpae_s1_cfg.tcr = reg; - - /* MAIRs */ - reg = (ARM_LPAE_MAIR_ATTR_NC - << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) | - (ARM_LPAE_MAIR_ATTR_WBRWA - << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) | - (ARM_LPAE_MAIR_ATTR_DEVICE - << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)); - - cfg->arm_lpae_s1_cfg.mair[0] = reg; - cfg->arm_lpae_s1_cfg.mair[1] = 0; - - /* Just to be sure */ - data->cleanup = false; - data->page_count = 0; - - /* Looking good; allocate a pgd */ - data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, data); - if (!data->pgd) - goto out_free_data; - - /* Ensure the empty pgd is visible before any actual TTBR write */ - smp_wmb(); - - /* TTBRs */ - /* Xen: virt_to_phys is not suitable here use page_to_maddr instead */ - cfg->arm_lpae_s1_cfg.ttbr[0] = page_to_maddr(data->pgd); - cfg->arm_lpae_s1_cfg.ttbr[1] = 0; - return &data->iop; + u64 reg; + struct arm_lpae_io_pgtable *data; + + if ( cfg->quirks & ~IO_PGTABLE_QUIRK_ARM_NS ) + return NULL; + + data = arm_lpae_alloc_pgtable(cfg); + if ( !data ) + return NULL; + + /* TCR */ + reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | + (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | + (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); + + switch (ARM_LPAE_GRANULE(data)) + { + case SZ_4K: + reg |= ARM_LPAE_TCR_TG0_4K; + break; + case SZ_16K: + reg |= ARM_LPAE_TCR_TG0_16K; + break; + case SZ_64K: + reg |= ARM_LPAE_TCR_TG0_64K; + break; + } + + switch (cfg->oas) + { + case 32: + reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT); + break; + case 36: + reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT); + break; + case 40: + reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT); + break; + case 42: + reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT); + break; + case 44: + reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT); + break; + case 48: + reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT); + break; + default: + goto out_free_data; + } + + reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; + + /* Disable speculative walks through TTBR1 */ + reg |= ARM_LPAE_TCR_EPD1; + cfg->arm_lpae_s1_cfg.tcr = reg; + + /* MAIRs */ + reg = (ARM_LPAE_MAIR_ATTR_NC + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) | + (ARM_LPAE_MAIR_ATTR_WBRWA + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) | + (ARM_LPAE_MAIR_ATTR_DEVICE + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)); + + cfg->arm_lpae_s1_cfg.mair[0] = reg; + cfg->arm_lpae_s1_cfg.mair[1] = 0; + + /* Just to be sure */ + data->cleanup = false; + data->page_count = 0; 
+ + /* Looking good; allocate a pgd */ + data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, data); + if ( !data->pgd ) + goto out_free_data; + + /* Ensure the empty pgd is visible before any actual TTBR write */ + smp_wmb(); + + /* TTBRs */ + /* Xen: virt_to_phys is not suitable here use page_to_maddr instead */ + cfg->arm_lpae_s1_cfg.ttbr[0] = page_to_maddr(data->pgd); + cfg->arm_lpae_s1_cfg.ttbr[1] = 0; + return &data->iop; out_free_data: - kfree(data); - return NULL; + kfree(data); + return NULL; } #if 0 /* Xen: Not needed */ @@ -1082,19 +1111,20 @@ out_free_data: static struct io_pgtable * arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) { - struct io_pgtable *iop; + struct io_pgtable *iop; - if (cfg->ias > 32 || cfg->oas > 40) - return NULL; + if ( cfg->ias > 32 || cfg->oas > 40 ) + return NULL; - cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); - iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie); - if (iop) { - cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE; - cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff; - } + cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); + iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie); + if ( iop ) + { + cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE; + cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff; + } - return iop; + return iop; } #if 0 /* Xen: Not needed */ @@ -1116,8 +1146,8 @@ arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) #endif struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = { - .alloc = arm_64_lpae_alloc_pgtable_s1, - .free = arm_lpae_free_pgtable, + .alloc = arm_64_lpae_alloc_pgtable_s1, + .free = arm_lpae_free_pgtable, }; #if 0 /* Xen: Not needed */ @@ -1128,8 +1158,8 @@ struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = { #endif struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = { - .alloc = arm_32_lpae_alloc_pgtable_s1, - .free = arm_lpae_free_pgtable, + .alloc = arm_32_lpae_alloc_pgtable_s1, + .free = arm_lpae_free_pgtable, }; #if 0 /* Xen: Not needed */ @@ -1148,184 +1178,191 @@ static struct io_pgtable_cfg *cfg_cookie; static void dummy_tlb_flush_all(void *cookie) { - WARN_ON(cookie != cfg_cookie); + WARN_ON(cookie != cfg_cookie); } -static void dummy_tlb_add_flush(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void dummy_tlb_add_flush(unsigned long iova, size_t size, size_t granule, + bool leaf, void *cookie) { - WARN_ON(cookie != cfg_cookie); - WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); + WARN_ON(cookie != cfg_cookie); + WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); } static void dummy_tlb_sync(void *cookie) { - WARN_ON(cookie != cfg_cookie); + WARN_ON(cookie != cfg_cookie); } static struct iommu_gather_ops dummy_tlb_ops __initdata = { - .tlb_flush_all = dummy_tlb_flush_all, - .tlb_add_flush = dummy_tlb_add_flush, - .tlb_sync = dummy_tlb_sync, + .tlb_flush_all = dummy_tlb_flush_all, + .tlb_add_flush = dummy_tlb_add_flush, + .tlb_sync = dummy_tlb_sync, }; static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) { - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); - struct io_pgtable_cfg *cfg = &data->iop.cfg; - - pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n", - cfg->pgsize_bitmap, cfg->ias); - pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n", - data->levels, data->pgd_size, data->pg_shift, - data->bits_per_level, data->pgd); + struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + struct io_pgtable_cfg *cfg = &data->iop.cfg; + + pr_err("cfg: 
pgsize_bitmap 0x%lx, ias %u-bit\n", cfg->pgsize_bitmap, + cfg->ias); + pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, " + "pgd @ %p\n", + data->levels, data->pgd_size, data->pg_shift, data->bits_per_level, + data->pgd); } -#define __FAIL(ops, i) ({ \ - WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \ - arm_lpae_dump_ops(ops); \ - selftest_running = false; \ - -EFAULT; \ -}) +#define __FAIL(ops, i) \ + ({ \ + WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \ + arm_lpae_dump_ops(ops); \ + selftest_running = false; \ + -EFAULT; \ + }) static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) { - static const enum io_pgtable_fmt fmts[] = { - ARM_64_LPAE_S1, - ARM_64_LPAE_S2, - }; - - int i, j; - unsigned long iova; - size_t size; - struct io_pgtable_ops *ops; - - selftest_running = true; - - for (i = 0; i < ARRAY_SIZE(fmts); ++i) { - cfg_cookie = cfg; - ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg); - if (!ops) { - pr_err("selftest: failed to allocate io pgtable ops\n"); - return -ENOMEM; - } - - /* - * Initial sanity checks. - * Empty page tables shouldn't provide any translations. - */ - if (ops->iova_to_phys(ops, 42)) - return __FAIL(ops, i); - - if (ops->iova_to_phys(ops, SZ_1G + 42)) - return __FAIL(ops, i); - - if (ops->iova_to_phys(ops, SZ_2G + 42)) - return __FAIL(ops, i); - - /* - * Distinct mappings of different granule sizes. - */ - iova = 0; - j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); - while (j != BITS_PER_LONG) { - size = 1UL << j; - - if (ops->map(ops, iova, iova, size, IOMMU_READ | - IOMMU_WRITE | - IOMMU_NOEXEC | - IOMMU_CACHE)) - return __FAIL(ops, i); - - /* Overlapping mappings */ - if (!ops->map(ops, iova, iova + size, size, - IOMMU_READ | IOMMU_NOEXEC)) - return __FAIL(ops, i); - - if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) - return __FAIL(ops, i); - - iova += SZ_1G; - j++; - j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); - } - - /* Partial unmap */ - size = 1UL << __ffs(cfg->pgsize_bitmap); - if (ops->unmap(ops, SZ_1G + size, size) != size) - return __FAIL(ops, i); - - /* Remap of partial unmap */ - if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ)) - return __FAIL(ops, i); - - if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42)) - return __FAIL(ops, i); - - /* Full unmap */ - iova = 0; - j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); - while (j != BITS_PER_LONG) { - size = 1UL << j; - - if (ops->unmap(ops, iova, size) != size) - return __FAIL(ops, i); - - if (ops->iova_to_phys(ops, iova + 42)) - return __FAIL(ops, i); - - /* Remap full block */ - if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) - return __FAIL(ops, i); - - if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) - return __FAIL(ops, i); - - iova += SZ_1G; - j++; - j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); - } - - free_io_pgtable_ops(ops); - } - - selftest_running = false; - return 0; + static const enum io_pgtable_fmt fmts[] = { + ARM_64_LPAE_S1, + ARM_64_LPAE_S2, + }; + + int i, j; + unsigned long iova; + size_t size; + struct io_pgtable_ops *ops; + + selftest_running = true; + + for ( i = 0; i < ARRAY_SIZE(fmts); ++i ) + { + cfg_cookie = cfg; + ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg); + if ( !ops ) + { + pr_err("selftest: failed to allocate io pgtable ops\n"); + return -ENOMEM; + } + + /* + * Initial sanity checks. + * Empty page tables shouldn't provide any translations. 
+ */ + if ( ops->iova_to_phys(ops, 42) ) + return __FAIL(ops, i); + + if ( ops->iova_to_phys(ops, SZ_1G + 42) ) + return __FAIL(ops, i); + + if ( ops->iova_to_phys(ops, SZ_2G + 42) ) + return __FAIL(ops, i); + + /* + * Distinct mappings of different granule sizes. + */ + iova = 0; + j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); + while ( j != BITS_PER_LONG ) + { + size = 1UL << j; + + if ( ops->map(ops, iova, iova, size, + IOMMU_READ | IOMMU_WRITE | IOMMU_NOEXEC | + IOMMU_CACHE) ) + return __FAIL(ops, i); + + /* Overlapping mappings */ + if ( !ops->map(ops, iova, iova + size, size, + IOMMU_READ | IOMMU_NOEXEC) ) + return __FAIL(ops, i); + + if ( ops->iova_to_phys(ops, iova + 42) != (iova + 42) ) + return __FAIL(ops, i); + + iova += SZ_1G; + j++; + j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); + } + + /* Partial unmap */ + size = 1UL << __ffs(cfg->pgsize_bitmap); + if ( ops->unmap(ops, SZ_1G + size, size) != size ) + return __FAIL(ops, i); + + /* Remap of partial unmap */ + if ( ops->map(ops, SZ_1G + size, size, size, IOMMU_READ) ) + return __FAIL(ops, i); + + if ( ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42) ) + return __FAIL(ops, i); + + /* Full unmap */ + iova = 0; + j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); + while ( j != BITS_PER_LONG ) + { + size = 1UL << j; + + if ( ops->unmap(ops, iova, size) != size ) + return __FAIL(ops, i); + + if ( ops->iova_to_phys(ops, iova + 42) ) + return __FAIL(ops, i); + + /* Remap full block */ + if ( ops->map(ops, iova, iova, size, IOMMU_WRITE) ) + return __FAIL(ops, i); + + if ( ops->iova_to_phys(ops, iova + 42) != (iova + 42) ) + return __FAIL(ops, i); + + iova += SZ_1G; + j++; + j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); + } + + free_io_pgtable_ops(ops); + } + + selftest_running = false; + return 0; } static int __init arm_lpae_do_selftests(void) { - static const unsigned long pgsize[] = { - SZ_4K | SZ_2M | SZ_1G, - SZ_16K | SZ_32M, - SZ_64K | SZ_512M, - }; - - static const unsigned int ias[] = { - 32, 36, 40, 42, 44, 48, - }; - - int i, j, pass = 0, fail = 0; - struct io_pgtable_cfg cfg = { - .tlb = &dummy_tlb_ops, - .oas = 48, - }; - - for (i = 0; i < ARRAY_SIZE(pgsize); ++i) { - for (j = 0; j < ARRAY_SIZE(ias); ++j) { - cfg.pgsize_bitmap = pgsize[i]; - cfg.ias = ias[j]; - pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n", - pgsize[i], ias[j]); - if (arm_lpae_run_tests(&cfg)) - fail++; - else - pass++; - } - } - - pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail); - return fail ? -EFAULT : 0; + static const unsigned long pgsize[] = { + SZ_4K | SZ_2M | SZ_1G, + SZ_16K | SZ_32M, + SZ_64K | SZ_512M, + }; + + static const unsigned int ias[] = { + 32, 36, 40, 42, 44, 48, + }; + + int i, j, pass = 0, fail = 0; + struct io_pgtable_cfg cfg = { + .tlb = &dummy_tlb_ops, + .oas = 48, + }; + + for ( i = 0; i < ARRAY_SIZE(pgsize); ++i ) + { + for ( j = 0; j < ARRAY_SIZE(ias); ++j ) + { + cfg.pgsize_bitmap = pgsize[i]; + cfg.ias = ias[j]; + pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n", pgsize[i], + ias[j]); + if ( arm_lpae_run_tests(&cfg) ) + fail++; + else + pass++; + } + } + + pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail); + return fail ? 
-EFAULT : 0; } subsys_initcall(arm_lpae_do_selftests); #endif diff --git a/xen/drivers/passthrough/arm/io-pgtable.c b/xen/drivers/passthrough/arm/io-pgtable.c index e25d731016..e535b58b27 100644 --- a/xen/drivers/passthrough/arm/io-pgtable.c +++ b/xen/drivers/passthrough/arm/io-pgtable.c @@ -31,46 +31,46 @@ /* Xen: Just compile what we exactly want. */ #define CONFIG_IOMMU_IO_PGTABLE_LPAE -static const struct io_pgtable_init_fns * -io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = { +static const struct io_pgtable_init_fns + *io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = { #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE - [ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns, + [ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns, #if 0 /* Xen: Not needed */ [ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns, #endif - [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns, + [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns, #if 0 /* Xen: Not needed */ [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns, #endif #endif #ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S - [ARM_V7S] = &io_pgtable_arm_v7s_init_fns, + [ARM_V7S] = &io_pgtable_arm_v7s_init_fns, #endif }; struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, - struct io_pgtable_cfg *cfg, - void *cookie) + struct io_pgtable_cfg *cfg, + void *cookie) { - struct io_pgtable *iop; - const struct io_pgtable_init_fns *fns; + struct io_pgtable *iop; + const struct io_pgtable_init_fns *fns; - if (fmt >= IO_PGTABLE_NUM_FMTS) - return NULL; + if ( fmt >= IO_PGTABLE_NUM_FMTS ) + return NULL; - fns = io_pgtable_init_table[fmt]; - if (!fns) - return NULL; + fns = io_pgtable_init_table[fmt]; + if ( !fns ) + return NULL; - iop = fns->alloc(cfg, cookie); - if (!iop) - return NULL; + iop = fns->alloc(cfg, cookie); + if ( !iop ) + return NULL; - iop->fmt = fmt; - iop->cookie = cookie; - iop->cfg = *cfg; + iop->fmt = fmt; + iop->cookie = cookie; + iop->cfg = *cfg; - return &iop->ops; + return &iop->ops; } /* @@ -79,13 +79,13 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, */ void free_io_pgtable_ops(struct io_pgtable_ops *ops, struct page_info *page) { - struct io_pgtable *iop; + struct io_pgtable *iop; - if (!ops) - return; + if ( !ops ) + return; - iop = container_of(ops, struct io_pgtable, ops); - io_pgtable_tlb_flush_all(iop); - iop->cookie = NULL; - io_pgtable_init_table[iop->fmt]->free(iop, page); + iop = container_of(ops, struct io_pgtable, ops); + io_pgtable_tlb_flush_all(iop); + iop->cookie = NULL; + io_pgtable_init_table[iop->fmt]->free(iop, page); } diff --git a/xen/drivers/passthrough/arm/iommu.c b/xen/drivers/passthrough/arm/iommu.c index 7c1cdf4e86..ae811a5aeb 100644 --- a/xen/drivers/passthrough/arm/iommu.c +++ b/xen/drivers/passthrough/arm/iommu.c @@ -34,7 +34,8 @@ void __init iommu_set_ops(const struct iommu_ops *ops) if ( iommu_ops && iommu_ops != ops ) { - printk("WARNING: Cannot set IOMMU ops, already set to a different value\n"); + printk("WARNING: Cannot set IOMMU ops, already set to a different " + "value\n"); return; } @@ -47,14 +48,14 @@ int __init iommu_hardware_setup(void) int rc; unsigned int num_iommus = 0; - dt_for_each_device_node(dt_host, np) + dt_for_each_device_node (dt_host, np) { rc = device_init(np, DEVICE_IOMMU, NULL); if ( !rc ) num_iommus++; } - return ( num_iommus > 0 ) ? 0 : -ENODEV; + return (num_iommus > 0) ? 
0 : -ENODEV; } void __hwdom_init arch_iommu_check_autotranslated_hwdom(struct domain *d) diff --git a/xen/drivers/passthrough/arm/ipmmu-vmsa-plat.c b/xen/drivers/passthrough/arm/ipmmu-vmsa-plat.c index a83438282d..50cc176e49 100644 --- a/xen/drivers/passthrough/arm/ipmmu-vmsa-plat.c +++ b/xen/drivers/passthrough/arm/ipmmu-vmsa-plat.c @@ -31,49 +31,50 @@ static void __iomem *rcar_sysc_base = NULL; /* SYSC MMIO range */ -#define RCAR_SYSC_BASE 0xe6180000 -#define RCAR_SYSC_SIZE 0x400 +#define RCAR_SYSC_BASE 0xe6180000 +#define RCAR_SYSC_SIZE 0x400 /* * These power domain indices match the numbers of the interrupt bits * representing the power areas in the various Interrupt Registers * (e.g. SYSCISR, Interrupt Status Register) */ -#define RCAR_GEN3_PD_A3VP 9 -#define RCAR_GEN3_PD_A3VC 14 -#define RCAR_GEN3_PD_A3IR 24 +#define RCAR_GEN3_PD_A3VP 9 +#define RCAR_GEN3_PD_A3VC 14 +#define RCAR_GEN3_PD_A3IR 24 /* Always-on power area */ -#define RCAR_GEN3_PD_ALWAYS_ON 32 +#define RCAR_GEN3_PD_ALWAYS_ON 32 /* SYSC Common */ -#define SYSCSR 0x00 /* SYSC Status Register */ -#define SYSCISR 0x04 /* Interrupt Status Register */ -#define SYSCISCR 0x08 /* Interrupt Status Clear Register */ -#define SYSCIER 0x0c /* Interrupt Enable Register */ -#define SYSCIMR 0x10 /* Interrupt Mask Register */ +#define SYSCSR 0x00 /* SYSC Status Register */ +#define SYSCISR 0x04 /* Interrupt Status Register */ +#define SYSCISCR 0x08 /* Interrupt Status Clear Register */ +#define SYSCIER 0x0c /* Interrupt Enable Register */ +#define SYSCIMR 0x10 /* Interrupt Mask Register */ /* SYSC Status Register */ -#define SYSCSR_PONENB 1 /* Ready for power resume requests */ +#define SYSCSR_PONENB 1 /* Ready for power resume requests */ /* Power Control Register Offsets inside the register block for each domain */ -#define PWRSR_OFFS 0x00 /* Power Status Register */ -#define PWRONCR_OFFS 0x0c /* Power Resume Control Register */ -#define PWRER_OFFS 0x14 /* Power Shutoff/Resume Error */ +#define PWRSR_OFFS 0x00 /* Power Status Register */ +#define PWRONCR_OFFS 0x0c /* Power Resume Control Register */ +#define PWRER_OFFS 0x14 /* Power Shutoff/Resume Error */ -#define SYSCSR_RETRIES 1000 -#define SYSCSR_DELAY_US 10 +#define SYSCSR_RETRIES 1000 +#define SYSCSR_DELAY_US 10 -#define PWRER_RETRIES 1000 -#define PWRER_DELAY_US 10 +#define PWRER_RETRIES 1000 +#define PWRER_DELAY_US 10 -#define SYSCISR_RETRIES 1000 -#define SYSCISR_DELAY_US 10 +#define SYSCISR_RETRIES 1000 +#define SYSCISR_DELAY_US 10 -struct rcar_sysc_ch { - const char *name; - u16 chan_offs; /* Offset of PWRSR register for this area */ - u8 chan_bit; /* Bit in PWR* (except for PWRUP in PWRSR) */ - u8 isr_bit; /* Bit in SYSCI*R */ +struct rcar_sysc_ch +{ + const char *name; + u16 chan_offs; /* Offset of PWRSR register for this area */ + u8 chan_bit; /* Bit in PWR* (except for PWRUP in PWRSR) */ + u8 isr_bit; /* Bit in SYSCI*R */ }; /* @@ -81,136 +82,139 @@ struct rcar_sysc_ch { * we don't care at all. But some of them are located in other domains * and must be turned on once at boot. * Hopefully, the each of domains we are dealing with within this file - * (A3VP, A3VP, A3IR) is identically configured across all SoCs (H3, M3 and M3N). - * This allow us not to introduce support for each SoC separately. + * (A3VP, A3VP, A3IR) is identically configured across all SoCs (H3, M3 and + * M3N). This allow us not to introduce support for each SoC separately. 
*/ static const struct rcar_sysc_ch rcar_sysc_chs[3] = { - { - .name = "A3VP", - .chan_offs = 0x340, - .chan_bit = 0, - .isr_bit = RCAR_GEN3_PD_A3VP, - }, - { - .name = "A3VC", - .chan_offs = 0x380, - .chan_bit = 0, - .isr_bit = RCAR_GEN3_PD_A3VC, - }, - { - .name = "A3IR", - .chan_offs = 0x180, - .chan_bit = 0, - .isr_bit = RCAR_GEN3_PD_A3IR, - }, + { + .name = "A3VP", + .chan_offs = 0x340, + .chan_bit = 0, + .isr_bit = RCAR_GEN3_PD_A3VP, + }, + { + .name = "A3VC", + .chan_offs = 0x380, + .chan_bit = 0, + .isr_bit = RCAR_GEN3_PD_A3VC, + }, + { + .name = "A3IR", + .chan_offs = 0x180, + .chan_bit = 0, + .isr_bit = RCAR_GEN3_PD_A3IR, + }, }; #define dev_name(dev) dt_node_full_name(dev_to_dt(dev)) static int __init rcar_sysc_init(void) { - u32 syscier, syscimr; - int i; - - /* - * As this function might be called more then once, just return if we - * have already initialized sysc. - */ - if (rcar_sysc_base) - return 0; - - rcar_sysc_base = ioremap_nocache(RCAR_SYSC_BASE, RCAR_SYSC_SIZE); - if (!rcar_sysc_base) { - printk("failed to map SYSC MMIO range\n"); - return -ENOMEM; - } - - syscier = 0; - for (i = 0; i < ARRAY_SIZE(rcar_sysc_chs); i++) - syscier |= BIT(rcar_sysc_chs[i].isr_bit); - - /* - * Mask all interrupt sources to prevent the CPU from receiving them. - * Make sure not to clear reserved bits that were set before. - */ - syscimr = readl(rcar_sysc_base + SYSCIMR); - syscimr |= syscier; - writel(syscimr, rcar_sysc_base + SYSCIMR); - - /* SYSC needs all interrupt sources enabled to control power */ - writel(syscier, rcar_sysc_base + SYSCIER); - - return 0; + u32 syscier, syscimr; + int i; + + /* + * As this function might be called more then once, just return if we + * have already initialized sysc. + */ + if ( rcar_sysc_base ) + return 0; + + rcar_sysc_base = ioremap_nocache(RCAR_SYSC_BASE, RCAR_SYSC_SIZE); + if ( !rcar_sysc_base ) + { + printk("failed to map SYSC MMIO range\n"); + return -ENOMEM; + } + + syscier = 0; + for ( i = 0; i < ARRAY_SIZE(rcar_sysc_chs); i++ ) + syscier |= BIT(rcar_sysc_chs[i].isr_bit); + + /* + * Mask all interrupt sources to prevent the CPU from receiving them. + * Make sure not to clear reserved bits that were set before. 
+ */ + syscimr = readl(rcar_sysc_base + SYSCIMR); + syscimr |= syscier; + writel(syscimr, rcar_sysc_base + SYSCIMR); + + /* SYSC needs all interrupt sources enabled to control power */ + writel(syscier, rcar_sysc_base + SYSCIER); + + return 0; } static bool __init rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch) { - unsigned int status; + unsigned int status; - status = readl(rcar_sysc_base + sysc_ch->chan_offs + PWRSR_OFFS); - if (status & BIT(sysc_ch->chan_bit)) - return true; + status = readl(rcar_sysc_base + sysc_ch->chan_offs + PWRSR_OFFS); + if ( status & BIT(sysc_ch->chan_bit) ) + return true; - return false; + return false; } static int __init rcar_sysc_power_on(const struct rcar_sysc_ch *sysc_ch) { - unsigned int status; - int ret = 0, i, j; - - writel(BIT(sysc_ch->isr_bit), rcar_sysc_base + SYSCISCR); - - /* Submit power resume request until it was accepted */ - for (i = 0; i < PWRER_RETRIES; i++) { - - /* Wait until SYSC is ready to accept a power request */ - for (j = 0; j < SYSCSR_RETRIES; j++) { - if (readl(rcar_sysc_base + SYSCSR) & BIT(SYSCSR_PONENB)) - break; - udelay(SYSCSR_DELAY_US); - } - - if (j == SYSCSR_RETRIES) - return -EAGAIN; - - /* Submit power resume request */ - writel(BIT(sysc_ch->chan_bit), - rcar_sysc_base + sysc_ch->chan_offs + PWRONCR_OFFS); - - status = readl(rcar_sysc_base + sysc_ch->chan_offs + PWRER_OFFS); - if (!(status & BIT(sysc_ch->chan_bit))) - break; - udelay(PWRER_DELAY_US); - } - - if (i == PWRER_RETRIES) - return -EIO; - - /* Wait until the power resume request has completed */ - for (i = 0; i < SYSCISR_RETRIES; i++) { - if (readl(rcar_sysc_base + SYSCISR) & BIT(sysc_ch->isr_bit)) - break; - udelay(SYSCISR_DELAY_US); - } - - if (i == SYSCISR_RETRIES) - ret = -EIO; - - writel(BIT(sysc_ch->isr_bit), rcar_sysc_base + SYSCISCR); - - return ret; + unsigned int status; + int ret = 0, i, j; + + writel(BIT(sysc_ch->isr_bit), rcar_sysc_base + SYSCISCR); + + /* Submit power resume request until it was accepted */ + for ( i = 0; i < PWRER_RETRIES; i++ ) + { + /* Wait until SYSC is ready to accept a power request */ + for ( j = 0; j < SYSCSR_RETRIES; j++ ) + { + if ( readl(rcar_sysc_base + SYSCSR) & BIT(SYSCSR_PONENB) ) + break; + udelay(SYSCSR_DELAY_US); + } + + if ( j == SYSCSR_RETRIES ) + return -EAGAIN; + + /* Submit power resume request */ + writel(BIT(sysc_ch->chan_bit), + rcar_sysc_base + sysc_ch->chan_offs + PWRONCR_OFFS); + + status = readl(rcar_sysc_base + sysc_ch->chan_offs + PWRER_OFFS); + if ( !(status & BIT(sysc_ch->chan_bit)) ) + break; + udelay(PWRER_DELAY_US); + } + + if ( i == PWRER_RETRIES ) + return -EIO; + + /* Wait until the power resume request has completed */ + for ( i = 0; i < SYSCISR_RETRIES; i++ ) + { + if ( readl(rcar_sysc_base + SYSCISR) & BIT(sysc_ch->isr_bit) ) + break; + udelay(SYSCISR_DELAY_US); + } + + if ( i == SYSCISR_RETRIES ) + ret = -EIO; + + writel(BIT(sysc_ch->isr_bit), rcar_sysc_base + SYSCISCR); + + return ret; } static uint32_t ipmmu_get_mmu_pd(struct dt_device_node *np) { - struct dt_phandle_args pd_spec; + struct dt_phandle_args pd_spec; - if (dt_parse_phandle_with_args(np, "power-domains", "#power-domain-cells", - 0, &pd_spec)) - return -ENODEV; + if ( dt_parse_phandle_with_args(np, "power-domains", "#power-domain-cells", + 0, &pd_spec) ) + return -ENODEV; - return pd_spec.args[0]; + return pd_spec.args[0]; } /* @@ -221,83 +225,84 @@ static uint32_t ipmmu_get_mmu_pd(struct dt_device_node *np) */ static int __init ipmmu_power_on(struct dt_device_node *np) { - int i, pd, ret = -ENODEV; - - pd = 
ipmmu_get_mmu_pd(np); - if (pd < 0 || pd == RCAR_GEN3_PD_ALWAYS_ON) - return 0; - - rcar_sysc_init(); - - for (i = 0; i < ARRAY_SIZE(rcar_sysc_chs); i++) { - if (rcar_sysc_chs[i].isr_bit != pd) - continue; - - if (!rcar_sysc_power_is_off(&rcar_sysc_chs[i])) { - printk("ipmmu: %s: %s domain is already powered on\n", - dev_name(&np->dev), rcar_sysc_chs[i].name); - return 0; - } - - ret = rcar_sysc_power_on(&rcar_sysc_chs[i]); - if (ret) { - printk("ipmmu: %s: failed to power on %s domain\n", - dev_name(&np->dev), rcar_sysc_chs[i].name); - break; - } - - printk("ipmmu: %s: powered on %s domain\n", dev_name(&np->dev), - rcar_sysc_chs[i].name); - return 0; - } - - return ret; + int i, pd, ret = -ENODEV; + + pd = ipmmu_get_mmu_pd(np); + if ( pd < 0 || pd == RCAR_GEN3_PD_ALWAYS_ON ) + return 0; + + rcar_sysc_init(); + + for ( i = 0; i < ARRAY_SIZE(rcar_sysc_chs); i++ ) + { + if ( rcar_sysc_chs[i].isr_bit != pd ) + continue; + + if ( !rcar_sysc_power_is_off(&rcar_sysc_chs[i]) ) + { + printk("ipmmu: %s: %s domain is already powered on\n", + dev_name(&np->dev), rcar_sysc_chs[i].name); + return 0; + } + + ret = rcar_sysc_power_on(&rcar_sysc_chs[i]); + if ( ret ) + { + printk("ipmmu: %s: failed to power on %s domain\n", + dev_name(&np->dev), rcar_sysc_chs[i].name); + break; + } + + printk("ipmmu: %s: powered on %s domain\n", dev_name(&np->dev), + rcar_sysc_chs[i].name); + return 0; + } + + return ret; } /* PRR MMIO range */ -#define PRR_BASE 0xfff00044 -#define PRR_SIZE 0x4 +#define PRR_BASE 0xfff00044 +#define PRR_SIZE 0x4 -#define RCAR_PRODUCT_CUT_MASK 0x00007fff -#define RCAR_PRODUCT_H3_CUT_VER30 0x00004f20 +#define RCAR_PRODUCT_CUT_MASK 0x00007fff +#define RCAR_PRODUCT_H3_CUT_VER30 0x00004f20 static bool is_soc_h3_es30(void) { - void __iomem *base; - u32 val; - static enum { - UNKNOWN, - DETECTED, - NOTDETECTED - } h3_es30 = UNKNOWN; - - /* Use the flag to avoid checking for the H3 revision more then once */ - switch (h3_es30) { - case DETECTED: - return true; - - case NOTDETECTED: - return false; - - case UNKNOWN: - default: - h3_es30 = NOTDETECTED; - break; - } - - base = ioremap_nocache(PRR_BASE, PRR_SIZE); - if (!base) { - printk("failed to ioremap PRR MMIO\n"); - return false; - } - - val = readl(base); - if ((val & RCAR_PRODUCT_CUT_MASK) == RCAR_PRODUCT_H3_CUT_VER30) - h3_es30 = DETECTED; - - iounmap(base); - - return h3_es30 == DETECTED; + void __iomem *base; + u32 val; + static enum { UNKNOWN, DETECTED, NOTDETECTED } h3_es30 = UNKNOWN; + + /* Use the flag to avoid checking for the H3 revision more then once */ + switch (h3_es30) + { + case DETECTED: + return true; + + case NOTDETECTED: + return false; + + case UNKNOWN: + default: + h3_es30 = NOTDETECTED; + break; + } + + base = ioremap_nocache(PRR_BASE, PRR_SIZE); + if ( !base ) + { + printk("failed to ioremap PRR MMIO\n"); + return false; + } + + val = readl(base); + if ( (val & RCAR_PRODUCT_CUT_MASK) == RCAR_PRODUCT_H3_CUT_VER30 ) + h3_es30 = DETECTED; + + iounmap(base); + + return h3_es30 == DETECTED; } /* @@ -309,29 +314,30 @@ static bool is_soc_h3_es30(void) */ bool ipmmu_is_mmu_tlb_disable_needed(struct dt_device_node *np) { - int i, pd; + int i, pd; - /* W/A is not actual for H3 ES3.0 and M3 any revisions */ - if (is_soc_h3_es30() || - dt_device_is_compatible(np, "renesas,ipmmu-r8a7796")) - return false; + /* W/A is not actual for H3 ES3.0 and M3 any revisions */ + if ( is_soc_h3_es30() || + dt_device_is_compatible(np, "renesas,ipmmu-r8a7796") ) + return false; - pd = ipmmu_get_mmu_pd(np); - if (pd < 0 || pd == 
RCAR_GEN3_PD_ALWAYS_ON) - return false; + pd = ipmmu_get_mmu_pd(np); + if ( pd < 0 || pd == RCAR_GEN3_PD_ALWAYS_ON ) + return false; - /* Actually check among power domains we have already powered on */ - for (i = 0; i < ARRAY_SIZE(rcar_sysc_chs); i++) { - if (rcar_sysc_chs[i].isr_bit == pd) - return true; - } + /* Actually check among power domains we have already powered on */ + for ( i = 0; i < ARRAY_SIZE(rcar_sysc_chs); i++ ) + { + if ( rcar_sysc_chs[i].isr_bit == pd ) + return true; + } - return false; + return false; } int __init ipmmu_preinit(struct dt_device_node *np) { - return ipmmu_power_on(np); + return ipmmu_power_on(np); } /* diff --git a/xen/drivers/passthrough/arm/ipmmu-vmsa.c b/xen/drivers/passthrough/arm/ipmmu-vmsa.c index fcbf7b1a6d..4cc74a1d4a 100644 --- a/xen/drivers/passthrough/arm/ipmmu-vmsa.c +++ b/xen/drivers/passthrough/arm/ipmmu-vmsa.c @@ -42,23 +42,23 @@ */ /* This one came from Linux drivers/iommu/Kconfig */ -#define CONFIG_IPMMU_VMSA_CTX_NUM 8 +#define CONFIG_IPMMU_VMSA_CTX_NUM 8 extern int ipmmu_preinit(struct dt_device_node *np); extern bool ipmmu_is_mmu_tlb_disable_needed(struct dt_device_node *np); /***** Start of Xen specific code *****/ -#define IOMMU_READ (1 << 0) -#define IOMMU_WRITE (1 << 1) -#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ -#define IOMMU_NOEXEC (1 << 3) -#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */ +#define IOMMU_READ (1 << 0) +#define IOMMU_WRITE (1 << 1) +#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ +#define IOMMU_NOEXEC (1 << 3) +#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */ #define __fls(x) (fls(x) - 1) #define __ffs(x) (ffs(x) - 1) -#define IO_PGTABLE_QUIRK_ARM_NS BIT(0) +#define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define ioread32 readl #define iowrite32 writel @@ -66,7 +66,7 @@ extern bool ipmmu_is_mmu_tlb_disable_needed(struct dt_device_node *np); #define dev_info dev_notice #define devm_request_irq(unused, irq, func, flags, name, dev) \ - request_irq(irq, flags, func, name, dev) + request_irq(irq, flags, func, name, dev) /* Alias to Xen device tree helpers */ #define device_node dt_device_node @@ -80,9 +80,9 @@ extern bool ipmmu_is_mmu_tlb_disable_needed(struct dt_device_node *np); /* Xen: Helpers to get device MMIO and IRQs */ struct resource { - u64 addr; - u64 size; - unsigned int type; + u64 addr; + u64 size; + unsigned int type; }; #define resource_size(res) (res)->size; @@ -93,92 +93,98 @@ struct resource #define IORESOURCE_IRQ 1 static struct resource *platform_get_resource(struct platform_device *pdev, - unsigned int type, - unsigned int num) + unsigned int type, + unsigned int num) { - /* - * The resource is only used between 2 calls of platform_get_resource. - * It's quite ugly but it's avoid to add too much code in the part - * imported from Linux - */ - static struct resource res; - int ret = 0; + /* + * The resource is only used between 2 calls of platform_get_resource. + * It's quite ugly but it's avoid to add too much code in the part + * imported from Linux + */ + static struct resource res; + int ret = 0; - res.type = type; + res.type = type; - switch (type) { - case IORESOURCE_MEM: - ret = dt_device_get_address(pdev, num, &res.addr, &res.size); + switch (type) + { + case IORESOURCE_MEM: + ret = dt_device_get_address(pdev, num, &res.addr, &res.size); - return ((ret) ? NULL : &res); + return ((ret) ? 
NULL : &res); - case IORESOURCE_IRQ: - ret = platform_get_irq(pdev, num); - if (ret < 0) - return NULL; + case IORESOURCE_IRQ: + ret = platform_get_irq(pdev, num); + if ( ret < 0 ) + return NULL; - res.addr = ret; - res.size = 1; + res.addr = ret; + res.size = 1; - return &res; + return &res; - default: - return NULL; - } + default: + return NULL; + } } -enum irqreturn { - IRQ_NONE = (0 << 0), - IRQ_HANDLED = (1 << 0), +enum irqreturn +{ + IRQ_NONE = (0 << 0), + IRQ_HANDLED = (1 << 0), }; typedef enum irqreturn irqreturn_t; /* Device logger functions */ -#define dev_print(dev, lvl, fmt, ...) \ - printk(lvl "ipmmu: %s: " fmt, dt_node_full_name(dev_to_dt(dev)), ## __VA_ARGS__) +#define dev_print(dev, lvl, fmt, ...) \ + printk(lvl "ipmmu: %s: " fmt, dt_node_full_name(dev_to_dt(dev)), \ + ##__VA_ARGS__) -#define dev_dbg(dev, fmt, ...) dev_print(dev, XENLOG_DEBUG, fmt, ## __VA_ARGS__) -#define dev_notice(dev, fmt, ...) dev_print(dev, XENLOG_INFO, fmt, ## __VA_ARGS__) -#define dev_warn(dev, fmt, ...) dev_print(dev, XENLOG_WARNING, fmt, ## __VA_ARGS__) -#define dev_err(dev, fmt, ...) dev_print(dev, XENLOG_ERR, fmt, ## __VA_ARGS__) +#define dev_dbg(dev, fmt, ...) dev_print(dev, XENLOG_DEBUG, fmt, ##__VA_ARGS__) +#define dev_notice(dev, fmt, ...) \ + dev_print(dev, XENLOG_INFO, fmt, ##__VA_ARGS__) +#define dev_warn(dev, fmt, ...) \ + dev_print(dev, XENLOG_WARNING, fmt, ##__VA_ARGS__) +#define dev_err(dev, fmt, ...) dev_print(dev, XENLOG_ERR, fmt, ##__VA_ARGS__) -#define dev_err_ratelimited(dev, fmt, ...) \ - dev_print(dev, XENLOG_ERR, fmt, ## __VA_ARGS__) +#define dev_err_ratelimited(dev, fmt, ...) \ + dev_print(dev, XENLOG_ERR, fmt, ##__VA_ARGS__) #define dev_name(dev) dt_node_full_name(dev_to_dt(dev)) /* Alias to Xen allocation helpers */ #define kfree xfree -#define kmalloc(size, flags) _xmalloc(size, sizeof(void *)) -#define kzalloc(size, flags) _xzalloc(size, sizeof(void *)) -#define devm_kzalloc(dev, size, flags) _xzalloc(size, sizeof(void *)) -#define kmalloc_array(size, n, flags) _xmalloc_array(size, sizeof(void *), n) -#define kcalloc(size, n, flags) _xzalloc_array(size, sizeof(void *), n) +#define kmalloc(size, flags) _xmalloc(size, sizeof(void *)) +#define kzalloc(size, flags) _xzalloc(size, sizeof(void *)) +#define devm_kzalloc(dev, size, flags) _xzalloc(size, sizeof(void *)) +#define kmalloc_array(size, n, flags) _xmalloc_array(size, sizeof(void *), n) +#define kcalloc(size, n, flags) _xzalloc_array(size, sizeof(void *), n) static void __iomem *devm_ioremap_resource(struct device *dev, - struct resource *res) + struct resource *res) { - void __iomem *ptr; + void __iomem *ptr; - if (!res || res->type != IORESOURCE_MEM) { - dev_err(dev, "Invalid resource\n"); - return ERR_PTR(-EINVAL); - } + if ( !res || res->type != IORESOURCE_MEM ) + { + dev_err(dev, "Invalid resource\n"); + return ERR_PTR(-EINVAL); + } - ptr = ioremap_nocache(res->addr, res->size); - if (!ptr) { - dev_err(dev, - "ioremap failed (addr 0x%"PRIx64" size 0x%"PRIx64")\n", - res->addr, res->size); - return ERR_PTR(-ENOMEM); - } + ptr = ioremap_nocache(res->addr, res->size); + if ( !ptr ) + { + dev_err(dev, "ioremap failed (addr 0x%" PRIx64 " size 0x%" PRIx64 ")\n", + res->addr, res->size); + return ERR_PTR(-ENOMEM); + } - return ptr; + return ptr; } /* Xen doesn't handle IOMMU fault */ -#define report_iommu_fault(...) 1 +#define report_iommu_fault(...) 
1 #define MODULE_DEVICE_TABLE(type, name) #define module_param_named(name, value, type, perm) @@ -187,19 +193,20 @@ static void __iomem *devm_ioremap_resource(struct device *dev, /* Xen: Dummy iommu_domain */ struct iommu_domain { - atomic_t ref; - /* Used to link iommu_domain contexts for a same domain. - * There is at least one per-IPMMU to used by the domain. - * */ - struct list_head list; + atomic_t ref; + /* Used to link iommu_domain contexts for a same domain. + * There is at least one per-IPMMU to used by the domain. + * */ + struct list_head list; }; /* Xen: Describes informations required for a Xen domain */ -struct ipmmu_vmsa_xen_domain { - spinlock_t lock; - /* List of context (i.e iommu_domain) associated to this domain */ - struct list_head contexts; - struct iommu_domain *base_context; +struct ipmmu_vmsa_xen_domain +{ + spinlock_t lock; + /* List of context (i.e iommu_domain) associated to this domain */ + struct list_head contexts; + struct iommu_domain *base_context; }; /* @@ -208,9 +215,10 @@ struct ipmmu_vmsa_xen_domain { * On Linux the dev->archdata.iommu only stores the arch specific information, * but, on Xen, we also have to store the iommu domain. */ -struct ipmmu_vmsa_xen_device { - struct iommu_domain *domain; - struct ipmmu_vmsa_archdata *archdata; +struct ipmmu_vmsa_xen_device +{ + struct iommu_domain *domain; + struct ipmmu_vmsa_archdata *archdata; }; #define dev_iommu(dev) ((struct ipmmu_vmsa_xen_device *)dev->archdata.iommu) @@ -222,80 +230,86 @@ struct ipmmu_vmsa_xen_device { #define IPMMU_PER_DEV_MAX 4 -struct ipmmu_features { - bool use_ns_alias_offset; - bool has_cache_leaf_nodes; - bool has_eight_ctx; - bool setup_imbuscr; - bool twobit_imttbcr_sl0; - bool imctr_va64; +struct ipmmu_features +{ + bool use_ns_alias_offset; + bool has_cache_leaf_nodes; + bool has_eight_ctx; + bool setup_imbuscr; + bool twobit_imttbcr_sl0; + bool imctr_va64; }; #ifdef CONFIG_RCAR_DDR_BACKUP -struct hw_register { - char *reg_name; - unsigned int reg_offset; - unsigned int reg_data; +struct hw_register +{ + char *reg_name; + unsigned int reg_offset; + unsigned int reg_data; }; #endif -struct ipmmu_vmsa_device { - struct device *dev; - void __iomem *base; - struct list_head list; - const struct ipmmu_features *features; - bool is_leaf; - unsigned int num_utlbs; - unsigned int num_ctx; - spinlock_t lock; /* Protects ctx and domains[] */ - DECLARE_BITMAP(ctx, IPMMU_CTX_MAX); - struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX]; +struct ipmmu_vmsa_device +{ + struct device *dev; + void __iomem *base; + struct list_head list; + const struct ipmmu_features *features; + bool is_leaf; + unsigned int num_utlbs; + unsigned int num_ctx; + spinlock_t lock; /* Protects ctx and domains[] */ + DECLARE_BITMAP(ctx, IPMMU_CTX_MAX); + struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX]; #ifdef CONFIG_RCAR_DDR_BACKUP - struct hw_register *reg_backup[IPMMU_CTX_MAX]; + struct hw_register *reg_backup[IPMMU_CTX_MAX]; #endif #if 0 /* Xen: Not needed */ struct dma_iommu_mapping *mapping; #endif - /* To show whether we have to disable IPMMU TLB cache function */ - bool is_mmu_tlb_disabled; + /* To show whether we have to disable IPMMU TLB cache function */ + bool is_mmu_tlb_disabled; }; -struct ipmmu_vmsa_domain { - /* Cache IPMMUs the master device can be tied to */ - struct ipmmu_vmsa_device *mmus[IPMMU_PER_DEV_MAX]; - unsigned int num_mmus; - struct ipmmu_vmsa_device *root; - struct iommu_domain io_domain; +struct ipmmu_vmsa_domain +{ + /* Cache IPMMUs the master device can be tied to */ + struct 
ipmmu_vmsa_device *mmus[IPMMU_PER_DEV_MAX]; + unsigned int num_mmus; + struct ipmmu_vmsa_device *root; + struct iommu_domain io_domain; - struct io_pgtable_cfg cfg; - struct io_pgtable_ops *iop; + struct io_pgtable_cfg cfg; + struct io_pgtable_ops *iop; - unsigned int context_id; - spinlock_t lock; /* Protects mappings */ + unsigned int context_id; + spinlock_t lock; /* Protects mappings */ - /* Xen: Domain associated to this configuration */ - struct domain *d; + /* Xen: Domain associated to this configuration */ + struct domain *d; }; -struct ipmmu_vmsa_utlb { - /* Cache IPMMU the uTLB is connected to */ - struct ipmmu_vmsa_device *mmu; - unsigned int utlb; +struct ipmmu_vmsa_utlb +{ + /* Cache IPMMU the uTLB is connected to */ + struct ipmmu_vmsa_device *mmu; + unsigned int utlb; }; -struct ipmmu_vmsa_archdata { - /* Cache IPMMUs the master device can be tied to */ - struct ipmmu_vmsa_device *mmus[IPMMU_PER_DEV_MAX]; - unsigned int num_mmus; - struct ipmmu_vmsa_utlb *utlbs; - unsigned int num_utlbs; - struct device *dev; - struct list_head list; +struct ipmmu_vmsa_archdata +{ + /* Cache IPMMUs the master device can be tied to */ + struct ipmmu_vmsa_device *mmus[IPMMU_PER_DEV_MAX]; + unsigned int num_mmus; + struct ipmmu_vmsa_utlb *utlbs; + unsigned int num_utlbs; + struct device *dev; + struct list_head list; #ifdef CONFIG_RCAR_DDR_BACKUP - unsigned int *utlbs_val; - unsigned int *asids_val; + unsigned int *utlbs_val; + unsigned int *asids_val; #endif }; @@ -309,7 +323,7 @@ static LIST_HEAD(ipmmu_slave_devices); static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom) { - return container_of(dom, struct ipmmu_vmsa_domain, io_domain); + return container_of(dom, struct ipmmu_vmsa_domain, io_domain); } /* @@ -337,252 +351,213 @@ static void set_archdata(struct device *dev, struct ipmmu_vmsa_archdata *p) #else static struct ipmmu_vmsa_archdata *to_archdata(struct device *dev) { - return dev_iommu(dev)->archdata; + return dev_iommu(dev)->archdata; } static void set_archdata(struct device *dev, struct ipmmu_vmsa_archdata *p) { - dev_iommu(dev)->archdata = p; + dev_iommu(dev)->archdata = p; } #endif -#define TLB_LOOP_TIMEOUT 100 /* 100us */ +#define TLB_LOOP_TIMEOUT 100 /* 100us */ /* ----------------------------------------------------------------------------- * Registers Definition */ -#define IM_NS_ALIAS_OFFSET 0x800 - -#define IM_CTX_SIZE 0x40 - -#define IMCTR 0x0000 -#define IMCTR_VA64 (1 << 29) -#define IMCTR_TRE (1 << 17) -#define IMCTR_AFE (1 << 16) -#define IMCTR_RTSEL_MASK (3 << 4) -#define IMCTR_RTSEL_SHIFT 4 -#define IMCTR_TREN (1 << 3) -#define IMCTR_INTEN (1 << 2) -#define IMCTR_FLUSH (1 << 1) -#define IMCTR_MMUEN (1 << 0) - -#define IMCAAR 0x0004 - -#define IMTTBCR 0x0008 -#define IMTTBCR_EAE (1 << 31) -#define IMTTBCR_PMB (1 << 30) -#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) -#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) -#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) -#define IMTTBCR_SH1_MASK (3 << 28) -#define IMTTBCR_ORGN1_NC (0 << 26) -#define IMTTBCR_ORGN1_WB_WA (1 << 26) -#define IMTTBCR_ORGN1_WT (2 << 26) -#define IMTTBCR_ORGN1_WB (3 << 26) -#define IMTTBCR_ORGN1_MASK (3 << 26) -#define IMTTBCR_IRGN1_NC (0 << 24) -#define IMTTBCR_IRGN1_WB_WA (1 << 24) -#define IMTTBCR_IRGN1_WT (2 << 24) -#define IMTTBCR_IRGN1_WB (3 << 24) -#define IMTTBCR_IRGN1_MASK (3 << 24) -#define IMTTBCR_TSZ1_MASK (7 << 16) -#define IMTTBCR_TSZ1_SHIFT 16 -#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) -#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) -#define IMTTBCR_SH0_INNER_SHAREABLE 
(3 << 12) -#define IMTTBCR_SH0_MASK (3 << 12) -#define IMTTBCR_ORGN0_NC (0 << 10) -#define IMTTBCR_ORGN0_WB_WA (1 << 10) -#define IMTTBCR_ORGN0_WT (2 << 10) -#define IMTTBCR_ORGN0_WB (3 << 10) -#define IMTTBCR_ORGN0_MASK (3 << 10) -#define IMTTBCR_IRGN0_NC (0 << 8) -#define IMTTBCR_IRGN0_WB_WA (1 << 8) -#define IMTTBCR_IRGN0_WT (2 << 8) -#define IMTTBCR_IRGN0_WB (3 << 8) -#define IMTTBCR_IRGN0_MASK (3 << 8) -#define IMTTBCR_SL0_LVL_2 (0 << 4) -#define IMTTBCR_SL0_LVL_1 (1 << 4) -#define IMTTBCR_TSZ0_MASK (7 << 0) -#define IMTTBCR_TSZ0_SHIFT 0 - -#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6) -#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6) -#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) - -#define IMBUSCR 0x000c -#define IMBUSCR_DVM (1 << 2) -#define IMBUSCR_BUSSEL_SYS (0 << 0) -#define IMBUSCR_BUSSEL_CCI (1 << 0) -#define IMBUSCR_BUSSEL_IMCAAR (2 << 0) -#define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0) -#define IMBUSCR_BUSSEL_MASK (3 << 0) - -#define IMTTLBR0 0x0010 -#define IMTTUBR0 0x0014 -#define IMTTLBR1 0x0018 -#define IMTTUBR1 0x001c - -#define IMTTLBR_MASK 0xFFFFF000 - -#define IMSTR 0x0020 -#define IMSTR_ERRLVL_MASK (3 << 12) -#define IMSTR_ERRLVL_SHIFT 12 -#define IMSTR_ERRCODE_TLB_FORMAT (1 << 8) -#define IMSTR_ERRCODE_ACCESS_PERM (4 << 8) -#define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8) -#define IMSTR_ERRCODE_MASK (7 << 8) -#define IMSTR_MHIT (1 << 4) -#define IMSTR_ABORT (1 << 2) -#define IMSTR_PF (1 << 1) -#define IMSTR_TF (1 << 0) - -#define IMMAIR0 0x0028 -#define IMMAIR1 0x002c -#define IMMAIR_ATTR_MASK 0xff -#define IMMAIR_ATTR_DEVICE 0x04 -#define IMMAIR_ATTR_NC 0x44 -#define IMMAIR_ATTR_WBRWA 0xff -#define IMMAIR_ATTR_SHIFT(n) ((n) << 3) -#define IMMAIR_ATTR_IDX_NC 0 -#define IMMAIR_ATTR_IDX_WBRWA 1 -#define IMMAIR_ATTR_IDX_DEV 2 - -#define IMEAR 0x0030 -#define IMEUAR 0x0034 - -#define IMPCTR 0x0200 -#define IMPSTR 0x0208 -#define IMPEAR 0x020c -#define IMPMBA(n) (0x0280 + ((n) * 4)) -#define IMPMBD(n) (0x02c0 + ((n) * 4)) - -#define IMUCTR(n) ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n)) -#define IMUCTR0(n) (0x0300 + ((n) * 16)) -#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16)) -#define IMUCTR_FIXADDEN (1 << 31) -#define IMUCTR_FIXADD_MASK (0xff << 16) -#define IMUCTR_FIXADD_SHIFT 16 -#define IMUCTR_TTSEL_MMU(n) ((n) << 4) -#define IMUCTR_TTSEL_PMB (8 << 4) -#define IMUCTR_TTSEL_MASK (15 << 4) -#define IMUCTR_FLUSH (1 << 1) -#define IMUCTR_MMUEN (1 << 0) - -#define IMUASID(n) ((n) < 32 ? 
IMUASID0(n) : IMUASID32(n)) -#define IMUASID0(n) (0x0308 + ((n) * 16)) -#define IMUASID32(n) (0x0608 + (((n) - 32) * 16)) -#define IMUASID_ASID8_MASK (0xff << 8) -#define IMUASID_ASID8_SHIFT 8 -#define IMUASID_ASID0_MASK (0xff << 0) -#define IMUASID_ASID0_SHIFT 0 - -#define IMSCTLR 0x0500 -#define IMSCTLR_DISCACHE 0xE0000000 - -#define IMSAUXCTLR 0x0504 -#define IMSAUXCTLR_S2PTE (1 << 3) - +#define IM_NS_ALIAS_OFFSET 0x800 + +#define IM_CTX_SIZE 0x40 + +#define IMCTR 0x0000 +#define IMCTR_VA64 (1 << 29) +#define IMCTR_TRE (1 << 17) +#define IMCTR_AFE (1 << 16) +#define IMCTR_RTSEL_MASK (3 << 4) +#define IMCTR_RTSEL_SHIFT 4 +#define IMCTR_TREN (1 << 3) +#define IMCTR_INTEN (1 << 2) +#define IMCTR_FLUSH (1 << 1) +#define IMCTR_MMUEN (1 << 0) + +#define IMCAAR 0x0004 + +#define IMTTBCR 0x0008 +#define IMTTBCR_EAE (1 << 31) +#define IMTTBCR_PMB (1 << 30) +#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) +#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) +#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) +#define IMTTBCR_SH1_MASK (3 << 28) +#define IMTTBCR_ORGN1_NC (0 << 26) +#define IMTTBCR_ORGN1_WB_WA (1 << 26) +#define IMTTBCR_ORGN1_WT (2 << 26) +#define IMTTBCR_ORGN1_WB (3 << 26) +#define IMTTBCR_ORGN1_MASK (3 << 26) +#define IMTTBCR_IRGN1_NC (0 << 24) +#define IMTTBCR_IRGN1_WB_WA (1 << 24) +#define IMTTBCR_IRGN1_WT (2 << 24) +#define IMTTBCR_IRGN1_WB (3 << 24) +#define IMTTBCR_IRGN1_MASK (3 << 24) +#define IMTTBCR_TSZ1_MASK (7 << 16) +#define IMTTBCR_TSZ1_SHIFT 16 +#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) +#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) +#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) +#define IMTTBCR_SH0_MASK (3 << 12) +#define IMTTBCR_ORGN0_NC (0 << 10) +#define IMTTBCR_ORGN0_WB_WA (1 << 10) +#define IMTTBCR_ORGN0_WT (2 << 10) +#define IMTTBCR_ORGN0_WB (3 << 10) +#define IMTTBCR_ORGN0_MASK (3 << 10) +#define IMTTBCR_IRGN0_NC (0 << 8) +#define IMTTBCR_IRGN0_WB_WA (1 << 8) +#define IMTTBCR_IRGN0_WT (2 << 8) +#define IMTTBCR_IRGN0_WB (3 << 8) +#define IMTTBCR_IRGN0_MASK (3 << 8) +#define IMTTBCR_SL0_LVL_2 (0 << 4) +#define IMTTBCR_SL0_LVL_1 (1 << 4) +#define IMTTBCR_TSZ0_MASK (7 << 0) +#define IMTTBCR_TSZ0_SHIFT 0 + +#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6) +#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6) +#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) + +#define IMBUSCR 0x000c +#define IMBUSCR_DVM (1 << 2) +#define IMBUSCR_BUSSEL_SYS (0 << 0) +#define IMBUSCR_BUSSEL_CCI (1 << 0) +#define IMBUSCR_BUSSEL_IMCAAR (2 << 0) +#define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0) +#define IMBUSCR_BUSSEL_MASK (3 << 0) + +#define IMTTLBR0 0x0010 +#define IMTTUBR0 0x0014 +#define IMTTLBR1 0x0018 +#define IMTTUBR1 0x001c + +#define IMTTLBR_MASK 0xFFFFF000 + +#define IMSTR 0x0020 +#define IMSTR_ERRLVL_MASK (3 << 12) +#define IMSTR_ERRLVL_SHIFT 12 +#define IMSTR_ERRCODE_TLB_FORMAT (1 << 8) +#define IMSTR_ERRCODE_ACCESS_PERM (4 << 8) +#define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8) +#define IMSTR_ERRCODE_MASK (7 << 8) +#define IMSTR_MHIT (1 << 4) +#define IMSTR_ABORT (1 << 2) +#define IMSTR_PF (1 << 1) +#define IMSTR_TF (1 << 0) + +#define IMMAIR0 0x0028 +#define IMMAIR1 0x002c +#define IMMAIR_ATTR_MASK 0xff +#define IMMAIR_ATTR_DEVICE 0x04 +#define IMMAIR_ATTR_NC 0x44 +#define IMMAIR_ATTR_WBRWA 0xff +#define IMMAIR_ATTR_SHIFT(n) ((n) << 3) +#define IMMAIR_ATTR_IDX_NC 0 +#define IMMAIR_ATTR_IDX_WBRWA 1 +#define IMMAIR_ATTR_IDX_DEV 2 + +#define IMEAR 0x0030 +#define IMEUAR 0x0034 + +#define IMPCTR 0x0200 +#define IMPSTR 0x0208 +#define IMPEAR 0x020c +#define IMPMBA(n) (0x0280 + ((n)*4)) +#define IMPMBD(n) (0x02c0 + ((n)*4)) + 
+#define IMUCTR(n) ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n)) +#define IMUCTR0(n) (0x0300 + ((n)*16)) +#define IMUCTR32(n) (0x0600 + (((n)-32) * 16)) +#define IMUCTR_FIXADDEN (1 << 31) +#define IMUCTR_FIXADD_MASK (0xff << 16) +#define IMUCTR_FIXADD_SHIFT 16 +#define IMUCTR_TTSEL_MMU(n) ((n) << 4) +#define IMUCTR_TTSEL_PMB (8 << 4) +#define IMUCTR_TTSEL_MASK (15 << 4) +#define IMUCTR_FLUSH (1 << 1) +#define IMUCTR_MMUEN (1 << 0) + +#define IMUASID(n) ((n) < 32 ? IMUASID0(n) : IMUASID32(n)) +#define IMUASID0(n) (0x0308 + ((n)*16)) +#define IMUASID32(n) (0x0608 + (((n)-32) * 16)) +#define IMUASID_ASID8_MASK (0xff << 8) +#define IMUASID_ASID8_SHIFT 8 +#define IMUASID_ASID0_MASK (0xff << 0) +#define IMUASID_ASID0_SHIFT 0 + +#define IMSCTLR 0x0500 +#define IMSCTLR_DISCACHE 0xE0000000 + +#define IMSAUXCTLR 0x0504 +#define IMSAUXCTLR_S2PTE (1 << 3) #ifdef CONFIG_RCAR_DDR_BACKUP -#define HW_REGISTER_BACKUP_SIZE ARRAY_SIZE(root_pgtable0_reg) +#define HW_REGISTER_BACKUP_SIZE ARRAY_SIZE(root_pgtable0_reg) static struct hw_register root_pgtable0_reg[] = { - {"IMTTLBR0", IMTTLBR0, 0}, - {"IMTTUBR0", IMTTUBR0, 0}, - {"IMTTBCR", IMTTBCR, 0}, - {"IMTTLBR1", IMTTLBR1, 0}, - {"IMTTUBR1", IMTTUBR1, 0}, - {"IMMAIR0", IMMAIR0, 0}, - {"IMMAIR1", IMMAIR1, 0}, - {"IMCTR", IMCTR, 0}, + {"IMTTLBR0", IMTTLBR0, 0}, {"IMTTUBR0", IMTTUBR0, 0}, + {"IMTTBCR", IMTTBCR, 0}, {"IMTTLBR1", IMTTLBR1, 0}, + {"IMTTUBR1", IMTTUBR1, 0}, {"IMMAIR0", IMMAIR0, 0}, + {"IMMAIR1", IMMAIR1, 0}, {"IMCTR", IMCTR, 0}, }; static struct hw_register root_pgtable1_reg[] = { - {"IMTTLBR0", IMTTLBR0, 0}, - {"IMTTUBR0", IMTTUBR0, 0}, - {"IMTTBCR", IMTTBCR, 0}, - {"IMTTLBR1", IMTTLBR1, 0}, - {"IMTTUBR1", IMTTUBR1, 0}, - {"IMMAIR0", IMMAIR0, 0}, - {"IMMAIR1", IMMAIR1, 0}, - {"IMCTR", IMCTR, 0}, + {"IMTTLBR0", IMTTLBR0, 0}, {"IMTTUBR0", IMTTUBR0, 0}, + {"IMTTBCR", IMTTBCR, 0}, {"IMTTLBR1", IMTTLBR1, 0}, + {"IMTTUBR1", IMTTUBR1, 0}, {"IMMAIR0", IMMAIR0, 0}, + {"IMMAIR1", IMMAIR1, 0}, {"IMCTR", IMCTR, 0}, }; static struct hw_register root_pgtable2_reg[] = { - {"IMTTLBR0", IMTTLBR0, 0}, - {"IMTTUBR0", IMTTUBR0, 0}, - {"IMTTBCR", IMTTBCR, 0}, - {"IMTTLBR1", IMTTLBR1, 0}, - {"IMTTUBR1", IMTTUBR1, 0}, - {"IMMAIR0", IMMAIR0, 0}, - {"IMMAIR1", IMMAIR1, 0}, - {"IMCTR", IMCTR, 0}, + {"IMTTLBR0", IMTTLBR0, 0}, {"IMTTUBR0", IMTTUBR0, 0}, + {"IMTTBCR", IMTTBCR, 0}, {"IMTTLBR1", IMTTLBR1, 0}, + {"IMTTUBR1", IMTTUBR1, 0}, {"IMMAIR0", IMMAIR0, 0}, + {"IMMAIR1", IMMAIR1, 0}, {"IMCTR", IMCTR, 0}, }; static struct hw_register root_pgtable3_reg[] = { - {"IMTTLBR0", IMTTLBR0, 0}, - {"IMTTUBR0", IMTTUBR0, 0}, - {"IMTTBCR", IMTTBCR, 0}, - {"IMTTLBR1", IMTTLBR1, 0}, - {"IMTTUBR1", IMTTUBR1, 0}, - {"IMMAIR0", IMMAIR0, 0}, - {"IMMAIR1", IMMAIR1, 0}, - {"IMCTR", IMCTR, 0}, + {"IMTTLBR0", IMTTLBR0, 0}, {"IMTTUBR0", IMTTUBR0, 0}, + {"IMTTBCR", IMTTBCR, 0}, {"IMTTLBR1", IMTTLBR1, 0}, + {"IMTTUBR1", IMTTUBR1, 0}, {"IMMAIR0", IMMAIR0, 0}, + {"IMMAIR1", IMMAIR1, 0}, {"IMCTR", IMCTR, 0}, }; static struct hw_register root_pgtable4_reg[] = { - {"IMTTLBR0", IMTTLBR0, 0}, - {"IMTTUBR0", IMTTUBR0, 0}, - {"IMTTBCR", IMTTBCR, 0}, - {"IMTTLBR1", IMTTLBR1, 0}, - {"IMTTUBR1", IMTTUBR1, 0}, - {"IMMAIR0", IMMAIR0, 0}, - {"IMMAIR1", IMMAIR1, 0}, - {"IMCTR", IMCTR, 0}, + {"IMTTLBR0", IMTTLBR0, 0}, {"IMTTUBR0", IMTTUBR0, 0}, + {"IMTTBCR", IMTTBCR, 0}, {"IMTTLBR1", IMTTLBR1, 0}, + {"IMTTUBR1", IMTTUBR1, 0}, {"IMMAIR0", IMMAIR0, 0}, + {"IMMAIR1", IMMAIR1, 0}, {"IMCTR", IMCTR, 0}, }; static struct hw_register root_pgtable5_reg[] = { - {"IMTTLBR0", IMTTLBR0, 0}, - {"IMTTUBR0", IMTTUBR0, 0}, - {"IMTTBCR", 
IMTTBCR, 0}, - {"IMTTLBR1", IMTTLBR1, 0}, - {"IMTTUBR1", IMTTUBR1, 0}, - {"IMMAIR0", IMMAIR0, 0}, - {"IMMAIR1", IMMAIR1, 0}, - {"IMCTR", IMCTR, 0}, + {"IMTTLBR0", IMTTLBR0, 0}, {"IMTTUBR0", IMTTUBR0, 0}, + {"IMTTBCR", IMTTBCR, 0}, {"IMTTLBR1", IMTTLBR1, 0}, + {"IMTTUBR1", IMTTUBR1, 0}, {"IMMAIR0", IMMAIR0, 0}, + {"IMMAIR1", IMMAIR1, 0}, {"IMCTR", IMCTR, 0}, }; static struct hw_register root_pgtable6_reg[] = { - {"IMTTLBR0", IMTTLBR0, 0}, - {"IMTTUBR0", IMTTUBR0, 0}, - {"IMTTBCR", IMTTBCR, 0}, - {"IMTTLBR1", IMTTLBR1, 0}, - {"IMTTUBR1", IMTTUBR1, 0}, - {"IMMAIR0", IMMAIR0, 0}, - {"IMMAIR1", IMMAIR1, 0}, - {"IMCTR", IMCTR, 0}, + {"IMTTLBR0", IMTTLBR0, 0}, {"IMTTUBR0", IMTTUBR0, 0}, + {"IMTTBCR", IMTTBCR, 0}, {"IMTTLBR1", IMTTLBR1, 0}, + {"IMTTUBR1", IMTTUBR1, 0}, {"IMMAIR0", IMMAIR0, 0}, + {"IMMAIR1", IMMAIR1, 0}, {"IMCTR", IMCTR, 0}, }; static struct hw_register root_pgtable7_reg[] = { - {"IMTTLBR0", IMTTLBR0, 0}, - {"IMTTUBR0", IMTTUBR0, 0}, - {"IMTTBCR", IMTTBCR, 0}, - {"IMTTLBR1", IMTTLBR1, 0}, - {"IMTTUBR1", IMTTUBR1, 0}, - {"IMMAIR0", IMMAIR0, 0}, - {"IMMAIR1", IMMAIR1, 0}, - {"IMCTR", IMCTR, 0}, + {"IMTTLBR0", IMTTLBR0, 0}, {"IMTTUBR0", IMTTUBR0, 0}, + {"IMTTBCR", IMTTBCR, 0}, {"IMTTLBR1", IMTTLBR1, 0}, + {"IMTTUBR1", IMTTUBR1, 0}, {"IMMAIR0", IMMAIR0, 0}, + {"IMMAIR1", IMMAIR1, 0}, {"IMCTR", IMCTR, 0}, }; static struct hw_register *root_pgtable[IPMMU_CTX_MAX] = { - root_pgtable0_reg, - root_pgtable1_reg, - root_pgtable2_reg, - root_pgtable3_reg, - root_pgtable4_reg, - root_pgtable5_reg, - root_pgtable6_reg, - root_pgtable7_reg, + root_pgtable0_reg, root_pgtable1_reg, root_pgtable2_reg, root_pgtable3_reg, + root_pgtable4_reg, root_pgtable5_reg, root_pgtable6_reg, root_pgtable7_reg, }; #endif @@ -592,32 +567,33 @@ static struct hw_register *root_pgtable[IPMMU_CTX_MAX] = { static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu) { - /* Xen: Fix */ - if (!mmu) - return false; + /* Xen: Fix */ + if ( !mmu ) + return false; - if (mmu->features->has_cache_leaf_nodes) - return mmu->is_leaf ? false : true; - else - return true; /* older IPMMU hardware treated as single root */ + if ( mmu->features->has_cache_leaf_nodes ) + return mmu->is_leaf ? 
false : true; + else + return true; /* older IPMMU hardware treated as single root */ } static struct ipmmu_vmsa_device *ipmmu_find_root(struct ipmmu_vmsa_device *leaf) { - struct ipmmu_vmsa_device *mmu = NULL; + struct ipmmu_vmsa_device *mmu = NULL; - if (ipmmu_is_root(leaf)) - return leaf; + if ( ipmmu_is_root(leaf) ) + return leaf; - spin_lock(&ipmmu_devices_lock); + spin_lock(&ipmmu_devices_lock); - list_for_each_entry(mmu, &ipmmu_devices, list) { - if (ipmmu_is_root(mmu)) - break; - } + list_for_each_entry (mmu, &ipmmu_devices, list) + { + if ( ipmmu_is_root(mmu) ) + break; + } - spin_unlock(&ipmmu_devices_lock); - return mmu; + spin_unlock(&ipmmu_devices_lock); + return mmu; } /* ----------------------------------------------------------------------------- @@ -626,34 +602,36 @@ static struct ipmmu_vmsa_device *ipmmu_find_root(struct ipmmu_vmsa_device *leaf) static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset) { - return ioread32(mmu->base + offset); + return ioread32(mmu->base + offset); } static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, - u32 data) + u32 data) { - iowrite32(data, mmu->base + offset); + iowrite32(data, mmu->base + offset); } -static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain, unsigned int reg) +static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain, + unsigned int reg) { - return ipmmu_read(domain->root, domain->context_id * IM_CTX_SIZE + reg); + return ipmmu_read(domain->root, domain->context_id * IM_CTX_SIZE + reg); } -static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain, unsigned int reg, - u32 data) +static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain, + unsigned int reg, u32 data) { - ipmmu_write(domain->root, domain->context_id * IM_CTX_SIZE + reg, data); + ipmmu_write(domain->root, domain->context_id * IM_CTX_SIZE + reg, data); } /* Xen: Write the context for cache IPMMU only. */ -static void ipmmu_ctx_write_cache(struct ipmmu_vmsa_domain *domain, unsigned int reg, - u32 data) +static void ipmmu_ctx_write_cache(struct ipmmu_vmsa_domain *domain, + unsigned int reg, u32 data) { - unsigned int i; + unsigned int i; - for (i = 0; i < domain->num_mmus; i++) - ipmmu_write(domain->mmus[i], domain->context_id * IM_CTX_SIZE + reg, data); + for ( i = 0; i < domain->num_mmus; i++ ) + ipmmu_write(domain->mmus[i], domain->context_id * IM_CTX_SIZE + reg, + data); } /* @@ -661,15 +639,15 @@ static void ipmmu_ctx_write_cache(struct ipmmu_vmsa_domain *domain, unsigned int * that assigned to this Xen domain. 
*/ static __maybe_unused void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain, - unsigned int reg, u32 data) + unsigned int reg, u32 data) { - struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(domain->d)->arch.priv; - struct iommu_domain *io_domain; + struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(domain->d)->arch.priv; + struct iommu_domain *io_domain; - list_for_each_entry(io_domain, &xen_domain->contexts, list) - ipmmu_ctx_write_cache(to_vmsa_domain(io_domain), reg, data); + list_for_each_entry (io_domain, &xen_domain->contexts, list) + ipmmu_ctx_write_cache(to_vmsa_domain(io_domain), reg, data); - ipmmu_ctx_write_root(domain, reg, data); + ipmmu_ctx_write_root(domain, reg, data); } /* ----------------------------------------------------------------------------- @@ -679,91 +657,95 @@ static __maybe_unused void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain, /* Wait for any pending TLB invalidations to complete */ static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain) { - unsigned int count = 0; + unsigned int count = 0; - while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) { - cpu_relax(); - if (++count == TLB_LOOP_TIMEOUT) { - dev_err_ratelimited(domain->root->dev, - "TLB sync timed out -- MMU may be deadlocked\n"); - return; - } - udelay(1); - } + while ( ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH ) + { + cpu_relax(); + if ( ++count == TLB_LOOP_TIMEOUT ) + { + dev_err_ratelimited( + domain->root->dev, + "TLB sync timed out -- MMU may be deadlocked\n"); + return; + } + udelay(1); + } } static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain) { - u32 reg; + u32 reg; - reg = ipmmu_ctx_read_root(domain, IMCTR); - reg |= IMCTR_FLUSH; + reg = ipmmu_ctx_read_root(domain, IMCTR); + reg |= IMCTR_FLUSH; #ifdef CONFIG_RCAR_IPMMU_PGT_IS_SHARED - ipmmu_ctx_write_all(domain, IMCTR, reg); + ipmmu_ctx_write_all(domain, IMCTR, reg); #else - ipmmu_ctx_write_root(domain, IMCTR, reg); + ipmmu_ctx_write_root(domain, IMCTR, reg); #endif - ipmmu_tlb_sync(domain); + ipmmu_tlb_sync(domain); } /* * Enable MMU translation for the microTLB. */ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain, - struct ipmmu_vmsa_utlb *utlb_p) + struct ipmmu_vmsa_utlb *utlb_p) { - struct ipmmu_vmsa_device *mmu = utlb_p->mmu; - unsigned int utlb = utlb_p->utlb; + struct ipmmu_vmsa_device *mmu = utlb_p->mmu; + unsigned int utlb = utlb_p->utlb; - /* - * TODO: Reference-count the microTLB as several bus masters can be - * connected to the same microTLB. - */ + /* + * TODO: Reference-count the microTLB as several bus masters can be + * connected to the same microTLB. + */ - /* TODO: What should we set the ASID to ? */ - ipmmu_write(mmu, IMUASID(utlb), 0); + /* TODO: What should we set the ASID to ? */ + ipmmu_write(mmu, IMUASID(utlb), 0); - /* TODO: Do we need to flush the microTLB ? */ - ipmmu_write(mmu, IMUCTR(utlb), - IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH | - IMUCTR_MMUEN); + /* TODO: Do we need to flush the microTLB ? */ + ipmmu_write(mmu, IMUCTR(utlb), + IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH | + IMUCTR_MMUEN); } /* * Disable MMU translation for the microTLB. 
*/ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, - struct ipmmu_vmsa_utlb *utlb_p) + struct ipmmu_vmsa_utlb *utlb_p) { - struct ipmmu_vmsa_device *mmu = utlb_p->mmu; - unsigned int utlb = utlb_p->utlb; + struct ipmmu_vmsa_device *mmu = utlb_p->mmu; + unsigned int utlb = utlb_p->utlb; - ipmmu_write(mmu, IMUCTR(utlb), 0); + ipmmu_write(mmu, IMUCTR(utlb), 0); } #ifndef CONFIG_RCAR_IPMMU_PGT_IS_SHARED static void ipmmu_tlb_flush_all(void *cookie) { - struct ipmmu_vmsa_domain *domain = cookie; + struct ipmmu_vmsa_domain *domain = cookie; - /* Xen: Just return if context is absent or context_id has non-existent value */ - if (!domain || domain->context_id >= domain->root->num_ctx) - return; + /* Xen: Just return if context is absent or context_id has non-existent + * value */ + if ( !domain || domain->context_id >= domain->root->num_ctx ) + return; - ipmmu_tlb_invalidate(domain); + ipmmu_tlb_invalidate(domain); } -static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, size_t granule, + bool leaf, void *cookie) { - /* The hardware doesn't support selective TLB flush. */ + /* The hardware doesn't support selective TLB flush. */ } static struct iommu_gather_ops ipmmu_gather_ops = { - .tlb_flush_all = ipmmu_tlb_flush_all, - .tlb_add_flush = ipmmu_tlb_add_flush, - .tlb_sync = ipmmu_tlb_flush_all, + .tlb_flush_all = ipmmu_tlb_flush_all, + .tlb_add_flush = ipmmu_tlb_add_flush, + .tlb_sync = ipmmu_tlb_flush_all, }; #endif @@ -772,275 +754,282 @@ static struct iommu_gather_ops ipmmu_gather_ops = { */ static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu, - struct ipmmu_vmsa_domain *domain) + struct ipmmu_vmsa_domain *domain) { - unsigned long flags; - int ret; + unsigned long flags; + int ret; - spin_lock_irqsave(&mmu->lock, flags); + spin_lock_irqsave(&mmu->lock, flags); - ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx); - if (ret != mmu->num_ctx) { - mmu->domains[ret] = domain; - set_bit(ret, mmu->ctx); - } else - ret = -EBUSY; + ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx); + if ( ret != mmu->num_ctx ) + { + mmu->domains[ret] = domain; + set_bit(ret, mmu->ctx); + } + else + ret = -EBUSY; - spin_unlock_irqrestore(&mmu->lock, flags); + spin_unlock_irqrestore(&mmu->lock, flags); - return ret; + return ret; } static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) { #ifndef CONFIG_RCAR_IPMMU_PGT_IS_SHARED - u64 ttbr; - u32 tmp; - int ret; - - /* - * Allocate the page table operations. - * - * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory - * access, Long-descriptor format" that the NStable bit being set in a - * table descriptor will result in the NStable and NS bits of all child - * entries being ignored and considered as being set. The IPMMU seems - * not to comply with this, as it generates a secure access page fault - * if any of the NStable and NS bits isn't set when running in - * non-secure mode. - */ - domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS; - domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, - domain->cfg.ias = domain->root->features->imctr_va64 ? 39 : 32; - domain->cfg.oas = 40; - domain->cfg.tlb = &ipmmu_gather_ops; + u64 ttbr; + u32 tmp; + int ret; + + /* + * Allocate the page table operations. 
+ * + * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory + * access, Long-descriptor format" that the NStable bit being set in a + * table descriptor will result in the NStable and NS bits of all child + * entries being ignored and considered as being set. The IPMMU seems + * not to comply with this, as it generates a secure access page fault + * if any of the NStable and NS bits isn't set when running in + * non-secure mode. + */ + domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS; + domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, + domain->cfg.ias = domain->root->features->imctr_va64 ? 39 : 32; + domain->cfg.oas = 40; + domain->cfg.tlb = &ipmmu_gather_ops; #if 0 /* Xen: Not needed */ domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); domain->io_domain.geometry.force_aperture = true; #endif - /* - * TODO: Add support for coherent walk through CCI with DVM and remove - * cache handling. For now, delegate it to the io-pgtable code. - */ - domain->cfg.iommu_dev = domain->root->dev; - - domain->iop = alloc_io_pgtable_ops(domain->root->features->imctr_va64 ? - ARM_64_LPAE_S1 : ARM_32_LPAE_S1, - &domain->cfg, domain); - if (!domain->iop) - return -EINVAL; - - /* Xen: Initialize context_id with non-existent value */ - domain->context_id = domain->root->num_ctx; - - /* - * Find an unused context. - */ - ret = ipmmu_domain_allocate_context(domain->root, domain); - if (ret < 0) { - /* Pass root page table for this domain as an argument. */ - free_io_pgtable_ops(domain->iop, - maddr_to_page(domain->cfg.arm_lpae_s1_cfg.ttbr[0])); - return ret; - } - - domain->context_id = ret; + /* + * TODO: Add support for coherent walk through CCI with DVM and remove + * cache handling. For now, delegate it to the io-pgtable code. + */ + domain->cfg.iommu_dev = domain->root->dev; + + domain->iop = alloc_io_pgtable_ops( + domain->root->features->imctr_va64 ? ARM_64_LPAE_S1 : ARM_32_LPAE_S1, + &domain->cfg, domain); + if ( !domain->iop ) + return -EINVAL; + + /* Xen: Initialize context_id with non-existent value */ + domain->context_id = domain->root->num_ctx; + + /* + * Find an unused context. + */ + ret = ipmmu_domain_allocate_context(domain->root, domain); + if ( ret < 0 ) + { + /* Pass root page table for this domain as an argument. */ + free_io_pgtable_ops(domain->iop, + maddr_to_page(domain->cfg.arm_lpae_s1_cfg.ttbr[0])); + return ret; + } + + domain->context_id = ret; #ifdef CONFIG_RCAR_DDR_BACKUP - domain->root->reg_backup[ret] = root_pgtable[ret]; + domain->root->reg_backup[ret] = root_pgtable[ret]; #endif - /* TTBR0 */ - ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0]; - - /* Xen: */ - dev_notice(domain->root->dev, "d%d: Set IPMMU context %u (pgd 0x%"PRIx64")\n", - domain->d->domain_id, domain->context_id, ttbr); - - ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr & IMTTLBR_MASK); - ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32); - - /* - * With enabling IMCTR_VA64 we need to setup TTBR1 as well - */ - if (domain->root->features->imctr_va64) { - ipmmu_ctx_write_root(domain, IMTTLBR1, ttbr & IMTTLBR_MASK); - ipmmu_ctx_write_root(domain, IMTTUBR1, ttbr >> 32); - } - - /* - * TTBCR - * We use long descriptors with inner-shareable WBWA tables and allocate - * the whole 32-bit VA space to TTBR0. - */ - - if (domain->root->features->twobit_imttbcr_sl0) - tmp = IMTTBCR_SL0_TWOBIT_LVL_1; - else - tmp = IMTTBCR_SL0_LVL_1; - - /* - * As we are going to use TTBR1 we need to setup attributes for the memory - * associated with the translation table walks using TTBR1. 
- * Also for using IMCTR_VA64 mode we need to calculate and setup - * TTBR0/TTBR1 addressed regions. - */ - if (domain->root->features->imctr_va64) { - tmp |= IMTTBCR_SH1_INNER_SHAREABLE | IMTTBCR_ORGN1_WB_WA | - IMTTBCR_IRGN1_WB_WA; - tmp |= (64ULL - domain->cfg.ias) << IMTTBCR_TSZ0_SHIFT; - tmp |= (64ULL - domain->cfg.ias) << IMTTBCR_TSZ1_SHIFT; - } - - ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | - IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | - IMTTBCR_IRGN0_WB_WA | tmp); - - /* MAIR0 */ - ipmmu_ctx_write_root(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]); - - /* IMBUSCR */ - if (domain->root->features->setup_imbuscr) - ipmmu_ctx_write_root(domain, IMBUSCR, - ipmmu_ctx_read_root(domain, IMBUSCR) & - ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK)); - /* - * IMSTR - * Clear all interrupt flags. - */ - ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR)); - - /* - * IMCTR - * Enable the MMU and interrupt generation. The long-descriptor - * translation table format doesn't use TEX remapping. Don't enable AF - * software management as we have no use for it. Flush the TLB as - * required when modifying the context registers. - * Xen: Enable the context for the root IPMMU only. - */ - ipmmu_ctx_write_root(domain, IMCTR, - (domain->root->features->imctr_va64 ? IMCTR_VA64 : 0) - | IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN); - - return 0; + /* TTBR0 */ + ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0]; + + /* Xen: */ + dev_notice(domain->root->dev, + "d%d: Set IPMMU context %u (pgd 0x%" PRIx64 ")\n", + domain->d->domain_id, domain->context_id, ttbr); + + ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr & IMTTLBR_MASK); + ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32); + + /* + * With enabling IMCTR_VA64 we need to setup TTBR1 as well + */ + if ( domain->root->features->imctr_va64 ) + { + ipmmu_ctx_write_root(domain, IMTTLBR1, ttbr & IMTTLBR_MASK); + ipmmu_ctx_write_root(domain, IMTTUBR1, ttbr >> 32); + } + + /* + * TTBCR + * We use long descriptors with inner-shareable WBWA tables and allocate + * the whole 32-bit VA space to TTBR0. + */ + + if ( domain->root->features->twobit_imttbcr_sl0 ) + tmp = IMTTBCR_SL0_TWOBIT_LVL_1; + else + tmp = IMTTBCR_SL0_LVL_1; + + /* + * As we are going to use TTBR1 we need to setup attributes for the memory + * associated with the translation table walks using TTBR1. + * Also for using IMCTR_VA64 mode we need to calculate and setup + * TTBR0/TTBR1 addressed regions. + */ + if ( domain->root->features->imctr_va64 ) + { + tmp |= IMTTBCR_SH1_INNER_SHAREABLE | IMTTBCR_ORGN1_WB_WA | + IMTTBCR_IRGN1_WB_WA; + tmp |= (64ULL - domain->cfg.ias) << IMTTBCR_TSZ0_SHIFT; + tmp |= (64ULL - domain->cfg.ias) << IMTTBCR_TSZ1_SHIFT; + } + + ipmmu_ctx_write_root(domain, IMTTBCR, + IMTTBCR_EAE | IMTTBCR_SH0_INNER_SHAREABLE | + IMTTBCR_ORGN0_WB_WA | IMTTBCR_IRGN0_WB_WA | tmp); + + /* MAIR0 */ + ipmmu_ctx_write_root(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]); + + /* IMBUSCR */ + if ( domain->root->features->setup_imbuscr ) + ipmmu_ctx_write_root(domain, IMBUSCR, + ipmmu_ctx_read_root(domain, IMBUSCR) & + ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK)); + /* + * IMSTR + * Clear all interrupt flags. + */ + ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR)); + + /* + * IMCTR + * Enable the MMU and interrupt generation. The long-descriptor + * translation table format doesn't use TEX remapping. Don't enable AF + * software management as we have no use for it. Flush the TLB as + * required when modifying the context registers. 
+ * Xen: Enable the context for the root IPMMU only. + */ + ipmmu_ctx_write_root(domain, IMCTR, + (domain->root->features->imctr_va64 ? IMCTR_VA64 : 0) | + IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN); + + return 0; #else - u64 ttbr; - u32 tmp; - int ret; + u64 ttbr; + u32 tmp; + int ret; - /* Xen: Initialize context_id with non-existent value */ - domain->context_id = domain->root->num_ctx; + /* Xen: Initialize context_id with non-existent value */ + domain->context_id = domain->root->num_ctx; - /* - * Find an unused context. - */ - ret = ipmmu_domain_allocate_context(domain->root, domain); - if (ret < 0) - return ret; + /* + * Find an unused context. + */ + ret = ipmmu_domain_allocate_context(domain->root, domain); + if ( ret < 0 ) + return ret; - domain->context_id = ret; + domain->context_id = ret; #ifdef CONFIG_RCAR_DDR_BACKUP - domain->root->reg_backup[ret] = root_pgtable[ret]; + domain->root->reg_backup[ret] = root_pgtable[ret]; #endif - /* - * TTBR0 - * Use P2M table. With IPA size being forced to 40 bit (pa_range = 2) - * we get 3-level P2M with two concatenated translation tables - * at level 1. Which seems to be an appropriate case for the IPMMU. - */ - ASSERT(domain->d != NULL); - ttbr = page_to_maddr(domain->d->arch.p2m.root); - - /* Xen: */ - dev_notice(domain->root->dev, "d%d: Set IPMMU context %u (pgd 0x%"PRIx64")\n", - domain->d->domain_id, domain->context_id, ttbr); - - ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr & IMTTLBR_MASK); - ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32); - - /* - * TTBCR - * We use long descriptors with inner-shareable WBWA tables and allocate - * the whole 40-bit VA space to TTBR0. - * Bypass stage 1 translation. - */ - if (domain->root->features->twobit_imttbcr_sl0) - tmp = IMTTBCR_SL0_TWOBIT_LVL_1; - else - tmp = IMTTBCR_SL0_LVL_1; - - - tmp |= (64ULL - 40ULL) << IMTTBCR_TSZ0_SHIFT; - - ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | IMTTBCR_PMB | - IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | - IMTTBCR_IRGN0_WB_WA | tmp); - - /* IMBUSCR */ - if (domain->root->features->setup_imbuscr) - ipmmu_ctx_write_root(domain, IMBUSCR, - ipmmu_ctx_read_root(domain, IMBUSCR) & - ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK)); - - /* - * IMSTR - * Clear all interrupt flags. - */ - ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR)); - - /* - * IMCTR - * Enable the MMU and interrupt generation. The long-descriptor - * translation table format doesn't use TEX remapping. Don't enable AF - * software management as we have no use for it. Flush the TLB as - * required when modifying the context registers. - * Xen: Enable the context for the root IPMMU only. - */ - ipmmu_ctx_write_root(domain, IMCTR, - IMCTR_VA64 | IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN); - - return 0; + /* + * TTBR0 + * Use P2M table. With IPA size being forced to 40 bit (pa_range = 2) + * we get 3-level P2M with two concatenated translation tables + * at level 1. Which seems to be an appropriate case for the IPMMU. + */ + ASSERT(domain->d != NULL); + ttbr = page_to_maddr(domain->d->arch.p2m.root); + + /* Xen: */ + dev_notice(domain->root->dev, + "d%d: Set IPMMU context %u (pgd 0x%" PRIx64 ")\n", + domain->d->domain_id, domain->context_id, ttbr); + + ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr & IMTTLBR_MASK); + ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32); + + /* + * TTBCR + * We use long descriptors with inner-shareable WBWA tables and allocate + * the whole 40-bit VA space to TTBR0. + * Bypass stage 1 translation. 
+ */ + if ( domain->root->features->twobit_imttbcr_sl0 ) + tmp = IMTTBCR_SL0_TWOBIT_LVL_1; + else + tmp = IMTTBCR_SL0_LVL_1; + + tmp |= (64ULL - 40ULL) << IMTTBCR_TSZ0_SHIFT; + + ipmmu_ctx_write_root(domain, IMTTBCR, + IMTTBCR_EAE | IMTTBCR_PMB | + IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | + IMTTBCR_IRGN0_WB_WA | tmp); + + /* IMBUSCR */ + if ( domain->root->features->setup_imbuscr ) + ipmmu_ctx_write_root(domain, IMBUSCR, + ipmmu_ctx_read_root(domain, IMBUSCR) & + ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK)); + + /* + * IMSTR + * Clear all interrupt flags. + */ + ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR)); + + /* + * IMCTR + * Enable the MMU and interrupt generation. The long-descriptor + * translation table format doesn't use TEX remapping. Don't enable AF + * software management as we have no use for it. Flush the TLB as + * required when modifying the context registers. + * Xen: Enable the context for the root IPMMU only. + */ + ipmmu_ctx_write_root(domain, IMCTR, + IMCTR_VA64 | IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN); + + return 0; #endif } static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu, - unsigned int context_id) + unsigned int context_id) { - unsigned long flags; + unsigned long flags; - spin_lock_irqsave(&mmu->lock, flags); + spin_lock_irqsave(&mmu->lock, flags); - clear_bit(context_id, mmu->ctx); - mmu->domains[context_id] = NULL; + clear_bit(context_id, mmu->ctx); + mmu->domains[context_id] = NULL; - spin_unlock_irqrestore(&mmu->lock, flags); + spin_unlock_irqrestore(&mmu->lock, flags); } static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) { - /* Xen: Just return if context_id has non-existent value */ - if (domain->context_id >= domain->root->num_ctx) - return; + /* Xen: Just return if context_id has non-existent value */ + if ( domain->context_id >= domain->root->num_ctx ) + return; - /* - * Disable the context. Flush the TLB as required when modifying the - * context registers. - * - * TODO: Is TLB flush really needed ? - * Xen: Disable the context for the root IPMMU only. - */ - ipmmu_ctx_write_root(domain, IMCTR, IMCTR_FLUSH); - ipmmu_tlb_sync(domain); + /* + * Disable the context. Flush the TLB as required when modifying the + * context registers. + * + * TODO: Is TLB flush really needed ? + * Xen: Disable the context for the root IPMMU only. 
+ */ + ipmmu_ctx_write_root(domain, IMCTR, IMCTR_FLUSH); + ipmmu_tlb_sync(domain); #ifdef CONFIG_RCAR_DDR_BACKUP - domain->root->reg_backup[domain->context_id] = NULL; + domain->root->reg_backup[domain->context_id] = NULL; #endif - ipmmu_domain_free_context(domain->root, domain->context_id); + ipmmu_domain_free_context(domain->root, domain->context_id); - /* Xen: Initialize context_id with non-existent value */ - domain->context_id = domain->root->num_ctx; + /* Xen: Initialize context_id with non-existent value */ + domain->context_id = domain->root->num_ctx; } /* ----------------------------------------------------------------------------- @@ -1050,85 +1039,86 @@ static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) /* Xen: Show domain_id in every printk */ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) { - const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF; - struct ipmmu_vmsa_device *mmu = domain->root; - u32 status; - u64 iova; - - status = ipmmu_ctx_read_root(domain, IMSTR); - if (!(status & err_mask)) - return IRQ_NONE; - - iova = ipmmu_ctx_read_root(domain, IMEAR) | - ((u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32); - - /* - * Clear the error status flags. Unlike traditional interrupt flag - * registers that must be cleared by writing 1, this status register - * seems to require 0. The error address register must be read before, - * otherwise its value will be 0. - */ - ipmmu_ctx_write_root(domain, IMSTR, 0); - - /* Log fatal errors. */ - if (status & IMSTR_MHIT) - dev_err_ratelimited(mmu->dev, "d%d: Multiple TLB hits @0x%"PRIx64"\n", - domain->d->domain_id, iova); - if (status & IMSTR_ABORT) - dev_err_ratelimited(mmu->dev, "d%d: Page Table Walk Abort @0x%"PRIx64"\n", - domain->d->domain_id, iova); - - if (!(status & (IMSTR_PF | IMSTR_TF))) - return IRQ_NONE; - - /* Flush the TLB as required when IPMMU translation error occurred. */ - ipmmu_tlb_invalidate(domain); - - /* - * Try to handle page faults and translation faults. - * - * TODO: We need to look up the faulty device based on the I/O VA. Use - * the IOMMU device for now. - */ - if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0)) - return IRQ_HANDLED; - - dev_err_ratelimited(mmu->dev, - "d%d: Unhandled fault: status 0x%08x iova 0x%"PRIx64"\n", - domain->d->domain_id, status, iova); - - return IRQ_HANDLED; + const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF; + struct ipmmu_vmsa_device *mmu = domain->root; + u32 status; + u64 iova; + + status = ipmmu_ctx_read_root(domain, IMSTR); + if ( !(status & err_mask) ) + return IRQ_NONE; + + iova = ipmmu_ctx_read_root(domain, IMEAR) | + ((u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32); + + /* + * Clear the error status flags. Unlike traditional interrupt flag + * registers that must be cleared by writing 1, this status register + * seems to require 0. The error address register must be read before, + * otherwise its value will be 0. + */ + ipmmu_ctx_write_root(domain, IMSTR, 0); + + /* Log fatal errors. */ + if ( status & IMSTR_MHIT ) + dev_err_ratelimited(mmu->dev, "d%d: Multiple TLB hits @0x%" PRIx64 "\n", + domain->d->domain_id, iova); + if ( status & IMSTR_ABORT ) + dev_err_ratelimited(mmu->dev, + "d%d: Page Table Walk Abort @0x%" PRIx64 "\n", + domain->d->domain_id, iova); + + if ( !(status & (IMSTR_PF | IMSTR_TF)) ) + return IRQ_NONE; + + /* Flush the TLB as required when IPMMU translation error occurred. 
*/ + ipmmu_tlb_invalidate(domain); + + /* + * Try to handle page faults and translation faults. + * + * TODO: We need to look up the faulty device based on the I/O VA. Use + * the IOMMU device for now. + */ + if ( !report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0) ) + return IRQ_HANDLED; + + dev_err_ratelimited( + mmu->dev, "d%d: Unhandled fault: status 0x%08x iova 0x%" PRIx64 "\n", + domain->d->domain_id, status, iova); + + return IRQ_HANDLED; } static irqreturn_t ipmmu_irq(int irq, void *dev) { - struct ipmmu_vmsa_device *mmu = dev; - irqreturn_t status = IRQ_NONE; - unsigned int i; - unsigned long flags; + struct ipmmu_vmsa_device *mmu = dev; + irqreturn_t status = IRQ_NONE; + unsigned int i; + unsigned long flags; - spin_lock_irqsave(&mmu->lock, flags); + spin_lock_irqsave(&mmu->lock, flags); - /* - * Check interrupts for all active contexts. - */ - for (i = 0; i < mmu->num_ctx; i++) { - if (!mmu->domains[i]) - continue; - if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED) - status = IRQ_HANDLED; - } + /* + * Check interrupts for all active contexts. + */ + for ( i = 0; i < mmu->num_ctx; i++ ) + { + if ( !mmu->domains[i] ) + continue; + if ( ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED ) + status = IRQ_HANDLED; + } - spin_unlock_irqrestore(&mmu->lock, flags); + spin_unlock_irqrestore(&mmu->lock, flags); - return status; + return status; } /* Xen: Interrupt handlers wrapper */ -static void ipmmu_irq_xen(int irq, void *dev, - struct cpu_user_regs *regs) +static void ipmmu_irq_xen(int irq, void *dev, struct cpu_user_regs *regs) { - ipmmu_irq(irq, dev); + ipmmu_irq(irq, dev); } #define ipmmu_irq ipmmu_irq_xen @@ -1166,55 +1156,60 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain) #endif bool ipmmus_are_equal(struct ipmmu_vmsa_domain *domain, - struct ipmmu_vmsa_archdata *archdata) + struct ipmmu_vmsa_archdata *archdata) { - unsigned int i; + unsigned int i; - if (domain->num_mmus != archdata->num_mmus) - return false; + if ( domain->num_mmus != archdata->num_mmus ) + return false; - for (i = 0; i < archdata->num_mmus; i++) { - if (domain->mmus[i] != archdata->mmus[i]) - return false; - } + for ( i = 0; i < archdata->num_mmus; i++ ) + { + if ( domain->mmus[i] != archdata->mmus[i] ) + return false; + } - return true; + return true; } static int ipmmu_attach_device(struct iommu_domain *io_domain, - struct device *dev) -{ - struct ipmmu_vmsa_archdata *archdata = to_archdata(dev); - struct ipmmu_vmsa_device *root; - struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - unsigned long flags; - unsigned int i; - int ret = 0; - - for (i = 0; i < archdata->num_mmus; i++) { - if (!archdata->mmus[i]) - break; - } - - if (!archdata->num_mmus || i != archdata->num_mmus) { - dev_err(dev, "Cannot attach to IPMMU\n"); - return -ENXIO; - } - - root = ipmmu_find_root(archdata->mmus[0]); - if (!root) { - dev_err(dev, "Unable to locate root IPMMU\n"); - return -EAGAIN; - } - - spin_lock_irqsave(&domain->lock, flags); - - if (!domain->mmus[0]) { - /* The domain hasn't been used yet, initialize it. 
*/ - domain->num_mmus = archdata->num_mmus; - memcpy(domain->mmus, archdata->mmus, - archdata->num_mmus * sizeof(*archdata->mmus)); - domain->root = root; + struct device *dev) +{ + struct ipmmu_vmsa_archdata *archdata = to_archdata(dev); + struct ipmmu_vmsa_device *root; + struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); + unsigned long flags; + unsigned int i; + int ret = 0; + + for ( i = 0; i < archdata->num_mmus; i++ ) + { + if ( !archdata->mmus[i] ) + break; + } + + if ( !archdata->num_mmus || i != archdata->num_mmus ) + { + dev_err(dev, "Cannot attach to IPMMU\n"); + return -ENXIO; + } + + root = ipmmu_find_root(archdata->mmus[0]); + if ( !root ) + { + dev_err(dev, "Unable to locate root IPMMU\n"); + return -EAGAIN; + } + + spin_lock_irqsave(&domain->lock, flags); + + if ( !domain->mmus[0] ) + { + /* The domain hasn't been used yet, initialize it. */ + domain->num_mmus = archdata->num_mmus; + memcpy(domain->mmus, archdata->mmus, + archdata->num_mmus * sizeof(*archdata->mmus)); + domain->root = root; /* * Xen: We have already initialized and enabled context for root IPMMU @@ -1225,10 +1220,10 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, ret = ipmmu_domain_init_context(domain); #endif - ipmmu_ctx_write_cache(domain, IMCTR, - ipmmu_ctx_read_root(domain, IMCTR) | IMCTR_FLUSH); + ipmmu_ctx_write_cache(domain, IMCTR, + ipmmu_ctx_read_root(domain, IMCTR) | IMCTR_FLUSH); - dev_info(dev, "Using IPMMU context %u\n", domain->context_id); + dev_info(dev, "Using IPMMU context %u\n", domain->context_id); #if 0 /* Xen: Not needed */ if (ret < 0) { dev_err(dev, "Unable to initialize IPMMU context\n"); @@ -1238,45 +1233,51 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, domain->context_id); } #endif - } else if (!ipmmus_are_equal(domain, archdata)) { - /* - * Something is wrong, we can't attach two devices using - * different IOMMUs to the same domain. - */ - for (i = 0; i < archdata->num_mmus || i < domain->num_mmus; i++) - dev_err(dev, "Can't attach IPMMU%d %s to domain on IPMMU%d %s\n", - i + 1, i < archdata->num_mmus ? dev_name(archdata->mmus[i]->dev) : "---", - i + 1, i < domain->num_mmus ? dev_name(domain->mmus[i]->dev) : "---"); - ret = -EINVAL; - } else { - dev_info(dev, "Reusing IPMMU context %u\n", - domain->context_id); - } - - spin_unlock_irqrestore(&domain->lock, flags); - - if (ret < 0) - return ret; - - for (i = 0; i < archdata->num_utlbs; ++i) - ipmmu_utlb_enable(domain, &archdata->utlbs[i]); - - return 0; + } + else if ( !ipmmus_are_equal(domain, archdata) ) + { + /* + * Something is wrong, we can't attach two devices using + * different IOMMUs to the same domain. + */ + for ( i = 0; i < archdata->num_mmus || i < domain->num_mmus; i++ ) + dev_err( + dev, "Can't attach IPMMU%d %s to domain on IPMMU%d %s\n", i + 1, + i < archdata->num_mmus ? dev_name(archdata->mmus[i]->dev) + : "---", + i + 1, + i < domain->num_mmus ? 
dev_name(domain->mmus[i]->dev) : "---"); + ret = -EINVAL; + } + else + { + dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id); + } + + spin_unlock_irqrestore(&domain->lock, flags); + + if ( ret < 0 ) + return ret; + + for ( i = 0; i < archdata->num_utlbs; ++i ) + ipmmu_utlb_enable(domain, &archdata->utlbs[i]); + + return 0; } static void ipmmu_detach_device(struct iommu_domain *io_domain, - struct device *dev) + struct device *dev) { - struct ipmmu_vmsa_archdata *archdata = to_archdata(dev); - struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - unsigned int i; + struct ipmmu_vmsa_archdata *archdata = to_archdata(dev); + struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); + unsigned int i; - for (i = 0; i < archdata->num_utlbs; ++i) - ipmmu_utlb_disable(domain, &archdata->utlbs[i]); + for ( i = 0; i < archdata->num_utlbs; ++i ) + ipmmu_utlb_disable(domain, &archdata->utlbs[i]); - /* - * TODO: Optimize by disabling the context when no device is attached. - */ + /* + * TODO: Optimize by disabling the context when no device is attached. + */ } /* @@ -1319,169 +1320,182 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, #ifndef CONFIG_RCAR_IPMMU_PGT_IS_SHARED static size_t ipmmu_pgsize(struct iommu_domain *io_domain, - unsigned long addr_merge, size_t size) + unsigned long addr_merge, size_t size) { - struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - unsigned int pgsize_idx; - size_t pgsize; + struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); + unsigned int pgsize_idx; + size_t pgsize; - /* Max page size that still fits into 'size' */ - pgsize_idx = __fls(size); + /* Max page size that still fits into 'size' */ + pgsize_idx = __fls(size); - /* need to consider alignment requirements ? */ - if (likely(addr_merge)) { - /* Max page size allowed by address */ - unsigned int align_pgsize_idx = __ffs(addr_merge); - pgsize_idx = min(pgsize_idx, align_pgsize_idx); - } + /* need to consider alignment requirements ? 
*/ + if ( likely(addr_merge) ) + { + /* Max page size allowed by address */ + unsigned int align_pgsize_idx = __ffs(addr_merge); + pgsize_idx = min(pgsize_idx, align_pgsize_idx); + } - /* build a mask of acceptable page sizes */ - pgsize = (1UL << (pgsize_idx + 1)) - 1; + /* build a mask of acceptable page sizes */ + pgsize = (1UL << (pgsize_idx + 1)) - 1; - /* throw away page sizes not supported by the hardware */ - pgsize &= domain->cfg.pgsize_bitmap; + /* throw away page sizes not supported by the hardware */ + pgsize &= domain->cfg.pgsize_bitmap; - /* make sure we're still sane */ - BUG_ON(!pgsize); + /* make sure we're still sane */ + BUG_ON(!pgsize); - /* pick the biggest page */ - pgsize_idx = __fls(pgsize); - pgsize = 1UL << pgsize_idx; + /* pick the biggest page */ + pgsize_idx = __fls(pgsize); + pgsize = 1UL << pgsize_idx; - return pgsize; + return pgsize; } phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, dma_addr_t iova) { - struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - - if (unlikely(domain->iop->iova_to_phys == NULL)) - return 0; - - return domain->iop->iova_to_phys(domain->iop, iova); -} - -size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, size_t size) -{ - struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - size_t unmapped_page, unmapped = 0; - dma_addr_t max_iova; - unsigned int min_pagesz; - - if (unlikely(domain->iop->unmap == NULL || - domain->cfg.pgsize_bitmap == 0UL)) - return -ENODEV; - - /* find out the minimum page size supported */ - min_pagesz = 1 << __ffs(domain->cfg.pgsize_bitmap); - - /* - * The virtual address, as well as the size of the mapping, must be - * aligned (at least) to the size of the smallest page supported - * by the hardware - */ - if (!IS_ALIGNED(iova | size, min_pagesz)) { - printk("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", - iova, size, min_pagesz); - return -EINVAL; - } - - /* - * the sum of virtual address and size must be inside the IOVA space - * that hardware supports - */ - max_iova = (1UL << domain->cfg.ias) - 1; - if ((dma_addr_t)iova + size > max_iova) { - printk("out-of-bound: iova 0x%lx + size 0x%zx > max_iova 0x%"PRIx64"\n", - iova, size, max_iova); - return -EINVAL; - } - - /* - * Keep iterating until we either unmap 'size' bytes (or more) - * or we hit an area that isn't mapped. 
- */ - while (unmapped < size) { - size_t pgsize = ipmmu_pgsize(io_domain, iova, size - unmapped); - - unmapped_page = domain->iop->unmap(domain->iop, iova, pgsize); - if (!unmapped_page) - break; - - iova += unmapped_page; - unmapped += unmapped_page; - } - - return unmapped; + struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); + + if ( unlikely(domain->iop->iova_to_phys == NULL) ) + return 0; + + return domain->iop->iova_to_phys(domain->iop, iova); +} + +size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, + size_t size) +{ + struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); + size_t unmapped_page, unmapped = 0; + dma_addr_t max_iova; + unsigned int min_pagesz; + + if ( unlikely(domain->iop->unmap == NULL || + domain->cfg.pgsize_bitmap == 0UL) ) + return -ENODEV; + + /* find out the minimum page size supported */ + min_pagesz = 1 << __ffs(domain->cfg.pgsize_bitmap); + + /* + * The virtual address, as well as the size of the mapping, must be + * aligned (at least) to the size of the smallest page supported + * by the hardware + */ + if ( !IS_ALIGNED(iova | size, min_pagesz) ) + { + printk("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", iova, size, + min_pagesz); + return -EINVAL; + } + + /* + * the sum of virtual address and size must be inside the IOVA space + * that hardware supports + */ + max_iova = (1UL << domain->cfg.ias) - 1; + if ( (dma_addr_t)iova + size > max_iova ) + { + printk("out-of-bound: iova 0x%lx + size 0x%zx > max_iova 0x%" PRIx64 + "\n", + iova, size, max_iova); + return -EINVAL; + } + + /* + * Keep iterating until we either unmap 'size' bytes (or more) + * or we hit an area that isn't mapped. + */ + while ( unmapped < size ) + { + size_t pgsize = ipmmu_pgsize(io_domain, iova, size - unmapped); + + unmapped_page = domain->iop->unmap(domain->iop, iova, pgsize); + if ( !unmapped_page ) + break; + + iova += unmapped_page; + unmapped += unmapped_page; + } + + return unmapped; } int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) -{ - struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - unsigned long orig_iova = iova; - dma_addr_t max_iova; - unsigned int min_pagesz; - size_t orig_size = size; - int ret = 0; - - if (unlikely(domain->iop->map == NULL || - domain->cfg.pgsize_bitmap == 0UL)) - return -ENODEV; - - /* find out the minimum page size supported */ - min_pagesz = 1 << __ffs(domain->cfg.pgsize_bitmap); - - /* - * both the virtual address and the physical one, as well as - * the size of the mapping, must be aligned (at least) to the - * size of the smallest page supported by the hardware - */ - if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { - printk("unaligned: iova 0x%lx pa 0x%"PRIx64" size 0x%zx min_pagesz 0x%x\n", - iova, paddr, size, min_pagesz); - return -EINVAL; - } - - /* - * the sum of virtual address and size must be inside the IOVA space - * that hardware supports - */ - max_iova = (1UL << domain->cfg.ias) - 1; - if ((dma_addr_t)iova + size > max_iova) { - printk("out-of-bound: iova 0x%lx + size 0x%zx > max_iova 0x%"PRIx64"\n", - iova, size, max_iova); - return -EINVAL; - } - - while (size) { - size_t pgsize = ipmmu_pgsize(io_domain, iova | paddr, size); - - ret = domain->iop->map(domain->iop, iova, paddr, pgsize, prot); - if (ret == -EEXIST) { - phys_addr_t exist_paddr = ipmmu_iova_to_phys(io_domain, iova); - if (exist_paddr == paddr) - ret = 0; - else if (exist_paddr) { - printk("remap: iova 0x%lx pa 0x%"PRIx64" pgsize 0x%zx\n", - iova, 
paddr, pgsize); - ipmmu_unmap(io_domain, iova, pgsize); - ret = domain->iop->map(domain->iop, iova, paddr, pgsize, prot); - } - } - if (ret) - break; - - iova += pgsize; - paddr += pgsize; - size -= pgsize; - } - - /* unroll mapping in case something went wrong */ - if (ret && orig_size != size) - ipmmu_unmap(io_domain, orig_iova, orig_size - size); - - return ret; + phys_addr_t paddr, size_t size, int prot) +{ + struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); + unsigned long orig_iova = iova; + dma_addr_t max_iova; + unsigned int min_pagesz; + size_t orig_size = size; + int ret = 0; + + if ( unlikely(domain->iop->map == NULL || + domain->cfg.pgsize_bitmap == 0UL) ) + return -ENODEV; + + /* find out the minimum page size supported */ + min_pagesz = 1 << __ffs(domain->cfg.pgsize_bitmap); + + /* + * both the virtual address and the physical one, as well as + * the size of the mapping, must be aligned (at least) to the + * size of the smallest page supported by the hardware + */ + if ( !IS_ALIGNED(iova | paddr | size, min_pagesz) ) + { + printk("unaligned: iova 0x%lx pa 0x%" PRIx64 + " size 0x%zx min_pagesz 0x%x\n", + iova, paddr, size, min_pagesz); + return -EINVAL; + } + + /* + * the sum of virtual address and size must be inside the IOVA space + * that hardware supports + */ + max_iova = (1UL << domain->cfg.ias) - 1; + if ( (dma_addr_t)iova + size > max_iova ) + { + printk("out-of-bound: iova 0x%lx + size 0x%zx > max_iova 0x%" PRIx64 + "\n", + iova, size, max_iova); + return -EINVAL; + } + + while ( size ) + { + size_t pgsize = ipmmu_pgsize(io_domain, iova | paddr, size); + + ret = domain->iop->map(domain->iop, iova, paddr, pgsize, prot); + if ( ret == -EEXIST ) + { + phys_addr_t exist_paddr = ipmmu_iova_to_phys(io_domain, iova); + if ( exist_paddr == paddr ) + ret = 0; + else if ( exist_paddr ) + { + printk("remap: iova 0x%lx pa 0x%" PRIx64 " pgsize 0x%zx\n", + iova, paddr, pgsize); + ipmmu_unmap(io_domain, iova, pgsize); + ret = domain->iop->map(domain->iop, iova, paddr, pgsize, prot); + } + } + if ( ret ) + break; + + iova += pgsize; + paddr += pgsize; + size -= pgsize; + } + + /* unroll mapping in case something went wrong */ + if ( ret && orig_size != size ) + ipmmu_unmap(io_domain, orig_iova, orig_size - size); + + return ret; } #endif @@ -1523,147 +1537,156 @@ static struct iommu_group *ipmmu_find_group(struct device *dev) } #endif -static int ipmmu_find_utlbs(struct device *dev, - struct ipmmu_vmsa_utlb *utlbs, unsigned int num_utlbs) +static int ipmmu_find_utlbs(struct device *dev, struct ipmmu_vmsa_utlb *utlbs, + unsigned int num_utlbs) { - unsigned int i; - int ret = -ENODEV; + unsigned int i; + int ret = -ENODEV; - spin_lock(&ipmmu_devices_lock); + spin_lock(&ipmmu_devices_lock); - for (i = 0; i < num_utlbs; ++i) { - struct ipmmu_vmsa_device *mmu; - struct of_phandle_args args; + for ( i = 0; i < num_utlbs; ++i ) + { + struct ipmmu_vmsa_device *mmu; + struct of_phandle_args args; - ret = of_parse_phandle_with_args(dev->of_node, "iommus", - "#iommu-cells", i, &args); - if (ret < 0) - break; + ret = of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells", + i, &args); + if ( ret < 0 ) + break; #if 0 /* Xen: Not needed */ of_node_put(args.np); #endif - ret = -ENODEV; - list_for_each_entry(mmu, &ipmmu_devices, list) { - if (args.np != mmu->dev->of_node || args.args_count != 1) - continue; + ret = -ENODEV; + list_for_each_entry (mmu, &ipmmu_devices, list) + { + if ( args.np != mmu->dev->of_node || args.args_count != 1 ) + continue; - /* - * TODO Take a 
reference to the MMU to protect - * against device removal. - */ - ret = 0; - break; - } - if (ret < 0) - break; + /* + * TODO Take a reference to the MMU to protect + * against device removal. + */ + ret = 0; + break; + } + if ( ret < 0 ) + break; - utlbs[i].utlb = args.args[0]; - utlbs[i].mmu = mmu; - } + utlbs[i].utlb = args.args[0]; + utlbs[i].mmu = mmu; + } - spin_unlock(&ipmmu_devices_lock); + spin_unlock(&ipmmu_devices_lock); - return ret; + return ret; } /* Xen: To roll back actions that took place it init */ static __maybe_unused void ipmmu_destroy_platform_device(struct device *dev) { - struct ipmmu_vmsa_archdata *archdata = to_archdata(dev); + struct ipmmu_vmsa_archdata *archdata = to_archdata(dev); - if (!archdata) - return; + if ( !archdata ) + return; - kfree(archdata->utlbs); - kfree(archdata); - set_archdata(dev, NULL); + kfree(archdata->utlbs); + kfree(archdata); + set_archdata(dev, NULL); } static int ipmmu_init_platform_device(struct device *dev) { - struct ipmmu_vmsa_archdata *archdata; - struct ipmmu_vmsa_device *mmus[IPMMU_PER_DEV_MAX]; - struct ipmmu_vmsa_utlb *utlbs; + struct ipmmu_vmsa_archdata *archdata; + struct ipmmu_vmsa_device *mmus[IPMMU_PER_DEV_MAX]; + struct ipmmu_vmsa_utlb *utlbs; #ifdef CONFIG_RCAR_DDR_BACKUP - unsigned int *utlbs_val, *asids_val; + unsigned int *utlbs_val, *asids_val; #endif - unsigned int i; - int num_utlbs; - int num_mmus; - int ret; + unsigned int i; + int num_utlbs; + int num_mmus; + int ret; - /* Find the master corresponding to the device. */ + /* Find the master corresponding to the device. */ - num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus", - "#iommu-cells"); - if (num_utlbs < 0) - return -ENODEV; + num_utlbs = + of_count_phandle_with_args(dev->of_node, "iommus", "#iommu-cells"); + if ( num_utlbs < 0 ) + return -ENODEV; - utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL); - if (!utlbs) - return -ENOMEM; + utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL); + if ( !utlbs ) + return -ENOMEM; #ifdef CONFIG_RCAR_DDR_BACKUP - utlbs_val = kcalloc(num_utlbs, sizeof(*utlbs_val), GFP_KERNEL); - if (!utlbs_val) - return -ENOMEM; - asids_val = kcalloc(num_utlbs, sizeof(*asids_val), GFP_KERNEL); - if (!asids_val) - return -ENOMEM; + utlbs_val = kcalloc(num_utlbs, sizeof(*utlbs_val), GFP_KERNEL); + if ( !utlbs_val ) + return -ENOMEM; + asids_val = kcalloc(num_utlbs, sizeof(*asids_val), GFP_KERNEL); + if ( !asids_val ) + return -ENOMEM; #endif - ret = ipmmu_find_utlbs(dev, utlbs, num_utlbs); - if (ret < 0) - goto error; - - num_mmus = 0; - for (i = 0; i < num_utlbs; i++) { - if (!utlbs[i].mmu || utlbs[i].utlb >= utlbs[i].mmu->num_utlbs) { - ret = -EINVAL; - goto error; - } - - if (!num_mmus || mmus[num_mmus - 1] != utlbs[i].mmu) { - if (num_mmus >= IPMMU_PER_DEV_MAX) { - ret = -EINVAL; - goto error; - } else { - num_mmus ++; - mmus[num_mmus - 1] = utlbs[i].mmu; - } - } - } - - archdata = kzalloc(sizeof(*archdata), GFP_KERNEL); - if (!archdata) { - ret = -ENOMEM; - goto error; - } - - archdata->num_mmus = num_mmus; - memcpy(archdata->mmus, mmus, num_mmus * sizeof(*mmus)); - archdata->utlbs = utlbs; + ret = ipmmu_find_utlbs(dev, utlbs, num_utlbs); + if ( ret < 0 ) + goto error; + + num_mmus = 0; + for ( i = 0; i < num_utlbs; i++ ) + { + if ( !utlbs[i].mmu || utlbs[i].utlb >= utlbs[i].mmu->num_utlbs ) + { + ret = -EINVAL; + goto error; + } + + if ( !num_mmus || mmus[num_mmus - 1] != utlbs[i].mmu ) + { + if ( num_mmus >= IPMMU_PER_DEV_MAX ) + { + ret = -EINVAL; + goto error; + } + else + { + num_mmus++; + mmus[num_mmus - 
1] = utlbs[i].mmu; + } + } + } + + archdata = kzalloc(sizeof(*archdata), GFP_KERNEL); + if ( !archdata ) + { + ret = -ENOMEM; + goto error; + } + + archdata->num_mmus = num_mmus; + memcpy(archdata->mmus, mmus, num_mmus * sizeof(*mmus)); + archdata->utlbs = utlbs; #ifdef CONFIG_RCAR_DDR_BACKUP - archdata->utlbs_val = utlbs_val; - archdata->asids_val = asids_val; + archdata->utlbs_val = utlbs_val; + archdata->asids_val = asids_val; #endif - archdata->num_utlbs = num_utlbs; - archdata->dev = dev; - set_archdata(dev, archdata); + archdata->num_utlbs = num_utlbs; + archdata->dev = dev; + set_archdata(dev, archdata); - /* Xen: */ - dev_notice(dev, "Initialized master device (IPMMUs %u micro-TLBs %u)\n", - num_mmus, num_utlbs); - for (i = 0; i < num_mmus; i++) - dev_notice(dev, "IPMMU%d: %s\n", i + 1, dev_name(mmus[i]->dev)); + /* Xen: */ + dev_notice(dev, "Initialized master device (IPMMUs %u micro-TLBs %u)\n", + num_mmus, num_utlbs); + for ( i = 0; i < num_mmus; i++ ) + dev_notice(dev, "IPMMU%d: %s\n", i + 1, dev_name(mmus[i]->dev)); - return 0; + return 0; error: - kfree(utlbs); - return ret; + kfree(utlbs); + return ret; } #if 0 /* Xen: Not needed */ @@ -1905,51 +1928,56 @@ static const struct iommu_ops ipmmu_ops = { static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu) { - unsigned int i; + unsigned int i; - /* Disable all contexts. */ - for (i = 0; i < mmu->num_ctx; ++i) - ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0); + /* Disable all contexts. */ + for ( i = 0; i < mmu->num_ctx; ++i ) + ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0); } static const struct ipmmu_features ipmmu_features_default = { - .use_ns_alias_offset = true, - .has_cache_leaf_nodes = false, - .has_eight_ctx = false, - .setup_imbuscr = true, - .twobit_imttbcr_sl0 = false, - .imctr_va64 = false, + .use_ns_alias_offset = true, + .has_cache_leaf_nodes = false, + .has_eight_ctx = false, + .setup_imbuscr = true, + .twobit_imttbcr_sl0 = false, + .imctr_va64 = false, }; static const struct ipmmu_features ipmmu_features_rcar_gen3 = { - .use_ns_alias_offset = false, - .has_cache_leaf_nodes = true, - .has_eight_ctx = true, - .setup_imbuscr = false, - .twobit_imttbcr_sl0 = true, - .imctr_va64 = true, + .use_ns_alias_offset = false, + .has_cache_leaf_nodes = true, + .has_eight_ctx = true, + .setup_imbuscr = false, + .twobit_imttbcr_sl0 = true, + .imctr_va64 = true, }; static const struct of_device_id ipmmu_of_ids[] = { - { - .compatible = "renesas,ipmmu-vmsa", - .data = &ipmmu_features_default, - }, { - .compatible = "renesas,ipmmu-r8a7795", - .data = &ipmmu_features_rcar_gen3, - }, { - .compatible = "renesas,ipmmu-r8a77965", - .data = &ipmmu_features_rcar_gen3, - }, { - .compatible = "renesas,ipmmu-r8a7796", - .data = &ipmmu_features_rcar_gen3, - }, { - /* Xen: It is not clear how to deal with it */ - .compatible = "renesas,ipmmu-pmb-r8a7795", - .data = NULL, - }, { - /* Terminator */ - }, + { + .compatible = "renesas,ipmmu-vmsa", + .data = &ipmmu_features_default, + }, + { + .compatible = "renesas,ipmmu-r8a7795", + .data = &ipmmu_features_rcar_gen3, + }, + { + .compatible = "renesas,ipmmu-r8a77965", + .data = &ipmmu_features_rcar_gen3, + }, + { + .compatible = "renesas,ipmmu-r8a7796", + .data = &ipmmu_features_rcar_gen3, + }, + { + /* Xen: It is not clear how to deal with it */ + .compatible = "renesas,ipmmu-pmb-r8a7795", + .data = NULL, + }, + { + /* Terminator */ + }, }; MODULE_DEVICE_TABLE(of, ipmmu_of_ids); @@ -1960,139 +1988,145 @@ MODULE_DEVICE_TABLE(of, ipmmu_of_ids); */ static int ipmmu_probe(struct platform_device 
*pdev) { - struct ipmmu_vmsa_device *mmu; - const struct of_device_id *match; - struct resource *res; - int irq; - int ret; - - match = of_match_node(ipmmu_of_ids, pdev->dev.of_node); - if (!match) - return -EINVAL; - - mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); - if (!mmu) { - dev_err(&pdev->dev, "cannot allocate device data\n"); - return -ENOMEM; - } - - mmu->dev = &pdev->dev; - mmu->num_utlbs = 48; - spin_lock_init(&mmu->lock); - bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); - mmu->features = match->data; + struct ipmmu_vmsa_device *mmu; + const struct of_device_id *match; + struct resource *res; + int irq; + int ret; + + match = of_match_node(ipmmu_of_ids, pdev->dev.of_node); + if ( !match ) + return -EINVAL; + + mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); + if ( !mmu ) + { + dev_err(&pdev->dev, "cannot allocate device data\n"); + return -ENOMEM; + } + + mmu->dev = &pdev->dev; + mmu->num_utlbs = 48; + spin_lock_init(&mmu->lock); + bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); + mmu->features = match->data; #if 0 /* Xen: Not needed */ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); #endif - /* Map I/O memory and request IRQ. */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mmu->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(mmu->base)) { - ret = PTR_ERR(mmu->base); - goto out; - } - - /* - * The IPMMU has two register banks, for secure and non-secure modes. - * The bank mapped at the beginning of the IPMMU address space - * corresponds to the running mode of the CPU. When running in secure - * mode the non-secure register bank is also available at an offset. - * - * Secure mode operation isn't clearly documented and is thus currently - * not implemented in the driver. Furthermore, preliminary tests of - * non-secure operation with the main register bank were not successful. - * Offset the registers base unconditionally to point to the non-secure - * alias space for now. - */ - if (mmu->features->use_ns_alias_offset) - mmu->base += IM_NS_ALIAS_OFFSET; - - /* - * The number of contexts varies with generation and instance. - * Newer SoCs get a total of 8 contexts enabled, older ones just one. - */ - if (mmu->features->has_eight_ctx) - mmu->num_ctx = 8; - else - mmu->num_ctx = 1; - - mmu->num_ctx = min_t(unsigned int, CONFIG_IPMMU_VMSA_CTX_NUM, - mmu->num_ctx); - - WARN_ON(mmu->num_ctx > IPMMU_CTX_MAX); - - irq = platform_get_irq(pdev, 0); - - /* - * Determine if this IPMMU instance is a leaf device by checking - * if the renesas,ipmmu-main property exists or not. - */ - if (mmu->features->has_cache_leaf_nodes && - of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL)) - mmu->is_leaf = true; - - /* Root devices have mandatory IRQs */ - if (ipmmu_is_root(mmu)) { - if (irq < 0) { - dev_err(&pdev->dev, "no IRQ found\n"); - ret = irq; - goto out; - } - - ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0, - dev_name(&pdev->dev), mmu); - if (ret < 0) { - dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); - goto out; - } - - ipmmu_device_reset(mmu); + /* Map I/O memory and request IRQ. */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mmu->base = devm_ioremap_resource(&pdev->dev, res); + if ( IS_ERR(mmu->base) ) + { + ret = PTR_ERR(mmu->base); + goto out; + } + + /* + * The IPMMU has two register banks, for secure and non-secure modes. + * The bank mapped at the beginning of the IPMMU address space + * corresponds to the running mode of the CPU. 
When running in secure + * mode the non-secure register bank is also available at an offset. + * + * Secure mode operation isn't clearly documented and is thus currently + * not implemented in the driver. Furthermore, preliminary tests of + * non-secure operation with the main register bank were not successful. + * Offset the registers base unconditionally to point to the non-secure + * alias space for now. + */ + if ( mmu->features->use_ns_alias_offset ) + mmu->base += IM_NS_ALIAS_OFFSET; + + /* + * The number of contexts varies with generation and instance. + * Newer SoCs get a total of 8 contexts enabled, older ones just one. + */ + if ( mmu->features->has_eight_ctx ) + mmu->num_ctx = 8; + else + mmu->num_ctx = 1; + + mmu->num_ctx = min_t(unsigned int, CONFIG_IPMMU_VMSA_CTX_NUM, mmu->num_ctx); + + WARN_ON(mmu->num_ctx > IPMMU_CTX_MAX); + + irq = platform_get_irq(pdev, 0); + + /* + * Determine if this IPMMU instance is a leaf device by checking + * if the renesas,ipmmu-main property exists or not. + */ + if ( mmu->features->has_cache_leaf_nodes && + of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL) ) + mmu->is_leaf = true; + + /* Root devices have mandatory IRQs */ + if ( ipmmu_is_root(mmu) ) + { + if ( irq < 0 ) + { + dev_err(&pdev->dev, "no IRQ found\n"); + ret = irq; + goto out; + } + + ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0, + dev_name(&pdev->dev), mmu); + if ( ret < 0 ) + { + dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); + goto out; + } + + ipmmu_device_reset(mmu); #ifdef CONFIG_RCAR_IPMMU_PGT_IS_SHARED - /* Use stage 2 translation table format */ - ipmmu_write(mmu, IMSAUXCTLR, - ipmmu_read(mmu, IMSAUXCTLR) | IMSAUXCTLR_S2PTE); + /* Use stage 2 translation table format */ + ipmmu_write(mmu, IMSAUXCTLR, + ipmmu_read(mmu, IMSAUXCTLR) | IMSAUXCTLR_S2PTE); #endif - } else { - /* Only IPMMU caches are affected */ - mmu->is_mmu_tlb_disabled = ipmmu_is_mmu_tlb_disable_needed(pdev); - - /* - * Disable IPMMU TLB cache function of IPMMU caches - * that do require such action. - */ - if (mmu->is_mmu_tlb_disabled) - ipmmu_write(mmu, IMSCTLR, - ipmmu_read(mmu, IMSCTLR) | IMSCTLR_DISCACHE); - } - - /* - * We can't create the ARM mapping here as it requires the bus to have - * an IOMMU, which only happens when bus_set_iommu() is called in - * ipmmu_init() after the probe function returns. - */ - - spin_lock(&ipmmu_devices_lock); - list_add(&mmu->list, &ipmmu_devices); - spin_unlock(&ipmmu_devices_lock); + } + else + { + /* Only IPMMU caches are affected */ + mmu->is_mmu_tlb_disabled = ipmmu_is_mmu_tlb_disable_needed(pdev); + + /* + * Disable IPMMU TLB cache function of IPMMU caches + * that do require such action. + */ + if ( mmu->is_mmu_tlb_disabled ) + ipmmu_write(mmu, IMSCTLR, + ipmmu_read(mmu, IMSCTLR) | IMSCTLR_DISCACHE); + } + + /* + * We can't create the ARM mapping here as it requires the bus to have + * an IOMMU, which only happens when bus_set_iommu() is called in + * ipmmu_init() after the probe function returns. + */ + + spin_lock(&ipmmu_devices_lock); + list_add(&mmu->list, &ipmmu_devices); + spin_unlock(&ipmmu_devices_lock); #if 0 /* Xen: Not needed */ platform_set_drvdata(pdev, mmu); #endif - /* Xen: */ - dev_notice(&pdev->dev, "registered %s IPMMU\n", - ipmmu_is_root(mmu) ? "root" : "cache"); + /* Xen: */ + dev_notice(&pdev->dev, "registered %s IPMMU\n", + ipmmu_is_root(mmu) ? 
"root" : "cache"); - return 0; + return 0; out: - if (!IS_ERR(mmu->base)) - iounmap(mmu->base); - kfree(mmu); + if ( !IS_ERR(mmu->base) ) + iounmap(mmu->base); + kfree(mmu); - return ret; + return ret; } #if 0 /* Xen: Not needed */ @@ -2371,281 +2405,302 @@ MODULE_LICENSE("GPL v2"); static int __must_check ipmmu_vmsa_iotlb_flush_all(struct domain *d) { #ifdef CONFIG_RCAR_IPMMU_PGT_IS_SHARED - struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv; + struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv; - if (!xen_domain || !xen_domain->base_context) - return 0; + if ( !xen_domain || !xen_domain->base_context ) + return 0; - spin_lock(&xen_domain->lock); - ipmmu_tlb_invalidate(to_vmsa_domain(xen_domain->base_context)); - spin_unlock(&xen_domain->lock); + spin_lock(&xen_domain->lock); + ipmmu_tlb_invalidate(to_vmsa_domain(xen_domain->base_context)); + spin_unlock(&xen_domain->lock); #endif - return 0; + return 0; } static int __must_check ipmmu_vmsa_iotlb_flush(struct domain *d, dfn_t dfn, - unsigned int page_count, unsigned int flush_flags) + unsigned int page_count, + unsigned int flush_flags) { #ifdef CONFIG_RCAR_IPMMU_PGT_IS_SHARED - ASSERT(flush_flags); + ASSERT(flush_flags); - /* The hardware doesn't support selective TLB flush. */ - return ipmmu_vmsa_iotlb_flush_all(d); + /* The hardware doesn't support selective TLB flush. */ + return ipmmu_vmsa_iotlb_flush_all(d); #endif - return 0; + return 0; } static struct iommu_domain *ipmmu_vmsa_get_domain(struct domain *d, - struct device *dev) + struct device *dev) { - struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv; - struct iommu_domain *io_domain; + struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv; + struct iommu_domain *io_domain; - if (!to_archdata(dev)->mmus[0] || !to_archdata(dev)->num_mmus) - return NULL; + if ( !to_archdata(dev)->mmus[0] || !to_archdata(dev)->num_mmus ) + return NULL; - /* - * Loop through the &xen_domain->contexts to locate a context - * assigned to this IPMMU - */ - list_for_each_entry(io_domain, &xen_domain->contexts, list) { - if (ipmmus_are_equal(to_vmsa_domain(io_domain), to_archdata(dev))) - return io_domain; - } + /* + * Loop through the &xen_domain->contexts to locate a context + * assigned to this IPMMU + */ + list_for_each_entry (io_domain, &xen_domain->contexts, list) + { + if ( ipmmus_are_equal(to_vmsa_domain(io_domain), to_archdata(dev)) ) + return io_domain; + } - return NULL; + return NULL; } static void ipmmu_vmsa_destroy_domain(struct iommu_domain *io_domain) { - struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - - list_del(&io_domain->list); - - if (domain->num_mmus) { - /* - * Disable the context for cache IPMMU only. Flush the TLB as required - * when modifying the context registers. - */ - ipmmu_ctx_write_cache(domain, IMCTR, IMCTR_FLUSH); - } else { - /* - * Free main domain resources. We assume that all devices have already - * been detached. - */ - ipmmu_domain_destroy_context(domain); + struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); + + list_del(&io_domain->list); + + if ( domain->num_mmus ) + { + /* + * Disable the context for cache IPMMU only. Flush the TLB as required + * when modifying the context registers. + */ + ipmmu_ctx_write_cache(domain, IMCTR, IMCTR_FLUSH); + } + else + { + /* + * Free main domain resources. We assume that all devices have already + * been detached. 
+ */ + ipmmu_domain_destroy_context(domain); #ifndef CONFIG_RCAR_IPMMU_PGT_IS_SHARED - /* - * Pass root page table for this domain as an argument. - * This call will lead to start deallocation sequence. - */ - free_io_pgtable_ops(domain->iop, - maddr_to_page(domain->cfg.arm_lpae_s1_cfg.ttbr[0])); + /* + * Pass root page table for this domain as an argument. + * This call will lead to start deallocation sequence. + */ + free_io_pgtable_ops(domain->iop, + maddr_to_page(domain->cfg.arm_lpae_s1_cfg.ttbr[0])); #endif - } + } - kfree(domain); + kfree(domain); } static int ipmmu_vmsa_alloc_page_table(struct domain *d); -static int ipmmu_vmsa_assign_dev(struct domain *d, u8 devfn, - struct device *dev, u32 flag) +static int ipmmu_vmsa_assign_dev(struct domain *d, u8 devfn, struct device *dev, + u32 flag) { - struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv; - struct iommu_domain *io_domain; - struct ipmmu_vmsa_domain *domain; - int ret = 0; + struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv; + struct iommu_domain *io_domain; + struct ipmmu_vmsa_domain *domain; + int ret = 0; - if (!xen_domain) - return -EINVAL; + if ( !xen_domain ) + return -EINVAL; - if (!xen_domain->base_context) { + if ( !xen_domain->base_context ) + { #ifndef CONFIG_RCAR_IPMMU_PGT_IS_SHARED - /* - * Page table must be already allocated as we always allocate - * it in advance for non-shared IOMMU. - */ - return -EINVAL; + /* + * Page table must be already allocated as we always allocate + * it in advance for non-shared IOMMU. + */ + return -EINVAL; #else - ret = ipmmu_vmsa_alloc_page_table(d); - if (ret) - return ret; + ret = ipmmu_vmsa_alloc_page_table(d); + if ( ret ) + return ret; #endif - } - - if (!dev->archdata.iommu) { - dev->archdata.iommu = xzalloc(struct ipmmu_vmsa_xen_device); - if (!dev->archdata.iommu) - return -ENOMEM; - } - - if (!to_archdata(dev)) { - ret = ipmmu_init_platform_device(dev); - if (ret) - return ret; - } - - spin_lock(&xen_domain->lock); - - if (dev_iommu_domain(dev)) { - dev_err(dev, "already attached to IPMMU domain\n"); - ret = -EEXIST; - goto out; - } - - /* - * Check to see if a context bank (iommu_domain) already exists for - * this Xen domain under the same IPMMU - */ - io_domain = ipmmu_vmsa_get_domain(d, dev); - if (!io_domain) { - domain = xzalloc(struct ipmmu_vmsa_domain); - if (!domain) { - ret = -ENOMEM; - goto out; - } - spin_lock_init(&domain->lock); - - domain->d = d; - domain->context_id = to_vmsa_domain(xen_domain->base_context)->context_id; - io_domain = &domain->io_domain; - - /* Chain the new context to the Xen domain */ - list_add(&io_domain->list, &xen_domain->contexts); - } - - ret = ipmmu_attach_device(io_domain, dev); - if (ret) { - if (io_domain->ref.counter == 0) - ipmmu_vmsa_destroy_domain(io_domain); - } else { - atomic_inc(&io_domain->ref); - dev_iommu_domain(dev) = io_domain; - } + } + + if ( !dev->archdata.iommu ) + { + dev->archdata.iommu = xzalloc(struct ipmmu_vmsa_xen_device); + if ( !dev->archdata.iommu ) + return -ENOMEM; + } + + if ( !to_archdata(dev) ) + { + ret = ipmmu_init_platform_device(dev); + if ( ret ) + return ret; + } + + spin_lock(&xen_domain->lock); + + if ( dev_iommu_domain(dev) ) + { + dev_err(dev, "already attached to IPMMU domain\n"); + ret = -EEXIST; + goto out; + } + + /* + * Check to see if a context bank (iommu_domain) already exists for + * this Xen domain under the same IPMMU + */ + io_domain = ipmmu_vmsa_get_domain(d, dev); + if ( !io_domain ) + { + domain = xzalloc(struct ipmmu_vmsa_domain); + if ( 
!domain ) + { + ret = -ENOMEM; + goto out; + } + spin_lock_init(&domain->lock); + + domain->d = d; + domain->context_id = + to_vmsa_domain(xen_domain->base_context)->context_id; + io_domain = &domain->io_domain; + + /* Chain the new context to the Xen domain */ + list_add(&io_domain->list, &xen_domain->contexts); + } + + ret = ipmmu_attach_device(io_domain, dev); + if ( ret ) + { + if ( io_domain->ref.counter == 0 ) + ipmmu_vmsa_destroy_domain(io_domain); + } + else + { + atomic_inc(&io_domain->ref); + dev_iommu_domain(dev) = io_domain; + } out: - spin_unlock(&xen_domain->lock); + spin_unlock(&xen_domain->lock); - return ret; + return ret; } static int ipmmu_vmsa_deassign_dev(struct domain *d, struct device *dev) { - struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv; - struct iommu_domain *io_domain = dev_iommu_domain(dev); + struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv; + struct iommu_domain *io_domain = dev_iommu_domain(dev); - if (!io_domain || to_vmsa_domain(io_domain)->d != d) { - dev_err(dev, " not attached to domain %d\n", d->domain_id); - return -ESRCH; - } + if ( !io_domain || to_vmsa_domain(io_domain)->d != d ) + { + dev_err(dev, " not attached to domain %d\n", d->domain_id); + return -ESRCH; + } - spin_lock(&xen_domain->lock); + spin_lock(&xen_domain->lock); - ipmmu_detach_device(io_domain, dev); - dev_iommu_domain(dev) = NULL; - atomic_dec(&io_domain->ref); + ipmmu_detach_device(io_domain, dev); + dev_iommu_domain(dev) = NULL; + atomic_dec(&io_domain->ref); - if (io_domain->ref.counter == 0) - ipmmu_vmsa_destroy_domain(io_domain); + if ( io_domain->ref.counter == 0 ) + ipmmu_vmsa_destroy_domain(io_domain); - spin_unlock(&xen_domain->lock); + spin_unlock(&xen_domain->lock); - return 0; + return 0; } -static int ipmmu_vmsa_reassign_dev(struct domain *s, struct domain *t, - u8 devfn, struct device *dev) +static int ipmmu_vmsa_reassign_dev(struct domain *s, struct domain *t, u8 devfn, + struct device *dev) { - int ret = 0; + int ret = 0; - /* Don't allow remapping on other domain than hwdom */ - if (t && t != hardware_domain) - return -EPERM; + /* Don't allow remapping on other domain than hwdom */ + if ( t && t != hardware_domain ) + return -EPERM; - if (t == s) - return 0; + if ( t == s ) + return 0; - ret = ipmmu_vmsa_deassign_dev(s, dev); - if (ret) - return ret; + ret = ipmmu_vmsa_deassign_dev(s, dev); + if ( ret ) + return ret; - if (t) { - /* No flags are defined for ARM. */ - ret = ipmmu_vmsa_assign_dev(t, devfn, dev, 0); - if (ret) - return ret; - } + if ( t ) + { + /* No flags are defined for ARM. */ + ret = ipmmu_vmsa_assign_dev(t, devfn, dev, 0); + if ( ret ) + return ret; + } - return 0; + return 0; } static int ipmmu_vmsa_alloc_page_table(struct domain *d) { - struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv; - struct ipmmu_vmsa_domain *domain; - struct ipmmu_vmsa_device *root; - int ret; - - root = ipmmu_find_root(NULL); - if (!root) { - printk("d%d: Unable to locate root IPMMU\n", d->domain_id); - return -EAGAIN; - } - - domain = xzalloc(struct ipmmu_vmsa_domain); - if (!domain) - return -ENOMEM; - - spin_lock_init(&domain->lock); - INIT_LIST_HEAD(&domain->io_domain.list); - domain->d = d; - domain->root = root; - /* Clear num_mmus explicitly. 
*/ - domain->num_mmus = 0; - - spin_lock(&xen_domain->lock); - ret = ipmmu_domain_init_context(domain); - if (ret < 0) { - dev_err(root->dev, "d%d: Unable to initialize IPMMU context\n", - d->domain_id); - spin_unlock(&xen_domain->lock); - xfree(domain); - return ret; - } - xen_domain->base_context = &domain->io_domain; - spin_unlock(&xen_domain->lock); - - return 0; + struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv; + struct ipmmu_vmsa_domain *domain; + struct ipmmu_vmsa_device *root; + int ret; + + root = ipmmu_find_root(NULL); + if ( !root ) + { + printk("d%d: Unable to locate root IPMMU\n", d->domain_id); + return -EAGAIN; + } + + domain = xzalloc(struct ipmmu_vmsa_domain); + if ( !domain ) + return -ENOMEM; + + spin_lock_init(&domain->lock); + INIT_LIST_HEAD(&domain->io_domain.list); + domain->d = d; + domain->root = root; + /* Clear num_mmus explicitly. */ + domain->num_mmus = 0; + + spin_lock(&xen_domain->lock); + ret = ipmmu_domain_init_context(domain); + if ( ret < 0 ) + { + dev_err(root->dev, "d%d: Unable to initialize IPMMU context\n", + d->domain_id); + spin_unlock(&xen_domain->lock); + xfree(domain); + return ret; + } + xen_domain->base_context = &domain->io_domain; + spin_unlock(&xen_domain->lock); + + return 0; } static int ipmmu_vmsa_domain_init(struct domain *d, bool use_iommu) { - struct ipmmu_vmsa_xen_domain *xen_domain; + struct ipmmu_vmsa_xen_domain *xen_domain; - xen_domain = xzalloc(struct ipmmu_vmsa_xen_domain); - if (!xen_domain) - return -ENOMEM; + xen_domain = xzalloc(struct ipmmu_vmsa_xen_domain); + if ( !xen_domain ) + return -ENOMEM; - spin_lock_init(&xen_domain->lock); - INIT_LIST_HEAD(&xen_domain->contexts); + spin_lock_init(&xen_domain->lock); + INIT_LIST_HEAD(&xen_domain->contexts); - dom_iommu(d)->arch.priv = xen_domain; + dom_iommu(d)->arch.priv = xen_domain; #ifndef CONFIG_RCAR_IPMMU_PGT_IS_SHARED - /* We allocate page table in advance only for non-shared IOMMU. */ - if (use_iommu) { - int ret = ipmmu_vmsa_alloc_page_table(d); - - if (ret) { - xfree(xen_domain); - dom_iommu(d)->arch.priv = NULL; - return ret; - } - } + /* We allocate page table in advance only for non-shared IOMMU. */ + if ( use_iommu ) + { + int ret = ipmmu_vmsa_alloc_page_table(d); + + if ( ret ) + { + xfree(xen_domain); + dom_iommu(d)->arch.priv = NULL; + return ret; + } + } #endif - return 0; + return 0; } /* @@ -2655,344 +2710,357 @@ static int ipmmu_vmsa_domain_init(struct domain *d, bool use_iommu) static void ipmmu_vmsa_free_page_table(struct page_info *page) { #ifndef CONFIG_RCAR_IPMMU_PGT_IS_SHARED - struct io_pgtable_ops *ops = (struct io_pgtable_ops *)page->pad; + struct io_pgtable_ops *ops = (struct io_pgtable_ops *)page->pad; - free_io_pgtable_ops(ops, page); + free_io_pgtable_ops(ops, page); #endif } static void __hwdom_init ipmmu_vmsa_hwdom_init(struct domain *d) { - /* Set to false options not supported on ARM. */ - if ( iommu_hwdom_inclusive ) - printk(XENLOG_WARNING - "map-inclusive dom0-iommu option is not supported on ARM\n"); - iommu_hwdom_inclusive = false; - if ( iommu_hwdom_reserved == 1 ) - printk(XENLOG_WARNING - "map-reserved dom0-iommu option is not supported on ARM\n"); - iommu_hwdom_reserved = 0; + /* Set to false options not supported on ARM. 
*/ + if ( iommu_hwdom_inclusive ) + printk(XENLOG_WARNING + "map-inclusive dom0-iommu option is not supported on ARM\n"); + iommu_hwdom_inclusive = false; + if ( iommu_hwdom_reserved == 1 ) + printk(XENLOG_WARNING + "map-reserved dom0-iommu option is not supported on ARM\n"); + iommu_hwdom_reserved = 0; - arch_iommu_hwdom_init(d); + arch_iommu_hwdom_init(d); } static void ipmmu_vmsa_domain_teardown(struct domain *d) { - struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv; + struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv; - if (!xen_domain) - return; + if ( !xen_domain ) + return; - spin_lock(&xen_domain->lock); - if (xen_domain->base_context) { - ipmmu_vmsa_destroy_domain(xen_domain->base_context); - xen_domain->base_context = NULL; - } - spin_unlock(&xen_domain->lock); + spin_lock(&xen_domain->lock); + if ( xen_domain->base_context ) + { + ipmmu_vmsa_destroy_domain(xen_domain->base_context); + xen_domain->base_context = NULL; + } + spin_unlock(&xen_domain->lock); - ASSERT(list_empty(&xen_domain->contexts)); - xfree(xen_domain); - dom_iommu(d)->arch.priv = NULL; - /* - * Please note that the comment below only makes sence when the IPMMU - * page table isn't shared. - * After this point we have all domain resources deallocated, except - * page table which we will deallocate asynchronously. The IOMMU code - * provides us with iommu_pt_cleanup_list and free_page_table platform - * callback what we actually going to use. - */ + ASSERT(list_empty(&xen_domain->contexts)); + xfree(xen_domain); + dom_iommu(d)->arch.priv = NULL; + /* + * Please note that the comment below only makes sence when the IPMMU + * page table isn't shared. + * After this point we have all domain resources deallocated, except + * page table which we will deallocate asynchronously. The IOMMU code + * provides us with iommu_pt_cleanup_list and free_page_table platform + * callback what we actually going to use. + */ } static int __must_check ipmmu_vmsa_map_pages(struct domain *d, dfn_t dfn, - mfn_t mfn, unsigned int order, unsigned int flags, - unsigned int *flush_flags) + mfn_t mfn, unsigned int order, + unsigned int flags, + unsigned int *flush_flags) { #ifndef CONFIG_RCAR_IPMMU_PGT_IS_SHARED - struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv; - size_t size = PAGE_SIZE * (1UL << order); - int ret, prot = 0; + struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv; + size_t size = PAGE_SIZE * (1UL << order); + int ret, prot = 0; - if (!xen_domain || !xen_domain->base_context) - return -EINVAL; + if ( !xen_domain || !xen_domain->base_context ) + return -EINVAL; - if (flags & IOMMUF_writable) - prot |= IOMMU_WRITE; - if (flags & IOMMUF_readable) - prot |= IOMMU_READ; + if ( flags & IOMMUF_writable ) + prot |= IOMMU_WRITE; + if ( flags & IOMMUF_readable ) + prot |= IOMMU_READ; - spin_lock(&xen_domain->lock); - ret = ipmmu_map(xen_domain->base_context, pfn_to_paddr(dfn_x(dfn)), - pfn_to_paddr(mfn_x(mfn)), size, prot); - spin_unlock(&xen_domain->lock); + spin_lock(&xen_domain->lock); + ret = ipmmu_map(xen_domain->base_context, pfn_to_paddr(dfn_x(dfn)), + pfn_to_paddr(mfn_x(mfn)), size, prot); + spin_unlock(&xen_domain->lock); - return ret; + return ret; #else - p2m_type_t t; - - /* - * Grant mappings can be used for DMA requests. The dev_bus_addr - * returned by the hypercall is the MFN (not the IPA). For device - * protected by an IOMMU, Xen needs to add a 1:1 mapping in the domain - * p2m to allow DMA request to work. 
- * This is only valid when the domain is directed mapped. Hence this - * function should only be used by gnttab code with gfn == mfn == dfn. - */ - BUG_ON(!is_domain_direct_mapped(d)); - BUG_ON(mfn_x(mfn) != dfn_x(dfn)); - - /* We only support readable and writable flags */ - if (!(flags & (IOMMUF_readable | IOMMUF_writable))) - return -EINVAL; - - t = (flags & IOMMUF_writable) ? p2m_iommu_map_rw : p2m_iommu_map_ro; - - /* - * The function guest_physmap_add_entry replaces the current mapping - * if there is already one... - */ - return guest_physmap_add_entry(d, _gfn(dfn_x(dfn)), _mfn(dfn_x(dfn)), - order, t); + p2m_type_t t; + + /* + * Grant mappings can be used for DMA requests. The dev_bus_addr + * returned by the hypercall is the MFN (not the IPA). For device + * protected by an IOMMU, Xen needs to add a 1:1 mapping in the domain + * p2m to allow DMA request to work. + * This is only valid when the domain is directed mapped. Hence this + * function should only be used by gnttab code with gfn == mfn == dfn. + */ + BUG_ON(!is_domain_direct_mapped(d)); + BUG_ON(mfn_x(mfn) != dfn_x(dfn)); + + /* We only support readable and writable flags */ + if ( !(flags & (IOMMUF_readable | IOMMUF_writable)) ) + return -EINVAL; + + t = (flags & IOMMUF_writable) ? p2m_iommu_map_rw : p2m_iommu_map_ro; + + /* + * The function guest_physmap_add_entry replaces the current mapping + * if there is already one... + */ + return guest_physmap_add_entry(d, _gfn(dfn_x(dfn)), _mfn(dfn_x(dfn)), order, + t); #endif } static int __must_check ipmmu_vmsa_unmap_pages(struct domain *d, dfn_t dfn, - unsigned int order, unsigned int *flush_flags) + unsigned int order, + unsigned int *flush_flags) { #ifndef CONFIG_RCAR_IPMMU_PGT_IS_SHARED - struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv; - size_t ret, size = PAGE_SIZE * (1UL << order); - - if (!xen_domain || !xen_domain->base_context) - return -EINVAL; - - spin_lock(&xen_domain->lock); - ret = ipmmu_unmap(xen_domain->base_context, pfn_to_paddr(dfn_x(dfn)), size); - spin_unlock(&xen_domain->lock); - - /* - * We don't check how many bytes were actually unmapped. Otherwise we - * should have raised an error every time we hit an area that isn't mapped. - * And the p2m's attempt to unmap the same page twice can lead to crash or - * panic. We think it is better to have corresponding warns inside - * page table allocator for complaining about that rather than - * breaking the whole system. - */ - return IS_ERR_VALUE(ret) ? ret : 0; + struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv; + size_t ret, size = PAGE_SIZE * (1UL << order); + + if ( !xen_domain || !xen_domain->base_context ) + return -EINVAL; + + spin_lock(&xen_domain->lock); + ret = ipmmu_unmap(xen_domain->base_context, pfn_to_paddr(dfn_x(dfn)), size); + spin_unlock(&xen_domain->lock); + + /* + * We don't check how many bytes were actually unmapped. Otherwise we + * should have raised an error every time we hit an area that isn't mapped. + * And the p2m's attempt to unmap the same page twice can lead to crash or + * panic. We think it is better to have corresponding warns inside + * page table allocator for complaining about that rather than + * breaking the whole system. + */ + return IS_ERR_VALUE(ret) ? ret : 0; #else - /* - * This function should only be used by gnttab code when the domain - * is direct mapped (i.e. gfn == mfn == dfn). 
- */ - if ( !is_domain_direct_mapped(d) ) - return -EINVAL; - - return guest_physmap_remove_page(d, _gfn(dfn_x(dfn)), _mfn(dfn_x(dfn)), - order); + /* + * This function should only be used by gnttab code when the domain + * is direct mapped (i.e. gfn == mfn == dfn). + */ + if ( !is_domain_direct_mapped(d) ) + return -EINVAL; + + return guest_physmap_remove_page(d, _gfn(dfn_x(dfn)), _mfn(dfn_x(dfn)), + order); #endif } static void ipmmu_vmsa_dump_p2m_table(struct domain *d) { #ifndef CONFIG_RCAR_IPMMU_PGT_IS_SHARED - /* TODO: This platform callback should be implemented. */ + /* TODO: This platform callback should be implemented. */ #endif } static const struct iommu_ops ipmmu_vmsa_iommu_ops = { - .init = ipmmu_vmsa_domain_init, - .hwdom_init = ipmmu_vmsa_hwdom_init, - .free_page_table = ipmmu_vmsa_free_page_table, - .teardown = ipmmu_vmsa_domain_teardown, - .iotlb_flush = ipmmu_vmsa_iotlb_flush, - .iotlb_flush_all = ipmmu_vmsa_iotlb_flush_all, - .assign_device = ipmmu_vmsa_assign_dev, - .reassign_device = ipmmu_vmsa_reassign_dev, - .map_pages = ipmmu_vmsa_map_pages, - .unmap_pages = ipmmu_vmsa_unmap_pages, - .dump_p2m_table = ipmmu_vmsa_dump_p2m_table, + .init = ipmmu_vmsa_domain_init, + .hwdom_init = ipmmu_vmsa_hwdom_init, + .free_page_table = ipmmu_vmsa_free_page_table, + .teardown = ipmmu_vmsa_domain_teardown, + .iotlb_flush = ipmmu_vmsa_iotlb_flush, + .iotlb_flush_all = ipmmu_vmsa_iotlb_flush_all, + .assign_device = ipmmu_vmsa_assign_dev, + .reassign_device = ipmmu_vmsa_reassign_dev, + .map_pages = ipmmu_vmsa_map_pages, + .unmap_pages = ipmmu_vmsa_unmap_pages, + .dump_p2m_table = ipmmu_vmsa_dump_p2m_table, }; -static __init const struct ipmmu_vmsa_device *find_ipmmu(const struct device *dev) +static __init const struct ipmmu_vmsa_device * +find_ipmmu(const struct device *dev) { - struct ipmmu_vmsa_device *mmu; - bool found = false; + struct ipmmu_vmsa_device *mmu; + bool found = false; - spin_lock(&ipmmu_devices_lock); - list_for_each_entry(mmu, &ipmmu_devices, list) { - if (mmu->dev == dev) { - found = true; - break; - } - } - spin_unlock(&ipmmu_devices_lock); + spin_lock(&ipmmu_devices_lock); + list_for_each_entry (mmu, &ipmmu_devices, list) + { + if ( mmu->dev == dev ) + { + found = true; + break; + } + } + spin_unlock(&ipmmu_devices_lock); - return (found) ? mmu : NULL; + return (found) ? 
mmu : NULL; } static __init void populate_ipmmu_masters(const struct ipmmu_vmsa_device *mmu) { - struct dt_device_node *np; + struct dt_device_node *np; - dt_for_each_device_node(dt_host, np) { - if (mmu->dev->of_node != dt_parse_phandle(np, "iommus", 0)) - continue; + dt_for_each_device_node (dt_host, np) + { + if ( mmu->dev->of_node != dt_parse_phandle(np, "iommus", 0) ) + continue; - /* Let Xen know that the device is protected by an IPMMU */ - dt_device_set_protected(np); + /* Let Xen know that the device is protected by an IPMMU */ + dt_device_set_protected(np); - dev_notice(mmu->dev, "found master device %s\n", dt_node_full_name(np)); - } + dev_notice(mmu->dev, "found master device %s\n", dt_node_full_name(np)); + } } #ifdef CONFIG_RCAR_IPMMU_PGT_IS_SHARED /* RCAR GEN3 product and cut information */ -#define RCAR_PRODUCT_MASK 0x00007F00 -#define RCAR_PRODUCT_H3 0x00004F00 -#define RCAR_PRODUCT_M3 0x00005200 -#define RCAR_PRODUCT_M3N 0x00005500 -#define RCAR_CUT_MASK 0x000000FF -#define RCAR_CUT_VER30 0x00000020 +#define RCAR_PRODUCT_MASK 0x00007F00 +#define RCAR_PRODUCT_H3 0x00004F00 +#define RCAR_PRODUCT_M3 0x00005200 +#define RCAR_PRODUCT_M3N 0x00005500 +#define RCAR_CUT_MASK 0x000000FF +#define RCAR_CUT_VER30 0x00000020 static __init bool ipmmu_vmsa_stage2_supported(void) { - struct dt_device_node *np; - u64 addr, size; - void __iomem *base; - u32 product, cut; - static enum { - UNKNOWN, - SUPPORTED, - NOTSUPPORTED - } stage2_supported = UNKNOWN; - - /* Use the flag to avoid checking for the compatibility more then once */ - switch (stage2_supported) { - case SUPPORTED: - return true; - - case NOTSUPPORTED: - return false; - - case UNKNOWN: - default: - stage2_supported = NOTSUPPORTED; - break; - } - - np = dt_find_compatible_node(NULL, NULL, "renesas,prr"); - if (!np) { - printk("failed to find PRR node\n"); - return false; - } - - if (dt_device_get_address(np, 0, &addr, &size)) { - printk("failed to get PRR MMIO\n"); - return false; - } - - base = ioremap_nocache(addr, size); - if (!base) { - printk("failed to ioremap PRR MMIO\n"); - return false; - } - - product = readl(base); - cut = product & RCAR_CUT_MASK; - product &= RCAR_PRODUCT_MASK; - - switch (product) { - case RCAR_PRODUCT_H3: - case RCAR_PRODUCT_M3: - if (cut >= RCAR_CUT_VER30) - stage2_supported = SUPPORTED; - break; - - case RCAR_PRODUCT_M3N: - stage2_supported = SUPPORTED; - break; - } - - iounmap(base); - - return stage2_supported == SUPPORTED; + struct dt_device_node *np; + u64 addr, size; + void __iomem *base; + u32 product, cut; + static enum { UNKNOWN, SUPPORTED, NOTSUPPORTED } stage2_supported = UNKNOWN; + + /* Use the flag to avoid checking for the compatibility more then once */ + switch (stage2_supported) + { + case SUPPORTED: + return true; + + case NOTSUPPORTED: + return false; + + case UNKNOWN: + default: + stage2_supported = NOTSUPPORTED; + break; + } + + np = dt_find_compatible_node(NULL, NULL, "renesas,prr"); + if ( !np ) + { + printk("failed to find PRR node\n"); + return false; + } + + if ( dt_device_get_address(np, 0, &addr, &size) ) + { + printk("failed to get PRR MMIO\n"); + return false; + } + + base = ioremap_nocache(addr, size); + if ( !base ) + { + printk("failed to ioremap PRR MMIO\n"); + return false; + } + + product = readl(base); + cut = product & RCAR_CUT_MASK; + product &= RCAR_PRODUCT_MASK; + + switch (product) + { + case RCAR_PRODUCT_H3: + case RCAR_PRODUCT_M3: + if ( cut >= RCAR_CUT_VER30 ) + stage2_supported = SUPPORTED; + break; + + case RCAR_PRODUCT_M3N: + stage2_supported = 
SUPPORTED; + break; + } + + iounmap(base); + + return stage2_supported == SUPPORTED; } #endif /* TODO: What to do if we failed to init cache/root IPMMU? */ -static __init int ipmmu_vmsa_init(struct dt_device_node *dev, - const void *data) +static __init int ipmmu_vmsa_init(struct dt_device_node *dev, const void *data) { - int rc; - const struct ipmmu_vmsa_device *mmu; - static bool set_ops_done = false; + int rc; + const struct ipmmu_vmsa_device *mmu; + static bool set_ops_done = false; - /* - * Even if the device can't be initialized, we don't want to - * give the IPMMU device to dom0. - */ - dt_device_set_used_by(dev, DOMID_XEN); + /* + * Even if the device can't be initialized, we don't want to + * give the IPMMU device to dom0. + */ + dt_device_set_used_by(dev, DOMID_XEN); #ifndef CONFIG_RCAR_IPMMU_PGT_IS_SHARED - /* - * The IPMMU can't utilize P2M table since it doesn't use the same - * page-table format as the CPU. - */ - if (iommu_hap_pt_share) { - iommu_hap_pt_share = false; - dev_notice(&dev->dev, - "disable sharing P2M table between the CPU and IPMMU\n"); - } + /* + * The IPMMU can't utilize P2M table since it doesn't use the same + * page-table format as the CPU. + */ + if ( iommu_hap_pt_share ) + { + iommu_hap_pt_share = false; + dev_notice(&dev->dev, + "disable sharing P2M table between the CPU and IPMMU\n"); + } #else - if (!iommu_hap_pt_share) { - dev_err(&dev->dev, - "P2M table must always be shared between the CPU and the IPMMU\n"); - return -EINVAL; - } - - if (!ipmmu_vmsa_stage2_supported()) { - dev_err(&dev->dev, - "P2M sharing is not supported in current SoC revision\n"); - return -EOPNOTSUPP; - } + if ( !iommu_hap_pt_share ) + { + dev_err( + &dev->dev, + "P2M table must always be shared between the CPU and the IPMMU\n"); + return -EINVAL; + } + + if ( !ipmmu_vmsa_stage2_supported() ) + { + dev_err(&dev->dev, + "P2M sharing is not supported in current SoC revision\n"); + return -EOPNOTSUPP; + } #endif - /* - * Perform platform specific actions such as power-on, errata maintenance - * if required. - */ - rc = ipmmu_preinit(dev); - if (rc) { - dev_err(&dev->dev, "failed to preinit IPMMU (%d)\n", rc); - return rc; - } - - rc = ipmmu_probe(dev); - if (rc) { - dev_err(&dev->dev, "failed to init IPMMU\n"); - return rc; - } - - /* - * Since IPMMU is composed of two parts (a number of cache IPMMUs and - * the root IPMMU) this function will be called more than once. - * Use the flag below to avoid setting IOMMU ops if they already set. - */ - if (!set_ops_done) { - iommu_set_ops(&ipmmu_vmsa_iommu_ops); - set_ops_done = true; - } - - /* Find the last IPMMU added. */ - mmu = find_ipmmu(dt_to_dev(dev)); - BUG_ON(mmu == NULL); - - /* Mark all masters that connected to the last IPMMU as protected. */ - populate_ipmmu_masters(mmu); - - return 0; + /* + * Perform platform specific actions such as power-on, errata maintenance + * if required. + */ + rc = ipmmu_preinit(dev); + if ( rc ) + { + dev_err(&dev->dev, "failed to preinit IPMMU (%d)\n", rc); + return rc; + } + + rc = ipmmu_probe(dev); + if ( rc ) + { + dev_err(&dev->dev, "failed to init IPMMU\n"); + return rc; + } + + /* + * Since IPMMU is composed of two parts (a number of cache IPMMUs and + * the root IPMMU) this function will be called more than once. + * Use the flag below to avoid setting IOMMU ops if they already set. + */ + if ( !set_ops_done ) + { + iommu_set_ops(&ipmmu_vmsa_iommu_ops); + set_ops_done = true; + } + + /* Find the last IPMMU added. 
*/ + mmu = find_ipmmu(dt_to_dev(dev)); + BUG_ON(mmu == NULL); + + /* Mark all masters that connected to the last IPMMU as protected. */ + populate_ipmmu_masters(mmu); + + return 0; } DT_DEVICE_START(ipmmu, "Renesas IPMMU-VMSA", DEVICE_IOMMU) - .dt_match = ipmmu_of_ids, - .init = ipmmu_vmsa_init, -DT_DEVICE_END + .dt_match = ipmmu_of_ids, + .init = ipmmu_vmsa_init, DT_DEVICE_END diff --git a/xen/drivers/passthrough/arm/smmu.c b/xen/drivers/passthrough/arm/smmu.c index d992b5b03b..9b5924e400 100644 --- a/xen/drivers/passthrough/arm/smmu.c +++ b/xen/drivers/passthrough/arm/smmu.c @@ -34,7 +34,6 @@ * - Context fault reporting */ - #include #include #include @@ -62,16 +61,17 @@ #define of_phandle_args dt_phandle_args #define of_device_id dt_device_match #define of_match_node dt_match_node -#define of_property_read_u32(np, pname, out) (!dt_property_read_u32(np, pname, out)) +#define of_property_read_u32(np, pname, out) \ + (!dt_property_read_u32(np, pname, out)) #define of_property_read_bool dt_property_read_bool #define of_parse_phandle_with_args dt_parse_phandle_with_args /* Xen: Helpers to get device MMIO and IRQs */ struct resource { - u64 addr; - u64 size; - unsigned int type; + u64 addr; + u64 size; + unsigned int type; }; #define resource_size(res) (res)->size; @@ -82,47 +82,50 @@ struct resource #define IORESOURCE_IRQ 1 static struct resource *platform_get_resource(struct platform_device *pdev, - unsigned int type, - unsigned int num) + unsigned int type, + unsigned int num) { - /* - * The resource is only used between 2 calls of platform_get_resource. - * It's quite ugly but it's avoid to add too much code in the part - * imported from Linux - */ - static struct resource res; - int ret = 0; + /* + * The resource is only used between 2 calls of platform_get_resource. + * It's quite ugly but it's avoid to add too much code in the part + * imported from Linux + */ + static struct resource res; + int ret = 0; - res.type = type; + res.type = type; - switch (type) { - case IORESOURCE_MEM: - ret = dt_device_get_address(pdev, num, &res.addr, &res.size); + switch (type) + { + case IORESOURCE_MEM: + ret = dt_device_get_address(pdev, num, &res.addr, &res.size); - return ((ret) ? NULL : &res); + return ((ret) ? NULL : &res); - case IORESOURCE_IRQ: - ret = platform_get_irq(pdev, num); - if (ret < 0) - return NULL; + case IORESOURCE_IRQ: + ret = platform_get_irq(pdev, num); + if ( ret < 0 ) + return NULL; - res.addr = ret; - res.size = 1; + res.addr = ret; + res.size = 1; - return &res; + return &res; - default: - return NULL; - } + default: + return NULL; + } } /* Xen: Helpers for IRQ functions */ -#define request_irq(irq, func, flags, name, dev) request_irq(irq, flags, func, name, dev) +#define request_irq(irq, func, flags, name, dev) \ + request_irq(irq, flags, func, name, dev) #define free_irq release_irq -enum irqreturn { - IRQ_NONE = (0 << 0), - IRQ_HANDLED = (1 << 0), +enum irqreturn +{ + IRQ_NONE = (0 << 0), + IRQ_HANDLED = (1 << 0), }; typedef enum irqreturn irqreturn_t; @@ -130,72 +133,76 @@ typedef enum irqreturn irqreturn_t; /* Device logger functions * TODO: Handle PCI */ -#define dev_print(dev, lvl, fmt, ...) \ - printk(lvl "smmu: %s: " fmt, dt_node_full_name(dev_to_dt(dev)), ## __VA_ARGS__) +#define dev_print(dev, lvl, fmt, ...) \ + printk(lvl "smmu: %s: " fmt, dt_node_full_name(dev_to_dt(dev)), \ + ##__VA_ARGS__) -#define dev_dbg(dev, fmt, ...) dev_print(dev, XENLOG_DEBUG, fmt, ## __VA_ARGS__) -#define dev_notice(dev, fmt, ...) 
dev_print(dev, XENLOG_INFO, fmt, ## __VA_ARGS__) -#define dev_warn(dev, fmt, ...) dev_print(dev, XENLOG_WARNING, fmt, ## __VA_ARGS__) -#define dev_err(dev, fmt, ...) dev_print(dev, XENLOG_ERR, fmt, ## __VA_ARGS__) +#define dev_dbg(dev, fmt, ...) dev_print(dev, XENLOG_DEBUG, fmt, ##__VA_ARGS__) +#define dev_notice(dev, fmt, ...) \ + dev_print(dev, XENLOG_INFO, fmt, ##__VA_ARGS__) +#define dev_warn(dev, fmt, ...) \ + dev_print(dev, XENLOG_WARNING, fmt, ##__VA_ARGS__) +#define dev_err(dev, fmt, ...) dev_print(dev, XENLOG_ERR, fmt, ##__VA_ARGS__) -#define dev_err_ratelimited(dev, fmt, ...) \ - dev_print(dev, XENLOG_ERR, fmt, ## __VA_ARGS__) +#define dev_err_ratelimited(dev, fmt, ...) \ + dev_print(dev, XENLOG_ERR, fmt, ##__VA_ARGS__) #define dev_name(dev) dt_node_full_name(dev_to_dt(dev)) /* Alias to Xen allocation helpers */ #define kfree xfree -#define kmalloc(size, flags) _xmalloc(size, sizeof(void *)) -#define kzalloc(size, flags) _xzalloc(size, sizeof(void *)) -#define devm_kzalloc(dev, size, flags) _xzalloc(size, sizeof(void *)) -#define kmalloc_array(size, n, flags) _xmalloc_array(size, sizeof(void *), n) +#define kmalloc(size, flags) _xmalloc(size, sizeof(void *)) +#define kzalloc(size, flags) _xzalloc(size, sizeof(void *)) +#define devm_kzalloc(dev, size, flags) _xzalloc(size, sizeof(void *)) +#define kmalloc_array(size, n, flags) _xmalloc_array(size, sizeof(void *), n) static void __iomem *devm_ioremap_resource(struct device *dev, - struct resource *res) + struct resource *res) { - void __iomem *ptr; + void __iomem *ptr; - if (!res || res->type != IORESOURCE_MEM) { - dev_err(dev, "Invalid resource\n"); - return ERR_PTR(-EINVAL); - } + if ( !res || res->type != IORESOURCE_MEM ) + { + dev_err(dev, "Invalid resource\n"); + return ERR_PTR(-EINVAL); + } - ptr = ioremap_nocache(res->addr, res->size); - if (!ptr) { - dev_err(dev, - "ioremap failed (addr 0x%"PRIx64" size 0x%"PRIx64")\n", - res->addr, res->size); - return ERR_PTR(-ENOMEM); - } + ptr = ioremap_nocache(res->addr, res->size); + if ( !ptr ) + { + dev_err(dev, "ioremap failed (addr 0x%" PRIx64 " size 0x%" PRIx64 ")\n", + res->addr, res->size); + return ERR_PTR(-ENOMEM); + } - return ptr; + return ptr; } /* Xen doesn't handle IOMMU fault */ -#define report_iommu_fault(...) 1 +#define report_iommu_fault(...) 1 -#define IOMMU_FAULT_READ 0 -#define IOMMU_FAULT_WRITE 1 +#define IOMMU_FAULT_READ 0 +#define IOMMU_FAULT_WRITE 1 /* * Xen: PCI functions * TODO: It should be implemented when PCI will be supported */ -#define to_pci_dev(dev) (NULL) +#define to_pci_dev(dev) (NULL) static inline int pci_for_each_dma_alias(struct pci_dev *pdev, - int (*fn) (struct pci_dev *pdev, - u16 alias, void *data), - void *data) + int (*fn)(struct pci_dev *pdev, + u16 alias, void *data), + void *data) { - BUG(); - return 0; + BUG(); + return 0; } /* Xen: misc */ -#define PHYS_MASK_SHIFT PADDR_BITS +#define PHYS_MASK_SHIFT PADDR_BITS typedef paddr_t phys_addr_t; -#define VA_BITS 0 /* Only used for configuring stage-1 input size */ +#define VA_BITS 0 /* Only used for configuring stage-1 input size */ #define MODULE_DEVICE_TABLE(type, name) #define module_param_named(name, value, type, perm) @@ -204,21 +211,22 @@ typedef paddr_t phys_addr_t; /* Xen: Dummy iommu_domain */ struct iommu_domain { - /* Runtime SMMU configuration for this iommu_domain */ - struct arm_smmu_domain *priv; + /* Runtime SMMU configuration for this iommu_domain */ + struct arm_smmu_domain *priv; - atomic_t ref; - /* Used to link iommu_domain contexts for a same domain. 
- * There is at least one per-SMMU to used by the domain. - * */ - struct list_head list; + atomic_t ref; + /* Used to link iommu_domain contexts for a same domain. + * There is at least one per-SMMU to used by the domain. + * */ + struct list_head list; }; /* Xen: Describes informations required for a Xen domain */ -struct arm_smmu_xen_domain { - spinlock_t lock; - /* List of context (i.e iommu_domain) associated to this domain */ - struct list_head contexts; +struct arm_smmu_xen_domain +{ + spinlock_t lock; + /* List of context (i.e iommu_domain) associated to this domain */ + struct list_head contexts; }; /* @@ -232,9 +240,10 @@ struct arm_smmu_xen_domain { * that would require to move some hackery (dummy iommu_group) in a more generic * place. * */ -struct arm_smmu_xen_device { - struct iommu_domain *domain; - struct iommu_group *group; +struct arm_smmu_xen_device +{ + struct iommu_domain *domain; + struct iommu_group *group; }; #define dev_archdata(dev) ((struct arm_smmu_xen_device *)dev->archdata.iommu) @@ -244,58 +253,57 @@ struct arm_smmu_xen_device { /* Xen: Dummy iommu_group */ struct iommu_group { - /* Streamids of the device */ - struct arm_smmu_master_cfg *cfg; + /* Streamids of the device */ + struct arm_smmu_master_cfg *cfg; - atomic_t ref; + atomic_t ref; }; static struct iommu_group *iommu_group_alloc(void) { - struct iommu_group *group = xzalloc(struct iommu_group); + struct iommu_group *group = xzalloc(struct iommu_group); - if (!group) - return ERR_PTR(-ENOMEM); + if ( !group ) + return ERR_PTR(-ENOMEM); - atomic_set(&group->ref, 1); + atomic_set(&group->ref, 1); - return group; + return group; } static void iommu_group_put(struct iommu_group *group) { - if (atomic_dec_and_test(&group->ref)) - xfree(group); + if ( atomic_dec_and_test(&group->ref) ) + xfree(group); } static void iommu_group_set_iommudata(struct iommu_group *group, - struct arm_smmu_master_cfg *cfg, - void (*releasefn)(void *)) + struct arm_smmu_master_cfg *cfg, + void (*releasefn)(void *)) { - /* TODO: Store the releasefn for the PCI */ - ASSERT(releasefn == NULL); + /* TODO: Store the releasefn for the PCI */ + ASSERT(releasefn == NULL); - group->cfg = cfg; + group->cfg = cfg; } -static int iommu_group_add_device(struct iommu_group *group, - struct device *dev) +static int iommu_group_add_device(struct iommu_group *group, struct device *dev) { - dev_iommu_group(dev) = group; + dev_iommu_group(dev) = group; - atomic_inc(&group->ref); + atomic_inc(&group->ref); - return 0; + return 0; } static struct iommu_group *iommu_group_get(struct device *dev) { - struct iommu_group *group = dev_iommu_group(dev); + struct iommu_group *group = dev_iommu_group(dev); - if (group) - atomic_inc(&group->ref); + if ( group ) + atomic_inc(&group->ref); - return group; + return group; } #define iommu_group_get_iommudata(group) (group)->cfg @@ -303,401 +311,412 @@ static struct iommu_group *iommu_group_get(struct device *dev) /***** Start of Linux SMMU code *****/ /* Maximum number of stream IDs assigned to a single device */ -#define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS +#define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS /* Maximum number of context banks per SMMU */ -#define ARM_SMMU_MAX_CBS 128 +#define ARM_SMMU_MAX_CBS 128 /* Maximum number of mapping groups per SMMU */ -#define ARM_SMMU_MAX_SMRS 128 +#define ARM_SMMU_MAX_SMRS 128 /* SMMU global address space */ -#define ARM_SMMU_GR0(smmu) ((smmu)->base) -#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift)) +#define ARM_SMMU_GR0(smmu) ((smmu)->base) +#define 
ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift)) /* * SMMU global address space with conditional offset to access secure * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448, * nsGFSYNR0: 0x450) */ -#define ARM_SMMU_GR0_NS(smmu) \ - ((smmu)->base + \ - ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \ - ? 0x400 : 0)) +#define ARM_SMMU_GR0_NS(smmu) \ + ((smmu)->base + \ + ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) ? 0x400 : 0)) /* Page table bits */ -#define ARM_SMMU_PTE_XN (((pteval_t)3) << 53) -#define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52) -#define ARM_SMMU_PTE_AF (((pteval_t)1) << 10) -#define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8) -#define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8) -#define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8) -#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0) +#define ARM_SMMU_PTE_XN (((pteval_t)3) << 53) +#define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52) +#define ARM_SMMU_PTE_AF (((pteval_t)1) << 10) +#define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8) +#define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8) +#define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8) +#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0) #if PAGE_SIZE == SZ_4K -#define ARM_SMMU_PTE_CONT_ENTRIES 16 +#define ARM_SMMU_PTE_CONT_ENTRIES 16 #elif PAGE_SIZE == SZ_64K -#define ARM_SMMU_PTE_CONT_ENTRIES 32 +#define ARM_SMMU_PTE_CONT_ENTRIES 32 #else -#define ARM_SMMU_PTE_CONT_ENTRIES 1 +#define ARM_SMMU_PTE_CONT_ENTRIES 1 #endif -#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES) -#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1)) +#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES) +#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1)) /* Stage-1 PTE */ -#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6) -#define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6) -#define ARM_SMMU_PTE_ATTRINDX_SHIFT 2 -#define ARM_SMMU_PTE_nG (((pteval_t)1) << 11) +#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6) +#define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6) +#define ARM_SMMU_PTE_ATTRINDX_SHIFT 2 +#define ARM_SMMU_PTE_nG (((pteval_t)1) << 11) /* Stage-2 PTE */ -#define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6) -#define ARM_SMMU_PTE_HAP_READ (((pteval_t)1) << 6) -#define ARM_SMMU_PTE_HAP_WRITE (((pteval_t)2) << 6) -#define ARM_SMMU_PTE_MEMATTR_OIWB (((pteval_t)0xf) << 2) -#define ARM_SMMU_PTE_MEMATTR_NC (((pteval_t)0x5) << 2) -#define ARM_SMMU_PTE_MEMATTR_DEV (((pteval_t)0x1) << 2) +#define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6) +#define ARM_SMMU_PTE_HAP_READ (((pteval_t)1) << 6) +#define ARM_SMMU_PTE_HAP_WRITE (((pteval_t)2) << 6) +#define ARM_SMMU_PTE_MEMATTR_OIWB (((pteval_t)0xf) << 2) +#define ARM_SMMU_PTE_MEMATTR_NC (((pteval_t)0x5) << 2) +#define ARM_SMMU_PTE_MEMATTR_DEV (((pteval_t)0x1) << 2) /* Configuration registers */ -#define ARM_SMMU_GR0_sCR0 0x0 -#define sCR0_CLIENTPD (1 << 0) -#define sCR0_GFRE (1 << 1) -#define sCR0_GFIE (1 << 2) -#define sCR0_GCFGFRE (1 << 4) -#define sCR0_GCFGFIE (1 << 5) -#define sCR0_USFCFG (1 << 10) -#define sCR0_VMIDPNE (1 << 11) -#define sCR0_PTM (1 << 12) -#define sCR0_FB (1 << 13) -#define sCR0_BSU_SHIFT 14 -#define sCR0_BSU_MASK 0x3 +#define ARM_SMMU_GR0_sCR0 0x0 +#define sCR0_CLIENTPD (1 << 0) +#define sCR0_GFRE (1 << 1) +#define sCR0_GFIE (1 << 2) +#define sCR0_GCFGFRE (1 << 4) +#define sCR0_GCFGFIE (1 << 5) +#define sCR0_USFCFG (1 << 10) +#define sCR0_VMIDPNE (1 << 11) +#define sCR0_PTM (1 << 12) +#define sCR0_FB (1 << 13) +#define sCR0_BSU_SHIFT 14 +#define sCR0_BSU_MASK 0x3 /* Identification 
registers */ -#define ARM_SMMU_GR0_ID0 0x20 -#define ARM_SMMU_GR0_ID1 0x24 -#define ARM_SMMU_GR0_ID2 0x28 -#define ARM_SMMU_GR0_ID3 0x2c -#define ARM_SMMU_GR0_ID4 0x30 -#define ARM_SMMU_GR0_ID5 0x34 -#define ARM_SMMU_GR0_ID6 0x38 -#define ARM_SMMU_GR0_ID7 0x3c -#define ARM_SMMU_GR0_sGFSR 0x48 -#define ARM_SMMU_GR0_sGFSYNR0 0x50 -#define ARM_SMMU_GR0_sGFSYNR1 0x54 -#define ARM_SMMU_GR0_sGFSYNR2 0x58 -#define ARM_SMMU_GR0_PIDR0 0xfe0 -#define ARM_SMMU_GR0_PIDR1 0xfe4 -#define ARM_SMMU_GR0_PIDR2 0xfe8 - -#define ID0_S1TS (1 << 30) -#define ID0_S2TS (1 << 29) -#define ID0_NTS (1 << 28) -#define ID0_SMS (1 << 27) -#define ID0_PTFS_SHIFT 24 -#define ID0_PTFS_MASK 0x2 -#define ID0_PTFS_V8_ONLY 0x2 -#define ID0_CTTW (1 << 14) -#define ID0_NUMIRPT_SHIFT 16 -#define ID0_NUMIRPT_MASK 0xff -#define ID0_NUMSIDB_SHIFT 9 -#define ID0_NUMSIDB_MASK 0xf -#define ID0_NUMSMRG_SHIFT 0 -#define ID0_NUMSMRG_MASK 0xff - -#define ID1_PAGESIZE (1 << 31) -#define ID1_NUMPAGENDXB_SHIFT 28 -#define ID1_NUMPAGENDXB_MASK 7 -#define ID1_NUMS2CB_SHIFT 16 -#define ID1_NUMS2CB_MASK 0xff -#define ID1_NUMCB_SHIFT 0 -#define ID1_NUMCB_MASK 0xff - -#define ID2_OAS_SHIFT 4 -#define ID2_OAS_MASK 0xf -#define ID2_IAS_SHIFT 0 -#define ID2_IAS_MASK 0xf -#define ID2_UBS_SHIFT 8 -#define ID2_UBS_MASK 0xf -#define ID2_PTFS_4K (1 << 12) -#define ID2_PTFS_16K (1 << 13) -#define ID2_PTFS_64K (1 << 14) - -#define PIDR2_ARCH_SHIFT 4 -#define PIDR2_ARCH_MASK 0xf +#define ARM_SMMU_GR0_ID0 0x20 +#define ARM_SMMU_GR0_ID1 0x24 +#define ARM_SMMU_GR0_ID2 0x28 +#define ARM_SMMU_GR0_ID3 0x2c +#define ARM_SMMU_GR0_ID4 0x30 +#define ARM_SMMU_GR0_ID5 0x34 +#define ARM_SMMU_GR0_ID6 0x38 +#define ARM_SMMU_GR0_ID7 0x3c +#define ARM_SMMU_GR0_sGFSR 0x48 +#define ARM_SMMU_GR0_sGFSYNR0 0x50 +#define ARM_SMMU_GR0_sGFSYNR1 0x54 +#define ARM_SMMU_GR0_sGFSYNR2 0x58 +#define ARM_SMMU_GR0_PIDR0 0xfe0 +#define ARM_SMMU_GR0_PIDR1 0xfe4 +#define ARM_SMMU_GR0_PIDR2 0xfe8 + +#define ID0_S1TS (1 << 30) +#define ID0_S2TS (1 << 29) +#define ID0_NTS (1 << 28) +#define ID0_SMS (1 << 27) +#define ID0_PTFS_SHIFT 24 +#define ID0_PTFS_MASK 0x2 +#define ID0_PTFS_V8_ONLY 0x2 +#define ID0_CTTW (1 << 14) +#define ID0_NUMIRPT_SHIFT 16 +#define ID0_NUMIRPT_MASK 0xff +#define ID0_NUMSIDB_SHIFT 9 +#define ID0_NUMSIDB_MASK 0xf +#define ID0_NUMSMRG_SHIFT 0 +#define ID0_NUMSMRG_MASK 0xff + +#define ID1_PAGESIZE (1 << 31) +#define ID1_NUMPAGENDXB_SHIFT 28 +#define ID1_NUMPAGENDXB_MASK 7 +#define ID1_NUMS2CB_SHIFT 16 +#define ID1_NUMS2CB_MASK 0xff +#define ID1_NUMCB_SHIFT 0 +#define ID1_NUMCB_MASK 0xff + +#define ID2_OAS_SHIFT 4 +#define ID2_OAS_MASK 0xf +#define ID2_IAS_SHIFT 0 +#define ID2_IAS_MASK 0xf +#define ID2_UBS_SHIFT 8 +#define ID2_UBS_MASK 0xf +#define ID2_PTFS_4K (1 << 12) +#define ID2_PTFS_16K (1 << 13) +#define ID2_PTFS_64K (1 << 14) + +#define PIDR2_ARCH_SHIFT 4 +#define PIDR2_ARCH_MASK 0xf /* Global TLB invalidation */ -#define ARM_SMMU_GR0_STLBIALL 0x60 -#define ARM_SMMU_GR0_TLBIVMID 0x64 -#define ARM_SMMU_GR0_TLBIALLNSNH 0x68 -#define ARM_SMMU_GR0_TLBIALLH 0x6c -#define ARM_SMMU_GR0_sTLBGSYNC 0x70 -#define ARM_SMMU_GR0_sTLBGSTATUS 0x74 -#define sTLBGSTATUS_GSACTIVE (1 << 0) -#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */ +#define ARM_SMMU_GR0_STLBIALL 0x60 +#define ARM_SMMU_GR0_TLBIVMID 0x64 +#define ARM_SMMU_GR0_TLBIALLNSNH 0x68 +#define ARM_SMMU_GR0_TLBIALLH 0x6c +#define ARM_SMMU_GR0_sTLBGSYNC 0x70 +#define ARM_SMMU_GR0_sTLBGSTATUS 0x74 +#define sTLBGSTATUS_GSACTIVE (1 << 0) +#define TLB_LOOP_TIMEOUT 1000000 /* 1s! 
*/ /* Stream mapping registers */ -#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) -#define SMR_VALID (1 << 31) -#define SMR_MASK_SHIFT 16 -#define SMR_MASK_MASK 0x7fff -#define SMR_ID_SHIFT 0 -#define SMR_ID_MASK 0x7fff - -#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) -#define S2CR_CBNDX_SHIFT 0 -#define S2CR_CBNDX_MASK 0xff -#define S2CR_TYPE_SHIFT 16 -#define S2CR_TYPE_MASK 0x3 -#define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT) -#define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT) -#define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT) +#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) +#define SMR_VALID (1 << 31) +#define SMR_MASK_SHIFT 16 +#define SMR_MASK_MASK 0x7fff +#define SMR_ID_SHIFT 0 +#define SMR_ID_MASK 0x7fff + +#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) +#define S2CR_CBNDX_SHIFT 0 +#define S2CR_CBNDX_MASK 0xff +#define S2CR_TYPE_SHIFT 16 +#define S2CR_TYPE_MASK 0x3 +#define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT) +#define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT) +#define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT) /* Context bank attribute registers */ -#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) -#define CBAR_VMID_SHIFT 0 -#define CBAR_VMID_MASK 0xff -#define CBAR_S1_BPSHCFG_SHIFT 8 -#define CBAR_S1_BPSHCFG_MASK 3 -#define CBAR_S1_BPSHCFG_NSH 3 -#define CBAR_S1_MEMATTR_SHIFT 12 -#define CBAR_S1_MEMATTR_MASK 0xf -#define CBAR_S1_MEMATTR_WB 0xf -#define CBAR_TYPE_SHIFT 16 -#define CBAR_TYPE_MASK 0x3 -#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT) -#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT) -#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT) -#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT) -#define CBAR_IRPTNDX_SHIFT 24 -#define CBAR_IRPTNDX_MASK 0xff - -#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2)) -#define CBA2R_RW64_32BIT (0 << 0) -#define CBA2R_RW64_64BIT (1 << 0) +#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) +#define CBAR_VMID_SHIFT 0 +#define CBAR_VMID_MASK 0xff +#define CBAR_S1_BPSHCFG_SHIFT 8 +#define CBAR_S1_BPSHCFG_MASK 3 +#define CBAR_S1_BPSHCFG_NSH 3 +#define CBAR_S1_MEMATTR_SHIFT 12 +#define CBAR_S1_MEMATTR_MASK 0xf +#define CBAR_S1_MEMATTR_WB 0xf +#define CBAR_TYPE_SHIFT 16 +#define CBAR_TYPE_MASK 0x3 +#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT) +#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT) +#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT) +#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT) +#define CBAR_IRPTNDX_SHIFT 24 +#define CBAR_IRPTNDX_MASK 0xff + +#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2)) +#define CBA2R_RW64_32BIT (0 << 0) +#define CBA2R_RW64_64BIT (1 << 0) /* Translation context bank */ -#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1)) -#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift)) - -#define ARM_SMMU_CB_SCTLR 0x0 -#define ARM_SMMU_CB_RESUME 0x8 -#define ARM_SMMU_CB_TTBCR2 0x10 -#define ARM_SMMU_CB_TTBR0_LO 0x20 -#define ARM_SMMU_CB_TTBR0_HI 0x24 -#define ARM_SMMU_CB_TTBCR 0x30 -#define ARM_SMMU_CB_S1_MAIR0 0x38 -#define ARM_SMMU_CB_FSR 0x58 -#define ARM_SMMU_CB_FAR_LO 0x60 -#define ARM_SMMU_CB_FAR_HI 0x64 -#define ARM_SMMU_CB_FSYNR0 0x68 -#define ARM_SMMU_CB_S1_TLBIASID 0x610 - -#define SCTLR_S1_ASIDPNE (1 << 12) -#define SCTLR_CFCFG (1 << 7) -#define SCTLR_CFIE (1 << 6) -#define SCTLR_CFRE (1 << 5) -#define SCTLR_E (1 << 4) -#define SCTLR_AFE (1 << 2) -#define SCTLR_TRE (1 << 1) -#define SCTLR_M (1 << 0) -#define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE) - -#define RESUME_RETRY (0 << 0) -#define RESUME_TERMINATE (1 << 0) - -#define 
TTBCR_EAE (1 << 31) - -#define TTBCR_PASIZE_SHIFT 16 -#define TTBCR_PASIZE_MASK 0x7 - -#define TTBCR_TG0_4K (0 << 14) -#define TTBCR_TG0_64K (1 << 14) - -#define TTBCR_SH0_SHIFT 12 -#define TTBCR_SH0_MASK 0x3 -#define TTBCR_SH_NS 0 -#define TTBCR_SH_OS 2 -#define TTBCR_SH_IS 3 - -#define TTBCR_ORGN0_SHIFT 10 -#define TTBCR_IRGN0_SHIFT 8 -#define TTBCR_RGN_MASK 0x3 -#define TTBCR_RGN_NC 0 -#define TTBCR_RGN_WBWA 1 -#define TTBCR_RGN_WT 2 -#define TTBCR_RGN_WB 3 - -#define TTBCR_SL0_SHIFT 6 -#define TTBCR_SL0_MASK 0x3 -#define TTBCR_SL0_LVL_2 0 -#define TTBCR_SL0_LVL_1 1 - -#define TTBCR_T1SZ_SHIFT 16 -#define TTBCR_T0SZ_SHIFT 0 -#define TTBCR_SZ_MASK 0xf - -#define TTBCR2_SEP_SHIFT 15 -#define TTBCR2_SEP_MASK 0x7 - -#define TTBCR2_PASIZE_SHIFT 0 -#define TTBCR2_PASIZE_MASK 0x7 +#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1)) +#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift)) + +#define ARM_SMMU_CB_SCTLR 0x0 +#define ARM_SMMU_CB_RESUME 0x8 +#define ARM_SMMU_CB_TTBCR2 0x10 +#define ARM_SMMU_CB_TTBR0_LO 0x20 +#define ARM_SMMU_CB_TTBR0_HI 0x24 +#define ARM_SMMU_CB_TTBCR 0x30 +#define ARM_SMMU_CB_S1_MAIR0 0x38 +#define ARM_SMMU_CB_FSR 0x58 +#define ARM_SMMU_CB_FAR_LO 0x60 +#define ARM_SMMU_CB_FAR_HI 0x64 +#define ARM_SMMU_CB_FSYNR0 0x68 +#define ARM_SMMU_CB_S1_TLBIASID 0x610 + +#define SCTLR_S1_ASIDPNE (1 << 12) +#define SCTLR_CFCFG (1 << 7) +#define SCTLR_CFIE (1 << 6) +#define SCTLR_CFRE (1 << 5) +#define SCTLR_E (1 << 4) +#define SCTLR_AFE (1 << 2) +#define SCTLR_TRE (1 << 1) +#define SCTLR_M (1 << 0) +#define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE) + +#define RESUME_RETRY (0 << 0) +#define RESUME_TERMINATE (1 << 0) + +#define TTBCR_EAE (1 << 31) + +#define TTBCR_PASIZE_SHIFT 16 +#define TTBCR_PASIZE_MASK 0x7 + +#define TTBCR_TG0_4K (0 << 14) +#define TTBCR_TG0_64K (1 << 14) + +#define TTBCR_SH0_SHIFT 12 +#define TTBCR_SH0_MASK 0x3 +#define TTBCR_SH_NS 0 +#define TTBCR_SH_OS 2 +#define TTBCR_SH_IS 3 + +#define TTBCR_ORGN0_SHIFT 10 +#define TTBCR_IRGN0_SHIFT 8 +#define TTBCR_RGN_MASK 0x3 +#define TTBCR_RGN_NC 0 +#define TTBCR_RGN_WBWA 1 +#define TTBCR_RGN_WT 2 +#define TTBCR_RGN_WB 3 + +#define TTBCR_SL0_SHIFT 6 +#define TTBCR_SL0_MASK 0x3 +#define TTBCR_SL0_LVL_2 0 +#define TTBCR_SL0_LVL_1 1 + +#define TTBCR_T1SZ_SHIFT 16 +#define TTBCR_T0SZ_SHIFT 0 +#define TTBCR_SZ_MASK 0xf + +#define TTBCR2_SEP_SHIFT 15 +#define TTBCR2_SEP_MASK 0x7 + +#define TTBCR2_PASIZE_SHIFT 0 +#define TTBCR2_PASIZE_MASK 0x7 /* Common definitions for PASize and SEP fields */ -#define TTBCR2_ADDR_32 0 -#define TTBCR2_ADDR_36 1 -#define TTBCR2_ADDR_40 2 -#define TTBCR2_ADDR_42 3 -#define TTBCR2_ADDR_44 4 -#define TTBCR2_ADDR_48 5 - -#define TTBRn_HI_ASID_SHIFT 16 - -#define MAIR_ATTR_SHIFT(n) ((n) << 3) -#define MAIR_ATTR_MASK 0xff -#define MAIR_ATTR_DEVICE 0x04 -#define MAIR_ATTR_NC 0x44 -#define MAIR_ATTR_WBRWA 0xff -#define MAIR_ATTR_IDX_NC 0 -#define MAIR_ATTR_IDX_CACHE 1 -#define MAIR_ATTR_IDX_DEV 2 - -#define FSR_MULTI (1 << 31) -#define FSR_SS (1 << 30) -#define FSR_UUT (1 << 8) -#define FSR_ASF (1 << 7) -#define FSR_TLBLKF (1 << 6) -#define FSR_TLBMCF (1 << 5) -#define FSR_EF (1 << 4) -#define FSR_PF (1 << 3) -#define FSR_AFF (1 << 2) -#define FSR_TF (1 << 1) - -#define FSR_IGN (FSR_AFF | FSR_ASF | \ - FSR_TLBMCF | FSR_TLBLKF) -#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \ - FSR_EF | FSR_PF | FSR_TF | FSR_IGN) - -#define FSYNR0_WNR (1 << 4) +#define TTBCR2_ADDR_32 0 +#define TTBCR2_ADDR_36 1 +#define TTBCR2_ADDR_40 2 +#define TTBCR2_ADDR_42 3 +#define TTBCR2_ADDR_44 4 +#define 
TTBCR2_ADDR_48 5 + +#define TTBRn_HI_ASID_SHIFT 16 + +#define MAIR_ATTR_SHIFT(n) ((n) << 3) +#define MAIR_ATTR_MASK 0xff +#define MAIR_ATTR_DEVICE 0x04 +#define MAIR_ATTR_NC 0x44 +#define MAIR_ATTR_WBRWA 0xff +#define MAIR_ATTR_IDX_NC 0 +#define MAIR_ATTR_IDX_CACHE 1 +#define MAIR_ATTR_IDX_DEV 2 + +#define FSR_MULTI (1 << 31) +#define FSR_SS (1 << 30) +#define FSR_UUT (1 << 8) +#define FSR_ASF (1 << 7) +#define FSR_TLBLKF (1 << 6) +#define FSR_TLBMCF (1 << 5) +#define FSR_EF (1 << 4) +#define FSR_PF (1 << 3) +#define FSR_AFF (1 << 2) +#define FSR_TF (1 << 1) + +#define FSR_IGN (FSR_AFF | FSR_ASF | FSR_TLBMCF | FSR_TLBLKF) +#define FSR_FAULT \ + (FSR_MULTI | FSR_SS | FSR_UUT | FSR_EF | FSR_PF | FSR_TF | FSR_IGN) + +#define FSYNR0_WNR (1 << 4) static int force_stage; module_param_named(force_stage, force_stage, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(force_stage, - "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation."); +MODULE_PARM_DESC( + force_stage, + "Force SMMU mappings to be installed at a particular stage of translation. " + "A value of '1' or '2' forces the corresponding stage. All other values " + "are ignored (i.e. no stage is forced). Note that selecting a specific " + "stage will disable support for nested translation."); -enum arm_smmu_arch_version { - ARM_SMMU_V1 = 1, - ARM_SMMU_V2, +enum arm_smmu_arch_version +{ + ARM_SMMU_V1 = 1, + ARM_SMMU_V2, }; -struct arm_smmu_smr { - u8 idx; - u16 mask; - u16 id; +struct arm_smmu_smr +{ + u8 idx; + u16 mask; + u16 id; }; -struct arm_smmu_master_cfg { - int num_streamids; - u16 streamids[MAX_MASTER_STREAMIDS]; - struct arm_smmu_smr *smrs; +struct arm_smmu_master_cfg +{ + int num_streamids; + u16 streamids[MAX_MASTER_STREAMIDS]; + struct arm_smmu_smr *smrs; }; -struct arm_smmu_master { - struct device_node *of_node; - struct rb_node node; - struct arm_smmu_master_cfg cfg; +struct arm_smmu_master +{ + struct device_node *of_node; + struct rb_node node; + struct arm_smmu_master_cfg cfg; }; -struct arm_smmu_device { - struct device *dev; +struct arm_smmu_device +{ + struct device *dev; - void __iomem *base; - unsigned long size; - unsigned long pgshift; + void __iomem *base; + unsigned long size; + unsigned long pgshift; -#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0) -#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1) -#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2) -#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) -#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) - u32 features; +#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0) +#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1) +#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2) +#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) +#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) + u32 features; #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) - u32 options; - enum arm_smmu_arch_version version; + u32 options; + enum arm_smmu_arch_version version; - u32 num_context_banks; - u32 num_s2_context_banks; - DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS); - atomic_t irptndx; + u32 num_context_banks; + u32 num_s2_context_banks; + DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS); + atomic_t irptndx; - u32 num_mapping_groups; - DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS); + u32 num_mapping_groups; + DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS); - unsigned long s1_input_size; - unsigned long s1_output_size; - unsigned long s2_input_size; - unsigned long 
s2_output_size; + unsigned long s1_input_size; + unsigned long s1_output_size; + unsigned long s2_input_size; + unsigned long s2_output_size; - u32 num_global_irqs; - u32 num_context_irqs; - unsigned int *irqs; + u32 num_global_irqs; + u32 num_context_irqs; + unsigned int *irqs; - struct list_head list; - struct rb_root masters; + struct list_head list; + struct rb_root masters; }; -struct arm_smmu_cfg { - u8 cbndx; - u8 irptndx; - u32 cbar; +struct arm_smmu_cfg +{ + u8 cbndx; + u8 irptndx; + u32 cbar; - /* Xen: Domain associated to this configuration */ - struct domain *domain; + /* Xen: Domain associated to this configuration */ + struct domain *domain; }; -#define INVALID_IRPTNDX 0xff +#define INVALID_IRPTNDX 0xff -#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) -#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) +#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) +#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) -enum arm_smmu_domain_stage { - ARM_SMMU_DOMAIN_S1 = 0, - ARM_SMMU_DOMAIN_S2, - ARM_SMMU_DOMAIN_NESTED, +enum arm_smmu_domain_stage +{ + ARM_SMMU_DOMAIN_S1 = 0, + ARM_SMMU_DOMAIN_S2, + ARM_SMMU_DOMAIN_NESTED, }; -struct arm_smmu_domain { - struct arm_smmu_device *smmu; - struct arm_smmu_cfg cfg; - enum arm_smmu_domain_stage stage; - spinlock_t lock; +struct arm_smmu_domain +{ + struct arm_smmu_device *smmu; + struct arm_smmu_cfg cfg; + enum arm_smmu_domain_stage stage; + spinlock_t lock; }; static DEFINE_SPINLOCK(arm_smmu_devices_lock); static LIST_HEAD(arm_smmu_devices); -struct arm_smmu_option_prop { - u32 opt; - const char *prop; +struct arm_smmu_option_prop +{ + u32 opt; + const char *prop; }; static struct arm_smmu_option_prop arm_smmu_options[] = { - { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, - { 0, NULL}, + {ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access"}, + {0, NULL}, }; static void parse_driver_options(struct arm_smmu_device *smmu) { - int i = 0; + int i = 0; - do { - if (of_property_read_bool(smmu->dev->of_node, - arm_smmu_options[i].prop)) { - smmu->options |= arm_smmu_options[i].opt; - dev_notice(smmu->dev, "option %s\n", - arm_smmu_options[i].prop); - } - } while (arm_smmu_options[++i].opt); + do { + if ( of_property_read_bool(smmu->dev->of_node, + arm_smmu_options[i].prop) ) + { + smmu->options |= arm_smmu_options[i].opt; + dev_notice(smmu->dev, "option %s\n", arm_smmu_options[i].prop); + } + } while ( arm_smmu_options[++i].opt ); } static struct device_node *dev_get_dev_node(struct device *dev) @@ -712,260 +731,273 @@ static struct device_node *dev_get_dev_node(struct device *dev) } #endif - return dev->of_node; + return dev->of_node; } static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, - struct device_node *dev_node) + struct device_node *dev_node) { - struct rb_node *node = smmu->masters.rb_node; + struct rb_node *node = smmu->masters.rb_node; - while (node) { - struct arm_smmu_master *master; + while ( node ) + { + struct arm_smmu_master *master; - master = container_of(node, struct arm_smmu_master, node); + master = container_of(node, struct arm_smmu_master, node); - if (dev_node < master->of_node) - node = node->rb_left; - else if (dev_node > master->of_node) - node = node->rb_right; - else - return master; - } + if ( dev_node < master->of_node ) + node = node->rb_left; + else if ( dev_node > master->of_node ) + node = node->rb_right; + else + return master; + } - return NULL; + return NULL; } -static struct arm_smmu_master_cfg * -find_smmu_master_cfg(struct device *dev) +static struct 
arm_smmu_master_cfg *find_smmu_master_cfg(struct device *dev) { - struct arm_smmu_master_cfg *cfg = NULL; - struct iommu_group *group = iommu_group_get(dev); + struct arm_smmu_master_cfg *cfg = NULL; + struct iommu_group *group = iommu_group_get(dev); - if (group) { - cfg = iommu_group_get_iommudata(group); - iommu_group_put(group); - } + if ( group ) + { + cfg = iommu_group_get_iommudata(group); + iommu_group_put(group); + } - return cfg; + return cfg; } static int insert_smmu_master(struct arm_smmu_device *smmu, - struct arm_smmu_master *master) + struct arm_smmu_master *master) { - struct rb_node **new, *parent; + struct rb_node **new, *parent; - new = &smmu->masters.rb_node; - parent = NULL; - while (*new) { - struct arm_smmu_master *this - = container_of(*new, struct arm_smmu_master, node); + new = &smmu->masters.rb_node; + parent = NULL; + while ( *new ) + { + struct arm_smmu_master *this = + container_of(*new, struct arm_smmu_master, node); - parent = *new; - if (master->of_node < this->of_node) - new = &((*new)->rb_left); - else if (master->of_node > this->of_node) - new = &((*new)->rb_right); - else - return -EEXIST; - } + parent = *new; + if ( master->of_node < this->of_node ) + new = &((*new)->rb_left); + else if ( master->of_node > this->of_node ) + new = &((*new)->rb_right); + else + return -EEXIST; + } - rb_link_node(&master->node, parent, new); - rb_insert_color(&master->node, &smmu->masters); - return 0; + rb_link_node(&master->node, parent, new); + rb_insert_color(&master->node, &smmu->masters); + return 0; } static int register_smmu_master(struct arm_smmu_device *smmu, - struct device *dev, - struct of_phandle_args *masterspec) -{ - int i; - struct arm_smmu_master *master; - - master = find_smmu_master(smmu, masterspec->np); - if (master) { - dev_err(dev, - "rejecting multiple registrations for master device %s\n", - masterspec->np->name); - return -EBUSY; - } - - if (masterspec->args_count > MAX_MASTER_STREAMIDS) { - dev_err(dev, - "reached maximum number (%d) of stream IDs for master device %s\n", - MAX_MASTER_STREAMIDS, masterspec->np->name); - return -ENOSPC; - } - - master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL); - if (!master) - return -ENOMEM; - - master->of_node = masterspec->np; - master->cfg.num_streamids = masterspec->args_count; - - /* Xen: Let Xen know that the device is protected by an SMMU */ - dt_device_set_protected(masterspec->np); - - for (i = 0; i < master->cfg.num_streamids; ++i) { - u16 streamid = masterspec->args[i]; - - if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && - (streamid >= smmu->num_mapping_groups)) { - dev_err(dev, - "stream ID for master device %s greater than maximum allowed (%d)\n", - masterspec->np->name, smmu->num_mapping_groups); - return -ERANGE; - } - master->cfg.streamids[i] = streamid; - } - return insert_smmu_master(smmu, master); + struct device *dev, + struct of_phandle_args *masterspec) +{ + int i; + struct arm_smmu_master *master; + + master = find_smmu_master(smmu, masterspec->np); + if ( master ) + { + dev_err(dev, "rejecting multiple registrations for master device %s\n", + masterspec->np->name); + return -EBUSY; + } + + if ( masterspec->args_count > MAX_MASTER_STREAMIDS ) + { + dev_err( + dev, + "reached maximum number (%d) of stream IDs for master device %s\n", + MAX_MASTER_STREAMIDS, masterspec->np->name); + return -ENOSPC; + } + + master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL); + if ( !master ) + return -ENOMEM; + + master->of_node = masterspec->np; + master->cfg.num_streamids = 
masterspec->args_count; + + /* Xen: Let Xen know that the device is protected by an SMMU */ + dt_device_set_protected(masterspec->np); + + for ( i = 0; i < master->cfg.num_streamids; ++i ) + { + u16 streamid = masterspec->args[i]; + + if ( !(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && + (streamid >= smmu->num_mapping_groups) ) + { + dev_err(dev, + "stream ID for master device %s greater than maximum " + "allowed (%d)\n", + masterspec->np->name, smmu->num_mapping_groups); + return -ERANGE; + } + master->cfg.streamids[i] = streamid; + } + return insert_smmu_master(smmu, master); } static struct arm_smmu_device *find_smmu_for_device(struct device *dev) { - struct arm_smmu_device *smmu; - struct arm_smmu_master *master = NULL; - struct device_node *dev_node = dev_get_dev_node(dev); + struct arm_smmu_device *smmu; + struct arm_smmu_master *master = NULL; + struct device_node *dev_node = dev_get_dev_node(dev); - spin_lock(&arm_smmu_devices_lock); - list_for_each_entry(smmu, &arm_smmu_devices, list) { - master = find_smmu_master(smmu, dev_node); - if (master) - break; - } - spin_unlock(&arm_smmu_devices_lock); + spin_lock(&arm_smmu_devices_lock); + list_for_each_entry (smmu, &arm_smmu_devices, list) + { + master = find_smmu_master(smmu, dev_node); + if ( master ) + break; + } + spin_unlock(&arm_smmu_devices_lock); - return master ? smmu : NULL; + return master ? smmu : NULL; } static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) { - int idx; + int idx; - do { - idx = find_next_zero_bit(map, end, start); - if (idx == end) - return -ENOSPC; - } while (test_and_set_bit(idx, map)); + do { + idx = find_next_zero_bit(map, end, start); + if ( idx == end ) + return -ENOSPC; + } while ( test_and_set_bit(idx, map) ); - return idx; + return idx; } static void __arm_smmu_free_bitmap(unsigned long *map, int idx) { - clear_bit(idx, map); + clear_bit(idx, map); } /* Wait for any pending TLB invalidations to complete */ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu) { - int count = 0; - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC); - while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS) - & sTLBGSTATUS_GSACTIVE) { - cpu_relax(); - if (++count == TLB_LOOP_TIMEOUT) { - dev_err_ratelimited(smmu->dev, - "TLB sync timed out -- SMMU may be deadlocked\n"); - return; - } - udelay(1); - } + int count = 0; + void __iomem *gr0_base = ARM_SMMU_GR0(smmu); + + writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC); + while ( readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS) & + sTLBGSTATUS_GSACTIVE ) + { + cpu_relax(); + if ( ++count == TLB_LOOP_TIMEOUT ) + { + dev_err_ratelimited( + smmu->dev, "TLB sync timed out -- SMMU may be deadlocked\n"); + return; + } + udelay(1); + } } static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain) { - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *base = ARM_SMMU_GR0(smmu); - bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + struct arm_smmu_device *smmu = smmu_domain->smmu; + void __iomem *base = ARM_SMMU_GR0(smmu); + bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; - if (stage1) { - base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); - writel_relaxed(ARM_SMMU_CB_ASID(cfg), - base + ARM_SMMU_CB_S1_TLBIASID); - } else { - base = ARM_SMMU_GR0(smmu); - writel_relaxed(ARM_SMMU_CB_VMID(cfg), - base + ARM_SMMU_GR0_TLBIVMID); - } + if ( stage1 ) + { + base = 
ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); + writel_relaxed(ARM_SMMU_CB_ASID(cfg), base + ARM_SMMU_CB_S1_TLBIASID); + } + else + { + base = ARM_SMMU_GR0(smmu); + writel_relaxed(ARM_SMMU_CB_VMID(cfg), base + ARM_SMMU_GR0_TLBIVMID); + } - arm_smmu_tlb_sync(smmu); + arm_smmu_tlb_sync(smmu); } static irqreturn_t arm_smmu_context_fault(int irq, void *dev) { - u32 fsr, far, fsynr; - unsigned long iova; - struct iommu_domain *domain = dev; - struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *cb_base; + u32 fsr, far, fsynr; + unsigned long iova; + struct iommu_domain *domain = dev; + struct arm_smmu_domain *smmu_domain = domain->priv; + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + struct arm_smmu_device *smmu = smmu_domain->smmu; + void __iomem *cb_base; - cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); - fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); + cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); + fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); - if (!(fsr & FSR_FAULT)) - return IRQ_NONE; + if ( !(fsr & FSR_FAULT) ) + return IRQ_NONE; - fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); + fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); - far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO); - iova = far; + far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO); + iova = far; #ifdef CONFIG_64BIT - far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI); - iova |= ((unsigned long)far << 32); + far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI); + iova |= ((unsigned long)far << 32); #endif - dev_err_ratelimited(smmu->dev, - "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n", - fsr, iova, fsynr, cfg->cbndx); - - writel(fsr, cb_base + ARM_SMMU_CB_FSR); - return IRQ_HANDLED; + dev_err_ratelimited( + smmu->dev, + "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n", + fsr, iova, fsynr, cfg->cbndx); + + writel(fsr, cb_base + ARM_SMMU_CB_FSR); + return IRQ_HANDLED; } static irqreturn_t arm_smmu_global_fault(int irq, void *dev) { - u32 gfsr, gfsynr0, gfsynr1, gfsynr2; - struct arm_smmu_device *smmu = dev; - void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu); + u32 gfsr, gfsynr0, gfsynr1, gfsynr2; + struct arm_smmu_device *smmu = dev; + void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu); - gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR); - gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0); - gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1); - gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2); + gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR); + gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0); + gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1); + gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2); - if (!gfsr) - return IRQ_NONE; + if ( !gfsr ) + return IRQ_NONE; - dev_err_ratelimited(smmu->dev, - "Unexpected global fault, this could be serious\n"); - dev_err_ratelimited(smmu->dev, - "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n", - gfsr, gfsynr0, gfsynr1, gfsynr2); + dev_err_ratelimited(smmu->dev, + "Unexpected global fault, this could be serious\n"); + dev_err_ratelimited( + smmu->dev, + "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n", gfsr, + gfsynr0, gfsynr1, gfsynr2); - writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR); - return IRQ_HANDLED; + writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR); + return IRQ_HANDLED; } /* Xen: 
Interrupt handlers wrapper */ static void arm_smmu_context_fault_xen(int irq, void *dev, - struct cpu_user_regs *regs) + struct cpu_user_regs *regs) { - arm_smmu_context_fault(irq, dev); + arm_smmu_context_fault(irq, dev); } #define arm_smmu_context_fault arm_smmu_context_fault_xen static void arm_smmu_global_fault_xen(int irq, void *dev, - struct cpu_user_regs *regs) + struct cpu_user_regs *regs) { - arm_smmu_global_fault(irq, dev); + arm_smmu_global_fault(irq, dev); } #define arm_smmu_global_fault arm_smmu_global_fault_xen @@ -996,311 +1028,329 @@ static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr, static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) { - u32 reg; - bool stage1; - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *cb_base, *gr0_base, *gr1_base; - paddr_t p2maddr; - - gr0_base = ARM_SMMU_GR0(smmu); - gr1_base = ARM_SMMU_GR1(smmu); - stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; - cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); - - /* CBAR */ - reg = cfg->cbar; - if (smmu->version == ARM_SMMU_V1) - reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT; - - /* - * Use the weakest shareability/memory types, so they are - * overridden by the ttbcr/pte. - */ - if (stage1) { - reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) | - (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); - } else { - reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT; - } - writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)); - - if (smmu->version > ARM_SMMU_V1) { - /* CBA2R */ + u32 reg; + bool stage1; + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + struct arm_smmu_device *smmu = smmu_domain->smmu; + void __iomem *cb_base, *gr0_base, *gr1_base; + paddr_t p2maddr; + + gr0_base = ARM_SMMU_GR0(smmu); + gr1_base = ARM_SMMU_GR1(smmu); + stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; + cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); + + /* CBAR */ + reg = cfg->cbar; + if ( smmu->version == ARM_SMMU_V1 ) + reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT; + + /* + * Use the weakest shareability/memory types, so they are + * overridden by the ttbcr/pte. 
+ */ + if ( stage1 ) + { + reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) | + (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); + } + else + { + reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT; + } + writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)); + + if ( smmu->version > ARM_SMMU_V1 ) + { + /* CBA2R */ #ifdef CONFIG_64BIT - reg = CBA2R_RW64_64BIT; + reg = CBA2R_RW64_64BIT; #else - reg = CBA2R_RW64_32BIT; + reg = CBA2R_RW64_32BIT; #endif - writel_relaxed(reg, - gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); - - /* TTBCR2 */ - switch (smmu->s1_input_size) { - case 32: - reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT); - break; - case 36: - reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); - break; - case 39: - case 40: - reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); - break; - case 42: - reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT); - break; - case 44: - reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT); - break; - case 48: - reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT); - break; - } - - switch (smmu->s1_output_size) { - case 32: - reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT); - break; - case 36: - reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT); - break; - case 39: - case 40: - reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT); - break; - case 42: - reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT); - break; - case 44: - reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT); - break; - case 48: - reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT); - break; - } - - if (stage1) - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); - } - - /* TTBR0 */ - /* Xen: The page table is shared with the P2M code */ - ASSERT(smmu_domain->cfg.domain != NULL); - p2maddr = page_to_maddr(smmu_domain->cfg.domain->arch.p2m.root); - - dev_notice(smmu->dev, "d%u: p2maddr 0x%"PRIpaddr"\n", - smmu_domain->cfg.domain->domain_id, p2maddr); - - reg = (p2maddr & ((1ULL << 32) - 1)); - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); - reg = (p2maddr >> 32); - if (stage1) - reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); - - /* - * TTBCR - * We use long descriptor, with inner-shareable WBWA tables in TTBR0. - */ - if (smmu->version > ARM_SMMU_V1) { - if (PAGE_SIZE == SZ_4K) - reg = TTBCR_TG0_4K; - else - reg = TTBCR_TG0_64K; - - if (!stage1) { - reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT; - - switch (smmu->s2_output_size) { - case 32: - reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT); - break; - case 36: - reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT); - break; - case 40: - reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT); - break; - case 42: - reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT); - break; - case 44: - reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT); - break; - case 48: - reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT); - break; - } - } else { - reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT; - } - } else { - reg = 0; - } - - /* Xen: The attributes to walk the page table should be the same as - * VTCR_EL2. Currently doesn't differ from Linux ones. 
- */ - reg |= TTBCR_EAE | - (TTBCR_SH_IS << TTBCR_SH0_SHIFT) | - (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) | - (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT); - - if (!stage1) - reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); - - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); - - /* MAIR0 (stage-1 only) */ - if (stage1) { - reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) | - (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) | - (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV)); - writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0); - } - - /* - * SCTLR - * - * Do not set SCTLR_CFCFG, because of Erratum #842869 - */ - reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; - if (stage1) - reg |= SCTLR_S1_ASIDPNE; + writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); + + /* TTBCR2 */ + switch (smmu->s1_input_size) + { + case 32: + reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT); + break; + case 36: + reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); + break; + case 39: + case 40: + reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); + break; + case 42: + reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT); + break; + case 44: + reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT); + break; + case 48: + reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT); + break; + } + + switch (smmu->s1_output_size) + { + case 32: + reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT); + break; + case 36: + reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT); + break; + case 39: + case 40: + reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT); + break; + case 42: + reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT); + break; + case 44: + reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT); + break; + case 48: + reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT); + break; + } + + if ( stage1 ) + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); + } + + /* TTBR0 */ + /* Xen: The page table is shared with the P2M code */ + ASSERT(smmu_domain->cfg.domain != NULL); + p2maddr = page_to_maddr(smmu_domain->cfg.domain->arch.p2m.root); + + dev_notice(smmu->dev, "d%u: p2maddr 0x%" PRIpaddr "\n", + smmu_domain->cfg.domain->domain_id, p2maddr); + + reg = (p2maddr & ((1ULL << 32) - 1)); + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); + reg = (p2maddr >> 32); + if ( stage1 ) + reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); + + /* + * TTBCR + * We use long descriptor, with inner-shareable WBWA tables in TTBR0. + */ + if ( smmu->version > ARM_SMMU_V1 ) + { + if ( PAGE_SIZE == SZ_4K ) + reg = TTBCR_TG0_4K; + else + reg = TTBCR_TG0_64K; + + if ( !stage1 ) + { + reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT; + + switch (smmu->s2_output_size) + { + case 32: + reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT); + break; + case 36: + reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT); + break; + case 40: + reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT); + break; + case 42: + reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT); + break; + case 44: + reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT); + break; + case 48: + reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT); + break; + } + } + else + { + reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT; + } + } + else + { + reg = 0; + } + + /* Xen: The attributes to walk the page table should be the same as + * VTCR_EL2. Currently doesn't differ from Linux ones. 
+ */ + reg |= TTBCR_EAE | (TTBCR_SH_IS << TTBCR_SH0_SHIFT) | + (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) | + (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT); + + if ( !stage1 ) + reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); + + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); + + /* MAIR0 (stage-1 only) */ + if ( stage1 ) + { + reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) | + (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) | + (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV)); + writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0); + } + + /* + * SCTLR + * + * Do not set SCTLR_CFCFG, because of Erratum #842869 + */ + reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; + if ( stage1 ) + reg |= SCTLR_S1_ASIDPNE; #ifdef __BIG_ENDIAN - reg |= SCTLR_E; + reg |= SCTLR_E; #endif - writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR); + writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR); } static int arm_smmu_init_domain_context(struct iommu_domain *domain, - struct arm_smmu_device *smmu) -{ - int irq, start, ret = 0; - unsigned long flags; - struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - - spin_lock_irqsave(&smmu_domain->lock, flags); - if (smmu_domain->smmu) - goto out_unlock; - - /* - * Mapping the requested stage onto what we support is surprisingly - * complicated, mainly because the spec allows S1+S2 SMMUs without - * support for nested translation. That means we end up with the - * following table: - * - * Requested Supported Actual - * S1 N S1 - * S1 S1+S2 S1 - * S1 S2 S2 - * S1 S1 S1 - * N N N - * N S1+S2 S2 - * N S2 S2 - * N S1 S1 - * - * Note that you can't actually request stage-2 mappings. - */ - if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) - smmu_domain->stage = ARM_SMMU_DOMAIN_S2; - if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) - smmu_domain->stage = ARM_SMMU_DOMAIN_S1; - - switch (smmu_domain->stage) { - case ARM_SMMU_DOMAIN_S1: - cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; - start = smmu->num_s2_context_banks; - break; - case ARM_SMMU_DOMAIN_NESTED: - /* - * We will likely want to change this if/when KVM gets - * involved. - */ - case ARM_SMMU_DOMAIN_S2: - cfg->cbar = CBAR_TYPE_S2_TRANS; - start = 0; - break; - default: - ret = -EINVAL; - goto out_unlock; - } - - ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, - smmu->num_context_banks); - if (IS_ERR_VALUE(ret)) - goto out_unlock; - - cfg->cbndx = ret; - if (smmu->version == ARM_SMMU_V1) { - cfg->irptndx = atomic_inc_return(&smmu->irptndx); - cfg->irptndx %= smmu->num_context_irqs; - } else { - cfg->irptndx = cfg->cbndx; - } - - ACCESS_ONCE(smmu_domain->smmu) = smmu; - arm_smmu_init_context_bank(smmu_domain); - spin_unlock_irqrestore(&smmu_domain->lock, flags); - - irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; - ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, - "arm-smmu-context-fault", domain); - if (IS_ERR_VALUE(ret)) { - dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", - cfg->irptndx, irq); - cfg->irptndx = INVALID_IRPTNDX; - } - - return 0; + struct arm_smmu_device *smmu) +{ + int irq, start, ret = 0; + unsigned long flags; + struct arm_smmu_domain *smmu_domain = domain->priv; + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + + spin_lock_irqsave(&smmu_domain->lock, flags); + if ( smmu_domain->smmu ) + goto out_unlock; + + /* + * Mapping the requested stage onto what we support is surprisingly + * complicated, mainly because the spec allows S1+S2 SMMUs without + * support for nested translation. 
That means we end up with the + * following table: + * + * Requested Supported Actual + * S1 N S1 + * S1 S1+S2 S1 + * S1 S2 S2 + * S1 S1 S1 + * N N N + * N S1+S2 S2 + * N S2 S2 + * N S1 S1 + * + * Note that you can't actually request stage-2 mappings. + */ + if ( !(smmu->features & ARM_SMMU_FEAT_TRANS_S1) ) + smmu_domain->stage = ARM_SMMU_DOMAIN_S2; + if ( !(smmu->features & ARM_SMMU_FEAT_TRANS_S2) ) + smmu_domain->stage = ARM_SMMU_DOMAIN_S1; + + switch (smmu_domain->stage) + { + case ARM_SMMU_DOMAIN_S1: + cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; + start = smmu->num_s2_context_banks; + break; + case ARM_SMMU_DOMAIN_NESTED: + /* + * We will likely want to change this if/when KVM gets + * involved. + */ + case ARM_SMMU_DOMAIN_S2: + cfg->cbar = CBAR_TYPE_S2_TRANS; + start = 0; + break; + default: + ret = -EINVAL; + goto out_unlock; + } + + ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, + smmu->num_context_banks); + if ( IS_ERR_VALUE(ret) ) + goto out_unlock; + + cfg->cbndx = ret; + if ( smmu->version == ARM_SMMU_V1 ) + { + cfg->irptndx = atomic_inc_return(&smmu->irptndx); + cfg->irptndx %= smmu->num_context_irqs; + } + else + { + cfg->irptndx = cfg->cbndx; + } + + ACCESS_ONCE(smmu_domain->smmu) = smmu; + arm_smmu_init_context_bank(smmu_domain); + spin_unlock_irqrestore(&smmu_domain->lock, flags); + + irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; + ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, + "arm-smmu-context-fault", domain); + if ( IS_ERR_VALUE(ret) ) + { + dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", + cfg->irptndx, irq); + cfg->irptndx = INVALID_IRPTNDX; + } + + return 0; out_unlock: - spin_unlock_irqrestore(&smmu_domain->lock, flags); - return ret; + spin_unlock_irqrestore(&smmu_domain->lock, flags); + return ret; } static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) { - struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_device *smmu = smmu_domain->smmu; - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - void __iomem *cb_base; - int irq; + struct arm_smmu_domain *smmu_domain = domain->priv; + struct arm_smmu_device *smmu = smmu_domain->smmu; + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + void __iomem *cb_base; + int irq; - if (!smmu) - return; + if ( !smmu ) + return; - /* Disable the context bank and nuke the TLB before freeing it. */ - cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); - writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); - arm_smmu_tlb_inv_context(smmu_domain); + /* Disable the context bank and nuke the TLB before freeing it. */ + cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); + writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); + arm_smmu_tlb_inv_context(smmu_domain); - if (cfg->irptndx != INVALID_IRPTNDX) { - irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; - free_irq(irq, domain); - } + if ( cfg->irptndx != INVALID_IRPTNDX ) + { + irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; + free_irq(irq, domain); + } - __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); + __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); } static int arm_smmu_domain_init(struct iommu_domain *domain) { - struct arm_smmu_domain *smmu_domain; + struct arm_smmu_domain *smmu_domain; - /* - * Allocate the domain and initialise some of its data structures. - * We can't really do anything meaningful until we've added a - * master. 
- */ - smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); - if (!smmu_domain) - return -ENOMEM; + /* + * Allocate the domain and initialise some of its data structures. + * We can't really do anything meaningful until we've added a + * master. + */ + smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); + if ( !smmu_domain ) + return -ENOMEM; - spin_lock_init(&smmu_domain->lock); - domain->priv = smmu_domain; - return 0; + spin_lock_init(&smmu_domain->lock); + domain->priv = smmu_domain; + return 0; } #if 0 /* Xen: Page tables are shared with the processor */ @@ -1370,208 +1420,217 @@ static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain) static void arm_smmu_domain_destroy(struct iommu_domain *domain) { - struct arm_smmu_domain *smmu_domain = domain->priv; + struct arm_smmu_domain *smmu_domain = domain->priv; - /* - * Free the domain resources. We assume that all devices have - * already been detached. - */ - arm_smmu_destroy_domain_context(domain); - kfree(smmu_domain); + /* + * Free the domain resources. We assume that all devices have + * already been detached. + */ + arm_smmu_destroy_domain_context(domain); + kfree(smmu_domain); } static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, - struct arm_smmu_master_cfg *cfg) -{ - int i; - struct arm_smmu_smr *smrs; - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - - if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH)) - return 0; - - if (cfg->smrs) - return -EEXIST; - - smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL); - if (!smrs) { - dev_err(smmu->dev, "failed to allocate %d SMRs\n", - cfg->num_streamids); - return -ENOMEM; - } - - /* Allocate the SMRs on the SMMU */ - for (i = 0; i < cfg->num_streamids; ++i) { - int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0, - smmu->num_mapping_groups); - if (IS_ERR_VALUE(idx)) { - dev_err(smmu->dev, "failed to allocate free SMR\n"); - goto err_free_smrs; - } - - smrs[i] = (struct arm_smmu_smr) { - .idx = idx, - .mask = 0, /* We don't currently share SMRs */ - .id = cfg->streamids[i], - }; - } - - /* It worked! Now, poke the actual hardware */ - for (i = 0; i < cfg->num_streamids; ++i) { - u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT | - smrs[i].mask << SMR_MASK_SHIFT; - writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx)); - } - - cfg->smrs = smrs; - return 0; + struct arm_smmu_master_cfg *cfg) +{ + int i; + struct arm_smmu_smr *smrs; + void __iomem *gr0_base = ARM_SMMU_GR0(smmu); + + if ( !(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) ) + return 0; + + if ( cfg->smrs ) + return -EEXIST; + + smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL); + if ( !smrs ) + { + dev_err(smmu->dev, "failed to allocate %d SMRs\n", cfg->num_streamids); + return -ENOMEM; + } + + /* Allocate the SMRs on the SMMU */ + for ( i = 0; i < cfg->num_streamids; ++i ) + { + int idx = + __arm_smmu_alloc_bitmap(smmu->smr_map, 0, smmu->num_mapping_groups); + if ( IS_ERR_VALUE(idx) ) + { + dev_err(smmu->dev, "failed to allocate free SMR\n"); + goto err_free_smrs; + } + + smrs[i] = (struct arm_smmu_smr){ + .idx = idx, + .mask = 0, /* We don't currently share SMRs */ + .id = cfg->streamids[i], + }; + } + + /* It worked! 
Now, poke the actual hardware */ + for ( i = 0; i < cfg->num_streamids; ++i ) + { + u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT | + smrs[i].mask << SMR_MASK_SHIFT; + writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx)); + } + + cfg->smrs = smrs; + return 0; err_free_smrs: - while (--i >= 0) - __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx); - kfree(smrs); - return -ENOSPC; + while ( --i >= 0 ) + __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx); + kfree(smrs); + return -ENOSPC; } static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu, - struct arm_smmu_master_cfg *cfg) + struct arm_smmu_master_cfg *cfg) { - int i; - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - struct arm_smmu_smr *smrs = cfg->smrs; + int i; + void __iomem *gr0_base = ARM_SMMU_GR0(smmu); + struct arm_smmu_smr *smrs = cfg->smrs; - if (!smrs) - return; + if ( !smrs ) + return; - /* Invalidate the SMRs before freeing back to the allocator */ - for (i = 0; i < cfg->num_streamids; ++i) { - u8 idx = smrs[i].idx; + /* Invalidate the SMRs before freeing back to the allocator */ + for ( i = 0; i < cfg->num_streamids; ++i ) + { + u8 idx = smrs[i].idx; - writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx)); - __arm_smmu_free_bitmap(smmu->smr_map, idx); - } + writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx)); + __arm_smmu_free_bitmap(smmu->smr_map, idx); + } - cfg->smrs = NULL; - kfree(smrs); + cfg->smrs = NULL; + kfree(smrs); } static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, - struct arm_smmu_master_cfg *cfg) + struct arm_smmu_master_cfg *cfg) { - int i, ret; - struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); + int i, ret; + struct arm_smmu_device *smmu = smmu_domain->smmu; + void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - /* Devices in an IOMMU group may already be configured */ - ret = arm_smmu_master_configure_smrs(smmu, cfg); - if (ret) - return ret == -EEXIST ? 0 : ret; + /* Devices in an IOMMU group may already be configured */ + ret = arm_smmu_master_configure_smrs(smmu, cfg); + if ( ret ) + return ret == -EEXIST ? 0 : ret; - for (i = 0; i < cfg->num_streamids; ++i) { - u32 idx, s2cr; + for ( i = 0; i < cfg->num_streamids; ++i ) + { + u32 idx, s2cr; - idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i]; - s2cr = S2CR_TYPE_TRANS | - (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT); - writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx)); - } + idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i]; + s2cr = S2CR_TYPE_TRANS | (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT); + writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx)); + } - return 0; + return 0; } static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, - struct arm_smmu_master_cfg *cfg) + struct arm_smmu_master_cfg *cfg) { - int i; - struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); + int i; + struct arm_smmu_device *smmu = smmu_domain->smmu; + void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - /* An IOMMU group is torn down by the first device to be removed */ - if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs) - return; + /* An IOMMU group is torn down by the first device to be removed */ + if ( (smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs ) + return; - /* - * We *must* clear the S2CR first, because freeing the SMR means - * that it can be re-allocated immediately. - * Xen: Unlike Linux, any access to non-configured stream will fault. 
- */ - for (i = 0; i < cfg->num_streamids; ++i) { - u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i]; + /* + * We *must* clear the S2CR first, because freeing the SMR means + * that it can be re-allocated immediately. + * Xen: Unlike Linux, any access to non-configured stream will fault. + */ + for ( i = 0; i < cfg->num_streamids; ++i ) + { + u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i]; - writel_relaxed(S2CR_TYPE_FAULT, - gr0_base + ARM_SMMU_GR0_S2CR(idx)); - } + writel_relaxed(S2CR_TYPE_FAULT, gr0_base + ARM_SMMU_GR0_S2CR(idx)); + } - arm_smmu_master_free_smrs(smmu, cfg); + arm_smmu_master_free_smrs(smmu, cfg); } static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) { - int ret; - struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_device *smmu, *dom_smmu; - struct arm_smmu_master_cfg *cfg; - - smmu = find_smmu_for_device(dev); - if (!smmu) { - dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); - return -ENXIO; - } - - if (dev_iommu_domain(dev)) { - dev_err(dev, "already attached to IOMMU domain\n"); - return -EEXIST; - } - - /* - * Sanity check the domain. We don't support domains across - * different SMMUs. - */ - dom_smmu = ACCESS_ONCE(smmu_domain->smmu); - if (!dom_smmu) { - /* Now that we have a master, we can finalise the domain */ - ret = arm_smmu_init_domain_context(domain, smmu); - if (IS_ERR_VALUE(ret)) - return ret; - - dom_smmu = smmu_domain->smmu; - } - - if (dom_smmu != smmu) { - dev_err(dev, - "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", - dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); - return -EINVAL; - } - - /* Looks ok, so add the device to the domain */ - cfg = find_smmu_master_cfg(dev); - if (!cfg) - return -ENODEV; - - ret = arm_smmu_domain_add_master(smmu_domain, cfg); - - if (!ret) - dev_iommu_domain(dev) = domain; - return ret; + int ret; + struct arm_smmu_domain *smmu_domain = domain->priv; + struct arm_smmu_device *smmu, *dom_smmu; + struct arm_smmu_master_cfg *cfg; + + smmu = find_smmu_for_device(dev); + if ( !smmu ) + { + dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); + return -ENXIO; + } + + if ( dev_iommu_domain(dev) ) + { + dev_err(dev, "already attached to IOMMU domain\n"); + return -EEXIST; + } + + /* + * Sanity check the domain. We don't support domains across + * different SMMUs. 
+ */ + dom_smmu = ACCESS_ONCE(smmu_domain->smmu); + if ( !dom_smmu ) + { + /* Now that we have a master, we can finalise the domain */ + ret = arm_smmu_init_domain_context(domain, smmu); + if ( IS_ERR_VALUE(ret) ) + return ret; + + dom_smmu = smmu_domain->smmu; + } + + if ( dom_smmu != smmu ) + { + dev_err(dev, + "cannot attach to SMMU %s whilst already attached to domain on " + "SMMU %s\n", + dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); + return -EINVAL; + } + + /* Looks ok, so add the device to the domain */ + cfg = find_smmu_master_cfg(dev); + if ( !cfg ) + return -ENODEV; + + ret = arm_smmu_domain_add_master(smmu_domain, cfg); + + if ( !ret ) + dev_iommu_domain(dev) = domain; + return ret; } static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) { - struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_master_cfg *cfg; + struct arm_smmu_domain *smmu_domain = domain->priv; + struct arm_smmu_master_cfg *cfg; - cfg = find_smmu_master_cfg(dev); - if (!cfg) - return; + cfg = find_smmu_master_cfg(dev); + if ( !cfg ) + return; - dev_iommu_domain(dev) = NULL; - arm_smmu_domain_remove_master(smmu_domain, cfg); + dev_iommu_domain(dev) = NULL; + arm_smmu_domain_remove_master(smmu_domain, cfg); } -#if 0 /* - * Xen: The page table is shared with the processor, therefore - * helpers to implement separate is not necessary. +#if 0 /* \ + * Xen: The page table is shared with the processor, therefore \ + * helpers to implement separate is not necessary. \ */ static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, unsigned long end) @@ -1885,71 +1944,78 @@ static bool arm_smmu_capable(enum iommu_cap cap) static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data) { - *((u16 *)data) = alias; - return 0; /* Continue walking */ + *((u16 *)data) = alias; + return 0; /* Continue walking */ } static void __arm_smmu_release_pci_iommudata(void *data) { - kfree(data); + kfree(data); } static int arm_smmu_add_device(struct device *dev) { - struct arm_smmu_device *smmu; - struct arm_smmu_master_cfg *cfg; - struct iommu_group *group; - void (*releasefn)(void *) = NULL; - int ret; - - smmu = find_smmu_for_device(dev); - if (!smmu) - return -ENODEV; - - group = iommu_group_alloc(); - if (IS_ERR(group)) { - dev_err(dev, "Failed to allocate IOMMU group\n"); - return PTR_ERR(group); - } - - if (dev_is_pci(dev)) { - struct pci_dev *pdev = to_pci_dev(dev); - - cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); - if (!cfg) { - ret = -ENOMEM; - goto out_put_group; - } - - cfg->num_streamids = 1; - /* - * Assume Stream ID == Requester ID for now. - * We need a way to describe the ID mappings in FDT. 
- */ - pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, - &cfg->streamids[0]); - releasefn = __arm_smmu_release_pci_iommudata; - } else { - struct arm_smmu_master *master; - - master = find_smmu_master(smmu, dev->of_node); - if (!master) { - ret = -ENODEV; - goto out_put_group; - } - - cfg = &master->cfg; - } - - iommu_group_set_iommudata(group, cfg, releasefn); - ret = iommu_group_add_device(group, dev); + struct arm_smmu_device *smmu; + struct arm_smmu_master_cfg *cfg; + struct iommu_group *group; + void (*releasefn)(void *) = NULL; + int ret; + + smmu = find_smmu_for_device(dev); + if ( !smmu ) + return -ENODEV; + + group = iommu_group_alloc(); + if ( IS_ERR(group) ) + { + dev_err(dev, "Failed to allocate IOMMU group\n"); + return PTR_ERR(group); + } + + if ( dev_is_pci(dev) ) + { + struct pci_dev *pdev = to_pci_dev(dev); + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if ( !cfg ) + { + ret = -ENOMEM; + goto out_put_group; + } + + cfg->num_streamids = 1; + /* + * Assume Stream ID == Requester ID for now. + * We need a way to describe the ID mappings in FDT. + */ + pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, + &cfg->streamids[0]); + releasefn = __arm_smmu_release_pci_iommudata; + } + else + { + struct arm_smmu_master *master; + + master = find_smmu_master(smmu, dev->of_node); + if ( !master ) + { + ret = -ENODEV; + goto out_put_group; + } + + cfg = &master->cfg; + } + + iommu_group_set_iommudata(group, cfg, releasefn); + ret = iommu_group_add_device(group, dev); out_put_group: - iommu_group_put(group); - return ret; + iommu_group_put(group); + return ret; } -#if 0 /* Xen: We don't support remove device for now. Will be useful for PCI */ +#if 0 /* Xen: We don't support remove device for now. Will be useful for PCI \ + */ static void arm_smmu_remove_device(struct device *dev) { iommu_group_remove_device(dev); @@ -2011,201 +2077,211 @@ static const struct iommu_ops arm_smmu_ops = { static void arm_smmu_device_reset(struct arm_smmu_device *smmu) { - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - void __iomem *cb_base; - int i = 0; - u32 reg; + void __iomem *gr0_base = ARM_SMMU_GR0(smmu); + void __iomem *cb_base; + int i = 0; + u32 reg; - /* clear global FSR */ - reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); - writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); + /* clear global FSR */ + reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); + writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); - /* Mark all SMRn as invalid and all S2CRn as bypass */ - for (i = 0; i < smmu->num_mapping_groups; ++i) { - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i)); - /* - * Xen: Unlike Linux, any access to a non-configure stream - * will fault by default. - */ - writel_relaxed(S2CR_TYPE_FAULT, - gr0_base + ARM_SMMU_GR0_S2CR(i)); - } + /* Mark all SMRn as invalid and all S2CRn as bypass */ + for ( i = 0; i < smmu->num_mapping_groups; ++i ) + { + writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i)); + /* + * Xen: Unlike Linux, any access to a non-configure stream + * will fault by default. 
+ */ + writel_relaxed(S2CR_TYPE_FAULT, gr0_base + ARM_SMMU_GR0_S2CR(i)); + } - /* Make sure all context banks are disabled and clear CB_FSR */ - for (i = 0; i < smmu->num_context_banks; ++i) { - cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i); - writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); - writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR); - } + /* Make sure all context banks are disabled and clear CB_FSR */ + for ( i = 0; i < smmu->num_context_banks; ++i ) + { + cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i); + writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); + writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR); + } - /* Invalidate the TLB, just in case */ - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL); - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); + /* Invalidate the TLB, just in case */ + writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL); + writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); + writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); - reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); + reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); - /* Enable fault reporting */ - reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE); + /* Enable fault reporting */ + reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE); - /* Disable TLB broadcasting. */ - reg |= (sCR0_VMIDPNE | sCR0_PTM); + /* Disable TLB broadcasting. */ + reg |= (sCR0_VMIDPNE | sCR0_PTM); - /* Enable client access, but bypass when no mapping is found */ - reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG); - /* Xen: Unlike Linux, generate a fault when no mapping is found */ - reg |= sCR0_USFCFG; + /* Enable client access, but bypass when no mapping is found */ + reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG); + /* Xen: Unlike Linux, generate a fault when no mapping is found */ + reg |= sCR0_USFCFG; - /* Disable forced broadcasting */ - reg &= ~sCR0_FB; + /* Disable forced broadcasting */ + reg &= ~sCR0_FB; - /* Don't upgrade barriers */ - reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); + /* Don't upgrade barriers */ + reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); - /* Push the button */ - arm_smmu_tlb_sync(smmu); - writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); + /* Push the button */ + arm_smmu_tlb_sync(smmu); + writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); } static int arm_smmu_id_size_to_bits(int size) { - switch (size) { - case 0: - return 32; - case 1: - return 36; - case 2: - return 40; - case 3: - return 42; - case 4: - return 44; - case 5: - default: - return 48; - } + switch (size) + { + case 0: + return 32; + case 1: + return 36; + case 2: + return 40; + case 3: + return 42; + case 4: + return 44; + case 5: + default: + return 48; + } } static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) { - unsigned long size; - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - u32 id; + unsigned long size; + void __iomem *gr0_base = ARM_SMMU_GR0(smmu); + u32 id; - dev_notice(smmu->dev, "probing hardware configuration...\n"); - dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version); + dev_notice(smmu->dev, "probing hardware configuration...\n"); + dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version); - /* ID0 */ - id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0); + /* ID0 */ + id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0); #ifndef CONFIG_64BIT - if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) { - dev_err(smmu->dev, "\tno v7 descriptor support!\n"); - 
return -ENODEV; - } + if ( ((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY ) + { + dev_err(smmu->dev, "\tno v7 descriptor support!\n"); + return -ENODEV; + } #endif - /* Restrict available stages based on module parameter */ - if (force_stage == 1) - id &= ~(ID0_S2TS | ID0_NTS); - else if (force_stage == 2) - id &= ~(ID0_S1TS | ID0_NTS); - - if (id & ID0_S1TS) { - smmu->features |= ARM_SMMU_FEAT_TRANS_S1; - dev_notice(smmu->dev, "\tstage 1 translation\n"); - } - - if (id & ID0_S2TS) { - smmu->features |= ARM_SMMU_FEAT_TRANS_S2; - dev_notice(smmu->dev, "\tstage 2 translation\n"); - } - - if (id & ID0_NTS) { - smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED; - dev_notice(smmu->dev, "\tnested translation\n"); - } - - if (!(smmu->features & - (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) { - dev_err(smmu->dev, "\tno translation support!\n"); - return -ENODEV; - } - - if (id & ID0_CTTW) { - smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; - dev_notice(smmu->dev, "\tcoherent table walk\n"); - } - - if (id & ID0_SMS) { - u32 smr, sid, mask; - - smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; - smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) & - ID0_NUMSMRG_MASK; - if (smmu->num_mapping_groups == 0) { - dev_err(smmu->dev, - "stream-matching supported, but no SMRs present!\n"); - return -ENODEV; - } - - smr = SMR_MASK_MASK << SMR_MASK_SHIFT; - smr |= (SMR_ID_MASK << SMR_ID_SHIFT); - writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); - smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); - - mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK; - sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK; - if ((mask & sid) != sid) { - dev_err(smmu->dev, - "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n", - mask, sid); - return -ENODEV; - } - - dev_notice(smmu->dev, - "\tstream matching with %u register groups, mask 0x%x\n", - smmu->num_mapping_groups, mask); - } else { - smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) & - ID0_NUMSIDB_MASK; - } - - /* ID1 */ - id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1); - smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12; - - /* Check for size mismatch of SMMU address space from mapped region */ - size = 1 << - (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); - size *= 2 << smmu->pgshift; - if (smmu->size != size) - dev_warn(smmu->dev, - "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n", - size, smmu->size); - - smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & - ID1_NUMS2CB_MASK; - smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; - if (smmu->num_s2_context_banks > smmu->num_context_banks) { - dev_err(smmu->dev, "impossible number of S2 context banks!\n"); - return -ENODEV; - } - dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n", - smmu->num_context_banks, smmu->num_s2_context_banks); - - /* ID2 */ - id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); - size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK); - smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); - - /* Xen: Stage-2 input size has to match p2m_ipa_bits. 
*/ - if (size < p2m_ipa_bits) { - dev_err(smmu->dev, - "P2M IPA size not supported (P2M=%u SMMU=%lu)!\n", - p2m_ipa_bits, size); - return -ENODEV; - } - smmu->s2_input_size = p2m_ipa_bits; + /* Restrict available stages based on module parameter */ + if ( force_stage == 1 ) + id &= ~(ID0_S2TS | ID0_NTS); + else if ( force_stage == 2 ) + id &= ~(ID0_S1TS | ID0_NTS); + + if ( id & ID0_S1TS ) + { + smmu->features |= ARM_SMMU_FEAT_TRANS_S1; + dev_notice(smmu->dev, "\tstage 1 translation\n"); + } + + if ( id & ID0_S2TS ) + { + smmu->features |= ARM_SMMU_FEAT_TRANS_S2; + dev_notice(smmu->dev, "\tstage 2 translation\n"); + } + + if ( id & ID0_NTS ) + { + smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED; + dev_notice(smmu->dev, "\tnested translation\n"); + } + + if ( !(smmu->features & (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2)) ) + { + dev_err(smmu->dev, "\tno translation support!\n"); + return -ENODEV; + } + + if ( id & ID0_CTTW ) + { + smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; + dev_notice(smmu->dev, "\tcoherent table walk\n"); + } + + if ( id & ID0_SMS ) + { + u32 smr, sid, mask; + + smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; + smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK; + if ( smmu->num_mapping_groups == 0 ) + { + dev_err(smmu->dev, + "stream-matching supported, but no SMRs present!\n"); + return -ENODEV; + } + + smr = SMR_MASK_MASK << SMR_MASK_SHIFT; + smr |= (SMR_ID_MASK << SMR_ID_SHIFT); + writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); + smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); + + mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK; + sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK; + if ( (mask & sid) != sid ) + { + dev_err(smmu->dev, + "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n", + mask, sid); + return -ENODEV; + } + + dev_notice(smmu->dev, + "\tstream matching with %u register groups, mask 0x%x\n", + smmu->num_mapping_groups, mask); + } + else + { + smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK; + } + + /* ID1 */ + id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1); + smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12; + + /* Check for size mismatch of SMMU address space from mapped region */ + size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); + size *= 2 << smmu->pgshift; + if ( smmu->size != size ) + dev_warn(smmu->dev, + "SMMU address space size (0x%lx) differs from mapped region " + "size (0x%lx)!\n", + size, smmu->size); + + smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK; + smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; + if ( smmu->num_s2_context_banks > smmu->num_context_banks ) + { + dev_err(smmu->dev, "impossible number of S2 context banks!\n"); + return -ENODEV; + } + dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n", + smmu->num_context_banks, smmu->num_s2_context_banks); + + /* ID2 */ + id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); + size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK); + smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); + + /* Xen: Stage-2 input size has to match p2m_ipa_bits. 
*/ + if ( size < p2m_ipa_bits ) + { + dev_err(smmu->dev, "P2M IPA size not supported (P2M=%u SMMU=%lu)!\n", + p2m_ipa_bits, size); + return -ENODEV; + } + smmu->s2_input_size = p2m_ipa_bits; #if 0 /* Stage-2 input size limited due to pgd allocation (PTRS_PER_PGD) */ #ifdef CONFIG_64BIT @@ -2215,48 +2291,51 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) #endif #endif - /* The stage-2 output mask is also applied for bypass */ - size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); - smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); + /* The stage-2 output mask is also applied for bypass */ + size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); + smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); - if (smmu->version == ARM_SMMU_V1) { - smmu->s1_input_size = 32; - } else { + if ( smmu->version == ARM_SMMU_V1 ) + { + smmu->s1_input_size = 32; + } + else + { #ifdef CONFIG_64BIT - size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; - size = min(VA_BITS, arm_smmu_id_size_to_bits(size)); + size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; + size = min(VA_BITS, arm_smmu_id_size_to_bits(size)); #else - size = 32; + size = 32; #endif - smmu->s1_input_size = size; - - if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) || - (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) || - (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) { - dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n", - PAGE_SIZE); - return -ENODEV; - } - } + smmu->s1_input_size = size; - if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) - dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", - smmu->s1_input_size, smmu->s1_output_size); + if ( (PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) || + (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) || + (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K) ) + { + dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n", PAGE_SIZE); + return -ENODEV; + } + } - if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) - dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", - smmu->s2_input_size, smmu->s2_output_size); + if ( smmu->features & ARM_SMMU_FEAT_TRANS_S1 ) + dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", + smmu->s1_input_size, smmu->s1_output_size); - return 0; + if ( smmu->features & ARM_SMMU_FEAT_TRANS_S2 ) + dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", + smmu->s2_input_size, smmu->s2_output_size); + + return 0; } static const struct of_device_id arm_smmu_of_match[] = { - { .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 }, - { .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 }, - { .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 }, - { .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 }, - { .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 }, - { }, + {.compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1}, + {.compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2}, + {.compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1}, + {.compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1}, + {.compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2}, + {}, }; MODULE_DEVICE_TABLE(of, arm_smmu_of_match); @@ -2266,142 +2345,149 @@ MODULE_DEVICE_TABLE(of, arm_smmu_of_match); */ static int arm_smmu_device_dt_probe(struct platform_device *pdev) { - const struct of_device_id *of_id; - struct resource *res; - struct arm_smmu_device *smmu; - struct device *dev = &pdev->dev; - struct rb_node *node; - struct of_phandle_args masterspec; - int num_irqs, i, err; 
- - smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); - if (!smmu) { - dev_err(dev, "failed to allocate arm_smmu_device\n"); - return -ENOMEM; - } - smmu->dev = dev; - - of_id = of_match_node(arm_smmu_of_match, dev->of_node); - smmu->version = (enum arm_smmu_arch_version)of_id->data; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - smmu->base = devm_ioremap_resource(dev, res); - if (IS_ERR(smmu->base)) { - err = PTR_ERR(smmu->base); - goto out_free; - } - smmu->size = resource_size(res); - - if (of_property_read_u32(dev->of_node, "#global-interrupts", - &smmu->num_global_irqs)) { - dev_err(dev, "missing #global-interrupts property\n"); - err = -ENODEV; - goto out_free; - } - - num_irqs = 0; - while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) { - num_irqs++; - if (num_irqs > smmu->num_global_irqs) - smmu->num_context_irqs++; - } - - if (!smmu->num_context_irqs) { - dev_err(dev, "found %d interrupts but expected at least %d\n", - num_irqs, smmu->num_global_irqs + 1); - err = -ENODEV; - goto out_free; - } - - smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs, - GFP_KERNEL); - if (!smmu->irqs) { - dev_err(dev, "failed to allocate %d irqs\n", num_irqs); - err = -ENOMEM; - goto out_free; - } - - for (i = 0; i < num_irqs; ++i) { - int irq = platform_get_irq(pdev, i); - - if (irq < 0) { - dev_err(dev, "failed to get irq index %d\n", i); - err = -ENODEV; - goto out_free; - } - smmu->irqs[i] = irq; - } - - err = arm_smmu_device_cfg_probe(smmu); - if (err) - return err; - - i = 0; - smmu->masters = RB_ROOT; - while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters", - "#stream-id-cells", i, - &masterspec)) { - err = register_smmu_master(smmu, dev, &masterspec); - if (err) { - dev_err(dev, "failed to add master %s\n", - masterspec.np->name); - goto out_put_masters; - } - - i++; - } - dev_notice(dev, "registered %d master devices\n", i); - - parse_driver_options(smmu); - - if (smmu->version > ARM_SMMU_V1 && - smmu->num_context_banks != smmu->num_context_irqs) { - dev_err(dev, - "found only %d context interrupt(s) but %d required\n", - smmu->num_context_irqs, smmu->num_context_banks); - err = -ENODEV; - goto out_put_masters; - } - - for (i = 0; i < smmu->num_global_irqs; ++i) { - err = request_irq(smmu->irqs[i], - arm_smmu_global_fault, - IRQF_SHARED, - "arm-smmu global fault", - smmu); - if (err) { - dev_err(dev, "failed to request global IRQ %d (%u)\n", - i, smmu->irqs[i]); - goto out_free_irqs; - } - } - - INIT_LIST_HEAD(&smmu->list); - spin_lock(&arm_smmu_devices_lock); - list_add(&smmu->list, &arm_smmu_devices); - spin_unlock(&arm_smmu_devices_lock); - - arm_smmu_device_reset(smmu); - return 0; + const struct of_device_id *of_id; + struct resource *res; + struct arm_smmu_device *smmu; + struct device *dev = &pdev->dev; + struct rb_node *node; + struct of_phandle_args masterspec; + int num_irqs, i, err; + + smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); + if ( !smmu ) + { + dev_err(dev, "failed to allocate arm_smmu_device\n"); + return -ENOMEM; + } + smmu->dev = dev; + + of_id = of_match_node(arm_smmu_of_match, dev->of_node); + smmu->version = (enum arm_smmu_arch_version)of_id->data; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + smmu->base = devm_ioremap_resource(dev, res); + if ( IS_ERR(smmu->base) ) + { + err = PTR_ERR(smmu->base); + goto out_free; + } + smmu->size = resource_size(res); + + if ( of_property_read_u32(dev->of_node, "#global-interrupts", + &smmu->num_global_irqs) ) + { + dev_err(dev, "missing #global-interrupts 
property\n"); + err = -ENODEV; + goto out_free; + } + + num_irqs = 0; + while ( (res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs)) ) + { + num_irqs++; + if ( num_irqs > smmu->num_global_irqs ) + smmu->num_context_irqs++; + } + + if ( !smmu->num_context_irqs ) + { + dev_err(dev, "found %d interrupts but expected at least %d\n", num_irqs, + smmu->num_global_irqs + 1); + err = -ENODEV; + goto out_free; + } + + smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs, GFP_KERNEL); + if ( !smmu->irqs ) + { + dev_err(dev, "failed to allocate %d irqs\n", num_irqs); + err = -ENOMEM; + goto out_free; + } + + for ( i = 0; i < num_irqs; ++i ) + { + int irq = platform_get_irq(pdev, i); + + if ( irq < 0 ) + { + dev_err(dev, "failed to get irq index %d\n", i); + err = -ENODEV; + goto out_free; + } + smmu->irqs[i] = irq; + } + + err = arm_smmu_device_cfg_probe(smmu); + if ( err ) + return err; + + i = 0; + smmu->masters = RB_ROOT; + while ( !of_parse_phandle_with_args(dev->of_node, "mmu-masters", + "#stream-id-cells", i, &masterspec) ) + { + err = register_smmu_master(smmu, dev, &masterspec); + if ( err ) + { + dev_err(dev, "failed to add master %s\n", masterspec.np->name); + goto out_put_masters; + } + + i++; + } + dev_notice(dev, "registered %d master devices\n", i); + + parse_driver_options(smmu); + + if ( smmu->version > ARM_SMMU_V1 && + smmu->num_context_banks != smmu->num_context_irqs ) + { + dev_err(dev, "found only %d context interrupt(s) but %d required\n", + smmu->num_context_irqs, smmu->num_context_banks); + err = -ENODEV; + goto out_put_masters; + } + + for ( i = 0; i < smmu->num_global_irqs; ++i ) + { + err = request_irq(smmu->irqs[i], arm_smmu_global_fault, IRQF_SHARED, + "arm-smmu global fault", smmu); + if ( err ) + { + dev_err(dev, "failed to request global IRQ %d (%u)\n", i, + smmu->irqs[i]); + goto out_free_irqs; + } + } + + INIT_LIST_HEAD(&smmu->list); + spin_lock(&arm_smmu_devices_lock); + list_add(&smmu->list, &arm_smmu_devices); + spin_unlock(&arm_smmu_devices_lock); + + arm_smmu_device_reset(smmu); + return 0; out_free_irqs: - while (i--) - free_irq(smmu->irqs[i], smmu); + while ( i-- ) + free_irq(smmu->irqs[i], smmu); out_put_masters: - for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { - struct arm_smmu_master *master - = container_of(node, struct arm_smmu_master, node); - kfree(master); - } + for ( node = rb_first(&smmu->masters); node; node = rb_next(node) ) + { + struct arm_smmu_master *master = + container_of(node, struct arm_smmu_master, node); + kfree(master); + } out_free: - kfree(smmu->irqs); - if (!IS_ERR(smmu->base)) - iounmap(smmu->base); - kfree(smmu); + kfree(smmu->irqs); + if ( !IS_ERR(smmu->base) ) + iounmap(smmu->base); + kfree(smmu); - return err; + return err; } #if 0 /* Xen: We never remove SMMU */ @@ -2515,267 +2601,275 @@ static u32 platform_features = ARM_SMMU_FEAT_COHERENT_WALK; static int __must_check arm_smmu_iotlb_flush_all(struct domain *d) { - struct arm_smmu_xen_domain *smmu_domain = dom_iommu(d)->arch.priv; - struct iommu_domain *cfg; + struct arm_smmu_xen_domain *smmu_domain = dom_iommu(d)->arch.priv; + struct iommu_domain *cfg; - spin_lock(&smmu_domain->lock); - list_for_each_entry(cfg, &smmu_domain->contexts, list) { - /* - * Only invalidate the context when SMMU is present. - * This is because the context initialization is delayed - * until a master has been added. 
- */ - if (unlikely(!ACCESS_ONCE(cfg->priv->smmu))) - continue; - arm_smmu_tlb_inv_context(cfg->priv); - } - spin_unlock(&smmu_domain->lock); + spin_lock(&smmu_domain->lock); + list_for_each_entry (cfg, &smmu_domain->contexts, list) + { + /* + * Only invalidate the context when SMMU is present. + * This is because the context initialization is delayed + * until a master has been added. + */ + if ( unlikely(!ACCESS_ONCE(cfg->priv->smmu)) ) + continue; + arm_smmu_tlb_inv_context(cfg->priv); + } + spin_unlock(&smmu_domain->lock); - return 0; + return 0; } static int __must_check arm_smmu_iotlb_flush(struct domain *d, dfn_t dfn, - unsigned int page_count, - unsigned int flush_flags) + unsigned int page_count, + unsigned int flush_flags) { - ASSERT(flush_flags); + ASSERT(flush_flags); - /* ARM SMMU v1 doesn't have flush by VMA and VMID */ - return arm_smmu_iotlb_flush_all(d); + /* ARM SMMU v1 doesn't have flush by VMA and VMID */ + return arm_smmu_iotlb_flush_all(d); } static struct iommu_domain *arm_smmu_get_domain(struct domain *d, - struct device *dev) + struct device *dev) { - struct iommu_domain *domain; - struct arm_smmu_xen_domain *xen_domain; - struct arm_smmu_device *smmu; + struct iommu_domain *domain; + struct arm_smmu_xen_domain *xen_domain; + struct arm_smmu_device *smmu; - xen_domain = dom_iommu(d)->arch.priv; + xen_domain = dom_iommu(d)->arch.priv; - smmu = find_smmu_for_device(dev); - if (!smmu) - return NULL; + smmu = find_smmu_for_device(dev); + if ( !smmu ) + return NULL; - /* - * Loop through the &xen_domain->contexts to locate a context - * assigned to this SMMU - */ - list_for_each_entry(domain, &xen_domain->contexts, list) { - if (domain->priv->smmu == smmu) - return domain; - } - - return NULL; + /* + * Loop through the &xen_domain->contexts to locate a context + * assigned to this SMMU + */ + list_for_each_entry (domain, &xen_domain->contexts, list) + { + if ( domain->priv->smmu == smmu ) + return domain; + } + return NULL; } static void arm_smmu_destroy_iommu_domain(struct iommu_domain *domain) { - list_del(&domain->list); - arm_smmu_domain_destroy(domain); - xfree(domain); -} - -static int arm_smmu_assign_dev(struct domain *d, u8 devfn, - struct device *dev, u32 flag) -{ - struct iommu_domain *domain; - struct arm_smmu_xen_domain *xen_domain; - int ret = 0; - - xen_domain = dom_iommu(d)->arch.priv; - - if (!dev->archdata.iommu) { - dev->archdata.iommu = xzalloc(struct arm_smmu_xen_device); - if (!dev->archdata.iommu) - return -ENOMEM; - } - - if (!dev_iommu_group(dev)) { - ret = arm_smmu_add_device(dev); - if (ret) - return ret; - } - - spin_lock(&xen_domain->lock); - - /* - * Check to see if a context bank (iommu_domain) already exists for - * this xen domain under the same SMMU - */ - domain = arm_smmu_get_domain(d, dev); - if (!domain) { - - domain = xzalloc(struct iommu_domain); - if (!domain) { - ret = -ENOMEM; - goto out; - } - - ret = arm_smmu_domain_init(domain); - if (ret) { - xfree(domain); - goto out; - } - - domain->priv->cfg.domain = d; - - /* Chain the new context to the domain */ - list_add(&domain->list, &xen_domain->contexts); - - } - - ret = arm_smmu_attach_dev(domain, dev); - if (ret) { - if (domain->ref.counter == 0) - arm_smmu_destroy_iommu_domain(domain); - } else { - atomic_inc(&domain->ref); - } + list_del(&domain->list); + arm_smmu_domain_destroy(domain); + xfree(domain); +} + +static int arm_smmu_assign_dev(struct domain *d, u8 devfn, struct device *dev, + u32 flag) +{ + struct iommu_domain *domain; + struct arm_smmu_xen_domain *xen_domain; + 
int ret = 0; + + xen_domain = dom_iommu(d)->arch.priv; + + if ( !dev->archdata.iommu ) + { + dev->archdata.iommu = xzalloc(struct arm_smmu_xen_device); + if ( !dev->archdata.iommu ) + return -ENOMEM; + } + + if ( !dev_iommu_group(dev) ) + { + ret = arm_smmu_add_device(dev); + if ( ret ) + return ret; + } + + spin_lock(&xen_domain->lock); + + /* + * Check to see if a context bank (iommu_domain) already exists for + * this xen domain under the same SMMU + */ + domain = arm_smmu_get_domain(d, dev); + if ( !domain ) + { + domain = xzalloc(struct iommu_domain); + if ( !domain ) + { + ret = -ENOMEM; + goto out; + } + + ret = arm_smmu_domain_init(domain); + if ( ret ) + { + xfree(domain); + goto out; + } + + domain->priv->cfg.domain = d; + + /* Chain the new context to the domain */ + list_add(&domain->list, &xen_domain->contexts); + } + + ret = arm_smmu_attach_dev(domain, dev); + if ( ret ) + { + if ( domain->ref.counter == 0 ) + arm_smmu_destroy_iommu_domain(domain); + } + else + { + atomic_inc(&domain->ref); + } out: - spin_unlock(&xen_domain->lock); + spin_unlock(&xen_domain->lock); - return ret; + return ret; } static int arm_smmu_deassign_dev(struct domain *d, struct device *dev) { - struct iommu_domain *domain = dev_iommu_domain(dev); - struct arm_smmu_xen_domain *xen_domain; + struct iommu_domain *domain = dev_iommu_domain(dev); + struct arm_smmu_xen_domain *xen_domain; - xen_domain = dom_iommu(d)->arch.priv; + xen_domain = dom_iommu(d)->arch.priv; - if (!domain || domain->priv->cfg.domain != d) { - dev_err(dev, " not attached to domain %d\n", d->domain_id); - return -ESRCH; - } + if ( !domain || domain->priv->cfg.domain != d ) + { + dev_err(dev, " not attached to domain %d\n", d->domain_id); + return -ESRCH; + } - spin_lock(&xen_domain->lock); + spin_lock(&xen_domain->lock); - arm_smmu_detach_dev(domain, dev); - atomic_dec(&domain->ref); + arm_smmu_detach_dev(domain, dev); + atomic_dec(&domain->ref); - if (domain->ref.counter == 0) - arm_smmu_destroy_iommu_domain(domain); + if ( domain->ref.counter == 0 ) + arm_smmu_destroy_iommu_domain(domain); - spin_unlock(&xen_domain->lock); + spin_unlock(&xen_domain->lock); - return 0; + return 0; } -static int arm_smmu_reassign_dev(struct domain *s, struct domain *t, - u8 devfn, struct device *dev) +static int arm_smmu_reassign_dev(struct domain *s, struct domain *t, u8 devfn, + struct device *dev) { - int ret = 0; + int ret = 0; - /* Don't allow remapping on other domain than hwdom */ - if (t && t != hardware_domain) - return -EPERM; + /* Don't allow remapping on other domain than hwdom */ + if ( t && t != hardware_domain ) + return -EPERM; - if (t == s) - return 0; + if ( t == s ) + return 0; - ret = arm_smmu_deassign_dev(s, dev); - if (ret) - return ret; + ret = arm_smmu_deassign_dev(s, dev); + if ( ret ) + return ret; - if (t) { - /* No flags are defined for ARM. */ - ret = arm_smmu_assign_dev(t, devfn, dev, 0); - if (ret) - return ret; - } + if ( t ) + { + /* No flags are defined for ARM. 
*/ + ret = arm_smmu_assign_dev(t, devfn, dev, 0); + if ( ret ) + return ret; + } - return 0; + return 0; } static int arm_smmu_iommu_domain_init(struct domain *d, bool use_iommu) { - struct arm_smmu_xen_domain *xen_domain; + struct arm_smmu_xen_domain *xen_domain; - xen_domain = xzalloc(struct arm_smmu_xen_domain); - if ( !xen_domain ) - return -ENOMEM; + xen_domain = xzalloc(struct arm_smmu_xen_domain); + if ( !xen_domain ) + return -ENOMEM; - spin_lock_init(&xen_domain->lock); - INIT_LIST_HEAD(&xen_domain->contexts); + spin_lock_init(&xen_domain->lock); + INIT_LIST_HEAD(&xen_domain->contexts); - dom_iommu(d)->arch.priv = xen_domain; + dom_iommu(d)->arch.priv = xen_domain; - /* Coherent walk can be enabled only when all SMMUs support it. */ - if (platform_features & ARM_SMMU_FEAT_COHERENT_WALK) - iommu_set_feature(d, IOMMU_FEAT_COHERENT_WALK); + /* Coherent walk can be enabled only when all SMMUs support it. */ + if ( platform_features & ARM_SMMU_FEAT_COHERENT_WALK ) + iommu_set_feature(d, IOMMU_FEAT_COHERENT_WALK); - return 0; + return 0; } static void __hwdom_init arm_smmu_iommu_hwdom_init(struct domain *d) { - /* Set to false options not supported on ARM. */ - if ( iommu_hwdom_inclusive ) - printk(XENLOG_WARNING - "map-inclusive dom0-iommu option is not supported on ARM\n"); - iommu_hwdom_inclusive = false; - if ( iommu_hwdom_reserved == 1 ) - printk(XENLOG_WARNING - "map-reserved dom0-iommu option is not supported on ARM\n"); - iommu_hwdom_reserved = 0; + /* Set to false options not supported on ARM. */ + if ( iommu_hwdom_inclusive ) + printk(XENLOG_WARNING + "map-inclusive dom0-iommu option is not supported on ARM\n"); + iommu_hwdom_inclusive = false; + if ( iommu_hwdom_reserved == 1 ) + printk(XENLOG_WARNING + "map-reserved dom0-iommu option is not supported on ARM\n"); + iommu_hwdom_reserved = 0; - arch_iommu_hwdom_init(d); + arch_iommu_hwdom_init(d); } static void arm_smmu_iommu_domain_teardown(struct domain *d) { - struct arm_smmu_xen_domain *xen_domain = dom_iommu(d)->arch.priv; + struct arm_smmu_xen_domain *xen_domain = dom_iommu(d)->arch.priv; - ASSERT(list_empty(&xen_domain->contexts)); - xfree(xen_domain); + ASSERT(list_empty(&xen_domain->contexts)); + xfree(xen_domain); } static int __must_check arm_smmu_map_page(struct domain *d, dfn_t dfn, - mfn_t mfn, unsigned int flags, - unsigned int *flush_flags) + mfn_t mfn, unsigned int flags, + unsigned int *flush_flags) { - p2m_type_t t; + p2m_type_t t; - /* - * Grant mappings can be used for DMA requests. The dev_bus_addr - * returned by the hypercall is the MFN (not the IPA). For device - * protected by an IOMMU, Xen needs to add a 1:1 mapping in the domain - * p2m to allow DMA request to work. - * This is only valid when the domain is directed mapped. Hence this - * function should only be used by gnttab code with gfn == mfn == dfn. - */ - BUG_ON(!is_domain_direct_mapped(d)); - BUG_ON(mfn_x(mfn) != dfn_x(dfn)); + /* + * Grant mappings can be used for DMA requests. The dev_bus_addr + * returned by the hypercall is the MFN (not the IPA). For device + * protected by an IOMMU, Xen needs to add a 1:1 mapping in the domain + * p2m to allow DMA request to work. + * This is only valid when the domain is directed mapped. Hence this + * function should only be used by gnttab code with gfn == mfn == dfn. 
+ */ + BUG_ON(!is_domain_direct_mapped(d)); + BUG_ON(mfn_x(mfn) != dfn_x(dfn)); - /* We only support readable and writable flags */ - if (!(flags & (IOMMUF_readable | IOMMUF_writable))) - return -EINVAL; + /* We only support readable and writable flags */ + if ( !(flags & (IOMMUF_readable | IOMMUF_writable)) ) + return -EINVAL; - t = (flags & IOMMUF_writable) ? p2m_iommu_map_rw : p2m_iommu_map_ro; + t = (flags & IOMMUF_writable) ? p2m_iommu_map_rw : p2m_iommu_map_ro; - /* - * The function guest_physmap_add_entry replaces the current mapping - * if there is already one... - */ - return guest_physmap_add_entry(d, _gfn(dfn_x(dfn)), _mfn(dfn_x(dfn)), - 0, t); + /* + * The function guest_physmap_add_entry replaces the current mapping + * if there is already one... + */ + return guest_physmap_add_entry(d, _gfn(dfn_x(dfn)), _mfn(dfn_x(dfn)), 0, t); } static int __must_check arm_smmu_unmap_page(struct domain *d, dfn_t dfn, unsigned int *flush_flags) { - /* - * This function should only be used by gnttab code when the domain - * is direct mapped (i.e. gfn == mfn == dfn). - */ - if ( !is_domain_direct_mapped(d) ) - return -EINVAL; + /* + * This function should only be used by gnttab code when the domain + * is direct mapped (i.e. gfn == mfn == dfn). + */ + if ( !is_domain_direct_mapped(d) ) + return -EINVAL; - return guest_physmap_remove_page(d, _gfn(dfn_x(dfn)), _mfn(dfn_x(dfn)), 0); + return guest_physmap_remove_page(d, _gfn(dfn_x(dfn)), _mfn(dfn_x(dfn)), 0); } static const struct iommu_ops arm_smmu_iommu_ops = { @@ -2792,55 +2886,56 @@ static const struct iommu_ops arm_smmu_iommu_ops = { static __init const struct arm_smmu_device *find_smmu(const struct device *dev) { - struct arm_smmu_device *smmu; - bool found = false; + struct arm_smmu_device *smmu; + bool found = false; - spin_lock(&arm_smmu_devices_lock); - list_for_each_entry(smmu, &arm_smmu_devices, list) { - if (smmu->dev == dev) { - found = true; - break; - } - } - spin_unlock(&arm_smmu_devices_lock); + spin_lock(&arm_smmu_devices_lock); + list_for_each_entry (smmu, &arm_smmu_devices, list) + { + if ( smmu->dev == dev ) + { + found = true; + break; + } + } + spin_unlock(&arm_smmu_devices_lock); - return (found) ? smmu : NULL; + return (found) ? smmu : NULL; } -static __init int arm_smmu_dt_init(struct dt_device_node *dev, - const void *data) +static __init int arm_smmu_dt_init(struct dt_device_node *dev, const void *data) { - int rc; - const struct arm_smmu_device *smmu; + int rc; + const struct arm_smmu_device *smmu; - /* - * Even if the device can't be initialized, we don't want to - * give the SMMU device to dom0. - */ - dt_device_set_used_by(dev, DOMID_XEN); + /* + * Even if the device can't be initialized, we don't want to + * give the SMMU device to dom0. + */ + dt_device_set_used_by(dev, DOMID_XEN); - if (!iommu_hap_pt_share) { - dev_err(dt_to_dev(dev), - "P2M table must always be shared between the CPU and the SMMU\n"); - return -EINVAL; - } + if ( !iommu_hap_pt_share ) + { + dev_err( + dt_to_dev(dev), + "P2M table must always be shared between the CPU and the SMMU\n"); + return -EINVAL; + } - rc = arm_smmu_device_dt_probe(dev); - if (rc) - return rc; + rc = arm_smmu_device_dt_probe(dev); + if ( rc ) + return rc; - iommu_set_ops(&arm_smmu_iommu_ops); + iommu_set_ops(&arm_smmu_iommu_ops); - /* Find the last SMMU added and retrieve its features. */ - smmu = find_smmu(dt_to_dev(dev)); - BUG_ON(smmu == NULL); + /* Find the last SMMU added and retrieve its features. 
*/ + smmu = find_smmu(dt_to_dev(dev)); + BUG_ON(smmu == NULL); - platform_features &= smmu->features; + platform_features &= smmu->features; - return 0; + return 0; } -DT_DEVICE_START(smmu, "ARM SMMU", DEVICE_IOMMU) - .dt_match = arm_smmu_of_match, - .init = arm_smmu_dt_init, -DT_DEVICE_END +DT_DEVICE_START(smmu, "ARM SMMU", DEVICE_IOMMU).dt_match = arm_smmu_of_match, + .init = arm_smmu_dt_init, DT_DEVICE_END diff --git a/xen/drivers/passthrough/device_tree.c b/xen/drivers/passthrough/device_tree.c index b6eaae7283..239d0b5e82 100644 --- a/xen/drivers/passthrough/device_tree.c +++ b/xen/drivers/passthrough/device_tree.c @@ -44,8 +44,7 @@ int iommu_assign_dt_device(struct domain *d, struct dt_device_node *dev) * The hwdom is forced to use IOMMU for protecting assigned * device. Therefore the IOMMU data is already set up. */ - ASSERT(!is_hardware_domain(d) || - hd->status == IOMMU_STATUS_initialized); + ASSERT(!is_hardware_domain(d) || hd->status == IOMMU_STATUS_initialized); rc = iommu_construct(d); if ( rc ) @@ -139,7 +138,7 @@ int iommu_do_dt_domctl(struct xen_domctl *domctl, struct domain *d, int ret; struct dt_device_node *dev; - switch ( domctl->cmd ) + switch (domctl->cmd) { case XEN_DOMCTL_assign_device: ASSERT(d); @@ -154,8 +153,7 @@ int iommu_do_dt_domctl(struct xen_domctl *domctl, struct domain *d, break; ret = dt_find_node_by_gpath(domctl->u.assign_device.u.dt.path, - domctl->u.assign_device.u.dt.size, - &dev); + domctl->u.assign_device.u.dt.size, &dev); if ( ret ) break; @@ -178,7 +176,7 @@ int iommu_do_dt_domctl(struct xen_domctl *domctl, struct domain *d, if ( ret ) printk(XENLOG_G_ERR "XEN_DOMCTL_assign_dt_device: assign \"%s\"" - " to dom%u failed (%d)\n", + " to dom%u failed (%d)\n", dt_node_full_name(dev), d->domain_id, ret); break; @@ -192,8 +190,7 @@ int iommu_do_dt_domctl(struct xen_domctl *domctl, struct domain *d, break; ret = dt_find_node_by_gpath(domctl->u.assign_device.u.dt.path, - domctl->u.assign_device.u.dt.size, - &dev); + domctl->u.assign_device.u.dt.size, &dev); if ( ret ) break; @@ -203,7 +200,7 @@ int iommu_do_dt_domctl(struct xen_domctl *domctl, struct domain *d, if ( ret ) printk(XENLOG_G_ERR "XEN_DOMCTL_assign_dt_device: assign \"%s\"" - " to dom%u failed (%d)\n", + " to dom%u failed (%d)\n", dt_node_full_name(dev), d->domain_id, ret); break; diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c index 4290c7c710..b7645e66b7 100644 --- a/xen/drivers/passthrough/io.c +++ b/xen/drivers/passthrough/io.c @@ -46,7 +46,8 @@ static DEFINE_PER_CPU(struct list_head, dpci_list); * ref-counting. 
*/ -enum { +enum +{ STATE_SCHED, STATE_RUN }; @@ -107,7 +108,7 @@ static void pt_pirq_softirq_reset(struct hvm_pirq_dpci *pirq_dpci) ASSERT(spin_is_locked(&d->event_lock)); - switch ( cmpxchg(&pirq_dpci->state, 1 << STATE_SCHED, 0) ) + switch (cmpxchg(&pirq_dpci->state, 1 << STATE_SCHED, 0)) { case (1 << STATE_SCHED): /* @@ -187,12 +188,12 @@ static void pt_irq_time_out(void *data) spin_unlock(&irq_map->dom->event_lock); return; } - list_for_each_entry ( digl, &irq_map->digl_list, list ) + list_for_each_entry (digl, &irq_map->digl_list, list) { unsigned int guest_gsi = hvm_pci_intx_gsi(digl->device, digl->intx); const struct hvm_girq_dpci_mapping *girq; - list_for_each_entry ( girq, &dpci->girq[guest_gsi], list ) + list_for_each_entry (girq, &dpci->girq[guest_gsi], list) { struct pirq *pirq = pirq_info(irq_map->dom, girq->machine_gsi); @@ -231,8 +232,7 @@ void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci) * destination vCPU in the array for the lowest-priority interrupt. */ static struct vcpu *vector_hashing_dest(const struct domain *d, - uint32_t dest_id, - bool dest_mode, + uint32_t dest_id, bool dest_mode, uint8_t gvec) { @@ -241,12 +241,12 @@ static struct vcpu *vector_hashing_dest(const struct domain *d, struct vcpu *v, *dest = NULL; unsigned int i; - dest_vcpu_bitmap = xzalloc_array(unsigned long, - BITS_TO_LONGS(d->max_vcpus)); + dest_vcpu_bitmap = + xzalloc_array(unsigned long, BITS_TO_LONGS(d->max_vcpus)); if ( !dest_vcpu_bitmap ) return NULL; - for_each_vcpu ( d, v ) + for_each_vcpu (d, v) { if ( !vlapic_match_dest(vcpu_vlapic(v), NULL, APIC_DEST_NOSHORT, dest_id, dest_mode) ) @@ -275,8 +275,8 @@ static struct vcpu *vector_hashing_dest(const struct domain *d, return dest; } -int pt_irq_create_bind( - struct domain *d, const struct xen_domctl_bind_pt_irq *pt_irq_bind) +int pt_irq_create_bind(struct domain *d, + const struct xen_domctl_bind_pt_irq *pt_irq_bind) { struct hvm_irq_dpci *hvm_irq_dpci; struct hvm_pirq_dpci *pirq_dpci; @@ -286,7 +286,7 @@ int pt_irq_create_bind( if ( pirq < 0 || pirq >= d->nr_pirqs ) return -EINVAL; - restart: +restart: spin_lock(&d->event_lock); hvm_irq_dpci = domain_get_irq_dpci(d); @@ -335,7 +335,7 @@ int pt_irq_create_bind( goto restart; } - switch ( pt_irq_bind->irq_type ) + switch (pt_irq_bind->irq_type) { case PT_IRQ_TYPE_MSI: { @@ -343,8 +343,8 @@ int pt_irq_create_bind( bool dest_mode; int dest_vcpu_id; const struct vcpu *vcpu; - uint32_t gflags = pt_irq_bind->u.msi.gflags & - ~XEN_DOMCTL_VMSI_X86_UNMASKED; + uint32_t gflags = + pt_irq_bind->u.msi.gflags & ~XEN_DOMCTL_VMSI_X86_UNMASKED; if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) ) { @@ -414,11 +414,11 @@ int pt_irq_create_bind( } } /* Calculate dest_vcpu_id for MSI-type pirq migration. */ - dest = MASK_EXTR(pirq_dpci->gmsi.gflags, - XEN_DOMCTL_VMSI_X86_DEST_ID_MASK); + dest = + MASK_EXTR(pirq_dpci->gmsi.gflags, XEN_DOMCTL_VMSI_X86_DEST_ID_MASK); dest_mode = pirq_dpci->gmsi.gflags & XEN_DOMCTL_VMSI_X86_DM_MASK; - delivery_mode = MASK_EXTR(pirq_dpci->gmsi.gflags, - XEN_DOMCTL_VMSI_X86_DELIV_MASK); + delivery_mode = + MASK_EXTR(pirq_dpci->gmsi.gflags, XEN_DOMCTL_VMSI_X86_DELIV_MASK); dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode); pirq_dpci->gmsi.dest_vcpu_id = dest_vcpu_id; @@ -439,8 +439,8 @@ int pt_irq_create_bind( /* Use interrupt posting if it is supported. */ if ( iommu_intpost ) - pi_update_irte(vcpu ? &vcpu->arch.hvm.vmx.pi_desc : NULL, - info, pirq_dpci->gmsi.gvec); + pi_update_irte(vcpu ? 
&vcpu->arch.hvm.vmx.pi_desc : NULL, info, + pirq_dpci->gmsi.gvec); if ( pt_irq_bind->u.msi.gflags & XEN_DOMCTL_VMSI_X86_UNMASKED ) { @@ -525,16 +525,14 @@ int pt_irq_create_bind( pirq_dpci->dom = d; if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI_TRANSLATE ) { - pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED | - HVM_IRQ_DPCI_MACH_MSI | + pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED | HVM_IRQ_DPCI_MACH_MSI | HVM_IRQ_DPCI_GUEST_PCI | HVM_IRQ_DPCI_TRANSLATE; share = 0; } - else /* PT_IRQ_TYPE_PCI */ + else /* PT_IRQ_TYPE_PCI */ { - pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED | - HVM_IRQ_DPCI_MACH_PCI | + pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED | HVM_IRQ_DPCI_MACH_PCI | HVM_IRQ_DPCI_GUEST_PCI; if ( !is_hardware_domain(d) ) share = BIND_PIRQ__WILL_SHARE; @@ -619,8 +617,8 @@ int pt_irq_create_bind( return 0; } -int pt_irq_destroy_bind( - struct domain *d, const struct xen_domctl_bind_pt_irq *pt_irq_bind) +int pt_irq_destroy_bind(struct domain *d, + const struct xen_domctl_bind_pt_irq *pt_irq_bind) { struct hvm_irq_dpci *hvm_irq_dpci; struct hvm_pirq_dpci *pirq_dpci; @@ -628,7 +626,7 @@ int pt_irq_destroy_bind( struct pirq *pirq; const char *what = NULL; - switch ( pt_irq_bind->irq_type ) + switch (pt_irq_bind->irq_type) { case PT_IRQ_TYPE_PCI: case PT_IRQ_TYPE_MSI_TRANSLATE: @@ -640,15 +638,15 @@ int pt_irq_destroy_bind( printk(XENLOG_G_INFO "d%d: unbind: m_gsi=%u g_gsi=%u dev=%02x:%02x.%u intx=%u\n", d->domain_id, machine_gsi, hvm_pci_intx_gsi(device, intx), - pt_irq_bind->u.pci.bus, - PCI_SLOT(device), PCI_FUNC(device), intx); + pt_irq_bind->u.pci.bus, PCI_SLOT(device), PCI_FUNC(device), + intx); } break; case PT_IRQ_TYPE_MSI: { unsigned long flags; - struct irq_desc *desc = domain_spin_lock_irq_desc(d, machine_gsi, - &flags); + struct irq_desc *desc = + domain_spin_lock_irq_desc(d, machine_gsi, &flags); if ( !desc ) return -EINVAL; @@ -688,12 +686,10 @@ int pt_irq_destroy_bind( struct hvm_girq_dpci_mapping *girq; struct dev_intx_gsi_link *digl, *tmp; - list_for_each_entry ( girq, &hvm_irq_dpci->girq[guest_gsi], list ) + list_for_each_entry (girq, &hvm_irq_dpci->girq[guest_gsi], list) { - if ( girq->bus == bus && - girq->device == device && - girq->intx == intx && - girq->machine_gsi == machine_gsi ) + if ( girq->bus == bus && girq->device == device && + girq->intx == intx && girq->machine_gsi == machine_gsi ) { list_del(&girq->list); xfree(girq); @@ -713,11 +709,10 @@ int pt_irq_destroy_bind( /* clear the mirq info */ if ( pirq_dpci && (pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) ) { - list_for_each_entry_safe ( digl, tmp, &pirq_dpci->digl_list, list ) + list_for_each_entry_safe(digl, tmp, &pirq_dpci->digl_list, list) { - if ( digl->bus == bus && - digl->device == device && - digl->intx == intx ) + if ( digl->bus == bus && digl->device == device && + digl->intx == intx ) { list_del(&digl->list); xfree(digl); @@ -757,11 +752,11 @@ int pt_irq_destroy_bind( if ( hvm_irq_dpci ) snprintf(buf, ARRAY_SIZE(buf), " dev=%02x.%02x.%u intx=%u", - pt_irq_bind->u.pci.bus, PCI_SLOT(device), - PCI_FUNC(device), pt_irq_bind->u.pci.intx); + pt_irq_bind->u.pci.bus, PCI_SLOT(device), PCI_FUNC(device), + pt_irq_bind->u.pci.intx); - printk(XENLOG_G_INFO "d%d %s unmap: m_irq=%u%s\n", - d->domain_id, what, machine_gsi, buf); + printk(XENLOG_G_INFO "d%d %s unmap: m_irq=%u%s\n", d->domain_id, what, + machine_gsi, buf); } return 0; @@ -784,8 +779,7 @@ bool pt_pirq_cleanup_check(struct hvm_pirq_dpci *dpci) } int pt_pirq_iterate(struct domain *d, - int (*cb)(struct domain *, - struct hvm_pirq_dpci *, void *), + int (*cb)(struct domain *, struct 
hvm_pirq_dpci *, void *), void *arg) { int rc = 0; @@ -817,8 +811,8 @@ int hvm_do_IRQ_dpci(struct domain *d, struct pirq *pirq) ASSERT(is_hvm_domain(d)); - if ( !iommu_enabled || (!is_hardware_domain(d) && !dpci) || - !pirq_dpci || !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) ) + if ( !iommu_enabled || (!is_hardware_domain(d) && !dpci) || !pirq_dpci || + !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) ) return 0; pirq_dpci->masked = 1; @@ -844,20 +838,19 @@ static void __msi_pirq_eoi(struct hvm_pirq_dpci *pirq_dpci) } } -static int _hvm_dpci_msi_eoi(struct domain *d, - struct hvm_pirq_dpci *pirq_dpci, void *arg) +static int _hvm_dpci_msi_eoi(struct domain *d, struct hvm_pirq_dpci *pirq_dpci, + void *arg) { int vector = (long)arg; if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) && (pirq_dpci->gmsi.gvec == vector) ) { - unsigned int dest = MASK_EXTR(pirq_dpci->gmsi.gflags, - XEN_DOMCTL_VMSI_X86_DEST_ID_MASK); + unsigned int dest = + MASK_EXTR(pirq_dpci->gmsi.gflags, XEN_DOMCTL_VMSI_X86_DEST_ID_MASK); bool dest_mode = pirq_dpci->gmsi.gflags & XEN_DOMCTL_VMSI_X86_DM_MASK; - if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest, - dest_mode) ) + if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest, dest_mode) ) { __msi_pirq_eoi(pirq_dpci); return 1; @@ -871,7 +864,7 @@ void hvm_dpci_msi_eoi(struct domain *d, int vector) { if ( !iommu_enabled || (!hvm_domain_irq(d)->dpci && !is_hardware_domain(d)) ) - return; + return; spin_lock(&d->event_lock); pt_pirq_iterate(d, _hvm_dpci_msi_eoi, (void *)(long)vector); @@ -910,7 +903,7 @@ static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci) return; } - list_for_each_entry ( digl, &pirq_dpci->digl_list, list ) + list_for_each_entry (digl, &pirq_dpci->digl_list, list) { ASSERT(!(pirq_dpci->flags & HVM_IRQ_DPCI_IDENTITY_GSI)); hvm_pci_intx_assert(d, digl->device, digl->intx); @@ -961,8 +954,7 @@ static void hvm_pirq_eoi(struct pirq *pirq, * No need to get vector lock for timer * since interrupt is still not EOIed */ - if ( --pirq_dpci->pending || - (ent && ent->fields.mask) || + if ( --pirq_dpci->pending || (ent && ent->fields.mask) || !pt_irq_need_timer(pirq_dpci->flags) ) return; @@ -1023,7 +1015,7 @@ void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi, if ( !hvm_irq_dpci ) goto unlock; - list_for_each_entry ( girq, &hvm_irq_dpci->girq[guest_gsi], list ) + list_for_each_entry (girq, &hvm_irq_dpci->girq[guest_gsi], list) __hvm_dpci_eoi(d, girq, ent); unlock: @@ -1048,7 +1040,8 @@ static void dpci_softirq(void) struct hvm_pirq_dpci *pirq_dpci; struct domain *d; - pirq_dpci = list_entry(our_list.next, struct hvm_pirq_dpci, softirq_list); + pirq_dpci = + list_entry(our_list.next, struct hvm_pirq_dpci, softirq_list); list_del(&pirq_dpci->softirq_list); d = pirq_dpci->dom; @@ -1077,12 +1070,12 @@ static void dpci_softirq(void) } } -static int cpu_callback( - struct notifier_block *nfb, unsigned long action, void *hcpu) +static int cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - switch ( action ) + switch (action) { case CPU_UP_PREPARE: INIT_LIST_HEAD(&per_cpu(dpci_list, cpu)); @@ -1111,7 +1104,7 @@ static int __init setup_dpci_softirq(void) { unsigned int cpu; - for_each_online_cpu(cpu) + for_each_online_cpu (cpu) INIT_LIST_HEAD(&per_cpu(dpci_list, cpu)); open_softirq(HVM_DPCI_SOFTIRQ, dpci_softirq); diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c index 5ed3e7ff0c..870552380c 100644 --- a/xen/drivers/passthrough/iommu.c +++ 
b/xen/drivers/passthrough/iommu.c @@ -91,7 +91,8 @@ static int __init parse_iommu_param(const char *s) if ( val ) iommu_verbose = 1; } - else if ( (val = parse_boolean("amd-iommu-perdev-intremap", s, ss)) >= 0 ) + else if ( (val = parse_boolean("amd-iommu-perdev-intremap", s, ss)) >= + 0 ) amd_iommu_perdev_intremap = val; else if ( (val = parse_boolean("dom0-passthrough", s, ss)) >= 0 ) iommu_hwdom_passthrough = val; @@ -160,7 +161,7 @@ int iommu_domain_init(struct domain *d, bool use_iommu) */ if ( is_hardware_domain(d) ) use_iommu = (paging_mode_translate(d) && !iommu_hwdom_passthrough) || - iommu_hwdom_strict; + iommu_hwdom_strict; hd->platform_ops = iommu_get_ops(); ret = hd->platform_ops->init(d, use_iommu); @@ -262,9 +263,8 @@ void iommu_domain_destroy(struct domain *d) arch_iommu_domain_destroy(d); } -int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn, - unsigned int page_order, unsigned int flags, - unsigned int *flush_flags) +int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn, unsigned int page_order, + unsigned int flags, unsigned int *flush_flags) { const struct domain_iommu *hd = dom_iommu(d); int rc = 0; @@ -280,12 +280,12 @@ int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn, { rc = hd->platform_ops->map_pages(d, dfn, mfn, page_order, flags, flush_flags); - if ( unlikely (rc) ) + if ( unlikely(rc) ) if ( !d->is_shutting_down && printk_ratelimit() ) - printk(XENLOG_ERR - "d%d: IOMMU mapping dfn %"PRI_dfn" to mfn %"PRI_mfn - " with order %"PRIu32" failed: %d\n", d->domain_id, - dfn_x(dfn), mfn_x(mfn), page_order, rc); + printk(XENLOG_ERR "d%d: IOMMU mapping dfn %" PRI_dfn + " to mfn %" PRI_mfn " with order %" PRIu32 + " failed: %d\n", + d->domain_id, dfn_x(dfn), mfn_x(mfn), page_order, rc); } else { @@ -300,9 +300,9 @@ int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn, continue; if ( !d->is_shutting_down && printk_ratelimit() ) - printk(XENLOG_ERR - "d%d: IOMMU mapping dfn %"PRI_dfn" to mfn %"PRI_mfn - " failed: %d\n", d->domain_id, dfn_x(dfn_add(dfn, i)), + printk(XENLOG_ERR "d%d: IOMMU mapping dfn %" PRI_dfn + " to mfn %" PRI_mfn " failed: %d\n", + d->domain_id, dfn_x(dfn_add(dfn, i)), mfn_x(mfn_add(mfn, i)), rc); while ( i-- ) @@ -315,7 +315,7 @@ int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn, } } - if ( unlikely (rc) ) + if ( unlikely(rc) ) if ( !is_hardware_domain(d) ) domain_crash(d); @@ -330,8 +330,7 @@ int iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn, if ( !this_cpu(iommu_dont_flush_iotlb) ) { - int err = iommu_iotlb_flush(d, dfn, (1u << page_order), - flush_flags); + int err = iommu_iotlb_flush(d, dfn, (1u << page_order), flush_flags); if ( !rc ) rc = err; @@ -355,12 +354,11 @@ int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order, if ( hd->platform_ops->unmap_pages ) { rc = hd->platform_ops->unmap_pages(d, dfn, page_order, flush_flags); - if ( unlikely (rc) ) + if ( unlikely(rc) ) if ( !d->is_shutting_down && printk_ratelimit() ) - printk(XENLOG_ERR - "d%d: IOMMU unmapping dfn %"PRI_dfn" with order %"PRIu32 - " failed: %d\n", d->domain_id, dfn_x(dfn), - page_order, rc); + printk(XENLOG_ERR "d%d: IOMMU unmapping dfn %" PRI_dfn + " with order %" PRIu32 " failed: %d\n", + d->domain_id, dfn_x(dfn), page_order, rc); } else { @@ -368,15 +366,15 @@ int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order, for ( i = 0; i < (1ul << page_order); i++ ) { - int err = hd->platform_ops->unmap_page(d, dfn_add(dfn, i), - flush_flags); + int err = + hd->platform_ops->unmap_page(d, dfn_add(dfn, i), flush_flags); if ( likely(!err) ) 
continue; if ( !d->is_shutting_down && printk_ratelimit() ) - printk(XENLOG_ERR - "d%d: IOMMU unmapping dfn %"PRI_dfn" failed: %d\n", + printk(XENLOG_ERR "d%d: IOMMU unmapping dfn %" PRI_dfn + " failed: %d\n", d->domain_id, dfn_x(dfn_add(dfn, i)), err); if ( !rc ) @@ -387,7 +385,7 @@ int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order, } } - if ( unlikely (rc) ) + if ( unlikely(rc) ) if ( !is_hardware_domain(d) ) domain_crash(d); @@ -401,8 +399,7 @@ int iommu_legacy_unmap(struct domain *d, dfn_t dfn, unsigned int page_order) if ( !this_cpu(iommu_dont_flush_iotlb) ) { - int err = iommu_iotlb_flush(d, dfn, (1u << page_order), - flush_flags); + int err = iommu_iotlb_flush(d, dfn, (1u << page_order), flush_flags); if ( !rc ) rc = err; @@ -456,8 +453,8 @@ int iommu_iotlb_flush(struct domain *d, dfn_t dfn, unsigned int page_count, if ( unlikely(rc) ) { if ( !d->is_shutting_down && printk_ratelimit() ) - printk(XENLOG_ERR - "d%d: IOMMU IOTLB flush failed: %d, dfn %"PRI_dfn", page count %u flags %x\n", + printk(XENLOG_ERR "d%d: IOMMU IOTLB flush failed: %d, dfn %" PRI_dfn + ", page count %u flags %x\n", d->domain_id, rc, dfn_x(dfn), page_count, flush_flags); if ( !is_hardware_domain(d) ) @@ -484,8 +481,7 @@ int iommu_iotlb_flush_all(struct domain *d, unsigned int flush_flags) if ( unlikely(rc) ) { if ( !d->is_shutting_down && printk_ratelimit() ) - printk(XENLOG_ERR - "d%d: IOMMU IOTLB flush all failed: %d\n", + printk(XENLOG_ERR "d%d: IOMMU IOTLB flush all failed: %d\n", d->domain_id, rc); if ( !is_hardware_domain(d) ) @@ -529,8 +525,9 @@ int __init iommu_setup(void) if ( iommu_enabled ) { printk(" - Dom0 mode: %s\n", - iommu_hwdom_passthrough ? "Passthrough" : - iommu_hwdom_strict ? "Strict" : "Relaxed"); + iommu_hwdom_passthrough + ? "Passthrough" + : iommu_hwdom_strict ? "Strict" : "Relaxed"); printk("Interrupt remapping %sabled\n", iommu_intremap ? "en" : "dis"); tasklet_init(&iommu_pt_cleanup_tasklet, iommu_free_pagetables, 0); } @@ -552,9 +549,8 @@ void iommu_resume() iommu_get_ops()->resume(); } -int iommu_do_domctl( - struct xen_domctl *domctl, struct domain *d, - XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) +int iommu_do_domctl(struct xen_domctl *domctl, struct domain *d, + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) { int ret = -ENODEV; @@ -573,7 +569,7 @@ int iommu_do_domctl( return ret; } -void iommu_share_p2m_table(struct domain* d) +void iommu_share_p2m_table(struct domain *d) { #ifdef CONFIG_X86 ASSERT(hap_enabled(d)); @@ -627,7 +623,7 @@ static void iommu_dump_p2m_table(unsigned char key) } ops = iommu_get_ops(); - for_each_domain(d) + for_each_domain (d) { if ( is_hardware_domain(d) || dom_iommu(d)->status < IOMMU_STATUS_initialized ) @@ -635,7 +631,8 @@ static void iommu_dump_p2m_table(unsigned char key) if ( iommu_use_hap_pt(d) ) { - printk("\ndomain%d IOMMU p2m table shared with MMU: \n", d->domain_id); + printk("\ndomain%d IOMMU p2m table shared with MMU: \n", + d->domain_id); continue; } diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c index 8108ed5f9a..0ccc5d579c 100644 --- a/xen/drivers/passthrough/pci.c +++ b/xen/drivers/passthrough/pci.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2008, Netronome Systems, Inc. - * + * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. 
@@ -37,14 +37,16 @@ #include #include "ats.h" -struct pci_seg { +struct pci_seg +{ struct list_head alldevs_list; u16 nr; unsigned long *ro_map; /* bus2bridge_lock protects bus2bridge array */ spinlock_t bus2bridge_lock; #define MAX_BUSES 256 - struct { + struct + { u8 map; u8 bus; u8 devfn; @@ -109,8 +111,8 @@ static struct pci_seg *alloc_pseg(u16 seg) return pseg; } -static int pci_segments_iterate( - int (*handler)(struct pci_seg *, void *), void *arg) +static int pci_segments_iterate(int (*handler)(struct pci_seg *, void *), + void *arg) { u16 seg = 0; int rc = 0; @@ -122,7 +124,7 @@ static int pci_segments_iterate( break; rc = handler(pseg, arg); seg = pseg->nr + 1; - } while (!rc && seg); + } while ( !rc && seg ); return rc; } @@ -146,7 +148,8 @@ const unsigned long *pci_get_ro_map(u16 seg) return pseg ? pseg->ro_map : NULL; } -static struct phantom_dev { +static struct phantom_dev +{ u16 seg; u8 bus, slot, stride; } phantom_devs[8]; @@ -171,11 +174,13 @@ static int __init parse_phantom_dev(const char *str) phantom.bus = bus; phantom.slot = slot; - switch ( phantom.stride = simple_strtol(s + 1, &s, 0) ) + switch (phantom.stride = simple_strtol(s + 1, &s, 0)) { - case 1: case 2: case 4: + case 1: + case 2: + case 4: if ( *s ) - default: + default: return -EINVAL; } @@ -234,8 +239,8 @@ custom_param("pci", parse_pci_param); static void check_pdev(const struct pci_dev *pdev) { -#define PCI_STATUS_CHECK \ - (PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | \ +#define PCI_STATUS_CHECK \ + (PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | \ PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT | \ PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY) u16 seg = pdev->seg; @@ -253,14 +258,14 @@ static void check_pdev(const struct pci_dev *pdev) val = pci_conf_read16(seg, bus, dev, func, PCI_STATUS); if ( val & PCI_STATUS_CHECK ) { - printk(XENLOG_INFO "%04x:%02x:%02x.%u status %04x -> %04x\n", - seg, bus, dev, func, val, val & ~PCI_STATUS_CHECK); + printk(XENLOG_INFO "%04x:%02x:%02x.%u status %04x -> %04x\n", seg, + bus, dev, func, val, val & ~PCI_STATUS_CHECK); pci_conf_write16(seg, bus, dev, func, PCI_STATUS, val & PCI_STATUS_CHECK); } } - switch ( pci_conf_read8(seg, bus, dev, func, PCI_HEADER_TYPE) & 0x7f ) + switch (pci_conf_read8(seg, bus, dev, func, PCI_HEADER_TYPE) & 0x7f) { case PCI_HEADER_TYPE_BRIDGE: if ( !bridge_ctl_mask ) @@ -289,35 +294,37 @@ static void check_pdev(const struct pci_dev *pdev) static void apply_quirks(struct pci_dev *pdev) { - uint16_t vendor = pci_conf_read16(pdev->seg, pdev->bus, - PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn), PCI_VENDOR_ID); - uint16_t device = pci_conf_read16(pdev->seg, pdev->bus, - PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn), PCI_DEVICE_ID); - static const struct { + uint16_t vendor = + pci_conf_read16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn), PCI_VENDOR_ID); + uint16_t device = + pci_conf_read16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn), PCI_DEVICE_ID); + static const struct + { uint16_t vendor, device; } ignore_bars[] = { /* * Device [8086:2fc0] * Erratum HSE43 * CONFIG_TDP_NOMINAL CSR Implemented at Incorrect Offset - * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v3-spec-update.html + * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v3-spec-update.html */ - { PCI_VENDOR_ID_INTEL, 0x2fc0 }, + {PCI_VENDOR_ID_INTEL, 0x2fc0}, /* * Devices [8086:6f60,6fa0,6fc0] * Errata BDF2 / BDX2 - * PCI BARs in the Home Agent Will Return Non-Zero Values 
During Enumeration - * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html - */ - { PCI_VENDOR_ID_INTEL, 0x6f60 }, - { PCI_VENDOR_ID_INTEL, 0x6fa0 }, - { PCI_VENDOR_ID_INTEL, 0x6fc0 }, + * PCI BARs in the Home Agent Will Return Non-Zero Values During + * Enumeration + * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html + */ + {PCI_VENDOR_ID_INTEL, 0x6f60}, + {PCI_VENDOR_ID_INTEL, 0x6fa0}, + {PCI_VENDOR_ID_INTEL, 0x6fc0}, }; unsigned int i; - for ( i = 0; i < ARRAY_SIZE(ignore_bars); i++) + for ( i = 0; i < ARRAY_SIZE(ignore_bars); i++ ) if ( vendor == ignore_bars[i].vendor && device == ignore_bars[i].device ) /* @@ -331,7 +338,7 @@ static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn) { struct pci_dev *pdev; - list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list ) + list_for_each_entry (pdev, &pseg->alldevs_list, alldevs_list) if ( pdev->bus == bus && pdev->devfn == devfn ) return pdev; @@ -339,9 +346,9 @@ static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn) if ( !pdev ) return NULL; - *(u16*) &pdev->seg = pseg->nr; - *((u8*) &pdev->bus) = bus; - *((u8*) &pdev->devfn) = devfn; + *(u16 *)&pdev->seg = pseg->nr; + *((u8 *)&pdev->bus) = bus; + *((u8 *)&pdev->devfn) = devfn; pdev->domain = NULL; INIT_LIST_HEAD(&pdev->msi_list); @@ -362,67 +369,66 @@ static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn) list_add(&pdev->alldevs_list, &pseg->alldevs_list); /* update bus2bridge */ - switch ( pdev->type = pdev_type(pseg->nr, bus, devfn) ) + switch (pdev->type = pdev_type(pseg->nr, bus, devfn)) { int pos; u16 cap; u8 sec_bus, sub_bus; - case DEV_TYPE_PCIe2PCI_BRIDGE: - case DEV_TYPE_LEGACY_PCI_BRIDGE: - sec_bus = pci_conf_read8(pseg->nr, bus, PCI_SLOT(devfn), - PCI_FUNC(devfn), PCI_SECONDARY_BUS); - sub_bus = pci_conf_read8(pseg->nr, bus, PCI_SLOT(devfn), - PCI_FUNC(devfn), PCI_SUBORDINATE_BUS); + case DEV_TYPE_PCIe2PCI_BRIDGE: + case DEV_TYPE_LEGACY_PCI_BRIDGE: + sec_bus = pci_conf_read8(pseg->nr, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn), PCI_SECONDARY_BUS); + sub_bus = pci_conf_read8(pseg->nr, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn), PCI_SUBORDINATE_BUS); - spin_lock(&pseg->bus2bridge_lock); - for ( ; sec_bus <= sub_bus; sec_bus++ ) - { - pseg->bus2bridge[sec_bus].map = 1; - pseg->bus2bridge[sec_bus].bus = bus; - pseg->bus2bridge[sec_bus].devfn = devfn; - } - spin_unlock(&pseg->bus2bridge_lock); - break; + spin_lock(&pseg->bus2bridge_lock); + for ( ; sec_bus <= sub_bus; sec_bus++ ) + { + pseg->bus2bridge[sec_bus].map = 1; + pseg->bus2bridge[sec_bus].bus = bus; + pseg->bus2bridge[sec_bus].devfn = devfn; + } + spin_unlock(&pseg->bus2bridge_lock); + break; - case DEV_TYPE_PCIe_ENDPOINT: - pos = pci_find_cap_offset(pseg->nr, bus, PCI_SLOT(devfn), - PCI_FUNC(devfn), PCI_CAP_ID_EXP); - BUG_ON(!pos); - cap = pci_conf_read16(pseg->nr, bus, PCI_SLOT(devfn), - PCI_FUNC(devfn), pos + PCI_EXP_DEVCAP); - if ( cap & PCI_EXP_DEVCAP_PHANTOM ) - { - pdev->phantom_stride = 8 >> MASK_EXTR(cap, - PCI_EXP_DEVCAP_PHANTOM); - if ( PCI_FUNC(devfn) >= pdev->phantom_stride ) - pdev->phantom_stride = 0; - } - else - { - unsigned int i; - - for ( i = 0; i < nr_phantom_devs; ++i ) - if ( phantom_devs[i].seg == pseg->nr && - phantom_devs[i].bus == bus && - phantom_devs[i].slot == PCI_SLOT(devfn) && - phantom_devs[i].stride > PCI_FUNC(devfn) ) - { - pdev->phantom_stride = phantom_devs[i].stride; - break; - } - } - break; + case DEV_TYPE_PCIe_ENDPOINT: + pos = pci_find_cap_offset(pseg->nr, bus, 
PCI_SLOT(devfn), + PCI_FUNC(devfn), PCI_CAP_ID_EXP); + BUG_ON(!pos); + cap = pci_conf_read16(pseg->nr, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), + pos + PCI_EXP_DEVCAP); + if ( cap & PCI_EXP_DEVCAP_PHANTOM ) + { + pdev->phantom_stride = 8 >> MASK_EXTR(cap, PCI_EXP_DEVCAP_PHANTOM); + if ( PCI_FUNC(devfn) >= pdev->phantom_stride ) + pdev->phantom_stride = 0; + } + else + { + unsigned int i; - case DEV_TYPE_PCI: - case DEV_TYPE_PCIe_BRIDGE: - case DEV_TYPE_PCI_HOST_BRIDGE: - break; + for ( i = 0; i < nr_phantom_devs; ++i ) + if ( phantom_devs[i].seg == pseg->nr && + phantom_devs[i].bus == bus && + phantom_devs[i].slot == PCI_SLOT(devfn) && + phantom_devs[i].stride > PCI_FUNC(devfn) ) + { + pdev->phantom_stride = phantom_devs[i].stride; + break; + } + } + break; - default: - printk(XENLOG_WARNING "%04x:%02x:%02x.%u: unknown type %d\n", - pseg->nr, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pdev->type); - break; + case DEV_TYPE_PCI: + case DEV_TYPE_PCIe_BRIDGE: + case DEV_TYPE_PCI_HOST_BRIDGE: + break; + + default: + printk(XENLOG_WARNING "%04x:%02x:%02x.%u: unknown type %d\n", pseg->nr, + bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pdev->type); + break; } check_pdev(pdev); @@ -434,27 +440,27 @@ static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn) static void free_pdev(struct pci_seg *pseg, struct pci_dev *pdev) { /* update bus2bridge */ - switch ( pdev->type ) + switch (pdev->type) { u8 dev, func, sec_bus, sub_bus; - case DEV_TYPE_PCIe2PCI_BRIDGE: - case DEV_TYPE_LEGACY_PCI_BRIDGE: - dev = PCI_SLOT(pdev->devfn); - func = PCI_FUNC(pdev->devfn); - sec_bus = pci_conf_read8(pseg->nr, pdev->bus, dev, func, - PCI_SECONDARY_BUS); - sub_bus = pci_conf_read8(pseg->nr, pdev->bus, dev, func, - PCI_SUBORDINATE_BUS); - - spin_lock(&pseg->bus2bridge_lock); - for ( ; sec_bus <= sub_bus; sec_bus++ ) - pseg->bus2bridge[sec_bus] = pseg->bus2bridge[pdev->bus]; - spin_unlock(&pseg->bus2bridge_lock); - break; + case DEV_TYPE_PCIe2PCI_BRIDGE: + case DEV_TYPE_LEGACY_PCI_BRIDGE: + dev = PCI_SLOT(pdev->devfn); + func = PCI_FUNC(pdev->devfn); + sec_bus = + pci_conf_read8(pseg->nr, pdev->bus, dev, func, PCI_SECONDARY_BUS); + sub_bus = + pci_conf_read8(pseg->nr, pdev->bus, dev, func, PCI_SUBORDINATE_BUS); + + spin_lock(&pseg->bus2bridge_lock); + for ( ; sec_bus <= sub_bus; sec_bus++ ) + pseg->bus2bridge[sec_bus] = pseg->bus2bridge[pdev->bus]; + spin_unlock(&pseg->bus2bridge_lock); + break; - default: - break; + default: + break; } list_del(&pdev->alldevs_list); @@ -538,7 +544,7 @@ struct pci_dev *pci_get_pdev(int seg, int bus, int devfn) } do { - list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list ) + list_for_each_entry (pdev, &pseg->alldevs_list, alldevs_list) if ( (pdev->bus == bus || bus == -1) && (pdev->devfn == devfn || devfn == -1) ) return pdev; @@ -556,8 +562,8 @@ struct pci_dev *pci_get_real_pdev(int seg, int bus, int devfn) if ( seg < 0 || bus < 0 || devfn < 0 ) return NULL; - for ( pdev = pci_get_pdev(seg, bus, devfn), stride = 4; - !pdev && stride; stride >>= 1 ) + for ( pdev = pci_get_pdev(seg, bus, devfn), stride = 4; !pdev && stride; + stride >>= 1 ) { if ( !(devfn & (8 - stride)) ) continue; @@ -569,8 +575,8 @@ struct pci_dev *pci_get_real_pdev(int seg, int bus, int devfn) return pdev; } -struct pci_dev *pci_get_pdev_by_domain(const struct domain *d, int seg, - int bus, int devfn) +struct pci_dev *pci_get_pdev_by_domain(const struct domain *d, int seg, int bus, + int devfn) { struct pci_seg *pseg = get_pseg(seg); struct pci_dev *pdev = NULL; @@ -587,10 +593,9 @@ struct pci_dev 
*pci_get_pdev_by_domain(const struct domain *d, int seg, } do { - list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list ) + list_for_each_entry (pdev, &pseg->alldevs_list, alldevs_list) if ( (pdev->bus == bus || bus == -1) && - (pdev->devfn == devfn || devfn == -1) && - (pdev->domain == d) ) + (pdev->devfn == devfn || devfn == -1) && (pdev->domain == d) ) return pdev; } while ( radix_tree_gang_lookup(&pci_segments, (void **)&pseg, pseg->nr + 1, 1) ); @@ -614,7 +619,7 @@ static void pci_enable_acs(struct pci_dev *pdev) return; pos = pci_find_ext_capability(seg, bus, pdev->devfn, PCI_EXT_CAP_ID_ACS); - if (!pos) + if ( !pos ) return; cap = pci_conf_read16(seg, bus, dev, func, pos + PCI_ACS_CAP); @@ -646,7 +651,8 @@ unsigned int pci_size_mem_bar(pci_sbdf_t sbdf, unsigned int pos, uint32_t hi = 0, bar = pci_conf_read32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, pos); uint64_t size; - bool is64bits = !(flags & PCI_BAR_ROM) && + bool is64bits = + !(flags & PCI_BAR_ROM) && (bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64; uint32_t mask = (flags & PCI_BAR_ROM) ? (uint32_t)PCI_ROM_ADDRESS_MASK : (uint32_t)PCI_BASE_ADDRESS_MEM_MASK; @@ -659,22 +665,23 @@ unsigned int pci_size_mem_bar(pci_sbdf_t sbdf, unsigned int pos, { if ( flags & PCI_BAR_LAST ) { - printk(XENLOG_WARNING - "%sdevice %04x:%02x:%02x.%u with 64-bit %sBAR in last slot\n", - (flags & PCI_BAR_VF) ? "SR-IOV " : "", sbdf.seg, sbdf.bus, - sbdf.dev, sbdf.func, (flags & PCI_BAR_VF) ? "vf " : ""); + printk( + XENLOG_WARNING + "%sdevice %04x:%02x:%02x.%u with 64-bit %sBAR in last slot\n", + (flags & PCI_BAR_VF) ? "SR-IOV " : "", sbdf.seg, sbdf.bus, + sbdf.dev, sbdf.func, (flags & PCI_BAR_VF) ? "vf " : ""); *psize = 0; return 1; } hi = pci_conf_read32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, pos + 4); pci_conf_write32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, pos + 4, ~0); } - size = pci_conf_read32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, - pos) & mask; + size = pci_conf_read32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, pos) & mask; if ( is64bits ) { size |= (uint64_t)pci_conf_read32(sbdf.seg, sbdf.bus, sbdf.dev, - sbdf.func, pos + 4) << 32; + sbdf.func, pos + 4) + << 32; pci_conf_write32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, pos + 4, hi); } else if ( size ) @@ -689,8 +696,8 @@ unsigned int pci_size_mem_bar(pci_sbdf_t sbdf, unsigned int pos, return is64bits ? 
2 : 1; } -int pci_add_device(u16 seg, u8 bus, u8 devfn, - const struct pci_dev_info *info, nodeid_t node) +int pci_add_device(u16 seg, u8 bus, u8 devfn, const struct pci_dev_info *info, + nodeid_t node) { struct pci_seg *pseg; struct pci_dev *pdev; @@ -709,8 +716,8 @@ int pci_add_device(u16 seg, u8 bus, u8 devfn, pf_is_extfn = pdev->info.is_extfn; pcidevs_unlock(); if ( !pdev ) - pci_add_device(seg, info->physfn.bus, info->physfn.devfn, - NULL, node); + pci_add_device(seg, info->physfn.bus, info->physfn.devfn, NULL, + node); pdev_type = "virtual function"; } else if ( info->is_extfn ) @@ -747,8 +754,8 @@ int pci_add_device(u16 seg, u8 bus, u8 devfn, if ( !pdev->info.is_virtfn && !pdev->vf_rlen[0] ) { - unsigned int pos = pci_find_ext_capability(seg, bus, devfn, - PCI_EXT_CAP_ID_SRIOV); + unsigned int pos = + pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_SRIOV); u16 ctrl = pci_conf_read16(seg, bus, slot, func, pos + PCI_SRIOV_CTRL); if ( !pos ) @@ -775,10 +782,10 @@ int pci_add_device(u16 seg, u8 bus, u8 devfn, seg, bus, slot, func, i); continue; } - ret = pci_size_mem_bar(sbdf, idx, NULL, &pdev->vf_rlen[i], - PCI_BAR_VF | - ((i == PCI_SRIOV_NUM_BARS - 1) ? - PCI_BAR_LAST : 0)); + ret = pci_size_mem_bar( + sbdf, idx, NULL, &pdev->vf_rlen[i], + PCI_BAR_VF | + ((i == PCI_SRIOV_NUM_BARS - 1) ? PCI_BAR_LAST : 0)); ASSERT(ret); i += ret; } @@ -814,15 +821,15 @@ out: pcidevs_unlock(); if ( !ret ) { - printk(XENLOG_DEBUG "PCI add %s %04x:%02x:%02x.%u\n", pdev_type, - seg, bus, slot, func); + printk(XENLOG_DEBUG "PCI add %s %04x:%02x:%02x.%u\n", pdev_type, seg, + bus, slot, func); while ( pdev->phantom_stride ) { func += pdev->phantom_stride; if ( PCI_SLOT(func) ) break; - printk(XENLOG_DEBUG "PCI phantom %04x:%02x:%02x.%u\n", - seg, bus, slot, func); + printk(XENLOG_DEBUG "PCI phantom %04x:%02x:%02x.%u\n", seg, bus, + slot, func); } } return ret; @@ -844,7 +851,7 @@ int pci_remove_device(u16 seg, u8 bus, u8 devfn) return -ENODEV; pcidevs_lock(); - list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list ) + list_for_each_entry (pdev, &pseg->alldevs_list, alldevs_list) if ( pdev->bus == bus && pdev->devfn == devfn ) { ret = iommu_remove_device(pdev); @@ -852,8 +859,8 @@ int pci_remove_device(u16 seg, u8 bus, u8 devfn) list_del(&pdev->domain_list); pci_cleanup_msi(pdev); free_pdev(pseg, pdev); - printk(XENLOG_DEBUG "PCI remove device %04x:%02x:%02x.%u\n", - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); + printk(XENLOG_DEBUG "PCI remove device %04x:%02x:%02x.%u\n", seg, + bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); break; } @@ -861,8 +868,8 @@ int pci_remove_device(u16 seg, u8 bus, u8 devfn) return ret; } -static int pci_clean_dpci_irq(struct domain *d, - struct hvm_pirq_dpci *pirq_dpci, void *arg) +static int pci_clean_dpci_irq(struct domain *d, struct hvm_pirq_dpci *pirq_dpci, + void *arg) { struct dev_intx_gsi_link *digl, *tmp; @@ -871,7 +878,7 @@ static int pci_clean_dpci_irq(struct domain *d, if ( pt_irq_need_timer(pirq_dpci->flags) ) kill_timer(&pirq_dpci->timer); - list_for_each_entry_safe ( digl, tmp, &pirq_dpci->digl_list, list ) + list_for_each_entry_safe(digl, tmp, &pirq_dpci->digl_list, list) { list_del(&digl->list); xfree(digl); @@ -928,16 +935,16 @@ int pci_release_devices(struct domain *d) devfn = pdev->devfn; if ( deassign_device(d, pdev->seg, bus, devfn) ) printk("domain %d: deassign device (%04x:%02x:%02x.%u) failed!\n", - d->domain_id, pdev->seg, bus, - PCI_SLOT(devfn), PCI_FUNC(devfn)); + d->domain_id, pdev->seg, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn)); } 
pcidevs_unlock(); return 0; } -#define PCI_CLASS_BRIDGE_HOST 0x0600 -#define PCI_CLASS_BRIDGE_PCI 0x0604 +#define PCI_CLASS_BRIDGE_HOST 0x0600 +#define PCI_CLASS_BRIDGE_PCI 0x0604 enum pdev_type pdev_type(u16 seg, u8 bus, u8 devfn) { @@ -946,13 +953,13 @@ enum pdev_type pdev_type(u16 seg, u8 bus, u8 devfn) int pos = pci_find_cap_offset(seg, bus, d, f, PCI_CAP_ID_EXP); class_device = pci_conf_read16(seg, bus, d, f, PCI_CLASS_DEVICE); - switch ( class_device ) + switch (class_device) { case PCI_CLASS_BRIDGE_PCI: if ( !pos ) return DEV_TYPE_LEGACY_PCI_BRIDGE; creg = pci_conf_read16(seg, bus, d, f, pos + PCI_EXP_FLAGS); - switch ( (creg & PCI_EXP_FLAGS_TYPE) >> 4 ) + switch ((creg & PCI_EXP_FLAGS_TYPE) >> 4) { case PCI_EXP_TYPE_PCI_BRIDGE: return DEV_TYPE_PCIe2PCI_BRIDGE; @@ -1033,8 +1040,7 @@ void pci_check_disable_device(u16 seg, u8 bus, u8 devfn) pdev = pci_get_real_pdev(seg, bus, devfn); if ( pdev ) { - if ( now < pdev->fault.time || - now - pdev->fault.time > MILLISECS(10) ) + if ( now < pdev->fault.time || now - pdev->fault.time > MILLISECS(10) ) pdev->fault.count >>= 1; pdev->fault.time = now; if ( ++pdev->fault.count < PT_FAULT_THRESHOLD ) @@ -1050,8 +1056,8 @@ void pci_check_disable_device(u16 seg, u8 bus, u8 devfn) devfn = pdev->devfn; cword = pci_conf_read16(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), PCI_COMMAND); - pci_conf_write16(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), - PCI_COMMAND, cword & ~PCI_COMMAND_MASTER); + pci_conf_write16(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), PCI_COMMAND, + cword & ~PCI_COMMAND_MASTER); } /* @@ -1079,13 +1085,15 @@ static int __init _scan_pci_devices(struct pci_seg *pseg, void *arg) pdev = alloc_pdev(pseg, bus, PCI_DEVFN(dev, func)); if ( !pdev ) { - printk(XENLOG_WARNING "%04x:%02x:%02x.%u: alloc_pdev failed\n", + printk(XENLOG_WARNING + "%04x:%02x:%02x.%u: alloc_pdev failed\n", pseg->nr, bus, dev, func); return -ENOMEM; } if ( !func && !(pci_conf_read8(pseg->nr, bus, dev, func, - PCI_HEADER_TYPE) & 0x80) ) + PCI_HEADER_TYPE) & + 0x80) ) break; } } @@ -1105,7 +1113,8 @@ int __init scan_pci_devices(void) return ret; } -struct setup_hwdom { +struct setup_hwdom +{ struct domain *d; int (*handler)(u8 devfn, struct pci_dev *); }; @@ -1136,7 +1145,8 @@ static void __hwdom_init setup_one_hwdom_device(const struct setup_hwdom *ctxt, ctxt->d->domain_id, err); } -static int __hwdom_init _setup_hwdom_pci_devices(struct pci_seg *pseg, void *arg) +static int __hwdom_init _setup_hwdom_pci_devices(struct pci_seg *pseg, + void *arg) { struct setup_hwdom *ctxt = arg; int bus, devfn; @@ -1164,8 +1174,8 @@ static int __hwdom_init _setup_hwdom_pci_devices(struct pci_seg *pseg, void *arg } else if ( pdev->domain != ctxt->d ) printk(XENLOG_WARNING "Dom%d owning %04x:%02x:%02x.%u?\n", - pdev->domain->domain_id, pseg->nr, bus, - PCI_SLOT(devfn), PCI_FUNC(devfn)); + pdev->domain->domain_id, pseg->nr, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn)); if ( iommu_verbose ) { @@ -1186,10 +1196,11 @@ static int __hwdom_init _setup_hwdom_pci_devices(struct pci_seg *pseg, void *arg return 0; } -void __hwdom_init setup_hwdom_pci_devices( - struct domain *d, int (*handler)(u8 devfn, struct pci_dev *)) +void __hwdom_init setup_hwdom_pci_devices(struct domain *d, + int (*handler)(u8 devfn, + struct pci_dev *)) { - struct setup_hwdom ctxt = { .d = d, .handler = handler }; + struct setup_hwdom ctxt = {.d = d, .handler = handler}; pcidevs_lock(); pci_segments_iterate(_setup_hwdom_pci_devices, &ctxt); @@ -1204,25 +1215,23 @@ static int hest_match_pci(const struct acpi_hest_aer_common *p, 
const struct pci_dev *pdev) { return ACPI_HEST_SEGMENT(p->bus) == pdev->seg && - ACPI_HEST_BUS(p->bus) == pdev->bus && - p->device == PCI_SLOT(pdev->devfn) && - p->function == PCI_FUNC(pdev->devfn); + ACPI_HEST_BUS(p->bus) == pdev->bus && + p->device == PCI_SLOT(pdev->devfn) && + p->function == PCI_FUNC(pdev->devfn); } static bool_t hest_match_type(const struct acpi_hest_header *hest_hdr, const struct pci_dev *pdev) { - unsigned int pos = pci_find_cap_offset(pdev->seg, pdev->bus, - PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn), - PCI_CAP_ID_EXP); - u8 pcie = MASK_EXTR(pci_conf_read16(pdev->seg, pdev->bus, - PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn), - pos + PCI_EXP_FLAGS), - PCI_EXP_FLAGS_TYPE); - - switch ( hest_hdr->type ) + unsigned int pos = + pci_find_cap_offset(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn), PCI_CAP_ID_EXP); + u8 pcie = + MASK_EXTR(pci_conf_read16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn), pos + PCI_EXP_FLAGS), + PCI_EXP_FLAGS_TYPE); + + switch (hest_hdr->type) { case ACPI_HEST_TYPE_AER_ROOT_PORT: return pcie == PCI_EXP_TYPE_ROOT_PORT; @@ -1230,14 +1239,15 @@ static bool_t hest_match_type(const struct acpi_hest_header *hest_hdr, return pcie == PCI_EXP_TYPE_ENDPOINT; case ACPI_HEST_TYPE_AER_BRIDGE: return pci_conf_read16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn), PCI_CLASS_DEVICE) == - PCI_CLASS_BRIDGE_PCI; + PCI_FUNC(pdev->devfn), + PCI_CLASS_DEVICE) == PCI_CLASS_BRIDGE_PCI; } return 0; } -struct aer_hest_parse_info { +struct aer_hest_parse_info +{ const struct pci_dev *pdev; bool_t firmware_first; }; @@ -1274,9 +1284,8 @@ static int aer_hest_parse(const struct acpi_hest_header *hest_hdr, void *data) } /* Otherwise, check the specific device */ - if ( p->flags & ACPI_HEST_GLOBAL ? - hest_match_type(hest_hdr, info->pdev) : - hest_match_pci(p, info->pdev) ) + if ( p->flags & ACPI_HEST_GLOBAL ? hest_match_type(hest_hdr, info->pdev) + : hest_match_pci(p, info->pdev) ) { info->firmware_first = ff; return 1; @@ -1287,12 +1296,11 @@ static int aer_hest_parse(const struct acpi_hest_header *hest_hdr, void *data) bool_t pcie_aer_get_firmware_first(const struct pci_dev *pdev) { - struct aer_hest_parse_info info = { .pdev = pdev }; + struct aer_hest_parse_info info = {.pdev = pdev}; return pci_find_cap_offset(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), PCI_CAP_ID_EXP) && - apei_hest_parse(aer_hest_parse, &info) >= 0 && - info.firmware_first; + apei_hest_parse(aer_hest_parse, &info) >= 0 && info.firmware_first; } #endif @@ -1303,15 +1311,14 @@ static int _dump_pci_devices(struct pci_seg *pseg, void *arg) printk("==== segment %04x ====\n", pseg->nr); - list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list ) + list_for_each_entry (pdev, &pseg->alldevs_list, alldevs_list) { - printk("%04x:%02x:%02x.%u - dom %-3d - node %-3d - MSIs < ", - pseg->nr, pdev->bus, - PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), + printk("%04x:%02x:%02x.%u - dom %-3d - node %-3d - MSIs < ", pseg->nr, + pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), pdev->domain ? pdev->domain->domain_id : -1, (pdev->node != NUMA_NO_NODE) ? 
pdev->node : -1); - list_for_each_entry ( msi, &pdev->msi_list, list ) - printk("%d ", msi->irq); + list_for_each_entry (msi, &pdev->msi_list, list) + printk("%d ", msi->irq); printk(">\n"); } @@ -1333,15 +1340,13 @@ static int __init setup_dump_pcidevs(void) } __initcall(setup_dump_pcidevs); -int iommu_update_ire_from_msi( - struct msi_desc *msi_desc, struct msi_msg *msg) +int iommu_update_ire_from_msi(struct msi_desc *msi_desc, struct msi_msg *msg) { - return iommu_intremap - ? iommu_get_ops()->update_ire_from_msi(msi_desc, msg) : 0; + return iommu_intremap ? iommu_get_ops()->update_ire_from_msi(msi_desc, msg) + : 0; } -void iommu_read_msi_from_ire( - struct msi_desc *msi_desc, struct msi_msg *msg) +void iommu_read_msi_from_ire(struct msi_desc *msi_desc, struct msi_msg *msg) { if ( iommu_intremap ) iommu_get_ops()->read_msi_from_ire(msi_desc, msg); @@ -1366,7 +1371,7 @@ static int iommu_add_device(struct pci_dev *pdev) if ( rc || !pdev->phantom_stride ) return rc; - for ( devfn = pdev->devfn ; ; ) + for ( devfn = pdev->devfn;; ) { devfn += pdev->phantom_stride; if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) ) @@ -1407,7 +1412,7 @@ static int iommu_remove_device(struct pci_dev *pdev) if ( !iommu_enabled || !hd->platform_ops ) return 0; - for ( devfn = pdev->devfn ; pdev->phantom_stride; ) + for ( devfn = pdev->devfn; pdev->phantom_stride; ) { int rc; @@ -1450,7 +1455,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag) if ( !iommu_enabled || !hd->platform_ops ) return 0; - /* Prevent device assign if mem paging or mem sharing have been + /* Prevent device assign if mem paging or mem sharing have been * enabled for this domain */ if ( unlikely(d->arch.hvm.mem_sharing_enabled || vm_event_check_ring(d->vm_event_paging) || @@ -1479,7 +1484,8 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag) pdev->fault.count = 0; - if ( (rc = hd->platform_ops->assign_device(d, devfn, pci_to_dev(pdev), flag)) ) + if ( (rc = hd->platform_ops->assign_device(d, devfn, pci_to_dev(pdev), + flag)) ) goto done; for ( ; pdev->phantom_stride; rc = 0 ) @@ -1489,12 +1495,12 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag) break; rc = hd->platform_ops->assign_device(d, devfn, pci_to_dev(pdev), flag); if ( rc ) - printk(XENLOG_G_WARNING "d%d: assign %04x:%02x:%02x.%u failed (%d)\n", - d->domain_id, seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), - rc); + printk( + XENLOG_G_WARNING "d%d: assign %04x:%02x:%02x.%u failed (%d)\n", + d->domain_id, seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), rc); } - done: +done: if ( !has_arch_pdevs(d) && has_iommu_pt(d) ) iommu_teardown(d); pcidevs_unlock(); @@ -1551,9 +1557,9 @@ int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn) return ret; } -static int iommu_get_device_group( - struct domain *d, u16 seg, u8 bus, u8 devfn, - XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs) +static int iommu_get_device_group(struct domain *d, u16 seg, u8 bus, u8 devfn, + XEN_GUEST_HANDLE_64(uint32) buf, + int max_sdevs) { const struct domain_iommu *hd = dom_iommu(d); struct pci_dev *pdev; @@ -1568,13 +1574,14 @@ static int iommu_get_device_group( group_id = ops->get_device_group_id(seg, bus, devfn); pcidevs_lock(); - for_each_pdev( d, pdev ) + for_each_pdev (d, pdev) { if ( (pdev->seg != seg) || ((pdev->bus == bus) && (pdev->devfn == devfn)) ) continue; - if ( xsm_get_device_group(XSM_HOOK, (seg << 16) | (pdev->bus << 8) | pdev->devfn) ) + if ( xsm_get_device_group(XSM_HOOK, (seg << 16) | (pdev->bus << 
8) | + pdev->devfn) ) continue; sdev_id = ops->get_device_group_id(seg, pdev->bus, pdev->devfn); @@ -1616,8 +1623,7 @@ void iommu_dev_iotlb_flush_timeout(struct domain *d, struct pci_dev *pdev) _pci_hide_device(pdev); if ( !d->is_shutting_down && printk_ratelimit() ) - printk(XENLOG_ERR - "dom%d: ATS device %04x:%02x:%02x.%u flush failed\n", + printk(XENLOG_ERR "dom%d: ATS device %04x:%02x:%02x.%u flush failed\n", d->domain_id, pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); if ( !is_hardware_domain(d) ) @@ -1626,16 +1632,15 @@ void iommu_dev_iotlb_flush_timeout(struct domain *d, struct pci_dev *pdev) pcidevs_unlock(); } -int iommu_do_pci_domctl( - struct xen_domctl *domctl, struct domain *d, - XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) +int iommu_do_pci_domctl(struct xen_domctl *domctl, struct domain *d, + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) { u16 seg; u8 bus, devfn; int ret = 0; uint32_t machine_sbdf; - switch ( domctl->cmd ) + switch (domctl->cmd) { unsigned int flags; @@ -1644,7 +1649,8 @@ int iommu_do_pci_domctl( u32 max_sdevs; XEN_GUEST_HANDLE_64(uint32) sdevs; - ret = xsm_get_device_group(XSM_HOOK, domctl->u.get_device_group.machine_sbdf); + ret = xsm_get_device_group(XSM_HOOK, + domctl->u.get_device_group.machine_sbdf); if ( ret ) break; @@ -1689,8 +1695,8 @@ int iommu_do_pci_domctl( ret = -EINVAL; flags = domctl->u.assign_device.flags; if ( domctl->cmd == XEN_DOMCTL_assign_device - ? d->is_dying || (flags & ~XEN_DOMCTL_DEV_RDM_RELAXED) - : flags ) + ? d->is_dying || (flags & ~XEN_DOMCTL_DEV_RDM_RELAXED) + : flags ) break; machine_sbdf = domctl->u.assign_device.u.pci.machine_sbdf; @@ -1718,13 +1724,13 @@ int iommu_do_pci_domctl( if ( !ret ) ret = assign_device(d, seg, bus, devfn, flags); if ( ret == -ERESTART ) - ret = hypercall_create_continuation(__HYPERVISOR_domctl, - "h", u_domctl); + ret = hypercall_create_continuation(__HYPERVISOR_domctl, "h", + u_domctl); else if ( ret ) - printk(XENLOG_G_ERR "XEN_DOMCTL_assign_device: " - "assign %04x:%02x:%02x.%u to dom%d failed (%d)\n", - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), - d->domain_id, ret); + printk( + XENLOG_G_ERR "XEN_DOMCTL_assign_device: " + "assign %04x:%02x:%02x.%u to dom%d failed (%d)\n", + seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), d->domain_id, ret); break; @@ -1760,8 +1766,8 @@ int iommu_do_pci_domctl( if ( ret ) printk(XENLOG_G_ERR "deassign %04x:%02x:%02x.%u from dom%d failed (%d)\n", - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), - d->domain_id, ret); + seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), d->domain_id, + ret); break; diff --git a/xen/drivers/passthrough/vtd/dmar.c b/xen/drivers/passthrough/vtd/dmar.c index 2372cd2c74..e7a149a325 100644 --- a/xen/drivers/passthrough/vtd/dmar.c +++ b/xen/drivers/passthrough/vtd/dmar.c @@ -38,8 +38,8 @@ #define PREFIX VTDPREFIX "ACPI DMAR:" #define DEBUG -#define MIN_SCOPE_LEN (sizeof(struct acpi_dmar_device_scope) + \ - sizeof(struct acpi_dmar_pci_path)) +#define MIN_SCOPE_LEN \ + (sizeof(struct acpi_dmar_device_scope) + sizeof(struct acpi_dmar_pci_path)) LIST_HEAD_READ_MOSTLY(acpi_drhd_units); LIST_HEAD_READ_MOSTLY(acpi_rmrr_units); @@ -54,7 +54,7 @@ static void __init dmar_scope_add_buses(struct dmar_scope *scope, u16 sec_bus, u16 sub_bus) { sub_bus &= 0xff; - if (sec_bus > sub_bus) + if ( sec_bus > sub_bus ) return; while ( sec_bus <= sub_bus ) @@ -96,20 +96,20 @@ static void __init disable_all_dmar_units(void) struct acpi_rmrr_unit *rmrr, *_rmrr; struct acpi_atsr_unit *atsr, *_atsr; - list_for_each_entry_safe ( drhd, _drhd, 
&acpi_drhd_units, list ) + list_for_each_entry_safe(drhd, _drhd, &acpi_drhd_units, list) { list_del(&drhd->list); scope_devices_free(&drhd->scope); iommu_free(drhd); xfree(drhd); } - list_for_each_entry_safe ( rmrr, _rmrr, &acpi_rmrr_units, list ) + list_for_each_entry_safe(rmrr, _rmrr, &acpi_rmrr_units, list) { list_del(&rmrr->list); scope_devices_free(&rmrr->scope); xfree(rmrr); } - list_for_each_entry_safe ( atsr, _atsr, &acpi_atsr_units, list ) + list_for_each_entry_safe(atsr, _atsr, &acpi_atsr_units, list) { list_del(&atsr->list); scope_devices_free(&atsr->scope); @@ -117,57 +117,58 @@ static void __init disable_all_dmar_units(void) } } -static int acpi_ioapic_device_match( - struct list_head *ioapic_list, unsigned int apic_id) +static int acpi_ioapic_device_match(struct list_head *ioapic_list, + unsigned int apic_id) { struct acpi_ioapic_unit *ioapic; - list_for_each_entry( ioapic, ioapic_list, list ) { - if (ioapic->apic_id == apic_id) + list_for_each_entry (ioapic, ioapic_list, list) + { + if ( ioapic->apic_id == apic_id ) return 1; } return 0; } -struct acpi_drhd_unit * ioapic_to_drhd(unsigned int apic_id) +struct acpi_drhd_unit *ioapic_to_drhd(unsigned int apic_id) { struct acpi_drhd_unit *drhd; - list_for_each_entry( drhd, &acpi_drhd_units, list ) + list_for_each_entry (drhd, &acpi_drhd_units, list) if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) ) return drhd; return NULL; } -struct acpi_drhd_unit * iommu_to_drhd(struct iommu *iommu) +struct acpi_drhd_unit *iommu_to_drhd(struct iommu *iommu) { struct acpi_drhd_unit *drhd; if ( iommu == NULL ) return NULL; - list_for_each_entry( drhd, &acpi_drhd_units, list ) + list_for_each_entry (drhd, &acpi_drhd_units, list) if ( drhd->iommu == iommu ) return drhd; return NULL; } -struct iommu * ioapic_to_iommu(unsigned int apic_id) +struct iommu *ioapic_to_iommu(unsigned int apic_id) { struct acpi_drhd_unit *drhd; - list_for_each_entry( drhd, &acpi_drhd_units, list ) + list_for_each_entry (drhd, &acpi_drhd_units, list) if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) ) return drhd->iommu; return NULL; } -static bool_t acpi_hpet_device_match( - struct list_head *list, unsigned int hpet_id) +static bool_t acpi_hpet_device_match(struct list_head *list, + unsigned int hpet_id) { struct acpi_hpet_unit *hpet; - list_for_each_entry( hpet, list, list ) - if (hpet->id == hpet_id) + list_for_each_entry (hpet, list, list) + if ( hpet->id == hpet_id ) return 1; return 0; } @@ -176,7 +177,7 @@ struct acpi_drhd_unit *hpet_to_drhd(unsigned int hpet_id) { struct acpi_drhd_unit *drhd; - list_for_each_entry( drhd, &acpi_drhd_units, list ) + list_for_each_entry (drhd, &acpi_drhd_units, list) if ( acpi_hpet_device_match(&drhd->hpet_list, hpet_id) ) return drhd; return NULL; @@ -228,12 +229,12 @@ struct acpi_drhd_unit *acpi_find_matched_drhd_unit(const struct pci_dev *pdev) devfn = pdev->devfn; } - list_for_each_entry ( drhd, &acpi_drhd_units, list ) + list_for_each_entry (drhd, &acpi_drhd_units, list) { if ( drhd->segment != pdev->seg ) continue; - for (i = 0; i < drhd->scope.devices_cnt; i++) + for ( i = 0; i < drhd->scope.devices_cnt; i++ ) if ( drhd->scope.devices[i] == PCI_BDF2(bus, devfn) ) return drhd; @@ -251,7 +252,7 @@ struct acpi_atsr_unit *acpi_find_matched_atsr_unit(const struct pci_dev *pdev) struct acpi_atsr_unit *atsr; struct acpi_atsr_unit *all_ports = NULL; - list_for_each_entry ( atsr, &acpi_atsr_units, list ) + list_for_each_entry (atsr, &acpi_atsr_units, list) { if ( atsr->segment != pdev->seg ) continue; @@ -265,14 
+266,14 @@ struct acpi_atsr_unit *acpi_find_matched_atsr_unit(const struct pci_dev *pdev) return all_ports; } -struct acpi_rhsa_unit * drhd_to_rhsa(struct acpi_drhd_unit *drhd) +struct acpi_rhsa_unit *drhd_to_rhsa(struct acpi_drhd_unit *drhd) { struct acpi_rhsa_unit *rhsa; if ( drhd == NULL ) return NULL; - list_for_each_entry ( rhsa, &acpi_rhsa_units, list ) + list_for_each_entry (rhsa, &acpi_rhsa_units, list) { if ( rhsa->address == drhd->address ) return rhsa; @@ -315,17 +316,17 @@ static int __init scope_device_count(const void *start, const void *end) return count; } - -static int __init acpi_parse_dev_scope( - const void *start, const void *end, struct dmar_scope *scope, - int type, u16 seg) +static int __init acpi_parse_dev_scope(const void *start, const void *end, + struct dmar_scope *scope, int type, + u16 seg) { struct acpi_ioapic_unit *acpi_ioapic_unit; const struct acpi_dmar_device_scope *acpi_scope; u16 bus, sub_bus, sec_bus; const struct acpi_dmar_pci_path *path; - struct acpi_drhd_unit *drhd = type == DMAR_TYPE ? - container_of(scope, struct acpi_drhd_unit, scope) : NULL; + struct acpi_drhd_unit *drhd = + type == DMAR_TYPE ? container_of(scope, struct acpi_drhd_unit, scope) + : NULL; int depth, cnt, didx = 0, ret; if ( (cnt = scope_device_count(start, end)) < 0 ) @@ -353,7 +354,7 @@ static int __init acpi_parse_dev_scope( path++; } - switch ( acpi_scope->entry_type ) + switch (acpi_scope->entry_type) { case ACPI_DMAR_SCOPE_TYPE_BRIDGE: sec_bus = pci_conf_read8(seg, bus, path->dev, path->fn, @@ -363,16 +364,16 @@ static int __init acpi_parse_dev_scope( if ( iommu_verbose ) printk(VTDPREFIX " bridge: %04x:%02x:%02x.%u start=%x sec=%x sub=%x\n", - seg, bus, path->dev, path->fn, - acpi_scope->bus, sec_bus, sub_bus); + seg, bus, path->dev, path->fn, acpi_scope->bus, sec_bus, + sub_bus); dmar_scope_add_buses(scope, sec_bus, sub_bus); break; case ACPI_DMAR_SCOPE_TYPE_HPET: if ( iommu_verbose ) - printk(VTDPREFIX " MSI HPET: %04x:%02x:%02x.%u\n", - seg, bus, path->dev, path->fn); + printk(VTDPREFIX " MSI HPET: %04x:%02x:%02x.%u\n", seg, bus, + path->dev, path->fn); if ( drhd ) { @@ -393,8 +394,8 @@ static int __init acpi_parse_dev_scope( case ACPI_DMAR_SCOPE_TYPE_ENDPOINT: if ( iommu_verbose ) - printk(VTDPREFIX " endpoint: %04x:%02x:%02x.%u\n", - seg, bus, path->dev, path->fn); + printk(VTDPREFIX " endpoint: %04x:%02x:%02x.%u\n", seg, bus, + path->dev, path->fn); if ( drhd ) { @@ -407,8 +408,8 @@ static int __init acpi_parse_dev_scope( case ACPI_DMAR_SCOPE_TYPE_IOAPIC: if ( iommu_verbose ) - printk(VTDPREFIX " IOAPIC: %04x:%02x:%02x.%u\n", - seg, bus, path->dev, path->fn); + printk(VTDPREFIX " IOAPIC: %04x:%02x:%02x.%u\n", seg, bus, + path->dev, path->fn); if ( drhd ) { @@ -434,19 +435,19 @@ static int __init acpi_parse_dev_scope( } scope->devices[didx++] = PCI_BDF(bus, path->dev, path->fn); start += acpi_scope->length; - } + } ret = 0; - out: +out: if ( ret ) scope_devices_free(scope); return ret; } -static int __init acpi_dmar_check_length( - const struct acpi_dmar_header *h, unsigned int min_len) +static int __init acpi_dmar_check_length(const struct acpi_dmar_header *h, + unsigned int min_len) { if ( h->length >= min_len ) return 0; @@ -455,8 +456,7 @@ static int __init acpi_dmar_check_length( return -EINVAL; } -static int __init -acpi_parse_one_drhd(struct acpi_dmar_header *header) +static int __init acpi_parse_one_drhd(struct acpi_dmar_header *header) { struct acpi_dmar_hardware_unit *drhd = container_of(header, struct acpi_dmar_hardware_unit, header); @@ -481,7 +481,7 @@ 
acpi_parse_one_drhd(struct acpi_dmar_header *header) INIT_LIST_HEAD(&dmaru->ioapic_list); INIT_LIST_HEAD(&dmaru->hpet_list); if ( iommu_verbose ) - printk(VTDPREFIX " dmaru->address = %"PRIx64"\n", dmaru->address); + printk(VTDPREFIX " dmaru->address = %" PRIx64 "\n", dmaru->address); ret = iommu_alloc(dmaru); if ( ret ) @@ -489,8 +489,8 @@ acpi_parse_one_drhd(struct acpi_dmar_header *header) dev_scope_start = (void *)(drhd + 1); dev_scope_end = ((void *)drhd) + header->length; - ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end, - &dmaru->scope, DMAR_TYPE, drhd->segment); + ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end, &dmaru->scope, + DMAR_TYPE, drhd->segment); if ( dmaru->include_all ) { @@ -537,7 +537,8 @@ acpi_parse_one_drhd(struct acpi_dmar_header *header) if ( !pci_device_detect(drhd->segment, b, d, f) ) printk(XENLOG_WARNING VTDPREFIX - " Non-existent device (%04x:%02x:%02x.%u) in this DRHD's scope!\n", + " Non-existent device (%04x:%02x:%02x.%u) in this " + "DRHD's scope!\n", drhd->segment, b, d, f); } @@ -575,9 +576,9 @@ static int register_one_rmrr(struct acpi_rmrr_unit *rmrru) { dprintk(XENLOG_WARNING VTDPREFIX, " Non-existent device (%04x:%02x:%02x.%u) is reported" - " in RMRR (%"PRIx64", %"PRIx64")'s scope!\n", - rmrru->segment, b, d, f, - rmrru->base_address, rmrru->end_address); + " in RMRR (%" PRIx64 ", %" PRIx64 ")'s scope!\n", + rmrru->segment, b, d, f, rmrru->base_address, + rmrru->end_address); ignore = true; } else @@ -590,7 +591,7 @@ static int register_one_rmrr(struct acpi_rmrr_unit *rmrru) if ( ignore ) { dprintk(XENLOG_WARNING VTDPREFIX, - " Ignore the RMRR (%"PRIx64", %"PRIx64") due to " + " Ignore the RMRR (%" PRIx64 ", %" PRIx64 ") due to " "devices under its scope are not PCI discoverable!\n", rmrru->base_address, rmrru->end_address); scope_devices_free(&rmrru->scope); @@ -600,7 +601,7 @@ static int register_one_rmrr(struct acpi_rmrr_unit *rmrru) else if ( rmrru->base_address > rmrru->end_address ) { dprintk(XENLOG_WARNING VTDPREFIX, - " The RMRR (%"PRIx64", %"PRIx64") is incorrect!\n", + " The RMRR (%" PRIx64 ", %" PRIx64 ") is incorrect!\n", rmrru->base_address, rmrru->end_address); scope_devices_free(&rmrru->scope); xfree(rmrru); @@ -610,7 +611,8 @@ static int register_one_rmrr(struct acpi_rmrr_unit *rmrru) { if ( iommu_verbose ) dprintk(VTDPREFIX, - " RMRR region: base_addr %"PRIx64" end_addr %"PRIx64"\n", + " RMRR region: base_addr %" PRIx64 " end_addr %" PRIx64 + "\n", rmrru->base_address, rmrru->end_address); acpi_register_rmrr_unit(rmrru); } @@ -618,8 +620,7 @@ static int register_one_rmrr(struct acpi_rmrr_unit *rmrru) return ret; } -static int __init -acpi_parse_one_rmrr(struct acpi_dmar_header *header) +static int __init acpi_parse_one_rmrr(struct acpi_dmar_header *header) { struct acpi_dmar_reserved_memory *rmrr = container_of(header, struct acpi_dmar_reserved_memory, header); @@ -631,15 +632,16 @@ acpi_parse_one_rmrr(struct acpi_dmar_header *header) if ( (ret = acpi_dmar_check_length(header, sizeof(*rmrr))) != 0 ) return ret; - list_for_each_entry(rmrru, &acpi_rmrr_units, list) - if ( base_addr <= rmrru->end_address && rmrru->base_address <= end_addr ) - { - printk(XENLOG_ERR VTDPREFIX - "Overlapping RMRRs [%"PRIx64",%"PRIx64"] and [%"PRIx64",%"PRIx64"]\n", - rmrru->base_address, rmrru->end_address, - base_addr, end_addr); - return -EEXIST; - } + list_for_each_entry (rmrru, &acpi_rmrr_units, list) + if ( base_addr <= rmrru->end_address && + rmrru->base_address <= end_addr ) + { + printk( + XENLOG_ERR VTDPREFIX "Overlapping 
RMRRs [%" PRIx64 ",%" PRIx64 + "] and [%" PRIx64 ",%" PRIx64 "]\n", + rmrru->base_address, rmrru->end_address, base_addr, end_addr); + return -EEXIST; + } /* This check is here simply to detect when RMRR values are * not properly represented in the system memory map and @@ -649,9 +651,10 @@ acpi_parse_one_rmrr(struct acpi_dmar_header *header) (!page_is_ram_type(paddr_to_pfn(end_addr), RAM_TYPE_RESERVED)) ) { printk(XENLOG_WARNING VTDPREFIX - " RMRR address range %"PRIx64"..%"PRIx64" not in reserved memory;" + " RMRR address range %" PRIx64 "..%" PRIx64 + " not in reserved memory;" " need \"iommu_inclusive_mapping=1\"?\n", - base_addr, end_addr); + base_addr, end_addr); } rmrru = xzalloc(struct acpi_rmrr_unit); @@ -663,9 +666,9 @@ acpi_parse_one_rmrr(struct acpi_dmar_header *header) rmrru->segment = rmrr->segment; dev_scope_start = (void *)(rmrr + 1); - dev_scope_end = ((void *)rmrr) + header->length; - ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end, - &rmrru->scope, RMRR_TYPE, rmrr->segment); + dev_scope_end = ((void *)rmrr) + header->length; + ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end, &rmrru->scope, + RMRR_TYPE, rmrr->segment); if ( !ret && (rmrru->scope.devices_cnt != 0) ) { @@ -677,7 +680,6 @@ acpi_parse_one_rmrr(struct acpi_dmar_header *header) */ if ( ret > 0 ) ret = 0; - } else xfree(rmrru); @@ -685,8 +687,7 @@ acpi_parse_one_rmrr(struct acpi_dmar_header *header) return ret; } -static int __init -acpi_parse_one_atsr(struct acpi_dmar_header *header) +static int __init acpi_parse_one_atsr(struct acpi_dmar_header *header) { struct acpi_dmar_atsr *atsr = container_of(header, struct acpi_dmar_atsr, header); @@ -709,7 +710,7 @@ acpi_parse_one_atsr(struct acpi_dmar_header *header) if ( !atsru->all_ports ) { dev_scope_start = (void *)(atsr + 1); - dev_scope_end = ((void *)atsr) + header->length; + dev_scope_end = ((void *)atsr) + header->length; ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end, &atsru->scope, ATSR_TYPE, atsr->segment); } @@ -738,8 +739,7 @@ acpi_parse_one_atsr(struct acpi_dmar_header *header) return ret; } -static int __init -acpi_parse_one_rhsa(struct acpi_dmar_header *header) +static int __init acpi_parse_one_rhsa(struct acpi_dmar_header *header) { struct acpi_dmar_rhsa *rhsa = container_of(header, struct acpi_dmar_rhsa, header); @@ -757,8 +757,8 @@ acpi_parse_one_rhsa(struct acpi_dmar_header *header) rhsau->proximity_domain = rhsa->proximity_domain; list_add_tail(&rhsau->list, &acpi_rhsa_units); if ( iommu_verbose ) - printk(VTDPREFIX - " rhsau->address: %"PRIx64" rhsau->proximity_domain: %"PRIx32"\n", + printk(VTDPREFIX " rhsau->address: %" PRIx64 + " rhsau->proximity_domain: %" PRIx32 "\n", rhsau->address, rhsau->proximity_domain); return ret; @@ -799,7 +799,7 @@ static int __init acpi_parse_dmar(struct acpi_table_header *table) if ( ret ) break; - switch ( entry_header->type ) + switch (entry_header->type) { case ACPI_DMAR_TYPE_HARDWARE_UNIT: if ( iommu_verbose ) @@ -835,8 +835,7 @@ static int __init acpi_parse_dmar(struct acpi_table_header *table) if ( ret ) { - printk(XENLOG_WARNING - "Failed to parse ACPI DMAR. Disabling VT-d.\n"); + printk(XENLOG_WARNING "Failed to parse ACPI DMAR. Disabling VT-d.\n"); disable_all_dmar_units(); } @@ -851,7 +850,8 @@ out: /* RMRR units derived from command line rmrr option. 
*/ #define MAX_USER_RMRR_DEV 20 -struct user_rmrr { +struct user_rmrr +{ struct list_head list; unsigned long base_pfn, end_pfn; unsigned int dev_count; @@ -879,8 +879,7 @@ static int __init add_user_rmrr(void) if ( base > end ) { - printk(XENLOG_ERR VTDPREFIX - "Invalid RMRR Range "ERMRRU_FMT"\n", + printk(XENLOG_ERR VTDPREFIX "Invalid RMRR Range " ERMRRU_FMT "\n", ERMRRU_ARG(user_rmrrs[i])); continue; } @@ -888,20 +887,20 @@ static int __init add_user_rmrr(void) if ( (end - base) >= MAX_USER_RMRR_PAGES ) { printk(XENLOG_ERR VTDPREFIX - "RMRR range "ERMRRU_FMT" exceeds "\ - __stringify(MAX_USER_RMRR_PAGES)" pages\n", + "RMRR range " ERMRRU_FMT + " exceeds " __stringify(MAX_USER_RMRR_PAGES) " pages\n", ERMRRU_ARG(user_rmrrs[i])); continue; } overlap = false; - list_for_each_entry(rmrru, &acpi_rmrr_units, list) + list_for_each_entry (rmrru, &acpi_rmrr_units, list) { if ( pfn_to_paddr(base) <= rmrru->end_address && rmrru->base_address <= pfn_to_paddr(end) ) { - printk(XENLOG_ERR VTDPREFIX - "Overlapping RMRRs: "ERMRRU_FMT" and [%lx-%lx]\n", + printk(XENLOG_ERR VTDPREFIX "Overlapping RMRRs: " ERMRRU_FMT + " and [%lx-%lx]\n", ERMRRU_ARG(user_rmrrs[i]), paddr_to_pfn(rmrru->base_address), paddr_to_pfn(rmrru->end_address)); @@ -913,12 +912,11 @@ static int __init add_user_rmrr(void) if ( overlap ) continue; - do - { + do { if ( !mfn_valid(_mfn(base)) ) { printk(XENLOG_ERR VTDPREFIX - "Invalid pfn in RMRR range "ERMRRU_FMT"\n", + "Invalid pfn in RMRR range " ERMRRU_FMT "\n", ERMRRU_ARG(user_rmrrs[i])); break; } @@ -948,7 +946,7 @@ static int __init add_user_rmrr(void) if ( seg != PCI_SEG(user_rmrrs[i].sbdf[0]) ) { printk(XENLOG_ERR VTDPREFIX - "Segments are not equal for RMRR range "ERMRRU_FMT"\n", + "Segments are not equal for RMRR range " ERMRRU_FMT "\n", ERMRRU_ARG(user_rmrrs[i])); scope_devices_free(&rmrr->scope); xfree(rmrr); @@ -963,7 +961,7 @@ static int __init add_user_rmrr(void) if ( register_one_rmrr(rmrr) ) printk(XENLOG_ERR VTDPREFIX - "Could not register RMMR range "ERMRRU_FMT"\n", + "Could not register RMMR range " ERMRRU_FMT "\n", ERMRRU_ARG(user_rmrrs[i])); } @@ -981,10 +979,11 @@ int __init acpi_dmar_init(void) acpi_native_uint dmar_len; int ret; - if ( ACPI_SUCCESS(acpi_get_table_phys(ACPI_SIG_DMAR, 0, - &dmar_addr, &dmar_len)) ) + if ( ACPI_SUCCESS( + acpi_get_table_phys(ACPI_SIG_DMAR, 0, &dmar_addr, &dmar_len)) ) { - map_pages_to_xen((unsigned long)__va(dmar_addr), maddr_to_mfn(dmar_addr), + map_pages_to_xen((unsigned long)__va(dmar_addr), + maddr_to_mfn(dmar_addr), PFN_UP(dmar_addr + dmar_len) - PFN_DOWN(dmar_addr), PAGE_HYPERVISOR); dmar_table = __va(dmar_addr); @@ -1003,7 +1002,7 @@ void acpi_dmar_reinstate(void) uint32_t sig = 0x52414d44; /* "DMAR" */ if ( dmar_table ) - write_atomic((uint32_t*)&dmar_table->signature[0], sig); + write_atomic((uint32_t *)&dmar_table->signature[0], sig); } void acpi_dmar_zap(void) @@ -1011,7 +1010,7 @@ void acpi_dmar_zap(void) uint32_t sig = 0x44414d52; /* "RMAD" - doesn't alter table checksum */ if ( dmar_table ) - write_atomic((uint32_t*)&dmar_table->signature[0], sig); + write_atomic((uint32_t *)&dmar_table->signature[0], sig); } bool_t platform_supports_intremap(void) @@ -1034,7 +1033,7 @@ int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt) unsigned int i; u16 bdf; - for_each_rmrr_device ( rmrr, bdf, i ) + for_each_rmrr_device (rmrr, bdf, i) { int rc; diff --git a/xen/drivers/passthrough/vtd/intremap.c b/xen/drivers/passthrough/vtd/intremap.c index 838268d772..ea151182f2 100644 --- 
a/xen/drivers/passthrough/vtd/intremap.c +++ b/xen/drivers/passthrough/vtd/intremap.c @@ -31,28 +31,31 @@ #include #include -#define nr_ioapic_entries(i) nr_ioapic_entries[i] +#define nr_ioapic_entries(i) nr_ioapic_entries[i] /* * source validation type (SVT) */ -#define SVT_NO_VERIFY 0x0 /* no verification is required */ -#define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fiels */ -#define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */ +#define SVT_NO_VERIFY 0x0 /* no verification is required */ +#define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fiels */ +#define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */ /* * source-id qualifier (SQ) */ -#define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */ -#define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore - * the third least significant bit - */ -#define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore - * the second and third least significant bits - */ -#define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore - * the least three significant bits - */ +#define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */ +#define SQ_13_IGNORE_1 \ + 0x1 /* verify most significant 13 bits, ignore \ + * the third least significant bit \ + */ +#define SQ_13_IGNORE_2 \ + 0x2 /* verify most significant 13 bits, ignore \ + * the second and third least significant bits \ + */ +#define SQ_13_IGNORE_3 \ + 0x3 /* verify most significant 13 bits, ignore \ + * the least three significant bits \ + */ /* apic_pin_2_ir_idx[apicid][pin] = interrupt remapping table index */ static int **apic_pin_2_ir_idx; @@ -97,7 +100,7 @@ static u16 apicid_to_bdf(int apic_id) struct acpi_drhd_unit *drhd = ioapic_to_drhd(apic_id); struct acpi_ioapic_unit *acpi_ioapic_unit; - list_for_each_entry ( acpi_ioapic_unit, &drhd->ioapic_list, list ) + list_for_each_entry (acpi_ioapic_unit, &drhd->ioapic_list, list) if ( acpi_ioapic_unit->apic_id == apic_id ) return acpi_ioapic_unit->ioapic.info; @@ -110,16 +113,17 @@ static u16 hpetid_to_bdf(unsigned int hpet_id) struct acpi_drhd_unit *drhd = hpet_to_drhd(hpet_id); struct acpi_hpet_unit *acpi_hpet_unit; - list_for_each_entry ( acpi_hpet_unit, &drhd->hpet_list, list ) + list_for_each_entry (acpi_hpet_unit, &drhd->hpet_list, list) if ( acpi_hpet_unit->id == hpet_id ) return acpi_hpet_unit->bdf; - dprintk(XENLOG_ERR VTDPREFIX, "Didn't find the bdf for HPET %u!\n", hpet_id); + dprintk(XENLOG_ERR VTDPREFIX, "Didn't find the bdf for HPET %u!\n", + hpet_id); return 0; } -static void set_ire_sid(struct iremap_entry *ire, - unsigned int svt, unsigned int sq, unsigned int sid) +static void set_ire_sid(struct iremap_entry *ire, unsigned int svt, + unsigned int sq, unsigned int sid) { ire->remap.svt = svt; ire->remap.sq = sq; @@ -128,8 +132,7 @@ static void set_ire_sid(struct iremap_entry *ire, static void set_ioapic_source_id(int apic_id, struct iremap_entry *ire) { - set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16, - apicid_to_bdf(apic_id)); + set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16, apicid_to_bdf(apic_id)); } static void set_hpet_source_id(unsigned int id, struct iremap_entry *ire) @@ -155,12 +158,12 @@ bool_t __init iommu_supports_eim(void) if ( !ioapic_to_drhd(IO_APIC_ID(apic)) ) { dprintk(XENLOG_WARNING VTDPREFIX, - "There is not a DRHD for IOAPIC %#x (id: %#x)!\n", - apic, IO_APIC_ID(apic)); + "There is not a DRHD for IOAPIC %#x (id: %#x)!\n", apic, + IO_APIC_ID(apic)); return 0; } - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) if ( 
!ecap_queued_inval(drhd->iommu->ecap) || !ecap_intr_remap(drhd->iommu->ecap) || !ecap_eim(drhd->iommu->ecap) ) @@ -219,16 +222,16 @@ static void update_irte(struct iommu *iommu, struct iremap_entry *entry, /* Mark specified intr remap entry as free */ static void free_remap_entry(struct iommu *iommu, int index) { - struct iremap_entry *iremap_entry = NULL, *iremap_entries, new_ire = { }; + struct iremap_entry *iremap_entry = NULL, *iremap_entries, new_ire = {}; struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu); if ( index < 0 || index > IREMAP_ENTRY_NR - 1 ) return; - ASSERT( spin_is_locked(&ir_ctrl->iremap_lock) ); + ASSERT(spin_is_locked(&ir_ctrl->iremap_lock)); - GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index, - iremap_entries, iremap_entry); + GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index, iremap_entries, + iremap_entry); update_irte(iommu, iremap_entry, &new_ire, false); iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry)); @@ -248,7 +251,7 @@ static unsigned int alloc_remap_entry(struct iommu *iommu, unsigned int nr) struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu); unsigned int i, found; - ASSERT( spin_is_locked(&ir_ctrl->iremap_lock) ); + ASSERT(spin_is_locked(&ir_ctrl->iremap_lock)); for ( found = i = 0; i < IREMAP_ENTRY_NR; i++ ) { @@ -259,8 +262,7 @@ static unsigned int alloc_remap_entry(struct iommu *iommu, unsigned int nr) if ( iremap_entries ) unmap_vtd_domain_page(iremap_entries); - GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, i, - iremap_entries, p); + GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, i, iremap_entries, p); } else p = &iremap_entries[i % (1 << IREMAP_ENTRY_ORDER)]; @@ -274,13 +276,13 @@ static unsigned int alloc_remap_entry(struct iommu *iommu, unsigned int nr) if ( iremap_entries ) unmap_vtd_domain_page(iremap_entries); - if ( i < IREMAP_ENTRY_NR ) + if ( i < IREMAP_ENTRY_NR ) ir_ctrl->iremap_num += nr; return i; } -static int remap_entry_to_ioapic_rte( - struct iommu *iommu, int index, struct IO_xAPIC_route_entry *old_rte) +static int remap_entry_to_ioapic_rte(struct iommu *iommu, int index, + struct IO_xAPIC_route_entry *old_rte) { struct iremap_entry *iremap_entry = NULL, *iremap_entries; unsigned long flags; @@ -289,20 +291,18 @@ static int remap_entry_to_ioapic_rte( if ( index < 0 || index > IREMAP_ENTRY_NR - 1 ) { dprintk(XENLOG_ERR VTDPREFIX, - "IO-APIC index (%d) for remap table is invalid\n", - index); + "IO-APIC index (%d) for remap table is invalid\n", index); return -EFAULT; } spin_lock_irqsave(&ir_ctrl->iremap_lock, flags); - GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index, - iremap_entries, iremap_entry); + GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index, iremap_entries, + iremap_entry); if ( iremap_entry->val == 0 ) { - dprintk(XENLOG_ERR VTDPREFIX, - "IO-APIC index (%d) has an empty entry\n", + dprintk(XENLOG_ERR VTDPREFIX, "IO-APIC index (%d) has an empty entry\n", index); unmap_vtd_domain_page(iremap_entries); spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags); @@ -322,9 +322,10 @@ static int remap_entry_to_ioapic_rte( return 0; } -static int ioapic_rte_to_remap_entry(struct iommu *iommu, - int apic, unsigned int ioapic_pin, struct IO_xAPIC_route_entry *old_rte, - unsigned int rte_upper, unsigned int value) +static int ioapic_rte_to_remap_entry(struct iommu *iommu, int apic, + unsigned int ioapic_pin, + struct IO_xAPIC_route_entry *old_rte, + unsigned int rte_upper, unsigned int value) { struct iremap_entry *iremap_entry = NULL, *iremap_entries; struct iremap_entry new_ire; @@ -335,7 +336,7 @@ static int ioapic_rte_to_remap_entry(struct iommu *iommu, 
struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu); bool init = false; - remap_rte = (struct IO_APIC_route_remap_entry *) old_rte; + remap_rte = (struct IO_APIC_route_remap_entry *)old_rte; spin_lock_irqsave(&ir_ctrl->iremap_lock, flags); index = apic_pin_2_ir_idx[apic][ioapic_pin]; @@ -356,8 +357,8 @@ static int ioapic_rte_to_remap_entry(struct iommu *iommu, return -EFAULT; } - GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index, - iremap_entries, iremap_entry); + GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index, iremap_entries, + iremap_entry); new_ire = *iremap_entry; @@ -385,11 +386,11 @@ static int ioapic_rte_to_remap_entry(struct iommu *iommu, set_ioapic_source_id(IO_APIC_ID(apic), &new_ire); new_ire.remap.res_3 = 0; new_ire.remap.res_4 = 0; - new_ire.remap.p = 1; /* finally, set present bit */ + new_ire.remap.p = 1; /* finally, set present bit */ /* now construct new ioapic rte entry */ remap_rte->vector = new_rte.vector; - remap_rte->delivery_mode = 0; /* has to be 0 for remap format */ + remap_rte->delivery_mode = 0; /* has to be 0 for remap format */ remap_rte->index_15 = (index >> 15) & 0x1; remap_rte->index_0_14 = index & 0x7fff; @@ -399,7 +400,7 @@ static int ioapic_rte_to_remap_entry(struct iommu *iommu, remap_rte->trigger = new_rte.trigger; remap_rte->mask = new_rte.mask; remap_rte->reserved = 0; - remap_rte->format = 1; /* indicate remap format */ + remap_rte->format = 1; /* indicate remap format */ } update_irte(iommu, iremap_entry, &new_ire, !init); @@ -411,18 +412,17 @@ static int ioapic_rte_to_remap_entry(struct iommu *iommu, return 0; } -unsigned int io_apic_read_remap_rte( - unsigned int apic, unsigned int reg) +unsigned int io_apic_read_remap_rte(unsigned int apic, unsigned int reg) { unsigned int ioapic_pin = (reg - 0x10) / 2; int index; - struct IO_xAPIC_route_entry old_rte = { 0 }; + struct IO_xAPIC_route_entry old_rte = {0}; int rte_upper = (reg & 1) ? 1 : 0; struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic)); struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu); if ( !ir_ctrl->iremap_num || - ( (index = apic_pin_2_ir_idx[apic][ioapic_pin]) < 0 ) ) + ((index = apic_pin_2_ir_idx[apic][ioapic_pin]) < 0) ) return __io_apic_read(apic, reg); old_rte = __ioapic_read_entry(apic, ioapic_pin, 1); @@ -436,11 +436,11 @@ unsigned int io_apic_read_remap_rte( return (*(((u32 *)&old_rte) + 0)); } -void io_apic_write_remap_rte( - unsigned int apic, unsigned int reg, unsigned int value) +void io_apic_write_remap_rte(unsigned int apic, unsigned int reg, + unsigned int value) { unsigned int ioapic_pin = (reg - 0x10) / 2; - struct IO_xAPIC_route_entry old_rte = { 0 }; + struct IO_xAPIC_route_entry old_rte = {0}; struct IO_APIC_route_remap_entry *remap_rte; unsigned int rte_upper = (reg & 1) ? 
1 : 0; struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic)); @@ -448,7 +448,7 @@ void io_apic_write_remap_rte( old_rte = __ioapic_read_entry(apic, ioapic_pin, 1); - remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte; + remap_rte = (struct IO_APIC_route_remap_entry *)&old_rte; /* mask the interrupt while we change the intremap table */ saved_mask = remap_rte->mask; @@ -456,8 +456,8 @@ void io_apic_write_remap_rte( __io_apic_write(apic, reg & ~1, *(u32 *)&old_rte); remap_rte->mask = saved_mask; - if ( ioapic_rte_to_remap_entry(iommu, apic, ioapic_pin, - &old_rte, rte_upper, value) ) + if ( ioapic_rte_to_remap_entry(iommu, apic, ioapic_pin, &old_rte, rte_upper, + value) ) { __io_apic_write(apic, reg, value); @@ -481,7 +481,7 @@ static void set_msi_source_id(struct pci_dev *pdev, struct iremap_entry *ire) seg = pdev->seg; bus = pdev->bus; devfn = pdev->devfn; - switch ( pdev->type ) + switch (pdev->type) { unsigned int sq; @@ -489,12 +489,20 @@ static void set_msi_source_id(struct pci_dev *pdev, struct iremap_entry *ire) case DEV_TYPE_PCIe_BRIDGE: case DEV_TYPE_PCIe2PCI_BRIDGE: case DEV_TYPE_PCI_HOST_BRIDGE: - switch ( pdev->phantom_stride ) + switch (pdev->phantom_stride) { - case 1: sq = SQ_13_IGNORE_3; break; - case 2: sq = SQ_13_IGNORE_2; break; - case 4: sq = SQ_13_IGNORE_1; break; - default: sq = SQ_ALL_16; break; + case 1: + sq = SQ_13_IGNORE_3; + break; + case 2: + sq = SQ_13_IGNORE_2; + break; + case 4: + sq = SQ_13_IGNORE_1; + break; + default: + sq = SQ_ALL_16; + break; } set_ire_sid(ire, SVT_VERIFY_SID_SQ, sq, PCI_BDF2(bus, devfn)); break; @@ -520,48 +528,46 @@ static void set_msi_source_id(struct pci_dev *pdev, struct iremap_entry *ire) else dprintk(XENLOG_WARNING VTDPREFIX, "d%d: no upstream bridge for %04x:%02x:%02x.%u\n", - pdev->domain->domain_id, - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); + pdev->domain->domain_id, seg, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn)); break; default: dprintk(XENLOG_WARNING VTDPREFIX, "d%d: unknown(%u): %04x:%02x:%02x.%u\n", - pdev->domain->domain_id, pdev->type, - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); + pdev->domain->domain_id, pdev->type, seg, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn)); break; - } + } } -static int remap_entry_to_msi_msg( - struct iommu *iommu, struct msi_msg *msg, unsigned int index) +static int remap_entry_to_msi_msg(struct iommu *iommu, struct msi_msg *msg, + unsigned int index) { struct iremap_entry *iremap_entry = NULL, *iremap_entries; struct msi_msg_remap_entry *remap_rte; unsigned long flags; struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu); - remap_rte = (struct msi_msg_remap_entry *) msg; + remap_rte = (struct msi_msg_remap_entry *)msg; index += (remap_rte->address_lo.index_15 << 15) | remap_rte->address_lo.index_0_14; if ( index >= IREMAP_ENTRY_NR ) { dprintk(XENLOG_ERR VTDPREFIX, - "MSI index (%d) for remap table is invalid\n", - index); + "MSI index (%d) for remap table is invalid\n", index); return -EFAULT; } spin_lock_irqsave(&ir_ctrl->iremap_lock, flags); - GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index, - iremap_entries, iremap_entry); + GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index, iremap_entries, + iremap_entry); if ( iremap_entry->val == 0 ) { - dprintk(XENLOG_ERR VTDPREFIX, - "MSI index (%d) has an empty entry\n", + dprintk(XENLOG_ERR VTDPREFIX, "MSI index (%d) has an empty entry\n", index); unmap_vtd_domain_page(iremap_entries); spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags); @@ -571,36 +577,33 @@ static int remap_entry_to_msi_msg( msg->address_hi = MSI_ADDR_BASE_HI; msg->address_lo = 
MSI_ADDR_BASE_LO | - ((iremap_entry->remap.dm == 0) ? - MSI_ADDR_DESTMODE_PHYS: - MSI_ADDR_DESTMODE_LOGIC) | - ((iremap_entry->remap.dlm != dest_LowestPrio) ? - MSI_ADDR_REDIRECTION_CPU: - MSI_ADDR_REDIRECTION_LOWPRI); + ((iremap_entry->remap.dm == 0) ? MSI_ADDR_DESTMODE_PHYS + : MSI_ADDR_DESTMODE_LOGIC) | + ((iremap_entry->remap.dlm != dest_LowestPrio) + ? MSI_ADDR_REDIRECTION_CPU + : MSI_ADDR_REDIRECTION_LOWPRI); if ( x2apic_enabled ) msg->dest32 = iremap_entry->remap.dst; else msg->dest32 = (iremap_entry->remap.dst >> 8) & 0xff; msg->address_lo |= MSI_ADDR_DEST_ID(msg->dest32); - msg->data = - MSI_DATA_TRIGGER_EDGE | - MSI_DATA_LEVEL_ASSERT | - ((iremap_entry->remap.dlm != dest_LowestPrio) ? - MSI_DATA_DELIVERY_FIXED: - MSI_DATA_DELIVERY_LOWPRI) | - iremap_entry->remap.vector; + msg->data = MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT | + ((iremap_entry->remap.dlm != dest_LowestPrio) + ? MSI_DATA_DELIVERY_FIXED + : MSI_DATA_DELIVERY_LOWPRI) | + iremap_entry->remap.vector; unmap_vtd_domain_page(iremap_entries); spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags); return 0; } -static int msi_msg_to_remap_entry( - struct iommu *iommu, struct pci_dev *pdev, - struct msi_desc *msi_desc, struct msi_msg *msg) +static int msi_msg_to_remap_entry(struct iommu *iommu, struct pci_dev *pdev, + struct msi_desc *msi_desc, + struct msi_msg *msg) { - struct iremap_entry *iremap_entry = NULL, *iremap_entries, new_ire = { }; + struct iremap_entry *iremap_entry = NULL, *iremap_entries, new_ire = {}; struct msi_msg_remap_entry *remap_rte; unsigned int index, i, nr = 1; unsigned long flags; @@ -644,8 +647,8 @@ static int msi_msg_to_remap_entry( return -EFAULT; } - GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index, - iremap_entries, iremap_entry); + GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index, iremap_entries, + iremap_entry); if ( !pi_desc ) { @@ -654,8 +657,8 @@ static int msi_msg_to_remap_entry( new_ire.remap.dlm = msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT; /* Hardware requires RH = 1 for lowest priority delivery mode */ new_ire.remap.rh = (new_ire.remap.dlm == dest_LowestPrio); - new_ire.remap.vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) & - MSI_DATA_VECTOR_MASK; + new_ire.remap.vector = + (msg->data >> MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK; if ( x2apic_enabled ) new_ire.remap.dst = msg->dest32; else @@ -702,8 +705,7 @@ static int msi_msg_to_remap_entry( return 0; } -void msi_msg_read_remap_rte( - struct msi_desc *msi_desc, struct msi_msg *msg) +void msi_msg_read_remap_rte(struct msi_desc *msi_desc, struct msi_msg *msg) { struct pci_dev *pdev = msi_desc->dev; struct acpi_drhd_unit *drhd = NULL; @@ -713,11 +715,11 @@ void msi_msg_read_remap_rte( if ( drhd ) remap_entry_to_msi_msg(drhd->iommu, msg, msi_desc->msi_attrib.type == PCI_CAP_ID_MSI - ? msi_desc->msi_attrib.entry_nr : 0); + ? 
msi_desc->msi_attrib.entry_nr + : 0); } -int msi_msg_write_remap_rte( - struct msi_desc *msi_desc, struct msi_msg *msg) +int msi_msg_write_remap_rte(struct msi_desc *msi_desc, struct msi_msg *msg) { struct pci_dev *pdev = msi_desc->dev; struct acpi_drhd_unit *drhd = NULL; @@ -780,14 +782,16 @@ int enable_intremap(struct iommu *iommu, int eim) { printk(XENLOG_ERR VTDPREFIX " Queued invalidation is not enabled on IOMMU #%u:" - " Should not enable interrupt remapping\n", iommu->index); + " Should not enable interrupt remapping\n", + iommu->index); return -EINVAL; } if ( !eim && (sts & DMA_GSTS_CFIS) ) printk(XENLOG_WARNING VTDPREFIX " Compatibility Format Interrupts permitted on IOMMU #%u:" - " Device pass-through will be insecure\n", iommu->index); + " Device pass-through will be insecure\n", + iommu->index); if ( ir_ctrl->iremap_maddr == 0 ) { @@ -816,8 +820,8 @@ int enable_intremap(struct iommu *iommu, int eim) gcmd |= DMA_GCMD_SIRTP; dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd); - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, - (sts & DMA_GSTS_SIRTPS), sts); + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, (sts & DMA_GSTS_SIRTPS), + sts); spin_unlock_irqrestore(&iommu->register_lock, flags); /* After set SIRTP, must globally invalidate the interrupt entry cache */ @@ -828,8 +832,7 @@ int enable_intremap(struct iommu *iommu, int eim) gcmd |= DMA_GCMD_IRE; dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd); - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, - (sts & DMA_GSTS_IRES), sts); + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, (sts & DMA_GSTS_IRES), sts); spin_unlock_irqrestore(&iommu->register_lock, flags); return init_apic_pin_2_ir_idx(); @@ -851,11 +854,11 @@ void disable_intremap(struct iommu *iommu) dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_IRE)); - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, - !(sts & DMA_GSTS_IRES), sts); + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, !(sts & DMA_GSTS_IRES), + sts); /* If we are disabling Interrupt Remapping, make sure we dont stay in - * Extended Interrupt Mode, as this is unaffected by the Interrupt + * Extended Interrupt Mode, as this is unaffected by the Interrupt * Remapping flag in each DMAR Global Control Register. * Specifically, local apics in xapic mode do not like interrupts delivered * in x2apic mode. 
Any code turning interrupt remapping back on will set @@ -870,8 +873,7 @@ void disable_intremap(struct iommu *iommu) goto out; dmar_writel(iommu->reg, DMAR_IRTA_REG, irta & ~IRTA_EIME); - IOMMU_WAIT_OP(iommu, DMAR_IRTA_REG, dmar_readl, - !(irta & IRTA_EIME), irta); + IOMMU_WAIT_OP(iommu, DMAR_IRTA_REG, dmar_readl, !(irta & IRTA_EIME), irta); out: spin_unlock_irqrestore(&iommu->register_lock, flags); @@ -899,7 +901,7 @@ int iommu_enable_x2apic_IR(void) else if ( !x2apic_enabled ) return -EOPNOTSUPP; - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) { iommu = drhd->iommu; @@ -915,7 +917,7 @@ int iommu_enable_x2apic_IR(void) } /* Enable queue invalidation */ - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) { iommu = drhd->iommu; if ( enable_qinval(iommu) != 0 ) @@ -927,7 +929,7 @@ int iommu_enable_x2apic_IR(void) } /* Enable interrupt remapping */ - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) { iommu = drhd->iommu; if ( enable_intremap(iommu, 1) ) @@ -953,10 +955,10 @@ void iommu_disable_x2apic_IR(void) if ( !x2apic_enabled ) return; - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) disable_intremap(drhd->iommu); - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) disable_qinval(drhd->iommu); } @@ -965,7 +967,7 @@ void iommu_disable_x2apic_IR(void) * when guest changes MSI/MSI-X information. */ int pi_update_irte(const struct pi_desc *pi_desc, const struct pirq *pirq, - const uint8_t gvec) + const uint8_t gvec) { struct irq_desc *desc; struct msi_desc *msi_desc; @@ -989,7 +991,7 @@ int pi_update_irte(const struct pi_desc *pi_desc, const struct pirq *pirq, ASSERT(pcidevs_locked()); return iommu_update_ire_from_msi(msi_desc, &msi_desc->msg); - unlock_out: +unlock_out: spin_unlock_irq(&desc->lock); return rc; diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c index d48b54238a..be3f071b04 100644 --- a/xen/drivers/passthrough/vtd/iommu.c +++ b/xen/drivers/passthrough/vtd/iommu.c @@ -41,7 +41,8 @@ #include "vtd.h" #include "../ats.h" -struct mapped_rmrr { +struct mapped_rmrr +{ struct list_head list; u64 base, end; unsigned int count; @@ -57,8 +58,7 @@ static struct tasklet vtd_fault_tasklet; static int setup_hwdom_device(u8 devfn, struct pci_dev *); static void setup_hwdom_rmrr(struct domain *d); -static int domain_iommu_domid(struct domain *d, - struct iommu *iommu) +static int domain_iommu_domid(struct domain *d, struct iommu *iommu) { unsigned long nr_dom, i; @@ -69,7 +69,7 @@ static int domain_iommu_domid(struct domain *d, if ( iommu->domid_map[i] == d->domain_id ) return i; - i = find_next_bit(iommu->domid_bitmap, nr_dom, i+1); + i = find_next_bit(iommu->domid_bitmap, nr_dom, i + 1); } dprintk(XENLOG_ERR VTDPREFIX, @@ -81,8 +81,7 @@ static int domain_iommu_domid(struct domain *d, #define DID_FIELD_WIDTH 16 #define DID_HIGH_OFFSET 8 static int context_set_domain_id(struct context_entry *context, - struct domain *d, - struct iommu *iommu) + struct domain *d, struct iommu *iommu) { unsigned long nr_dom, i; int found = 0; @@ -98,7 +97,7 @@ static int context_set_domain_id(struct context_entry *context, found = 1; break; } - i = find_next_bit(iommu->domid_bitmap, nr_dom, i+1); + i = find_next_bit(iommu->domid_bitmap, nr_dom, i + 1); } if ( found == 0 ) @@ -123,7 +122,7 @@ static int context_get_domain_id(struct context_entry *context, unsigned long dom_index, nr_dom; int domid = -1; - if (iommu && context) + if ( iommu && context ) { nr_dom = cap_ndoms(iommu->cap); @@ -132,9 +131,10 @@ static int 
context_get_domain_id(struct context_entry *context, if ( dom_index < nr_dom && iommu->domid_map ) domid = iommu->domid_map[dom_index]; else - dprintk(XENLOG_DEBUG VTDPREFIX, - "dom_index %lu exceeds nr_dom %lu or iommu has no domid_map\n", - dom_index, nr_dom); + dprintk( + XENLOG_DEBUG VTDPREFIX, + "dom_index %lu exceeds nr_dom %lu or iommu has no domid_map\n", + dom_index, nr_dom); } return domid; } @@ -194,7 +194,7 @@ u64 alloc_pgtable_maddr(struct acpi_drhd_unit *drhd, unsigned long npages) rhsa = drhd_to_rhsa(drhd); if ( rhsa ) - node = pxm_to_node(rhsa->proximity_domain); + node = pxm_to_node(rhsa->proximity_domain); pg = alloc_domheap_pages(NULL, get_order_from_pages(npages), (node == NUMA_NO_NODE) ? 0 : MEMF_node(node)); @@ -244,7 +244,7 @@ static u64 bus_to_context_maddr(struct iommu *iommu, u8 bus) set_root_present(*root); iommu_flush_cache_entry(root, sizeof(struct root_entry)); } - maddr = (u64) get_context_addr(*root); + maddr = (u64)get_context_addr(*root); unmap_vtd_domain_page(root_entries); return maddr; } @@ -270,7 +270,8 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc) */ pdev = pci_get_pdev_by_domain(domain, -1, -1, -1); drhd = acpi_find_matched_drhd_unit(pdev); - if ( !alloc || ((hd->arch.pgd_maddr = alloc_pgtable_maddr(drhd, 1)) == 0) ) + if ( !alloc || + ((hd->arch.pgd_maddr = alloc_pgtable_maddr(drhd, 1)) == 0) ) goto out; } @@ -312,7 +313,7 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc) } unmap_vtd_domain_page(parent); - out: +out: return pte_maddr; } @@ -329,8 +330,8 @@ static void iommu_flush_write_buffer(struct iommu *iommu) dmar_writel(iommu->reg, DMAR_GCMD_REG, val | DMA_GCMD_WBF); /* Make sure hardware complete it */ - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, - !(val & DMA_GSTS_WBFS), val); + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, !(val & DMA_GSTS_WBFS), + val); spin_unlock_irqrestore(&iommu->register_lock, flags); } @@ -340,7 +341,7 @@ static int __must_check flush_context_reg(void *_iommu, u16 did, u16 source_id, u8 function_mask, u64 type, bool_t flush_non_present_entry) { - struct iommu *iommu = (struct iommu *) _iommu; + struct iommu *iommu = (struct iommu *)_iommu; u64 val = 0; unsigned long flags; @@ -359,17 +360,17 @@ static int __must_check flush_context_reg(void *_iommu, u16 did, u16 source_id, } /* use register invalidation */ - switch ( type ) + switch (type) { case DMA_CCMD_GLOBAL_INVL: val = DMA_CCMD_GLOBAL_INVL; break; case DMA_CCMD_DOMAIN_INVL: - val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did); + val = DMA_CCMD_DOMAIN_INVL | DMA_CCMD_DID(did); break; case DMA_CCMD_DEVICE_INVL: - val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did) - |DMA_CCMD_SID(source_id)|DMA_CCMD_FM(function_mask); + val = DMA_CCMD_DEVICE_INVL | DMA_CCMD_DID(did) | + DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask); break; default: BUG(); @@ -380,31 +381,28 @@ static int __must_check flush_context_reg(void *_iommu, u16 did, u16 source_id, dmar_writeq(iommu->reg, DMAR_CCMD_REG, val); /* Make sure hardware complete it */ - IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, dmar_readq, - !(val & DMA_CCMD_ICC), val); + IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, dmar_readq, !(val & DMA_CCMD_ICC), val); spin_unlock_irqrestore(&iommu->register_lock, flags); /* flush context entry will implicitly flush write buffer */ return 0; } -static int __must_check iommu_flush_context_global(struct iommu *iommu, - bool_t flush_non_present_entry) +static int __must_check +iommu_flush_context_global(struct iommu *iommu, bool_t 
flush_non_present_entry) { struct iommu_flush *flush = iommu_get_flush(iommu); return flush->context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, - flush_non_present_entry); + flush_non_present_entry); } -static int __must_check iommu_flush_context_device(struct iommu *iommu, - u16 did, u16 source_id, - u8 function_mask, - bool_t flush_non_present_entry) +static int __must_check +iommu_flush_context_device(struct iommu *iommu, u16 did, u16 source_id, + u8 function_mask, bool_t flush_non_present_entry) { struct iommu_flush *flush = iommu_get_flush(iommu); return flush->context(iommu, did, source_id, function_mask, - DMA_CCMD_DEVICE_INVL, - flush_non_present_entry); + DMA_CCMD_DEVICE_INVL, flush_non_present_entry); } /* return value determine if we need a write buffer flush */ @@ -413,7 +411,7 @@ static int __must_check flush_iotlb_reg(void *_iommu, u16 did, u64 addr, bool_t flush_non_present_entry, bool_t flush_dev_iotlb) { - struct iommu *iommu = (struct iommu *) _iommu; + struct iommu *iommu = (struct iommu *)_iommu; int tlb_offset = ecap_iotlb_offset(iommu->ecap); u64 val = 0; unsigned long flags; @@ -433,16 +431,16 @@ static int __must_check flush_iotlb_reg(void *_iommu, u16 did, u64 addr, } /* use register invalidation */ - switch ( type ) + switch (type) { case DMA_TLB_GLOBAL_FLUSH: - val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT; + val = DMA_TLB_GLOBAL_FLUSH | DMA_TLB_IVT; break; case DMA_TLB_DSI_FLUSH: - val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did); + val = DMA_TLB_DSI_FLUSH | DMA_TLB_IVT | DMA_TLB_DID(did); break; case DMA_TLB_PSI_FLUSH: - val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did); + val = DMA_TLB_PSI_FLUSH | DMA_TLB_IVT | DMA_TLB_DID(did); break; default: BUG(); @@ -463,8 +461,8 @@ static int __must_check flush_iotlb_reg(void *_iommu, u16 did, u64 addr, dmar_writeq(iommu->reg, tlb_offset + 8, val); /* Make sure hardware complete it */ - IOMMU_WAIT_OP(iommu, (tlb_offset + 8), dmar_readq, - !(val & DMA_TLB_IVT), val); + IOMMU_WAIT_OP(iommu, (tlb_offset + 8), dmar_readq, !(val & DMA_TLB_IVT), + val); spin_unlock_irqrestore(&iommu->register_lock, flags); /* check IOTLB invalidation granularity */ @@ -486,7 +484,7 @@ static int __must_check iommu_flush_iotlb_global(struct iommu *iommu, vtd_ops_preamble_quirk(iommu); status = flush->iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, - flush_non_present_entry, flush_dev_iotlb); + flush_non_present_entry, flush_dev_iotlb); /* undo platform specific errata workarounds */ vtd_ops_postamble_quirk(iommu); @@ -504,8 +502,8 @@ static int __must_check iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did, /* apply platform specific errata workarounds */ vtd_ops_preamble_quirk(iommu); - status = flush->iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, - flush_non_present_entry, flush_dev_iotlb); + status = flush->iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, + flush_non_present_entry, flush_dev_iotlb); /* undo platform specific errata workarounds */ vtd_ops_postamble_quirk(iommu); @@ -525,11 +523,13 @@ static int __must_check iommu_flush_iotlb_psi(struct iommu *iommu, u16 did, /* Fallback to domain selective flush if no PSI support */ if ( !cap_pgsel_inv(iommu->cap) ) - return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry, flush_dev_iotlb); + return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry, + flush_dev_iotlb); /* Fallback to domain selective flush if size is too big */ if ( order > cap_max_amask_val(iommu->cap) ) - return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry, flush_dev_iotlb); + return 
iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry, + flush_dev_iotlb); addr >>= PAGE_SHIFT_4K + order; addr <<= PAGE_SHIFT_4K + order; @@ -538,7 +538,7 @@ static int __must_check iommu_flush_iotlb_psi(struct iommu *iommu, u16 did, vtd_ops_preamble_quirk(iommu); status = flush->iotlb(iommu, did, addr, order, DMA_TLB_PSI_FLUSH, - flush_non_present_entry, flush_dev_iotlb); + flush_non_present_entry, flush_dev_iotlb); /* undo platform specific errata workarounds */ vtd_ops_postamble_quirk(iommu); @@ -554,7 +554,7 @@ static int __must_check iommu_flush_all(void) int rc = 0; flush_all_cache(); - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) { int context_rc, iotlb_rc; @@ -599,7 +599,7 @@ static int __must_check iommu_flush_iotlb(struct domain *d, dfn_t dfn, * No need pcideves_lock here because we have flush * when assign/deassign device */ - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) { iommu = drhd->iommu; @@ -607,18 +607,15 @@ static int __must_check iommu_flush_iotlb(struct domain *d, dfn_t dfn, continue; flush_dev_iotlb = !!find_ats_dev_drhd(iommu); - iommu_domid= domain_iommu_domid(d, iommu); + iommu_domid = domain_iommu_domid(d, iommu); if ( iommu_domid == -1 ) continue; if ( page_count != 1 || dfn_eq(dfn, INVALID_DFN) ) - rc = iommu_flush_iotlb_dsi(iommu, iommu_domid, - 0, flush_dev_iotlb); + rc = iommu_flush_iotlb_dsi(iommu, iommu_domid, 0, flush_dev_iotlb); else - rc = iommu_flush_iotlb_psi(iommu, iommu_domid, - dfn_to_daddr(dfn), - PAGE_ORDER_4K, - !dma_old_pte_present, + rc = iommu_flush_iotlb_psi(iommu, iommu_domid, dfn_to_daddr(dfn), + PAGE_ORDER_4K, !dma_old_pte_present, flush_dev_iotlb); if ( rc > 0 ) @@ -631,8 +628,7 @@ static int __must_check iommu_flush_iotlb(struct domain *d, dfn_t dfn, return rc; } -static int __must_check iommu_flush_iotlb_pages(struct domain *d, - dfn_t dfn, +static int __must_check iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn, unsigned int page_count, unsigned int flush_flags) { @@ -738,8 +734,7 @@ static int iommu_set_root_entry(struct iommu *iommu) dmar_writel(iommu->reg, DMAR_GCMD_REG, sts | DMA_GCMD_SRTP); /* Make sure hardware complete it */ - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, - (sts & DMA_GSTS_RTPS), sts); + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, (sts & DMA_GSTS_RTPS), sts); spin_unlock_irqrestore(&iommu->register_lock, flags); return 0; @@ -755,18 +750,21 @@ static void iommu_enable_translation(struct acpi_drhd_unit *drhd) { if ( !iommu_igfx ) { - printk(XENLOG_INFO VTDPREFIX - "Passed iommu=no-igfx option. Disabling IGD VT-d engine.\n"); + printk( + XENLOG_INFO VTDPREFIX + "Passed iommu=no-igfx option. Disabling IGD VT-d engine.\n"); return; } if ( !is_igd_vt_enabled_quirk() ) { if ( force_iommu ) - panic("BIOS did not enable IGD for VT properly, crash Xen for security purpose\n"); + panic("BIOS did not enable IGD for VT properly, crash Xen for " + "security purpose\n"); printk(XENLOG_WARNING VTDPREFIX - "BIOS did not enable IGD for VT properly. Disabling IGD VT-d engine.\n"); + "BIOS did not enable IGD for VT properly. 
Disabling IGD " + "VT-d engine.\n"); return; } } @@ -782,8 +780,7 @@ static void iommu_enable_translation(struct acpi_drhd_unit *drhd) dmar_writel(iommu->reg, DMAR_GCMD_REG, sts | DMA_GCMD_TE); /* Make sure hardware complete it */ - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, - (sts & DMA_GSTS_TES), sts); + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, (sts & DMA_GSTS_TES), sts); spin_unlock_irqrestore(&iommu->register_lock, flags); /* undo platform specific errata workarounds */ @@ -806,22 +803,21 @@ static void iommu_disable_translation(struct iommu *iommu) dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_TE)); /* Make sure hardware complete it */ - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, - !(sts & DMA_GSTS_TES), sts); + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, !(sts & DMA_GSTS_TES), sts); spin_unlock_irqrestore(&iommu->register_lock, flags); /* undo platform specific errata workarounds */ vtd_ops_postamble_quirk(iommu); } -enum faulttype { +enum faulttype +{ DMA_REMAP, INTR_REMAP, UNKNOWN, }; -static const char *dma_remap_fault_reasons[] = -{ +static const char *dma_remap_fault_reasons[] = { "Software", "Present bit in root entry is clear", "Present bit in context entry is clear", @@ -838,8 +834,7 @@ static const char *dma_remap_fault_reasons[] = "Blocked a DMA translation request", }; -static const char *intr_remap_fault_reasons[] = -{ +static const char *intr_remap_fault_reasons[] = { "Detected reserved fields in the decoded interrupt-remapped request", "Interrupt index exceeded the interrupt-remapping table size", "Present field in the IRTE entry is clear", @@ -852,8 +847,8 @@ static const char *intr_remap_fault_reasons[] = static const char *iommu_get_fault_reason(u8 fault_reason, enum faulttype *fault_type) { - if ( fault_reason >= 0x20 && ( fault_reason < 0x20 + - ARRAY_SIZE(intr_remap_fault_reasons)) ) + if ( fault_reason >= 0x20 && + (fault_reason < 0x20 + ARRAY_SIZE(intr_remap_fault_reasons)) ) { *fault_type = INTR_REMAP; return intr_remap_fault_reasons[fault_reason - 0x20]; @@ -878,21 +873,20 @@ static int iommu_page_fault_do_one(struct iommu *iommu, int type, u16 seg = iommu->intel->drhd->segment; reason = iommu_get_fault_reason(fault_reason, &fault_type); - switch ( fault_type ) + switch (fault_type) { case DMA_REMAP: printk(XENLOG_G_WARNING VTDPREFIX "DMAR:[%s] Request device [%04x:%02x:%02x.%u] " - "fault addr %"PRIx64", iommu reg = %p\n", - (type ? "DMA Read" : "DMA Write"), - seg, PCI_BUS(source_id), PCI_SLOT(source_id), - PCI_FUNC(source_id), addr, iommu->reg); + "fault addr %" PRIx64 ", iommu reg = %p\n", + (type ? 
"DMA Read" : "DMA Write"), seg, PCI_BUS(source_id), + PCI_SLOT(source_id), PCI_FUNC(source_id), addr, iommu->reg); kind = "DMAR"; break; case INTR_REMAP: printk(XENLOG_G_WARNING VTDPREFIX "INTR-REMAP: Request device [%04x:%02x:%02x.%u] " - "fault index %"PRIx64", iommu reg = %p\n", + "fault index %" PRIx64 ", iommu reg = %p\n", seg, PCI_BUS(source_id), PCI_SLOT(source_id), PCI_FUNC(source_id), addr >> 48, iommu->reg); kind = "INTR-REMAP"; @@ -900,15 +894,15 @@ static int iommu_page_fault_do_one(struct iommu *iommu, int type, default: printk(XENLOG_G_WARNING VTDPREFIX "UNKNOWN: Request device [%04x:%02x:%02x.%u] " - "fault addr %"PRIx64", iommu reg = %p\n", + "fault addr %" PRIx64 ", iommu reg = %p\n", seg, PCI_BUS(source_id), PCI_SLOT(source_id), PCI_FUNC(source_id), addr, iommu->reg); kind = "UNKNOWN"; break; } - printk(XENLOG_G_WARNING VTDPREFIX "%s: reason %02x - %s\n", - kind, fault_reason, reason); + printk(XENLOG_G_WARNING VTDPREFIX "%s: reason %02x - %s\n", kind, + fault_reason, reason); if ( iommu_verbose && fault_type == DMA_REMAP ) print_vtd_entries(iommu, PCI_BUS(source_id), PCI_DEVFN2(source_id), @@ -930,7 +924,8 @@ static void iommu_fault_status(u32 fault_status) if ( fault_status & DMA_FSTS_IQE ) INTEL_IOMMU_DEBUG("iommu_fault_status: Invalidation Queue Error\n"); if ( fault_status & DMA_FSTS_ICE ) - INTEL_IOMMU_DEBUG("iommu_fault_status: Invalidation Completion Error\n"); + INTEL_IOMMU_DEBUG( + "iommu_fault_status: Invalidation Completion Error\n"); if ( fault_status & DMA_FSTS_ITE ) INTEL_IOMMU_DEBUG("iommu_fault_status: Invalidation Time-out Error\n"); } @@ -952,7 +947,7 @@ static void __do_iommu_page_fault(struct iommu *iommu) fault_index = dma_fsts_fault_record_index(fault_status); reg = cap_fault_reg_offset(iommu->cap); - while (1) + while ( 1 ) { u8 fault_reason; u16 source_id; @@ -962,8 +957,8 @@ static void __do_iommu_page_fault(struct iommu *iommu) /* highest 32 bits */ spin_lock_irqsave(&iommu->register_lock, flags); - data = dmar_readl(iommu->reg, reg + - fault_index * PRIMARY_FAULT_REG_LEN + 12); + data = dmar_readl(iommu->reg, + reg + fault_index * PRIMARY_FAULT_REG_LEN + 12); if ( !(data & DMA_FRCD_F) ) { spin_unlock_irqrestore(&iommu->register_lock, flags); @@ -973,20 +968,20 @@ static void __do_iommu_page_fault(struct iommu *iommu) fault_reason = dma_frcd_fault_reason(data); type = dma_frcd_type(data); - data = dmar_readl(iommu->reg, reg + - fault_index * PRIMARY_FAULT_REG_LEN + 8); + data = dmar_readl(iommu->reg, + reg + fault_index * PRIMARY_FAULT_REG_LEN + 8); source_id = dma_frcd_source_id(data); - guest_addr = dmar_readq(iommu->reg, reg + - fault_index * PRIMARY_FAULT_REG_LEN); + guest_addr = + dmar_readq(iommu->reg, reg + fault_index * PRIMARY_FAULT_REG_LEN); guest_addr = dma_frcd_page_addr(guest_addr); /* clear the fault */ - dmar_writel(iommu->reg, reg + - fault_index * PRIMARY_FAULT_REG_LEN + 12, DMA_FRCD_F); + dmar_writel(iommu->reg, reg + fault_index * PRIMARY_FAULT_REG_LEN + 12, + DMA_FRCD_F); spin_unlock_irqrestore(&iommu->register_lock, flags); - iommu_page_fault_do_one(iommu, type, fault_reason, - source_id, guest_addr); + iommu_page_fault_do_one(iommu, type, fault_reason, source_id, + guest_addr); pci_check_disable_device(iommu->intel->drhd->segment, PCI_BUS(source_id), PCI_DEVFN2(source_id)); @@ -1012,8 +1007,8 @@ static void do_iommu_page_fault(unsigned long data) if ( list_empty(&acpi_drhd_units) ) { - INTEL_IOMMU_DEBUG("no device found, something must be very wrong!\n"); - return; + INTEL_IOMMU_DEBUG("no device found, something must be 
very wrong!\n"); + return; } /* @@ -1022,12 +1017,11 @@ static void do_iommu_page_fault(unsigned long data) * tasklet (instead of one per each IOMMUs) and should be more than * fine, considering how rare the event of a fault should be. */ - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) __do_iommu_page_fault(drhd->iommu); } -static void iommu_page_fault(int irq, void *dev_id, - struct cpu_user_regs *regs) +static void iommu_page_fault(int irq, void *dev_id, struct cpu_user_regs *regs) { /* * Just flag the tasklet as runnable. This is fine, according to VT-d @@ -1094,14 +1088,15 @@ static void dma_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask) struct iommu *iommu = desc->action->dev_id; dest = set_desc_affinity(desc, mask); - if (dest == BAD_APICID){ + if ( dest == BAD_APICID ) + { dprintk(XENLOG_ERR VTDPREFIX, "Set iommu interrupt affinity error!\n"); return; } msi_compose_msg(desc->arch.vector, NULL, &msg); msg.dest32 = dest; - if (x2apic_enabled) + if ( x2apic_enabled ) msg.address_hi = dest & 0xFFFFFF00; ASSERT(!(msg.address_lo & MSI_ADDR_DEST_ID_MASK)); msg.address_lo |= MSI_ADDR_DEST_ID(dest); @@ -1137,8 +1132,7 @@ static int __init iommu_set_interrupt(struct acpi_drhd_unit *drhd) struct iommu *iommu = drhd->iommu; struct irq_desc *desc; - irq = create_irq(rhsa ? pxm_to_node(rhsa->proximity_domain) - : NUMA_NO_NODE); + irq = create_irq(rhsa ? pxm_to_node(rhsa->proximity_domain) : NUMA_NO_NODE); if ( irq <= 0 ) { dprintk(XENLOG_ERR VTDPREFIX, "IOMMU: no irq available!\n"); @@ -1173,8 +1167,8 @@ int __init iommu_alloc(struct acpi_drhd_unit *drhd) if ( nr_iommus > MAX_IOMMUS ) { - dprintk(XENLOG_ERR VTDPREFIX, - "IOMMU: nr_iommus %d > MAX_IOMMUS\n", nr_iommus); + dprintk(XENLOG_ERR VTDPREFIX, "IOMMU: nr_iommus %d > MAX_IOMMUS\n", + nr_iommus); return -ENOMEM; } @@ -1207,16 +1201,17 @@ int __init iommu_alloc(struct acpi_drhd_unit *drhd) if ( iommu_verbose ) { - printk(VTDPREFIX "drhd->address = %"PRIx64" iommu->reg = %p\n", + printk(VTDPREFIX "drhd->address = %" PRIx64 " iommu->reg = %p\n", drhd->address, iommu->reg); - printk(VTDPREFIX "cap = %"PRIx64" ecap = %"PRIx64"\n", - iommu->cap, iommu->ecap); + printk(VTDPREFIX "cap = %" PRIx64 " ecap = %" PRIx64 "\n", iommu->cap, + iommu->ecap); } if ( !(iommu->cap + 1) || !(iommu->ecap + 1) ) return -ENODEV; if ( cap_fault_reg_offset(iommu->cap) + - cap_num_fault_regs(iommu->cap) * PRIMARY_FAULT_REG_LEN >= PAGE_SIZE || + cap_num_fault_regs(iommu->cap) * PRIMARY_FAULT_REG_LEN >= + PAGE_SIZE || ecap_iotlb_offset(iommu->ecap) >= PAGE_SIZE ) { printk(XENLOG_ERR VTDPREFIX "IOMMU: unsupported\n"); @@ -1244,7 +1239,7 @@ int __init iommu_alloc(struct acpi_drhd_unit *drhd) nr_dom = cap_ndoms(iommu->cap); iommu->domid_bitmap = xzalloc_array(unsigned long, BITS_TO_LONGS(nr_dom)); if ( !iommu->domid_bitmap ) - return -ENOMEM ; + return -ENOMEM; /* * if Caching mode is set, then invalid translations are tagged with @@ -1255,7 +1250,7 @@ int __init iommu_alloc(struct acpi_drhd_unit *drhd) iommu->domid_map = xzalloc_array(u16, nr_dom); if ( !iommu->domid_map ) - return -ENOMEM ; + return -ENOMEM; spin_lock_init(&iommu->lock); spin_lock_init(&iommu->register_lock); @@ -1290,12 +1285,14 @@ void __init iommu_free(struct acpi_drhd_unit *drhd) xfree(iommu); } -#define guestwidth_to_adjustwidth(gaw) ({ \ - int agaw, r = (gaw - 12) % 9; \ - agaw = (r == 0) ? gaw : (gaw + 9 - r); \ - if ( agaw > 64 ) \ - agaw = 64; \ - agaw; }) +#define guestwidth_to_adjustwidth(gaw) \ + ({ \ + int agaw, r = (gaw - 12) % 9; \ + agaw = (r == 0) ? 
gaw : (gaw + 9 - r); \ + if ( agaw > 64 ) \ + agaw = 64; \ + agaw; \ + }) static int intel_iommu_domain_init(struct domain *d, bool use_iommu) { @@ -1317,7 +1314,7 @@ static void __hwdom_init intel_iommu_hwdom_init(struct domain *d) printk(XENLOG_WARNING VTDPREFIX " IOMMU flush all failed for hardware domain\n"); - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) { if ( iomem_deny_access(d, PFN_DOWN(drhd->address), PFN_DOWN(drhd->address)) ) @@ -1326,10 +1323,8 @@ static void __hwdom_init intel_iommu_hwdom_init(struct domain *d) } } -int domain_context_mapping_one( - struct domain *domain, - struct iommu *iommu, - u8 bus, u8 devfn, const struct pci_dev *pdev) +int domain_context_mapping_one(struct domain *domain, struct iommu *iommu, + u8 bus, u8 devfn, const struct pci_dev *pdev) { struct domain_iommu *hd = dom_iommu(domain); struct context_entry *context, *context_entries; @@ -1356,8 +1351,8 @@ int domain_context_mapping_one( { printk(XENLOG_G_INFO VTDPREFIX "d%d: %04x:%02x:%02x.%u owned by d%d!", - domain->domain_id, - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), + domain->domain_id, seg, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn), pdev->domain ? pdev->domain->domain_id : -1); res = -EINVAL; } @@ -1366,22 +1361,21 @@ int domain_context_mapping_one( { int cdomain; cdomain = context_get_domain_id(context, iommu); - + if ( cdomain < 0 ) { printk(XENLOG_G_WARNING VTDPREFIX "d%d: %04x:%02x:%02x.%u mapped, but can't find owner!\n", - domain->domain_id, - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); + domain->domain_id, seg, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn)); res = -EINVAL; } else if ( cdomain != domain->domain_id ) { printk(XENLOG_G_INFO VTDPREFIX "d%d: %04x:%02x:%02x.%u already mapped to d%d!", - domain->domain_id, - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), - cdomain); + domain->domain_id, seg, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn), cdomain); res = -EINVAL; } } @@ -1417,8 +1411,7 @@ int domain_context_mapping_one( /* Skip top levels of page tables for 2- and 3-level DRHDs. 
*/ pgd_maddr = hd->arch.pgd_maddr; for ( agaw = level_to_agaw(4); - agaw != level_to_agaw(iommu->nr_pt_levels); - agaw-- ) + agaw != level_to_agaw(iommu->nr_pt_levels); agaw-- ) { struct dma_pte *p = map_vtd_domain_page(pgd_maddr); pgd_maddr = dma_pte_addr(*p); @@ -1492,13 +1485,13 @@ static int domain_context_mapping(struct domain *domain, u8 devfn, ASSERT(pcidevs_locked()); - switch ( pdev->type ) + switch (pdev->type) { case DEV_TYPE_PCI_HOST_BRIDGE: if ( iommu_debug ) printk(VTDPREFIX "d%d:Hostbridge: skip %04x:%02x:%02x.%u map\n", - domain->domain_id, seg, bus, - PCI_SLOT(devfn), PCI_FUNC(devfn)); + domain->domain_id, seg, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn)); if ( !is_hardware_domain(domain) ) return -EPERM; break; @@ -1511,10 +1504,9 @@ static int domain_context_mapping(struct domain *domain, u8 devfn, case DEV_TYPE_PCIe_ENDPOINT: if ( iommu_debug ) printk(VTDPREFIX "d%d:PCIe: map %04x:%02x:%02x.%u\n", - domain->domain_id, seg, bus, - PCI_SLOT(devfn), PCI_FUNC(devfn)); - ret = domain_context_mapping_one(domain, drhd->iommu, bus, devfn, - pdev); + domain->domain_id, seg, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn)); + ret = domain_context_mapping_one(domain, drhd->iommu, bus, devfn, pdev); if ( !ret && devfn == pdev->devfn && ats_device(pdev, drhd) > 0 ) enable_ats_device(pdev, &drhd->iommu->ats_devices); @@ -1523,11 +1515,10 @@ static int domain_context_mapping(struct domain *domain, u8 devfn, case DEV_TYPE_PCI: if ( iommu_debug ) printk(VTDPREFIX "d%d:PCI: map %04x:%02x:%02x.%u\n", - domain->domain_id, seg, bus, - PCI_SLOT(devfn), PCI_FUNC(devfn)); + domain->domain_id, seg, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn)); - ret = domain_context_mapping_one(domain, drhd->iommu, bus, devfn, - pdev); + ret = domain_context_mapping_one(domain, drhd->iommu, bus, devfn, pdev); if ( ret ) break; @@ -1551,8 +1542,8 @@ static int domain_context_mapping(struct domain *domain, u8 devfn, default: dprintk(XENLOG_ERR VTDPREFIX, "d%d:unknown(%u): %04x:%02x:%02x.%u\n", - domain->domain_id, pdev->type, - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); + domain->domain_id, pdev->type, seg, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn)); ret = -EINVAL; break; } @@ -1563,10 +1554,8 @@ static int domain_context_mapping(struct domain *domain, u8 devfn, return ret; } -int domain_context_unmap_one( - struct domain *domain, - struct iommu *iommu, - u8 bus, u8 devfn) +int domain_context_unmap_one(struct domain *domain, struct iommu *iommu, u8 bus, + u8 devfn) { struct context_entry *context, *context_entries; u64 maddr; @@ -1591,7 +1580,7 @@ int domain_context_unmap_one( context_clear_entry(*context); iommu_flush_cache_entry(context, sizeof(struct context_entry)); - iommu_domid= domain_iommu_domid(domain, iommu); + iommu_domid = domain_iommu_domid(domain, iommu); if ( iommu_domid == -1 ) { spin_unlock(&iommu->lock); @@ -1599,8 +1588,7 @@ int domain_context_unmap_one( return -EINVAL; } - rc = iommu_flush_context_device(iommu, iommu_domid, - PCI_BDF2(bus, devfn), + rc = iommu_flush_context_device(iommu, iommu_domid, PCI_BDF2(bus, devfn), DMA_CCMD_MASK_NOBIT, 0); flush_dev_iotlb = !!find_ats_dev_drhd(iommu); @@ -1643,13 +1631,13 @@ static int domain_context_unmap(struct domain *domain, u8 devfn, return -ENODEV; iommu = drhd->iommu; - switch ( pdev->type ) + switch (pdev->type) { case DEV_TYPE_PCI_HOST_BRIDGE: if ( iommu_debug ) printk(VTDPREFIX "d%d:Hostbridge: skip %04x:%02x:%02x.%u unmap\n", - domain->domain_id, seg, bus, - PCI_SLOT(devfn), PCI_FUNC(devfn)); + domain->domain_id, seg, bus, PCI_SLOT(devfn), + 
PCI_FUNC(devfn)); if ( !is_hardware_domain(domain) ) return -EPERM; goto out; @@ -1662,8 +1650,8 @@ static int domain_context_unmap(struct domain *domain, u8 devfn, case DEV_TYPE_PCIe_ENDPOINT: if ( iommu_debug ) printk(VTDPREFIX "d%d:PCIe: unmap %04x:%02x:%02x.%u\n", - domain->domain_id, seg, bus, - PCI_SLOT(devfn), PCI_FUNC(devfn)); + domain->domain_id, seg, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn)); ret = domain_context_unmap_one(domain, iommu, bus, devfn); if ( !ret && devfn == pdev->devfn && ats_device(pdev, drhd) > 0 ) disable_ats_device(pdev); @@ -1673,7 +1661,8 @@ static int domain_context_unmap(struct domain *domain, u8 devfn, case DEV_TYPE_PCI: if ( iommu_debug ) printk(VTDPREFIX "d%d:PCI: unmap %04x:%02x:%02x.%u\n", - domain->domain_id, seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); + domain->domain_id, seg, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn)); ret = domain_context_unmap_one(domain, iommu, bus, devfn); if ( ret ) break; @@ -1699,8 +1688,8 @@ static int domain_context_unmap(struct domain *domain, u8 devfn, default: dprintk(XENLOG_ERR VTDPREFIX, "d%d:unknown(%u): %04x:%02x:%02x.%u\n", - domain->domain_id, pdev->type, - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); + domain->domain_id, pdev->type, seg, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn)); ret = -EINVAL; goto out; } @@ -1709,7 +1698,7 @@ static int domain_context_unmap(struct domain *domain, u8 devfn, * if no other devices under the same iommu owned by this domain, * clear iommu in iommu_bitmap and clear domain_id in domid_bitmp */ - for_each_pdev ( domain, pdev ) + for_each_pdev (domain, pdev) { if ( pdev->seg == seg && pdev->bus == bus && pdev->devfn == devfn ) continue; @@ -1751,7 +1740,7 @@ static void iommu_domain_teardown(struct domain *d) if ( list_empty(&acpi_drhd_units) ) return; - list_for_each_entry_safe ( mrmrr, tmp, &hd->arch.mapped_rmrrs, list ) + list_for_each_entry_safe(mrmrr, tmp, &hd->arch.mapped_rmrrs, list) { list_del(&mrmrr->list); xfree(mrmrr); @@ -1807,9 +1796,8 @@ static int __must_check intel_iommu_map_page(struct domain *d, dfn_t dfn, old = *pte; dma_set_pte_addr(new, mfn_to_maddr(mfn)); - dma_set_pte_prot(new, - ((flags & IOMMUF_readable) ? DMA_PTE_READ : 0) | - ((flags & IOMMUF_writable) ? DMA_PTE_WRITE : 0)); + dma_set_pte_prot(new, ((flags & IOMMUF_readable) ? DMA_PTE_READ : 0) | + ((flags & IOMMUF_writable) ? 
DMA_PTE_WRITE : 0)); /* Set the SNP on leaf page table if Snoop Control available */ if ( iommu_snoop ) @@ -1889,8 +1877,8 @@ static int intel_iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn, return 0; } -int iommu_pte_flush(struct domain *d, uint64_t dfn, uint64_t *pte, - int order, int present) +int iommu_pte_flush(struct domain *d, uint64_t dfn, uint64_t *pte, int order, + int present) { struct acpi_drhd_unit *drhd; struct iommu *iommu = NULL; @@ -1901,19 +1889,18 @@ int iommu_pte_flush(struct domain *d, uint64_t dfn, uint64_t *pte, iommu_flush_cache_entry(pte, sizeof(struct dma_pte)); - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) { iommu = drhd->iommu; if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) ) continue; flush_dev_iotlb = !!find_ats_dev_drhd(iommu); - iommu_domid= domain_iommu_domid(d, iommu); + iommu_domid = domain_iommu_domid(d, iommu); if ( iommu_domid == -1 ) continue; - rc = iommu_flush_iotlb_psi(iommu, iommu_domid, - __dfn_to_daddr(dfn), + rc = iommu_flush_iotlb_psi(iommu, iommu_domid, __dfn_to_daddr(dfn), order, !present, flush_dev_iotlb); if ( rc > 0 ) { @@ -1925,8 +1912,7 @@ int iommu_pte_flush(struct domain *d, uint64_t dfn, uint64_t *pte, if ( unlikely(rc) ) { if ( !d->is_shutting_down && printk_ratelimit() ) - printk(XENLOG_ERR VTDPREFIX - " d%d: IOMMU pages flush failed: %d\n", + printk(XENLOG_ERR VTDPREFIX " d%d: IOMMU pages flush failed: %d\n", d->domain_id, rc); if ( !is_hardware_domain(d) ) @@ -1942,7 +1928,7 @@ static int __init vtd_ept_page_compatible(struct iommu *iommu) /* EPT is not initialised yet, so we must check the capability in * the MSR explicitly rather than use cpu_has_vmx_ept_*() */ - if ( rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP, ept_cap) != 0 ) + if ( rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP, ept_cap) != 0 ) return 0; return (ept_has_2mb(ept_cap) && opt_hap_2mb) == cap_sps_2mb(vtd_cap) && @@ -1962,8 +1948,7 @@ static void iommu_set_pgd(struct domain *d) } static int rmrr_identity_mapping(struct domain *d, bool_t map, - const struct acpi_rmrr_unit *rmrr, - u32 flag) + const struct acpi_rmrr_unit *rmrr, u32 flag) { unsigned long base_pfn = rmrr->base_address >> PAGE_SHIFT_4K; unsigned long end_pfn = PAGE_ALIGN_4K(rmrr->end_address) >> PAGE_SHIFT_4K; @@ -1977,7 +1962,7 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map, * No need to acquire hd->arch.mapping_lock: Both insertion and removal * get done while holding pcidevs_lock. 
*/ - list_for_each_entry( mrmrr, &hd->arch.mapped_rmrrs, list ) + list_for_each_entry (mrmrr, &hd->arch.mapped_rmrrs, list) { if ( mrmrr->base == rmrr->base_address && mrmrr->end == rmrr->end_address ) @@ -2048,10 +2033,9 @@ static int intel_iommu_add_device(u8 devfn, struct pci_dev *pdev) return ret; } - for_each_rmrr_device ( rmrr, bdf, i ) + for_each_rmrr_device (rmrr, bdf, i) { - if ( rmrr->segment == pdev->seg && - PCI_BUS(bdf) == pdev->bus && + if ( rmrr->segment == pdev->seg && PCI_BUS(bdf) == pdev->bus && PCI_DEVFN2(bdf) == devfn ) { /* @@ -2094,10 +2078,9 @@ static int intel_iommu_remove_device(u8 devfn, struct pci_dev *pdev) if ( !pdev->domain ) return -EINVAL; - for_each_rmrr_device ( rmrr, bdf, i ) + for_each_rmrr_device (rmrr, bdf, i) { - if ( rmrr->segment != pdev->seg || - PCI_BUS(bdf) != pdev->bus || + if ( rmrr->segment != pdev->seg || PCI_BUS(bdf) != pdev->bus || PCI_DEVFN2(bdf) != devfn ) continue; @@ -2131,8 +2114,8 @@ void clear_fault_bits(struct iommu *iommu) static void adjust_irq_affinity(struct acpi_drhd_unit *drhd) { const struct acpi_rhsa_unit *rhsa = drhd_to_rhsa(drhd); - unsigned int node = rhsa ? pxm_to_node(rhsa->proximity_domain) - : NUMA_NO_NODE; + unsigned int node = + rhsa ? pxm_to_node(rhsa->proximity_domain) : NUMA_NO_NODE; const cpumask_t *cpumask = &cpu_online_map; if ( node < MAX_NUMNODES && node_online(node) && @@ -2148,7 +2131,7 @@ int adjust_vtd_irq_affinities(void) if ( !iommu_enabled ) return 0; - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) adjust_irq_affinity(drhd); return 0; @@ -2165,9 +2148,9 @@ static int __must_check init_vtd_hw(void) u32 sts; /* - * Basic VT-d HW init: set VT-d interrupt, clear VT-d faults. + * Basic VT-d HW init: set VT-d interrupt, clear VT-d faults. */ - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) { adjust_irq_affinity(drhd); @@ -2184,8 +2167,8 @@ static int __must_check init_vtd_hw(void) /* * Enable queue invalidation - */ - for_each_drhd_unit ( drhd ) + */ + for_each_drhd_unit (drhd) { iommu = drhd->iommu; /* @@ -2202,7 +2185,7 @@ static int __must_check init_vtd_hw(void) /* * Enable interrupt remapping - */ + */ if ( iommu_intremap ) { int apic; @@ -2212,16 +2195,16 @@ static int __must_check init_vtd_hw(void) { iommu_intremap = 0; dprintk(XENLOG_ERR VTDPREFIX, - "ioapic_to_iommu: ioapic %#x (id: %#x) is NULL! " - "Will not try to enable Interrupt Remapping.\n", - apic, IO_APIC_ID(apic)); + "ioapic_to_iommu: ioapic %#x (id: %#x) is NULL! " + "Will not try to enable Interrupt Remapping.\n", + apic, IO_APIC_ID(apic)); break; } } } if ( iommu_intremap ) { - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) { iommu = drhd->iommu; if ( enable_intremap(iommu, 0) != 0 ) @@ -2234,7 +2217,7 @@ static int __must_check init_vtd_hw(void) } } if ( !iommu_intremap ) - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) disable_intremap(drhd->iommu); } @@ -2243,7 +2226,7 @@ static int __must_check init_vtd_hw(void) * must globally invalidate context cache, and then globally * invalidate IOTLB */ - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) { iommu = drhd->iommu; ret = iommu_set_root_entry(iommu); @@ -2264,7 +2247,7 @@ static void __hwdom_init setup_hwdom_rmrr(struct domain *d) int ret, i; pcidevs_lock(); - for_each_rmrr_device ( rmrr, bdf, i ) + for_each_rmrr_device (rmrr, bdf, i) { /* * Here means we're add a device to the hardware domain. 
@@ -2275,7 +2258,7 @@ static void __hwdom_init setup_hwdom_rmrr(struct domain *d) ret = rmrr_identity_mapping(d, 1, rmrr, 0); if ( ret ) dprintk(XENLOG_ERR VTDPREFIX, - "IOMMU: mapping reserved region failed\n"); + "IOMMU: mapping reserved region failed\n"); } pcidevs_unlock(); } @@ -2311,16 +2294,16 @@ int __init intel_vtd_setup(void) * engines: Snoop Control, DMA passthrough, Queued Invalidation, Interrupt * Remapping, and Posted Interrupt */ - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) { iommu = drhd->iommu; - printk("Intel VT-d iommu %"PRIu32" supported page sizes: 4kB", + printk("Intel VT-d iommu %" PRIu32 " supported page sizes: 4kB", iommu->index); - if (cap_sps_2mb(iommu->cap)) + if ( cap_sps_2mb(iommu->cap) ) printk(", 2MB"); - if (cap_sps_1gb(iommu->cap)) + if ( cap_sps_1gb(iommu->cap) ) printk(", 1GB"); printk(".\n"); @@ -2361,11 +2344,12 @@ int __init intel_vtd_setup(void) if ( !iommu_qinval && iommu_intremap ) { iommu_intremap = 0; - dprintk(XENLOG_WARNING VTDPREFIX, "Interrupt Remapping disabled " - "since Queued Invalidation isn't supported or enabled.\n"); + dprintk(XENLOG_WARNING VTDPREFIX, + "Interrupt Remapping disabled " + "since Queued Invalidation isn't supported or enabled.\n"); } -#define P(p,s) printk("Intel VT-d %s %senabled.\n", s, (p)? "" : "not ") +#define P(p, s) printk("Intel VT-d %s %senabled.\n", s, (p) ? "" : "not ") P(iommu_snoop, "Snoop Control"); P(iommu_hwdom_passthrough, "Dom0 DMA Passthrough"); P(iommu_qinval, "Queued Invalidation"); @@ -2386,7 +2370,7 @@ int __init intel_vtd_setup(void) return 0; - error: +error: iommu_enabled = 0; iommu_snoop = 0; iommu_hwdom_passthrough = false; @@ -2396,10 +2380,9 @@ int __init intel_vtd_setup(void) return ret; } -static int reassign_device_ownership( - struct domain *source, - struct domain *target, - u8 devfn, struct pci_dev *pdev) +static int reassign_device_ownership(struct domain *source, + struct domain *target, u8 devfn, + struct pci_dev *pdev) { int ret; @@ -2422,9 +2405,8 @@ static int reassign_device_ownership( u16 bdf; unsigned int i; - for_each_rmrr_device( rmrr, bdf, i ) - if ( rmrr->segment == pdev->seg && - PCI_BUS(bdf) == pdev->bus && + for_each_rmrr_device (rmrr, bdf, i) + if ( rmrr->segment == pdev->seg && PCI_BUS(bdf) == pdev->bus && PCI_DEVFN2(bdf) == devfn ) { /* @@ -2465,8 +2447,8 @@ static int reassign_device_ownership( return ret; } -static int intel_iommu_assign_device( - struct domain *d, u8 devfn, struct pci_dev *pdev, u32 flag) +static int intel_iommu_assign_device(struct domain *d, u8 devfn, + struct pci_dev *pdev, u32 flag) { struct acpi_rmrr_unit *rmrr; int ret = 0, i; @@ -2489,22 +2471,19 @@ static int intel_iommu_assign_device( * interface to make sure devices sharing RMRR are assigned to the * same domain together. */ - for_each_rmrr_device( rmrr, bdf, i ) + for_each_rmrr_device (rmrr, bdf, i) { - if ( rmrr->segment == seg && - PCI_BUS(bdf) == bus && - PCI_DEVFN2(bdf) == devfn && - rmrr->scope.devices_cnt > 1 ) + if ( rmrr->segment == seg && PCI_BUS(bdf) == bus && + PCI_DEVFN2(bdf) == devfn && rmrr->scope.devices_cnt > 1 ) { bool_t relaxed = !!(flag & XEN_DOMCTL_DEV_RDM_RELAXED); printk(XENLOG_GUEST "%s" VTDPREFIX - " It's %s to assign %04x:%02x:%02x.%u" - " with shared RMRR at %"PRIx64" for Dom%d.\n", + " It's %s to assign %04x:%02x:%02x.%u" + " with shared RMRR at %" PRIx64 " for Dom%d.\n", relaxed ? XENLOG_WARNING : XENLOG_ERR, - relaxed ? "risky" : "disallowed", - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), - rmrr->base_address, d->domain_id); + relaxed ? 
"risky" : "disallowed", seg, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn), rmrr->base_address, d->domain_id); if ( !relaxed ) return -EPERM; } @@ -2515,10 +2494,9 @@ static int intel_iommu_assign_device( return ret; /* Setup rmrr identity mapping */ - for_each_rmrr_device( rmrr, bdf, i ) + for_each_rmrr_device (rmrr, bdf, i) { - if ( rmrr->segment == seg && - PCI_BUS(bdf) == bus && + if ( rmrr->segment == seg && PCI_BUS(bdf) == bus && PCI_DEVFN2(bdf) == devfn ) { ret = rmrr_identity_mapping(d, 1, rmrr, flag); @@ -2526,9 +2504,10 @@ static int intel_iommu_assign_device( { reassign_device_ownership(d, hardware_domain, devfn, pdev); printk(XENLOG_G_ERR VTDPREFIX - " cannot map reserved region (%"PRIx64",%"PRIx64"] for Dom%d (%d)\n", - rmrr->base_address, rmrr->end_address, - d->domain_id, ret); + " cannot map reserved region (%" PRIx64 ",%" PRIx64 + "] for Dom%d (%d)\n", + rmrr->base_address, rmrr->end_address, d->domain_id, + ret); break; } } @@ -2552,7 +2531,7 @@ static int __must_check vtd_suspend(void) { struct acpi_drhd_unit *drhd; struct iommu *iommu; - u32 i; + u32 i; int rc; if ( !iommu_enabled ) @@ -2562,24 +2541,25 @@ static int __must_check vtd_suspend(void) if ( unlikely(rc) ) { printk(XENLOG_WARNING VTDPREFIX - " suspend: IOMMU flush all failed: %d\n", rc); + " suspend: IOMMU flush all failed: %d\n", + rc); return rc; } - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) { iommu = drhd->iommu; i = iommu->index; iommu_state[i][DMAR_FECTL_REG] = - (u32) dmar_readl(iommu->reg, DMAR_FECTL_REG); + (u32)dmar_readl(iommu->reg, DMAR_FECTL_REG); iommu_state[i][DMAR_FEDATA_REG] = - (u32) dmar_readl(iommu->reg, DMAR_FEDATA_REG); + (u32)dmar_readl(iommu->reg, DMAR_FEDATA_REG); iommu_state[i][DMAR_FEADDR_REG] = - (u32) dmar_readl(iommu->reg, DMAR_FEADDR_REG); + (u32)dmar_readl(iommu->reg, DMAR_FEADDR_REG); iommu_state[i][DMAR_FEUADDR_REG] = - (u32) dmar_readl(iommu->reg, DMAR_FEUADDR_REG); + (u32)dmar_readl(iommu->reg, DMAR_FEUADDR_REG); /* don't disable VT-d engine when force_iommu is set. 
*/ if ( force_iommu ) @@ -2610,7 +2590,7 @@ static void vtd_crash_shutdown(void) printk(XENLOG_WARNING VTDPREFIX " crash shutdown: IOMMU flush all failed\n"); - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) { iommu = drhd->iommu; iommu_disable_translation(iommu); @@ -2629,30 +2609,30 @@ static void vtd_resume(void) if ( !iommu_enabled ) return; - if ( init_vtd_hw() != 0 && force_iommu ) - panic("IOMMU setup failed, crash Xen for security purpose\n"); + if ( init_vtd_hw() != 0 && force_iommu ) + panic("IOMMU setup failed, crash Xen for security purpose\n"); - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) { iommu = drhd->iommu; i = iommu->index; spin_lock_irqsave(&iommu->register_lock, flags); dmar_writel(iommu->reg, DMAR_FECTL_REG, - (u32) iommu_state[i][DMAR_FECTL_REG]); + (u32)iommu_state[i][DMAR_FECTL_REG]); dmar_writel(iommu->reg, DMAR_FEDATA_REG, - (u32) iommu_state[i][DMAR_FEDATA_REG]); + (u32)iommu_state[i][DMAR_FEDATA_REG]); dmar_writel(iommu->reg, DMAR_FEADDR_REG, - (u32) iommu_state[i][DMAR_FEADDR_REG]); + (u32)iommu_state[i][DMAR_FEADDR_REG]); dmar_writel(iommu->reg, DMAR_FEUADDR_REG, - (u32) iommu_state[i][DMAR_FEUADDR_REG]); + (u32)iommu_state[i][DMAR_FEUADDR_REG]); spin_unlock_irqrestore(&iommu->register_lock, flags); iommu_enable_translation(drhd); } } -static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa, +static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa, int indent) { paddr_t address; @@ -2666,7 +2646,7 @@ static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa, pt_vaddr = map_vtd_domain_page(pt_maddr); if ( pt_vaddr == NULL ) { - printk("Failed to map VT-D domain page %"PRIpaddr"\n", pt_maddr); + printk("Failed to map VT-D domain page %" PRIpaddr "\n", pt_maddr); return; } @@ -2681,12 +2661,11 @@ static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa, continue; address = gpa + offset_level_address(i, level); - if ( next_level >= 1 ) - vtd_dump_p2m_table_level(dma_pte_addr(*pte), next_level, - address, indent + 1); + if ( next_level >= 1 ) + vtd_dump_p2m_table_level(dma_pte_addr(*pte), next_level, address, + indent + 1); else - printk("%*sdfn: %08lx mfn: %08lx\n", - indent, "", + printk("%*sdfn: %08lx mfn: %08lx\n", indent, "", (unsigned long)(address >> PAGE_SHIFT_4K), (unsigned long)(dma_pte_addr(*pte) >> PAGE_SHIFT_4K)); } @@ -2703,7 +2682,8 @@ static void vtd_dump_p2m_table(struct domain *d) hd = dom_iommu(d); printk("p2m table has %d levels\n", agaw_to_level(hd->arch.agaw)); - vtd_dump_p2m_table_level(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw), 0, 0); + vtd_dump_p2m_table_level(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw), + 0, 0); } const struct iommu_ops __initconstrel intel_iommu_ops = { @@ -2712,7 +2692,7 @@ const struct iommu_ops __initconstrel intel_iommu_ops = { .add_device = intel_iommu_add_device, .enable_device = intel_iommu_enable_device, .remove_device = intel_iommu_remove_device, - .assign_device = intel_iommu_assign_device, + .assign_device = intel_iommu_assign_device, .teardown = iommu_domain_teardown, .map_page = intel_iommu_map_page, .unmap_page = intel_iommu_unmap_page, diff --git a/xen/drivers/passthrough/vtd/qinval.c b/xen/drivers/passthrough/vtd/qinval.c index e95dc54a8d..06eb58b37a 100644 --- a/xen/drivers/passthrough/vtd/qinval.c +++ b/xen/drivers/passthrough/vtd/qinval.c @@ -17,7 +17,6 @@ * Copyright (C) Xiaohui Xin */ - #include #include #include @@ -29,7 +28,7 @@ #include "extern.h" #include "../ats.h" -#define 
VTD_QI_TIMEOUT 1 +#define VTD_QI_TIMEOUT 1 static int __must_check invalidate_sync(struct iommu *iommu); @@ -38,13 +37,13 @@ static void print_qi_regs(struct iommu *iommu) u64 val; val = dmar_readq(iommu->reg, DMAR_IQA_REG); - printk("DMAR_IQA_REG = %"PRIx64"\n", val); + printk("DMAR_IQA_REG = %" PRIx64 "\n", val); val = dmar_readq(iommu->reg, DMAR_IQH_REG); - printk("DMAR_IQH_REG = %"PRIx64"\n", val); + printk("DMAR_IQH_REG = %" PRIx64 "\n", val); val = dmar_readq(iommu->reg, DMAR_IQT_REG); - printk("DMAR_IQT_REG = %"PRIx64"\n", val); + printk("DMAR_IQT_REG = %" PRIx64 "\n", val); } static unsigned int qinval_next_index(struct iommu *iommu) @@ -55,8 +54,8 @@ static unsigned int qinval_next_index(struct iommu *iommu) tail >>= QINVAL_INDEX_SHIFT; /* (tail+1 == head) indicates a full queue, wait for HW */ - while ( ( tail + 1 ) % QINVAL_ENTRY_NR == - ( dmar_readq(iommu->reg, DMAR_IQH_REG) >> QINVAL_INDEX_SHIFT ) ) + while ( (tail + 1) % QINVAL_ENTRY_NR == + (dmar_readq(iommu->reg, DMAR_IQH_REG) >> QINVAL_INDEX_SHIFT) ) cpu_relax(); return tail; @@ -67,7 +66,7 @@ static void qinval_update_qtail(struct iommu *iommu, unsigned int index) u64 val; /* Need hold register lock when update tail */ - ASSERT( spin_is_locked(&iommu->register_lock) ); + ASSERT(spin_is_locked(&iommu->register_lock)); val = (index + 1) % QINVAL_ENTRY_NR; dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << QINVAL_INDEX_SHIFT)); } @@ -143,8 +142,8 @@ static int __must_check queue_invalidate_iotlb_sync(struct iommu *iommu, return invalidate_sync(iommu); } -static int __must_check queue_invalidate_wait(struct iommu *iommu, - u8 iflag, u8 sw, u8 fn, +static int __must_check queue_invalidate_wait(struct iommu *iommu, u8 iflag, + u8 sw, u8 fn, bool_t flush_dev_iotlb) { volatile u32 poll_slot = QINVAL_STAT_INIT; @@ -179,8 +178,8 @@ static int __must_check queue_invalidate_wait(struct iommu *iommu, s_time_t timeout; /* In case all wait descriptor writes to same addr with same data */ - timeout = NOW() + MILLISECS(flush_dev_iotlb ? - iommu_dev_iotlb_timeout : VTD_QI_TIMEOUT); + timeout = NOW() + MILLISECS(flush_dev_iotlb ? 
iommu_dev_iotlb_timeout + : VTD_QI_TIMEOUT); while ( poll_slot != QINVAL_STAT_DONE ) { @@ -237,8 +236,8 @@ static int __must_check dev_invalidate_sync(struct iommu *iommu, return rc; } -int qinval_device_iotlb_sync(struct iommu *iommu, struct pci_dev *pdev, - u16 did, u16 size, u64 addr) +int qinval_device_iotlb_sync(struct iommu *iommu, struct pci_dev *pdev, u16 did, + u16 size, u64 addr) { unsigned long flags; unsigned int index; @@ -271,8 +270,8 @@ int qinval_device_iotlb_sync(struct iommu *iommu, struct pci_dev *pdev, return dev_invalidate_sync(iommu, pdev, did); } -static int __must_check queue_invalidate_iec_sync(struct iommu *iommu, - u8 granu, u8 im, u16 iidx) +static int __must_check queue_invalidate_iec_sync(struct iommu *iommu, u8 granu, + u8 im, u16 iidx) { unsigned long flags; unsigned int index; @@ -320,8 +319,8 @@ int iommu_flush_iec_index(struct iommu *iommu, u8 im, u16 iidx) return queue_invalidate_iec_sync(iommu, IEC_INDEX_INVL, im, iidx); } -static int __must_check flush_context_qi(void *_iommu, u16 did, - u16 sid, u8 fm, u64 type, +static int __must_check flush_context_qi(void *_iommu, u16 did, u16 sid, u8 fm, + u64 type, bool_t flush_non_present_entry) { struct iommu *iommu = (struct iommu *)_iommu; @@ -374,13 +373,12 @@ static int __must_check flush_iotlb_qi(void *_iommu, u16 did, u64 addr, } /* use queued invalidation */ - if (cap_write_drain(iommu->cap)) + if ( cap_write_drain(iommu->cap) ) dw = 1; - if (cap_read_drain(iommu->cap)) + if ( cap_read_drain(iommu->cap) ) dr = 1; /* Need to conside the ih bit later */ - rc = queue_invalidate_iotlb_sync(iommu, - type >> DMA_TLB_FLUSH_GRANU_OFFSET, + rc = queue_invalidate_iotlb_sync(iommu, type >> DMA_TLB_FLUSH_GRANU_OFFSET, dr, dw, did, size_order, 0, addr); if ( !ret ) ret = rc; @@ -447,8 +445,7 @@ int enable_qinval(struct iommu *iommu) dmar_writel(iommu->reg, DMAR_GCMD_REG, sts | DMA_GCMD_QIE); /* Make sure hardware complete it */ - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, - (sts & DMA_GSTS_QIES), sts); + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, (sts & DMA_GSTS_QIES), sts); spin_unlock_irqrestore(&iommu->register_lock, flags); return 0; @@ -470,8 +467,8 @@ void disable_qinval(struct iommu *iommu) dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_QIE)); /* Make sure hardware complete it */ - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, - !(sts & DMA_GSTS_QIES), sts); + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, !(sts & DMA_GSTS_QIES), + sts); out: spin_unlock_irqrestore(&iommu->register_lock, flags); } diff --git a/xen/drivers/passthrough/vtd/quirks.c b/xen/drivers/passthrough/vtd/quirks.c index d6db862678..572994c39c 100644 --- a/xen/drivers/passthrough/vtd/quirks.c +++ b/xen/drivers/passthrough/vtd/quirks.c @@ -37,20 +37,22 @@ #include "extern.h" #include "vtd.h" -#define IOH_DEV 0 -#define IGD_DEV 2 +#define IOH_DEV 0 +#define IGD_DEV 2 #define IGD_BAR_MASK 0xFFFFFFFFFFFF0000 #define GGC 0x52 -#define GGC_MEMORY_VT_ENABLED (0x8 << 8) +#define GGC_MEMORY_VT_ENABLED (0x8 << 8) -#define IS_CTG(id) (id == 0x2a408086) -#define IS_ILK(id) (id == 0x00408086 || id == 0x00448086 || id== 0x00628086 || id == 0x006A8086) -#define IS_CPT(id) (id == 0x01008086 || id == 0x01048086) +#define IS_CTG(id) (id == 0x2a408086) +#define IS_ILK(id) \ + (id == 0x00408086 || id == 0x00448086 || id == 0x00628086 || \ + id == 0x006A8086) +#define IS_CPT(id) (id == 0x01008086 || id == 0x01048086) /* SandyBridge IGD timeouts in milliseconds */ -#define SNB_IGD_TIMEOUT_LEGACY 1000 -#define SNB_IGD_TIMEOUT 670 +#define 
SNB_IGD_TIMEOUT_LEGACY 1000 +#define SNB_IGD_TIMEOUT 670 static unsigned int snb_igd_timeout; static u32 __read_mostly ioh_id; @@ -75,7 +77,7 @@ int is_igd_vt_enabled_quirk(void) /* integrated graphics on Intel platforms is located at 0:2.0 */ ggc = pci_conf_read16(0, 0, IGD_DEV, 0, GGC); - return ( ggc & GGC_MEMORY_VT_ENABLED ? 1 : 0 ); + return (ggc & GGC_MEMORY_VT_ENABLED ? 1 : 0); } /* @@ -128,16 +130,16 @@ static void __init map_igd_reg(void) if ( igd_reg_va ) return; - igd_mmio = pci_conf_read32(0, 0, IGD_DEV, 0, PCI_BASE_ADDRESS_1); + igd_mmio = pci_conf_read32(0, 0, IGD_DEV, 0, PCI_BASE_ADDRESS_1); igd_mmio <<= 32; - igd_mmio += pci_conf_read32(0, 0, IGD_DEV, 0, PCI_BASE_ADDRESS_0); + igd_mmio += pci_conf_read32(0, 0, IGD_DEV, 0, PCI_BASE_ADDRESS_0); igd_reg_va = ioremap(igd_mmio & IGD_BAR_MASK, 0x3000); } /* * force IGD to exit low power mode by accessing a IGD 3D regsiter. */ -static int cantiga_vtd_ops_preamble(struct iommu* iommu) +static int cantiga_vtd_ops_preamble(struct iommu *iommu) { struct intel_iommu *intel = iommu->intel; struct acpi_drhd_unit *drhd = intel ? intel->drhd : NULL; @@ -172,7 +174,7 @@ static int cantiga_vtd_ops_preamble(struct iommu* iommu) * parameter to a numerical value enables the quirk and * sets the timeout to that numerical number of msecs. */ -static void snb_vtd_ops_preamble(struct iommu* iommu) +static void snb_vtd_ops_preamble(struct iommu *iommu) { struct intel_iommu *intel = iommu->intel; struct acpi_drhd_unit *drhd = intel ? intel->drhd : NULL; @@ -202,7 +204,7 @@ static void snb_vtd_ops_preamble(struct iommu* iommu) *(volatile u32 *)(igd_reg_va + 0x2050) = 0x10001; } -static void snb_vtd_ops_postamble(struct iommu* iommu) +static void snb_vtd_ops_postamble(struct iommu *iommu) { struct intel_iommu *intel = iommu->intel; struct acpi_drhd_unit *drhd = intel ? intel->drhd : NULL; @@ -221,7 +223,7 @@ static void snb_vtd_ops_postamble(struct iommu* iommu) * call before VT-d translation enable and IOTLB flush operations. */ -void vtd_ops_preamble_quirk(struct iommu* iommu) +void vtd_ops_preamble_quirk(struct iommu *iommu) { cantiga_vtd_ops_preamble(iommu); if ( snb_igd_timeout != 0 ) @@ -236,7 +238,7 @@ void vtd_ops_preamble_quirk(struct iommu* iommu) /* * call after VT-d translation enable and IOTLB flush operations. */ -void vtd_ops_postamble_quirk(struct iommu* iommu) +void vtd_ops_postamble_quirk(struct iommu *iommu) { if ( snb_igd_timeout != 0 ) { @@ -286,7 +288,8 @@ static void __init tylersburg_intremap_quirk(void) if ( rev == 0x13 && device == 0x342e8086 ) { printk(XENLOG_WARNING VTDPREFIX - "Disabling IOMMU due to Intel 5500/5520/X58 Chipset errata #47, #53\n"); + "Disabling IOMMU due to Intel 5500/5520/X58 Chipset errata " + "#47, #53\n"); iommu_enable = 0; break; } @@ -329,8 +332,8 @@ void __init platform_quirks_init(void) * assigning Intel integrated wifi device to a guest. 
*/ -static int __must_check map_me_phantom_function(struct domain *domain, - u32 dev, int map) +static int __must_check map_me_phantom_function(struct domain *domain, u32 dev, + int map) { struct acpi_drhd_unit *drhd; struct pci_dev *pdev; @@ -345,8 +348,8 @@ static int __must_check map_me_phantom_function(struct domain *domain, rc = domain_context_mapping_one(domain, drhd->iommu, 0, PCI_DEVFN(dev, 7), NULL); else - rc = domain_context_unmap_one(domain, drhd->iommu, 0, - PCI_DEVFN(dev, 7)); + rc = + domain_context_unmap_one(domain, drhd->iommu, 0, PCI_DEVFN(dev, 7)); return rc; } @@ -367,18 +370,18 @@ int me_wifi_quirk(struct domain *domain, u8 bus, u8 devfn, int map) id = pci_conf_read32(0, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), 0); switch (id) { - case 0x42328086: - case 0x42358086: - case 0x42368086: - case 0x42378086: - case 0x423a8086: - case 0x423b8086: - case 0x423c8086: - case 0x423d8086: - rc = map_me_phantom_function(domain, 3, map); - break; - default: - break; + case 0x42328086: + case 0x42358086: + case 0x42368086: + case 0x42378086: + case 0x423a8086: + case 0x423b8086: + case 0x423c8086: + case 0x423d8086: + rc = map_me_phantom_function(domain, 3, map); + break; + default: + break; } } else if ( IS_ILK(id) || IS_CPT(id) ) @@ -391,20 +394,20 @@ int me_wifi_quirk(struct domain *domain, u8 bus, u8 devfn, int map) id = pci_conf_read32(0, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), 0); switch (id) { - case 0x00878086: /* Kilmer Peak */ - case 0x00898086: - case 0x00828086: /* Taylor Peak */ - case 0x00858086: - case 0x008F8086: /* Rainbow Peak */ - case 0x00908086: - case 0x00918086: - case 0x42388086: /* Puma Peak */ - case 0x422b8086: - case 0x422c8086: - rc = map_me_phantom_function(domain, 22, map); - break; - default: - break; + case 0x00878086: /* Kilmer Peak */ + case 0x00898086: + case 0x00828086: /* Taylor Peak */ + case 0x00858086: + case 0x008F8086: /* Rainbow Peak */ + case 0x00908086: + case 0x00918086: + case 0x42388086: /* Puma Peak */ + case 0x422b8086: + case 0x422c8086: + rc = map_me_phantom_function(domain, 22, map); + break; + default: + break; } } @@ -428,7 +431,7 @@ void pci_vtd_quirk(const struct pci_dev *pdev) PCI_VENDOR_ID_INTEL ) return; - switch ( pci_conf_read16(seg, bus, dev, func, PCI_DEVICE_ID) ) + switch (pci_conf_read16(seg, bus, dev, func, PCI_DEVICE_ID)) { /* * Mask reporting Intel VT-d faults to IOH core logic: @@ -447,23 +450,26 @@ void pci_vtd_quirk(const struct pci_dev *pdev) /* Tylersburg (EP)/Boxboro (MP) chipsets (NHM-EP/EX, WSM-EP/EX) */ case 0x3400 ... 0x3407: /* host bridges */ - case 0x3408 ... 0x3411: case 0x3420 ... 0x3421: /* root ports */ + case 0x3408 ... 0x3411: + case 0x3420 ... 0x3421: /* root ports */ /* JasperForest (Intel Xeon Processor C5500/C3500 */ case 0x3700 ... 0x370f: /* host bridges */ case 0x3720 ... 0x3724: /* root ports */ /* Sandybridge-EP (Romley) */ - case 0x3c00: /* host bridge */ + case 0x3c00: /* host bridge */ case 0x3c01 ... 
0x3c0b: /* root ports */ - pos = pci_find_ext_capability(seg, bus, pdev->devfn, - PCI_EXT_CAP_ID_ERR); + pos = + pci_find_ext_capability(seg, bus, pdev->devfn, PCI_EXT_CAP_ID_ERR); if ( !pos ) { pos = pci_find_ext_capability(seg, bus, pdev->devfn, PCI_EXT_CAP_ID_VNDR); while ( pos ) { - val = pci_conf_read32(seg, bus, dev, func, pos + PCI_VNDR_HEADER); - if ( PCI_VNDR_HEADER_ID(val) == 4 && PCI_VNDR_HEADER_REV(val) == 1 ) + val = + pci_conf_read32(seg, bus, dev, func, pos + PCI_VNDR_HEADER); + if ( PCI_VNDR_HEADER_ID(val) == 4 && + PCI_VNDR_HEADER_REV(val) == 1 ) { pos += PCI_VNDR_HEADER; break; @@ -501,20 +507,46 @@ void pci_vtd_quirk(const struct pci_dev *pdev) val = pci_conf_read32(seg, bus, dev, func, 0x20c); pci_conf_write32(seg, bus, dev, func, 0x20c, val | (1 << 4)); - printk(XENLOG_INFO "%s UR signaling on %04x:%02x:%02x.%u\n", - action, seg, bus, dev, func); + printk(XENLOG_INFO "%s UR signaling on %04x:%02x:%02x.%u\n", action, + seg, bus, dev, func); break; - case 0x0040: case 0x0044: case 0x0048: /* Nehalem/Westmere */ - case 0x0100: case 0x0104: case 0x0108: /* Sandybridge */ - case 0x0150: case 0x0154: case 0x0158: /* Ivybridge */ - case 0x0a00: case 0x0a04: case 0x0a08: case 0x0a0f: /* Haswell ULT */ - case 0x0c00: case 0x0c04: case 0x0c08: case 0x0c0f: /* Haswell */ - case 0x0d00: case 0x0d04: case 0x0d08: case 0x0d0f: /* Haswell */ - case 0x1600: case 0x1604: case 0x1608: case 0x160f: /* Broadwell */ - case 0x1610: case 0x1614: case 0x1618: /* Broadwell */ - case 0x1900: case 0x1904: case 0x1908: case 0x190c: case 0x190f: /* Skylake */ - case 0x1910: case 0x1918: case 0x191f: /* Skylake */ + case 0x0040: + case 0x0044: + case 0x0048: /* Nehalem/Westmere */ + case 0x0100: + case 0x0104: + case 0x0108: /* Sandybridge */ + case 0x0150: + case 0x0154: + case 0x0158: /* Ivybridge */ + case 0x0a00: + case 0x0a04: + case 0x0a08: + case 0x0a0f: /* Haswell ULT */ + case 0x0c00: + case 0x0c04: + case 0x0c08: + case 0x0c0f: /* Haswell */ + case 0x0d00: + case 0x0d04: + case 0x0d08: + case 0x0d0f: /* Haswell */ + case 0x1600: + case 0x1604: + case 0x1608: + case 0x160f: /* Broadwell */ + case 0x1610: + case 0x1614: + case 0x1618: /* Broadwell */ + case 0x1900: + case 0x1904: + case 0x1908: + case 0x190c: + case 0x190f: /* Skylake */ + case 0x1910: + case 0x1918: + case 0x191f: /* Skylake */ bar = pci_conf_read32(seg, bus, dev, func, 0x6c); bar = (bar << 32) | pci_conf_read32(seg, bus, dev, func, 0x68); pa = bar & 0x7ffffff000UL; /* bits 12...38 */ @@ -531,11 +563,13 @@ void pci_vtd_quirk(const struct pci_dev *pdev) seg, bus, dev, func); } else - printk(XENLOG_ERR "Could not map %"PRIpaddr" for %04x:%02x:%02x.%u\n", + printk(XENLOG_ERR "Could not map %" PRIpaddr + " for %04x:%02x:%02x.%u\n", pa, seg, bus, dev, func); } else - printk(XENLOG_WARNING "Bogus DMIBAR %#"PRIx64" on %04x:%02x:%02x.%u\n", + printk(XENLOG_WARNING "Bogus DMIBAR %#" PRIx64 + " on %04x:%02x:%02x.%u\n", bar, seg, bus, dev, func); break; } diff --git a/xen/drivers/passthrough/vtd/utils.c b/xen/drivers/passthrough/vtd/utils.c index 85e0f41d85..33e7f2a2c4 100644 --- a/xen/drivers/passthrough/vtd/utils.c +++ b/xen/drivers/passthrough/vtd/utils.c @@ -41,12 +41,10 @@ void disable_pmr(struct iommu *iommu) spin_lock_irqsave(&iommu->register_lock, flags); dmar_writel(iommu->reg, DMAR_PMEN_REG, val & ~DMA_PMEN_EPM); - IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, dmar_readl, - !(val & DMA_PMEN_PRS), val); + IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, dmar_readl, !(val & DMA_PMEN_PRS), val); spin_unlock_irqrestore(&iommu->register_lock, flags); 
- dprintk(XENLOG_INFO VTDPREFIX, - "Disabled protected memory registers\n"); + dprintk(XENLOG_INFO VTDPREFIX, "Disabled protected memory registers\n"); } void print_iommu_regs(struct acpi_drhd_unit *drhd) @@ -55,23 +53,24 @@ void print_iommu_regs(struct acpi_drhd_unit *drhd) u64 cap; printk("---- print_iommu_regs ----\n"); - printk(" drhd->address = %"PRIx64"\n", drhd->address); + printk(" drhd->address = %" PRIx64 "\n", drhd->address); printk(" VER = %x\n", dmar_readl(iommu->reg, DMAR_VER_REG)); - printk(" CAP = %"PRIx64"\n", cap = dmar_readq(iommu->reg, DMAR_CAP_REG)); - printk(" n_fault_reg = %"PRIx64"\n", cap_num_fault_regs(cap)); - printk(" fault_recording_offset = %"PRIx64"\n", cap_fault_reg_offset(cap)); + printk(" CAP = %" PRIx64 "\n", cap = dmar_readq(iommu->reg, DMAR_CAP_REG)); + printk(" n_fault_reg = %" PRIx64 "\n", cap_num_fault_regs(cap)); + printk(" fault_recording_offset = %" PRIx64 "\n", + cap_fault_reg_offset(cap)); if ( cap_fault_reg_offset(cap) < PAGE_SIZE ) { - printk(" fault_recording_reg_l = %"PRIx64"\n", + printk(" fault_recording_reg_l = %" PRIx64 "\n", dmar_readq(iommu->reg, cap_fault_reg_offset(cap))); - printk(" fault_recording_reg_h = %"PRIx64"\n", + printk(" fault_recording_reg_h = %" PRIx64 "\n", dmar_readq(iommu->reg, cap_fault_reg_offset(cap) + 8)); } - printk(" ECAP = %"PRIx64"\n", dmar_readq(iommu->reg, DMAR_ECAP_REG)); + printk(" ECAP = %" PRIx64 "\n", dmar_readq(iommu->reg, DMAR_ECAP_REG)); printk(" GCMD = %x\n", dmar_readl(iommu->reg, DMAR_GCMD_REG)); printk(" GSTS = %x\n", dmar_readl(iommu->reg, DMAR_GSTS_REG)); - printk(" RTADDR = %"PRIx64"\n", dmar_readq(iommu->reg,DMAR_RTADDR_REG)); - printk(" CCMD = %"PRIx64"\n", dmar_readq(iommu->reg, DMAR_CCMD_REG)); + printk(" RTADDR = %" PRIx64 "\n", dmar_readq(iommu->reg, DMAR_RTADDR_REG)); + printk(" CCMD = %" PRIx64 "\n", dmar_readq(iommu->reg, DMAR_CCMD_REG)); printk(" FSTS = %x\n", dmar_readl(iommu->reg, DMAR_FSTS_REG)); printk(" FECTL = %x\n", dmar_readl(iommu->reg, DMAR_FECTL_REG)); printk(" FEDATA = %x\n", dmar_readl(iommu->reg, DMAR_FEDATA_REG)); @@ -95,9 +94,10 @@ void print_vtd_entries(struct iommu *iommu, int bus, int devfn, u64 gmfn) u64 *l, val; u32 l_index, level; - printk("print_vtd_entries: iommu #%u dev %04x:%02x:%02x.%u gmfn %"PRI_gfn"\n", - iommu->index, iommu->intel->drhd->segment, bus, - PCI_SLOT(devfn), PCI_FUNC(devfn), gmfn); + printk("print_vtd_entries: iommu #%u dev %04x:%02x:%02x.%u gmfn %" PRI_gfn + "\n", + iommu->index, iommu->intel->drhd->segment, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn), gmfn); if ( iommu->root_maddr == 0 ) { @@ -112,7 +112,7 @@ void print_vtd_entries(struct iommu *iommu, int bus, int devfn, u64 gmfn) return; } - printk(" root_entry[%02x] = %"PRIx64"\n", bus, root_entry[bus].val); + printk(" root_entry[%02x] = %" PRIx64 "\n", bus, root_entry[bus].val); if ( !root_present(root_entry[bus]) ) { unmap_vtd_domain_page(root_entry); @@ -130,8 +130,8 @@ void print_vtd_entries(struct iommu *iommu, int bus, int devfn, u64 gmfn) } val = ctxt_entry[devfn].lo; - printk(" context[%02x] = %"PRIx64"_%"PRIx64"\n", - devfn, ctxt_entry[devfn].hi, val); + printk(" context[%02x] = %" PRIx64 "_%" PRIx64 "\n", devfn, + ctxt_entry[devfn].hi, val); if ( !context_present(ctxt_entry[devfn]) ) { unmap_vtd_domain_page(ctxt_entry); @@ -141,15 +141,13 @@ void print_vtd_entries(struct iommu *iommu, int bus, int devfn, u64 gmfn) level = agaw_to_level(context_address_width(ctxt_entry[devfn])); unmap_vtd_domain_page(ctxt_entry); - if ( level != VTD_PAGE_TABLE_LEVEL_3 && - level != 
VTD_PAGE_TABLE_LEVEL_4) + if ( level != VTD_PAGE_TABLE_LEVEL_3 && level != VTD_PAGE_TABLE_LEVEL_4 ) { printk("Unsupported VTD page table level (%d)!\n", level); return; } - do - { + do { l = map_vtd_domain_page(val); if ( l == NULL ) { @@ -159,7 +157,7 @@ void print_vtd_entries(struct iommu *iommu, int bus, int devfn, u64 gmfn) l_index = get_level_index(gmfn, level); pte.val = l[l_index]; unmap_vtd_domain_page(l); - printk(" l%u[%03x] = %"PRIx64"\n", level, l_index, pte.val); + printk(" l%u[%03x] = %" PRIx64 "\n", level, l_index, pte.val); if ( !dma_pte_present(pte) ) { @@ -178,25 +176,24 @@ void vtd_dump_iommu_info(unsigned char key) struct iommu *iommu; int i; - for_each_drhd_unit ( drhd ) + for_each_drhd_unit (drhd) { u32 status = 0; iommu = drhd->iommu; printk("\niommu %x: nr_pt_levels = %x.\n", iommu->index, - iommu->nr_pt_levels); + iommu->nr_pt_levels); - if ( ecap_queued_inval(iommu->ecap) || ecap_intr_remap(iommu->ecap) ) + if ( ecap_queued_inval(iommu->ecap) || ecap_intr_remap(iommu->ecap) ) status = dmar_readl(iommu->reg, DMAR_GSTS_REG); printk(" Queued Invalidation: %ssupported%s.\n", - ecap_queued_inval(iommu->ecap) ? "" : "not ", - (status & DMA_GSTS_QIES) ? " and enabled" : "" ); - + ecap_queued_inval(iommu->ecap) ? "" : "not ", + (status & DMA_GSTS_QIES) ? " and enabled" : ""); printk(" Interrupt Remapping: %ssupported%s.\n", - ecap_intr_remap(iommu->ecap) ? "" : "not ", - (status & DMA_GSTS_IRES) ? " and enabled" : "" ); + ecap_intr_remap(iommu->ecap) ? "" : "not ", + (status & DMA_GSTS_IRES) ? " and enabled" : ""); printk(" Interrupt Posting: %ssupported.\n", cap_intr_post(iommu->cap) ? "" : "not "); @@ -210,10 +207,13 @@ void vtd_dump_iommu_info(unsigned char key) int print_cnt = 0; printk(" Interrupt remapping table (nr_entry=%#x. 
" - "Only dump P=1 entries here):\n", nr_entry); + "Only dump P=1 entries here):\n", + nr_entry); printk("R means remapped format, P means posted format.\n"); - printk("R: SVT SQ SID V AVL FPD DST DLM TM RH DM P\n"); - printk("P: SVT SQ SID V AVL FPD PDA URG P\n"); + printk( + "R: SVT SQ SID V AVL FPD DST DLM TM RH DM P\n"); + printk( + "P: SVT SQ SID V AVL FPD PDA URG P\n"); for ( i = 0; i < nr_entry; i++ ) { struct iremap_entry *p; @@ -223,8 +223,7 @@ void vtd_dump_iommu_info(unsigned char key) if ( iremap_entries ) unmap_vtd_domain_page(iremap_entries); - GET_IREMAP_ENTRY(iremap_maddr, i, - iremap_entries, p); + GET_IREMAP_ENTRY(iremap_maddr, i, iremap_entries, p); } else p = &iremap_entries[i % (1 << IREMAP_ENTRY_ORDER)]; @@ -232,17 +231,17 @@ void vtd_dump_iommu_info(unsigned char key) if ( !p->remap.p ) continue; if ( !p->remap.im ) - printk("R: %04x: %x %x %04x %02x %x %x %08x %x %x %x %x %x\n", - i, - p->remap.svt, p->remap.sq, p->remap.sid, + printk("R: %04x: %x %x %04x %02x %x %x %08x %x " + " %x %x %x %x\n", + i, p->remap.svt, p->remap.sq, p->remap.sid, p->remap.vector, p->remap.avail, p->remap.fpd, p->remap.dst, p->remap.dlm, p->remap.tm, p->remap.rh, p->remap.dm, p->remap.p); else - printk("P: %04x: %x %x %04x %02x %x %x %16lx %x %x\n", - i, - p->post.svt, p->post.sq, p->post.sid, p->post.vector, - p->post.avail, p->post.fpd, + printk("P: %04x: %x %x %04x %02x %x %x %16lx " + "%x %x\n", + i, p->post.svt, p->post.sq, p->post.sid, + p->post.vector, p->post.avail, p->post.fpd, ((u64)p->post.pda_h << 32) | (p->post.pda_l << 6), p->post.urg, p->post.p); @@ -252,8 +251,7 @@ void vtd_dump_iommu_info(unsigned char key) unmap_vtd_domain_page(iremap_entries); if ( iommu_ir_ctrl(iommu)->iremap_num != print_cnt ) printk("Warning: Print %d IRTE (actually have %d)!\n", - print_cnt, iommu_ir_ctrl(iommu)->iremap_num); - + print_cnt, iommu_ir_ctrl(iommu)->iremap_num); } } @@ -272,7 +270,7 @@ void vtd_dump_iommu_info(unsigned char key) if ( !ir_ctrl || !ir_ctrl->iremap_maddr || !ir_ctrl->iremap_num ) continue; - printk( "\nRedirection table of IOAPIC %x:\n", apic); + printk("\nRedirection table of IOAPIC %x:\n", apic); /* IO xAPIC Version Register. 
*/ reg_01.raw = __io_apic_read(apic, 1); @@ -283,16 +281,16 @@ void vtd_dump_iommu_info(unsigned char key) struct IO_APIC_route_entry rte = __ioapic_read_entry(apic, i, TRUE); - remap = (struct IO_APIC_route_remap_entry *) &rte; + remap = (struct IO_APIC_route_remap_entry *)&rte; if ( !remap->format ) continue; printk(" %02x: %04x %x %x %x %x %x %x" - " %x %02x\n", i, - remap->index_0_14 | (remap->index_15 << 15), - remap->format, remap->mask, remap->trigger, remap->irr, - remap->polarity, remap->delivery_status, remap->delivery_mode, - remap->vector); + " %x %02x\n", + i, remap->index_0_14 | (remap->index_15 << 15), + remap->format, remap->mask, remap->trigger, remap->irr, + remap->polarity, remap->delivery_status, + remap->delivery_mode, remap->vector); } } } diff --git a/xen/drivers/passthrough/vtd/x86/ats.c b/xen/drivers/passthrough/vtd/x86/ats.c index 1a3adb4acb..024c21eb85 100644 --- a/xen/drivers/passthrough/vtd/x86/ats.c +++ b/xen/drivers/passthrough/vtd/x86/ats.c @@ -30,10 +30,10 @@ static LIST_HEAD(ats_dev_drhd_units); -struct acpi_drhd_unit * find_ats_dev_drhd(struct iommu *iommu) +struct acpi_drhd_unit *find_ats_dev_drhd(struct iommu *iommu) { struct acpi_drhd_unit *drhd; - list_for_each_entry ( drhd, &ats_dev_drhd_units, list ) + list_for_each_entry (drhd, &ats_dev_drhd_units, list) { if ( drhd->iommu == iommu ) return drhd; @@ -78,12 +78,12 @@ static int device_in_domain(const struct iommu *iommu, struct context_entry *ctxt_entry = NULL; int tt, found = 0; - root_entry = (struct root_entry *) map_vtd_domain_page(iommu->root_maddr); + root_entry = (struct root_entry *)map_vtd_domain_page(iommu->root_maddr); if ( !root_entry || !root_present(root_entry[pdev->bus]) ) goto out; - ctxt_entry = (struct context_entry *) - map_vtd_domain_page(root_entry[pdev->bus].val); + ctxt_entry = + (struct context_entry *)map_vtd_domain_page(root_entry[pdev->bus].val); if ( ctxt_entry == NULL ) goto out; @@ -106,8 +106,8 @@ out: return found; } -int dev_invalidate_iotlb(struct iommu *iommu, u16 did, - u64 addr, unsigned int size_order, u64 type) +int dev_invalidate_iotlb(struct iommu *iommu, u16 did, u64 addr, + unsigned int size_order, u64 type) { struct pci_dev *pdev, *temp; int ret = 0; @@ -115,12 +115,12 @@ int dev_invalidate_iotlb(struct iommu *iommu, u16 did, if ( !ecap_dev_iotlb(iommu->ecap) ) return ret; - list_for_each_entry_safe( pdev, temp, &iommu->ats_devices, ats.list ) + list_for_each_entry_safe(pdev, temp, &iommu->ats_devices, ats.list) { bool_t sbit; int rc = 0; - switch ( type ) + switch (type) { case DMA_TLB_DSI_FLUSH: if ( !device_in_domain(iommu, pdev, did) ) diff --git a/xen/drivers/passthrough/vtd/x86/hvm.c b/xen/drivers/passthrough/vtd/x86/hvm.c index 6675dca027..13d5a65cca 100644 --- a/xen/drivers/passthrough/vtd/x86/hvm.c +++ b/xen/drivers/passthrough/vtd/x86/hvm.c @@ -28,7 +28,7 @@ static int _hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq = (long)arg; const struct dev_intx_gsi_link *digl; - list_for_each_entry ( digl, &pirq_dpci->digl_list, list ) + list_for_each_entry (digl, &pirq_dpci->digl_list, list) { unsigned int link = hvm_pci_intx_link(digl->device, digl->intx); diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c index ff456e1e70..8344e4ca6b 100644 --- a/xen/drivers/passthrough/vtd/x86/vtd.c +++ b/xen/drivers/passthrough/vtd/x86/vtd.c @@ -51,7 +51,7 @@ unsigned int get_cache_line_size(void) return ((cpuid_ebx(1) >> 8) & 0xff) * 8; } -void cacheline_flush(char * addr) +void cacheline_flush(char *addr) { 
clflush(addr); } diff --git a/xen/drivers/passthrough/x86/ats.c b/xen/drivers/passthrough/x86/ats.c index 59c163459a..833cd1d040 100644 --- a/xen/drivers/passthrough/x86/ats.c +++ b/xen/drivers/passthrough/x86/ats.c @@ -31,16 +31,16 @@ int enable_ats_device(struct pci_dev *pdev, struct list_head *ats_list) BUG_ON(!pos); if ( iommu_verbose ) - dprintk(XENLOG_INFO, "%04x:%02x:%02x.%u: ATS capability found\n", - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); + dprintk(XENLOG_INFO, "%04x:%02x:%02x.%u: ATS capability found\n", seg, + bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); - value = pci_conf_read16(seg, bus, PCI_SLOT(devfn), - PCI_FUNC(devfn), pos + ATS_REG_CTL); + value = pci_conf_read16(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), + pos + ATS_REG_CTL); if ( value & ATS_ENABLE ) { struct pci_dev *other; - list_for_each_entry ( other, ats_list, ats.list ) + list_for_each_entry (other, ats_list, ats.list) if ( other == pdev ) { pos = 0; @@ -58,17 +58,16 @@ int enable_ats_device(struct pci_dev *pdev, struct list_head *ats_list) if ( pos ) { pdev->ats.cap_pos = pos; - value = pci_conf_read16(seg, bus, PCI_SLOT(devfn), - PCI_FUNC(devfn), pos + ATS_REG_CAP); - pdev->ats.queue_depth = value & ATS_QUEUE_DEPTH_MASK ?: - ATS_QUEUE_DEPTH_MASK + 1; + value = pci_conf_read16(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), + pos + ATS_REG_CAP); + pdev->ats.queue_depth = + value & ATS_QUEUE_DEPTH_MASK ?: ATS_QUEUE_DEPTH_MASK + 1; list_add(&pdev->ats.list, ats_list); } if ( iommu_verbose ) - dprintk(XENLOG_INFO, "%04x:%02x:%02x.%u: ATS %s enabled\n", - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), - pos ? "is" : "was"); + dprintk(XENLOG_INFO, "%04x:%02x:%02x.%u: ATS %s enabled\n", seg, bus, + PCI_SLOT(devfn), PCI_FUNC(devfn), pos ? "is" : "was"); return pos; } @@ -90,6 +89,6 @@ void disable_ats_device(struct pci_dev *pdev) list_del(&pdev->ats.list); if ( iommu_verbose ) - dprintk(XENLOG_INFO, "%04x:%02x:%02x.%u: ATS is disabled\n", - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); + dprintk(XENLOG_INFO, "%04x:%02x:%02x.%u: ATS is disabled\n", seg, bus, + PCI_SLOT(devfn), PCI_FUNC(devfn)); } diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c index bd6529d419..5bb70a7b82 100644 --- a/xen/drivers/passthrough/x86/iommu.c +++ b/xen/drivers/passthrough/x86/iommu.c @@ -25,8 +25,8 @@ struct iommu_ops iommu_ops; -void iommu_update_ire_from_apic( - unsigned int apic, unsigned int reg, unsigned int value) +void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, + unsigned int value) { const struct iommu_ops *ops = iommu_get_ops(); ops->update_ire_from_apic(apic, reg, value); @@ -57,7 +57,7 @@ int arch_iommu_populate_page_table(struct domain *d) while ( !rc && (page = page_list_remove_head(&d->page_list)) ) { if ( is_hvm_domain(d) || - (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page ) + (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page ) { unsigned long mfn = mfn_x(page_to_mfn(page)); unsigned long gfn = mfn_to_gmfn(d, mfn); @@ -68,8 +68,7 @@ int arch_iommu_populate_page_table(struct domain *d) ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH)); BUG_ON(SHARED_M2P(gfn)); rc = iommu_map(d, _dfn(gfn), _mfn(mfn), PAGE_ORDER_4K, - IOMMUF_readable | IOMMUF_writable, - &flush_flags); + IOMMUF_readable | IOMMUF_writable, &flush_flags); /* * We may be working behind the back of a running guest, which @@ -116,7 +115,7 @@ int arch_iommu_populate_page_table(struct domain *d) page_list_move(&d->page_list, &d->arch.relmem_list); while ( !page_list_empty(&d->page_list) && 
(page = page_list_first(&d->page_list), - (page->count_info & (PGC_state|PGC_broken))) ) + (page->count_info & (PGC_state | PGC_broken))) ) { page_list_del(page, &d->page_list); page_list_add_tail(page, &d->arch.relmem_list); @@ -130,8 +129,8 @@ int arch_iommu_populate_page_table(struct domain *d) * flush_flags are not tracked across hypercall pre-emption so * assume a full flush is necessary. */ - rc = iommu_iotlb_flush_all( - d, IOMMU_FLUSHF_added | IOMMU_FLUSHF_modified); + rc = iommu_iotlb_flush_all(d, + IOMMU_FLUSHF_added | IOMMU_FLUSHF_modified); if ( rc && rc != -ERESTART ) iommu_teardown(d); @@ -175,7 +174,7 @@ static bool __hwdom_init hwdom_iommu_map(const struct domain *d, if ( (pfn > max_pfn && !mfn_valid(mfn)) || xen_in_range(pfn) ) return false; - switch ( type = page_get_ram_type(mfn) ) + switch (type = page_get_ram_type(mfn)) { case RAM_TYPE_UNUSABLE: return false; @@ -204,7 +203,7 @@ static bool __hwdom_init hwdom_iommu_map(const struct domain *d, { const struct vcpu *v; - for_each_vcpu(d, v) + for_each_vcpu (d, v) if ( pfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(v))) ) return false; } @@ -236,8 +235,8 @@ void __hwdom_init arch_iommu_hwdom_init(struct domain *d) if ( iommu_hwdom_inclusive ) { - printk(XENLOG_WARNING - "IOMMU inclusive mappings are deprecated and will be removed in future versions\n"); + printk(XENLOG_WARNING "IOMMU inclusive mappings are deprecated and " + "will be removed in future versions\n"); if ( !is_pv_domain(d) ) { @@ -271,7 +270,7 @@ void __hwdom_init arch_iommu_hwdom_init(struct domain *d) printk(XENLOG_WARNING " d%d: IOMMU mapping failed: %d\n", d->domain_id, rc); - if (!(i & 0xfffff)) + if ( !(i & 0xfffff) ) process_pending_softirqs(); } diff --git a/xen/drivers/pci/pci.c b/xen/drivers/pci/pci.c index 1c808d6632..f638dfca6a 100644 --- a/xen/drivers/pci/pci.c +++ b/xen/drivers/pci/pci.c @@ -88,7 +88,8 @@ int pci_find_ext_capability(int seg, int bus, int devfn, int cap) * within the device's PCI configuration space or 0 if the device does * not support it. */ -int pci_find_next_ext_capability(int seg, int bus, int devfn, int start, int cap) +int pci_find_next_ext_capability(int seg, int bus, int devfn, int start, + int cap) { u32 header; int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */ @@ -104,13 +105,15 @@ int pci_find_next_ext_capability(int seg, int bus, int devfn, int start, int cap return 0; ASSERT(start != pos || PCI_EXT_CAP_ID(header) == cap); - while ( ttl-- > 0 ) { + while ( ttl-- > 0 ) + { if ( PCI_EXT_CAP_ID(header) == cap && pos != start ) return pos; pos = PCI_EXT_CAP_NEXT(header); if ( pos < 0x100 ) break; - header = pci_conf_read32(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos); + header = + pci_conf_read32(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos); } return 0; } @@ -166,8 +169,7 @@ const char *__init parse_pci_seg(const char *s, unsigned int *seg_p, } else func = 0; - if ( seg != (seg_p ? (u16)seg : 0) || - bus != PCI_BUS(PCI_BDF2(bus, 0)) || + if ( seg != (seg_p ? 
(u16)seg : 0) || bus != PCI_BUS(PCI_BDF2(bus, 0)) || dev != PCI_SLOT(PCI_DEVFN(dev, 0)) || func != PCI_FUNC(PCI_DEVFN(0, func)) ) return NULL; diff --git a/xen/drivers/video/font_8x14.c b/xen/drivers/video/font_8x14.c index e80d178c38..ccc89662c2 100644 --- a/xen/drivers/video/font_8x14.c +++ b/xen/drivers/video/font_8x14.c @@ -7,7 +7,7 @@ #include #include "font.h" -#define FONTDATAMAX (256*14) +#define FONTDATAMAX (256 * 14) static const unsigned char fontdata_8x14[FONTDATAMAX] = { @@ -4110,9 +4110,4 @@ static const unsigned char fontdata_8x14[FONTDATAMAX] = { }; const struct font_desc font_vga_8x14 = { - "VGA8x14", - 8, - 14, - sizeof(fontdata_8x14) / 14, - fontdata_8x14 -}; + "VGA8x14", 8, 14, sizeof(fontdata_8x14) / 14, fontdata_8x14}; diff --git a/xen/drivers/video/font_8x16.c b/xen/drivers/video/font_8x16.c index c65f98dcbb..b3d380ccd5 100644 --- a/xen/drivers/video/font_8x16.c +++ b/xen/drivers/video/font_8x16.c @@ -7,7 +7,7 @@ #include #include "font.h" -#define FONTDATAMAX (256*16) +#define FONTDATAMAX (256 * 16) static const unsigned char fontdata_8x16[FONTDATAMAX] = { @@ -4622,9 +4622,4 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = { }; const struct font_desc font_vga_8x16 = { - "VGA8x16", - 8, - 16, - sizeof(fontdata_8x16) / 16, - fontdata_8x16 -}; + "VGA8x16", 8, 16, sizeof(fontdata_8x16) / 16, fontdata_8x16}; diff --git a/xen/drivers/video/font_8x8.c b/xen/drivers/video/font_8x8.c index 9441429b42..10377c200b 100644 --- a/xen/drivers/video/font_8x8.c +++ b/xen/drivers/video/font_8x8.c @@ -7,7 +7,7 @@ #include #include "font.h" -#define FONTDATAMAX (256*8) +#define FONTDATAMAX (256 * 8) static const unsigned char fontdata_8x8[FONTDATAMAX] = { @@ -2573,10 +2573,5 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = { }; -const struct font_desc font_vga_8x8 = { - "VGA8x8", - 8, - 8, - sizeof(fontdata_8x8) / 8, - fontdata_8x8 -}; +const struct font_desc font_vga_8x8 = {"VGA8x8", 8, 8, sizeof(fontdata_8x8) / 8, + fontdata_8x8}; diff --git a/xen/drivers/video/lfb.c b/xen/drivers/video/lfb.c index d0c8c492b0..b6ad61a33a 100644 --- a/xen/drivers/video/lfb.c +++ b/xen/drivers/video/lfb.c @@ -16,7 +16,8 @@ #define MAX_FONT_W 8 #define MAX_FONT_H 16 -struct lfb_status { +struct lfb_status +{ struct lfb_prop lfbp; unsigned char *lbuf, *text_buf; @@ -25,11 +26,9 @@ struct lfb_status { }; static struct lfb_status lfb; -static void lfb_show_line( - const unsigned char *text_line, - unsigned char *video_line, - unsigned int nr_chars, - unsigned int nr_cells) +static void lfb_show_line(const unsigned char *text_line, + unsigned char *video_line, unsigned int nr_chars, + unsigned int nr_cells) { unsigned int i, j, b, bpp, pixel; @@ -46,13 +45,14 @@ static void lfb_show_line( ((lfb.lfbp.font->width + 7) >> 3)); for ( b = lfb.lfbp.font->width; b--; ) { - pixel = (*bits & (1u<width) * bpp); + memset(ptr, 0, + (lfb.lfbp.width - nr_chars * lfb.lfbp.font->width) * bpp); memcpy(video_line, lfb.lbuf, nr_cells * lfb.lfbp.font->width * bpp); video_line += lfb.lfbp.bytes_per_line; } @@ -75,7 +75,8 @@ void lfb_redraw_puts(const char *s) lfb.ypos = lfb.lfbp.text_rows - 1; memmove(lfb.text_buf, lfb.text_buf + lfb.lfbp.text_columns, lfb.ypos * lfb.lfbp.text_columns); - memset(lfb.text_buf + lfb.ypos * lfb.lfbp.text_columns, 0, lfb.xpos); + memset(lfb.text_buf + lfb.ypos * lfb.lfbp.text_columns, 0, + lfb.xpos); } lfb.xpos = 0; } @@ -92,10 +93,11 @@ void lfb_redraw_puts(const char *s) for ( width = lfb.lfbp.text_columns; width; --width ) if ( line[width - 1] ) - break; + break; 
lfb_show_line(line, - lfb.lfbp.lfb + i * lfb.lfbp.font->height * lfb.lfbp.bytes_per_line, - width, max(lfb.line_len[i], width)); + lfb.lfbp.lfb + + i * lfb.lfbp.font->height * lfb.lfbp.bytes_per_line, + width, max(lfb.line_len[i], width)); lfb.line_len[i] = width; } @@ -112,9 +114,10 @@ void lfb_scroll_puts(const char *s) { if ( (c == '\n') || (lfb.xpos >= lfb.lfbp.text_columns) ) { - unsigned int bytes = (lfb.lfbp.width * - ((lfb.lfbp.bits_per_pixel + 7) >> 3)); - unsigned char *src = lfb.lfbp.lfb + lfb.lfbp.font->height * lfb.lfbp.bytes_per_line; + unsigned int bytes = + (lfb.lfbp.width * ((lfb.lfbp.bits_per_pixel + 7) >> 3)); + unsigned char *src = + lfb.lfbp.lfb + lfb.lfbp.font->height * lfb.lfbp.bytes_per_line; unsigned char *dst = lfb.lfbp.lfb; /* New line: scroll all previous rows up one line. */ @@ -126,11 +129,11 @@ void lfb_scroll_puts(const char *s) } /* Render new line. */ - lfb_show_line( - lfb.text_buf, - lfb.lfbp.lfb + (lfb.lfbp.text_rows-1) * lfb.lfbp.font->height * - lfb.lfbp.bytes_per_line, - lfb.xpos, lfb.lfbp.text_columns); + lfb_show_line(lfb.text_buf, + lfb.lfbp.lfb + (lfb.lfbp.text_rows - 1) * + lfb.lfbp.font->height * + lfb.lfbp.bytes_per_line, + lfb.xpos, lfb.lfbp.text_columns); lfb.xpos = 0; } @@ -151,7 +154,8 @@ int __init lfb_init(struct lfb_prop *lfbp) { if ( lfbp->width > MAX_XRES || lfbp->height > MAX_YRES ) { - printk(XENLOG_WARNING "Couldn't initialize a %ux%u framebuffer early.\n", + printk(XENLOG_WARNING + "Couldn't initialize a %ux%u framebuffer early.\n", lfbp->width, lfbp->height); return -EINVAL; } @@ -168,7 +172,8 @@ int __init lfb_init(struct lfb_prop *lfbp) return 0; fail: - printk(XENLOG_ERR "Couldn't allocate enough memory to drive the framebuffer\n"); + printk(XENLOG_ERR + "Couldn't allocate enough memory to drive the framebuffer\n"); lfb_free(); return -ENOMEM; diff --git a/xen/drivers/video/vesa.c b/xen/drivers/video/vesa.c index c92497e0bc..82b37f92da 100644 --- a/xen/drivers/video/vesa.c +++ b/xen/drivers/video/vesa.c @@ -14,7 +14,7 @@ #include "font.h" #include "lfb.h" -#define vlfb_info vga_console_info.u.vesa_lfb +#define vlfb_info vga_console_info.u.vesa_lfb static void lfb_flush(void); @@ -50,8 +50,10 @@ void __init vesa_early_init(void) return; if ( font_height == 0 ) /* choose a sensible default */ - font = ((vlfb_info.height <= 600) ? &font_vga_8x8 : - (vlfb_info.height <= 768) ? &font_vga_8x14 : &font_vga_8x16); + font = + ((vlfb_info.height <= 600) + ? &font_vga_8x8 + : (vlfb_info.height <= 768) ? &font_vga_8x14 : &font_vga_8x16); else if ( font_height <= 8 ) font = &font_vga_8x8; else if ( font_height <= 14 ) @@ -73,10 +75,9 @@ void __init vesa_early_init(void) * use for vesafb. With modern cards it is no * option to simply use vram_total as that * wastes plenty of kernel address space. */ - vram_remap = (vram_remap ? - (vram_remap << 20) : - ((vram_vmode + (1 << L2_PAGETABLE_SHIFT) - 1) & - ~((1 << L2_PAGETABLE_SHIFT) - 1))); + vram_remap = (vram_remap ? 
(vram_remap << 20) + : ((vram_vmode + (1 << L2_PAGETABLE_SHIFT) - 1) & + ~((1 << L2_PAGETABLE_SHIFT) - 1))); vram_remap = max_t(unsigned int, vram_remap, vram_vmode); vram_remap = min_t(unsigned int, vram_remap, vram_total); } @@ -104,20 +105,18 @@ void __init vesa_init(void) memset(lfb, 0, vram_remap); printk(XENLOG_INFO "vesafb: framebuffer at %#x, mapped to 0x%p, " - "using %uk, total %uk\n", - vlfb_info.lfb_base, lfb, - vram_remap >> 10, vram_total >> 10); + "using %uk, total %uk\n", + vlfb_info.lfb_base, lfb, vram_remap >> 10, vram_total >> 10); printk(XENLOG_INFO "vesafb: mode is %dx%dx%u, linelength=%d, font %ux%u\n", - vlfb_info.width, vlfb_info.height, - vlfb_info.bits_per_pixel, vlfb_info.bytes_per_line, - font->width, font->height); + vlfb_info.width, vlfb_info.height, vlfb_info.bits_per_pixel, + vlfb_info.bytes_per_line, font->width, font->height); printk(XENLOG_INFO "vesafb: %scolor: size=%d:%d:%d:%d, " - "shift=%d:%d:%d:%d\n", - vlfb_info.bits_per_pixel > 8 ? "True" : - vga_compat ? "Pseudo" : "Static Pseudo", - vlfb_info.rsvd_size, vlfb_info.red_size, - vlfb_info.green_size, vlfb_info.blue_size, - vlfb_info.rsvd_pos, vlfb_info.red_pos, + "shift=%d:%d:%d:%d\n", + vlfb_info.bits_per_pixel > 8 + ? "True" + : vga_compat ? "Pseudo" : "Static Pseudo", + vlfb_info.rsvd_size, vlfb_info.red_size, vlfb_info.green_size, + vlfb_info.blue_size, vlfb_info.rsvd_pos, vlfb_info.red_pos, vlfb_info.green_pos, vlfb_info.blue_pos); if ( vlfb_info.bits_per_pixel > 8 ) @@ -125,9 +124,9 @@ void __init vesa_init(void) /* Light grey in truecolor. */ unsigned int grey = 0xaaaaaaaa; lfbp.pixel_on = - ((grey >> (32 - vlfb_info. red_size)) << vlfb_info. red_pos) | + ((grey >> (32 - vlfb_info.red_size)) << vlfb_info.red_pos) | ((grey >> (32 - vlfb_info.green_size)) << vlfb_info.green_pos) | - ((grey >> (32 - vlfb_info. blue_size)) << vlfb_info. blue_pos); + ((grey >> (32 - vlfb_info.blue_size)) << vlfb_info.blue_pos); } else { @@ -147,9 +146,8 @@ integer_param("vesa-mtrr", vesa_mtrr); void __init vesa_mtrr_init(void) { - static const int mtrr_types[] = { - 0, MTRR_TYPE_UNCACHABLE, MTRR_TYPE_WRBACK, - MTRR_TYPE_WRCOMB, MTRR_TYPE_WRTHROUGH }; + static const int mtrr_types[] = {0, MTRR_TYPE_UNCACHABLE, MTRR_TYPE_WRBACK, + MTRR_TYPE_WRCOMB, MTRR_TYPE_WRTHROUGH}; unsigned int size_total; int rc, type; @@ -175,7 +173,7 @@ void __init vesa_mtrr_init(void) static void lfb_flush(void) { if ( vesa_mtrr == 3 ) - __asm__ __volatile__ ("sfence" : : : "memory"); + __asm__ __volatile__("sfence" : : : "memory"); } void __init vesa_endboot(bool_t keep) diff --git a/xen/drivers/video/vga.c b/xen/drivers/video/vga.c index 6a64fd9013..31f7e8cb50 100644 --- a/xen/drivers/video/vga.c +++ b/xen/drivers/video/vga.c @@ -1,6 +1,6 @@ /****************************************************************************** * vga.c - * + * * VGA support routines. */ @@ -19,30 +19,32 @@ static unsigned int xpos, ypos; static unsigned char *video; static void vga_text_puts(const char *s); -static void vga_noop_puts(const char *s) {} +static void vga_noop_puts(const char *s) +{ +} void (*video_puts)(const char *) = vga_noop_puts; /* * 'vga=[,keep]' where is one of: - * + * * 'vga=ask': * display a vga menu of available modes - * + * * 'vga=current': * use the current vga mode without modification - * + * * 'vga=text-80x': * text mode, where is one of {25,28,30,34,43,50,60} - * + * * 'vga=gfx-xx': * graphics mode, e.g., vga=gfx-1024x768x16 - * + * * 'vga=mode-: * specifies a mode as specified in 'vga=ask' menu * (NB. 
menu modes are displayed in hex, so mode numbers here must * be prefixed with '0x' (e.g., 'vga=mode-0x0318')) - * - * The option 'keep' causes Xen to continue to print to the VGA console even + * + * The option 'keep' causes Xen to continue to print to the VGA console even * after domain 0 starts to boot. The default behaviour is to relinquish * control of the console to domain 0. */ @@ -51,14 +53,14 @@ string_param("vga", opt_vga); /* VGA text-mode definitions. */ static unsigned int columns, lines; -#define ATTRIBUTE 7 +#define ATTRIBUTE 7 #ifdef CONFIG_X86 void vesa_early_init(void); void vesa_endboot(bool_t keep); #else #define vesa_early_init() ((void)0) -#define vesa_endboot(x) ((void)0) +#define vesa_endboot(x) ((void)0) #endif void __init video_init(void) @@ -74,7 +76,7 @@ void __init video_init(void) vgacon_keep = 1; } - switch ( vga_console_info.video_type ) + switch (vga_console_info.video_type) { case XEN_VGATYPE_TEXT_MODE_3: if ( page_is_ram_type(paddr_to_pfn(0xB8000), RAM_TYPE_CONVENTIONAL) || @@ -82,7 +84,7 @@ void __init video_init(void) return; outw(0x200a, 0x3d4); /* disable cursor */ columns = vga_console_info.u.text_mode_3.columns; - lines = vga_console_info.u.text_mode_3.rows; + lines = vga_console_info.u.text_mode_3.rows; memset(video, 0, columns * lines * 2); video_puts = vga_text_puts; break; @@ -123,22 +125,21 @@ void __init video_endboot(void) if ( !pdev || pci_conf_read16(0, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), PCI_CLASS_DEVICE) != 0x0300 || - !(pci_conf_read16(0, bus, PCI_SLOT(devfn), - PCI_FUNC(devfn), PCI_COMMAND) & + !(pci_conf_read16(0, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), + PCI_COMMAND) & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) ) continue; while ( b ) { - switch ( find_upstream_bridge(0, &b, &df, &sb) ) + switch (find_upstream_bridge(0, &b, &df, &sb)) { case 0: b = 0; break; case 1: - switch ( pci_conf_read8(0, b, PCI_SLOT(df), - PCI_FUNC(df), - PCI_HEADER_TYPE) ) + switch (pci_conf_read8(0, b, PCI_SLOT(df), PCI_FUNC(df), + PCI_HEADER_TYPE)) { case PCI_HEADER_TYPE_BRIDGE: case PCI_HEADER_TYPE_CARDBUS: @@ -155,14 +156,14 @@ void __init video_endboot(void) } if ( !b ) { - printk(XENLOG_INFO "Boot video device %02x:%02x.%u\n", - bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); + printk(XENLOG_INFO "Boot video device %02x:%02x.%u\n", bus, + PCI_SLOT(devfn), PCI_FUNC(devfn)); pci_hide_device(0, bus, devfn); } } } - switch ( vga_console_info.video_type ) + switch (vga_console_info.video_type) { case XEN_VGATYPE_TEXT_MODE_3: if ( !vgacon_keep ) @@ -196,7 +197,7 @@ static void vga_text_puts(const char *s) if ( c != '\n' ) { - video[(xpos + ypos * columns) * 2] = c; + video[(xpos + ypos * columns) * 2] = c; video[(xpos + ypos * columns) * 2 + 1] = ATTRIBUTE; xpos++; } diff --git a/xen/drivers/vpci/header.c b/xen/drivers/vpci/header.c index efb6ca90e3..83a094a68d 100644 --- a/xen/drivers/vpci/header.c +++ b/xen/drivers/vpci/header.c @@ -24,11 +24,12 @@ #include #include -#define MAPPABLE_BAR(x) \ - ((x)->type == VPCI_BAR_MEM32 || (x)->type == VPCI_BAR_MEM64_LO || \ +#define MAPPABLE_BAR(x) \ + ((x)->type == VPCI_BAR_MEM32 || (x)->type == VPCI_BAR_MEM64_LO || \ (x)->type == VPCI_BAR_ROM) -struct map_data { +struct map_data +{ struct domain *d; bool map; }; @@ -39,7 +40,7 @@ static int map_range(unsigned long s, unsigned long e, void *data, const struct map_data *map = data; int rc; - for ( ; ; ) + for ( ;; ) { unsigned long size = e - s + 1; @@ -70,7 +71,7 @@ static int map_range(unsigned long s, unsigned long e, void *data, *c += rc; s += rc; if ( general_preempt_check() ) 
- return -ERESTART; + return -ERESTART; } return rc; @@ -108,9 +109,10 @@ static void modify_decoding(const struct pci_dev *pdev, uint16_t cmd, if ( rom_only && header->bars[i].type == VPCI_BAR_ROM ) { unsigned int rom_pos = (i == PCI_HEADER_NORMAL_NR_BARS) - ? PCI_ROM_ADDRESS : PCI_ROM_ADDRESS1; - uint32_t val = header->bars[i].addr | - (map ? PCI_ROM_ADDRESS_ENABLE : 0); + ? PCI_ROM_ADDRESS + : PCI_ROM_ADDRESS1; + uint32_t val = + header->bars[i].addr | (map ? PCI_ROM_ADDRESS_ENABLE : 0); header->bars[i].enabled = header->rom_enabled = map; pci_conf_write32(pdev->seg, pdev->bus, slot, func, rom_pos, val); @@ -123,8 +125,7 @@ static void modify_decoding(const struct pci_dev *pdev, uint16_t cmd, } if ( !rom_only ) - pci_conf_write16(pdev->seg, pdev->bus, slot, func, PCI_COMMAND, - cmd); + pci_conf_write16(pdev->seg, pdev->bus, slot, func, PCI_COMMAND, cmd); else ASSERT_UNREACHABLE(); } @@ -168,7 +169,7 @@ bool vpci_process_pending(struct vcpu *v) static int __init apply_map(struct domain *d, const struct pci_dev *pdev, struct rangeset *mem, uint16_t cmd) { - struct map_data data = { .d = d, .map = true }; + struct map_data data = {.d = d, .map = true}; int rc; while ( (rc = rangeset_consume_ranges(mem, map_range, &data)) == -ERESTART ) @@ -238,8 +239,8 @@ static int modify_bars(const struct pci_dev *pdev, uint16_t cmd, bool rom_only) rc = rangeset_add_range(mem, start, end); if ( rc ) { - printk(XENLOG_G_WARNING "Failed to add [%lx, %lx]: %d\n", - start, end, rc); + printk(XENLOG_G_WARNING "Failed to add [%lx, %lx]: %d\n", start, + end, rc); rangeset_destroy(mem); return rc; } @@ -267,7 +268,7 @@ static int modify_bars(const struct pci_dev *pdev, uint16_t cmd, bool rom_only) * Check for overlaps with other BARs. Note that only BARs that are * currently mapped (enabled) are checked for overlaps. */ - list_for_each_entry(tmp, &pdev->domain->arch.pdev_list, domain_list) + list_for_each_entry (tmp, &pdev->domain->arch.pdev_list, domain_list) { if ( tmp == pdev ) { @@ -336,8 +337,8 @@ static void cmd_write(const struct pci_dev *pdev, unsigned int reg, uint32_t cmd, void *data) { uint8_t slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn); - uint16_t current_cmd = pci_conf_read16(pdev->seg, pdev->bus, slot, func, - reg); + uint16_t current_cmd = + pci_conf_read16(pdev->seg, pdev->bus, slot, func, reg); /* * Let Dom0 play with all the bits directly except for the memory @@ -377,13 +378,13 @@ static void bar_write(const struct pci_dev *pdev, unsigned int reg, /* If the value written is the current one avoid printing a warning. */ if ( val != (uint32_t)(bar->addr >> (hi ? 32 : 0)) ) gprintk(XENLOG_WARNING, - "%04x:%02x:%02x.%u: ignored BAR %lu write with memory decoding enabled\n", + "%04x:%02x:%02x.%u: ignored BAR %lu write with memory " + "decoding enabled\n", pdev->seg, pdev->bus, slot, func, bar - pdev->vpci->header.bars + hi); return; } - /* * Update the cached address, so that when memory decoding is enabled * Xen can map the BAR into the guest p2m. 
@@ -409,14 +410,15 @@ static void rom_write(const struct pci_dev *pdev, unsigned int reg, struct vpci_header *header = &pdev->vpci->header; struct vpci_bar *rom = data; uint8_t slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn); - uint16_t cmd = pci_conf_read16(pdev->seg, pdev->bus, slot, func, - PCI_COMMAND); + uint16_t cmd = + pci_conf_read16(pdev->seg, pdev->bus, slot, func, PCI_COMMAND); bool new_enabled = val & PCI_ROM_ADDRESS_ENABLE; if ( (cmd & PCI_COMMAND_MEMORY) && header->rom_enabled && new_enabled ) { gprintk(XENLOG_WARNING, - "%04x:%02x:%02x.%u: ignored ROM BAR write with memory decoding enabled\n", + "%04x:%02x:%02x.%u: ignored ROM BAR write with memory decoding " + "enabled\n", pdev->seg, pdev->bus, slot, func); return; } @@ -466,8 +468,8 @@ static int init_bars(struct pci_dev *pdev) }; int rc; - switch ( pci_conf_read8(pdev->seg, pdev->bus, slot, func, PCI_HEADER_TYPE) - & 0x7f ) + switch (pci_conf_read8(pdev->seg, pdev->bus, slot, func, PCI_HEADER_TYPE) & + 0x7f) { case PCI_HEADER_TYPE_NORMAL: num_bars = PCI_HEADER_NORMAL_NR_BARS; @@ -510,8 +512,8 @@ static int init_bars(struct pci_dev *pdev) 4, &bars[i]); if ( rc ) { - pci_conf_write16(pdev->seg, pdev->bus, slot, func, - PCI_COMMAND, cmd); + pci_conf_write16(pdev->seg, pdev->bus, slot, func, PCI_COMMAND, + cmd); return rc; } @@ -568,8 +570,9 @@ static int init_bars(struct pci_dev *pdev) rom->type = VPCI_BAR_ROM; rom->size = size; rom->addr = addr; - header->rom_enabled = pci_conf_read32(pdev->seg, pdev->bus, slot, func, - rom_reg) & PCI_ROM_ADDRESS_ENABLE; + header->rom_enabled = + pci_conf_read32(pdev->seg, pdev->bus, slot, func, rom_reg) & + PCI_ROM_ADDRESS_ENABLE; rc = vpci_add_register(pdev->vpci, vpci_hw_read32, rom_write, rom_reg, 4, rom); diff --git a/xen/drivers/vpci/msi.c b/xen/drivers/vpci/msi.c index 8f15ad7bf2..e9606ae487 100644 --- a/xen/drivers/vpci/msi.c +++ b/xen/drivers/vpci/msi.c @@ -38,9 +38,8 @@ static void control_write(const struct pci_dev *pdev, unsigned int reg, uint32_t val, void *data) { struct vpci_msi *msi = data; - unsigned int vectors = min_t(uint8_t, - 1u << MASK_EXTR(val, PCI_MSI_FLAGS_QSIZE), - msi->max_vectors); + unsigned int vectors = min_t( + uint8_t, 1u << MASK_EXTR(val, PCI_MSI_FLAGS_QSIZE), msi->max_vectors); bool new_enabled = val & PCI_MSI_FLAGS_ENABLE; /* @@ -78,8 +77,7 @@ static void control_write(const struct pci_dev *pdev, unsigned int reg, msi->enabled = new_enabled; pci_conf_write16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn), reg, - control_read(pdev, reg, data)); + PCI_FUNC(pdev->devfn), reg, control_read(pdev, reg, data)); } static void update_msi(const struct pci_dev *pdev, struct vpci_msi *msi) @@ -188,8 +186,8 @@ static void mask_write(const struct pci_dev *pdev, unsigned int reg, static int init_msi(struct pci_dev *pdev) { uint8_t slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn); - unsigned int pos = pci_find_cap_offset(pdev->seg, pdev->bus, slot, func, - PCI_CAP_ID_MSI); + unsigned int pos = + pci_find_cap_offset(pdev->seg, pdev->bus, slot, func, PCI_CAP_ID_MSI); uint16_t control; int ret; @@ -211,8 +209,8 @@ static int init_msi(struct pci_dev *pdev) return ret; /* Get the maximum number of vectors the device supports. 
*/ - control = pci_conf_read16(pdev->seg, pdev->bus, slot, func, - msi_control_reg(pos)); + control = + pci_conf_read16(pdev->seg, pdev->bus, slot, func, msi_control_reg(pos)); /* * FIXME: I've only been able to test this code with devices using a single @@ -251,10 +249,10 @@ static int init_msi(struct pci_dev *pdev) if ( pdev->vpci->msi->masking ) { - ret = vpci_add_register(pdev->vpci, mask_read, mask_write, - msi_mask_bits_reg(pos, - pdev->vpci->msi->address64), - 4, pdev->vpci->msi); + ret = vpci_add_register( + pdev->vpci, mask_read, mask_write, + msi_mask_bits_reg(pos, pdev->vpci->msi->address64), 4, + pdev->vpci->msi); if ( ret ) return ret; /* @@ -273,7 +271,7 @@ void vpci_dump_msi(void) const struct domain *d; rcu_read_lock(&domlist_read_lock); - for_each_domain ( d ) + for_each_domain (d) { const struct pci_dev *pdev; @@ -282,7 +280,7 @@ void vpci_dump_msi(void) printk("vPCI MSI/MSI-X d%d\n", d->domain_id); - list_for_each_entry ( pdev, &d->arch.pdev_list, domain_list ) + list_for_each_entry (pdev, &d->arch.pdev_list, domain_list) { const struct vpci_msi *msi; const struct vpci_msix *msix; @@ -296,12 +294,12 @@ void vpci_dump_msi(void) printk("%04x:%02x:%02x.%u MSI\n", pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); - printk(" enabled: %d 64-bit: %d", - msi->enabled, msi->address64); + printk(" enabled: %d 64-bit: %d", msi->enabled, + msi->address64); if ( msi->masking ) printk(" mask=%08x", msi->mask); - printk(" vectors max: %u enabled: %u\n", - msi->max_vectors, msi->vectors); + printk(" vectors max: %u enabled: %u\n", msi->max_vectors, + msi->vectors); vpci_msi_arch_print(msi); } diff --git a/xen/drivers/vpci/msix.c b/xen/drivers/vpci/msix.c index af3ffa087d..0d07e25b57 100644 --- a/xen/drivers/vpci/msix.c +++ b/xen/drivers/vpci/msix.c @@ -25,8 +25,8 @@ #define VMSIX_SIZE(num) offsetof(struct vpci_msix, entries[num]) -#define VMSIX_ADDR_IN_RANGE(addr, vpci, nr) \ - ((addr) >= vmsix_table_addr(vpci, nr) && \ +#define VMSIX_ADDR_IN_RANGE(addr, vpci, nr) \ + ((addr) >= vmsix_table_addr(vpci, nr) && \ (addr) < vmsix_table_addr(vpci, nr) + vmsix_table_size(vpci, nr)) static uint32_t control_read(const struct pci_dev *pdev, unsigned int reg, @@ -48,20 +48,20 @@ static int update_entry(struct vpci_msix_entry *entry, /* Ignore ENOENT, it means the entry wasn't setup. */ if ( rc && rc != -ENOENT ) { - gprintk(XENLOG_WARNING, - "%04x:%02x:%02x.%u: unable to disable entry %u for update: %d\n", - pdev->seg, pdev->bus, slot, func, nr, rc); + gprintk( + XENLOG_WARNING, + "%04x:%02x:%02x.%u: unable to disable entry %u for update: %d\n", + pdev->seg, pdev->bus, slot, func, nr, rc); return rc; } - rc = vpci_msix_arch_enable_entry(entry, pdev, - vmsix_table_base(pdev->vpci, - VPCI_MSIX_TABLE)); + rc = vpci_msix_arch_enable_entry( + entry, pdev, vmsix_table_base(pdev->vpci, VPCI_MSIX_TABLE)); if ( rc ) { gprintk(XENLOG_WARNING, - "%04x:%02x:%02x.%u: unable to enable entry %u: %d\n", - pdev->seg, pdev->bus, slot, func, nr, rc); + "%04x:%02x:%02x.%u: unable to enable entry %u: %d\n", pdev->seg, + pdev->bus, slot, func, nr, rc); /* Entry is likely not properly configured. */ return rc; } @@ -113,7 +113,7 @@ static void control_write(const struct pci_dev *pdev, unsigned int reg, * not setup, it will return -ENOENT in that case. 
*/ rc = vpci_msix_arch_disable_entry(&msix->entries[i], pdev); - switch ( rc ) + switch (rc) { case 0: /* @@ -153,7 +153,7 @@ static struct vpci_msix *msix_find(const struct domain *d, unsigned long addr) { struct vpci_msix *msix; - list_for_each_entry ( msix, &d->arch.hvm.msix_tables, next ) + list_for_each_entry (msix, &d->arch.hvm.msix_tables, next) { const struct vpci_bar *bars = msix->pdev->vpci->header.bars; unsigned int i; @@ -186,8 +186,7 @@ static bool access_allowed(const struct pci_dev *pdev, unsigned long addr, return false; } -static struct vpci_msix_entry *get_entry(struct vpci_msix *msix, - paddr_t addr) +static struct vpci_msix_entry *get_entry(struct vpci_msix *msix, paddr_t addr) { paddr_t start = vmsix_table_addr(msix->pdev->vpci, VPCI_MSIX_TABLE); @@ -219,7 +218,7 @@ static int msix_read(struct vcpu *v, unsigned long addr, unsigned int len, * guest address space. If this changes the address will need to be * translated. */ - switch ( len ) + switch (len) { case 4: *data = readl(addr); @@ -241,7 +240,7 @@ static int msix_read(struct vcpu *v, unsigned long addr, unsigned int len, entry = get_entry(msix, addr); offset = addr & (PCI_MSIX_ENTRY_SIZE - 1); - switch ( offset ) + switch (offset) { case PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET: *data = entry->addr; @@ -254,8 +253,8 @@ static int msix_read(struct vcpu *v, unsigned long addr, unsigned int len, case PCI_MSIX_ENTRY_DATA_OFFSET: *data = entry->data; if ( len == 8 ) - *data |= - (uint64_t)(entry->masked ? PCI_MSIX_VECTOR_BITMASK : 0) << 32; + *data |= (uint64_t)(entry->masked ? PCI_MSIX_VECTOR_BITMASK : 0) + << 32; break; case PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET: @@ -290,7 +289,7 @@ static int msix_write(struct vcpu *v, unsigned long addr, unsigned int len, /* Ignore writes to PBA for DomUs, it's behavior is undefined. */ if ( is_hardware_domain(d) ) { - switch ( len ) + switch (len) { case 4: writel(data, addr); @@ -320,7 +319,7 @@ static int msix_write(struct vcpu *v, unsigned long addr, unsigned int len, * in the next mask/unmask cycle. This also mimics the implementation in * QEMU. 
*/ - switch ( offset ) + switch (offset) { case PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET: entry->updated = true; @@ -416,7 +415,7 @@ int vpci_make_msix_hole(const struct pci_dev *pdev) p2m_type_t t; mfn_t mfn = get_gfn_query(d, start, &t); - switch ( t ) + switch (t) { case p2m_mmio_dm: case p2m_invalid: @@ -452,8 +451,8 @@ static int init_msix(struct pci_dev *pdev) uint16_t control; int rc; - msix_offset = pci_find_cap_offset(pdev->seg, pdev->bus, slot, func, - PCI_CAP_ID_MSIX); + msix_offset = + pci_find_cap_offset(pdev->seg, pdev->bus, slot, func, PCI_CAP_ID_MSIX); if ( !msix_offset ) return 0; @@ -469,14 +468,12 @@ static int init_msix(struct pci_dev *pdev) pdev->vpci->msix->max_entries = max_entries; pdev->vpci->msix->pdev = pdev; - pdev->vpci->msix->tables[VPCI_MSIX_TABLE] = - pci_conf_read32(pdev->seg, pdev->bus, slot, func, - msix_table_offset_reg(msix_offset)); - pdev->vpci->msix->tables[VPCI_MSIX_PBA] = - pci_conf_read32(pdev->seg, pdev->bus, slot, func, - msix_pba_offset_reg(msix_offset)); + pdev->vpci->msix->tables[VPCI_MSIX_TABLE] = pci_conf_read32( + pdev->seg, pdev->bus, slot, func, msix_table_offset_reg(msix_offset)); + pdev->vpci->msix->tables[VPCI_MSIX_PBA] = pci_conf_read32( + pdev->seg, pdev->bus, slot, func, msix_pba_offset_reg(msix_offset)); - for ( i = 0; i < pdev->vpci->msix->max_entries; i++) + for ( i = 0; i < pdev->vpci->msix->max_entries; i++ ) { pdev->vpci->msix->entries[i].masked = true; vpci_msix_arch_init_entry(&pdev->vpci->msix->entries[i]); diff --git a/xen/drivers/vpci/vpci.c b/xen/drivers/vpci/vpci.c index 82607bdb9a..169aa0b512 100644 --- a/xen/drivers/vpci/vpci.c +++ b/xen/drivers/vpci/vpci.c @@ -21,7 +21,8 @@ #include /* Internal struct to store the emulated PCI registers. */ -struct vpci_register { +struct vpci_register +{ vpci_read_t *read; vpci_write_t *write; unsigned int size; @@ -40,9 +41,8 @@ void vpci_remove_device(struct pci_dev *pdev) spin_lock(&pdev->vpci->lock); while ( !list_empty(&pdev->vpci->handlers) ) { - struct vpci_register *r = list_first_entry(&pdev->vpci->handlers, - struct vpci_register, - node); + struct vpci_register *r = + list_first_entry(&pdev->vpci->handlers, struct vpci_register, node); list_del(&r->node); xfree(r); @@ -151,7 +151,7 @@ int vpci_add_register(struct vpci *vpci, vpci_read_t *read_handler, spin_lock(&vpci->lock); /* The list of handlers must be kept sorted at all times. 
*/ - list_for_each ( prev, &vpci->handlers ) + list_for_each (prev, &vpci->handlers) { const struct vpci_register *this = list_entry(prev, const struct vpci_register, node); @@ -176,11 +176,11 @@ int vpci_add_register(struct vpci *vpci, vpci_read_t *read_handler, int vpci_remove_register(struct vpci *vpci, unsigned int offset, unsigned int size) { - const struct vpci_register r = { .offset = offset, .size = size }; + const struct vpci_register r = {.offset = offset, .size = size}; struct vpci_register *rm; spin_lock(&vpci->lock); - list_for_each_entry ( rm, &vpci->handlers, node ) + list_for_each_entry (rm, &vpci->handlers, node) { int cmp = vpci_register_cmp(&r, rm); @@ -209,7 +209,7 @@ static uint32_t vpci_read_hw(pci_sbdf_t sbdf, unsigned int reg, { uint32_t data; - switch ( size ) + switch (size) { case 4: data = pci_conf_read32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg); @@ -222,17 +222,18 @@ static uint32_t vpci_read_hw(pci_sbdf_t sbdf, unsigned int reg, */ if ( reg & 1 ) { - data = pci_conf_read8(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, - reg); + data = pci_conf_read8(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg); data |= pci_conf_read16(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, - reg + 1) << 8; + reg + 1) + << 8; } else { - data = pci_conf_read16(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, - reg); - data |= pci_conf_read8(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, - reg + 2) << 16; + data = + pci_conf_read16(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg); + data |= + pci_conf_read8(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg + 2) + << 16; } break; @@ -256,7 +257,7 @@ static uint32_t vpci_read_hw(pci_sbdf_t sbdf, unsigned int reg, static void vpci_write_hw(pci_sbdf_t sbdf, unsigned int reg, unsigned int size, uint32_t data) { - switch ( size ) + switch (size) { case 4: pci_conf_write32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg, data); @@ -269,8 +270,7 @@ static void vpci_write_hw(pci_sbdf_t sbdf, unsigned int reg, unsigned int size, */ if ( reg & 1 ) { - pci_conf_write8(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg, - data); + pci_conf_write8(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg, data); pci_conf_write16(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg + 1, data >> 8); } @@ -309,7 +309,7 @@ static uint32_t merge_result(uint32_t data, uint32_t new, unsigned int size, { uint32_t mask = 0xffffffff >> (32 - 8 * size); - return (data & ~(mask << (offset * 8))) | ((new & mask) << (offset * 8)); + return (data & ~(mask << (offset * 8))) | ((new &mask) << (offset * 8)); } uint32_t vpci_read(pci_sbdf_t sbdf, unsigned int reg, unsigned int size) @@ -334,12 +334,10 @@ uint32_t vpci_read(pci_sbdf_t sbdf, unsigned int reg, unsigned int size) spin_lock(&pdev->vpci->lock); /* Read from the hardware or the emulated register handlers. */ - list_for_each_entry ( r, &pdev->vpci->handlers, node ) + list_for_each_entry (r, &pdev->vpci->handlers, node) { - const struct vpci_register emu = { - .offset = reg + data_offset, - .size = size - data_offset - }; + const struct vpci_register emu = {.offset = reg + data_offset, + .size = size - data_offset}; int cmp = vpci_register_cmp(&emu, r); uint32_t val; unsigned int read_size; @@ -378,8 +376,8 @@ uint32_t vpci_read(pci_sbdf_t sbdf, unsigned int reg, unsigned int size) if ( data_offset < size ) { /* Tailing gap, read the remaining. 
*/ - uint32_t tmp_data = vpci_read_hw(sbdf, reg + data_offset, - size - data_offset); + uint32_t tmp_data = + vpci_read_hw(sbdf, reg + data_offset, size - data_offset); data = merge_result(data, tmp_data, size - data_offset, data_offset); } @@ -442,12 +440,10 @@ void vpci_write(pci_sbdf_t sbdf, unsigned int reg, unsigned int size, spin_lock(&pdev->vpci->lock); /* Write the value to the hardware or emulated registers. */ - list_for_each_entry ( r, &pdev->vpci->handlers, node ) + list_for_each_entry (r, &pdev->vpci->handlers, node) { - const struct vpci_register emu = { - .offset = reg + data_offset, - .size = size - data_offset - }; + const struct vpci_register emu = {.offset = reg + data_offset, + .size = size - data_offset}; int cmp = vpci_register_cmp(&emu, r); unsigned int write_size; diff --git a/xen/lib/x86/cpuid.c b/xen/lib/x86/cpuid.c index 5a3159b357..2c86f1ea8c 100644 --- a/xen/lib/x86/cpuid.c +++ b/xen/lib/x86/cpuid.c @@ -7,12 +7,16 @@ void x86_cpuid_policy_fill_native(struct cpuid_policy *p) unsigned int i; cpuid_leaf(0, &p->basic.raw[0]); - for ( i = 1; i < min_t(unsigned int, ARRAY_SIZE(p->basic.raw), - p->basic.max_leaf); ++i ) + for ( i = 1; + i < min_t(unsigned int, ARRAY_SIZE(p->basic.raw), p->basic.max_leaf); + ++i ) { - switch ( i ) + switch (i) { - case 0x4: case 0x7: case 0xb: case 0xd: + case 0x4: + case 0x7: + case 0xb: + case 0xd: /* Multi-invocation leaves. Deferred. */ continue; } @@ -53,7 +57,8 @@ void x86_cpuid_policy_fill_native(struct cpuid_policy *p) cpuid_count_leaf(7, 0, &p->feat.raw[0]); for ( i = 1; i < min_t(unsigned int, ARRAY_SIZE(p->feat.raw), - p->feat.max_subleaf); ++i ) + p->feat.max_subleaf); + ++i ) cpuid_count_leaf(7, i, &p->feat.raw[i]); } @@ -93,11 +98,11 @@ void x86_cpuid_policy_fill_native(struct cpuid_policy *p) cpuid_count_leaf(0xd, 0, &p->xstate.raw[0]); cpuid_count_leaf(0xd, 1, &p->xstate.raw[1]); - xstates = ((uint64_t)(p->xstate.xcr0_high | p->xstate.xss_high) << 32); - xstates |= (p->xstate.xcr0_low | p->xstate.xss_low); + xstates = ((uint64_t)(p->xstate.xcr0_high | p->xstate.xss_high) << 32); + xstates |= (p->xstate.xcr0_low | p->xstate.xss_low); - for ( i = 2; i < min_t(unsigned int, 63, - ARRAY_SIZE(p->xstate.raw)); ++i ) + for ( i = 2; i < min_t(unsigned int, 63, ARRAY_SIZE(p->xstate.raw)); + ++i ) { if ( xstates & (1ul << i) ) cpuid_count_leaf(0xd, i, &p->xstate.raw[i]); @@ -107,14 +112,16 @@ void x86_cpuid_policy_fill_native(struct cpuid_policy *p) /* Extended leaves. 
*/ cpuid_leaf(0x80000000, &p->extd.raw[0]); for ( i = 1; i < min_t(unsigned int, ARRAY_SIZE(p->extd.raw), - p->extd.max_leaf + 1 - 0x80000000); ++i ) + p->extd.max_leaf + 1 - 0x80000000); + ++i ) cpuid_leaf(0x80000000 + i, &p->extd.raw[i]); } const uint32_t *x86_cpuid_lookup_deep_deps(uint32_t feature) { static const uint32_t deep_features[] = INIT_DEEP_FEATURES; - static const struct { + static const struct + { uint32_t feature; uint32_t fs[FEATURESET_NR_ENTRIES]; } deep_deps[] = INIT_DEEP_DEPS; @@ -148,8 +155,8 @@ const uint32_t *x86_cpuid_lookup_deep_deps(uint32_t feature) */ static int copy_leaf_to_buffer(uint32_t leaf, uint32_t subleaf, const struct cpuid_leaf *data, - cpuid_leaf_buffer_t leaves, - uint32_t *curr_entry, const uint32_t nr_entries) + cpuid_leaf_buffer_t leaves, uint32_t *curr_entry, + const uint32_t nr_entries) { const xen_cpuid_leaf_t val = { leaf, subleaf, data->a, data->b, data->c, data->d, @@ -172,20 +179,20 @@ int x86_cpuid_copy_to_buffer(const struct cpuid_policy *p, const uint32_t nr_entries = *nr_entries_p; uint32_t curr_entry = 0, leaf, subleaf; -#define COPY_LEAF(l, s, data) \ - ({ \ - int ret; \ - \ - if ( (ret = copy_leaf_to_buffer( \ - l, s, data, leaves, &curr_entry, nr_entries)) ) \ - return ret; \ +#define COPY_LEAF(l, s, data) \ + ({ \ + int ret; \ + \ + if ( (ret = copy_leaf_to_buffer(l, s, data, leaves, &curr_entry, \ + nr_entries)) ) \ + return ret; \ }) /* Basic leaves. */ - for ( leaf = 0; leaf <= MIN(p->basic.max_leaf, - ARRAY_SIZE(p->basic.raw) - 1); ++leaf ) + for ( leaf = 0; + leaf <= MIN(p->basic.max_leaf, ARRAY_SIZE(p->basic.raw) - 1); ++leaf ) { - switch ( leaf ) + switch (leaf) { case 0x4: for ( subleaf = 0; subleaf < ARRAY_SIZE(p->cache.raw); ++subleaf ) @@ -193,9 +200,9 @@ int x86_cpuid_copy_to_buffer(const struct cpuid_policy *p, break; case 0x7: - for ( subleaf = 0; - subleaf <= MIN(p->feat.max_subleaf, - ARRAY_SIZE(p->feat.raw) - 1); ++subleaf ) + for ( subleaf = 0; subleaf <= MIN(p->feat.max_subleaf, + ARRAY_SIZE(p->feat.raw) - 1); + ++subleaf ) COPY_LEAF(leaf, subleaf, &p->feat.raw[subleaf]); break; @@ -217,13 +224,14 @@ int x86_cpuid_copy_to_buffer(const struct cpuid_policy *p, /* TODO: Port Xen and Viridian leaves to the new CPUID infrastructure. */ COPY_LEAF(0x40000000, XEN_CPUID_NO_SUBLEAF, - &(struct cpuid_leaf){ p->hv_limit }); + &(struct cpuid_leaf){p->hv_limit}); COPY_LEAF(0x40000100, XEN_CPUID_NO_SUBLEAF, - &(struct cpuid_leaf){ p->hv2_limit }); + &(struct cpuid_leaf){p->hv2_limit}); /* Extended leaves. */ - for ( leaf = 0; leaf <= MIN(p->extd.max_leaf & 0xfffful, - ARRAY_SIZE(p->extd.raw) - 1); ++leaf ) + for ( leaf = 0; + leaf <= MIN(p->extd.max_leaf & 0xfffful, ARRAY_SIZE(p->extd.raw) - 1); + ++leaf ) COPY_LEAF(0x80000000 | leaf, XEN_CPUID_NO_SUBLEAF, &p->extd.raw[leaf]); #undef COPY_LEAF diff --git a/xen/lib/x86/msr.c b/xen/lib/x86/msr.c index 60fb567687..b2ff74d2a0 100644 --- a/xen/lib/x86/msr.c +++ b/xen/lib/x86/msr.c @@ -7,10 +7,10 @@ * boundary check against the buffer size. 
*/ static int copy_msr_to_buffer(uint32_t idx, uint64_t val, - msr_entry_buffer_t msrs, - uint32_t *curr_entry, const uint32_t nr_entries) + msr_entry_buffer_t msrs, uint32_t *curr_entry, + const uint32_t nr_entries) { - const xen_msr_entry_t ent = { .idx = idx, .val = val }; + const xen_msr_entry_t ent = {.idx = idx, .val = val}; if ( *curr_entry == nr_entries ) return -ENOBUFS; @@ -23,19 +23,19 @@ static int copy_msr_to_buffer(uint32_t idx, uint64_t val, return 0; } -int x86_msr_copy_to_buffer(const struct msr_policy *p, - msr_entry_buffer_t msrs, uint32_t *nr_entries_p) +int x86_msr_copy_to_buffer(const struct msr_policy *p, msr_entry_buffer_t msrs, + uint32_t *nr_entries_p) { const uint32_t nr_entries = *nr_entries_p; uint32_t curr_entry = 0; -#define COPY_MSR(idx, val) \ - ({ \ - int ret; \ - \ - if ( (ret = copy_msr_to_buffer( \ - idx, val, msrs, &curr_entry, nr_entries)) ) \ - return ret; \ +#define COPY_MSR(idx, val) \ + ({ \ + int ret; \ + \ + if ( (ret = copy_msr_to_buffer(idx, val, msrs, &curr_entry, \ + nr_entries)) ) \ + return ret; \ }) COPY_MSR(MSR_INTEL_PLATFORM_INFO, p->plaform_info.raw); diff --git a/xen/test/livepatch/xen_hello_world.c b/xen/test/livepatch/xen_hello_world.c index 02f3f85dc0..5b34c1bc13 100644 --- a/xen/test/livepatch/xen_hello_world.c +++ b/xen/test/livepatch/xen_hello_world.c @@ -26,7 +26,7 @@ static void revert_hook(void) printk(KERN_DEBUG "Hook unloaded.\n"); } -static void hi_func(void) +static void hi_func(void) { printk(KERN_DEBUG "%s: Hi! (called %u times)\n", __func__, ++cnt); }; @@ -47,13 +47,14 @@ LIVEPATCH_UNLOAD_HOOK(hi_func); LIVEPATCH_UNLOAD_HOOK(check_fnc); -struct livepatch_func __section(".livepatch.funcs") livepatch_xen_hello_world = { - .version = LIVEPATCH_PAYLOAD_VERSION, - .name = hello_world_patch_this_fnc, - .new_addr = xen_hello_world, - .old_addr = xen_extra_version, - .new_size = NEW_CODE_SZ, - .old_size = OLD_CODE_SZ, +struct livepatch_func + __section(".livepatch.funcs") livepatch_xen_hello_world = { + .version = LIVEPATCH_PAYLOAD_VERSION, + .name = hello_world_patch_this_fnc, + .new_addr = xen_hello_world, + .old_addr = xen_extra_version, + .new_size = NEW_CODE_SZ, + .old_size = OLD_CODE_SZ, }; /* diff --git a/xen/test/livepatch/xen_hello_world_func.c b/xen/test/livepatch/xen_hello_world_func.c index b358224e3e..823b630b68 100644 --- a/xen/test/livepatch/xen_hello_world_func.c +++ b/xen/test/livepatch/xen_hello_world_func.c @@ -11,7 +11,8 @@ #include #include -static unsigned long *non_canonical_addr = (unsigned long *)0xdead000000000000ULL; +static unsigned long *non_canonical_addr = + (unsigned long *)0xdead000000000000ULL; #endif /* Our replacement function for xen_extra_version. */ diff --git a/xen/test/livepatch/xen_nop.c b/xen/test/livepatch/xen_nop.c index a224b7c670..b73eaecef6 100644 --- a/xen/test/livepatch/xen_nop.c +++ b/xen/test/livepatch/xen_nop.c @@ -21,7 +21,7 @@ struct livepatch_func __section(".livepatch.funcs") livepatch_nop = { #ifdef CONFIG_X86 .old_addr = (void *)MINOR_VERSION_ADDR, /* Everything but the last instruction: "req". */ - .new_size = MINOR_VERSION_SZ-1, + .new_size = MINOR_VERSION_SZ - 1, #endif #ifdef CONFIG_ARM @@ -30,7 +30,7 @@ struct livepatch_func __section(".livepatch.funcs") livepatch_nop = { * On ARM64 we replace the first one: "mov w0, #0x8". While on * ARM32 we replace all but the return instruction: "bx lr". 
*/ - .new_size = MINOR_VERSION_SZ-4, + .new_size = MINOR_VERSION_SZ - 4, #endif }; diff --git a/xen/test/livepatch/xen_replace_world.c b/xen/test/livepatch/xen_replace_world.c index 78a8f528b3..5ee1ae7290 100644 --- a/xen/test/livepatch/xen_replace_world.c +++ b/xen/test/livepatch/xen_replace_world.c @@ -13,13 +13,14 @@ static const char xen_replace_world_name[] = "xen_extra_version"; extern const char *xen_replace_world(void); -struct livepatch_func __section(".livepatch.funcs") livepatch_xen_replace_world = { - .version = LIVEPATCH_PAYLOAD_VERSION, - .name = xen_replace_world_name, - .old_addr = 0, /* Forces the hypervisor to lookup .name */ - .new_addr = xen_replace_world, - .new_size = NEW_CODE_SZ, - .old_size = OLD_CODE_SZ, +struct livepatch_func + __section(".livepatch.funcs") livepatch_xen_replace_world = { + .version = LIVEPATCH_PAYLOAD_VERSION, + .name = xen_replace_world_name, + .old_addr = 0, /* Forces the hypervisor to lookup .name */ + .new_addr = xen_replace_world, + .new_size = NEW_CODE_SZ, + .old_size = OLD_CODE_SZ, }; /* diff --git a/xen/tools/kconfig/conf.c b/xen/tools/kconfig/conf.c index 6c204318bc..b57ef114d9 100644 --- a/xen/tools/kconfig/conf.c +++ b/xen/tools/kconfig/conf.c @@ -21,19 +21,20 @@ static void conf(struct menu *menu); static void check_conf(struct menu *menu); static void xfgets(char *str, int size, FILE *in); -enum input_mode { - oldaskconfig, - silentoldconfig, - oldconfig, - allnoconfig, - allyesconfig, - allmodconfig, - alldefconfig, - randconfig, - defconfig, - savedefconfig, - listnewconfig, - olddefconfig, +enum input_mode +{ + oldaskconfig, + silentoldconfig, + oldconfig, + allnoconfig, + allyesconfig, + allmodconfig, + alldefconfig, + randconfig, + defconfig, + savedefconfig, + listnewconfig, + olddefconfig, } input_mode = oldaskconfig; static int indent = 1; @@ -46,670 +47,759 @@ static struct menu *rootEntry; static void print_help(struct menu *menu) { - struct gstr help = str_new(); + struct gstr help = str_new(); - menu_get_ext_help(menu, &help); + menu_get_ext_help(menu, &help); - printf("\n%s\n", str_get(&help)); - str_free(&help); + printf("\n%s\n", str_get(&help)); + str_free(&help); } static void strip(char *str) { - char *p = str; - int l; - - while ((isspace(*p))) - p++; - l = strlen(p); - if (p != str) - memmove(str, p, l + 1); - if (!l) - return; - p = str + l - 1; - while ((isspace(*p))) - *p-- = 0; + char *p = str; + int l; + + while ( (isspace(*p)) ) + p++; + l = strlen(p); + if ( p != str ) + memmove(str, p, l + 1); + if ( !l ) + return; + p = str + l - 1; + while ( (isspace(*p)) ) + *p-- = 0; } static void check_stdin(void) { - if (!valid_stdin) { - printf(_("aborted!\n\n")); - printf(_("Console input/output is redirected. ")); - printf(_("Run 'make oldconfig' to update configuration.\n\n")); - exit(1); - } + if ( !valid_stdin ) + { + printf(_("aborted!\n\n")); + printf(_("Console input/output is redirected. 
")); + printf(_("Run 'make oldconfig' to update configuration.\n\n")); + exit(1); + } } static int conf_askvalue(struct symbol *sym, const char *def) { - enum symbol_type type = sym_get_type(sym); - - if (!sym_has_value(sym)) - printf(_("(NEW) ")); - - line[0] = '\n'; - line[1] = 0; - - if (!sym_is_changable(sym)) { - printf("%s\n", def); - line[0] = '\n'; - line[1] = 0; - return 0; - } - - switch (input_mode) { - case oldconfig: - case silentoldconfig: - if (sym_has_value(sym)) { - printf("%s\n", def); - return 0; - } - check_stdin(); - /* fall through */ - case oldaskconfig: - fflush(stdout); - xfgets(line, 128, stdin); - if (!tty_stdio) - printf("\n"); - return 1; - default: - break; - } - - switch (type) { - case S_INT: - case S_HEX: - case S_STRING: - printf("%s\n", def); - return 1; - default: - ; - } - printf("%s", line); - return 1; + enum symbol_type type = sym_get_type(sym); + + if ( !sym_has_value(sym) ) + printf(_("(NEW) ")); + + line[0] = '\n'; + line[1] = 0; + + if ( !sym_is_changable(sym) ) + { + printf("%s\n", def); + line[0] = '\n'; + line[1] = 0; + return 0; + } + + switch (input_mode) + { + case oldconfig: + case silentoldconfig: + if ( sym_has_value(sym) ) + { + printf("%s\n", def); + return 0; + } + check_stdin(); + /* fall through */ + case oldaskconfig: + fflush(stdout); + xfgets(line, 128, stdin); + if ( !tty_stdio ) + printf("\n"); + return 1; + default: + break; + } + + switch (type) + { + case S_INT: + case S_HEX: + case S_STRING: + printf("%s\n", def); + return 1; + default:; + } + printf("%s", line); + return 1; } static int conf_string(struct menu *menu) { - struct symbol *sym = menu->sym; - const char *def; - - while (1) { - printf("%*s%s ", indent - 1, "", _(menu->prompt->text)); - printf("(%s) ", sym->name); - def = sym_get_string_value(sym); - if (sym_get_string_value(sym)) - printf("[%s] ", def); - if (!conf_askvalue(sym, def)) - return 0; - switch (line[0]) { - case '\n': - break; - case '?': - /* print help */ - if (line[1] == '\n') { - print_help(menu); - def = NULL; - break; - } - /* fall through */ - default: - line[strlen(line)-1] = 0; - def = line; - } - if (def && sym_set_string_value(sym, def)) - return 0; - } + struct symbol *sym = menu->sym; + const char *def; + + while ( 1 ) + { + printf("%*s%s ", indent - 1, "", _(menu->prompt->text)); + printf("(%s) ", sym->name); + def = sym_get_string_value(sym); + if ( sym_get_string_value(sym) ) + printf("[%s] ", def); + if ( !conf_askvalue(sym, def) ) + return 0; + switch (line[0]) + { + case '\n': + break; + case '?': + /* print help */ + if ( line[1] == '\n' ) + { + print_help(menu); + def = NULL; + break; + } + /* fall through */ + default: + line[strlen(line) - 1] = 0; + def = line; + } + if ( def && sym_set_string_value(sym, def) ) + return 0; + } } static int conf_sym(struct menu *menu) { - struct symbol *sym = menu->sym; - tristate oldval, newval; - - while (1) { - printf("%*s%s ", indent - 1, "", _(menu->prompt->text)); - if (sym->name) - printf("(%s) ", sym->name); - putchar('['); - oldval = sym_get_tristate_value(sym); - switch (oldval) { - case no: - putchar('N'); - break; - case mod: - putchar('M'); - break; - case yes: - putchar('Y'); - break; - } - if (oldval != no && sym_tristate_within_range(sym, no)) - printf("/n"); - if (oldval != mod && sym_tristate_within_range(sym, mod)) - printf("/m"); - if (oldval != yes && sym_tristate_within_range(sym, yes)) - printf("/y"); - if (menu_has_help(menu)) - printf("/?"); - printf("] "); - if (!conf_askvalue(sym, sym_get_string_value(sym))) - return 
0; - strip(line); - - switch (line[0]) { - case 'n': - case 'N': - newval = no; - if (!line[1] || !strcmp(&line[1], "o")) - break; - continue; - case 'm': - case 'M': - newval = mod; - if (!line[1]) - break; - continue; - case 'y': - case 'Y': - newval = yes; - if (!line[1] || !strcmp(&line[1], "es")) - break; - continue; - case 0: - newval = oldval; - break; - case '?': - goto help; - default: - continue; - } - if (sym_set_tristate_value(sym, newval)) - return 0; -help: - print_help(menu); - } + struct symbol *sym = menu->sym; + tristate oldval, newval; + + while ( 1 ) + { + printf("%*s%s ", indent - 1, "", _(menu->prompt->text)); + if ( sym->name ) + printf("(%s) ", sym->name); + putchar('['); + oldval = sym_get_tristate_value(sym); + switch (oldval) + { + case no: + putchar('N'); + break; + case mod: + putchar('M'); + break; + case yes: + putchar('Y'); + break; + } + if ( oldval != no && sym_tristate_within_range(sym, no) ) + printf("/n"); + if ( oldval != mod && sym_tristate_within_range(sym, mod) ) + printf("/m"); + if ( oldval != yes && sym_tristate_within_range(sym, yes) ) + printf("/y"); + if ( menu_has_help(menu) ) + printf("/?"); + printf("] "); + if ( !conf_askvalue(sym, sym_get_string_value(sym)) ) + return 0; + strip(line); + + switch (line[0]) + { + case 'n': + case 'N': + newval = no; + if ( !line[1] || !strcmp(&line[1], "o") ) + break; + continue; + case 'm': + case 'M': + newval = mod; + if ( !line[1] ) + break; + continue; + case 'y': + case 'Y': + newval = yes; + if ( !line[1] || !strcmp(&line[1], "es") ) + break; + continue; + case 0: + newval = oldval; + break; + case '?': + goto help; + default: + continue; + } + if ( sym_set_tristate_value(sym, newval) ) + return 0; + help: + print_help(menu); + } } static int conf_choice(struct menu *menu) { - struct symbol *sym, *def_sym; - struct menu *child; - bool is_new; - - sym = menu->sym; - is_new = !sym_has_value(sym); - if (sym_is_changable(sym)) { - conf_sym(menu); - sym_calc_value(sym); - switch (sym_get_tristate_value(sym)) { - case no: - return 1; - case mod: - return 0; - case yes: - break; - } - } else { - switch (sym_get_tristate_value(sym)) { - case no: - return 1; - case mod: - printf("%*s%s\n", indent - 1, "", _(menu_get_prompt(menu))); - return 0; - case yes: - break; - } - } - - while (1) { - int cnt, def; - - printf("%*s%s\n", indent - 1, "", _(menu_get_prompt(menu))); - def_sym = sym_get_choice_value(sym); - cnt = def = 0; - line[0] = 0; - for (child = menu->list; child; child = child->next) { - if (!menu_is_visible(child)) - continue; - if (!child->sym) { - printf("%*c %s\n", indent, '*', _(menu_get_prompt(child))); - continue; - } - cnt++; - if (child->sym == def_sym) { - def = cnt; - printf("%*c", indent, '>'); - } else - printf("%*c", indent, ' '); - printf(" %d. 
%s", cnt, _(menu_get_prompt(child))); - if (child->sym->name) - printf(" (%s)", child->sym->name); - if (!sym_has_value(child->sym)) - printf(_(" (NEW)")); - printf("\n"); - } - printf(_("%*schoice"), indent - 1, ""); - if (cnt == 1) { - printf("[1]: 1\n"); - goto conf_childs; - } - printf("[1-%d", cnt); - if (menu_has_help(menu)) - printf("?"); - printf("]: "); - switch (input_mode) { - case oldconfig: - case silentoldconfig: - if (!is_new) { - cnt = def; - printf("%d\n", cnt); - break; - } - check_stdin(); - /* fall through */ - case oldaskconfig: - fflush(stdout); - xfgets(line, 128, stdin); - strip(line); - if (line[0] == '?') { - print_help(menu); - continue; - } - if (!line[0]) - cnt = def; - else if (isdigit(line[0])) - cnt = atoi(line); - else - continue; - break; - default: - break; - } - - conf_childs: - for (child = menu->list; child; child = child->next) { - if (!child->sym || !menu_is_visible(child)) - continue; - if (!--cnt) - break; - } - if (!child) - continue; - if (line[0] && line[strlen(line) - 1] == '?') { - print_help(child); - continue; - } - sym_set_choice_value(sym, child->sym); - for (child = child->list; child; child = child->next) { - indent += 2; - conf(child); - indent -= 2; - } - return 1; - } + struct symbol *sym, *def_sym; + struct menu *child; + bool is_new; + + sym = menu->sym; + is_new = !sym_has_value(sym); + if ( sym_is_changable(sym) ) + { + conf_sym(menu); + sym_calc_value(sym); + switch (sym_get_tristate_value(sym)) + { + case no: + return 1; + case mod: + return 0; + case yes: + break; + } + } + else + { + switch (sym_get_tristate_value(sym)) + { + case no: + return 1; + case mod: + printf("%*s%s\n", indent - 1, "", _(menu_get_prompt(menu))); + return 0; + case yes: + break; + } + } + + while ( 1 ) + { + int cnt, def; + + printf("%*s%s\n", indent - 1, "", _(menu_get_prompt(menu))); + def_sym = sym_get_choice_value(sym); + cnt = def = 0; + line[0] = 0; + for ( child = menu->list; child; child = child->next ) + { + if ( !menu_is_visible(child) ) + continue; + if ( !child->sym ) + { + printf("%*c %s\n", indent, '*', _(menu_get_prompt(child))); + continue; + } + cnt++; + if ( child->sym == def_sym ) + { + def = cnt; + printf("%*c", indent, '>'); + } + else + printf("%*c", indent, ' '); + printf(" %d. %s", cnt, _(menu_get_prompt(child))); + if ( child->sym->name ) + printf(" (%s)", child->sym->name); + if ( !sym_has_value(child->sym) ) + printf(_(" (NEW)")); + printf("\n"); + } + printf(_("%*schoice"), indent - 1, ""); + if ( cnt == 1 ) + { + printf("[1]: 1\n"); + goto conf_childs; + } + printf("[1-%d", cnt); + if ( menu_has_help(menu) ) + printf("?"); + printf("]: "); + switch (input_mode) + { + case oldconfig: + case silentoldconfig: + if ( !is_new ) + { + cnt = def; + printf("%d\n", cnt); + break; + } + check_stdin(); + /* fall through */ + case oldaskconfig: + fflush(stdout); + xfgets(line, 128, stdin); + strip(line); + if ( line[0] == '?' ) + { + print_help(menu); + continue; + } + if ( !line[0] ) + cnt = def; + else if ( isdigit(line[0]) ) + cnt = atoi(line); + else + continue; + break; + default: + break; + } + + conf_childs: + for ( child = menu->list; child; child = child->next ) + { + if ( !child->sym || !menu_is_visible(child) ) + continue; + if ( !--cnt ) + break; + } + if ( !child ) + continue; + if ( line[0] && line[strlen(line) - 1] == '?' 
) + { + print_help(child); + continue; + } + sym_set_choice_value(sym, child->sym); + for ( child = child->list; child; child = child->next ) + { + indent += 2; + conf(child); + indent -= 2; + } + return 1; + } } static void conf(struct menu *menu) { - struct symbol *sym; - struct property *prop; - struct menu *child; - - if (!menu_is_visible(menu)) - return; - - sym = menu->sym; - prop = menu->prompt; - if (prop) { - const char *prompt; - - switch (prop->type) { - case P_MENU: - if ((input_mode == silentoldconfig || - input_mode == listnewconfig || - input_mode == olddefconfig) && - rootEntry != menu) { - check_conf(menu); - return; - } - /* fall through */ - case P_COMMENT: - prompt = menu_get_prompt(menu); - if (prompt) - printf("%*c\n%*c %s\n%*c\n", - indent, '*', - indent, '*', _(prompt), - indent, '*'); - default: - ; - } - } - - if (!sym) - goto conf_childs; - - if (sym_is_choice(sym)) { - conf_choice(menu); - if (sym->curr.tri != mod) - return; - goto conf_childs; - } - - switch (sym->type) { - case S_INT: - case S_HEX: - case S_STRING: - conf_string(menu); - break; - default: - conf_sym(menu); - break; - } + struct symbol *sym; + struct property *prop; + struct menu *child; + + if ( !menu_is_visible(menu) ) + return; + + sym = menu->sym; + prop = menu->prompt; + if ( prop ) + { + const char *prompt; + + switch (prop->type) + { + case P_MENU: + if ( (input_mode == silentoldconfig || + input_mode == listnewconfig || input_mode == olddefconfig) && + rootEntry != menu ) + { + check_conf(menu); + return; + } + /* fall through */ + case P_COMMENT: + prompt = menu_get_prompt(menu); + if ( prompt ) + printf("%*c\n%*c %s\n%*c\n", indent, '*', indent, '*', + _(prompt), indent, '*'); + default:; + } + } + + if ( !sym ) + goto conf_childs; + + if ( sym_is_choice(sym) ) + { + conf_choice(menu); + if ( sym->curr.tri != mod ) + return; + goto conf_childs; + } + + switch (sym->type) + { + case S_INT: + case S_HEX: + case S_STRING: + conf_string(menu); + break; + default: + conf_sym(menu); + break; + } conf_childs: - if (sym) - indent += 2; - for (child = menu->list; child; child = child->next) - conf(child); - if (sym) - indent -= 2; + if ( sym ) + indent += 2; + for ( child = menu->list; child; child = child->next ) + conf(child); + if ( sym ) + indent -= 2; } static void check_conf(struct menu *menu) { - struct symbol *sym; - struct menu *child; - - if (!menu_is_visible(menu)) - return; - - sym = menu->sym; - if (sym && !sym_has_value(sym)) { - if (sym_is_changable(sym) || - (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes)) { - if (input_mode == listnewconfig) { - if (sym->name && !sym_is_choice_value(sym)) { - printf("%s%s\n", CONFIG_, sym->name); - } - } else if (input_mode != olddefconfig) { - if (!conf_cnt++) - printf(_("*\n* Restart config...\n*\n")); - rootEntry = menu_get_parent_menu(menu); - conf(rootEntry); - } - } - } - - for (child = menu->list; child; child = child->next) - check_conf(child); + struct symbol *sym; + struct menu *child; + + if ( !menu_is_visible(menu) ) + return; + + sym = menu->sym; + if ( sym && !sym_has_value(sym) ) + { + if ( sym_is_changable(sym) || + (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes) ) + { + if ( input_mode == listnewconfig ) + { + if ( sym->name && !sym_is_choice_value(sym) ) + { + printf("%s%s\n", CONFIG_, sym->name); + } + } + else if ( input_mode != olddefconfig ) + { + if ( !conf_cnt++ ) + printf(_("*\n* Restart config...\n*\n")); + rootEntry = menu_get_parent_menu(menu); + conf(rootEntry); + } + } + } + + for ( child = 
menu->list; child; child = child->next ) + check_conf(child); } static struct option long_opts[] = { - {"oldaskconfig", no_argument, NULL, oldaskconfig}, - {"oldconfig", no_argument, NULL, oldconfig}, - {"silentoldconfig", no_argument, NULL, silentoldconfig}, - {"defconfig", optional_argument, NULL, defconfig}, - {"savedefconfig", required_argument, NULL, savedefconfig}, - {"allnoconfig", no_argument, NULL, allnoconfig}, - {"allyesconfig", no_argument, NULL, allyesconfig}, - {"allmodconfig", no_argument, NULL, allmodconfig}, - {"alldefconfig", no_argument, NULL, alldefconfig}, - {"randconfig", no_argument, NULL, randconfig}, - {"listnewconfig", no_argument, NULL, listnewconfig}, - {"olddefconfig", no_argument, NULL, olddefconfig}, - /* - * oldnoconfig is an alias of olddefconfig, because people already - * are dependent on its behavior(sets new symbols to their default - * value but not 'n') with the counter-intuitive name. - */ - {"oldnoconfig", no_argument, NULL, olddefconfig}, - {NULL, 0, NULL, 0} -}; + {"oldaskconfig", no_argument, NULL, oldaskconfig}, + {"oldconfig", no_argument, NULL, oldconfig}, + {"silentoldconfig", no_argument, NULL, silentoldconfig}, + {"defconfig", optional_argument, NULL, defconfig}, + {"savedefconfig", required_argument, NULL, savedefconfig}, + {"allnoconfig", no_argument, NULL, allnoconfig}, + {"allyesconfig", no_argument, NULL, allyesconfig}, + {"allmodconfig", no_argument, NULL, allmodconfig}, + {"alldefconfig", no_argument, NULL, alldefconfig}, + {"randconfig", no_argument, NULL, randconfig}, + {"listnewconfig", no_argument, NULL, listnewconfig}, + {"olddefconfig", no_argument, NULL, olddefconfig}, + /* + * oldnoconfig is an alias of olddefconfig, because people already + * are dependent on its behavior(sets new symbols to their default + * value but not 'n') with the counter-intuitive name. 
+ */ + {"oldnoconfig", no_argument, NULL, olddefconfig}, + {NULL, 0, NULL, 0}}; static void conf_usage(const char *progname) { - - printf("Usage: %s [-s] [option] \n", progname); - printf("[option] is _one_ of the following:\n"); - printf(" --listnewconfig List new options\n"); - printf(" --oldaskconfig Start a new configuration using a line-oriented program\n"); - printf(" --oldconfig Update a configuration using a provided .config as base\n"); - printf(" --silentoldconfig Same as oldconfig, but quietly, additionally update deps\n"); - printf(" --olddefconfig Same as silentoldconfig but sets new symbols to their default value\n"); - printf(" --oldnoconfig An alias of olddefconfig\n"); - printf(" --defconfig New config with default defined in \n"); - printf(" --savedefconfig Save the minimal current configuration to \n"); - printf(" --allnoconfig New config where all options are answered with no\n"); - printf(" --allyesconfig New config where all options are answered with yes\n"); - printf(" --allmodconfig New config where all options are answered with mod\n"); - printf(" --alldefconfig New config with all symbols set to default\n"); - printf(" --randconfig New config with random answer to all options\n"); + printf("Usage: %s [-s] [option] \n", progname); + printf("[option] is _one_ of the following:\n"); + printf(" --listnewconfig List new options\n"); + printf(" --oldaskconfig Start a new configuration using a " + "line-oriented program\n"); + printf(" --oldconfig Update a configuration using a provided " + ".config as base\n"); + printf(" --silentoldconfig Same as oldconfig, but quietly, " + "additionally update deps\n"); + printf(" --olddefconfig Same as silentoldconfig but sets new " + "symbols to their default value\n"); + printf(" --oldnoconfig An alias of olddefconfig\n"); + printf(" --defconfig New config with default defined in " + "\n"); + printf(" --savedefconfig Save the minimal current configuration " + "to \n"); + printf(" --allnoconfig New config where all options are " + "answered with no\n"); + printf(" --allyesconfig New config where all options are " + "answered with yes\n"); + printf(" --allmodconfig New config where all options are " + "answered with mod\n"); + printf(" --alldefconfig New config with all symbols set to " + "default\n"); + printf(" --randconfig New config with random answer to all " + "options\n"); } int main(int ac, char **av) { - const char *progname = av[0]; - int opt; - const char *name, *defconfig_file = NULL /* gcc uninit */; - struct stat tmpstat; - - setlocale(LC_ALL, ""); - bindtextdomain(PACKAGE, LOCALEDIR); - textdomain(PACKAGE); - - tty_stdio = isatty(0) && isatty(1) && isatty(2); - - while ((opt = getopt_long(ac, av, "s", long_opts, NULL)) != -1) { - if (opt == 's') { - conf_set_message_callback(NULL); - continue; - } - input_mode = (enum input_mode)opt; - switch (opt) { - case silentoldconfig: - sync_kconfig = 1; - break; - case defconfig: - case savedefconfig: - defconfig_file = optarg; - break; - case randconfig: - { - struct timeval now; - unsigned int seed; - char *seed_env; - - /* - * Use microseconds derived seed, - * compensate for systems where it may be zero - */ - gettimeofday(&now, NULL); - seed = (unsigned int)((now.tv_sec + 1) * (now.tv_usec + 1)); - - seed_env = getenv("KCONFIG_SEED"); - if( seed_env && *seed_env ) { - char *endp; - int tmp = (int)strtol(seed_env, &endp, 0); - if (*endp == '\0') { - seed = tmp; - } - } - fprintf( stderr, "KCONFIG_SEED=0x%X\n", seed ); - srand(seed); - break; - } - case oldaskconfig: - case 
oldconfig: - case allnoconfig: - case allyesconfig: - case allmodconfig: - case alldefconfig: - case listnewconfig: - case olddefconfig: - break; - case '?': - conf_usage(progname); - exit(1); - break; - } - } - if (ac == optind) { - printf(_("%s: Kconfig file missing\n"), av[0]); - conf_usage(progname); - exit(1); - } - name = av[optind]; - conf_parse(name); - //zconfdump(stdout); - if (sync_kconfig) { - name = conf_get_configname(); - if (stat(name, &tmpstat)) { - fprintf(stderr, _("***\n" - "*** Configuration file \"%s\" not found!\n" - "***\n" - "*** Please run some configurator (e.g. \"make oldconfig\" or\n" - "*** \"make menuconfig\" or \"make xconfig\").\n" - "***\n"), name); - exit(1); - } - } - - switch (input_mode) { - case defconfig: - if (!defconfig_file) - defconfig_file = conf_get_default_confname(); - if (conf_read(defconfig_file)) { - printf(_("***\n" - "*** Can't find default configuration \"%s\"!\n" - "***\n"), defconfig_file); - exit(1); - } - break; - case savedefconfig: - case silentoldconfig: - case oldaskconfig: - case oldconfig: - case listnewconfig: - case olddefconfig: - conf_read(NULL); - break; - case allnoconfig: - case allyesconfig: - case allmodconfig: - case alldefconfig: - case randconfig: - name = getenv("KCONFIG_ALLCONFIG"); - if (!name) - break; - if ((strcmp(name, "") != 0) && (strcmp(name, "1") != 0)) { - if (conf_read_simple(name, S_DEF_USER)) { - fprintf(stderr, - _("*** Can't read seed configuration \"%s\"!\n"), - name); - exit(1); - } - break; - } - switch (input_mode) { - case allnoconfig: name = "allno.config"; break; - case allyesconfig: name = "allyes.config"; break; - case allmodconfig: name = "allmod.config"; break; - case alldefconfig: name = "alldef.config"; break; - case randconfig: name = "allrandom.config"; break; - default: break; - } - if (conf_read_simple(name, S_DEF_USER) && - conf_read_simple("all.config", S_DEF_USER)) { - fprintf(stderr, - _("*** KCONFIG_ALLCONFIG set, but no \"%s\" or \"all.config\" file found\n"), - name); - exit(1); - } - break; - default: - break; - } - - if (sync_kconfig) { - if (conf_get_changed()) { - name = getenv("KCONFIG_NOSILENTUPDATE"); - if (name && *name) { - fprintf(stderr, - _("\n*** The configuration requires explicit update.\n\n")); - return 1; - } - } - valid_stdin = tty_stdio; - } - - switch (input_mode) { - case allnoconfig: - conf_set_all_new_symbols(def_no); - break; - case allyesconfig: - conf_set_all_new_symbols(def_yes); - break; - case allmodconfig: - conf_set_all_new_symbols(def_mod); - break; - case alldefconfig: - conf_set_all_new_symbols(def_default); - break; - case randconfig: - /* Really nothing to do in this loop */ - while (conf_set_all_new_symbols(def_random)) ; - break; - case defconfig: - conf_set_all_new_symbols(def_default); - break; - case savedefconfig: - break; - case oldaskconfig: - rootEntry = &rootmenu; - conf(&rootmenu); - input_mode = silentoldconfig; - /* fall through */ - case oldconfig: - case listnewconfig: - case olddefconfig: - case silentoldconfig: - /* Update until a loop caused no more changes */ - do { - conf_cnt = 0; - check_conf(&rootmenu); - } while (conf_cnt && - (input_mode != listnewconfig && - input_mode != olddefconfig)); - break; - } - - if (sync_kconfig) { - /* silentoldconfig is used during the build so we shall update autoconf. - * All other commands are only used to generate a config. 
- */ - if (conf_get_changed() && conf_write(NULL)) { - fprintf(stderr, _("\n*** Error during writing of the configuration.\n\n")); - exit(1); - } - if (conf_write_autoconf()) { - fprintf(stderr, _("\n*** Error during update of the configuration.\n\n")); - return 1; - } - } else if (input_mode == savedefconfig) { - if (conf_write_defconfig(defconfig_file)) { - fprintf(stderr, _("n*** Error while saving defconfig to: %s\n\n"), - defconfig_file); - return 1; - } - } else if (input_mode != listnewconfig) { - if (conf_write(NULL)) { - fprintf(stderr, _("\n*** Error during writing of the configuration.\n\n")); - exit(1); - } - } - return 0; + const char *progname = av[0]; + int opt; + const char *name, *defconfig_file = NULL /* gcc uninit */; + struct stat tmpstat; + + setlocale(LC_ALL, ""); + bindtextdomain(PACKAGE, LOCALEDIR); + textdomain(PACKAGE); + + tty_stdio = isatty(0) && isatty(1) && isatty(2); + + while ( (opt = getopt_long(ac, av, "s", long_opts, NULL)) != -1 ) + { + if ( opt == 's' ) + { + conf_set_message_callback(NULL); + continue; + } + input_mode = (enum input_mode)opt; + switch (opt) + { + case silentoldconfig: + sync_kconfig = 1; + break; + case defconfig: + case savedefconfig: + defconfig_file = optarg; + break; + case randconfig: + { + struct timeval now; + unsigned int seed; + char *seed_env; + + /* + * Use microseconds derived seed, + * compensate for systems where it may be zero + */ + gettimeofday(&now, NULL); + seed = (unsigned int)((now.tv_sec + 1) * (now.tv_usec + 1)); + + seed_env = getenv("KCONFIG_SEED"); + if ( seed_env && *seed_env ) + { + char *endp; + int tmp = (int)strtol(seed_env, &endp, 0); + if ( *endp == '\0' ) + { + seed = tmp; + } + } + fprintf(stderr, "KCONFIG_SEED=0x%X\n", seed); + srand(seed); + break; + } + case oldaskconfig: + case oldconfig: + case allnoconfig: + case allyesconfig: + case allmodconfig: + case alldefconfig: + case listnewconfig: + case olddefconfig: + break; + case '?': + conf_usage(progname); + exit(1); + break; + } + } + if ( ac == optind ) + { + printf(_("%s: Kconfig file missing\n"), av[0]); + conf_usage(progname); + exit(1); + } + name = av[optind]; + conf_parse(name); + // zconfdump(stdout); + if ( sync_kconfig ) + { + name = conf_get_configname(); + if ( stat(name, &tmpstat) ) + { + fprintf(stderr, + _("***\n" + "*** Configuration file \"%s\" not found!\n" + "***\n" + "*** Please run some configurator (e.g. 
\"make " + "oldconfig\" or\n" + "*** \"make menuconfig\" or \"make xconfig\").\n" + "***\n"), + name); + exit(1); + } + } + + switch (input_mode) + { + case defconfig: + if ( !defconfig_file ) + defconfig_file = conf_get_default_confname(); + if ( conf_read(defconfig_file) ) + { + printf(_("***\n" + "*** Can't find default configuration \"%s\"!\n" + "***\n"), + defconfig_file); + exit(1); + } + break; + case savedefconfig: + case silentoldconfig: + case oldaskconfig: + case oldconfig: + case listnewconfig: + case olddefconfig: + conf_read(NULL); + break; + case allnoconfig: + case allyesconfig: + case allmodconfig: + case alldefconfig: + case randconfig: + name = getenv("KCONFIG_ALLCONFIG"); + if ( !name ) + break; + if ( (strcmp(name, "") != 0) && (strcmp(name, "1") != 0) ) + { + if ( conf_read_simple(name, S_DEF_USER) ) + { + fprintf(stderr, + _("*** Can't read seed configuration \"%s\"!\n"), name); + exit(1); + } + break; + } + switch (input_mode) + { + case allnoconfig: + name = "allno.config"; + break; + case allyesconfig: + name = "allyes.config"; + break; + case allmodconfig: + name = "allmod.config"; + break; + case alldefconfig: + name = "alldef.config"; + break; + case randconfig: + name = "allrandom.config"; + break; + default: + break; + } + if ( conf_read_simple(name, S_DEF_USER) && + conf_read_simple("all.config", S_DEF_USER) ) + { + fprintf(stderr, + _("*** KCONFIG_ALLCONFIG set, but no \"%s\" or " + "\"all.config\" file found\n"), + name); + exit(1); + } + break; + default: + break; + } + + if ( sync_kconfig ) + { + if ( conf_get_changed() ) + { + name = getenv("KCONFIG_NOSILENTUPDATE"); + if ( name && *name ) + { + fprintf( + stderr, + _("\n*** The configuration requires explicit update.\n\n")); + return 1; + } + } + valid_stdin = tty_stdio; + } + + switch (input_mode) + { + case allnoconfig: + conf_set_all_new_symbols(def_no); + break; + case allyesconfig: + conf_set_all_new_symbols(def_yes); + break; + case allmodconfig: + conf_set_all_new_symbols(def_mod); + break; + case alldefconfig: + conf_set_all_new_symbols(def_default); + break; + case randconfig: + /* Really nothing to do in this loop */ + while ( conf_set_all_new_symbols(def_random) ) + ; + break; + case defconfig: + conf_set_all_new_symbols(def_default); + break; + case savedefconfig: + break; + case oldaskconfig: + rootEntry = &rootmenu; + conf(&rootmenu); + input_mode = silentoldconfig; + /* fall through */ + case oldconfig: + case listnewconfig: + case olddefconfig: + case silentoldconfig: + /* Update until a loop caused no more changes */ + do { + conf_cnt = 0; + check_conf(&rootmenu); + } while ( conf_cnt && + (input_mode != listnewconfig && input_mode != olddefconfig) ); + break; + } + + if ( sync_kconfig ) + { + /* silentoldconfig is used during the build so we shall update autoconf. + * All other commands are only used to generate a config. 
+ */ + if ( conf_get_changed() && conf_write(NULL) ) + { + fprintf(stderr, + _("\n*** Error during writing of the configuration.\n\n")); + exit(1); + } + if ( conf_write_autoconf() ) + { + fprintf(stderr, + _("\n*** Error during update of the configuration.\n\n")); + return 1; + } + } + else if ( input_mode == savedefconfig ) + { + if ( conf_write_defconfig(defconfig_file) ) + { + fprintf(stderr, _("n*** Error while saving defconfig to: %s\n\n"), + defconfig_file); + return 1; + } + } + else if ( input_mode != listnewconfig ) + { + if ( conf_write(NULL) ) + { + fprintf(stderr, + _("\n*** Error during writing of the configuration.\n\n")); + exit(1); + } + } + return 0; } /* @@ -717,6 +807,6 @@ int main(int ac, char **av) */ void xfgets(char *str, int size, FILE *in) { - if (fgets(str, size, in) == NULL) - fprintf(stderr, "\nError in reading or end of file.\n"); + if ( fgets(str, size, in) == NULL ) + fprintf(stderr, "\nError in reading or end of file.\n"); } diff --git a/xen/tools/kconfig/confdata.c b/xen/tools/kconfig/confdata.c index 0b7dc2fd7b..3383b2566b 100644 --- a/xen/tools/kconfig/confdata.c +++ b/xen/tools/kconfig/confdata.c @@ -16,16 +16,17 @@ #include "lkc.h" -struct conf_printer { - void (*print_symbol)(FILE *, struct symbol *, const char *, void *); - void (*print_comment)(FILE *, const char *, void *); +struct conf_printer +{ + void (*print_symbol)(FILE *, struct symbol *, const char *, void *); + void (*print_comment)(FILE *, const char *, void *); }; static void conf_warning(const char *fmt, ...) - __attribute__ ((format (printf, 1, 2))); + __attribute__((format(printf, 1, 2))); static void conf_message(const char *fmt, ...) - __attribute__ ((format (printf, 1, 2))); + __attribute__((format(printf, 1, 2))); static const char *conf_filename; static int conf_lineno, conf_warnings, conf_unsaved; @@ -34,443 +35,492 @@ const char conf_defname[] = "arch/$ARCH/defconfig"; static void conf_warning(const char *fmt, ...) { - va_list ap; - va_start(ap, fmt); - fprintf(stderr, "%s:%d:warning: ", conf_filename, conf_lineno); - vfprintf(stderr, fmt, ap); - fprintf(stderr, "\n"); - va_end(ap); - conf_warnings++; + va_list ap; + va_start(ap, fmt); + fprintf(stderr, "%s:%d:warning: ", conf_filename, conf_lineno); + vfprintf(stderr, fmt, ap); + fprintf(stderr, "\n"); + va_end(ap); + conf_warnings++; } static void conf_default_message_callback(const char *fmt, va_list ap) { - printf("#\n# "); - vprintf(fmt, ap); - printf("\n#\n"); + printf("#\n# "); + vprintf(fmt, ap); + printf("\n#\n"); } -static void (*conf_message_callback) (const char *fmt, va_list ap) = - conf_default_message_callback; -void conf_set_message_callback(void (*fn) (const char *fmt, va_list ap)) +static void (*conf_message_callback)(const char *fmt, va_list ap) = + conf_default_message_callback; +void conf_set_message_callback(void (*fn)(const char *fmt, va_list ap)) { - conf_message_callback = fn; + conf_message_callback = fn; } static void conf_message(const char *fmt, ...) { - va_list ap; + va_list ap; - va_start(ap, fmt); - if (conf_message_callback) - conf_message_callback(fmt, ap); - va_end(ap); + va_start(ap, fmt); + if ( conf_message_callback ) + conf_message_callback(fmt, ap); + va_end(ap); } const char *conf_get_configname(void) { - char *name = getenv("KCONFIG_CONFIG"); + char *name = getenv("KCONFIG_CONFIG"); - return name ? name : ".config"; + return name ? 
name : ".config"; } const char *conf_get_autoconfig_name(void) { - char *name = getenv("KCONFIG_AUTOCONFIG"); + char *name = getenv("KCONFIG_AUTOCONFIG"); - return name ? name : "include/config/auto.conf"; + return name ? name : "include/config/auto.conf"; } static char *conf_expand_value(const char *in) { - struct symbol *sym; - const char *src; - static char res_value[SYMBOL_MAXLENGTH]; - char *dst, name[SYMBOL_MAXLENGTH]; - - res_value[0] = 0; - dst = name; - while ((src = strchr(in, '$'))) { - strncat(res_value, in, src - in); - src++; - dst = name; - while (isalnum(*src) || *src == '_') - *dst++ = *src++; - *dst = 0; - sym = sym_lookup(name, 0); - sym_calc_value(sym); - strcat(res_value, sym_get_string_value(sym)); - in = src; - } - strcat(res_value, in); - - return res_value; + struct symbol *sym; + const char *src; + static char res_value[SYMBOL_MAXLENGTH]; + char *dst, name[SYMBOL_MAXLENGTH]; + + res_value[0] = 0; + dst = name; + while ( (src = strchr(in, '$')) ) + { + strncat(res_value, in, src - in); + src++; + dst = name; + while ( isalnum(*src) || *src == '_' ) + *dst++ = *src++; + *dst = 0; + sym = sym_lookup(name, 0); + sym_calc_value(sym); + strcat(res_value, sym_get_string_value(sym)); + in = src; + } + strcat(res_value, in); + + return res_value; } char *conf_get_default_confname(void) { - struct stat buf; - static char fullname[PATH_MAX+1]; - char *env, *name; - - name = conf_expand_value(conf_defname); - env = getenv(SRCTREE); - if (env) { - sprintf(fullname, "%s/%s", env, name); - if (!stat(fullname, &buf)) - return fullname; - } - return name; + struct stat buf; + static char fullname[PATH_MAX + 1]; + char *env, *name; + + name = conf_expand_value(conf_defname); + env = getenv(SRCTREE); + if ( env ) + { + sprintf(fullname, "%s/%s", env, name); + if ( !stat(fullname, &buf) ) + return fullname; + } + return name; } static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p) { - char *p2; - - switch (sym->type) { - case S_TRISTATE: - if (p[0] == 'm') { - sym->def[def].tri = mod; - sym->flags |= def_flags; - break; - } - /* fall through */ - case S_BOOLEAN: - if (p[0] == 'y') { - sym->def[def].tri = yes; - sym->flags |= def_flags; - break; - } - if (p[0] == 'n') { - sym->def[def].tri = no; - sym->flags |= def_flags; - break; - } - if (def != S_DEF_AUTO) - conf_warning("symbol value '%s' invalid for %s", - p, sym->name); - return 1; - case S_OTHER: - if (*p != '"') { - for (p2 = p; *p2 && !isspace(*p2); p2++) - ; - sym->type = S_STRING; - goto done; - } - /* fall through */ - case S_STRING: - if (*p++ != '"') - break; - for (p2 = p; (p2 = strpbrk(p2, "\"\\")); p2++) { - if (*p2 == '"') { - *p2 = 0; - break; - } - memmove(p2, p2 + 1, strlen(p2)); - } - if (!p2) { - if (def != S_DEF_AUTO) - conf_warning("invalid string found"); - return 1; - } - /* fall through */ - case S_INT: - case S_HEX: - done: - if (sym_string_valid(sym, p)) { - sym->def[def].val = strdup(p); - sym->flags |= def_flags; - } else { - if (def != S_DEF_AUTO) - conf_warning("symbol value '%s' invalid for %s", - p, sym->name); - return 1; - } - break; - default: - ; - } - return 0; + char *p2; + + switch (sym->type) + { + case S_TRISTATE: + if ( p[0] == 'm' ) + { + sym->def[def].tri = mod; + sym->flags |= def_flags; + break; + } + /* fall through */ + case S_BOOLEAN: + if ( p[0] == 'y' ) + { + sym->def[def].tri = yes; + sym->flags |= def_flags; + break; + } + if ( p[0] == 'n' ) + { + sym->def[def].tri = no; + sym->flags |= def_flags; + break; + } + if ( def != S_DEF_AUTO ) + 
conf_warning("symbol value '%s' invalid for %s", p, sym->name); + return 1; + case S_OTHER: + if ( *p != '"' ) + { + for ( p2 = p; *p2 && !isspace(*p2); p2++ ) + ; + sym->type = S_STRING; + goto done; + } + /* fall through */ + case S_STRING: + if ( *p++ != '"' ) + break; + for ( p2 = p; (p2 = strpbrk(p2, "\"\\")); p2++ ) + { + if ( *p2 == '"' ) + { + *p2 = 0; + break; + } + memmove(p2, p2 + 1, strlen(p2)); + } + if ( !p2 ) + { + if ( def != S_DEF_AUTO ) + conf_warning("invalid string found"); + return 1; + } + /* fall through */ + case S_INT: + case S_HEX: + done: + if ( sym_string_valid(sym, p) ) + { + sym->def[def].val = strdup(p); + sym->flags |= def_flags; + } + else + { + if ( def != S_DEF_AUTO ) + conf_warning("symbol value '%s' invalid for %s", p, sym->name); + return 1; + } + break; + default:; + } + return 0; } #define LINE_GROWTH 16 static int add_byte(int c, char **lineptr, size_t slen, size_t *n) { - char *nline; - size_t new_size = slen + 1; - if (new_size > *n) { - new_size += LINE_GROWTH - 1; - new_size *= 2; - nline = realloc(*lineptr, new_size); - if (!nline) - return -1; - - *lineptr = nline; - *n = new_size; - } - - (*lineptr)[slen] = c; - - return 0; + char *nline; + size_t new_size = slen + 1; + if ( new_size > *n ) + { + new_size += LINE_GROWTH - 1; + new_size *= 2; + nline = realloc(*lineptr, new_size); + if ( !nline ) + return -1; + + *lineptr = nline; + *n = new_size; + } + + (*lineptr)[slen] = c; + + return 0; } static ssize_t compat_getline(char **lineptr, size_t *n, FILE *stream) { - char *line = *lineptr; - size_t slen = 0; - - for (;;) { - int c = getc(stream); - - switch (c) { - case '\n': - if (add_byte(c, &line, slen, n) < 0) - goto e_out; - slen++; - /* fall through */ - case EOF: - if (add_byte('\0', &line, slen, n) < 0) - goto e_out; - *lineptr = line; - if (slen == 0) - return -1; - return slen; - default: - if (add_byte(c, &line, slen, n) < 0) - goto e_out; - slen++; - } - } + char *line = *lineptr; + size_t slen = 0; + + for ( ;; ) + { + int c = getc(stream); + + switch (c) + { + case '\n': + if ( add_byte(c, &line, slen, n) < 0 ) + goto e_out; + slen++; + /* fall through */ + case EOF: + if ( add_byte('\0', &line, slen, n) < 0 ) + goto e_out; + *lineptr = line; + if ( slen == 0 ) + return -1; + return slen; + default: + if ( add_byte(c, &line, slen, n) < 0 ) + goto e_out; + slen++; + } + } e_out: - line[slen-1] = '\0'; - *lineptr = line; - return -1; + line[slen - 1] = '\0'; + *lineptr = line; + return -1; } int conf_read_simple(const char *name, int def) { - FILE *in = NULL; - char *line = NULL; - size_t line_asize = 0; - char *p, *p2; - struct symbol *sym; - int i, def_flags; - - if (name) { - in = zconf_fopen(name); - } else { - struct property *prop; - - name = conf_get_configname(); - in = zconf_fopen(name); - if (in) - goto load; - sym_add_change_count(1); - if (!sym_defconfig_list) { - sym_calc_value(modules_sym); - return 1; - } - - for_all_defaults(sym_defconfig_list, prop) { - if (expr_calc_value(prop->visible.expr) == no || - prop->expr->type != E_SYMBOL) - continue; - name = conf_expand_value(prop->expr->left.sym->name); - in = zconf_fopen(name); - if (in) { - conf_message(_("using defaults found in %s"), - name); - goto load; - } - } - } - if (!in) - return 1; + FILE *in = NULL; + char *line = NULL; + size_t line_asize = 0; + char *p, *p2; + struct symbol *sym; + int i, def_flags; + + if ( name ) + { + in = zconf_fopen(name); + } + else + { + struct property *prop; + + name = conf_get_configname(); + in = zconf_fopen(name); + if ( in ) 
+ goto load; + sym_add_change_count(1); + if ( !sym_defconfig_list ) + { + sym_calc_value(modules_sym); + return 1; + } + + for_all_defaults(sym_defconfig_list, prop) + { + if ( expr_calc_value(prop->visible.expr) == no || + prop->expr->type != E_SYMBOL ) + continue; + name = conf_expand_value(prop->expr->left.sym->name); + in = zconf_fopen(name); + if ( in ) + { + conf_message(_("using defaults found in %s"), name); + goto load; + } + } + } + if ( !in ) + return 1; load: - conf_filename = name; - conf_lineno = 0; - conf_warnings = 0; - conf_unsaved = 0; - - def_flags = SYMBOL_DEF << def; - for_all_symbols(i, sym) { - sym->flags |= SYMBOL_CHANGED; - sym->flags &= ~(def_flags|SYMBOL_VALID); - if (sym_is_choice(sym)) - sym->flags |= def_flags; - switch (sym->type) { - case S_INT: - case S_HEX: - case S_STRING: - if (sym->def[def].val) - free(sym->def[def].val); - /* fall through */ - default: - sym->def[def].val = NULL; - sym->def[def].tri = no; - } - } - - while (compat_getline(&line, &line_asize, in) != -1) { - conf_lineno++; - sym = NULL; - if (line[0] == '#') { - if (memcmp(line + 2, CONFIG_, strlen(CONFIG_))) - continue; - p = strchr(line + 2 + strlen(CONFIG_), ' '); - if (!p) - continue; - *p++ = 0; - if (strncmp(p, "is not set", 10)) - continue; - if (def == S_DEF_USER) { - sym = sym_find(line + 2 + strlen(CONFIG_)); - if (!sym) { - sym_add_change_count(1); - goto setsym; - } - } else { - sym = sym_lookup(line + 2 + strlen(CONFIG_), 0); - if (sym->type == S_UNKNOWN) - sym->type = S_BOOLEAN; - } - if (sym->flags & def_flags) { - conf_warning("override: reassigning to symbol %s", sym->name); - } - switch (sym->type) { - case S_BOOLEAN: - case S_TRISTATE: - sym->def[def].tri = no; - sym->flags |= def_flags; - break; - default: - ; - } - } else if (memcmp(line, CONFIG_, strlen(CONFIG_)) == 0) { - p = strchr(line + strlen(CONFIG_), '='); - if (!p) - continue; - *p++ = 0; - p2 = strchr(p, '\n'); - if (p2) { - *p2-- = 0; - if (*p2 == '\r') - *p2 = 0; - } - if (def == S_DEF_USER) { - sym = sym_find(line + strlen(CONFIG_)); - if (!sym) { - sym_add_change_count(1); - goto setsym; - } - } else { - sym = sym_lookup(line + strlen(CONFIG_), 0); - if (sym->type == S_UNKNOWN) - sym->type = S_OTHER; - } - if (sym->flags & def_flags) { - conf_warning("override: reassigning to symbol %s", sym->name); - } - if (conf_set_sym_val(sym, def, def_flags, p)) - continue; - } else { - if (line[0] != '\r' && line[0] != '\n') - conf_warning("unexpected data"); - continue; - } -setsym: - if (sym && sym_is_choice_value(sym)) { - struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym)); - switch (sym->def[def].tri) { - case no: - break; - case mod: - if (cs->def[def].tri == yes) { - conf_warning("%s creates inconsistent choice state", sym->name); - cs->flags &= ~def_flags; - } - break; - case yes: - if (cs->def[def].tri != no) - conf_warning("override: %s changes choice state", sym->name); - cs->def[def].val = sym; - break; - } - cs->def[def].tri = EXPR_OR(cs->def[def].tri, sym->def[def].tri); - } - } - free(line); - fclose(in); - sym_calc_value(modules_sym); - return 0; + conf_filename = name; + conf_lineno = 0; + conf_warnings = 0; + conf_unsaved = 0; + + def_flags = SYMBOL_DEF << def; + for_all_symbols(i, sym) + { + sym->flags |= SYMBOL_CHANGED; + sym->flags &= ~(def_flags | SYMBOL_VALID); + if ( sym_is_choice(sym) ) + sym->flags |= def_flags; + switch (sym->type) + { + case S_INT: + case S_HEX: + case S_STRING: + if ( sym->def[def].val ) + free(sym->def[def].val); + /* fall through */ + default: + 
sym->def[def].val = NULL; + sym->def[def].tri = no; + } + } + + while ( compat_getline(&line, &line_asize, in) != -1 ) + { + conf_lineno++; + sym = NULL; + if ( line[0] == '#' ) + { + if ( memcmp(line + 2, CONFIG_, strlen(CONFIG_)) ) + continue; + p = strchr(line + 2 + strlen(CONFIG_), ' '); + if ( !p ) + continue; + *p++ = 0; + if ( strncmp(p, "is not set", 10) ) + continue; + if ( def == S_DEF_USER ) + { + sym = sym_find(line + 2 + strlen(CONFIG_)); + if ( !sym ) + { + sym_add_change_count(1); + goto setsym; + } + } + else + { + sym = sym_lookup(line + 2 + strlen(CONFIG_), 0); + if ( sym->type == S_UNKNOWN ) + sym->type = S_BOOLEAN; + } + if ( sym->flags & def_flags ) + { + conf_warning("override: reassigning to symbol %s", sym->name); + } + switch (sym->type) + { + case S_BOOLEAN: + case S_TRISTATE: + sym->def[def].tri = no; + sym->flags |= def_flags; + break; + default:; + } + } + else if ( memcmp(line, CONFIG_, strlen(CONFIG_)) == 0 ) + { + p = strchr(line + strlen(CONFIG_), '='); + if ( !p ) + continue; + *p++ = 0; + p2 = strchr(p, '\n'); + if ( p2 ) + { + *p2-- = 0; + if ( *p2 == '\r' ) + *p2 = 0; + } + if ( def == S_DEF_USER ) + { + sym = sym_find(line + strlen(CONFIG_)); + if ( !sym ) + { + sym_add_change_count(1); + goto setsym; + } + } + else + { + sym = sym_lookup(line + strlen(CONFIG_), 0); + if ( sym->type == S_UNKNOWN ) + sym->type = S_OTHER; + } + if ( sym->flags & def_flags ) + { + conf_warning("override: reassigning to symbol %s", sym->name); + } + if ( conf_set_sym_val(sym, def, def_flags, p) ) + continue; + } + else + { + if ( line[0] != '\r' && line[0] != '\n' ) + conf_warning("unexpected data"); + continue; + } + setsym: + if ( sym && sym_is_choice_value(sym) ) + { + struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym)); + switch (sym->def[def].tri) + { + case no: + break; + case mod: + if ( cs->def[def].tri == yes ) + { + conf_warning("%s creates inconsistent choice state", + sym->name); + cs->flags &= ~def_flags; + } + break; + case yes: + if ( cs->def[def].tri != no ) + conf_warning("override: %s changes choice state", + sym->name); + cs->def[def].val = sym; + break; + } + cs->def[def].tri = EXPR_OR(cs->def[def].tri, sym->def[def].tri); + } + } + free(line); + fclose(in); + sym_calc_value(modules_sym); + return 0; } int conf_read(const char *name) { - struct symbol *sym; - int i; - - sym_set_change_count(0); - - if (conf_read_simple(name, S_DEF_USER)) - return 1; - - for_all_symbols(i, sym) { - sym_calc_value(sym); - if (sym_is_choice(sym) || (sym->flags & SYMBOL_AUTO)) - continue; - if (sym_has_value(sym) && (sym->flags & SYMBOL_WRITE)) { - /* check that calculated value agrees with saved value */ - switch (sym->type) { - case S_BOOLEAN: - case S_TRISTATE: - if (sym->def[S_DEF_USER].tri != sym_get_tristate_value(sym)) - break; - if (!sym_is_choice(sym)) - continue; - /* fall through */ - default: - if (!strcmp(sym->curr.val, sym->def[S_DEF_USER].val)) - continue; - break; - } - } else if (!sym_has_value(sym) && !(sym->flags & SYMBOL_WRITE)) - /* no previous value and not saved */ - continue; - conf_unsaved++; - /* maybe print value in verbose mode... */ - } - - for_all_symbols(i, sym) { - if (sym_has_value(sym) && !sym_is_choice_value(sym)) { - /* Reset values of generates values, so they'll appear - * as new, if they should become visible, but that - * doesn't quite work if the Kconfig and the saved - * configuration disagree. 
- */ - if (sym->visible == no && !conf_unsaved) - sym->flags &= ~SYMBOL_DEF_USER; - switch (sym->type) { - case S_STRING: - case S_INT: - case S_HEX: - /* Reset a string value if it's out of range */ - if (sym_string_within_range(sym, sym->def[S_DEF_USER].val)) - break; - sym->flags &= ~(SYMBOL_VALID|SYMBOL_DEF_USER); - conf_unsaved++; - break; - default: - break; - } - } - } - - sym_add_change_count(conf_warnings || conf_unsaved); - - return 0; + struct symbol *sym; + int i; + + sym_set_change_count(0); + + if ( conf_read_simple(name, S_DEF_USER) ) + return 1; + + for_all_symbols(i, sym) + { + sym_calc_value(sym); + if ( sym_is_choice(sym) || (sym->flags & SYMBOL_AUTO) ) + continue; + if ( sym_has_value(sym) && (sym->flags & SYMBOL_WRITE) ) + { + /* check that calculated value agrees with saved value */ + switch (sym->type) + { + case S_BOOLEAN: + case S_TRISTATE: + if ( sym->def[S_DEF_USER].tri != sym_get_tristate_value(sym) ) + break; + if ( !sym_is_choice(sym) ) + continue; + /* fall through */ + default: + if ( !strcmp(sym->curr.val, sym->def[S_DEF_USER].val) ) + continue; + break; + } + } + else if ( !sym_has_value(sym) && !(sym->flags & SYMBOL_WRITE) ) + /* no previous value and not saved */ + continue; + conf_unsaved++; + /* maybe print value in verbose mode... */ + } + + for_all_symbols(i, sym) + { + if ( sym_has_value(sym) && !sym_is_choice_value(sym) ) + { + /* Reset values of generates values, so they'll appear + * as new, if they should become visible, but that + * doesn't quite work if the Kconfig and the saved + * configuration disagree. + */ + if ( sym->visible == no && !conf_unsaved ) + sym->flags &= ~SYMBOL_DEF_USER; + switch (sym->type) + { + case S_STRING: + case S_INT: + case S_HEX: + /* Reset a string value if it's out of range */ + if ( sym_string_within_range(sym, sym->def[S_DEF_USER].val) ) + break; + sym->flags &= ~(SYMBOL_VALID | SYMBOL_DEF_USER); + conf_unsaved++; + break; + default: + break; + } + } + } + + sym_add_change_count(conf_warnings || conf_unsaved); + + return 0; } /* @@ -481,53 +531,53 @@ int conf_read(const char *name) * passing a non-NULL argument to the printer. 
* */ -static void -kconfig_print_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg) +static void kconfig_print_symbol(FILE *fp, struct symbol *sym, + const char *value, void *arg) { - - switch (sym->type) { - case S_BOOLEAN: - case S_TRISTATE: - if (*value == 'n') { - bool skip_unset = (arg != NULL); - - if (!skip_unset) - fprintf(fp, "# %s%s is not set\n", - CONFIG_, sym->name); - return; - } - break; - default: - break; - } - - fprintf(fp, "%s%s=%s\n", CONFIG_, sym->name, value); + switch (sym->type) + { + case S_BOOLEAN: + case S_TRISTATE: + if ( *value == 'n' ) + { + bool skip_unset = (arg != NULL); + + if ( !skip_unset ) + fprintf(fp, "# %s%s is not set\n", CONFIG_, sym->name); + return; + } + break; + default: + break; + } + + fprintf(fp, "%s%s=%s\n", CONFIG_, sym->name, value); } -static void -kconfig_print_comment(FILE *fp, const char *value, void *arg) +static void kconfig_print_comment(FILE *fp, const char *value, void *arg) { - const char *p = value; - size_t l; - - for (;;) { - l = strcspn(p, "\n"); - fprintf(fp, "#"); - if (l) { - fprintf(fp, " "); - xfwrite(p, l, 1, fp); - p += l; - } - fprintf(fp, "\n"); - if (*p++ == '\0') - break; - } + const char *p = value; + size_t l; + + for ( ;; ) + { + l = strcspn(p, "\n"); + fprintf(fp, "#"); + if ( l ) + { + fprintf(fp, " "); + xfwrite(p, l, 1, fp); + p += l; + } + fprintf(fp, "\n"); + if ( *p++ == '\0' ) + break; + } } -static struct conf_printer kconfig_printer_cb = -{ - .print_symbol = kconfig_print_symbol, - .print_comment = kconfig_print_comment, +static struct conf_printer kconfig_printer_cb = { + .print_symbol = kconfig_print_symbol, + .print_comment = kconfig_print_comment, }; /* @@ -535,73 +585,72 @@ static struct conf_printer kconfig_printer_cb = * * This printer is used when generating the `include/generated/autoconf.h' file. 
*/ -static void -header_print_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg) +static void header_print_symbol(FILE *fp, struct symbol *sym, const char *value, + void *arg) { - - switch (sym->type) { - case S_BOOLEAN: - case S_TRISTATE: { - const char *suffix = ""; - - switch (*value) { - case 'n': - break; - case 'm': - suffix = "_MODULE"; - /* fall through */ - default: - fprintf(fp, "#define %s%s%s 1\n", - CONFIG_, sym->name, suffix); - } - break; - } - case S_HEX: { - const char *prefix = ""; - - if (value[0] != '0' || (value[1] != 'x' && value[1] != 'X')) - prefix = "0x"; - fprintf(fp, "#define %s%s %s%s\n", - CONFIG_, sym->name, prefix, value); - break; - } - case S_STRING: - case S_INT: - fprintf(fp, "#define %s%s %s\n", - CONFIG_, sym->name, value); - break; - default: - break; - } - + switch (sym->type) + { + case S_BOOLEAN: + case S_TRISTATE: + { + const char *suffix = ""; + + switch (*value) + { + case 'n': + break; + case 'm': + suffix = "_MODULE"; + /* fall through */ + default: + fprintf(fp, "#define %s%s%s 1\n", CONFIG_, sym->name, suffix); + } + break; + } + case S_HEX: + { + const char *prefix = ""; + + if ( value[0] != '0' || (value[1] != 'x' && value[1] != 'X') ) + prefix = "0x"; + fprintf(fp, "#define %s%s %s%s\n", CONFIG_, sym->name, prefix, value); + break; + } + case S_STRING: + case S_INT: + fprintf(fp, "#define %s%s %s\n", CONFIG_, sym->name, value); + break; + default: + break; + } } -static void -header_print_comment(FILE *fp, const char *value, void *arg) +static void header_print_comment(FILE *fp, const char *value, void *arg) { - const char *p = value; - size_t l; - - fprintf(fp, "/*\n"); - for (;;) { - l = strcspn(p, "\n"); - fprintf(fp, " *"); - if (l) { - fprintf(fp, " "); - xfwrite(p, l, 1, fp); - p += l; - } - fprintf(fp, "\n"); - if (*p++ == '\0') - break; - } - fprintf(fp, " */\n"); + const char *p = value; + size_t l; + + fprintf(fp, "/*\n"); + for ( ;; ) + { + l = strcspn(p, "\n"); + fprintf(fp, " *"); + if ( l ) + { + fprintf(fp, " "); + xfwrite(p, l, 1, fp); + p += l; + } + fprintf(fp, "\n"); + if ( *p++ == '\0' ) + break; + } + fprintf(fp, " */\n"); } -static struct conf_printer header_printer_cb = -{ - .print_symbol = header_print_symbol, - .print_comment = header_print_comment, +static struct conf_printer header_printer_cb = { + .print_symbol = header_print_symbol, + .print_comment = header_print_comment, }; /* @@ -609,53 +658,52 @@ static struct conf_printer header_printer_cb = * * This printer is used when generating the `include/config/tristate.conf' file. 
*/ -static void -tristate_print_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg) +static void tristate_print_symbol(FILE *fp, struct symbol *sym, + const char *value, void *arg) { - - if (sym->type == S_TRISTATE && *value != 'n') - fprintf(fp, "%s%s=%c\n", CONFIG_, sym->name, (char)toupper(*value)); + if ( sym->type == S_TRISTATE && *value != 'n' ) + fprintf(fp, "%s%s=%c\n", CONFIG_, sym->name, (char)toupper(*value)); } -static struct conf_printer tristate_printer_cb = -{ - .print_symbol = tristate_print_symbol, - .print_comment = kconfig_print_comment, +static struct conf_printer tristate_printer_cb = { + .print_symbol = tristate_print_symbol, + .print_comment = kconfig_print_comment, }; static void conf_write_symbol(FILE *fp, struct symbol *sym, - struct conf_printer *printer, void *printer_arg) + struct conf_printer *printer, void *printer_arg) { - const char *str; - - switch (sym->type) { - case S_OTHER: - case S_UNKNOWN: - break; - case S_STRING: - str = sym_get_string_value(sym); - str = sym_escape_string_value(str); - printer->print_symbol(fp, sym, str, printer_arg); - free((void *)str); - break; - default: - str = sym_get_string_value(sym); - printer->print_symbol(fp, sym, str, printer_arg); - } + const char *str; + + switch (sym->type) + { + case S_OTHER: + case S_UNKNOWN: + break; + case S_STRING: + str = sym_get_string_value(sym); + str = sym_escape_string_value(str); + printer->print_symbol(fp, sym, str, printer_arg); + free((void *)str); + break; + default: + str = sym_get_string_value(sym); + printer->print_symbol(fp, sym, str, printer_arg); + } } -static void -conf_write_heading(FILE *fp, struct conf_printer *printer, void *printer_arg) +static void conf_write_heading(FILE *fp, struct conf_printer *printer, + void *printer_arg) { - char buf[256]; + char buf[256]; - snprintf(buf, sizeof(buf), - "\n" - "Automatically generated file; DO NOT EDIT.\n" - "%s\n", - rootmenu.prompt->text); + snprintf(buf, sizeof(buf), + "\n" + "Automatically generated file; DO NOT EDIT.\n" + "%s\n", + rootmenu.prompt->text); - printer->print_comment(fp, buf, printer_arg); + printer->print_comment(fp, buf, printer_arg); } /* @@ -664,361 +712,410 @@ conf_write_heading(FILE *fp, struct conf_printer *printer, void *printer_arg) */ int conf_write_defconfig(const char *filename) { - struct symbol *sym; - struct menu *menu; - FILE *out; - - out = fopen(filename, "w"); - if (!out) - return 1; - - sym_clear_all_valid(); - - /* Traverse all menus to find all relevant symbols */ - menu = rootmenu.list; - - while (menu != NULL) - { - sym = menu->sym; - if (sym == NULL) { - if (!menu_is_visible(menu)) - goto next_menu; - } else if (!sym_is_choice(sym)) { - sym_calc_value(sym); - if (!(sym->flags & SYMBOL_WRITE)) - goto next_menu; - sym->flags &= ~SYMBOL_WRITE; - /* If we cannot change the symbol - skip */ - if (!sym_is_changable(sym)) - goto next_menu; - /* If symbol equals to default value - skip */ - if (strcmp(sym_get_string_value(sym), sym_get_string_default(sym)) == 0) - goto next_menu; - - /* - * If symbol is a choice value and equals to the - * default for a choice - skip. - * But only if value is bool and equal to "y" and - * choice is not "optional". 
- * (If choice is "optional" then all values can be "n") - */ - if (sym_is_choice_value(sym)) { - struct symbol *cs; - struct symbol *ds; - - cs = prop_get_symbol(sym_get_choice_prop(sym)); - ds = sym_choice_default(cs); - if (!sym_is_optional(cs) && sym == ds) { - if ((sym->type == S_BOOLEAN) && - sym_get_tristate_value(sym) == yes) - goto next_menu; - } - } - conf_write_symbol(out, sym, &kconfig_printer_cb, NULL); - } -next_menu: - if (menu->list != NULL) { - menu = menu->list; - } - else if (menu->next != NULL) { - menu = menu->next; - } else { - while ((menu = menu->parent)) { - if (menu->next != NULL) { - menu = menu->next; - break; - } - } - } - } - fclose(out); - return 0; + struct symbol *sym; + struct menu *menu; + FILE *out; + + out = fopen(filename, "w"); + if ( !out ) + return 1; + + sym_clear_all_valid(); + + /* Traverse all menus to find all relevant symbols */ + menu = rootmenu.list; + + while ( menu != NULL ) + { + sym = menu->sym; + if ( sym == NULL ) + { + if ( !menu_is_visible(menu) ) + goto next_menu; + } + else if ( !sym_is_choice(sym) ) + { + sym_calc_value(sym); + if ( !(sym->flags & SYMBOL_WRITE) ) + goto next_menu; + sym->flags &= ~SYMBOL_WRITE; + /* If we cannot change the symbol - skip */ + if ( !sym_is_changable(sym) ) + goto next_menu; + /* If symbol equals to default value - skip */ + if ( strcmp(sym_get_string_value(sym), + sym_get_string_default(sym)) == 0 ) + goto next_menu; + + /* + * If symbol is a choice value and equals to the + * default for a choice - skip. + * But only if value is bool and equal to "y" and + * choice is not "optional". + * (If choice is "optional" then all values can be "n") + */ + if ( sym_is_choice_value(sym) ) + { + struct symbol *cs; + struct symbol *ds; + + cs = prop_get_symbol(sym_get_choice_prop(sym)); + ds = sym_choice_default(cs); + if ( !sym_is_optional(cs) && sym == ds ) + { + if ( (sym->type == S_BOOLEAN) && + sym_get_tristate_value(sym) == yes ) + goto next_menu; + } + } + conf_write_symbol(out, sym, &kconfig_printer_cb, NULL); + } + next_menu: + if ( menu->list != NULL ) + { + menu = menu->list; + } + else if ( menu->next != NULL ) + { + menu = menu->next; + } + else + { + while ( (menu = menu->parent) ) + { + if ( menu->next != NULL ) + { + menu = menu->next; + break; + } + } + } + } + fclose(out); + return 0; } int conf_write(const char *name) { - FILE *out; - struct symbol *sym; - struct menu *menu; - const char *basename; - const char *str; - char dirname[PATH_MAX+1], tmpname[PATH_MAX+1], newname[PATH_MAX+1]; - char *env; - - dirname[0] = 0; - if (name && name[0]) { - struct stat st; - char *slash; - - if (!stat(name, &st) && S_ISDIR(st.st_mode)) { - strcpy(dirname, name); - strcat(dirname, "/"); - basename = conf_get_configname(); - } else if ((slash = strrchr(name, '/'))) { - int size = slash - name + 1; - memcpy(dirname, name, size); - dirname[size] = 0; - if (slash[1]) - basename = slash + 1; - else - basename = conf_get_configname(); - } else - basename = name; - } else - basename = conf_get_configname(); - - sprintf(newname, "%s%s", dirname, basename); - env = getenv("KCONFIG_OVERWRITECONFIG"); - if (!env || !*env) { - sprintf(tmpname, "%s.tmpconfig.%d", dirname, (int)getpid()); - out = fopen(tmpname, "w"); - } else { - *tmpname = 0; - out = fopen(newname, "w"); - } - if (!out) - return 1; - - conf_write_heading(out, &kconfig_printer_cb, NULL); - - if (!conf_get_changed()) - sym_clear_all_valid(); - - menu = rootmenu.list; - while (menu) { - sym = menu->sym; - if (!sym) { - if (!menu_is_visible(menu)) - goto 
next; - str = menu_get_prompt(menu); - fprintf(out, "\n" - "#\n" - "# %s\n" - "#\n", str); - } else if (!(sym->flags & SYMBOL_CHOICE)) { - sym_calc_value(sym); - if (!(sym->flags & SYMBOL_WRITE)) - goto next; - sym->flags &= ~SYMBOL_WRITE; - - conf_write_symbol(out, sym, &kconfig_printer_cb, NULL); - } - -next: - if (menu->list) { - menu = menu->list; - continue; - } - if (menu->next) - menu = menu->next; - else while ((menu = menu->parent)) { - if (menu->next) { - menu = menu->next; - break; - } - } - } - fclose(out); - - if (*tmpname) { - strcat(dirname, basename); - strcat(dirname, ".old"); - rename(newname, dirname); - if (rename(tmpname, newname)) - return 1; - } - - conf_message(_("configuration written to %s"), newname); - - sym_set_change_count(0); - - return 0; + FILE *out; + struct symbol *sym; + struct menu *menu; + const char *basename; + const char *str; + char dirname[PATH_MAX + 1], tmpname[PATH_MAX + 1], newname[PATH_MAX + 1]; + char *env; + + dirname[0] = 0; + if ( name && name[0] ) + { + struct stat st; + char *slash; + + if ( !stat(name, &st) && S_ISDIR(st.st_mode) ) + { + strcpy(dirname, name); + strcat(dirname, "/"); + basename = conf_get_configname(); + } + else if ( (slash = strrchr(name, '/')) ) + { + int size = slash - name + 1; + memcpy(dirname, name, size); + dirname[size] = 0; + if ( slash[1] ) + basename = slash + 1; + else + basename = conf_get_configname(); + } + else + basename = name; + } + else + basename = conf_get_configname(); + + sprintf(newname, "%s%s", dirname, basename); + env = getenv("KCONFIG_OVERWRITECONFIG"); + if ( !env || !*env ) + { + sprintf(tmpname, "%s.tmpconfig.%d", dirname, (int)getpid()); + out = fopen(tmpname, "w"); + } + else + { + *tmpname = 0; + out = fopen(newname, "w"); + } + if ( !out ) + return 1; + + conf_write_heading(out, &kconfig_printer_cb, NULL); + + if ( !conf_get_changed() ) + sym_clear_all_valid(); + + menu = rootmenu.list; + while ( menu ) + { + sym = menu->sym; + if ( !sym ) + { + if ( !menu_is_visible(menu) ) + goto next; + str = menu_get_prompt(menu); + fprintf(out, + "\n" + "#\n" + "# %s\n" + "#\n", + str); + } + else if ( !(sym->flags & SYMBOL_CHOICE) ) + { + sym_calc_value(sym); + if ( !(sym->flags & SYMBOL_WRITE) ) + goto next; + sym->flags &= ~SYMBOL_WRITE; + + conf_write_symbol(out, sym, &kconfig_printer_cb, NULL); + } + + next: + if ( menu->list ) + { + menu = menu->list; + continue; + } + if ( menu->next ) + menu = menu->next; + else + while ( (menu = menu->parent) ) + { + if ( menu->next ) + { + menu = menu->next; + break; + } + } + } + fclose(out); + + if ( *tmpname ) + { + strcat(dirname, basename); + strcat(dirname, ".old"); + rename(newname, dirname); + if ( rename(tmpname, newname) ) + return 1; + } + + conf_message(_("configuration written to %s"), newname); + + sym_set_change_count(0); + + return 0; } static int conf_split_config(void) { - const char *name; - char path[PATH_MAX+1]; - char *s, *d, c; - struct symbol *sym; - struct stat sb; - int res, i, fd; - - name = conf_get_autoconfig_name(); - conf_read_simple(name, S_DEF_AUTO); - - if (chdir("include/config")) - return 1; - - res = 0; - for_all_symbols(i, sym) { - sym_calc_value(sym); - if ((sym->flags & SYMBOL_AUTO) || !sym->name) - continue; - if (sym->flags & SYMBOL_WRITE) { - if (sym->flags & SYMBOL_DEF_AUTO) { - /* - * symbol has old and new value, - * so compare them... 
- */ - switch (sym->type) { - case S_BOOLEAN: - case S_TRISTATE: - if (sym_get_tristate_value(sym) == - sym->def[S_DEF_AUTO].tri) - continue; - break; - case S_STRING: - case S_HEX: - case S_INT: - if (!strcmp(sym_get_string_value(sym), - sym->def[S_DEF_AUTO].val)) - continue; - break; - default: - break; - } - } else { - /* - * If there is no old value, only 'no' (unset) - * is allowed as new value. - */ - switch (sym->type) { - case S_BOOLEAN: - case S_TRISTATE: - if (sym_get_tristate_value(sym) == no) - continue; - break; - default: - break; - } - } - } else if (!(sym->flags & SYMBOL_DEF_AUTO)) - /* There is neither an old nor a new value. */ - continue; - /* else - * There is an old value, but no new value ('no' (unset) - * isn't saved in auto.conf, so the old value is always - * different from 'no'). - */ - - /* Replace all '_' and append ".h" */ - s = sym->name; - d = path; - while ((c = *s++)) { - c = tolower(c); - *d++ = (c == '_') ? '/' : c; - } - strcpy(d, ".h"); - - /* Assume directory path already exists. */ - fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644); - if (fd == -1) { - if (errno != ENOENT) { - res = 1; - break; - } - /* - * Create directory components, - * unless they exist already. - */ - d = path; - while ((d = strchr(d, '/'))) { - *d = 0; - if (stat(path, &sb) && mkdir(path, 0755)) { - res = 1; - goto out; - } - *d++ = '/'; - } - /* Try it again. */ - fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644); - if (fd == -1) { - res = 1; - break; - } - } - close(fd); - } + const char *name; + char path[PATH_MAX + 1]; + char *s, *d, c; + struct symbol *sym; + struct stat sb; + int res, i, fd; + + name = conf_get_autoconfig_name(); + conf_read_simple(name, S_DEF_AUTO); + + if ( chdir("include/config") ) + return 1; + + res = 0; + for_all_symbols(i, sym) + { + sym_calc_value(sym); + if ( (sym->flags & SYMBOL_AUTO) || !sym->name ) + continue; + if ( sym->flags & SYMBOL_WRITE ) + { + if ( sym->flags & SYMBOL_DEF_AUTO ) + { + /* + * symbol has old and new value, + * so compare them... + */ + switch (sym->type) + { + case S_BOOLEAN: + case S_TRISTATE: + if ( sym_get_tristate_value(sym) == + sym->def[S_DEF_AUTO].tri ) + continue; + break; + case S_STRING: + case S_HEX: + case S_INT: + if ( !strcmp(sym_get_string_value(sym), + sym->def[S_DEF_AUTO].val) ) + continue; + break; + default: + break; + } + } + else + { + /* + * If there is no old value, only 'no' (unset) + * is allowed as new value. + */ + switch (sym->type) + { + case S_BOOLEAN: + case S_TRISTATE: + if ( sym_get_tristate_value(sym) == no ) + continue; + break; + default: + break; + } + } + } + else if ( !(sym->flags & SYMBOL_DEF_AUTO) ) + /* There is neither an old nor a new value. */ + continue; + /* else + * There is an old value, but no new value ('no' (unset) + * isn't saved in auto.conf, so the old value is always + * different from 'no'). + */ + + /* Replace all '_' and append ".h" */ + s = sym->name; + d = path; + while ( (c = *s++) ) + { + c = tolower(c); + *d++ = (c == '_') ? '/' : c; + } + strcpy(d, ".h"); + + /* Assume directory path already exists. */ + fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644); + if ( fd == -1 ) + { + if ( errno != ENOENT ) + { + res = 1; + break; + } + /* + * Create directory components, + * unless they exist already. + */ + d = path; + while ( (d = strchr(d, '/')) ) + { + *d = 0; + if ( stat(path, &sb) && mkdir(path, 0755) ) + { + res = 1; + goto out; + } + *d++ = '/'; + } + /* Try it again. 
*/ + fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644); + if ( fd == -1 ) + { + res = 1; + break; + } + } + close(fd); + } out: - if (chdir("../..")) - return 1; + if ( chdir("../..") ) + return 1; - return res; + return res; } int conf_write_autoconf(void) { - struct symbol *sym; - const char *name; - FILE *out, *tristate, *out_h; - int i; - - sym_clear_all_valid(); - - file_write_dep("include/config/auto.conf.cmd"); - - if (conf_split_config()) - return 1; - - out = fopen(".tmpconfig", "w"); - if (!out) - return 1; - - tristate = fopen(".tmpconfig_tristate", "w"); - if (!tristate) { - fclose(out); - return 1; - } - - out_h = fopen(".tmpconfig.h", "w"); - if (!out_h) { - fclose(out); - fclose(tristate); - return 1; - } - - conf_write_heading(out, &kconfig_printer_cb, NULL); - - conf_write_heading(tristate, &tristate_printer_cb, NULL); - - conf_write_heading(out_h, &header_printer_cb, NULL); - - for_all_symbols(i, sym) { - sym_calc_value(sym); - if (!(sym->flags & SYMBOL_WRITE) || !sym->name) - continue; - - /* write symbol to auto.conf, tristate and header files */ - conf_write_symbol(out, sym, &kconfig_printer_cb, (void *)1); - - conf_write_symbol(tristate, sym, &tristate_printer_cb, (void *)1); - - conf_write_symbol(out_h, sym, &header_printer_cb, NULL); - } - fclose(out); - fclose(tristate); - fclose(out_h); - - name = getenv("KCONFIG_AUTOHEADER"); - if (!name) - name = "include/generated/autoconf.h"; - if (rename(".tmpconfig.h", name)) - return 1; - name = getenv("KCONFIG_TRISTATE"); - if (!name) - name = "include/config/tristate.conf"; - if (rename(".tmpconfig_tristate", name)) - return 1; - name = conf_get_autoconfig_name(); - /* - * This must be the last step, kbuild has a dependency on auto.conf - * and this marks the successful completion of the previous steps. - */ - if (rename(".tmpconfig", name)) - return 1; - - return 0; + struct symbol *sym; + const char *name; + FILE *out, *tristate, *out_h; + int i; + + sym_clear_all_valid(); + + file_write_dep("include/config/auto.conf.cmd"); + + if ( conf_split_config() ) + return 1; + + out = fopen(".tmpconfig", "w"); + if ( !out ) + return 1; + + tristate = fopen(".tmpconfig_tristate", "w"); + if ( !tristate ) + { + fclose(out); + return 1; + } + + out_h = fopen(".tmpconfig.h", "w"); + if ( !out_h ) + { + fclose(out); + fclose(tristate); + return 1; + } + + conf_write_heading(out, &kconfig_printer_cb, NULL); + + conf_write_heading(tristate, &tristate_printer_cb, NULL); + + conf_write_heading(out_h, &header_printer_cb, NULL); + + for_all_symbols(i, sym) + { + sym_calc_value(sym); + if ( !(sym->flags & SYMBOL_WRITE) || !sym->name ) + continue; + + /* write symbol to auto.conf, tristate and header files */ + conf_write_symbol(out, sym, &kconfig_printer_cb, (void *)1); + + conf_write_symbol(tristate, sym, &tristate_printer_cb, (void *)1); + + conf_write_symbol(out_h, sym, &header_printer_cb, NULL); + } + fclose(out); + fclose(tristate); + fclose(out_h); + + name = getenv("KCONFIG_AUTOHEADER"); + if ( !name ) + name = "include/generated/autoconf.h"; + if ( rename(".tmpconfig.h", name) ) + return 1; + name = getenv("KCONFIG_TRISTATE"); + if ( !name ) + name = "include/config/tristate.conf"; + if ( rename(".tmpconfig_tristate", name) ) + return 1; + name = conf_get_autoconfig_name(); + /* + * This must be the last step, kbuild has a dependency on auto.conf + * and this marks the successful completion of the previous steps. 
+ */ + if ( rename(".tmpconfig", name) ) + return 1; + + return 0; } static int sym_change_count; @@ -1026,220 +1123,245 @@ static void (*conf_changed_callback)(void); void sym_set_change_count(int count) { - int _sym_change_count = sym_change_count; - sym_change_count = count; - if (conf_changed_callback && - (bool)_sym_change_count != (bool)count) - conf_changed_callback(); + int _sym_change_count = sym_change_count; + sym_change_count = count; + if ( conf_changed_callback && (bool)_sym_change_count != (bool)count ) + conf_changed_callback(); } void sym_add_change_count(int count) { - sym_set_change_count(count + sym_change_count); + sym_set_change_count(count + sym_change_count); } bool conf_get_changed(void) { - return sym_change_count; + return sym_change_count; } void conf_set_changed_callback(void (*fn)(void)) { - conf_changed_callback = fn; + conf_changed_callback = fn; } static bool randomize_choice_values(struct symbol *csym) { - struct property *prop; - struct symbol *sym; - struct expr *e; - int cnt, def; - - /* - * If choice is mod then we may have more items selected - * and if no then no-one. - * In both cases stop. - */ - if (csym->curr.tri != yes) - return false; - - prop = sym_get_choice_prop(csym); - - /* count entries in choice block */ - cnt = 0; - expr_list_for_each_sym(prop->expr, e, sym) - cnt++; - - /* - * find a random value and set it to yes, - * set the rest to no so we have only one set - */ - def = (rand() % cnt); - - cnt = 0; - expr_list_for_each_sym(prop->expr, e, sym) { - if (def == cnt++) { - sym->def[S_DEF_USER].tri = yes; - csym->def[S_DEF_USER].val = sym; - } - else { - sym->def[S_DEF_USER].tri = no; - } - sym->flags |= SYMBOL_DEF_USER; - /* clear VALID to get value calculated */ - sym->flags &= ~SYMBOL_VALID; - } - csym->flags |= SYMBOL_DEF_USER; - /* clear VALID to get value calculated */ - csym->flags &= ~(SYMBOL_VALID); - - return true; + struct property *prop; + struct symbol *sym; + struct expr *e; + int cnt, def; + + /* + * If choice is mod then we may have more items selected + * and if no then no-one. + * In both cases stop. 
+ */ + if ( csym->curr.tri != yes ) + return false; + + prop = sym_get_choice_prop(csym); + + /* count entries in choice block */ + cnt = 0; + expr_list_for_each_sym(prop->expr, e, sym) cnt++; + + /* + * find a random value and set it to yes, + * set the rest to no so we have only one set + */ + def = (rand() % cnt); + + cnt = 0; + expr_list_for_each_sym(prop->expr, e, sym) + { + if ( def == cnt++ ) + { + sym->def[S_DEF_USER].tri = yes; + csym->def[S_DEF_USER].val = sym; + } + else + { + sym->def[S_DEF_USER].tri = no; + } + sym->flags |= SYMBOL_DEF_USER; + /* clear VALID to get value calculated */ + sym->flags &= ~SYMBOL_VALID; + } + csym->flags |= SYMBOL_DEF_USER; + /* clear VALID to get value calculated */ + csym->flags &= ~(SYMBOL_VALID); + + return true; } void set_all_choice_values(struct symbol *csym) { - struct property *prop; - struct symbol *sym; - struct expr *e; - - prop = sym_get_choice_prop(csym); - - /* - * Set all non-assinged choice values to no - */ - expr_list_for_each_sym(prop->expr, e, sym) { - if (!sym_has_value(sym)) - sym->def[S_DEF_USER].tri = no; - } - csym->flags |= SYMBOL_DEF_USER; - /* clear VALID to get value calculated */ - csym->flags &= ~(SYMBOL_VALID | SYMBOL_NEED_SET_CHOICE_VALUES); + struct property *prop; + struct symbol *sym; + struct expr *e; + + prop = sym_get_choice_prop(csym); + + /* + * Set all non-assinged choice values to no + */ + expr_list_for_each_sym(prop->expr, e, sym) + { + if ( !sym_has_value(sym) ) + sym->def[S_DEF_USER].tri = no; + } + csym->flags |= SYMBOL_DEF_USER; + /* clear VALID to get value calculated */ + csym->flags &= ~(SYMBOL_VALID | SYMBOL_NEED_SET_CHOICE_VALUES); } bool conf_set_all_new_symbols(enum conf_def_mode mode) { - struct symbol *sym, *csym; - int i, cnt, pby, pty, ptm; /* pby: probability of boolean = y - * pty: probability of tristate = y - * ptm: probability of tristate = m - */ - - pby = 50; pty = ptm = 33; /* can't go as the default in switch-case - * below, otherwise gcc whines about - * -Wmaybe-uninitialized */ - if (mode == def_random) { - int n, p[3]; - char *env = getenv("KCONFIG_PROBABILITY"); - n = 0; - while( env && *env ) { - char *endp; - int tmp = strtol( env, &endp, 10 ); - if( tmp >= 0 && tmp <= 100 ) { - p[n++] = tmp; - } else { - errno = ERANGE; - perror( "KCONFIG_PROBABILITY" ); - exit( 1 ); - } - env = (*endp == ':') ? 
endp+1 : endp; - if( n >=3 ) { - break; - } - } - switch( n ) { - case 1: - pby = p[0]; ptm = pby/2; pty = pby-ptm; - break; - case 2: - pty = p[0]; ptm = p[1]; pby = pty + ptm; - break; - case 3: - pby = p[0]; pty = p[1]; ptm = p[2]; - break; - } - - if( pty+ptm > 100 ) { - errno = ERANGE; - perror( "KCONFIG_PROBABILITY" ); - exit( 1 ); - } - } - bool has_changed = false; - - for_all_symbols(i, sym) { - if (sym_has_value(sym) || (sym->flags & SYMBOL_VALID)) - continue; - switch (sym_get_type(sym)) { - case S_BOOLEAN: - case S_TRISTATE: - has_changed = true; - switch (mode) { - case def_yes: - sym->def[S_DEF_USER].tri = yes; - break; - case def_mod: - sym->def[S_DEF_USER].tri = mod; - break; - case def_no: - if (sym->flags & SYMBOL_ALLNOCONFIG_Y) - sym->def[S_DEF_USER].tri = yes; - else - sym->def[S_DEF_USER].tri = no; - break; - case def_random: - sym->def[S_DEF_USER].tri = no; - cnt = rand() % 100; - if (sym->type == S_TRISTATE) { - if (cnt < pty) - sym->def[S_DEF_USER].tri = yes; - else if (cnt < (pty+ptm)) - sym->def[S_DEF_USER].tri = mod; - } else if (cnt < pby) - sym->def[S_DEF_USER].tri = yes; - break; - default: - continue; - } - if (!(sym_is_choice(sym) && mode == def_random)) - sym->flags |= SYMBOL_DEF_USER; - break; - default: - break; - } - - } - - sym_clear_all_valid(); - - /* - * We have different type of choice blocks. - * If curr.tri equals to mod then we can select several - * choice symbols in one block. - * In this case we do nothing. - * If curr.tri equals yes then only one symbol can be - * selected in a choice block and we set it to yes, - * and the rest to no. - */ - if (mode != def_random) { - for_all_symbols(i, csym) { - if ((sym_is_choice(csym) && !sym_has_value(csym)) || - sym_is_choice_value(csym)) - csym->flags |= SYMBOL_NEED_SET_CHOICE_VALUES; - } - } - - for_all_symbols(i, csym) { - if (sym_has_value(csym) || !sym_is_choice(csym)) - continue; - - sym_calc_value(csym); - if (mode == def_random) - has_changed = randomize_choice_values(csym); - else { - set_all_choice_values(csym); - has_changed = true; - } - } - - return has_changed; + struct symbol *sym, *csym; + int i, cnt, pby, pty, ptm; /* pby: probability of boolean = y + * pty: probability of tristate = y + * ptm: probability of tristate = m + */ + + pby = 50; + pty = ptm = 33; /* can't go as the default in switch-case + * below, otherwise gcc whines about + * -Wmaybe-uninitialized */ + if ( mode == def_random ) + { + int n, p[3]; + char *env = getenv("KCONFIG_PROBABILITY"); + n = 0; + while ( env && *env ) + { + char *endp; + int tmp = strtol(env, &endp, 10); + if ( tmp >= 0 && tmp <= 100 ) + { + p[n++] = tmp; + } + else + { + errno = ERANGE; + perror("KCONFIG_PROBABILITY"); + exit(1); + } + env = (*endp == ':') ? 
endp + 1 : endp; + if ( n >= 3 ) + { + break; + } + } + switch (n) + { + case 1: + pby = p[0]; + ptm = pby / 2; + pty = pby - ptm; + break; + case 2: + pty = p[0]; + ptm = p[1]; + pby = pty + ptm; + break; + case 3: + pby = p[0]; + pty = p[1]; + ptm = p[2]; + break; + } + + if ( pty + ptm > 100 ) + { + errno = ERANGE; + perror("KCONFIG_PROBABILITY"); + exit(1); + } + } + bool has_changed = false; + + for_all_symbols(i, sym) + { + if ( sym_has_value(sym) || (sym->flags & SYMBOL_VALID) ) + continue; + switch (sym_get_type(sym)) + { + case S_BOOLEAN: + case S_TRISTATE: + has_changed = true; + switch (mode) + { + case def_yes: + sym->def[S_DEF_USER].tri = yes; + break; + case def_mod: + sym->def[S_DEF_USER].tri = mod; + break; + case def_no: + if ( sym->flags & SYMBOL_ALLNOCONFIG_Y ) + sym->def[S_DEF_USER].tri = yes; + else + sym->def[S_DEF_USER].tri = no; + break; + case def_random: + sym->def[S_DEF_USER].tri = no; + cnt = rand() % 100; + if ( sym->type == S_TRISTATE ) + { + if ( cnt < pty ) + sym->def[S_DEF_USER].tri = yes; + else if ( cnt < (pty + ptm) ) + sym->def[S_DEF_USER].tri = mod; + } + else if ( cnt < pby ) + sym->def[S_DEF_USER].tri = yes; + break; + default: + continue; + } + if ( !(sym_is_choice(sym) && mode == def_random) ) + sym->flags |= SYMBOL_DEF_USER; + break; + default: + break; + } + } + + sym_clear_all_valid(); + + /* + * We have different type of choice blocks. + * If curr.tri equals to mod then we can select several + * choice symbols in one block. + * In this case we do nothing. + * If curr.tri equals yes then only one symbol can be + * selected in a choice block and we set it to yes, + * and the rest to no. + */ + if ( mode != def_random ) + { + for_all_symbols(i, csym) + { + if ( (sym_is_choice(csym) && !sym_has_value(csym)) || + sym_is_choice_value(csym) ) + csym->flags |= SYMBOL_NEED_SET_CHOICE_VALUES; + } + } + + for_all_symbols(i, csym) + { + if ( sym_has_value(csym) || !sym_is_choice(csym) ) + continue; + + sym_calc_value(csym); + if ( mode == def_random ) + has_changed = randomize_choice_values(csym); + else + { + set_all_choice_values(csym); + has_changed = true; + } + } + + return has_changed; } diff --git a/xen/tools/kconfig/expr.c b/xen/tools/kconfig/expr.c index 667d1aa237..5f9579b44e 100644 --- a/xen/tools/kconfig/expr.c +++ b/xen/tools/kconfig/expr.c @@ -9,128 +9,132 @@ #include "lkc.h" -#define DEBUG_EXPR 0 +#define DEBUG_EXPR 0 static int expr_eq(struct expr *e1, struct expr *e2); static struct expr *expr_eliminate_yn(struct expr *e); struct expr *expr_alloc_symbol(struct symbol *sym) { - struct expr *e = xcalloc(1, sizeof(*e)); - e->type = E_SYMBOL; - e->left.sym = sym; - return e; + struct expr *e = xcalloc(1, sizeof(*e)); + e->type = E_SYMBOL; + e->left.sym = sym; + return e; } struct expr *expr_alloc_one(enum expr_type type, struct expr *ce) { - struct expr *e = xcalloc(1, sizeof(*e)); - e->type = type; - e->left.expr = ce; - return e; + struct expr *e = xcalloc(1, sizeof(*e)); + e->type = type; + e->left.expr = ce; + return e; } -struct expr *expr_alloc_two(enum expr_type type, struct expr *e1, struct expr *e2) +struct expr *expr_alloc_two(enum expr_type type, struct expr *e1, + struct expr *e2) { - struct expr *e = xcalloc(1, sizeof(*e)); - e->type = type; - e->left.expr = e1; - e->right.expr = e2; - return e; + struct expr *e = xcalloc(1, sizeof(*e)); + e->type = type; + e->left.expr = e1; + e->right.expr = e2; + return e; } -struct expr *expr_alloc_comp(enum expr_type type, struct symbol *s1, struct symbol *s2) +struct expr 
*expr_alloc_comp(enum expr_type type, struct symbol *s1, + struct symbol *s2) { - struct expr *e = xcalloc(1, sizeof(*e)); - e->type = type; - e->left.sym = s1; - e->right.sym = s2; - return e; + struct expr *e = xcalloc(1, sizeof(*e)); + e->type = type; + e->left.sym = s1; + e->right.sym = s2; + return e; } struct expr *expr_alloc_and(struct expr *e1, struct expr *e2) { - if (!e1) - return e2; - return e2 ? expr_alloc_two(E_AND, e1, e2) : e1; + if ( !e1 ) + return e2; + return e2 ? expr_alloc_two(E_AND, e1, e2) : e1; } struct expr *expr_alloc_or(struct expr *e1, struct expr *e2) { - if (!e1) - return e2; - return e2 ? expr_alloc_two(E_OR, e1, e2) : e1; + if ( !e1 ) + return e2; + return e2 ? expr_alloc_two(E_OR, e1, e2) : e1; } struct expr *expr_copy(const struct expr *org) { - struct expr *e; - - if (!org) - return NULL; - - e = xmalloc(sizeof(*org)); - memcpy(e, org, sizeof(*org)); - switch (org->type) { - case E_SYMBOL: - e->left = org->left; - break; - case E_NOT: - e->left.expr = expr_copy(org->left.expr); - break; - case E_EQUAL: - case E_GEQ: - case E_GTH: - case E_LEQ: - case E_LTH: - case E_UNEQUAL: - e->left.sym = org->left.sym; - e->right.sym = org->right.sym; - break; - case E_AND: - case E_OR: - case E_LIST: - e->left.expr = expr_copy(org->left.expr); - e->right.expr = expr_copy(org->right.expr); - break; - default: - printf("can't copy type %d\n", e->type); - free(e); - e = NULL; - break; - } - - return e; + struct expr *e; + + if ( !org ) + return NULL; + + e = xmalloc(sizeof(*org)); + memcpy(e, org, sizeof(*org)); + switch (org->type) + { + case E_SYMBOL: + e->left = org->left; + break; + case E_NOT: + e->left.expr = expr_copy(org->left.expr); + break; + case E_EQUAL: + case E_GEQ: + case E_GTH: + case E_LEQ: + case E_LTH: + case E_UNEQUAL: + e->left.sym = org->left.sym; + e->right.sym = org->right.sym; + break; + case E_AND: + case E_OR: + case E_LIST: + e->left.expr = expr_copy(org->left.expr); + e->right.expr = expr_copy(org->right.expr); + break; + default: + printf("can't copy type %d\n", e->type); + free(e); + e = NULL; + break; + } + + return e; } void expr_free(struct expr *e) { - if (!e) - return; - - switch (e->type) { - case E_SYMBOL: - break; - case E_NOT: - expr_free(e->left.expr); - return; - case E_EQUAL: - case E_GEQ: - case E_GTH: - case E_LEQ: - case E_LTH: - case E_UNEQUAL: - break; - case E_OR: - case E_AND: - expr_free(e->left.expr); - expr_free(e->right.expr); - break; - default: - printf("how to free type %d?\n", e->type); - break; - } - free(e); + if ( !e ) + return; + + switch (e->type) + { + case E_SYMBOL: + break; + case E_NOT: + expr_free(e->left.expr); + return; + case E_EQUAL: + case E_GEQ: + case E_GTH: + case E_LEQ: + case E_LTH: + case E_UNEQUAL: + break; + case E_OR: + case E_AND: + expr_free(e->left.expr); + expr_free(e->right.expr); + break; + default: + printf("how to free type %d?\n", e->type); + break; + } + free(e); } static int trans_count; @@ -138,60 +142,65 @@ static int trans_count; #define e1 (*ep1) #define e2 (*ep2) -static void __expr_eliminate_eq(enum expr_type type, struct expr **ep1, struct expr **ep2) +static void __expr_eliminate_eq(enum expr_type type, struct expr **ep1, + struct expr **ep2) { - if (e1->type == type) { - __expr_eliminate_eq(type, &e1->left.expr, &e2); - __expr_eliminate_eq(type, &e1->right.expr, &e2); - return; - } - if (e2->type == type) { - __expr_eliminate_eq(type, &e1, &e2->left.expr); - __expr_eliminate_eq(type, &e1, &e2->right.expr); - return; - } - if (e1->type == E_SYMBOL && e2->type == E_SYMBOL 
&& - e1->left.sym == e2->left.sym && - (e1->left.sym == &symbol_yes || e1->left.sym == &symbol_no)) - return; - if (!expr_eq(e1, e2)) - return; - trans_count++; - expr_free(e1); expr_free(e2); - switch (type) { - case E_OR: - e1 = expr_alloc_symbol(&symbol_no); - e2 = expr_alloc_symbol(&symbol_no); - break; - case E_AND: - e1 = expr_alloc_symbol(&symbol_yes); - e2 = expr_alloc_symbol(&symbol_yes); - break; - default: - ; - } + if ( e1->type == type ) + { + __expr_eliminate_eq(type, &e1->left.expr, &e2); + __expr_eliminate_eq(type, &e1->right.expr, &e2); + return; + } + if ( e2->type == type ) + { + __expr_eliminate_eq(type, &e1, &e2->left.expr); + __expr_eliminate_eq(type, &e1, &e2->right.expr); + return; + } + if ( e1->type == E_SYMBOL && e2->type == E_SYMBOL && + e1->left.sym == e2->left.sym && + (e1->left.sym == &symbol_yes || e1->left.sym == &symbol_no) ) + return; + if ( !expr_eq(e1, e2) ) + return; + trans_count++; + expr_free(e1); + expr_free(e2); + switch (type) + { + case E_OR: + e1 = expr_alloc_symbol(&symbol_no); + e2 = expr_alloc_symbol(&symbol_no); + break; + case E_AND: + e1 = expr_alloc_symbol(&symbol_yes); + e2 = expr_alloc_symbol(&symbol_yes); + break; + default:; + } } void expr_eliminate_eq(struct expr **ep1, struct expr **ep2) { - if (!e1 || !e2) - return; - switch (e1->type) { - case E_OR: - case E_AND: - __expr_eliminate_eq(e1->type, ep1, ep2); - default: - ; - } - if (e1->type != e2->type) switch (e2->type) { - case E_OR: - case E_AND: - __expr_eliminate_eq(e2->type, ep1, ep2); - default: - ; - } - e1 = expr_eliminate_yn(e1); - e2 = expr_eliminate_yn(e2); + if ( !e1 || !e2 ) + return; + switch (e1->type) + { + case E_OR: + case E_AND: + __expr_eliminate_eq(e1->type, ep1, ep2); + default:; + } + if ( e1->type != e2->type ) + switch (e2->type) + { + case E_OR: + case E_AND: + __expr_eliminate_eq(e2->type, ep1, ep2); + default:; + } + e1 = expr_eliminate_yn(e1); + e2 = expr_eliminate_yn(e2); } #undef e1 @@ -199,131 +208,150 @@ void expr_eliminate_eq(struct expr **ep1, struct expr **ep2) static int expr_eq(struct expr *e1, struct expr *e2) { - int res, old_count; - - if (e1->type != e2->type) - return 0; - switch (e1->type) { - case E_EQUAL: - case E_GEQ: - case E_GTH: - case E_LEQ: - case E_LTH: - case E_UNEQUAL: - return e1->left.sym == e2->left.sym && e1->right.sym == e2->right.sym; - case E_SYMBOL: - return e1->left.sym == e2->left.sym; - case E_NOT: - return expr_eq(e1->left.expr, e2->left.expr); - case E_AND: - case E_OR: - e1 = expr_copy(e1); - e2 = expr_copy(e2); - old_count = trans_count; - expr_eliminate_eq(&e1, &e2); - res = (e1->type == E_SYMBOL && e2->type == E_SYMBOL && - e1->left.sym == e2->left.sym); - expr_free(e1); - expr_free(e2); - trans_count = old_count; - return res; - case E_LIST: - case E_RANGE: - case E_NONE: - /* panic */; - } - - if (DEBUG_EXPR) { - expr_fprint(e1, stdout); - printf(" = "); - expr_fprint(e2, stdout); - printf(" ?\n"); - } - - return 0; + int res, old_count; + + if ( e1->type != e2->type ) + return 0; + switch (e1->type) + { + case E_EQUAL: + case E_GEQ: + case E_GTH: + case E_LEQ: + case E_LTH: + case E_UNEQUAL: + return e1->left.sym == e2->left.sym && e1->right.sym == e2->right.sym; + case E_SYMBOL: + return e1->left.sym == e2->left.sym; + case E_NOT: + return expr_eq(e1->left.expr, e2->left.expr); + case E_AND: + case E_OR: + e1 = expr_copy(e1); + e2 = expr_copy(e2); + old_count = trans_count; + expr_eliminate_eq(&e1, &e2); + res = (e1->type == E_SYMBOL && e2->type == E_SYMBOL && + e1->left.sym == e2->left.sym); + 
expr_free(e1); + expr_free(e2); + trans_count = old_count; + return res; + case E_LIST: + case E_RANGE: + case E_NONE: + /* panic */; + } + + if ( DEBUG_EXPR ) + { + expr_fprint(e1, stdout); + printf(" = "); + expr_fprint(e2, stdout); + printf(" ?\n"); + } + + return 0; } static struct expr *expr_eliminate_yn(struct expr *e) { - struct expr *tmp; - - if (e) switch (e->type) { - case E_AND: - e->left.expr = expr_eliminate_yn(e->left.expr); - e->right.expr = expr_eliminate_yn(e->right.expr); - if (e->left.expr->type == E_SYMBOL) { - if (e->left.expr->left.sym == &symbol_no) { - expr_free(e->left.expr); - expr_free(e->right.expr); - e->type = E_SYMBOL; - e->left.sym = &symbol_no; - e->right.expr = NULL; - return e; - } else if (e->left.expr->left.sym == &symbol_yes) { - free(e->left.expr); - tmp = e->right.expr; - *e = *(e->right.expr); - free(tmp); - return e; - } - } - if (e->right.expr->type == E_SYMBOL) { - if (e->right.expr->left.sym == &symbol_no) { - expr_free(e->left.expr); - expr_free(e->right.expr); - e->type = E_SYMBOL; - e->left.sym = &symbol_no; - e->right.expr = NULL; - return e; - } else if (e->right.expr->left.sym == &symbol_yes) { - free(e->right.expr); - tmp = e->left.expr; - *e = *(e->left.expr); - free(tmp); - return e; - } - } - break; - case E_OR: - e->left.expr = expr_eliminate_yn(e->left.expr); - e->right.expr = expr_eliminate_yn(e->right.expr); - if (e->left.expr->type == E_SYMBOL) { - if (e->left.expr->left.sym == &symbol_no) { - free(e->left.expr); - tmp = e->right.expr; - *e = *(e->right.expr); - free(tmp); - return e; - } else if (e->left.expr->left.sym == &symbol_yes) { - expr_free(e->left.expr); - expr_free(e->right.expr); - e->type = E_SYMBOL; - e->left.sym = &symbol_yes; - e->right.expr = NULL; - return e; - } - } - if (e->right.expr->type == E_SYMBOL) { - if (e->right.expr->left.sym == &symbol_no) { - free(e->right.expr); - tmp = e->left.expr; - *e = *(e->left.expr); - free(tmp); - return e; - } else if (e->right.expr->left.sym == &symbol_yes) { - expr_free(e->left.expr); - expr_free(e->right.expr); - e->type = E_SYMBOL; - e->left.sym = &symbol_yes; - e->right.expr = NULL; - return e; - } - } - break; - default: - ; - } - return e; + struct expr *tmp; + + if ( e ) + switch (e->type) + { + case E_AND: + e->left.expr = expr_eliminate_yn(e->left.expr); + e->right.expr = expr_eliminate_yn(e->right.expr); + if ( e->left.expr->type == E_SYMBOL ) + { + if ( e->left.expr->left.sym == &symbol_no ) + { + expr_free(e->left.expr); + expr_free(e->right.expr); + e->type = E_SYMBOL; + e->left.sym = &symbol_no; + e->right.expr = NULL; + return e; + } + else if ( e->left.expr->left.sym == &symbol_yes ) + { + free(e->left.expr); + tmp = e->right.expr; + *e = *(e->right.expr); + free(tmp); + return e; + } + } + if ( e->right.expr->type == E_SYMBOL ) + { + if ( e->right.expr->left.sym == &symbol_no ) + { + expr_free(e->left.expr); + expr_free(e->right.expr); + e->type = E_SYMBOL; + e->left.sym = &symbol_no; + e->right.expr = NULL; + return e; + } + else if ( e->right.expr->left.sym == &symbol_yes ) + { + free(e->right.expr); + tmp = e->left.expr; + *e = *(e->left.expr); + free(tmp); + return e; + } + } + break; + case E_OR: + e->left.expr = expr_eliminate_yn(e->left.expr); + e->right.expr = expr_eliminate_yn(e->right.expr); + if ( e->left.expr->type == E_SYMBOL ) + { + if ( e->left.expr->left.sym == &symbol_no ) + { + free(e->left.expr); + tmp = e->right.expr; + *e = *(e->right.expr); + free(tmp); + return e; + } + else if ( e->left.expr->left.sym == &symbol_yes ) + { + 
expr_free(e->left.expr); + expr_free(e->right.expr); + e->type = E_SYMBOL; + e->left.sym = &symbol_yes; + e->right.expr = NULL; + return e; + } + } + if ( e->right.expr->type == E_SYMBOL ) + { + if ( e->right.expr->left.sym == &symbol_no ) + { + free(e->right.expr); + tmp = e->left.expr; + *e = *(e->left.expr); + free(tmp); + return e; + } + else if ( e->right.expr->left.sym == &symbol_yes ) + { + expr_free(e->left.expr); + expr_free(e->right.expr); + e->type = E_SYMBOL; + e->left.sym = &symbol_yes; + e->right.expr = NULL; + return e; + } + } + break; + default:; + } + return e; } /* @@ -331,28 +359,30 @@ static struct expr *expr_eliminate_yn(struct expr *e) */ struct expr *expr_trans_bool(struct expr *e) { - if (!e) - return NULL; - switch (e->type) { - case E_AND: - case E_OR: - case E_NOT: - e->left.expr = expr_trans_bool(e->left.expr); - e->right.expr = expr_trans_bool(e->right.expr); - break; - case E_UNEQUAL: - // FOO!=n -> FOO - if (e->left.sym->type == S_TRISTATE) { - if (e->right.sym == &symbol_no) { - e->type = E_SYMBOL; - e->right.sym = NULL; - } - } - break; - default: - ; - } - return e; + if ( !e ) + return NULL; + switch (e->type) + { + case E_AND: + case E_OR: + case E_NOT: + e->left.expr = expr_trans_bool(e->left.expr); + e->right.expr = expr_trans_bool(e->right.expr); + break; + case E_UNEQUAL: + // FOO!=n -> FOO + if ( e->left.sym->type == S_TRISTATE ) + { + if ( e->right.sym == &symbol_no ) + { + e->type = E_SYMBOL; + e->right.sym = NULL; + } + } + break; + default:; + } + return e; } /* @@ -360,685 +390,763 @@ struct expr *expr_trans_bool(struct expr *e) */ static struct expr *expr_join_or(struct expr *e1, struct expr *e2) { - struct expr *tmp; - struct symbol *sym1, *sym2; - - if (expr_eq(e1, e2)) - return expr_copy(e1); - if (e1->type != E_EQUAL && e1->type != E_UNEQUAL && e1->type != E_SYMBOL && e1->type != E_NOT) - return NULL; - if (e2->type != E_EQUAL && e2->type != E_UNEQUAL && e2->type != E_SYMBOL && e2->type != E_NOT) - return NULL; - if (e1->type == E_NOT) { - tmp = e1->left.expr; - if (tmp->type != E_EQUAL && tmp->type != E_UNEQUAL && tmp->type != E_SYMBOL) - return NULL; - sym1 = tmp->left.sym; - } else - sym1 = e1->left.sym; - if (e2->type == E_NOT) { - if (e2->left.expr->type != E_SYMBOL) - return NULL; - sym2 = e2->left.expr->left.sym; - } else - sym2 = e2->left.sym; - if (sym1 != sym2) - return NULL; - if (sym1->type != S_BOOLEAN && sym1->type != S_TRISTATE) - return NULL; - if (sym1->type == S_TRISTATE) { - if (e1->type == E_EQUAL && e2->type == E_EQUAL && - ((e1->right.sym == &symbol_yes && e2->right.sym == &symbol_mod) || - (e1->right.sym == &symbol_mod && e2->right.sym == &symbol_yes))) { - // (a='y') || (a='m') -> (a!='n') - return expr_alloc_comp(E_UNEQUAL, sym1, &symbol_no); - } - if (e1->type == E_EQUAL && e2->type == E_EQUAL && - ((e1->right.sym == &symbol_yes && e2->right.sym == &symbol_no) || - (e1->right.sym == &symbol_no && e2->right.sym == &symbol_yes))) { - // (a='y') || (a='n') -> (a!='m') - return expr_alloc_comp(E_UNEQUAL, sym1, &symbol_mod); - } - if (e1->type == E_EQUAL && e2->type == E_EQUAL && - ((e1->right.sym == &symbol_mod && e2->right.sym == &symbol_no) || - (e1->right.sym == &symbol_no && e2->right.sym == &symbol_mod))) { - // (a='m') || (a='n') -> (a!='y') - return expr_alloc_comp(E_UNEQUAL, sym1, &symbol_yes); - } - } - if (sym1->type == S_BOOLEAN && sym1 == sym2) { - if ((e1->type == E_NOT && e1->left.expr->type == E_SYMBOL && e2->type == E_SYMBOL) || - (e2->type == E_NOT && e2->left.expr->type == E_SYMBOL && e1->type == 
E_SYMBOL)) - return expr_alloc_symbol(&symbol_yes); - } - - if (DEBUG_EXPR) { - printf("optimize ("); - expr_fprint(e1, stdout); - printf(") || ("); - expr_fprint(e2, stdout); - printf(")?\n"); - } - return NULL; + struct expr *tmp; + struct symbol *sym1, *sym2; + + if ( expr_eq(e1, e2) ) + return expr_copy(e1); + if ( e1->type != E_EQUAL && e1->type != E_UNEQUAL && e1->type != E_SYMBOL && + e1->type != E_NOT ) + return NULL; + if ( e2->type != E_EQUAL && e2->type != E_UNEQUAL && e2->type != E_SYMBOL && + e2->type != E_NOT ) + return NULL; + if ( e1->type == E_NOT ) + { + tmp = e1->left.expr; + if ( tmp->type != E_EQUAL && tmp->type != E_UNEQUAL && + tmp->type != E_SYMBOL ) + return NULL; + sym1 = tmp->left.sym; + } + else + sym1 = e1->left.sym; + if ( e2->type == E_NOT ) + { + if ( e2->left.expr->type != E_SYMBOL ) + return NULL; + sym2 = e2->left.expr->left.sym; + } + else + sym2 = e2->left.sym; + if ( sym1 != sym2 ) + return NULL; + if ( sym1->type != S_BOOLEAN && sym1->type != S_TRISTATE ) + return NULL; + if ( sym1->type == S_TRISTATE ) + { + if ( e1->type == E_EQUAL && e2->type == E_EQUAL && + ((e1->right.sym == &symbol_yes && e2->right.sym == &symbol_mod) || + (e1->right.sym == &symbol_mod && e2->right.sym == &symbol_yes)) ) + { + // (a='y') || (a='m') -> (a!='n') + return expr_alloc_comp(E_UNEQUAL, sym1, &symbol_no); + } + if ( e1->type == E_EQUAL && e2->type == E_EQUAL && + ((e1->right.sym == &symbol_yes && e2->right.sym == &symbol_no) || + (e1->right.sym == &symbol_no && e2->right.sym == &symbol_yes)) ) + { + // (a='y') || (a='n') -> (a!='m') + return expr_alloc_comp(E_UNEQUAL, sym1, &symbol_mod); + } + if ( e1->type == E_EQUAL && e2->type == E_EQUAL && + ((e1->right.sym == &symbol_mod && e2->right.sym == &symbol_no) || + (e1->right.sym == &symbol_no && e2->right.sym == &symbol_mod)) ) + { + // (a='m') || (a='n') -> (a!='y') + return expr_alloc_comp(E_UNEQUAL, sym1, &symbol_yes); + } + } + if ( sym1->type == S_BOOLEAN && sym1 == sym2 ) + { + if ( (e1->type == E_NOT && e1->left.expr->type == E_SYMBOL && + e2->type == E_SYMBOL) || + (e2->type == E_NOT && e2->left.expr->type == E_SYMBOL && + e1->type == E_SYMBOL) ) + return expr_alloc_symbol(&symbol_yes); + } + + if ( DEBUG_EXPR ) + { + printf("optimize ("); + expr_fprint(e1, stdout); + printf(") || ("); + expr_fprint(e2, stdout); + printf(")?\n"); + } + return NULL; } static struct expr *expr_join_and(struct expr *e1, struct expr *e2) { - struct expr *tmp; - struct symbol *sym1, *sym2; - - if (expr_eq(e1, e2)) - return expr_copy(e1); - if (e1->type != E_EQUAL && e1->type != E_UNEQUAL && e1->type != E_SYMBOL && e1->type != E_NOT) - return NULL; - if (e2->type != E_EQUAL && e2->type != E_UNEQUAL && e2->type != E_SYMBOL && e2->type != E_NOT) - return NULL; - if (e1->type == E_NOT) { - tmp = e1->left.expr; - if (tmp->type != E_EQUAL && tmp->type != E_UNEQUAL && tmp->type != E_SYMBOL) - return NULL; - sym1 = tmp->left.sym; - } else - sym1 = e1->left.sym; - if (e2->type == E_NOT) { - if (e2->left.expr->type != E_SYMBOL) - return NULL; - sym2 = e2->left.expr->left.sym; - } else - sym2 = e2->left.sym; - if (sym1 != sym2) - return NULL; - if (sym1->type != S_BOOLEAN && sym1->type != S_TRISTATE) - return NULL; - - if ((e1->type == E_SYMBOL && e2->type == E_EQUAL && e2->right.sym == &symbol_yes) || - (e2->type == E_SYMBOL && e1->type == E_EQUAL && e1->right.sym == &symbol_yes)) - // (a) && (a='y') -> (a='y') - return expr_alloc_comp(E_EQUAL, sym1, &symbol_yes); - - if ((e1->type == E_SYMBOL && e2->type == E_UNEQUAL && e2->right.sym == 
&symbol_no) || - (e2->type == E_SYMBOL && e1->type == E_UNEQUAL && e1->right.sym == &symbol_no)) - // (a) && (a!='n') -> (a) - return expr_alloc_symbol(sym1); - - if ((e1->type == E_SYMBOL && e2->type == E_UNEQUAL && e2->right.sym == &symbol_mod) || - (e2->type == E_SYMBOL && e1->type == E_UNEQUAL && e1->right.sym == &symbol_mod)) - // (a) && (a!='m') -> (a='y') - return expr_alloc_comp(E_EQUAL, sym1, &symbol_yes); - - if (sym1->type == S_TRISTATE) { - if (e1->type == E_EQUAL && e2->type == E_UNEQUAL) { - // (a='b') && (a!='c') -> 'b'='c' ? 'n' : a='b' - sym2 = e1->right.sym; - if ((e2->right.sym->flags & SYMBOL_CONST) && (sym2->flags & SYMBOL_CONST)) - return sym2 != e2->right.sym ? expr_alloc_comp(E_EQUAL, sym1, sym2) - : expr_alloc_symbol(&symbol_no); - } - if (e1->type == E_UNEQUAL && e2->type == E_EQUAL) { - // (a='b') && (a!='c') -> 'b'='c' ? 'n' : a='b' - sym2 = e2->right.sym; - if ((e1->right.sym->flags & SYMBOL_CONST) && (sym2->flags & SYMBOL_CONST)) - return sym2 != e1->right.sym ? expr_alloc_comp(E_EQUAL, sym1, sym2) - : expr_alloc_symbol(&symbol_no); - } - if (e1->type == E_UNEQUAL && e2->type == E_UNEQUAL && - ((e1->right.sym == &symbol_yes && e2->right.sym == &symbol_no) || - (e1->right.sym == &symbol_no && e2->right.sym == &symbol_yes))) - // (a!='y') && (a!='n') -> (a='m') - return expr_alloc_comp(E_EQUAL, sym1, &symbol_mod); - - if (e1->type == E_UNEQUAL && e2->type == E_UNEQUAL && - ((e1->right.sym == &symbol_yes && e2->right.sym == &symbol_mod) || - (e1->right.sym == &symbol_mod && e2->right.sym == &symbol_yes))) - // (a!='y') && (a!='m') -> (a='n') - return expr_alloc_comp(E_EQUAL, sym1, &symbol_no); - - if (e1->type == E_UNEQUAL && e2->type == E_UNEQUAL && - ((e1->right.sym == &symbol_mod && e2->right.sym == &symbol_no) || - (e1->right.sym == &symbol_no && e2->right.sym == &symbol_mod))) - // (a!='m') && (a!='n') -> (a='m') - return expr_alloc_comp(E_EQUAL, sym1, &symbol_yes); - - if ((e1->type == E_SYMBOL && e2->type == E_EQUAL && e2->right.sym == &symbol_mod) || - (e2->type == E_SYMBOL && e1->type == E_EQUAL && e1->right.sym == &symbol_mod) || - (e1->type == E_SYMBOL && e2->type == E_UNEQUAL && e2->right.sym == &symbol_yes) || - (e2->type == E_SYMBOL && e1->type == E_UNEQUAL && e1->right.sym == &symbol_yes)) - return NULL; - } - - if (DEBUG_EXPR) { - printf("optimize ("); - expr_fprint(e1, stdout); - printf(") && ("); - expr_fprint(e2, stdout); - printf(")?\n"); - } - return NULL; + struct expr *tmp; + struct symbol *sym1, *sym2; + + if ( expr_eq(e1, e2) ) + return expr_copy(e1); + if ( e1->type != E_EQUAL && e1->type != E_UNEQUAL && e1->type != E_SYMBOL && + e1->type != E_NOT ) + return NULL; + if ( e2->type != E_EQUAL && e2->type != E_UNEQUAL && e2->type != E_SYMBOL && + e2->type != E_NOT ) + return NULL; + if ( e1->type == E_NOT ) + { + tmp = e1->left.expr; + if ( tmp->type != E_EQUAL && tmp->type != E_UNEQUAL && + tmp->type != E_SYMBOL ) + return NULL; + sym1 = tmp->left.sym; + } + else + sym1 = e1->left.sym; + if ( e2->type == E_NOT ) + { + if ( e2->left.expr->type != E_SYMBOL ) + return NULL; + sym2 = e2->left.expr->left.sym; + } + else + sym2 = e2->left.sym; + if ( sym1 != sym2 ) + return NULL; + if ( sym1->type != S_BOOLEAN && sym1->type != S_TRISTATE ) + return NULL; + + if ( (e1->type == E_SYMBOL && e2->type == E_EQUAL && + e2->right.sym == &symbol_yes) || + (e2->type == E_SYMBOL && e1->type == E_EQUAL && + e1->right.sym == &symbol_yes) ) + // (a) && (a='y') -> (a='y') + return expr_alloc_comp(E_EQUAL, sym1, &symbol_yes); + + if ( (e1->type == E_SYMBOL && 
e2->type == E_UNEQUAL && + e2->right.sym == &symbol_no) || + (e2->type == E_SYMBOL && e1->type == E_UNEQUAL && + e1->right.sym == &symbol_no) ) + // (a) && (a!='n') -> (a) + return expr_alloc_symbol(sym1); + + if ( (e1->type == E_SYMBOL && e2->type == E_UNEQUAL && + e2->right.sym == &symbol_mod) || + (e2->type == E_SYMBOL && e1->type == E_UNEQUAL && + e1->right.sym == &symbol_mod) ) + // (a) && (a!='m') -> (a='y') + return expr_alloc_comp(E_EQUAL, sym1, &symbol_yes); + + if ( sym1->type == S_TRISTATE ) + { + if ( e1->type == E_EQUAL && e2->type == E_UNEQUAL ) + { + // (a='b') && (a!='c') -> 'b'='c' ? 'n' : a='b' + sym2 = e1->right.sym; + if ( (e2->right.sym->flags & SYMBOL_CONST) && + (sym2->flags & SYMBOL_CONST) ) + return sym2 != e2->right.sym + ? expr_alloc_comp(E_EQUAL, sym1, sym2) + : expr_alloc_symbol(&symbol_no); + } + if ( e1->type == E_UNEQUAL && e2->type == E_EQUAL ) + { + // (a='b') && (a!='c') -> 'b'='c' ? 'n' : a='b' + sym2 = e2->right.sym; + if ( (e1->right.sym->flags & SYMBOL_CONST) && + (sym2->flags & SYMBOL_CONST) ) + return sym2 != e1->right.sym + ? expr_alloc_comp(E_EQUAL, sym1, sym2) + : expr_alloc_symbol(&symbol_no); + } + if ( e1->type == E_UNEQUAL && e2->type == E_UNEQUAL && + ((e1->right.sym == &symbol_yes && e2->right.sym == &symbol_no) || + (e1->right.sym == &symbol_no && e2->right.sym == &symbol_yes)) ) + // (a!='y') && (a!='n') -> (a='m') + return expr_alloc_comp(E_EQUAL, sym1, &symbol_mod); + + if ( e1->type == E_UNEQUAL && e2->type == E_UNEQUAL && + ((e1->right.sym == &symbol_yes && e2->right.sym == &symbol_mod) || + (e1->right.sym == &symbol_mod && e2->right.sym == &symbol_yes)) ) + // (a!='y') && (a!='m') -> (a='n') + return expr_alloc_comp(E_EQUAL, sym1, &symbol_no); + + if ( e1->type == E_UNEQUAL && e2->type == E_UNEQUAL && + ((e1->right.sym == &symbol_mod && e2->right.sym == &symbol_no) || + (e1->right.sym == &symbol_no && e2->right.sym == &symbol_mod)) ) + // (a!='m') && (a!='n') -> (a='m') + return expr_alloc_comp(E_EQUAL, sym1, &symbol_yes); + + if ( (e1->type == E_SYMBOL && e2->type == E_EQUAL && + e2->right.sym == &symbol_mod) || + (e2->type == E_SYMBOL && e1->type == E_EQUAL && + e1->right.sym == &symbol_mod) || + (e1->type == E_SYMBOL && e2->type == E_UNEQUAL && + e2->right.sym == &symbol_yes) || + (e2->type == E_SYMBOL && e1->type == E_UNEQUAL && + e1->right.sym == &symbol_yes) ) + return NULL; + } + + if ( DEBUG_EXPR ) + { + printf("optimize ("); + expr_fprint(e1, stdout); + printf(") && ("); + expr_fprint(e2, stdout); + printf(")?\n"); + } + return NULL; } -static void expr_eliminate_dups1(enum expr_type type, struct expr **ep1, struct expr **ep2) +static void expr_eliminate_dups1(enum expr_type type, struct expr **ep1, + struct expr **ep2) { #define e1 (*ep1) #define e2 (*ep2) - struct expr *tmp; - - if (e1->type == type) { - expr_eliminate_dups1(type, &e1->left.expr, &e2); - expr_eliminate_dups1(type, &e1->right.expr, &e2); - return; - } - if (e2->type == type) { - expr_eliminate_dups1(type, &e1, &e2->left.expr); - expr_eliminate_dups1(type, &e1, &e2->right.expr); - return; - } - if (e1 == e2) - return; - - switch (e1->type) { - case E_OR: case E_AND: - expr_eliminate_dups1(e1->type, &e1, &e1); - default: - ; - } - - switch (type) { - case E_OR: - tmp = expr_join_or(e1, e2); - if (tmp) { - expr_free(e1); expr_free(e2); - e1 = expr_alloc_symbol(&symbol_no); - e2 = tmp; - trans_count++; - } - break; - case E_AND: - tmp = expr_join_and(e1, e2); - if (tmp) { - expr_free(e1); expr_free(e2); - e1 = expr_alloc_symbol(&symbol_yes); - e2 = tmp; - 
trans_count++; - } - break; - default: - ; - } + struct expr *tmp; + + if ( e1->type == type ) + { + expr_eliminate_dups1(type, &e1->left.expr, &e2); + expr_eliminate_dups1(type, &e1->right.expr, &e2); + return; + } + if ( e2->type == type ) + { + expr_eliminate_dups1(type, &e1, &e2->left.expr); + expr_eliminate_dups1(type, &e1, &e2->right.expr); + return; + } + if ( e1 == e2 ) + return; + + switch (e1->type) + { + case E_OR: + case E_AND: + expr_eliminate_dups1(e1->type, &e1, &e1); + default:; + } + + switch (type) + { + case E_OR: + tmp = expr_join_or(e1, e2); + if ( tmp ) + { + expr_free(e1); + expr_free(e2); + e1 = expr_alloc_symbol(&symbol_no); + e2 = tmp; + trans_count++; + } + break; + case E_AND: + tmp = expr_join_and(e1, e2); + if ( tmp ) + { + expr_free(e1); + expr_free(e2); + e1 = expr_alloc_symbol(&symbol_yes); + e2 = tmp; + trans_count++; + } + break; + default:; + } #undef e1 #undef e2 } struct expr *expr_eliminate_dups(struct expr *e) { - int oldcount; - if (!e) - return e; - - oldcount = trans_count; - while (1) { - trans_count = 0; - switch (e->type) { - case E_OR: case E_AND: - expr_eliminate_dups1(e->type, &e, &e); - default: - ; - } - if (!trans_count) - break; - e = expr_eliminate_yn(e); - } - trans_count = oldcount; - return e; + int oldcount; + if ( !e ) + return e; + + oldcount = trans_count; + while ( 1 ) + { + trans_count = 0; + switch (e->type) + { + case E_OR: + case E_AND: + expr_eliminate_dups1(e->type, &e, &e); + default:; + } + if ( !trans_count ) + break; + e = expr_eliminate_yn(e); + } + trans_count = oldcount; + return e; } struct expr *expr_transform(struct expr *e) { - struct expr *tmp; - - if (!e) - return NULL; - switch (e->type) { - case E_EQUAL: - case E_GEQ: - case E_GTH: - case E_LEQ: - case E_LTH: - case E_UNEQUAL: - case E_SYMBOL: - case E_LIST: - break; - default: - e->left.expr = expr_transform(e->left.expr); - e->right.expr = expr_transform(e->right.expr); - } - - switch (e->type) { - case E_EQUAL: - if (e->left.sym->type != S_BOOLEAN) - break; - if (e->right.sym == &symbol_no) { - e->type = E_NOT; - e->left.expr = expr_alloc_symbol(e->left.sym); - e->right.sym = NULL; - break; - } - if (e->right.sym == &symbol_mod) { - printf("boolean symbol %s tested for 'm'? test forced to 'n'\n", e->left.sym->name); - e->type = E_SYMBOL; - e->left.sym = &symbol_no; - e->right.sym = NULL; - break; - } - if (e->right.sym == &symbol_yes) { - e->type = E_SYMBOL; - e->right.sym = NULL; - break; - } - break; - case E_UNEQUAL: - if (e->left.sym->type != S_BOOLEAN) - break; - if (e->right.sym == &symbol_no) { - e->type = E_SYMBOL; - e->right.sym = NULL; - break; - } - if (e->right.sym == &symbol_mod) { - printf("boolean symbol %s tested for 'm'? test forced to 'y'\n", e->left.sym->name); - e->type = E_SYMBOL; - e->left.sym = &symbol_yes; - e->right.sym = NULL; - break; - } - if (e->right.sym == &symbol_yes) { - e->type = E_NOT; - e->left.expr = expr_alloc_symbol(e->left.sym); - e->right.sym = NULL; - break; - } - break; - case E_NOT: - switch (e->left.expr->type) { - case E_NOT: - // !!a -> a - tmp = e->left.expr->left.expr; - free(e->left.expr); - free(e); - e = tmp; - e = expr_transform(e); - break; - case E_EQUAL: - case E_UNEQUAL: - // !a='x' -> a!='x' - tmp = e->left.expr; - free(e); - e = tmp; - e->type = e->type == E_EQUAL ? E_UNEQUAL : E_EQUAL; - break; - case E_LEQ: - case E_GEQ: - // !a<='x' -> a>'x' - tmp = e->left.expr; - free(e); - e = tmp; - e->type = e->type == E_LEQ ? 
E_GTH : E_LTH; - break; - case E_LTH: - case E_GTH: - // !a<'x' -> a>='x' - tmp = e->left.expr; - free(e); - e = tmp; - e->type = e->type == E_LTH ? E_GEQ : E_LEQ; - break; - case E_OR: - // !(a || b) -> !a && !b - tmp = e->left.expr; - e->type = E_AND; - e->right.expr = expr_alloc_one(E_NOT, tmp->right.expr); - tmp->type = E_NOT; - tmp->right.expr = NULL; - e = expr_transform(e); - break; - case E_AND: - // !(a && b) -> !a || !b - tmp = e->left.expr; - e->type = E_OR; - e->right.expr = expr_alloc_one(E_NOT, tmp->right.expr); - tmp->type = E_NOT; - tmp->right.expr = NULL; - e = expr_transform(e); - break; - case E_SYMBOL: - if (e->left.expr->left.sym == &symbol_yes) { - // !'y' -> 'n' - tmp = e->left.expr; - free(e); - e = tmp; - e->type = E_SYMBOL; - e->left.sym = &symbol_no; - break; - } - if (e->left.expr->left.sym == &symbol_mod) { - // !'m' -> 'm' - tmp = e->left.expr; - free(e); - e = tmp; - e->type = E_SYMBOL; - e->left.sym = &symbol_mod; - break; - } - if (e->left.expr->left.sym == &symbol_no) { - // !'n' -> 'y' - tmp = e->left.expr; - free(e); - e = tmp; - e->type = E_SYMBOL; - e->left.sym = &symbol_yes; - break; - } - break; - default: - ; - } - break; - default: - ; - } - return e; + struct expr *tmp; + + if ( !e ) + return NULL; + switch (e->type) + { + case E_EQUAL: + case E_GEQ: + case E_GTH: + case E_LEQ: + case E_LTH: + case E_UNEQUAL: + case E_SYMBOL: + case E_LIST: + break; + default: + e->left.expr = expr_transform(e->left.expr); + e->right.expr = expr_transform(e->right.expr); + } + + switch (e->type) + { + case E_EQUAL: + if ( e->left.sym->type != S_BOOLEAN ) + break; + if ( e->right.sym == &symbol_no ) + { + e->type = E_NOT; + e->left.expr = expr_alloc_symbol(e->left.sym); + e->right.sym = NULL; + break; + } + if ( e->right.sym == &symbol_mod ) + { + printf("boolean symbol %s tested for 'm'? test forced to 'n'\n", + e->left.sym->name); + e->type = E_SYMBOL; + e->left.sym = &symbol_no; + e->right.sym = NULL; + break; + } + if ( e->right.sym == &symbol_yes ) + { + e->type = E_SYMBOL; + e->right.sym = NULL; + break; + } + break; + case E_UNEQUAL: + if ( e->left.sym->type != S_BOOLEAN ) + break; + if ( e->right.sym == &symbol_no ) + { + e->type = E_SYMBOL; + e->right.sym = NULL; + break; + } + if ( e->right.sym == &symbol_mod ) + { + printf("boolean symbol %s tested for 'm'? test forced to 'y'\n", + e->left.sym->name); + e->type = E_SYMBOL; + e->left.sym = &symbol_yes; + e->right.sym = NULL; + break; + } + if ( e->right.sym == &symbol_yes ) + { + e->type = E_NOT; + e->left.expr = expr_alloc_symbol(e->left.sym); + e->right.sym = NULL; + break; + } + break; + case E_NOT: + switch (e->left.expr->type) + { + case E_NOT: + // !!a -> a + tmp = e->left.expr->left.expr; + free(e->left.expr); + free(e); + e = tmp; + e = expr_transform(e); + break; + case E_EQUAL: + case E_UNEQUAL: + // !a='x' -> a!='x' + tmp = e->left.expr; + free(e); + e = tmp; + e->type = e->type == E_EQUAL ? E_UNEQUAL : E_EQUAL; + break; + case E_LEQ: + case E_GEQ: + // !a<='x' -> a>'x' + tmp = e->left.expr; + free(e); + e = tmp; + e->type = e->type == E_LEQ ? E_GTH : E_LTH; + break; + case E_LTH: + case E_GTH: + // !a<'x' -> a>='x' + tmp = e->left.expr; + free(e); + e = tmp; + e->type = e->type == E_LTH ? 
E_GEQ : E_LEQ; + break; + case E_OR: + // !(a || b) -> !a && !b + tmp = e->left.expr; + e->type = E_AND; + e->right.expr = expr_alloc_one(E_NOT, tmp->right.expr); + tmp->type = E_NOT; + tmp->right.expr = NULL; + e = expr_transform(e); + break; + case E_AND: + // !(a && b) -> !a || !b + tmp = e->left.expr; + e->type = E_OR; + e->right.expr = expr_alloc_one(E_NOT, tmp->right.expr); + tmp->type = E_NOT; + tmp->right.expr = NULL; + e = expr_transform(e); + break; + case E_SYMBOL: + if ( e->left.expr->left.sym == &symbol_yes ) + { + // !'y' -> 'n' + tmp = e->left.expr; + free(e); + e = tmp; + e->type = E_SYMBOL; + e->left.sym = &symbol_no; + break; + } + if ( e->left.expr->left.sym == &symbol_mod ) + { + // !'m' -> 'm' + tmp = e->left.expr; + free(e); + e = tmp; + e->type = E_SYMBOL; + e->left.sym = &symbol_mod; + break; + } + if ( e->left.expr->left.sym == &symbol_no ) + { + // !'n' -> 'y' + tmp = e->left.expr; + free(e); + e = tmp; + e->type = E_SYMBOL; + e->left.sym = &symbol_yes; + break; + } + break; + default:; + } + break; + default:; + } + return e; } int expr_contains_symbol(struct expr *dep, struct symbol *sym) { - if (!dep) - return 0; - - switch (dep->type) { - case E_AND: - case E_OR: - return expr_contains_symbol(dep->left.expr, sym) || - expr_contains_symbol(dep->right.expr, sym); - case E_SYMBOL: - return dep->left.sym == sym; - case E_EQUAL: - case E_GEQ: - case E_GTH: - case E_LEQ: - case E_LTH: - case E_UNEQUAL: - return dep->left.sym == sym || - dep->right.sym == sym; - case E_NOT: - return expr_contains_symbol(dep->left.expr, sym); - default: - ; - } - return 0; + if ( !dep ) + return 0; + + switch (dep->type) + { + case E_AND: + case E_OR: + return expr_contains_symbol(dep->left.expr, sym) || + expr_contains_symbol(dep->right.expr, sym); + case E_SYMBOL: + return dep->left.sym == sym; + case E_EQUAL: + case E_GEQ: + case E_GTH: + case E_LEQ: + case E_LTH: + case E_UNEQUAL: + return dep->left.sym == sym || dep->right.sym == sym; + case E_NOT: + return expr_contains_symbol(dep->left.expr, sym); + default:; + } + return 0; } bool expr_depends_symbol(struct expr *dep, struct symbol *sym) { - if (!dep) - return false; - - switch (dep->type) { - case E_AND: - return expr_depends_symbol(dep->left.expr, sym) || - expr_depends_symbol(dep->right.expr, sym); - case E_SYMBOL: - return dep->left.sym == sym; - case E_EQUAL: - if (dep->left.sym == sym) { - if (dep->right.sym == &symbol_yes || dep->right.sym == &symbol_mod) - return true; - } - break; - case E_UNEQUAL: - if (dep->left.sym == sym) { - if (dep->right.sym == &symbol_no) - return true; - } - break; - default: - ; - } - return false; + if ( !dep ) + return false; + + switch (dep->type) + { + case E_AND: + return expr_depends_symbol(dep->left.expr, sym) || + expr_depends_symbol(dep->right.expr, sym); + case E_SYMBOL: + return dep->left.sym == sym; + case E_EQUAL: + if ( dep->left.sym == sym ) + { + if ( dep->right.sym == &symbol_yes || + dep->right.sym == &symbol_mod ) + return true; + } + break; + case E_UNEQUAL: + if ( dep->left.sym == sym ) + { + if ( dep->right.sym == &symbol_no ) + return true; + } + break; + default:; + } + return false; } -struct expr *expr_trans_compare(struct expr *e, enum expr_type type, struct symbol *sym) +struct expr *expr_trans_compare(struct expr *e, enum expr_type type, + struct symbol *sym) { - struct expr *e1, *e2; - - if (!e) { - e = expr_alloc_symbol(sym); - if (type == E_UNEQUAL) - e = expr_alloc_one(E_NOT, e); - return e; - } - switch (e->type) { - case E_AND: - e1 = 
expr_trans_compare(e->left.expr, E_EQUAL, sym); - e2 = expr_trans_compare(e->right.expr, E_EQUAL, sym); - if (sym == &symbol_yes) - e = expr_alloc_two(E_AND, e1, e2); - if (sym == &symbol_no) - e = expr_alloc_two(E_OR, e1, e2); - if (type == E_UNEQUAL) - e = expr_alloc_one(E_NOT, e); - return e; - case E_OR: - e1 = expr_trans_compare(e->left.expr, E_EQUAL, sym); - e2 = expr_trans_compare(e->right.expr, E_EQUAL, sym); - if (sym == &symbol_yes) - e = expr_alloc_two(E_OR, e1, e2); - if (sym == &symbol_no) - e = expr_alloc_two(E_AND, e1, e2); - if (type == E_UNEQUAL) - e = expr_alloc_one(E_NOT, e); - return e; - case E_NOT: - return expr_trans_compare(e->left.expr, type == E_EQUAL ? E_UNEQUAL : E_EQUAL, sym); - case E_UNEQUAL: - case E_LTH: - case E_LEQ: - case E_GTH: - case E_GEQ: - case E_EQUAL: - if (type == E_EQUAL) { - if (sym == &symbol_yes) - return expr_copy(e); - if (sym == &symbol_mod) - return expr_alloc_symbol(&symbol_no); - if (sym == &symbol_no) - return expr_alloc_one(E_NOT, expr_copy(e)); - } else { - if (sym == &symbol_yes) - return expr_alloc_one(E_NOT, expr_copy(e)); - if (sym == &symbol_mod) - return expr_alloc_symbol(&symbol_yes); - if (sym == &symbol_no) - return expr_copy(e); - } - break; - case E_SYMBOL: - return expr_alloc_comp(type, e->left.sym, sym); - case E_LIST: - case E_RANGE: - case E_NONE: - /* panic */; - } - return NULL; + struct expr *e1, *e2; + + if ( !e ) + { + e = expr_alloc_symbol(sym); + if ( type == E_UNEQUAL ) + e = expr_alloc_one(E_NOT, e); + return e; + } + switch (e->type) + { + case E_AND: + e1 = expr_trans_compare(e->left.expr, E_EQUAL, sym); + e2 = expr_trans_compare(e->right.expr, E_EQUAL, sym); + if ( sym == &symbol_yes ) + e = expr_alloc_two(E_AND, e1, e2); + if ( sym == &symbol_no ) + e = expr_alloc_two(E_OR, e1, e2); + if ( type == E_UNEQUAL ) + e = expr_alloc_one(E_NOT, e); + return e; + case E_OR: + e1 = expr_trans_compare(e->left.expr, E_EQUAL, sym); + e2 = expr_trans_compare(e->right.expr, E_EQUAL, sym); + if ( sym == &symbol_yes ) + e = expr_alloc_two(E_OR, e1, e2); + if ( sym == &symbol_no ) + e = expr_alloc_two(E_AND, e1, e2); + if ( type == E_UNEQUAL ) + e = expr_alloc_one(E_NOT, e); + return e; + case E_NOT: + return expr_trans_compare(e->left.expr, + type == E_EQUAL ? 
E_UNEQUAL : E_EQUAL, sym); + case E_UNEQUAL: + case E_LTH: + case E_LEQ: + case E_GTH: + case E_GEQ: + case E_EQUAL: + if ( type == E_EQUAL ) + { + if ( sym == &symbol_yes ) + return expr_copy(e); + if ( sym == &symbol_mod ) + return expr_alloc_symbol(&symbol_no); + if ( sym == &symbol_no ) + return expr_alloc_one(E_NOT, expr_copy(e)); + } + else + { + if ( sym == &symbol_yes ) + return expr_alloc_one(E_NOT, expr_copy(e)); + if ( sym == &symbol_mod ) + return expr_alloc_symbol(&symbol_yes); + if ( sym == &symbol_no ) + return expr_copy(e); + } + break; + case E_SYMBOL: + return expr_alloc_comp(type, e->left.sym, sym); + case E_LIST: + case E_RANGE: + case E_NONE: + /* panic */; + } + return NULL; } -enum string_value_kind { - k_string, - k_signed, - k_unsigned, - k_invalid +enum string_value_kind +{ + k_string, + k_signed, + k_unsigned, + k_invalid }; union string_value { - unsigned long long u; - signed long long s; + unsigned long long u; + signed long long s; }; static enum string_value_kind expr_parse_string(const char *str, - enum symbol_type type, - union string_value *val) + enum symbol_type type, + union string_value *val) { - char *tail; - enum string_value_kind kind; - - errno = 0; - switch (type) { - case S_BOOLEAN: - case S_TRISTATE: - return k_string; - case S_INT: - val->s = strtoll(str, &tail, 10); - kind = k_signed; - break; - case S_HEX: - val->u = strtoull(str, &tail, 16); - kind = k_unsigned; - break; - case S_STRING: - case S_UNKNOWN: - val->s = strtoll(str, &tail, 0); - kind = k_signed; - break; - default: - return k_invalid; - } - return !errno && !*tail && tail > str && isxdigit(tail[-1]) - ? kind : k_string; + char *tail; + enum string_value_kind kind; + + errno = 0; + switch (type) + { + case S_BOOLEAN: + case S_TRISTATE: + return k_string; + case S_INT: + val->s = strtoll(str, &tail, 10); + kind = k_signed; + break; + case S_HEX: + val->u = strtoull(str, &tail, 16); + kind = k_unsigned; + break; + case S_STRING: + case S_UNKNOWN: + val->s = strtoll(str, &tail, 0); + kind = k_signed; + break; + default: + return k_invalid; + } + return !errno && !*tail && tail > str && isxdigit(tail[-1]) ? 
kind + : k_string; } tristate expr_calc_value(struct expr *e) { - tristate val1, val2; - const char *str1, *str2; - enum string_value_kind k1 = k_string, k2 = k_string; - union string_value lval = {}, rval = {}; - int res; - - if (!e) - return yes; - - switch (e->type) { - case E_SYMBOL: - sym_calc_value(e->left.sym); - return e->left.sym->curr.tri; - case E_AND: - val1 = expr_calc_value(e->left.expr); - val2 = expr_calc_value(e->right.expr); - return EXPR_AND(val1, val2); - case E_OR: - val1 = expr_calc_value(e->left.expr); - val2 = expr_calc_value(e->right.expr); - return EXPR_OR(val1, val2); - case E_NOT: - val1 = expr_calc_value(e->left.expr); - return EXPR_NOT(val1); - case E_EQUAL: - case E_GEQ: - case E_GTH: - case E_LEQ: - case E_LTH: - case E_UNEQUAL: - break; - default: - printf("expr_calc_value: %d?\n", e->type); - return no; - } - - sym_calc_value(e->left.sym); - sym_calc_value(e->right.sym); - str1 = sym_get_string_value(e->left.sym); - str2 = sym_get_string_value(e->right.sym); - - if (e->left.sym->type != S_STRING || e->right.sym->type != S_STRING) { - k1 = expr_parse_string(str1, e->left.sym->type, &lval); - k2 = expr_parse_string(str2, e->right.sym->type, &rval); - } - - if (k1 == k_string || k2 == k_string) - res = strcmp(str1, str2); - else if (k1 == k_invalid || k2 == k_invalid) { - if (e->type != E_EQUAL && e->type != E_UNEQUAL) { - printf("Cannot compare \"%s\" and \"%s\"\n", str1, str2); - return no; - } - res = strcmp(str1, str2); - } else if (k1 == k_unsigned || k2 == k_unsigned) - res = (lval.u > rval.u) - (lval.u < rval.u); - else /* if (k1 == k_signed && k2 == k_signed) */ - res = (lval.s > rval.s) - (lval.s < rval.s); - - switch(e->type) { - case E_EQUAL: - return res ? no : yes; - case E_GEQ: - return res >= 0 ? yes : no; - case E_GTH: - return res > 0 ? yes : no; - case E_LEQ: - return res <= 0 ? yes : no; - case E_LTH: - return res < 0 ? yes : no; - case E_UNEQUAL: - return res ? 
yes : no; - default: - printf("expr_calc_value: relation %d?\n", e->type); - return no; - } + tristate val1, val2; + const char *str1, *str2; + enum string_value_kind k1 = k_string, k2 = k_string; + union string_value lval = {}, rval = {}; + int res; + + if ( !e ) + return yes; + + switch (e->type) + { + case E_SYMBOL: + sym_calc_value(e->left.sym); + return e->left.sym->curr.tri; + case E_AND: + val1 = expr_calc_value(e->left.expr); + val2 = expr_calc_value(e->right.expr); + return EXPR_AND(val1, val2); + case E_OR: + val1 = expr_calc_value(e->left.expr); + val2 = expr_calc_value(e->right.expr); + return EXPR_OR(val1, val2); + case E_NOT: + val1 = expr_calc_value(e->left.expr); + return EXPR_NOT(val1); + case E_EQUAL: + case E_GEQ: + case E_GTH: + case E_LEQ: + case E_LTH: + case E_UNEQUAL: + break; + default: + printf("expr_calc_value: %d?\n", e->type); + return no; + } + + sym_calc_value(e->left.sym); + sym_calc_value(e->right.sym); + str1 = sym_get_string_value(e->left.sym); + str2 = sym_get_string_value(e->right.sym); + + if ( e->left.sym->type != S_STRING || e->right.sym->type != S_STRING ) + { + k1 = expr_parse_string(str1, e->left.sym->type, &lval); + k2 = expr_parse_string(str2, e->right.sym->type, &rval); + } + + if ( k1 == k_string || k2 == k_string ) + res = strcmp(str1, str2); + else if ( k1 == k_invalid || k2 == k_invalid ) + { + if ( e->type != E_EQUAL && e->type != E_UNEQUAL ) + { + printf("Cannot compare \"%s\" and \"%s\"\n", str1, str2); + return no; + } + res = strcmp(str1, str2); + } + else if ( k1 == k_unsigned || k2 == k_unsigned ) + res = (lval.u > rval.u) - (lval.u < rval.u); + else /* if (k1 == k_signed && k2 == k_signed) */ + res = (lval.s > rval.s) - (lval.s < rval.s); + + switch (e->type) + { + case E_EQUAL: + return res ? no : yes; + case E_GEQ: + return res >= 0 ? yes : no; + case E_GTH: + return res > 0 ? yes : no; + case E_LEQ: + return res <= 0 ? yes : no; + case E_LTH: + return res < 0 ? yes : no; + case E_UNEQUAL: + return res ? 
yes : no; + default: + printf("expr_calc_value: relation %d?\n", e->type); + return no; + } } static int expr_compare_type(enum expr_type t1, enum expr_type t2) { - if (t1 == t2) - return 0; - switch (t1) { - case E_LEQ: - case E_LTH: - case E_GEQ: - case E_GTH: - if (t2 == E_EQUAL || t2 == E_UNEQUAL) - return 1; - case E_EQUAL: - case E_UNEQUAL: - if (t2 == E_NOT) - return 1; - case E_NOT: - if (t2 == E_AND) - return 1; - case E_AND: - if (t2 == E_OR) - return 1; - case E_OR: - if (t2 == E_LIST) - return 1; - case E_LIST: - if (t2 == 0) - return 1; - default: - return -1; - } - printf("[%dgt%d?]", t1, t2); - return 0; + if ( t1 == t2 ) + return 0; + switch (t1) + { + case E_LEQ: + case E_LTH: + case E_GEQ: + case E_GTH: + if ( t2 == E_EQUAL || t2 == E_UNEQUAL ) + return 1; + case E_EQUAL: + case E_UNEQUAL: + if ( t2 == E_NOT ) + return 1; + case E_NOT: + if ( t2 == E_AND ) + return 1; + case E_AND: + if ( t2 == E_OR ) + return 1; + case E_OR: + if ( t2 == E_LIST ) + return 1; + case E_LIST: + if ( t2 == 0 ) + return 1; + default: + return -1; + } + printf("[%dgt%d?]", t1, t2); + return 0; } -static inline struct expr * -expr_get_leftmost_symbol(const struct expr *e) +static inline struct expr *expr_get_leftmost_symbol(const struct expr *e) { + if ( e == NULL ) + return NULL; - if (e == NULL) - return NULL; - - while (e->type != E_SYMBOL) - e = e->left.expr; + while ( e->type != E_SYMBOL ) + e = e->left.expr; - return expr_copy(e); + return expr_copy(e); } /* @@ -1047,160 +1155,169 @@ expr_get_leftmost_symbol(const struct expr *e) */ struct expr *expr_simplify_unmet_dep(struct expr *e1, struct expr *e2) { - struct expr *ret; - - switch (e1->type) { - case E_OR: - return expr_alloc_and( - expr_simplify_unmet_dep(e1->left.expr, e2), - expr_simplify_unmet_dep(e1->right.expr, e2)); - case E_AND: { - struct expr *e; - e = expr_alloc_and(expr_copy(e1), expr_copy(e2)); - e = expr_eliminate_dups(e); - ret = (!expr_eq(e, e1)) ? e1 : NULL; - expr_free(e); - break; - } - default: - ret = e1; - break; - } - - return expr_get_leftmost_symbol(ret); + struct expr *ret; + + switch (e1->type) + { + case E_OR: + return expr_alloc_and(expr_simplify_unmet_dep(e1->left.expr, e2), + expr_simplify_unmet_dep(e1->right.expr, e2)); + case E_AND: + { + struct expr *e; + e = expr_alloc_and(expr_copy(e1), expr_copy(e2)); + e = expr_eliminate_dups(e); + ret = (!expr_eq(e, e1)) ? e1 : NULL; + expr_free(e); + break; + } + default: + ret = e1; + break; + } + + return expr_get_leftmost_symbol(ret); } -void expr_print(struct expr *e, void (*fn)(void *, struct symbol *, const char *), void *data, int prevtoken) +void expr_print(struct expr *e, + void (*fn)(void *, struct symbol *, const char *), void *data, + int prevtoken) { - if (!e) { - fn(data, NULL, "y"); - return; - } - - if (expr_compare_type(prevtoken, e->type) > 0) - fn(data, NULL, "("); - switch (e->type) { - case E_SYMBOL: - if (e->left.sym->name) - fn(data, e->left.sym, e->left.sym->name); - else - fn(data, NULL, ""); - break; - case E_NOT: - fn(data, NULL, "!"); - expr_print(e->left.expr, fn, data, E_NOT); - break; - case E_EQUAL: - if (e->left.sym->name) - fn(data, e->left.sym, e->left.sym->name); - else - fn(data, NULL, ""); - fn(data, NULL, "="); - fn(data, e->right.sym, e->right.sym->name); - break; - case E_LEQ: - case E_LTH: - if (e->left.sym->name) - fn(data, e->left.sym, e->left.sym->name); - else - fn(data, NULL, ""); - fn(data, NULL, e->type == E_LEQ ? 
"<=" : "<"); - fn(data, e->right.sym, e->right.sym->name); - break; - case E_GEQ: - case E_GTH: - if (e->left.sym->name) - fn(data, e->left.sym, e->left.sym->name); - else - fn(data, NULL, ""); - fn(data, NULL, e->type == E_LEQ ? ">=" : ">"); - fn(data, e->right.sym, e->right.sym->name); - break; - case E_UNEQUAL: - if (e->left.sym->name) - fn(data, e->left.sym, e->left.sym->name); - else - fn(data, NULL, ""); - fn(data, NULL, "!="); - fn(data, e->right.sym, e->right.sym->name); - break; - case E_OR: - expr_print(e->left.expr, fn, data, E_OR); - fn(data, NULL, " || "); - expr_print(e->right.expr, fn, data, E_OR); - break; - case E_AND: - expr_print(e->left.expr, fn, data, E_AND); - fn(data, NULL, " && "); - expr_print(e->right.expr, fn, data, E_AND); - break; - case E_LIST: - fn(data, e->right.sym, e->right.sym->name); - if (e->left.expr) { - fn(data, NULL, " ^ "); - expr_print(e->left.expr, fn, data, E_LIST); - } - break; - case E_RANGE: - fn(data, NULL, "["); - fn(data, e->left.sym, e->left.sym->name); - fn(data, NULL, " "); - fn(data, e->right.sym, e->right.sym->name); - fn(data, NULL, "]"); - break; - default: - { - char buf[32]; - sprintf(buf, "", e->type); - fn(data, NULL, buf); - break; - } - } - if (expr_compare_type(prevtoken, e->type) > 0) - fn(data, NULL, ")"); + if ( !e ) + { + fn(data, NULL, "y"); + return; + } + + if ( expr_compare_type(prevtoken, e->type) > 0 ) + fn(data, NULL, "("); + switch (e->type) + { + case E_SYMBOL: + if ( e->left.sym->name ) + fn(data, e->left.sym, e->left.sym->name); + else + fn(data, NULL, ""); + break; + case E_NOT: + fn(data, NULL, "!"); + expr_print(e->left.expr, fn, data, E_NOT); + break; + case E_EQUAL: + if ( e->left.sym->name ) + fn(data, e->left.sym, e->left.sym->name); + else + fn(data, NULL, ""); + fn(data, NULL, "="); + fn(data, e->right.sym, e->right.sym->name); + break; + case E_LEQ: + case E_LTH: + if ( e->left.sym->name ) + fn(data, e->left.sym, e->left.sym->name); + else + fn(data, NULL, ""); + fn(data, NULL, e->type == E_LEQ ? "<=" : "<"); + fn(data, e->right.sym, e->right.sym->name); + break; + case E_GEQ: + case E_GTH: + if ( e->left.sym->name ) + fn(data, e->left.sym, e->left.sym->name); + else + fn(data, NULL, ""); + fn(data, NULL, e->type == E_LEQ ? 
">=" : ">"); + fn(data, e->right.sym, e->right.sym->name); + break; + case E_UNEQUAL: + if ( e->left.sym->name ) + fn(data, e->left.sym, e->left.sym->name); + else + fn(data, NULL, ""); + fn(data, NULL, "!="); + fn(data, e->right.sym, e->right.sym->name); + break; + case E_OR: + expr_print(e->left.expr, fn, data, E_OR); + fn(data, NULL, " || "); + expr_print(e->right.expr, fn, data, E_OR); + break; + case E_AND: + expr_print(e->left.expr, fn, data, E_AND); + fn(data, NULL, " && "); + expr_print(e->right.expr, fn, data, E_AND); + break; + case E_LIST: + fn(data, e->right.sym, e->right.sym->name); + if ( e->left.expr ) + { + fn(data, NULL, " ^ "); + expr_print(e->left.expr, fn, data, E_LIST); + } + break; + case E_RANGE: + fn(data, NULL, "["); + fn(data, e->left.sym, e->left.sym->name); + fn(data, NULL, " "); + fn(data, e->right.sym, e->right.sym->name); + fn(data, NULL, "]"); + break; + default: + { + char buf[32]; + sprintf(buf, "", e->type); + fn(data, NULL, buf); + break; + } + } + if ( expr_compare_type(prevtoken, e->type) > 0 ) + fn(data, NULL, ")"); } -static void expr_print_file_helper(void *data, struct symbol *sym, const char *str) +static void expr_print_file_helper(void *data, struct symbol *sym, + const char *str) { - xfwrite(str, strlen(str), 1, data); + xfwrite(str, strlen(str), 1, data); } void expr_fprint(struct expr *e, FILE *out) { - expr_print(e, expr_print_file_helper, out, E_NONE); + expr_print(e, expr_print_file_helper, out, E_NONE); } -static void expr_print_gstr_helper(void *data, struct symbol *sym, const char *str) +static void expr_print_gstr_helper(void *data, struct symbol *sym, + const char *str) { - struct gstr *gs = (struct gstr*)data; - const char *sym_str = NULL; + struct gstr *gs = (struct gstr *)data; + const char *sym_str = NULL; - if (sym) - sym_str = sym_get_string_value(sym); + if ( sym ) + sym_str = sym_get_string_value(sym); - if (gs->max_width) { - unsigned extra_length = strlen(str); - const char *last_cr = strrchr(gs->s, '\n'); - unsigned last_line_length; + if ( gs->max_width ) + { + unsigned extra_length = strlen(str); + const char *last_cr = strrchr(gs->s, '\n'); + unsigned last_line_length; - if (sym_str) - extra_length += 4 + strlen(sym_str); + if ( sym_str ) + extra_length += 4 + strlen(sym_str); - if (!last_cr) - last_cr = gs->s; + if ( !last_cr ) + last_cr = gs->s; - last_line_length = strlen(gs->s) - (last_cr - gs->s); + last_line_length = strlen(gs->s) - (last_cr - gs->s); - if ((last_line_length + extra_length) > gs->max_width) - str_append(gs, "\\\n"); - } + if ( (last_line_length + extra_length) > gs->max_width ) + str_append(gs, "\\\n"); + } - str_append(gs, str); - if (sym && sym->type != S_UNKNOWN) - str_printf(gs, " [=%s]", sym_str); + str_append(gs, str); + if ( sym && sym->type != S_UNKNOWN ) + str_printf(gs, " [=%s]", sym_str); } void expr_gstr_print(struct expr *e, struct gstr *gs) { - expr_print(e, expr_print_gstr_helper, gs, E_NONE); + expr_print(e, expr_print_gstr_helper, gs, E_NONE); } diff --git a/xen/tools/kconfig/gconf.c b/xen/tools/kconfig/gconf.c index 26d208b435..aec2cbdc63 100644 --- a/xen/tools/kconfig/gconf.c +++ b/xen/tools/kconfig/gconf.c @@ -7,7 +7,7 @@ */ #ifdef HAVE_CONFIG_H -# include +#include #endif #include @@ -26,12 +26,18 @@ //#define DEBUG -enum { - SINGLE_VIEW, SPLIT_VIEW, FULL_VIEW +enum +{ + SINGLE_VIEW, + SPLIT_VIEW, + FULL_VIEW }; -enum { - OPT_NORMAL, OPT_ALL, OPT_PROMPT +enum +{ + OPT_NORMAL, + OPT_ALL, + OPT_PROMPT }; static gint view_mode = FULL_VIEW; @@ -42,8 +48,8 @@ static gboolean 
resizeable = FALSE; static int opt_mode = OPT_NORMAL; GtkWidget *main_wnd = NULL; -GtkWidget *tree1_w = NULL; // left frame -GtkWidget *tree2_w = NULL; // right frame +GtkWidget *tree1_w = NULL; // left frame +GtkWidget *tree2_w = NULL; // right frame GtkWidget *text_w = NULL; GtkWidget *hpaned = NULL; GtkWidget *vpaned = NULL; @@ -62,18 +68,31 @@ static gint indent; static struct menu *current; // current node for SINGLE view static struct menu *browsed; // browsed node for SPLIT view -enum { - COL_OPTION, COL_NAME, COL_NO, COL_MOD, COL_YES, COL_VALUE, - COL_MENU, COL_COLOR, COL_EDIT, COL_PIXBUF, - COL_PIXVIS, COL_BTNVIS, COL_BTNACT, COL_BTNINC, COL_BTNRAD, - COL_NUMBER +enum +{ + COL_OPTION, + COL_NAME, + COL_NO, + COL_MOD, + COL_YES, + COL_VALUE, + COL_MENU, + COL_COLOR, + COL_EDIT, + COL_PIXBUF, + COL_PIXVIS, + COL_BTNVIS, + COL_BTNACT, + COL_BTNINC, + COL_BTNRAD, + COL_NUMBER }; static void display_list(void); static void display_tree(struct menu *menu); static void display_tree_part(void); -static void update_tree(struct menu *src, GtkTreeIter * dst); -static void set_node(GtkTreeIter * node, struct menu *menu, gchar ** row); +static void update_tree(struct menu *src, GtkTreeIter *dst); +static void set_node(GtkTreeIter *node, struct menu *menu, gchar **row); static gchar **fill_row(struct menu *menu); static void conf_changed(void); @@ -81,1441 +100,1368 @@ static void conf_changed(void); const char *dbg_sym_flags(int val) { - static char buf[256]; - - bzero(buf, 256); - - if (val & SYMBOL_CONST) - strcat(buf, "const/"); - if (val & SYMBOL_CHECK) - strcat(buf, "check/"); - if (val & SYMBOL_CHOICE) - strcat(buf, "choice/"); - if (val & SYMBOL_CHOICEVAL) - strcat(buf, "choiceval/"); - if (val & SYMBOL_VALID) - strcat(buf, "valid/"); - if (val & SYMBOL_OPTIONAL) - strcat(buf, "optional/"); - if (val & SYMBOL_WRITE) - strcat(buf, "write/"); - if (val & SYMBOL_CHANGED) - strcat(buf, "changed/"); - if (val & SYMBOL_AUTO) - strcat(buf, "auto/"); - - buf[strlen(buf) - 1] = '\0'; - - return buf; + static char buf[256]; + + bzero(buf, 256); + + if ( val & SYMBOL_CONST ) + strcat(buf, "const/"); + if ( val & SYMBOL_CHECK ) + strcat(buf, "check/"); + if ( val & SYMBOL_CHOICE ) + strcat(buf, "choice/"); + if ( val & SYMBOL_CHOICEVAL ) + strcat(buf, "choiceval/"); + if ( val & SYMBOL_VALID ) + strcat(buf, "valid/"); + if ( val & SYMBOL_OPTIONAL ) + strcat(buf, "optional/"); + if ( val & SYMBOL_WRITE ) + strcat(buf, "write/"); + if ( val & SYMBOL_CHANGED ) + strcat(buf, "changed/"); + if ( val & SYMBOL_AUTO ) + strcat(buf, "auto/"); + + buf[strlen(buf) - 1] = '\0'; + + return buf; } -void replace_button_icon(GladeXML * xml, GdkDrawable * window, - GtkStyle * style, gchar * btn_name, gchar ** xpm) +void replace_button_icon(GladeXML *xml, GdkDrawable *window, GtkStyle *style, + gchar *btn_name, gchar **xpm) { - GdkPixmap *pixmap; - GdkBitmap *mask; - GtkToolButton *button; - GtkWidget *image; - - pixmap = gdk_pixmap_create_from_xpm_d(window, &mask, - &style->bg[GTK_STATE_NORMAL], - xpm); - - button = GTK_TOOL_BUTTON(glade_xml_get_widget(xml, btn_name)); - image = gtk_image_new_from_pixmap(pixmap, mask); - gtk_widget_show(image); - gtk_tool_button_set_icon_widget(button, image); + GdkPixmap *pixmap; + GdkBitmap *mask; + GtkToolButton *button; + GtkWidget *image; + + pixmap = gdk_pixmap_create_from_xpm_d(window, &mask, + &style->bg[GTK_STATE_NORMAL], xpm); + + button = GTK_TOOL_BUTTON(glade_xml_get_widget(xml, btn_name)); + image = gtk_image_new_from_pixmap(pixmap, mask); + gtk_widget_show(image); + 
gtk_tool_button_set_icon_widget(button, image); } /* Main Window Initialization */ -void init_main_window(const gchar * glade_file) +void init_main_window(const gchar *glade_file) { - GladeXML *xml; - GtkWidget *widget; - GtkTextBuffer *txtbuf; - GtkStyle *style; - - xml = glade_xml_new(glade_file, "window1", NULL); - if (!xml) - g_error(_("GUI loading failed !\n")); - glade_xml_signal_autoconnect(xml); - - main_wnd = glade_xml_get_widget(xml, "window1"); - hpaned = glade_xml_get_widget(xml, "hpaned1"); - vpaned = glade_xml_get_widget(xml, "vpaned1"); - tree1_w = glade_xml_get_widget(xml, "treeview1"); - tree2_w = glade_xml_get_widget(xml, "treeview2"); - text_w = glade_xml_get_widget(xml, "textview3"); - - back_btn = glade_xml_get_widget(xml, "button1"); - gtk_widget_set_sensitive(back_btn, FALSE); - - widget = glade_xml_get_widget(xml, "show_name1"); - gtk_check_menu_item_set_active((GtkCheckMenuItem *) widget, - show_name); - - widget = glade_xml_get_widget(xml, "show_range1"); - gtk_check_menu_item_set_active((GtkCheckMenuItem *) widget, - show_range); - - widget = glade_xml_get_widget(xml, "show_data1"); - gtk_check_menu_item_set_active((GtkCheckMenuItem *) widget, - show_value); - - save_btn = glade_xml_get_widget(xml, "button3"); - save_menu_item = glade_xml_get_widget(xml, "save1"); - conf_set_changed_callback(conf_changed); - - style = gtk_widget_get_style(main_wnd); - widget = glade_xml_get_widget(xml, "toolbar1"); - - replace_button_icon(xml, main_wnd->window, style, - "button4", (gchar **) xpm_single_view); - replace_button_icon(xml, main_wnd->window, style, - "button5", (gchar **) xpm_split_view); - replace_button_icon(xml, main_wnd->window, style, - "button6", (gchar **) xpm_tree_view); - - txtbuf = gtk_text_view_get_buffer(GTK_TEXT_VIEW(text_w)); - tag1 = gtk_text_buffer_create_tag(txtbuf, "mytag1", - "foreground", "red", - "weight", PANGO_WEIGHT_BOLD, - NULL); - tag2 = gtk_text_buffer_create_tag(txtbuf, "mytag2", - /*"style", PANGO_STYLE_OBLIQUE, */ - NULL); - - gtk_window_set_title(GTK_WINDOW(main_wnd), rootmenu.prompt->text); - - gtk_widget_show(main_wnd); + GladeXML *xml; + GtkWidget *widget; + GtkTextBuffer *txtbuf; + GtkStyle *style; + + xml = glade_xml_new(glade_file, "window1", NULL); + if ( !xml ) + g_error(_("GUI loading failed !\n")); + glade_xml_signal_autoconnect(xml); + + main_wnd = glade_xml_get_widget(xml, "window1"); + hpaned = glade_xml_get_widget(xml, "hpaned1"); + vpaned = glade_xml_get_widget(xml, "vpaned1"); + tree1_w = glade_xml_get_widget(xml, "treeview1"); + tree2_w = glade_xml_get_widget(xml, "treeview2"); + text_w = glade_xml_get_widget(xml, "textview3"); + + back_btn = glade_xml_get_widget(xml, "button1"); + gtk_widget_set_sensitive(back_btn, FALSE); + + widget = glade_xml_get_widget(xml, "show_name1"); + gtk_check_menu_item_set_active((GtkCheckMenuItem *)widget, show_name); + + widget = glade_xml_get_widget(xml, "show_range1"); + gtk_check_menu_item_set_active((GtkCheckMenuItem *)widget, show_range); + + widget = glade_xml_get_widget(xml, "show_data1"); + gtk_check_menu_item_set_active((GtkCheckMenuItem *)widget, show_value); + + save_btn = glade_xml_get_widget(xml, "button3"); + save_menu_item = glade_xml_get_widget(xml, "save1"); + conf_set_changed_callback(conf_changed); + + style = gtk_widget_get_style(main_wnd); + widget = glade_xml_get_widget(xml, "toolbar1"); + + replace_button_icon(xml, main_wnd->window, style, "button4", + (gchar **)xpm_single_view); + replace_button_icon(xml, main_wnd->window, style, "button5", + (gchar 
**)xpm_split_view); + replace_button_icon(xml, main_wnd->window, style, "button6", + (gchar **)xpm_tree_view); + + txtbuf = gtk_text_view_get_buffer(GTK_TEXT_VIEW(text_w)); + tag1 = gtk_text_buffer_create_tag(txtbuf, "mytag1", "foreground", "red", + "weight", PANGO_WEIGHT_BOLD, NULL); + tag2 = gtk_text_buffer_create_tag(txtbuf, "mytag2", + /*"style", PANGO_STYLE_OBLIQUE, */ + NULL); + + gtk_window_set_title(GTK_WINDOW(main_wnd), rootmenu.prompt->text); + + gtk_widget_show(main_wnd); } void init_tree_model(void) { - gint i; - - tree = tree2 = gtk_tree_store_new(COL_NUMBER, - G_TYPE_STRING, G_TYPE_STRING, - G_TYPE_STRING, G_TYPE_STRING, - G_TYPE_STRING, G_TYPE_STRING, - G_TYPE_POINTER, GDK_TYPE_COLOR, - G_TYPE_BOOLEAN, GDK_TYPE_PIXBUF, - G_TYPE_BOOLEAN, G_TYPE_BOOLEAN, - G_TYPE_BOOLEAN, G_TYPE_BOOLEAN, - G_TYPE_BOOLEAN); - model2 = GTK_TREE_MODEL(tree2); - - for (parents[0] = NULL, i = 1; i < 256; i++) - parents[i] = (GtkTreeIter *) g_malloc(sizeof(GtkTreeIter)); - - tree1 = gtk_tree_store_new(COL_NUMBER, - G_TYPE_STRING, G_TYPE_STRING, - G_TYPE_STRING, G_TYPE_STRING, - G_TYPE_STRING, G_TYPE_STRING, - G_TYPE_POINTER, GDK_TYPE_COLOR, - G_TYPE_BOOLEAN, GDK_TYPE_PIXBUF, - G_TYPE_BOOLEAN, G_TYPE_BOOLEAN, - G_TYPE_BOOLEAN, G_TYPE_BOOLEAN, - G_TYPE_BOOLEAN); - model1 = GTK_TREE_MODEL(tree1); + gint i; + + tree = tree2 = gtk_tree_store_new( + COL_NUMBER, G_TYPE_STRING, G_TYPE_STRING, G_TYPE_STRING, G_TYPE_STRING, + G_TYPE_STRING, G_TYPE_STRING, G_TYPE_POINTER, GDK_TYPE_COLOR, + G_TYPE_BOOLEAN, GDK_TYPE_PIXBUF, G_TYPE_BOOLEAN, G_TYPE_BOOLEAN, + G_TYPE_BOOLEAN, G_TYPE_BOOLEAN, G_TYPE_BOOLEAN); + model2 = GTK_TREE_MODEL(tree2); + + for ( parents[0] = NULL, i = 1; i < 256; i++ ) + parents[i] = (GtkTreeIter *)g_malloc(sizeof(GtkTreeIter)); + + tree1 = gtk_tree_store_new( + COL_NUMBER, G_TYPE_STRING, G_TYPE_STRING, G_TYPE_STRING, G_TYPE_STRING, + G_TYPE_STRING, G_TYPE_STRING, G_TYPE_POINTER, GDK_TYPE_COLOR, + G_TYPE_BOOLEAN, GDK_TYPE_PIXBUF, G_TYPE_BOOLEAN, G_TYPE_BOOLEAN, + G_TYPE_BOOLEAN, G_TYPE_BOOLEAN, G_TYPE_BOOLEAN); + model1 = GTK_TREE_MODEL(tree1); } void init_left_tree(void) { - GtkTreeView *view = GTK_TREE_VIEW(tree1_w); - GtkCellRenderer *renderer; - GtkTreeSelection *sel; - GtkTreeViewColumn *column; - - gtk_tree_view_set_model(view, model1); - gtk_tree_view_set_headers_visible(view, TRUE); - gtk_tree_view_set_rules_hint(view, TRUE); - - column = gtk_tree_view_column_new(); - gtk_tree_view_append_column(view, column); - gtk_tree_view_column_set_title(column, _("Options")); - - renderer = gtk_cell_renderer_toggle_new(); - gtk_tree_view_column_pack_start(GTK_TREE_VIEW_COLUMN(column), - renderer, FALSE); - gtk_tree_view_column_set_attributes(GTK_TREE_VIEW_COLUMN(column), - renderer, - "active", COL_BTNACT, - "inconsistent", COL_BTNINC, - "visible", COL_BTNVIS, - "radio", COL_BTNRAD, NULL); - renderer = gtk_cell_renderer_text_new(); - gtk_tree_view_column_pack_start(GTK_TREE_VIEW_COLUMN(column), - renderer, FALSE); - gtk_tree_view_column_set_attributes(GTK_TREE_VIEW_COLUMN(column), - renderer, - "text", COL_OPTION, - "foreground-gdk", - COL_COLOR, NULL); - - sel = gtk_tree_view_get_selection(view); - gtk_tree_selection_set_mode(sel, GTK_SELECTION_SINGLE); - gtk_widget_realize(tree1_w); + GtkTreeView *view = GTK_TREE_VIEW(tree1_w); + GtkCellRenderer *renderer; + GtkTreeSelection *sel; + GtkTreeViewColumn *column; + + gtk_tree_view_set_model(view, model1); + gtk_tree_view_set_headers_visible(view, TRUE); + gtk_tree_view_set_rules_hint(view, TRUE); + + column = gtk_tree_view_column_new(); + 
gtk_tree_view_append_column(view, column); + gtk_tree_view_column_set_title(column, _("Options")); + + renderer = gtk_cell_renderer_toggle_new(); + gtk_tree_view_column_pack_start(GTK_TREE_VIEW_COLUMN(column), renderer, + FALSE); + gtk_tree_view_column_set_attributes(GTK_TREE_VIEW_COLUMN(column), renderer, + "active", COL_BTNACT, "inconsistent", + COL_BTNINC, "visible", COL_BTNVIS, + "radio", COL_BTNRAD, NULL); + renderer = gtk_cell_renderer_text_new(); + gtk_tree_view_column_pack_start(GTK_TREE_VIEW_COLUMN(column), renderer, + FALSE); + gtk_tree_view_column_set_attributes(GTK_TREE_VIEW_COLUMN(column), renderer, + "text", COL_OPTION, "foreground-gdk", + COL_COLOR, NULL); + + sel = gtk_tree_view_get_selection(view); + gtk_tree_selection_set_mode(sel, GTK_SELECTION_SINGLE); + gtk_widget_realize(tree1_w); } -static void renderer_edited(GtkCellRendererText * cell, - const gchar * path_string, - const gchar * new_text, gpointer user_data); +static void renderer_edited(GtkCellRendererText *cell, const gchar *path_string, + const gchar *new_text, gpointer user_data); void init_right_tree(void) { - GtkTreeView *view = GTK_TREE_VIEW(tree2_w); - GtkCellRenderer *renderer; - GtkTreeSelection *sel; - GtkTreeViewColumn *column; - gint i; - - gtk_tree_view_set_model(view, model2); - gtk_tree_view_set_headers_visible(view, TRUE); - gtk_tree_view_set_rules_hint(view, TRUE); - - column = gtk_tree_view_column_new(); - gtk_tree_view_append_column(view, column); - gtk_tree_view_column_set_title(column, _("Options")); - - renderer = gtk_cell_renderer_pixbuf_new(); - gtk_tree_view_column_pack_start(GTK_TREE_VIEW_COLUMN(column), - renderer, FALSE); - gtk_tree_view_column_set_attributes(GTK_TREE_VIEW_COLUMN(column), - renderer, - "pixbuf", COL_PIXBUF, - "visible", COL_PIXVIS, NULL); - renderer = gtk_cell_renderer_toggle_new(); - gtk_tree_view_column_pack_start(GTK_TREE_VIEW_COLUMN(column), - renderer, FALSE); - gtk_tree_view_column_set_attributes(GTK_TREE_VIEW_COLUMN(column), - renderer, - "active", COL_BTNACT, - "inconsistent", COL_BTNINC, - "visible", COL_BTNVIS, - "radio", COL_BTNRAD, NULL); - renderer = gtk_cell_renderer_text_new(); - gtk_tree_view_column_pack_start(GTK_TREE_VIEW_COLUMN(column), - renderer, FALSE); - gtk_tree_view_column_set_attributes(GTK_TREE_VIEW_COLUMN(column), - renderer, - "text", COL_OPTION, - "foreground-gdk", - COL_COLOR, NULL); - - renderer = gtk_cell_renderer_text_new(); - gtk_tree_view_insert_column_with_attributes(view, -1, - _("Name"), renderer, - "text", COL_NAME, - "foreground-gdk", - COL_COLOR, NULL); - renderer = gtk_cell_renderer_text_new(); - gtk_tree_view_insert_column_with_attributes(view, -1, - "N", renderer, - "text", COL_NO, - "foreground-gdk", - COL_COLOR, NULL); - renderer = gtk_cell_renderer_text_new(); - gtk_tree_view_insert_column_with_attributes(view, -1, - "M", renderer, - "text", COL_MOD, - "foreground-gdk", - COL_COLOR, NULL); - renderer = gtk_cell_renderer_text_new(); - gtk_tree_view_insert_column_with_attributes(view, -1, - "Y", renderer, - "text", COL_YES, - "foreground-gdk", - COL_COLOR, NULL); - renderer = gtk_cell_renderer_text_new(); - gtk_tree_view_insert_column_with_attributes(view, -1, - _("Value"), renderer, - "text", COL_VALUE, - "editable", - COL_EDIT, - "foreground-gdk", - COL_COLOR, NULL); - g_signal_connect(G_OBJECT(renderer), "edited", - G_CALLBACK(renderer_edited), NULL); - - column = gtk_tree_view_get_column(view, COL_NAME); - gtk_tree_view_column_set_visible(column, show_name); - column = gtk_tree_view_get_column(view, COL_NO); - 
gtk_tree_view_column_set_visible(column, show_range); - column = gtk_tree_view_get_column(view, COL_MOD); - gtk_tree_view_column_set_visible(column, show_range); - column = gtk_tree_view_get_column(view, COL_YES); - gtk_tree_view_column_set_visible(column, show_range); - column = gtk_tree_view_get_column(view, COL_VALUE); - gtk_tree_view_column_set_visible(column, show_value); - - if (resizeable) { - for (i = 0; i < COL_VALUE; i++) { - column = gtk_tree_view_get_column(view, i); - gtk_tree_view_column_set_resizable(column, TRUE); - } - } - - sel = gtk_tree_view_get_selection(view); - gtk_tree_selection_set_mode(sel, GTK_SELECTION_SINGLE); + GtkTreeView *view = GTK_TREE_VIEW(tree2_w); + GtkCellRenderer *renderer; + GtkTreeSelection *sel; + GtkTreeViewColumn *column; + gint i; + + gtk_tree_view_set_model(view, model2); + gtk_tree_view_set_headers_visible(view, TRUE); + gtk_tree_view_set_rules_hint(view, TRUE); + + column = gtk_tree_view_column_new(); + gtk_tree_view_append_column(view, column); + gtk_tree_view_column_set_title(column, _("Options")); + + renderer = gtk_cell_renderer_pixbuf_new(); + gtk_tree_view_column_pack_start(GTK_TREE_VIEW_COLUMN(column), renderer, + FALSE); + gtk_tree_view_column_set_attributes(GTK_TREE_VIEW_COLUMN(column), renderer, + "pixbuf", COL_PIXBUF, "visible", + COL_PIXVIS, NULL); + renderer = gtk_cell_renderer_toggle_new(); + gtk_tree_view_column_pack_start(GTK_TREE_VIEW_COLUMN(column), renderer, + FALSE); + gtk_tree_view_column_set_attributes(GTK_TREE_VIEW_COLUMN(column), renderer, + "active", COL_BTNACT, "inconsistent", + COL_BTNINC, "visible", COL_BTNVIS, + "radio", COL_BTNRAD, NULL); + renderer = gtk_cell_renderer_text_new(); + gtk_tree_view_column_pack_start(GTK_TREE_VIEW_COLUMN(column), renderer, + FALSE); + gtk_tree_view_column_set_attributes(GTK_TREE_VIEW_COLUMN(column), renderer, + "text", COL_OPTION, "foreground-gdk", + COL_COLOR, NULL); + + renderer = gtk_cell_renderer_text_new(); + gtk_tree_view_insert_column_with_attributes( + view, -1, _("Name"), renderer, "text", COL_NAME, "foreground-gdk", + COL_COLOR, NULL); + renderer = gtk_cell_renderer_text_new(); + gtk_tree_view_insert_column_with_attributes(view, -1, "N", renderer, "text", + COL_NO, "foreground-gdk", + COL_COLOR, NULL); + renderer = gtk_cell_renderer_text_new(); + gtk_tree_view_insert_column_with_attributes(view, -1, "M", renderer, "text", + COL_MOD, "foreground-gdk", + COL_COLOR, NULL); + renderer = gtk_cell_renderer_text_new(); + gtk_tree_view_insert_column_with_attributes(view, -1, "Y", renderer, "text", + COL_YES, "foreground-gdk", + COL_COLOR, NULL); + renderer = gtk_cell_renderer_text_new(); + gtk_tree_view_insert_column_with_attributes( + view, -1, _("Value"), renderer, "text", COL_VALUE, "editable", COL_EDIT, + "foreground-gdk", COL_COLOR, NULL); + g_signal_connect(G_OBJECT(renderer), "edited", G_CALLBACK(renderer_edited), + NULL); + + column = gtk_tree_view_get_column(view, COL_NAME); + gtk_tree_view_column_set_visible(column, show_name); + column = gtk_tree_view_get_column(view, COL_NO); + gtk_tree_view_column_set_visible(column, show_range); + column = gtk_tree_view_get_column(view, COL_MOD); + gtk_tree_view_column_set_visible(column, show_range); + column = gtk_tree_view_get_column(view, COL_YES); + gtk_tree_view_column_set_visible(column, show_range); + column = gtk_tree_view_get_column(view, COL_VALUE); + gtk_tree_view_column_set_visible(column, show_value); + + if ( resizeable ) + { + for ( i = 0; i < COL_VALUE; i++ ) + { + column = gtk_tree_view_get_column(view, i); + 
gtk_tree_view_column_set_resizable(column, TRUE); + } + } + + sel = gtk_tree_view_get_selection(view); + gtk_tree_selection_set_mode(sel, GTK_SELECTION_SINGLE); } - /* Utility Functions */ - static void text_insert_help(struct menu *menu) { - GtkTextBuffer *buffer; - GtkTextIter start, end; - const char *prompt = _(menu_get_prompt(menu)); - struct gstr help = str_new(); - - menu_get_ext_help(menu, &help); - - buffer = gtk_text_view_get_buffer(GTK_TEXT_VIEW(text_w)); - gtk_text_buffer_get_bounds(buffer, &start, &end); - gtk_text_buffer_delete(buffer, &start, &end); - gtk_text_view_set_left_margin(GTK_TEXT_VIEW(text_w), 15); - - gtk_text_buffer_get_end_iter(buffer, &end); - gtk_text_buffer_insert_with_tags(buffer, &end, prompt, -1, tag1, - NULL); - gtk_text_buffer_insert_at_cursor(buffer, "\n\n", 2); - gtk_text_buffer_get_end_iter(buffer, &end); - gtk_text_buffer_insert_with_tags(buffer, &end, str_get(&help), -1, tag2, - NULL); - str_free(&help); + GtkTextBuffer *buffer; + GtkTextIter start, end; + const char *prompt = _(menu_get_prompt(menu)); + struct gstr help = str_new(); + + menu_get_ext_help(menu, &help); + + buffer = gtk_text_view_get_buffer(GTK_TEXT_VIEW(text_w)); + gtk_text_buffer_get_bounds(buffer, &start, &end); + gtk_text_buffer_delete(buffer, &start, &end); + gtk_text_view_set_left_margin(GTK_TEXT_VIEW(text_w), 15); + + gtk_text_buffer_get_end_iter(buffer, &end); + gtk_text_buffer_insert_with_tags(buffer, &end, prompt, -1, tag1, NULL); + gtk_text_buffer_insert_at_cursor(buffer, "\n\n", 2); + gtk_text_buffer_get_end_iter(buffer, &end); + gtk_text_buffer_insert_with_tags(buffer, &end, str_get(&help), -1, tag2, + NULL); + str_free(&help); } - static void text_insert_msg(const char *title, const char *message) { - GtkTextBuffer *buffer; - GtkTextIter start, end; - const char *msg = message; - - buffer = gtk_text_view_get_buffer(GTK_TEXT_VIEW(text_w)); - gtk_text_buffer_get_bounds(buffer, &start, &end); - gtk_text_buffer_delete(buffer, &start, &end); - gtk_text_view_set_left_margin(GTK_TEXT_VIEW(text_w), 15); - - gtk_text_buffer_get_end_iter(buffer, &end); - gtk_text_buffer_insert_with_tags(buffer, &end, title, -1, tag1, - NULL); - gtk_text_buffer_insert_at_cursor(buffer, "\n\n", 2); - gtk_text_buffer_get_end_iter(buffer, &end); - gtk_text_buffer_insert_with_tags(buffer, &end, msg, -1, tag2, - NULL); + GtkTextBuffer *buffer; + GtkTextIter start, end; + const char *msg = message; + + buffer = gtk_text_view_get_buffer(GTK_TEXT_VIEW(text_w)); + gtk_text_buffer_get_bounds(buffer, &start, &end); + gtk_text_buffer_delete(buffer, &start, &end); + gtk_text_view_set_left_margin(GTK_TEXT_VIEW(text_w), 15); + + gtk_text_buffer_get_end_iter(buffer, &end); + gtk_text_buffer_insert_with_tags(buffer, &end, title, -1, tag1, NULL); + gtk_text_buffer_insert_at_cursor(buffer, "\n\n", 2); + gtk_text_buffer_get_end_iter(buffer, &end); + gtk_text_buffer_insert_with_tags(buffer, &end, msg, -1, tag2, NULL); } - /* Main Windows Callbacks */ -void on_save_activate(GtkMenuItem * menuitem, gpointer user_data); -gboolean on_window1_delete_event(GtkWidget * widget, GdkEvent * event, - gpointer user_data) +void on_save_activate(GtkMenuItem *menuitem, gpointer user_data); +gboolean on_window1_delete_event(GtkWidget *widget, GdkEvent *event, + gpointer user_data) { - GtkWidget *dialog, *label; - gint result; - - if (!conf_get_changed()) - return FALSE; - - dialog = gtk_dialog_new_with_buttons(_("Warning !"), - GTK_WINDOW(main_wnd), - (GtkDialogFlags) - (GTK_DIALOG_MODAL | - GTK_DIALOG_DESTROY_WITH_PARENT), - 
GTK_STOCK_OK, - GTK_RESPONSE_YES, - GTK_STOCK_NO, - GTK_RESPONSE_NO, - GTK_STOCK_CANCEL, - GTK_RESPONSE_CANCEL, NULL); - gtk_dialog_set_default_response(GTK_DIALOG(dialog), - GTK_RESPONSE_CANCEL); - - label = gtk_label_new(_("\nSave configuration ?\n")); - gtk_container_add(GTK_CONTAINER(GTK_DIALOG(dialog)->vbox), label); - gtk_widget_show(label); - - result = gtk_dialog_run(GTK_DIALOG(dialog)); - switch (result) { - case GTK_RESPONSE_YES: - on_save_activate(NULL, NULL); - return FALSE; - case GTK_RESPONSE_NO: - return FALSE; - case GTK_RESPONSE_CANCEL: - case GTK_RESPONSE_DELETE_EVENT: - default: - gtk_widget_destroy(dialog); - return TRUE; - } - - return FALSE; + GtkWidget *dialog, *label; + gint result; + + if ( !conf_get_changed() ) + return FALSE; + + dialog = gtk_dialog_new_with_buttons( + _("Warning !"), GTK_WINDOW(main_wnd), + (GtkDialogFlags)(GTK_DIALOG_MODAL | GTK_DIALOG_DESTROY_WITH_PARENT), + GTK_STOCK_OK, GTK_RESPONSE_YES, GTK_STOCK_NO, GTK_RESPONSE_NO, + GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL, NULL); + gtk_dialog_set_default_response(GTK_DIALOG(dialog), GTK_RESPONSE_CANCEL); + + label = gtk_label_new(_("\nSave configuration ?\n")); + gtk_container_add(GTK_CONTAINER(GTK_DIALOG(dialog)->vbox), label); + gtk_widget_show(label); + + result = gtk_dialog_run(GTK_DIALOG(dialog)); + switch (result) + { + case GTK_RESPONSE_YES: + on_save_activate(NULL, NULL); + return FALSE; + case GTK_RESPONSE_NO: + return FALSE; + case GTK_RESPONSE_CANCEL: + case GTK_RESPONSE_DELETE_EVENT: + default: + gtk_widget_destroy(dialog); + return TRUE; + } + + return FALSE; } - -void on_window1_destroy(GtkObject * object, gpointer user_data) +void on_window1_destroy(GtkObject *object, gpointer user_data) { - gtk_main_quit(); + gtk_main_quit(); } - -void -on_window1_size_request(GtkWidget * widget, - GtkRequisition * requisition, gpointer user_data) +void on_window1_size_request(GtkWidget *widget, GtkRequisition *requisition, + gpointer user_data) { - static gint old_h; - gint w, h; + static gint old_h; + gint w, h; - if (widget->window == NULL) - gtk_window_get_default_size(GTK_WINDOW(main_wnd), &w, &h); - else - gdk_window_get_size(widget->window, &w, &h); + if ( widget->window == NULL ) + gtk_window_get_default_size(GTK_WINDOW(main_wnd), &w, &h); + else + gdk_window_get_size(widget->window, &w, &h); - if (h == old_h) - return; - old_h = h; + if ( h == old_h ) + return; + old_h = h; - gtk_paned_set_position(GTK_PANED(vpaned), 2 * h / 3); + gtk_paned_set_position(GTK_PANED(vpaned), 2 * h / 3); } - /* Menu & Toolbar Callbacks */ - -static void -load_filename(GtkFileSelection * file_selector, gpointer user_data) +static void load_filename(GtkFileSelection *file_selector, gpointer user_data) { - const gchar *fn; + const gchar *fn; - fn = gtk_file_selection_get_filename(GTK_FILE_SELECTION - (user_data)); + fn = gtk_file_selection_get_filename(GTK_FILE_SELECTION(user_data)); - if (conf_read(fn)) - text_insert_msg(_("Error"), _("Unable to load configuration !")); - else - display_tree(&rootmenu); + if ( conf_read(fn) ) + text_insert_msg(_("Error"), _("Unable to load configuration !")); + else + display_tree(&rootmenu); } -void on_load1_activate(GtkMenuItem * menuitem, gpointer user_data) +void on_load1_activate(GtkMenuItem *menuitem, gpointer user_data) { - GtkWidget *fs; - - fs = gtk_file_selection_new(_("Load file...")); - g_signal_connect(GTK_OBJECT(GTK_FILE_SELECTION(fs)->ok_button), - "clicked", - G_CALLBACK(load_filename), (gpointer) fs); - g_signal_connect_swapped(GTK_OBJECT - 
(GTK_FILE_SELECTION(fs)->ok_button), - "clicked", G_CALLBACK(gtk_widget_destroy), - (gpointer) fs); - g_signal_connect_swapped(GTK_OBJECT - (GTK_FILE_SELECTION(fs)->cancel_button), - "clicked", G_CALLBACK(gtk_widget_destroy), - (gpointer) fs); - gtk_widget_show(fs); + GtkWidget *fs; + + fs = gtk_file_selection_new(_("Load file...")); + g_signal_connect(GTK_OBJECT(GTK_FILE_SELECTION(fs)->ok_button), "clicked", + G_CALLBACK(load_filename), (gpointer)fs); + g_signal_connect_swapped(GTK_OBJECT(GTK_FILE_SELECTION(fs)->ok_button), + "clicked", G_CALLBACK(gtk_widget_destroy), + (gpointer)fs); + g_signal_connect_swapped(GTK_OBJECT(GTK_FILE_SELECTION(fs)->cancel_button), + "clicked", G_CALLBACK(gtk_widget_destroy), + (gpointer)fs); + gtk_widget_show(fs); } - -void on_save_activate(GtkMenuItem * menuitem, gpointer user_data) +void on_save_activate(GtkMenuItem *menuitem, gpointer user_data) { - if (conf_write(NULL)) - text_insert_msg(_("Error"), _("Unable to save configuration !")); + if ( conf_write(NULL) ) + text_insert_msg(_("Error"), _("Unable to save configuration !")); } - -static void -store_filename(GtkFileSelection * file_selector, gpointer user_data) +static void store_filename(GtkFileSelection *file_selector, gpointer user_data) { - const gchar *fn; + const gchar *fn; - fn = gtk_file_selection_get_filename(GTK_FILE_SELECTION - (user_data)); + fn = gtk_file_selection_get_filename(GTK_FILE_SELECTION(user_data)); - if (conf_write(fn)) - text_insert_msg(_("Error"), _("Unable to save configuration !")); + if ( conf_write(fn) ) + text_insert_msg(_("Error"), _("Unable to save configuration !")); - gtk_widget_destroy(GTK_WIDGET(user_data)); + gtk_widget_destroy(GTK_WIDGET(user_data)); } -void on_save_as1_activate(GtkMenuItem * menuitem, gpointer user_data) +void on_save_as1_activate(GtkMenuItem *menuitem, gpointer user_data) { - GtkWidget *fs; - - fs = gtk_file_selection_new(_("Save file as...")); - g_signal_connect(GTK_OBJECT(GTK_FILE_SELECTION(fs)->ok_button), - "clicked", - G_CALLBACK(store_filename), (gpointer) fs); - g_signal_connect_swapped(GTK_OBJECT - (GTK_FILE_SELECTION(fs)->ok_button), - "clicked", G_CALLBACK(gtk_widget_destroy), - (gpointer) fs); - g_signal_connect_swapped(GTK_OBJECT - (GTK_FILE_SELECTION(fs)->cancel_button), - "clicked", G_CALLBACK(gtk_widget_destroy), - (gpointer) fs); - gtk_widget_show(fs); + GtkWidget *fs; + + fs = gtk_file_selection_new(_("Save file as...")); + g_signal_connect(GTK_OBJECT(GTK_FILE_SELECTION(fs)->ok_button), "clicked", + G_CALLBACK(store_filename), (gpointer)fs); + g_signal_connect_swapped(GTK_OBJECT(GTK_FILE_SELECTION(fs)->ok_button), + "clicked", G_CALLBACK(gtk_widget_destroy), + (gpointer)fs); + g_signal_connect_swapped(GTK_OBJECT(GTK_FILE_SELECTION(fs)->cancel_button), + "clicked", G_CALLBACK(gtk_widget_destroy), + (gpointer)fs); + gtk_widget_show(fs); } - -void on_quit1_activate(GtkMenuItem * menuitem, gpointer user_data) +void on_quit1_activate(GtkMenuItem *menuitem, gpointer user_data) { - if (!on_window1_delete_event(NULL, NULL, NULL)) - gtk_widget_destroy(GTK_WIDGET(main_wnd)); + if ( !on_window1_delete_event(NULL, NULL, NULL) ) + gtk_widget_destroy(GTK_WIDGET(main_wnd)); } - -void on_show_name1_activate(GtkMenuItem * menuitem, gpointer user_data) +void on_show_name1_activate(GtkMenuItem *menuitem, gpointer user_data) { - GtkTreeViewColumn *col; + GtkTreeViewColumn *col; - show_name = GTK_CHECK_MENU_ITEM(menuitem)->active; - col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), COL_NAME); - if (col) - gtk_tree_view_column_set_visible(col, 
show_name); + show_name = GTK_CHECK_MENU_ITEM(menuitem)->active; + col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), COL_NAME); + if ( col ) + gtk_tree_view_column_set_visible(col, show_name); } - -void on_show_range1_activate(GtkMenuItem * menuitem, gpointer user_data) +void on_show_range1_activate(GtkMenuItem *menuitem, gpointer user_data) { - GtkTreeViewColumn *col; - - show_range = GTK_CHECK_MENU_ITEM(menuitem)->active; - col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), COL_NO); - if (col) - gtk_tree_view_column_set_visible(col, show_range); - col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), COL_MOD); - if (col) - gtk_tree_view_column_set_visible(col, show_range); - col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), COL_YES); - if (col) - gtk_tree_view_column_set_visible(col, show_range); - + GtkTreeViewColumn *col; + + show_range = GTK_CHECK_MENU_ITEM(menuitem)->active; + col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), COL_NO); + if ( col ) + gtk_tree_view_column_set_visible(col, show_range); + col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), COL_MOD); + if ( col ) + gtk_tree_view_column_set_visible(col, show_range); + col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), COL_YES); + if ( col ) + gtk_tree_view_column_set_visible(col, show_range); } - -void on_show_data1_activate(GtkMenuItem * menuitem, gpointer user_data) +void on_show_data1_activate(GtkMenuItem *menuitem, gpointer user_data) { - GtkTreeViewColumn *col; + GtkTreeViewColumn *col; - show_value = GTK_CHECK_MENU_ITEM(menuitem)->active; - col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), COL_VALUE); - if (col) - gtk_tree_view_column_set_visible(col, show_value); + show_value = GTK_CHECK_MENU_ITEM(menuitem)->active; + col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), COL_VALUE); + if ( col ) + gtk_tree_view_column_set_visible(col, show_value); } - -void -on_set_option_mode1_activate(GtkMenuItem *menuitem, gpointer user_data) +void on_set_option_mode1_activate(GtkMenuItem *menuitem, gpointer user_data) { - opt_mode = OPT_NORMAL; - gtk_tree_store_clear(tree2); - display_tree(&rootmenu); /* instead of update_tree to speed-up */ + opt_mode = OPT_NORMAL; + gtk_tree_store_clear(tree2); + display_tree(&rootmenu); /* instead of update_tree to speed-up */ } - -void -on_set_option_mode2_activate(GtkMenuItem *menuitem, gpointer user_data) +void on_set_option_mode2_activate(GtkMenuItem *menuitem, gpointer user_data) { - opt_mode = OPT_ALL; - gtk_tree_store_clear(tree2); - display_tree(&rootmenu); /* instead of update_tree to speed-up */ + opt_mode = OPT_ALL; + gtk_tree_store_clear(tree2); + display_tree(&rootmenu); /* instead of update_tree to speed-up */ } - -void -on_set_option_mode3_activate(GtkMenuItem *menuitem, gpointer user_data) +void on_set_option_mode3_activate(GtkMenuItem *menuitem, gpointer user_data) { - opt_mode = OPT_PROMPT; - gtk_tree_store_clear(tree2); - display_tree(&rootmenu); /* instead of update_tree to speed-up */ + opt_mode = OPT_PROMPT; + gtk_tree_store_clear(tree2); + display_tree(&rootmenu); /* instead of update_tree to speed-up */ } - -void on_introduction1_activate(GtkMenuItem * menuitem, gpointer user_data) +void on_introduction1_activate(GtkMenuItem *menuitem, gpointer user_data) { - GtkWidget *dialog; - const gchar *intro_text = _( - "Welcome to gkc, the GTK+ graphical configuration tool\n" - "For each option, a blank box indicates the feature is disabled, a\n" - "check indicates it is enabled, and a dot indicates that it is to\n" - "be compiled as a module. 
Clicking on the box will cycle through the three states.\n" - "\n" - "If you do not see an option (e.g., a device driver) that you\n" - "believe should be present, try turning on Show All Options\n" - "under the Options menu.\n" - "Although there is no cross reference yet to help you figure out\n" - "what other options must be enabled to support the option you\n" - "are interested in, you can still view the help of a grayed-out\n" - "option.\n" - "\n" - "Toggling Show Debug Info under the Options menu will show \n" - "the dependencies, which you can then match by examining other options."); - - dialog = gtk_message_dialog_new(GTK_WINDOW(main_wnd), - GTK_DIALOG_DESTROY_WITH_PARENT, - GTK_MESSAGE_INFO, - GTK_BUTTONS_CLOSE, "%s", intro_text); - g_signal_connect_swapped(GTK_OBJECT(dialog), "response", - G_CALLBACK(gtk_widget_destroy), - GTK_OBJECT(dialog)); - gtk_widget_show_all(dialog); + GtkWidget *dialog; + const gchar *intro_text = + _("Welcome to gkc, the GTK+ graphical configuration tool\n" + "For each option, a blank box indicates the feature is disabled, a\n" + "check indicates it is enabled, and a dot indicates that it is to\n" + "be compiled as a module. Clicking on the box will cycle through " + "the three states.\n" + "\n" + "If you do not see an option (e.g., a device driver) that you\n" + "believe should be present, try turning on Show All Options\n" + "under the Options menu.\n" + "Although there is no cross reference yet to help you figure out\n" + "what other options must be enabled to support the option you\n" + "are interested in, you can still view the help of a grayed-out\n" + "option.\n" + "\n" + "Toggling Show Debug Info under the Options menu will show \n" + "the dependencies, which you can then match by examining other " + "options."); + + dialog = gtk_message_dialog_new( + GTK_WINDOW(main_wnd), GTK_DIALOG_DESTROY_WITH_PARENT, GTK_MESSAGE_INFO, + GTK_BUTTONS_CLOSE, "%s", intro_text); + g_signal_connect_swapped(GTK_OBJECT(dialog), "response", + G_CALLBACK(gtk_widget_destroy), + GTK_OBJECT(dialog)); + gtk_widget_show_all(dialog); } - -void on_about1_activate(GtkMenuItem * menuitem, gpointer user_data) +void on_about1_activate(GtkMenuItem *menuitem, gpointer user_data) { - GtkWidget *dialog; - const gchar *about_text = - _("gkc is copyright (c) 2002 Romain Lievin .\n" - "Based on the source code from Roman Zippel.\n"); - - dialog = gtk_message_dialog_new(GTK_WINDOW(main_wnd), - GTK_DIALOG_DESTROY_WITH_PARENT, - GTK_MESSAGE_INFO, - GTK_BUTTONS_CLOSE, "%s", about_text); - g_signal_connect_swapped(GTK_OBJECT(dialog), "response", - G_CALLBACK(gtk_widget_destroy), - GTK_OBJECT(dialog)); - gtk_widget_show_all(dialog); + GtkWidget *dialog; + const gchar *about_text = + _("gkc is copyright (c) 2002 Romain Lievin .\n" + "Based on the source code from Roman Zippel.\n"); + + dialog = gtk_message_dialog_new( + GTK_WINDOW(main_wnd), GTK_DIALOG_DESTROY_WITH_PARENT, GTK_MESSAGE_INFO, + GTK_BUTTONS_CLOSE, "%s", about_text); + g_signal_connect_swapped(GTK_OBJECT(dialog), "response", + G_CALLBACK(gtk_widget_destroy), + GTK_OBJECT(dialog)); + gtk_widget_show_all(dialog); } - -void on_license1_activate(GtkMenuItem * menuitem, gpointer user_data) +void on_license1_activate(GtkMenuItem *menuitem, gpointer user_data) { - GtkWidget *dialog; - const gchar *license_text = - _("gkc is released under the terms of the GNU GPL v2.\n" - "For more information, please see the source code or\n" - "visit http://www.fsf.org/licenses/licenses.html\n"); - - dialog = 
gtk_message_dialog_new(GTK_WINDOW(main_wnd), - GTK_DIALOG_DESTROY_WITH_PARENT, - GTK_MESSAGE_INFO, - GTK_BUTTONS_CLOSE, "%s", license_text); - g_signal_connect_swapped(GTK_OBJECT(dialog), "response", - G_CALLBACK(gtk_widget_destroy), - GTK_OBJECT(dialog)); - gtk_widget_show_all(dialog); + GtkWidget *dialog; + const gchar *license_text = + _("gkc is released under the terms of the GNU GPL v2.\n" + "For more information, please see the source code or\n" + "visit http://www.fsf.org/licenses/licenses.html\n"); + + dialog = gtk_message_dialog_new( + GTK_WINDOW(main_wnd), GTK_DIALOG_DESTROY_WITH_PARENT, GTK_MESSAGE_INFO, + GTK_BUTTONS_CLOSE, "%s", license_text); + g_signal_connect_swapped(GTK_OBJECT(dialog), "response", + G_CALLBACK(gtk_widget_destroy), + GTK_OBJECT(dialog)); + gtk_widget_show_all(dialog); } - -void on_back_clicked(GtkButton * button, gpointer user_data) +void on_back_clicked(GtkButton *button, gpointer user_data) { - enum prop_type ptype; + enum prop_type ptype; - current = current->parent; - ptype = current->prompt ? current->prompt->type : P_UNKNOWN; - if (ptype != P_MENU) - current = current->parent; - display_tree_part(); + current = current->parent; + ptype = current->prompt ? current->prompt->type : P_UNKNOWN; + if ( ptype != P_MENU ) + current = current->parent; + display_tree_part(); - if (current == &rootmenu) - gtk_widget_set_sensitive(back_btn, FALSE); + if ( current == &rootmenu ) + gtk_widget_set_sensitive(back_btn, FALSE); } - -void on_load_clicked(GtkButton * button, gpointer user_data) +void on_load_clicked(GtkButton *button, gpointer user_data) { - on_load1_activate(NULL, user_data); + on_load1_activate(NULL, user_data); } - -void on_single_clicked(GtkButton * button, gpointer user_data) +void on_single_clicked(GtkButton *button, gpointer user_data) { - view_mode = SINGLE_VIEW; - gtk_widget_hide(tree1_w); - current = &rootmenu; - display_tree_part(); + view_mode = SINGLE_VIEW; + gtk_widget_hide(tree1_w); + current = &rootmenu; + display_tree_part(); } - -void on_split_clicked(GtkButton * button, gpointer user_data) +void on_split_clicked(GtkButton *button, gpointer user_data) { - gint w, h; - view_mode = SPLIT_VIEW; - gtk_widget_show(tree1_w); - gtk_window_get_default_size(GTK_WINDOW(main_wnd), &w, &h); - gtk_paned_set_position(GTK_PANED(hpaned), w / 2); - if (tree2) - gtk_tree_store_clear(tree2); - display_list(); - - /* Disable back btn, like in full mode. */ - gtk_widget_set_sensitive(back_btn, FALSE); + gint w, h; + view_mode = SPLIT_VIEW; + gtk_widget_show(tree1_w); + gtk_window_get_default_size(GTK_WINDOW(main_wnd), &w, &h); + gtk_paned_set_position(GTK_PANED(hpaned), w / 2); + if ( tree2 ) + gtk_tree_store_clear(tree2); + display_list(); + + /* Disable back btn, like in full mode. 
*/ + gtk_widget_set_sensitive(back_btn, FALSE); } - -void on_full_clicked(GtkButton * button, gpointer user_data) +void on_full_clicked(GtkButton *button, gpointer user_data) { - view_mode = FULL_VIEW; - gtk_widget_hide(tree1_w); - if (tree2) - gtk_tree_store_clear(tree2); - display_tree(&rootmenu); - gtk_widget_set_sensitive(back_btn, FALSE); + view_mode = FULL_VIEW; + gtk_widget_hide(tree1_w); + if ( tree2 ) + gtk_tree_store_clear(tree2); + display_tree(&rootmenu); + gtk_widget_set_sensitive(back_btn, FALSE); } - -void on_collapse_clicked(GtkButton * button, gpointer user_data) +void on_collapse_clicked(GtkButton *button, gpointer user_data) { - gtk_tree_view_collapse_all(GTK_TREE_VIEW(tree2_w)); + gtk_tree_view_collapse_all(GTK_TREE_VIEW(tree2_w)); } - -void on_expand_clicked(GtkButton * button, gpointer user_data) +void on_expand_clicked(GtkButton *button, gpointer user_data) { - gtk_tree_view_expand_all(GTK_TREE_VIEW(tree2_w)); + gtk_tree_view_expand_all(GTK_TREE_VIEW(tree2_w)); } - /* CTree Callbacks */ /* Change hex/int/string value in the cell */ -static void renderer_edited(GtkCellRendererText * cell, - const gchar * path_string, - const gchar * new_text, gpointer user_data) +static void renderer_edited(GtkCellRendererText *cell, const gchar *path_string, + const gchar *new_text, gpointer user_data) { - GtkTreePath *path = gtk_tree_path_new_from_string(path_string); - GtkTreeIter iter; - const char *old_def, *new_def; - struct menu *menu; - struct symbol *sym; + GtkTreePath *path = gtk_tree_path_new_from_string(path_string); + GtkTreeIter iter; + const char *old_def, *new_def; + struct menu *menu; + struct symbol *sym; - if (!gtk_tree_model_get_iter(model2, &iter, path)) - return; + if ( !gtk_tree_model_get_iter(model2, &iter, path) ) + return; - gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); - sym = menu->sym; + gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); + sym = menu->sym; - gtk_tree_model_get(model2, &iter, COL_VALUE, &old_def, -1); - new_def = new_text; + gtk_tree_model_get(model2, &iter, COL_VALUE, &old_def, -1); + new_def = new_text; - sym_set_string_value(sym, new_def); + sym_set_string_value(sym, new_def); - update_tree(&rootmenu, NULL); + update_tree(&rootmenu, NULL); - gtk_tree_path_free(path); + gtk_tree_path_free(path); } /* Change the value of a symbol and update the tree */ static void change_sym_value(struct menu *menu, gint col) { - struct symbol *sym = menu->sym; - tristate newval; - - if (!sym) - return; - - if (col == COL_NO) - newval = no; - else if (col == COL_MOD) - newval = mod; - else if (col == COL_YES) - newval = yes; - else - return; - - switch (sym_get_type(sym)) { - case S_BOOLEAN: - case S_TRISTATE: - if (!sym_tristate_within_range(sym, newval)) - newval = yes; - sym_set_tristate_value(sym, newval); - if (view_mode == FULL_VIEW) - update_tree(&rootmenu, NULL); - else if (view_mode == SPLIT_VIEW) { - update_tree(browsed, NULL); - display_list(); - } - else if (view_mode == SINGLE_VIEW) - display_tree_part(); //fixme: keep exp/coll - break; - case S_INT: - case S_HEX: - case S_STRING: - default: - break; - } + struct symbol *sym = menu->sym; + tristate newval; + + if ( !sym ) + return; + + if ( col == COL_NO ) + newval = no; + else if ( col == COL_MOD ) + newval = mod; + else if ( col == COL_YES ) + newval = yes; + else + return; + + switch (sym_get_type(sym)) + { + case S_BOOLEAN: + case S_TRISTATE: + if ( !sym_tristate_within_range(sym, newval) ) + newval = yes; + sym_set_tristate_value(sym, newval); + if ( view_mode == FULL_VIEW 
) + update_tree(&rootmenu, NULL); + else if ( view_mode == SPLIT_VIEW ) + { + update_tree(browsed, NULL); + display_list(); + } + else if ( view_mode == SINGLE_VIEW ) + display_tree_part(); // fixme: keep exp/coll + break; + case S_INT: + case S_HEX: + case S_STRING: + default: + break; + } } static void toggle_sym_value(struct menu *menu) { - if (!menu->sym) - return; - - sym_toggle_tristate_value(menu->sym); - if (view_mode == FULL_VIEW) - update_tree(&rootmenu, NULL); - else if (view_mode == SPLIT_VIEW) { - update_tree(browsed, NULL); - display_list(); - } - else if (view_mode == SINGLE_VIEW) - display_tree_part(); //fixme: keep exp/coll + if ( !menu->sym ) + return; + + sym_toggle_tristate_value(menu->sym); + if ( view_mode == FULL_VIEW ) + update_tree(&rootmenu, NULL); + else if ( view_mode == SPLIT_VIEW ) + { + update_tree(browsed, NULL); + display_list(); + } + else if ( view_mode == SINGLE_VIEW ) + display_tree_part(); // fixme: keep exp/coll } -static gint column2index(GtkTreeViewColumn * column) +static gint column2index(GtkTreeViewColumn *column) { - gint i; + gint i; - for (i = 0; i < COL_NUMBER; i++) { - GtkTreeViewColumn *col; + for ( i = 0; i < COL_NUMBER; i++ ) + { + GtkTreeViewColumn *col; - col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), i); - if (col == column) - return i; - } + col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), i); + if ( col == column ) + return i; + } - return -1; + return -1; } - /* User click: update choice (full) or goes down (single) */ -gboolean -on_treeview2_button_press_event(GtkWidget * widget, - GdkEventButton * event, gpointer user_data) +gboolean on_treeview2_button_press_event(GtkWidget *widget, + GdkEventButton *event, + gpointer user_data) { - GtkTreeView *view = GTK_TREE_VIEW(widget); - GtkTreePath *path; - GtkTreeViewColumn *column; - GtkTreeIter iter; - struct menu *menu; - gint col; - -#if GTK_CHECK_VERSION(2,1,4) // bug in ctree with earlier version of GTK - gint tx = (gint) event->x; - gint ty = (gint) event->y; - gint cx, cy; - - gtk_tree_view_get_path_at_pos(view, tx, ty, &path, &column, &cx, - &cy); + GtkTreeView *view = GTK_TREE_VIEW(widget); + GtkTreePath *path; + GtkTreeViewColumn *column; + GtkTreeIter iter; + struct menu *menu; + gint col; + +#if GTK_CHECK_VERSION(2, 1, 4) // bug in ctree with earlier version of GTK + gint tx = (gint)event->x; + gint ty = (gint)event->y; + gint cx, cy; + + gtk_tree_view_get_path_at_pos(view, tx, ty, &path, &column, &cx, &cy); #else - gtk_tree_view_get_cursor(view, &path, &column); + gtk_tree_view_get_cursor(view, &path, &column); #endif - if (path == NULL) - return FALSE; - - if (!gtk_tree_model_get_iter(model2, &iter, path)) - return FALSE; - gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); - - col = column2index(column); - if (event->type == GDK_2BUTTON_PRESS) { - enum prop_type ptype; - ptype = menu->prompt ? 
menu->prompt->type : P_UNKNOWN; - - if (ptype == P_MENU && view_mode != FULL_VIEW && col == COL_OPTION) { - // goes down into menu - current = menu; - display_tree_part(); - gtk_widget_set_sensitive(back_btn, TRUE); - } else if ((col == COL_OPTION)) { - toggle_sym_value(menu); - gtk_tree_view_expand_row(view, path, TRUE); - } - } else { - if (col == COL_VALUE) { - toggle_sym_value(menu); - gtk_tree_view_expand_row(view, path, TRUE); - } else if (col == COL_NO || col == COL_MOD - || col == COL_YES) { - change_sym_value(menu, col); - gtk_tree_view_expand_row(view, path, TRUE); - } - } - - return FALSE; + if ( path == NULL ) + return FALSE; + + if ( !gtk_tree_model_get_iter(model2, &iter, path) ) + return FALSE; + gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); + + col = column2index(column); + if ( event->type == GDK_2BUTTON_PRESS ) + { + enum prop_type ptype; + ptype = menu->prompt ? menu->prompt->type : P_UNKNOWN; + + if ( ptype == P_MENU && view_mode != FULL_VIEW && col == COL_OPTION ) + { + // goes down into menu + current = menu; + display_tree_part(); + gtk_widget_set_sensitive(back_btn, TRUE); + } + else if ( (col == COL_OPTION) ) + { + toggle_sym_value(menu); + gtk_tree_view_expand_row(view, path, TRUE); + } + } + else + { + if ( col == COL_VALUE ) + { + toggle_sym_value(menu); + gtk_tree_view_expand_row(view, path, TRUE); + } + else if ( col == COL_NO || col == COL_MOD || col == COL_YES ) + { + change_sym_value(menu, col); + gtk_tree_view_expand_row(view, path, TRUE); + } + } + + return FALSE; } /* Key pressed: update choice */ -gboolean -on_treeview2_key_press_event(GtkWidget * widget, - GdkEventKey * event, gpointer user_data) +gboolean on_treeview2_key_press_event(GtkWidget *widget, GdkEventKey *event, + gpointer user_data) { - GtkTreeView *view = GTK_TREE_VIEW(widget); - GtkTreePath *path; - GtkTreeViewColumn *column; - GtkTreeIter iter; - struct menu *menu; - gint col; - - gtk_tree_view_get_cursor(view, &path, &column); - if (path == NULL) - return FALSE; - - if (event->keyval == GDK_space) { - if (gtk_tree_view_row_expanded(view, path)) - gtk_tree_view_collapse_row(view, path); - else - gtk_tree_view_expand_row(view, path, FALSE); - return TRUE; - } - if (event->keyval == GDK_KP_Enter) { - } - if (widget == tree1_w) - return FALSE; - - gtk_tree_model_get_iter(model2, &iter, path); - gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); - - if (!strcasecmp(event->string, "n")) - col = COL_NO; - else if (!strcasecmp(event->string, "m")) - col = COL_MOD; - else if (!strcasecmp(event->string, "y")) - col = COL_YES; - else - col = -1; - change_sym_value(menu, col); - - return FALSE; + GtkTreeView *view = GTK_TREE_VIEW(widget); + GtkTreePath *path; + GtkTreeViewColumn *column; + GtkTreeIter iter; + struct menu *menu; + gint col; + + gtk_tree_view_get_cursor(view, &path, &column); + if ( path == NULL ) + return FALSE; + + if ( event->keyval == GDK_space ) + { + if ( gtk_tree_view_row_expanded(view, path) ) + gtk_tree_view_collapse_row(view, path); + else + gtk_tree_view_expand_row(view, path, FALSE); + return TRUE; + } + if ( event->keyval == GDK_KP_Enter ) + { + } + if ( widget == tree1_w ) + return FALSE; + + gtk_tree_model_get_iter(model2, &iter, path); + gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); + + if ( !strcasecmp(event->string, "n") ) + col = COL_NO; + else if ( !strcasecmp(event->string, "m") ) + col = COL_MOD; + else if ( !strcasecmp(event->string, "y") ) + col = COL_YES; + else + col = -1; + change_sym_value(menu, col); + + return FALSE; } - /* Row 
selection changed: update help */ -void -on_treeview2_cursor_changed(GtkTreeView * treeview, gpointer user_data) +void on_treeview2_cursor_changed(GtkTreeView *treeview, gpointer user_data) { - GtkTreeSelection *selection; - GtkTreeIter iter; - struct menu *menu; - - selection = gtk_tree_view_get_selection(treeview); - if (gtk_tree_selection_get_selected(selection, &model2, &iter)) { - gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); - text_insert_help(menu); - } + GtkTreeSelection *selection; + GtkTreeIter iter; + struct menu *menu; + + selection = gtk_tree_view_get_selection(treeview); + if ( gtk_tree_selection_get_selected(selection, &model2, &iter) ) + { + gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); + text_insert_help(menu); + } } - /* User click: display sub-tree in the right frame. */ -gboolean -on_treeview1_button_press_event(GtkWidget * widget, - GdkEventButton * event, gpointer user_data) +gboolean on_treeview1_button_press_event(GtkWidget *widget, + GdkEventButton *event, + gpointer user_data) { - GtkTreeView *view = GTK_TREE_VIEW(widget); - GtkTreePath *path; - GtkTreeViewColumn *column; - GtkTreeIter iter; - struct menu *menu; - - gint tx = (gint) event->x; - gint ty = (gint) event->y; - gint cx, cy; - - gtk_tree_view_get_path_at_pos(view, tx, ty, &path, &column, &cx, - &cy); - if (path == NULL) - return FALSE; - - gtk_tree_model_get_iter(model1, &iter, path); - gtk_tree_model_get(model1, &iter, COL_MENU, &menu, -1); - - if (event->type == GDK_2BUTTON_PRESS) { - toggle_sym_value(menu); - current = menu; - display_tree_part(); - } else { - browsed = menu; - display_tree_part(); - } - - gtk_widget_realize(tree2_w); - gtk_tree_view_set_cursor(view, path, NULL, FALSE); - gtk_widget_grab_focus(tree2_w); - - return FALSE; + GtkTreeView *view = GTK_TREE_VIEW(widget); + GtkTreePath *path; + GtkTreeViewColumn *column; + GtkTreeIter iter; + struct menu *menu; + + gint tx = (gint)event->x; + gint ty = (gint)event->y; + gint cx, cy; + + gtk_tree_view_get_path_at_pos(view, tx, ty, &path, &column, &cx, &cy); + if ( path == NULL ) + return FALSE; + + gtk_tree_model_get_iter(model1, &iter, path); + gtk_tree_model_get(model1, &iter, COL_MENU, &menu, -1); + + if ( event->type == GDK_2BUTTON_PRESS ) + { + toggle_sym_value(menu); + current = menu; + display_tree_part(); + } + else + { + browsed = menu; + display_tree_part(); + } + + gtk_widget_realize(tree2_w); + gtk_tree_view_set_cursor(view, path, NULL, FALSE); + gtk_widget_grab_focus(tree2_w); + + return FALSE; } - /* Fill a row of strings */ static gchar **fill_row(struct menu *menu) { - static gchar *row[COL_NUMBER]; - struct symbol *sym = menu->sym; - const char *def; - int stype; - tristate val; - enum prop_type ptype; - int i; - - for (i = COL_OPTION; i <= COL_COLOR; i++) - g_free(row[i]); - bzero(row, sizeof(row)); - - row[COL_OPTION] = - g_strdup_printf("%s %s", _(menu_get_prompt(menu)), - sym && !sym_has_value(sym) ? "(NEW)" : ""); - - if (opt_mode == OPT_ALL && !menu_is_visible(menu)) - row[COL_COLOR] = g_strdup("DarkGray"); - else if (opt_mode == OPT_PROMPT && - menu_has_prompt(menu) && !menu_is_visible(menu)) - row[COL_COLOR] = g_strdup("DarkGray"); - else - row[COL_COLOR] = g_strdup("Black"); - - ptype = menu->prompt ? 
menu->prompt->type : P_UNKNOWN; - switch (ptype) { - case P_MENU: - row[COL_PIXBUF] = (gchar *) xpm_menu; - if (view_mode == SINGLE_VIEW) - row[COL_PIXVIS] = GINT_TO_POINTER(TRUE); - row[COL_BTNVIS] = GINT_TO_POINTER(FALSE); - break; - case P_COMMENT: - row[COL_PIXBUF] = (gchar *) xpm_void; - row[COL_PIXVIS] = GINT_TO_POINTER(FALSE); - row[COL_BTNVIS] = GINT_TO_POINTER(FALSE); - break; - default: - row[COL_PIXBUF] = (gchar *) xpm_void; - row[COL_PIXVIS] = GINT_TO_POINTER(FALSE); - row[COL_BTNVIS] = GINT_TO_POINTER(TRUE); - break; - } - - if (!sym) - return row; - row[COL_NAME] = g_strdup(sym->name); - - sym_calc_value(sym); - sym->flags &= ~SYMBOL_CHANGED; - - if (sym_is_choice(sym)) { // parse childs for getting final value - struct menu *child; - struct symbol *def_sym = sym_get_choice_value(sym); - struct menu *def_menu = NULL; - - row[COL_BTNVIS] = GINT_TO_POINTER(FALSE); - - for (child = menu->list; child; child = child->next) { - if (menu_is_visible(child) - && child->sym == def_sym) - def_menu = child; - } - - if (def_menu) - row[COL_VALUE] = - g_strdup(_(menu_get_prompt(def_menu))); - } - if (sym->flags & SYMBOL_CHOICEVAL) - row[COL_BTNRAD] = GINT_TO_POINTER(TRUE); - - stype = sym_get_type(sym); - switch (stype) { - case S_BOOLEAN: - if (GPOINTER_TO_INT(row[COL_PIXVIS]) == FALSE) - row[COL_BTNVIS] = GINT_TO_POINTER(TRUE); - if (sym_is_choice(sym)) - break; - /* fall through */ - case S_TRISTATE: - val = sym_get_tristate_value(sym); - switch (val) { - case no: - row[COL_NO] = g_strdup("N"); - row[COL_VALUE] = g_strdup("N"); - row[COL_BTNACT] = GINT_TO_POINTER(FALSE); - row[COL_BTNINC] = GINT_TO_POINTER(FALSE); - break; - case mod: - row[COL_MOD] = g_strdup("M"); - row[COL_VALUE] = g_strdup("M"); - row[COL_BTNINC] = GINT_TO_POINTER(TRUE); - break; - case yes: - row[COL_YES] = g_strdup("Y"); - row[COL_VALUE] = g_strdup("Y"); - row[COL_BTNACT] = GINT_TO_POINTER(TRUE); - row[COL_BTNINC] = GINT_TO_POINTER(FALSE); - break; - } - - if (val != no && sym_tristate_within_range(sym, no)) - row[COL_NO] = g_strdup("_"); - if (val != mod && sym_tristate_within_range(sym, mod)) - row[COL_MOD] = g_strdup("_"); - if (val != yes && sym_tristate_within_range(sym, yes)) - row[COL_YES] = g_strdup("_"); - break; - case S_INT: - case S_HEX: - case S_STRING: - def = sym_get_string_value(sym); - row[COL_VALUE] = g_strdup(def); - row[COL_EDIT] = GINT_TO_POINTER(TRUE); - row[COL_BTNVIS] = GINT_TO_POINTER(FALSE); - break; - } - - return row; + static gchar *row[COL_NUMBER]; + struct symbol *sym = menu->sym; + const char *def; + int stype; + tristate val; + enum prop_type ptype; + int i; + + for ( i = COL_OPTION; i <= COL_COLOR; i++ ) + g_free(row[i]); + bzero(row, sizeof(row)); + + row[COL_OPTION] = + g_strdup_printf("%s %s", _(menu_get_prompt(menu)), + sym && !sym_has_value(sym) ? "(NEW)" : ""); + + if ( opt_mode == OPT_ALL && !menu_is_visible(menu) ) + row[COL_COLOR] = g_strdup("DarkGray"); + else if ( opt_mode == OPT_PROMPT && menu_has_prompt(menu) && + !menu_is_visible(menu) ) + row[COL_COLOR] = g_strdup("DarkGray"); + else + row[COL_COLOR] = g_strdup("Black"); + + ptype = menu->prompt ? 
menu->prompt->type : P_UNKNOWN; + switch (ptype) + { + case P_MENU: + row[COL_PIXBUF] = (gchar *)xpm_menu; + if ( view_mode == SINGLE_VIEW ) + row[COL_PIXVIS] = GINT_TO_POINTER(TRUE); + row[COL_BTNVIS] = GINT_TO_POINTER(FALSE); + break; + case P_COMMENT: + row[COL_PIXBUF] = (gchar *)xpm_void; + row[COL_PIXVIS] = GINT_TO_POINTER(FALSE); + row[COL_BTNVIS] = GINT_TO_POINTER(FALSE); + break; + default: + row[COL_PIXBUF] = (gchar *)xpm_void; + row[COL_PIXVIS] = GINT_TO_POINTER(FALSE); + row[COL_BTNVIS] = GINT_TO_POINTER(TRUE); + break; + } + + if ( !sym ) + return row; + row[COL_NAME] = g_strdup(sym->name); + + sym_calc_value(sym); + sym->flags &= ~SYMBOL_CHANGED; + + if ( sym_is_choice(sym) ) + { // parse childs for getting final value + struct menu *child; + struct symbol *def_sym = sym_get_choice_value(sym); + struct menu *def_menu = NULL; + + row[COL_BTNVIS] = GINT_TO_POINTER(FALSE); + + for ( child = menu->list; child; child = child->next ) + { + if ( menu_is_visible(child) && child->sym == def_sym ) + def_menu = child; + } + + if ( def_menu ) + row[COL_VALUE] = g_strdup(_(menu_get_prompt(def_menu))); + } + if ( sym->flags & SYMBOL_CHOICEVAL ) + row[COL_BTNRAD] = GINT_TO_POINTER(TRUE); + + stype = sym_get_type(sym); + switch (stype) + { + case S_BOOLEAN: + if ( GPOINTER_TO_INT(row[COL_PIXVIS]) == FALSE ) + row[COL_BTNVIS] = GINT_TO_POINTER(TRUE); + if ( sym_is_choice(sym) ) + break; + /* fall through */ + case S_TRISTATE: + val = sym_get_tristate_value(sym); + switch (val) + { + case no: + row[COL_NO] = g_strdup("N"); + row[COL_VALUE] = g_strdup("N"); + row[COL_BTNACT] = GINT_TO_POINTER(FALSE); + row[COL_BTNINC] = GINT_TO_POINTER(FALSE); + break; + case mod: + row[COL_MOD] = g_strdup("M"); + row[COL_VALUE] = g_strdup("M"); + row[COL_BTNINC] = GINT_TO_POINTER(TRUE); + break; + case yes: + row[COL_YES] = g_strdup("Y"); + row[COL_VALUE] = g_strdup("Y"); + row[COL_BTNACT] = GINT_TO_POINTER(TRUE); + row[COL_BTNINC] = GINT_TO_POINTER(FALSE); + break; + } + + if ( val != no && sym_tristate_within_range(sym, no) ) + row[COL_NO] = g_strdup("_"); + if ( val != mod && sym_tristate_within_range(sym, mod) ) + row[COL_MOD] = g_strdup("_"); + if ( val != yes && sym_tristate_within_range(sym, yes) ) + row[COL_YES] = g_strdup("_"); + break; + case S_INT: + case S_HEX: + case S_STRING: + def = sym_get_string_value(sym); + row[COL_VALUE] = g_strdup(def); + row[COL_EDIT] = GINT_TO_POINTER(TRUE); + row[COL_BTNVIS] = GINT_TO_POINTER(FALSE); + break; + } + + return row; } - /* Set the node content with a row of strings */ -static void set_node(GtkTreeIter * node, struct menu *menu, gchar ** row) +static void set_node(GtkTreeIter *node, struct menu *menu, gchar **row) { - GdkColor color; - gboolean success; - GdkPixbuf *pix; - - pix = gdk_pixbuf_new_from_xpm_data((const char **) - row[COL_PIXBUF]); - - gdk_color_parse(row[COL_COLOR], &color); - gdk_colormap_alloc_colors(gdk_colormap_get_system(), &color, 1, - FALSE, FALSE, &success); - - gtk_tree_store_set(tree, node, - COL_OPTION, row[COL_OPTION], - COL_NAME, row[COL_NAME], - COL_NO, row[COL_NO], - COL_MOD, row[COL_MOD], - COL_YES, row[COL_YES], - COL_VALUE, row[COL_VALUE], - COL_MENU, (gpointer) menu, - COL_COLOR, &color, - COL_EDIT, GPOINTER_TO_INT(row[COL_EDIT]), - COL_PIXBUF, pix, - COL_PIXVIS, GPOINTER_TO_INT(row[COL_PIXVIS]), - COL_BTNVIS, GPOINTER_TO_INT(row[COL_BTNVIS]), - COL_BTNACT, GPOINTER_TO_INT(row[COL_BTNACT]), - COL_BTNINC, GPOINTER_TO_INT(row[COL_BTNINC]), - COL_BTNRAD, GPOINTER_TO_INT(row[COL_BTNRAD]), - -1); - - g_object_unref(pix); + GdkColor 
color; + gboolean success; + GdkPixbuf *pix; + + pix = gdk_pixbuf_new_from_xpm_data((const char **)row[COL_PIXBUF]); + + gdk_color_parse(row[COL_COLOR], &color); + gdk_colormap_alloc_colors(gdk_colormap_get_system(), &color, 1, FALSE, + FALSE, &success); + + gtk_tree_store_set( + tree, node, COL_OPTION, row[COL_OPTION], COL_NAME, row[COL_NAME], + COL_NO, row[COL_NO], COL_MOD, row[COL_MOD], COL_YES, row[COL_YES], + COL_VALUE, row[COL_VALUE], COL_MENU, (gpointer)menu, COL_COLOR, &color, + COL_EDIT, GPOINTER_TO_INT(row[COL_EDIT]), COL_PIXBUF, pix, COL_PIXVIS, + GPOINTER_TO_INT(row[COL_PIXVIS]), COL_BTNVIS, + GPOINTER_TO_INT(row[COL_BTNVIS]), COL_BTNACT, + GPOINTER_TO_INT(row[COL_BTNACT]), COL_BTNINC, + GPOINTER_TO_INT(row[COL_BTNINC]), COL_BTNRAD, + GPOINTER_TO_INT(row[COL_BTNRAD]), -1); + + g_object_unref(pix); } - /* Add a node to the tree */ static void place_node(struct menu *menu, char **row) { - GtkTreeIter *parent = parents[indent - 1]; - GtkTreeIter *node = parents[indent]; + GtkTreeIter *parent = parents[indent - 1]; + GtkTreeIter *node = parents[indent]; - gtk_tree_store_append(tree, node, parent); - set_node(node, menu, row); + gtk_tree_store_append(tree, node, parent); + set_node(node, menu, row); } - /* Find a node in the GTK+ tree */ static GtkTreeIter found; /* * Find a menu in the GtkTree starting at parent. */ -GtkTreeIter *gtktree_iter_find_node(GtkTreeIter * parent, - struct menu *tofind) +GtkTreeIter *gtktree_iter_find_node(GtkTreeIter *parent, struct menu *tofind) { - GtkTreeIter iter; - GtkTreeIter *child = &iter; - gboolean valid; - GtkTreeIter *ret; + GtkTreeIter iter; + GtkTreeIter *child = &iter; + gboolean valid; + GtkTreeIter *ret; - valid = gtk_tree_model_iter_children(model2, child, parent); - while (valid) { - struct menu *menu; + valid = gtk_tree_model_iter_children(model2, child, parent); + while ( valid ) + { + struct menu *menu; - gtk_tree_model_get(model2, child, 6, &menu, -1); + gtk_tree_model_get(model2, child, 6, &menu, -1); - if (menu == tofind) { - memcpy(&found, child, sizeof(GtkTreeIter)); - return &found; - } + if ( menu == tofind ) + { + memcpy(&found, child, sizeof(GtkTreeIter)); + return &found; + } - ret = gtktree_iter_find_node(child, tofind); - if (ret) - return ret; + ret = gtktree_iter_find_node(child, tofind); + if ( ret ) + return ret; - valid = gtk_tree_model_iter_next(model2, child); - } + valid = gtk_tree_model_iter_next(model2, child); + } - return NULL; + return NULL; } - /* * Update the tree by adding/removing entries * Does not change other nodes */ -static void update_tree(struct menu *src, GtkTreeIter * dst) +static void update_tree(struct menu *src, GtkTreeIter *dst) { - struct menu *child1; - GtkTreeIter iter, tmp; - GtkTreeIter *child2 = &iter; - gboolean valid; - GtkTreeIter *sibling; - struct symbol *sym; - struct menu *menu1, *menu2; - - if (src == &rootmenu) - indent = 1; - - valid = gtk_tree_model_iter_children(model2, child2, dst); - for (child1 = src->list; child1; child1 = child1->next) { - - sym = child1->sym; - - reparse: - menu1 = child1; - if (valid) - gtk_tree_model_get(model2, child2, COL_MENU, - &menu2, -1); - else - menu2 = NULL; // force adding of a first child + struct menu *child1; + GtkTreeIter iter, tmp; + GtkTreeIter *child2 = &iter; + gboolean valid; + GtkTreeIter *sibling; + struct symbol *sym; + struct menu *menu1, *menu2; + + if ( src == &rootmenu ) + indent = 1; + + valid = gtk_tree_model_iter_children(model2, child2, dst); + for ( child1 = src->list; child1; child1 = child1->next ) + { + sym = 
child1->sym; + + reparse: + menu1 = child1; + if ( valid ) + gtk_tree_model_get(model2, child2, COL_MENU, &menu2, -1); + else + menu2 = NULL; // force adding of a first child #ifdef DEBUG - printf("%*c%s | %s\n", indent, ' ', - menu1 ? menu_get_prompt(menu1) : "nil", - menu2 ? menu_get_prompt(menu2) : "nil"); + printf("%*c%s | %s\n", indent, ' ', + menu1 ? menu_get_prompt(menu1) : "nil", + menu2 ? menu_get_prompt(menu2) : "nil"); #endif - if ((opt_mode == OPT_NORMAL && !menu_is_visible(child1)) || - (opt_mode == OPT_PROMPT && !menu_has_prompt(child1)) || - (opt_mode == OPT_ALL && !menu_get_prompt(child1))) { - - /* remove node */ - if (gtktree_iter_find_node(dst, menu1) != NULL) { - memcpy(&tmp, child2, sizeof(GtkTreeIter)); - valid = gtk_tree_model_iter_next(model2, - child2); - gtk_tree_store_remove(tree2, &tmp); - if (!valid) - return; /* next parent */ - else - goto reparse; /* next child */ - } else - continue; - } - - if (menu1 != menu2) { - if (gtktree_iter_find_node(dst, menu1) == NULL) { // add node - if (!valid && !menu2) - sibling = NULL; - else - sibling = child2; - gtk_tree_store_insert_before(tree2, - child2, - dst, sibling); - set_node(child2, menu1, fill_row(menu1)); - if (menu2 == NULL) - valid = TRUE; - } else { // remove node - memcpy(&tmp, child2, sizeof(GtkTreeIter)); - valid = gtk_tree_model_iter_next(model2, - child2); - gtk_tree_store_remove(tree2, &tmp); - if (!valid) - return; // next parent - else - goto reparse; // next child - } - } else if (sym && (sym->flags & SYMBOL_CHANGED)) { - set_node(child2, menu1, fill_row(menu1)); - } - - indent++; - update_tree(child1, child2); - indent--; - - valid = gtk_tree_model_iter_next(model2, child2); - } + if ( (opt_mode == OPT_NORMAL && !menu_is_visible(child1)) || + (opt_mode == OPT_PROMPT && !menu_has_prompt(child1)) || + (opt_mode == OPT_ALL && !menu_get_prompt(child1)) ) + { + /* remove node */ + if ( gtktree_iter_find_node(dst, menu1) != NULL ) + { + memcpy(&tmp, child2, sizeof(GtkTreeIter)); + valid = gtk_tree_model_iter_next(model2, child2); + gtk_tree_store_remove(tree2, &tmp); + if ( !valid ) + return; /* next parent */ + else + goto reparse; /* next child */ + } + else + continue; + } + + if ( menu1 != menu2 ) + { + if ( gtktree_iter_find_node(dst, menu1) == NULL ) + { // add node + if ( !valid && !menu2 ) + sibling = NULL; + else + sibling = child2; + gtk_tree_store_insert_before(tree2, child2, dst, sibling); + set_node(child2, menu1, fill_row(menu1)); + if ( menu2 == NULL ) + valid = TRUE; + } + else + { // remove node + memcpy(&tmp, child2, sizeof(GtkTreeIter)); + valid = gtk_tree_model_iter_next(model2, child2); + gtk_tree_store_remove(tree2, &tmp); + if ( !valid ) + return; // next parent + else + goto reparse; // next child + } + } + else if ( sym && (sym->flags & SYMBOL_CHANGED) ) + { + set_node(child2, menu1, fill_row(menu1)); + } + + indent++; + update_tree(child1, child2); + indent--; + + valid = gtk_tree_model_iter_next(model2, child2); + } } - /* Display the whole tree (single/split/full view) */ static void display_tree(struct menu *menu) { - struct symbol *sym; - struct property *prop; - struct menu *child; - enum prop_type ptype; - - if (menu == &rootmenu) { - indent = 1; - current = &rootmenu; - } - - for (child = menu->list; child; child = child->next) { - prop = child->prompt; - sym = child->sym; - ptype = prop ? 
prop->type : P_UNKNOWN; - - if (sym) - sym->flags &= ~SYMBOL_CHANGED; - - if ((view_mode == SPLIT_VIEW) - && !(child->flags & MENU_ROOT) && (tree == tree1)) - continue; - - if ((view_mode == SPLIT_VIEW) && (child->flags & MENU_ROOT) - && (tree == tree2)) - continue; - - if ((opt_mode == OPT_NORMAL && menu_is_visible(child)) || - (opt_mode == OPT_PROMPT && menu_has_prompt(child)) || - (opt_mode == OPT_ALL && menu_get_prompt(child))) - place_node(child, fill_row(child)); + struct symbol *sym; + struct property *prop; + struct menu *child; + enum prop_type ptype; + + if ( menu == &rootmenu ) + { + indent = 1; + current = &rootmenu; + } + + for ( child = menu->list; child; child = child->next ) + { + prop = child->prompt; + sym = child->sym; + ptype = prop ? prop->type : P_UNKNOWN; + + if ( sym ) + sym->flags &= ~SYMBOL_CHANGED; + + if ( (view_mode == SPLIT_VIEW) && !(child->flags & MENU_ROOT) && + (tree == tree1) ) + continue; + + if ( (view_mode == SPLIT_VIEW) && (child->flags & MENU_ROOT) && + (tree == tree2) ) + continue; + + if ( (opt_mode == OPT_NORMAL && menu_is_visible(child)) || + (opt_mode == OPT_PROMPT && menu_has_prompt(child)) || + (opt_mode == OPT_ALL && menu_get_prompt(child)) ) + place_node(child, fill_row(child)); #ifdef DEBUG - printf("%*c%s: ", indent, ' ', menu_get_prompt(child)); - printf("%s", child->flags & MENU_ROOT ? "rootmenu | " : ""); - printf("%s", prop_get_type_name(ptype)); - printf(" | "); - if (sym) { - printf("%s", sym_type_name(sym->type)); - printf(" | "); - printf("%s", dbg_sym_flags(sym->flags)); - printf("\n"); - } else - printf("\n"); + printf("%*c%s: ", indent, ' ', menu_get_prompt(child)); + printf("%s", child->flags & MENU_ROOT ? "rootmenu | " : ""); + printf("%s", prop_get_type_name(ptype)); + printf(" | "); + if ( sym ) + { + printf("%s", sym_type_name(sym->type)); + printf(" | "); + printf("%s", dbg_sym_flags(sym->flags)); + printf("\n"); + } + else + printf("\n"); #endif - if ((view_mode != FULL_VIEW) && (ptype == P_MENU) - && (tree == tree2)) - continue; -/* - if (((menu != &rootmenu) && !(menu->flags & MENU_ROOT)) - || (view_mode == FULL_VIEW) - || (view_mode == SPLIT_VIEW))*/ - - /* Change paned position if the view is not in 'split mode' */ - if (view_mode == SINGLE_VIEW || view_mode == FULL_VIEW) { - gtk_paned_set_position(GTK_PANED(hpaned), 0); - } - - if (((view_mode == SINGLE_VIEW) && (menu->flags & MENU_ROOT)) - || (view_mode == FULL_VIEW) - || (view_mode == SPLIT_VIEW)) { - indent++; - display_tree(child); - indent--; - } - } + if ( (view_mode != FULL_VIEW) && (ptype == P_MENU) && (tree == tree2) ) + continue; + /* + if (((menu != &rootmenu) && !(menu->flags & MENU_ROOT)) + || (view_mode == FULL_VIEW) + || (view_mode == SPLIT_VIEW))*/ + + /* Change paned position if the view is not in 'split mode' */ + if ( view_mode == SINGLE_VIEW || view_mode == FULL_VIEW ) + { + gtk_paned_set_position(GTK_PANED(hpaned), 0); + } + + if ( ((view_mode == SINGLE_VIEW) && (menu->flags & MENU_ROOT)) || + (view_mode == FULL_VIEW) || (view_mode == SPLIT_VIEW) ) + { + indent++; + display_tree(child); + indent--; + } + } } /* Display a part of the tree starting at current node (single/split view) */ static void display_tree_part(void) { - if (tree2) - gtk_tree_store_clear(tree2); - if (view_mode == SINGLE_VIEW) - display_tree(current); - else if (view_mode == SPLIT_VIEW) - display_tree(browsed); - gtk_tree_view_expand_all(GTK_TREE_VIEW(tree2_w)); + if ( tree2 ) + gtk_tree_store_clear(tree2); + if ( view_mode == SINGLE_VIEW ) + display_tree(current); + else if ( 
view_mode == SPLIT_VIEW ) + display_tree(browsed); + gtk_tree_view_expand_all(GTK_TREE_VIEW(tree2_w)); } /* Display the list in the left frame (split view) */ static void display_list(void) { - if (tree1) - gtk_tree_store_clear(tree1); + if ( tree1 ) + gtk_tree_store_clear(tree1); - tree = tree1; - display_tree(&rootmenu); - gtk_tree_view_expand_all(GTK_TREE_VIEW(tree1_w)); - tree = tree2; + tree = tree1; + display_tree(&rootmenu); + gtk_tree_view_expand_all(GTK_TREE_VIEW(tree1_w)); + tree = tree2; } void fixup_rootmenu(struct menu *menu) { - struct menu *child; - static int menu_cnt = 0; - - menu->flags |= MENU_ROOT; - for (child = menu->list; child; child = child->next) { - if (child->prompt && child->prompt->type == P_MENU) { - menu_cnt++; - fixup_rootmenu(child); - menu_cnt--; - } else if (!menu_cnt) - fixup_rootmenu(child); - } + struct menu *child; + static int menu_cnt = 0; + + menu->flags |= MENU_ROOT; + for ( child = menu->list; child; child = child->next ) + { + if ( child->prompt && child->prompt->type == P_MENU ) + { + menu_cnt++; + fixup_rootmenu(child); + menu_cnt--; + } + else if ( !menu_cnt ) + fixup_rootmenu(child); + } } - /* Main */ int main(int ac, char *av[]) { - const char *name; - char *env; - gchar *glade_file; - - bindtextdomain(PACKAGE, LOCALEDIR); - bind_textdomain_codeset(PACKAGE, "UTF-8"); - textdomain(PACKAGE); - - /* GTK stuffs */ - gtk_set_locale(); - gtk_init(&ac, &av); - glade_init(); - - //add_pixmap_directory (PACKAGE_DATA_DIR "/" PACKAGE "/pixmaps"); - //add_pixmap_directory (PACKAGE_SOURCE_DIR "/pixmaps"); - - /* Determine GUI path */ - env = getenv(SRCTREE); - if (env) - glade_file = g_strconcat(env, "/scripts/kconfig/gconf.glade", NULL); - else if (av[0][0] == '/') - glade_file = g_strconcat(av[0], ".glade", NULL); - else - glade_file = g_strconcat(g_get_current_dir(), "/", av[0], ".glade", NULL); - - /* Conf stuffs */ - if (ac > 1 && av[1][0] == '-') { - switch (av[1][1]) { - case 'a': - //showAll = 1; - break; - case 's': - conf_set_message_callback(NULL); - break; - case 'h': - case '?': - printf("%s [-s] \n", av[0]); - exit(0); - } - name = av[2]; - } else - name = av[1]; - - conf_parse(name); - fixup_rootmenu(&rootmenu); - conf_read(NULL); - - /* Load the interface and connect signals */ - init_main_window(glade_file); - init_tree_model(); - init_left_tree(); - init_right_tree(); - - switch (view_mode) { - case SINGLE_VIEW: - display_tree_part(); - break; - case SPLIT_VIEW: - display_list(); - break; - case FULL_VIEW: - display_tree(&rootmenu); - break; - } - - gtk_main(); - - return 0; + const char *name; + char *env; + gchar *glade_file; + + bindtextdomain(PACKAGE, LOCALEDIR); + bind_textdomain_codeset(PACKAGE, "UTF-8"); + textdomain(PACKAGE); + + /* GTK stuffs */ + gtk_set_locale(); + gtk_init(&ac, &av); + glade_init(); + + // add_pixmap_directory (PACKAGE_DATA_DIR "/" PACKAGE "/pixmaps"); + // add_pixmap_directory (PACKAGE_SOURCE_DIR "/pixmaps"); + + /* Determine GUI path */ + env = getenv(SRCTREE); + if ( env ) + glade_file = g_strconcat(env, "/scripts/kconfig/gconf.glade", NULL); + else if ( av[0][0] == '/' ) + glade_file = g_strconcat(av[0], ".glade", NULL); + else + glade_file = + g_strconcat(g_get_current_dir(), "/", av[0], ".glade", NULL); + + /* Conf stuffs */ + if ( ac > 1 && av[1][0] == '-' ) + { + switch (av[1][1]) + { + case 'a': + // showAll = 1; + break; + case 's': + conf_set_message_callback(NULL); + break; + case 'h': + case '?': + printf("%s [-s] \n", av[0]); + exit(0); + } + name = av[2]; + } + else + name = av[1]; + + 
conf_parse(name); + fixup_rootmenu(&rootmenu); + conf_read(NULL); + + /* Load the interface and connect signals */ + init_main_window(glade_file); + init_tree_model(); + init_left_tree(); + init_right_tree(); + + switch (view_mode) + { + case SINGLE_VIEW: + display_tree_part(); + break; + case SPLIT_VIEW: + display_list(); + break; + case FULL_VIEW: + display_tree(&rootmenu); + break; + } + + gtk_main(); + + return 0; } static void conf_changed(void) { - bool changed = conf_get_changed(); - gtk_widget_set_sensitive(save_btn, changed); - gtk_widget_set_sensitive(save_menu_item, changed); + bool changed = conf_get_changed(); + gtk_widget_set_sensitive(save_btn, changed); + gtk_widget_set_sensitive(save_menu_item, changed); } diff --git a/xen/tools/kconfig/images.c b/xen/tools/kconfig/images.c index d4f84bd4a9..cb9277c6ae 100644 --- a/xen/tools/kconfig/images.c +++ b/xen/tools/kconfig/images.c @@ -3,324 +3,219 @@ * Released under the terms of the GNU GPL v2.0. */ -static const char *xpm_load[] = { -"22 22 5 1", -". c None", -"# c #000000", -"c c #838100", -"a c #ffff00", -"b c #ffffff", -"......................", -"......................", -"......................", -"............####....#.", -"...........#....##.##.", -"..................###.", -".................####.", -".####...........#####.", -"#abab##########.......", -"#babababababab#.......", -"#ababababababa#.......", -"#babababababab#.......", -"#ababab###############", -"#babab##cccccccccccc##", -"#abab##cccccccccccc##.", -"#bab##cccccccccccc##..", -"#ab##cccccccccccc##...", -"#b##cccccccccccc##....", -"###cccccccccccc##.....", -"##cccccccccccc##......", -"###############.......", -"......................"}; +static const char *xpm_load[] = {"22 22 5 1", + ". c None", + "# c #000000", + "c c #838100", + "a c #ffff00", + "b c #ffffff", + "......................", + "......................", + "......................", + "............####....#.", + "...........#....##.##.", + "..................###.", + ".................####.", + ".####...........#####.", + "#abab##########.......", + "#babababababab#.......", + "#ababababababa#.......", + "#babababababab#.......", + "#ababab###############", + "#babab##cccccccccccc##", + "#abab##cccccccccccc##.", + "#bab##cccccccccccc##..", + "#ab##cccccccccccc##...", + "#b##cccccccccccc##....", + "###cccccccccccc##.....", + "##cccccccccccc##......", + "###############.......", + "......................"}; -static const char *xpm_save[] = { -"22 22 5 1", -". c None", -"# c #000000", -"a c #838100", -"b c #c5c2c5", -"c c #cdb6d5", -"......................", -".####################.", -".#aa#bbbbbbbbbbbb#bb#.", -".#aa#bbbbbbbbbbbb#bb#.", -".#aa#bbbbbbbbbcbb####.", -".#aa#bbbccbbbbbbb#aa#.", -".#aa#bbbccbbbbbbb#aa#.", -".#aa#bbbbbbbbbbbb#aa#.", -".#aa#bbbbbbbbbbbb#aa#.", -".#aa#bbbbbbbbbbbb#aa#.", -".#aa#bbbbbbbbbbbb#aa#.", -".#aaa############aaa#.", -".#aaaaaaaaaaaaaaaaaa#.", -".#aaaaaaaaaaaaaaaaaa#.", -".#aaa#############aa#.", -".#aaa#########bbb#aa#.", -".#aaa#########bbb#aa#.", -".#aaa#########bbb#aa#.", -".#aaa#########bbb#aa#.", -".#aaa#########bbb#aa#.", -"..##################..", -"......................"}; +static const char *xpm_save[] = {"22 22 5 1", + ". 
c None", + "# c #000000", + "a c #838100", + "b c #c5c2c5", + "c c #cdb6d5", + "......................", + ".####################.", + ".#aa#bbbbbbbbbbbb#bb#.", + ".#aa#bbbbbbbbbbbb#bb#.", + ".#aa#bbbbbbbbbcbb####.", + ".#aa#bbbccbbbbbbb#aa#.", + ".#aa#bbbccbbbbbbb#aa#.", + ".#aa#bbbbbbbbbbbb#aa#.", + ".#aa#bbbbbbbbbbbb#aa#.", + ".#aa#bbbbbbbbbbbb#aa#.", + ".#aa#bbbbbbbbbbbb#aa#.", + ".#aaa############aaa#.", + ".#aaaaaaaaaaaaaaaaaa#.", + ".#aaaaaaaaaaaaaaaaaa#.", + ".#aaa#############aa#.", + ".#aaa#########bbb#aa#.", + ".#aaa#########bbb#aa#.", + ".#aaa#########bbb#aa#.", + ".#aaa#########bbb#aa#.", + ".#aaa#########bbb#aa#.", + "..##################..", + "......................"}; -static const char *xpm_back[] = { -"22 22 3 1", -". c None", -"# c #000083", -"a c #838183", -"......................", -"......................", -"......................", -"......................", -"......................", -"...........######a....", -"..#......##########...", -"..##...####......##a..", -"..###.###.........##..", -"..######..........##..", -"..#####...........##..", -"..######..........##..", -"..#######.........##..", -"..########.......##a..", -"...............a###...", -"...............###....", -"......................", -"......................", -"......................", -"......................", -"......................", -"......................"}; +static const char *xpm_back[] = {"22 22 3 1", + ". c None", + "# c #000083", + "a c #838183", + "......................", + "......................", + "......................", + "......................", + "......................", + "...........######a....", + "..#......##########...", + "..##...####......##a..", + "..###.###.........##..", + "..######..........##..", + "..#####...........##..", + "..######..........##..", + "..#######.........##..", + "..########.......##a..", + "...............a###...", + "...............###....", + "......................", + "......................", + "......................", + "......................", + "......................", + "......................"}; -static const char *xpm_tree_view[] = { -"22 22 2 1", -". c None", -"# c #000000", -"......................", -"......................", -"......#...............", -"......#...............", -"......#...............", -"......#...............", -"......#...............", -"......########........", -"......#...............", -"......#...............", -"......#...............", -"......#...............", -"......#...............", -"......########........", -"......#...............", -"......#...............", -"......#...............", -"......#...............", -"......#...............", -"......########........", -"......................", -"......................"}; +static const char *xpm_tree_view[] = {"22 22 2 1", + ". c None", + "# c #000000", + "......................", + "......................", + "......#...............", + "......#...............", + "......#...............", + "......#...............", + "......#...............", + "......########........", + "......#...............", + "......#...............", + "......#...............", + "......#...............", + "......#...............", + "......########........", + "......#...............", + "......#...............", + "......#...............", + "......#...............", + "......#...............", + "......########........", + "......................", + "......................"}; -static const char *xpm_single_view[] = { -"22 22 2 1", -". 
c None", -"# c #000000", -"......................", -"......................", -"..........#...........", -"..........#...........", -"..........#...........", -"..........#...........", -"..........#...........", -"..........#...........", -"..........#...........", -"..........#...........", -"..........#...........", -"..........#...........", -"..........#...........", -"..........#...........", -"..........#...........", -"..........#...........", -"..........#...........", -"..........#...........", -"..........#...........", -"..........#...........", -"......................", -"......................"}; +static const char *xpm_single_view[] = {"22 22 2 1", + ". c None", + "# c #000000", + "......................", + "......................", + "..........#...........", + "..........#...........", + "..........#...........", + "..........#...........", + "..........#...........", + "..........#...........", + "..........#...........", + "..........#...........", + "..........#...........", + "..........#...........", + "..........#...........", + "..........#...........", + "..........#...........", + "..........#...........", + "..........#...........", + "..........#...........", + "..........#...........", + "..........#...........", + "......................", + "......................"}; -static const char *xpm_split_view[] = { -"22 22 2 1", -". c None", -"# c #000000", -"......................", -"......................", -"......#......#........", -"......#......#........", -"......#......#........", -"......#......#........", -"......#......#........", -"......#......#........", -"......#......#........", -"......#......#........", -"......#......#........", -"......#......#........", -"......#......#........", -"......#......#........", -"......#......#........", -"......#......#........", -"......#......#........", -"......#......#........", -"......#......#........", -"......#......#........", -"......................", -"......................"}; +static const char *xpm_split_view[] = {"22 22 2 1", + ". c None", + "# c #000000", + "......................", + "......................", + "......#......#........", + "......#......#........", + "......#......#........", + "......#......#........", + "......#......#........", + "......#......#........", + "......#......#........", + "......#......#........", + "......#......#........", + "......#......#........", + "......#......#........", + "......#......#........", + "......#......#........", + "......#......#........", + "......#......#........", + "......#......#........", + "......#......#........", + "......#......#........", + "......................", + "......................"}; static const char *xpm_symbol_no[] = { -"12 12 2 1", -" c white", -". c black", -" ", -" .......... ", -" . . ", -" . . ", -" . . ", -" . . ", -" . . ", -" . . ", -" . . ", -" . . ", -" .......... ", -" "}; + "12 12 2 1", " c white", ". c black", " ", + " .......... ", " . . ", " . . ", " . . ", + " . . ", " . . ", " . . ", " . . ", + " . . ", " .......... ", " "}; static const char *xpm_symbol_mod[] = { -"12 12 2 1", -" c white", -". c black", -" ", -" .......... ", -" . . ", -" . . ", -" . .. . ", -" . .... . ", -" . .... . ", -" . .. . ", -" . . ", -" . . ", -" .......... ", -" "}; + "12 12 2 1", " c white", ". c black", " ", + " .......... ", " . . ", " . . ", " . .. . ", + " . .... . ", " . .... . ", " . .. . ", " . . ", + " . . ", " .......... ", " "}; static const char *xpm_symbol_yes[] = { -"12 12 2 1", -" c white", -". 
c black", -" ", -" .......... ", -" . . ", -" . . ", -" . . . ", -" . .. . ", -" . . .. . ", -" . .... . ", -" . .. . ", -" . . ", -" .......... ", -" "}; + "12 12 2 1", " c white", ". c black", " ", + " .......... ", " . . ", " . . ", " . . . ", + " . .. . ", " . . .. . ", " . .... . ", " . .. . ", + " . . ", " .......... ", " "}; static const char *xpm_choice_no[] = { -"12 12 2 1", -" c white", -". c black", -" ", -" .... ", -" .. .. ", -" . . ", -" . . ", -" . . ", -" . . ", -" . . ", -" . . ", -" .. .. ", -" .... ", -" "}; + "12 12 2 1", " c white", ". c black", " ", + " .... ", " .. .. ", " . . ", " . . ", + " . . ", " . . ", " . . ", " . . ", + " .. .. ", " .... ", " "}; static const char *xpm_choice_yes[] = { -"12 12 2 1", -" c white", -". c black", -" ", -" .... ", -" .. .. ", -" . . ", -" . .. . ", -" . .... . ", -" . .... . ", -" . .. . ", -" . . ", -" .. .. ", -" .... ", -" "}; + "12 12 2 1", " c white", ". c black", " ", + " .... ", " .. .. ", " . . ", " . .. . ", + " . .... . ", " . .... . ", " . .. . ", " . . ", + " .. .. ", " .... ", " "}; static const char *xpm_menu[] = { -"12 12 2 1", -" c white", -". c black", -" ", -" .......... ", -" . . ", -" . .. . ", -" . .... . ", -" . ...... . ", -" . ...... . ", -" . .... . ", -" . .. . ", -" . . ", -" .......... ", -" "}; + "12 12 2 1", " c white", ". c black", " ", + " .......... ", " . . ", " . .. . ", " . .... . ", + " . ...... . ", " . ...... . ", " . .... . ", " . .. . ", + " . . ", " .......... ", " "}; static const char *xpm_menu_inv[] = { -"12 12 2 1", -" c white", -". c black", -" ", -" .......... ", -" .......... ", -" .. ...... ", -" .. .... ", -" .. .. ", -" .. .. ", -" .. .... ", -" .. ...... ", -" .......... ", -" .......... ", -" "}; + "12 12 2 1", " c white", ". c black", " ", + " .......... ", " .......... ", " .. ...... ", " .. .... ", + " .. .. ", " .. .. ", " .. .... ", " .. ...... ", + " .......... ", " .......... ", " "}; static const char *xpm_menuback[] = { -"12 12 2 1", -" c white", -". c black", -" ", -" .......... ", -" . . ", -" . .. . ", -" . .... . ", -" . ...... . ", -" . ...... . ", -" . .... . ", -" . .. . ", -" . . ", -" .......... ", -" "}; + "12 12 2 1", " c white", ". c black", " ", + " .......... ", " . . ", " . .. . ", " . .... . ", + " . ...... . ", " . ...... . ", " . .... . ", " . .. . ", + " . . ", " .......... ", " "}; static const char *xpm_void[] = { -"12 12 2 1", -" c white", -". c black", -" ", -" ", -" ", -" ", -" ", -" ", -" ", -" ", -" ", -" ", -" ", -" "}; + "12 12 2 1", " c white", ". 
c black", " ", + " ", " ", " ", " ", + " ", " ", " ", " ", + " ", " ", " "}; diff --git a/xen/tools/kconfig/kxgettext.c b/xen/tools/kconfig/kxgettext.c index 2858738b22..6aba67c5ff 100644 --- a/xen/tools/kconfig/kxgettext.c +++ b/xen/tools/kconfig/kxgettext.c @@ -9,227 +9,240 @@ #include "lkc.h" -static char *escape(const char* text, char *bf, int len) +static char *escape(const char *text, char *bf, int len) { - char *bfp = bf; - int multiline = strchr(text, '\n') != NULL; - int eol = 0; - int textlen = strlen(text); - - if ((textlen > 0) && (text[textlen-1] == '\n')) - eol = 1; - - *bfp++ = '"'; - --len; - - if (multiline) { - *bfp++ = '"'; - *bfp++ = '\n'; - *bfp++ = '"'; - len -= 3; - } - - while (*text != '\0' && len > 1) { - if (*text == '"') - *bfp++ = '\\'; - else if (*text == '\n') { - *bfp++ = '\\'; - *bfp++ = 'n'; - *bfp++ = '"'; - *bfp++ = '\n'; - *bfp++ = '"'; - len -= 5; - ++text; - goto next; - } - else if (*text == '\\') { - *bfp++ = '\\'; - len--; - } - *bfp++ = *text++; -next: - --len; - } - - if (multiline && eol) - bfp -= 3; - - *bfp++ = '"'; - *bfp = '\0'; - - return bf; + char *bfp = bf; + int multiline = strchr(text, '\n') != NULL; + int eol = 0; + int textlen = strlen(text); + + if ( (textlen > 0) && (text[textlen - 1] == '\n') ) + eol = 1; + + *bfp++ = '"'; + --len; + + if ( multiline ) + { + *bfp++ = '"'; + *bfp++ = '\n'; + *bfp++ = '"'; + len -= 3; + } + + while ( *text != '\0' && len > 1 ) + { + if ( *text == '"' ) + *bfp++ = '\\'; + else if ( *text == '\n' ) + { + *bfp++ = '\\'; + *bfp++ = 'n'; + *bfp++ = '"'; + *bfp++ = '\n'; + *bfp++ = '"'; + len -= 5; + ++text; + goto next; + } + else if ( *text == '\\' ) + { + *bfp++ = '\\'; + len--; + } + *bfp++ = *text++; + next: + --len; + } + + if ( multiline && eol ) + bfp -= 3; + + *bfp++ = '"'; + *bfp = '\0'; + + return bf; } -struct file_line { - struct file_line *next; - const char *file; - int lineno; +struct file_line +{ + struct file_line *next; + const char *file; + int lineno; }; static struct file_line *file_line__new(const char *file, int lineno) { - struct file_line *self = malloc(sizeof(*self)); + struct file_line *self = malloc(sizeof(*self)); - if (self == NULL) - goto out; + if ( self == NULL ) + goto out; - self->file = file; - self->lineno = lineno; - self->next = NULL; + self->file = file; + self->lineno = lineno; + self->next = NULL; out: - return self; + return self; } -struct message { - const char *msg; - const char *option; - struct message *next; - struct file_line *files; +struct message +{ + const char *msg; + const char *option; + struct message *next; + struct file_line *files; }; static struct message *message__list; static struct message *message__new(const char *msg, char *option, - const char *file, int lineno) + const char *file, int lineno) { - struct message *self = malloc(sizeof(*self)); + struct message *self = malloc(sizeof(*self)); - if (self == NULL) - goto out; + if ( self == NULL ) + goto out; - self->files = file_line__new(file, lineno); - if (self->files == NULL) - goto out_fail; + self->files = file_line__new(file, lineno); + if ( self->files == NULL ) + goto out_fail; - self->msg = strdup(msg); - if (self->msg == NULL) - goto out_fail_msg; + self->msg = strdup(msg); + if ( self->msg == NULL ) + goto out_fail_msg; - self->option = option; - self->next = NULL; + self->option = option; + self->next = NULL; out: - return self; + return self; out_fail_msg: - free(self->files); + free(self->files); out_fail: - free(self); - self = NULL; - goto out; + free(self); + self = NULL; + 
goto out; } static struct message *mesage__find(const char *msg) { - struct message *m = message__list; + struct message *m = message__list; - while (m != NULL) { - if (strcmp(m->msg, msg) == 0) - break; - m = m->next; - } + while ( m != NULL ) + { + if ( strcmp(m->msg, msg) == 0 ) + break; + m = m->next; + } - return m; + return m; } static int message__add_file_line(struct message *self, const char *file, - int lineno) + int lineno) { - int rc = -1; - struct file_line *fl = file_line__new(file, lineno); + int rc = -1; + struct file_line *fl = file_line__new(file, lineno); - if (fl == NULL) - goto out; + if ( fl == NULL ) + goto out; - fl->next = self->files; - self->files = fl; - rc = 0; + fl->next = self->files; + self->files = fl; + rc = 0; out: - return rc; + return rc; } static int message__add(const char *msg, char *option, const char *file, - int lineno) + int lineno) { - int rc = 0; - char bf[16384]; - char *escaped = escape(msg, bf, sizeof(bf)); - struct message *m = mesage__find(escaped); - - if (m != NULL) - rc = message__add_file_line(m, file, lineno); - else { - m = message__new(escaped, option, file, lineno); - - if (m != NULL) { - m->next = message__list; - message__list = m; - } else - rc = -1; - } - return rc; + int rc = 0; + char bf[16384]; + char *escaped = escape(msg, bf, sizeof(bf)); + struct message *m = mesage__find(escaped); + + if ( m != NULL ) + rc = message__add_file_line(m, file, lineno); + else + { + m = message__new(escaped, option, file, lineno); + + if ( m != NULL ) + { + m->next = message__list; + message__list = m; + } + else + rc = -1; + } + return rc; } static void menu_build_message_list(struct menu *menu) { - struct menu *child; + struct menu *child; - message__add(menu_get_prompt(menu), NULL, - menu->file == NULL ? "Root Menu" : menu->file->name, - menu->lineno); + message__add(menu_get_prompt(menu), NULL, + menu->file == NULL ? "Root Menu" : menu->file->name, + menu->lineno); - if (menu->sym != NULL && menu_has_help(menu)) - message__add(menu_get_help(menu), menu->sym->name, - menu->file == NULL ? "Root Menu" : menu->file->name, - menu->lineno); + if ( menu->sym != NULL && menu_has_help(menu) ) + message__add(menu_get_help(menu), menu->sym->name, + menu->file == NULL ? 
"Root Menu" : menu->file->name, + menu->lineno); - for (child = menu->list; child != NULL; child = child->next) - if (child->prompt != NULL) - menu_build_message_list(child); + for ( child = menu->list; child != NULL; child = child->next ) + if ( child->prompt != NULL ) + menu_build_message_list(child); } static void message__print_file_lineno(struct message *self) { - struct file_line *fl = self->files; + struct file_line *fl = self->files; - putchar('\n'); - if (self->option != NULL) - printf("# %s:00000\n", self->option); + putchar('\n'); + if ( self->option != NULL ) + printf("# %s:00000\n", self->option); - printf("#: %s:%d", fl->file, fl->lineno); - fl = fl->next; + printf("#: %s:%d", fl->file, fl->lineno); + fl = fl->next; - while (fl != NULL) { - printf(", %s:%d", fl->file, fl->lineno); - fl = fl->next; - } + while ( fl != NULL ) + { + printf(", %s:%d", fl->file, fl->lineno); + fl = fl->next; + } - putchar('\n'); + putchar('\n'); } static void message__print_gettext_msgid_msgstr(struct message *self) { - message__print_file_lineno(self); + message__print_file_lineno(self); - printf("msgid %s\n" - "msgstr \"\"\n", self->msg); + printf("msgid %s\n" + "msgstr \"\"\n", + self->msg); } static void menu__xgettext(void) { - struct message *m = message__list; - - while (m != NULL) { - /* skip empty lines ("") */ - if (strlen(m->msg) > sizeof("\"\"")) - message__print_gettext_msgid_msgstr(m); - m = m->next; - } + struct message *m = message__list; + + while ( m != NULL ) + { + /* skip empty lines ("") */ + if ( strlen(m->msg) > sizeof("\"\"") ) + message__print_gettext_msgid_msgstr(m); + m = m->next; + } } int main(int ac, char **av) { - conf_parse(av[1]); + conf_parse(av[1]); - menu_build_message_list(menu_get_root_menu(NULL)); - menu__xgettext(); - return 0; + menu_build_message_list(menu_get_root_menu(NULL)); + menu__xgettext(); + return 0; } diff --git a/xen/tools/kconfig/lxdialog/checklist.c b/xen/tools/kconfig/lxdialog/checklist.c index 8d016faa28..78ce8777e6 100644 --- a/xen/tools/kconfig/lxdialog/checklist.c +++ b/xen/tools/kconfig/lxdialog/checklist.c @@ -28,86 +28,92 @@ static int list_width, check_x, item_x; /* * Print list item */ -static void print_item(WINDOW * win, int choice, int selected) +static void print_item(WINDOW *win, int choice, int selected) { - int i; - char *list_item = malloc(list_width + 1); - - strncpy(list_item, item_str(), list_width - item_x); - list_item[list_width - item_x] = '\0'; - - /* Clear 'residue' of last item */ - wattrset(win, dlg.menubox.atr); - wmove(win, choice, 0); - for (i = 0; i < list_width; i++) - waddch(win, ' '); - - wmove(win, choice, check_x); - wattrset(win, selected ? dlg.check_selected.atr - : dlg.check.atr); - if (!item_is_tag(':')) - wprintw(win, "(%c)", item_is_tag('X') ? 'X' : ' '); - - wattrset(win, selected ? dlg.tag_selected.atr : dlg.tag.atr); - mvwaddch(win, choice, item_x, list_item[0]); - wattrset(win, selected ? dlg.item_selected.atr : dlg.item.atr); - waddstr(win, list_item + 1); - if (selected) { - wmove(win, choice, check_x + 1); - wrefresh(win); - } - free(list_item); + int i; + char *list_item = malloc(list_width + 1); + + strncpy(list_item, item_str(), list_width - item_x); + list_item[list_width - item_x] = '\0'; + + /* Clear 'residue' of last item */ + wattrset(win, dlg.menubox.atr); + wmove(win, choice, 0); + for ( i = 0; i < list_width; i++ ) + waddch(win, ' '); + + wmove(win, choice, check_x); + wattrset(win, selected ? 
dlg.check_selected.atr : dlg.check.atr); + if ( !item_is_tag(':') ) + wprintw(win, "(%c)", item_is_tag('X') ? 'X' : ' '); + + wattrset(win, selected ? dlg.tag_selected.atr : dlg.tag.atr); + mvwaddch(win, choice, item_x, list_item[0]); + wattrset(win, selected ? dlg.item_selected.atr : dlg.item.atr); + waddstr(win, list_item + 1); + if ( selected ) + { + wmove(win, choice, check_x + 1); + wrefresh(win); + } + free(list_item); } /* * Print the scroll indicators. */ -static void print_arrows(WINDOW * win, int choice, int item_no, int scroll, - int y, int x, int height) +static void print_arrows(WINDOW *win, int choice, int item_no, int scroll, + int y, int x, int height) { - wmove(win, y, x); - - if (scroll > 0) { - wattrset(win, dlg.uarrow.atr); - waddch(win, ACS_UARROW); - waddstr(win, "(-)"); - } else { - wattrset(win, dlg.menubox.atr); - waddch(win, ACS_HLINE); - waddch(win, ACS_HLINE); - waddch(win, ACS_HLINE); - waddch(win, ACS_HLINE); - } - - y = y + height + 1; - wmove(win, y, x); - - if ((height < item_no) && (scroll + choice < item_no - 1)) { - wattrset(win, dlg.darrow.atr); - waddch(win, ACS_DARROW); - waddstr(win, "(+)"); - } else { - wattrset(win, dlg.menubox_border.atr); - waddch(win, ACS_HLINE); - waddch(win, ACS_HLINE); - waddch(win, ACS_HLINE); - waddch(win, ACS_HLINE); - } + wmove(win, y, x); + + if ( scroll > 0 ) + { + wattrset(win, dlg.uarrow.atr); + waddch(win, ACS_UARROW); + waddstr(win, "(-)"); + } + else + { + wattrset(win, dlg.menubox.atr); + waddch(win, ACS_HLINE); + waddch(win, ACS_HLINE); + waddch(win, ACS_HLINE); + waddch(win, ACS_HLINE); + } + + y = y + height + 1; + wmove(win, y, x); + + if ( (height < item_no) && (scroll + choice < item_no - 1) ) + { + wattrset(win, dlg.darrow.atr); + waddch(win, ACS_DARROW); + waddstr(win, "(+)"); + } + else + { + wattrset(win, dlg.menubox_border.atr); + waddch(win, ACS_HLINE); + waddch(win, ACS_HLINE); + waddch(win, ACS_HLINE); + waddch(win, ACS_HLINE); + } } /* * Display the termination buttons */ -static void print_buttons(WINDOW * dialog, int height, int width, int selected) +static void print_buttons(WINDOW *dialog, int height, int width, int selected) { - int x = width / 2 - 11; - int y = height - 2; + int x = width / 2 - 11; + int y = height - 2; - print_button(dialog, gettext("Select"), y, x, selected == 0); - print_button(dialog, gettext(" Help "), y, x + 14, selected == 1); + print_button(dialog, gettext("Select"), y, x, selected == 0); + print_button(dialog, gettext(" Help "), y, x + 14, selected == 1); - wmove(dialog, y, x + 1 + 14 * selected); - wrefresh(dialog); + wmove(dialog, y, x + 1 + 14 * selected); + wrefresh(dialog); } /* @@ -115,218 +121,234 @@ static void print_buttons(WINDOW * dialog, int height, int width, int selected) * in the style of radiolist (only one option turned on at a time). 
*/ int dialog_checklist(const char *title, const char *prompt, int height, - int width, int list_height) + int width, int list_height) { - int i, x, y, box_x, box_y; - int key = 0, button = 0, choice = 0, scroll = 0, max_choice; - WINDOW *dialog, *list; - - /* which item to highlight */ - item_foreach() { - if (item_is_tag('X')) - choice = item_n(); - if (item_is_selected()) { - choice = item_n(); - break; - } - } + int i, x, y, box_x, box_y; + int key = 0, button = 0, choice = 0, scroll = 0, max_choice; + WINDOW *dialog, *list; + + /* which item to highlight */ + item_foreach () + { + if ( item_is_tag('X') ) + choice = item_n(); + if ( item_is_selected() ) + { + choice = item_n(); + break; + } + } do_resize: - if (getmaxy(stdscr) < (height + CHECKLIST_HEIGTH_MIN)) - return -ERRDISPLAYTOOSMALL; - if (getmaxx(stdscr) < (width + CHECKLIST_WIDTH_MIN)) - return -ERRDISPLAYTOOSMALL; - - max_choice = MIN(list_height, item_count()); - - /* center dialog box on screen */ - x = (getmaxx(stdscr) - width) / 2; - y = (getmaxy(stdscr) - height) / 2; - - draw_shadow(stdscr, y, x, height, width); - - dialog = newwin(height, width, y, x); - keypad(dialog, TRUE); - - draw_box(dialog, 0, 0, height, width, - dlg.dialog.atr, dlg.border.atr); - wattrset(dialog, dlg.border.atr); - mvwaddch(dialog, height - 3, 0, ACS_LTEE); - for (i = 0; i < width - 2; i++) - waddch(dialog, ACS_HLINE); - wattrset(dialog, dlg.dialog.atr); - waddch(dialog, ACS_RTEE); - - print_title(dialog, title, width); - - wattrset(dialog, dlg.dialog.atr); - print_autowrap(dialog, prompt, width - 2, 1, 3); - - list_width = width - 6; - box_y = height - list_height - 5; - box_x = (width - list_width) / 2 - 1; - - /* create new window for the list */ - list = subwin(dialog, list_height, list_width, y + box_y + 1, - x + box_x + 1); - - keypad(list, TRUE); - - /* draw a box around the list items */ - draw_box(dialog, box_y, box_x, list_height + 2, list_width + 2, - dlg.menubox_border.atr, dlg.menubox.atr); - - /* Find length of longest item in order to center checklist */ - check_x = 0; - item_foreach() - check_x = MAX(check_x, strlen(item_str()) + 4); - check_x = MIN(check_x, list_width); - - check_x = (list_width - check_x) / 2; - item_x = check_x + 4; - - if (choice >= list_height) { - scroll = choice - list_height + 1; - choice -= scroll; - } - - /* Print the list */ - for (i = 0; i < max_choice; i++) { - item_set(scroll + i); - print_item(list, i, i == choice); - } - - print_arrows(dialog, choice, item_count(), scroll, - box_y, box_x + check_x + 5, list_height); - - print_buttons(dialog, height, width, 0); - - wnoutrefresh(dialog); - wnoutrefresh(list); - doupdate(); - - while (key != KEY_ESC) { - key = wgetch(dialog); - - for (i = 0; i < max_choice; i++) { - item_set(i + scroll); - if (toupper(key) == toupper(item_str()[0])) - break; - } - - if (i < max_choice || key == KEY_UP || key == KEY_DOWN || - key == '+' || key == '-') { - if (key == KEY_UP || key == '-') { - if (!choice) { - if (!scroll) - continue; - /* Scroll list down */ - if (list_height > 1) { - /* De-highlight current first item */ - item_set(scroll); - print_item(list, 0, FALSE); - scrollok(list, TRUE); - wscrl(list, -1); - scrollok(list, FALSE); - } - scroll--; - item_set(scroll); - print_item(list, 0, TRUE); - print_arrows(dialog, choice, item_count(), - scroll, box_y, box_x + check_x + 5, list_height); - - wnoutrefresh(dialog); - wrefresh(list); - - continue; /* wait for another key press */ - } else - i = choice - 1; - } else if (key == KEY_DOWN || key == '+') { - if 
(choice == max_choice - 1) { - if (scroll + choice >= item_count() - 1) - continue; - /* Scroll list up */ - if (list_height > 1) { - /* De-highlight current last item before scrolling up */ - item_set(scroll + max_choice - 1); - print_item(list, - max_choice - 1, - FALSE); - scrollok(list, TRUE); - wscrl(list, 1); - scrollok(list, FALSE); - } - scroll++; - item_set(scroll + max_choice - 1); - print_item(list, max_choice - 1, TRUE); - - print_arrows(dialog, choice, item_count(), - scroll, box_y, box_x + check_x + 5, list_height); - - wnoutrefresh(dialog); - wrefresh(list); - - continue; /* wait for another key press */ - } else - i = choice + 1; - } - if (i != choice) { - /* De-highlight current item */ - item_set(scroll + choice); - print_item(list, choice, FALSE); - /* Highlight new item */ - choice = i; - item_set(scroll + choice); - print_item(list, choice, TRUE); - wnoutrefresh(dialog); - wrefresh(list); - } - continue; /* wait for another key press */ - } - switch (key) { - case 'H': - case 'h': - case '?': - button = 1; - /* fall-through */ - case 'S': - case 's': - case ' ': - case '\n': - item_foreach() - item_set_selected(0); - item_set(scroll + choice); - item_set_selected(1); - delwin(list); - delwin(dialog); - return button; - case TAB: - case KEY_LEFT: - case KEY_RIGHT: - button = ((key == KEY_LEFT ? --button : ++button) < 0) - ? 1 : (button > 1 ? 0 : button); - - print_buttons(dialog, height, width, button); - wrefresh(dialog); - break; - case 'X': - case 'x': - key = KEY_ESC; - break; - case KEY_ESC: - key = on_key_esc(dialog); - break; - case KEY_RESIZE: - delwin(list); - delwin(dialog); - on_key_resize(); - goto do_resize; - } - - /* Now, update everything... */ - doupdate(); - } - delwin(list); - delwin(dialog); - return key; /* ESC pressed */ + if ( getmaxy(stdscr) < (height + CHECKLIST_HEIGTH_MIN) ) + return -ERRDISPLAYTOOSMALL; + if ( getmaxx(stdscr) < (width + CHECKLIST_WIDTH_MIN) ) + return -ERRDISPLAYTOOSMALL; + + max_choice = MIN(list_height, item_count()); + + /* center dialog box on screen */ + x = (getmaxx(stdscr) - width) / 2; + y = (getmaxy(stdscr) - height) / 2; + + draw_shadow(stdscr, y, x, height, width); + + dialog = newwin(height, width, y, x); + keypad(dialog, TRUE); + + draw_box(dialog, 0, 0, height, width, dlg.dialog.atr, dlg.border.atr); + wattrset(dialog, dlg.border.atr); + mvwaddch(dialog, height - 3, 0, ACS_LTEE); + for ( i = 0; i < width - 2; i++ ) + waddch(dialog, ACS_HLINE); + wattrset(dialog, dlg.dialog.atr); + waddch(dialog, ACS_RTEE); + + print_title(dialog, title, width); + + wattrset(dialog, dlg.dialog.atr); + print_autowrap(dialog, prompt, width - 2, 1, 3); + + list_width = width - 6; + box_y = height - list_height - 5; + box_x = (width - list_width) / 2 - 1; + + /* create new window for the list */ + list = + subwin(dialog, list_height, list_width, y + box_y + 1, x + box_x + 1); + + keypad(list, TRUE); + + /* draw a box around the list items */ + draw_box(dialog, box_y, box_x, list_height + 2, list_width + 2, + dlg.menubox_border.atr, dlg.menubox.atr); + + /* Find length of longest item in order to center checklist */ + check_x = 0; + item_foreach () + check_x = MAX(check_x, strlen(item_str()) + 4); + check_x = MIN(check_x, list_width); + + check_x = (list_width - check_x) / 2; + item_x = check_x + 4; + + if ( choice >= list_height ) + { + scroll = choice - list_height + 1; + choice -= scroll; + } + + /* Print the list */ + for ( i = 0; i < max_choice; i++ ) + { + item_set(scroll + i); + print_item(list, i, i == choice); + } + + 
print_arrows(dialog, choice, item_count(), scroll, box_y, + box_x + check_x + 5, list_height); + + print_buttons(dialog, height, width, 0); + + wnoutrefresh(dialog); + wnoutrefresh(list); + doupdate(); + + while ( key != KEY_ESC ) + { + key = wgetch(dialog); + + for ( i = 0; i < max_choice; i++ ) + { + item_set(i + scroll); + if ( toupper(key) == toupper(item_str()[0]) ) + break; + } + + if ( i < max_choice || key == KEY_UP || key == KEY_DOWN || key == '+' || + key == '-' ) + { + if ( key == KEY_UP || key == '-' ) + { + if ( !choice ) + { + if ( !scroll ) + continue; + /* Scroll list down */ + if ( list_height > 1 ) + { + /* De-highlight current first item */ + item_set(scroll); + print_item(list, 0, FALSE); + scrollok(list, TRUE); + wscrl(list, -1); + scrollok(list, FALSE); + } + scroll--; + item_set(scroll); + print_item(list, 0, TRUE); + print_arrows(dialog, choice, item_count(), scroll, box_y, + box_x + check_x + 5, list_height); + + wnoutrefresh(dialog); + wrefresh(list); + + continue; /* wait for another key press */ + } + else + i = choice - 1; + } + else if ( key == KEY_DOWN || key == '+' ) + { + if ( choice == max_choice - 1 ) + { + if ( scroll + choice >= item_count() - 1 ) + continue; + /* Scroll list up */ + if ( list_height > 1 ) + { + /* De-highlight current last item before scrolling up */ + item_set(scroll + max_choice - 1); + print_item(list, max_choice - 1, FALSE); + scrollok(list, TRUE); + wscrl(list, 1); + scrollok(list, FALSE); + } + scroll++; + item_set(scroll + max_choice - 1); + print_item(list, max_choice - 1, TRUE); + + print_arrows(dialog, choice, item_count(), scroll, box_y, + box_x + check_x + 5, list_height); + + wnoutrefresh(dialog); + wrefresh(list); + + continue; /* wait for another key press */ + } + else + i = choice + 1; + } + if ( i != choice ) + { + /* De-highlight current item */ + item_set(scroll + choice); + print_item(list, choice, FALSE); + /* Highlight new item */ + choice = i; + item_set(scroll + choice); + print_item(list, choice, TRUE); + wnoutrefresh(dialog); + wrefresh(list); + } + continue; /* wait for another key press */ + } + switch (key) + { + case 'H': + case 'h': + case '?': + button = 1; + /* fall-through */ + case 'S': + case 's': + case ' ': + case '\n': + item_foreach () + item_set_selected(0); + item_set(scroll + choice); + item_set_selected(1); + delwin(list); + delwin(dialog); + return button; + case TAB: + case KEY_LEFT: + case KEY_RIGHT: + button = ((key == KEY_LEFT ? --button : ++button) < 0) + ? 1 + : (button > 1 ? 0 : button); + + print_buttons(dialog, height, width, button); + wrefresh(dialog); + break; + case 'X': + case 'x': + key = KEY_ESC; + break; + case KEY_ESC: + key = on_key_esc(dialog); + break; + case KEY_RESIZE: + delwin(list); + delwin(dialog); + on_key_resize(); + goto do_resize; + } + + /* Now, update everything... 
*/ + doupdate(); + } + delwin(list); + delwin(dialog); + return key; /* ESC pressed */ } diff --git a/xen/tools/kconfig/lxdialog/inputbox.c b/xen/tools/kconfig/lxdialog/inputbox.c index d58de1dc53..a3098afa4c 100644 --- a/xen/tools/kconfig/lxdialog/inputbox.c +++ b/xen/tools/kconfig/lxdialog/inputbox.c @@ -26,276 +26,314 @@ char dialog_input_result[MAX_LEN + 1]; /* * Print the termination buttons */ -static void print_buttons(WINDOW * dialog, int height, int width, int selected) +static void print_buttons(WINDOW *dialog, int height, int width, int selected) { - int x = width / 2 - 11; - int y = height - 2; + int x = width / 2 - 11; + int y = height - 2; - print_button(dialog, gettext(" Ok "), y, x, selected == 0); - print_button(dialog, gettext(" Help "), y, x + 14, selected == 1); + print_button(dialog, gettext(" Ok "), y, x, selected == 0); + print_button(dialog, gettext(" Help "), y, x + 14, selected == 1); - wmove(dialog, y, x + 1 + 14 * selected); - wrefresh(dialog); + wmove(dialog, y, x + 1 + 14 * selected); + wrefresh(dialog); } /* * Display a dialog box for inputing a string */ -int dialog_inputbox(const char *title, const char *prompt, int height, int width, - const char *init) +int dialog_inputbox(const char *title, const char *prompt, int height, + int width, const char *init) { - int i, x, y, box_y, box_x, box_width; - int input_x = 0, key = 0, button = -1; - int show_x, len, pos; - char *instr = dialog_input_result; - WINDOW *dialog; + int i, x, y, box_y, box_x, box_width; + int input_x = 0, key = 0, button = -1; + int show_x, len, pos; + char *instr = dialog_input_result; + WINDOW *dialog; - if (!init) - instr[0] = '\0'; - else - strcpy(instr, init); + if ( !init ) + instr[0] = '\0'; + else + strcpy(instr, init); do_resize: - if (getmaxy(stdscr) <= (height - INPUTBOX_HEIGTH_MIN)) - return -ERRDISPLAYTOOSMALL; - if (getmaxx(stdscr) <= (width - INPUTBOX_WIDTH_MIN)) - return -ERRDISPLAYTOOSMALL; + if ( getmaxy(stdscr) <= (height - INPUTBOX_HEIGTH_MIN) ) + return -ERRDISPLAYTOOSMALL; + if ( getmaxx(stdscr) <= (width - INPUTBOX_WIDTH_MIN) ) + return -ERRDISPLAYTOOSMALL; - /* center dialog box on screen */ - x = (getmaxx(stdscr) - width) / 2; - y = (getmaxy(stdscr) - height) / 2; + /* center dialog box on screen */ + x = (getmaxx(stdscr) - width) / 2; + y = (getmaxy(stdscr) - height) / 2; - draw_shadow(stdscr, y, x, height, width); + draw_shadow(stdscr, y, x, height, width); - dialog = newwin(height, width, y, x); - keypad(dialog, TRUE); + dialog = newwin(height, width, y, x); + keypad(dialog, TRUE); - draw_box(dialog, 0, 0, height, width, - dlg.dialog.atr, dlg.border.atr); - wattrset(dialog, dlg.border.atr); - mvwaddch(dialog, height - 3, 0, ACS_LTEE); - for (i = 0; i < width - 2; i++) - waddch(dialog, ACS_HLINE); - wattrset(dialog, dlg.dialog.atr); - waddch(dialog, ACS_RTEE); + draw_box(dialog, 0, 0, height, width, dlg.dialog.atr, dlg.border.atr); + wattrset(dialog, dlg.border.atr); + mvwaddch(dialog, height - 3, 0, ACS_LTEE); + for ( i = 0; i < width - 2; i++ ) + waddch(dialog, ACS_HLINE); + wattrset(dialog, dlg.dialog.atr); + waddch(dialog, ACS_RTEE); - print_title(dialog, title, width); + print_title(dialog, title, width); - wattrset(dialog, dlg.dialog.atr); - print_autowrap(dialog, prompt, width - 2, 1, 3); + wattrset(dialog, dlg.dialog.atr); + print_autowrap(dialog, prompt, width - 2, 1, 3); - /* Draw the input field box */ - box_width = width - 6; - getyx(dialog, y, x); - box_y = y + 2; - box_x = (width - box_width) / 2; - draw_box(dialog, y + 1, box_x - 1, 3, box_width + 
2, - dlg.dialog.atr, dlg.border.atr); + /* Draw the input field box */ + box_width = width - 6; + getyx(dialog, y, x); + box_y = y + 2; + box_x = (width - box_width) / 2; + draw_box(dialog, y + 1, box_x - 1, 3, box_width + 2, dlg.dialog.atr, + dlg.border.atr); - print_buttons(dialog, height, width, 0); + print_buttons(dialog, height, width, 0); - /* Set up the initial value */ - wmove(dialog, box_y, box_x); - wattrset(dialog, dlg.inputbox.atr); + /* Set up the initial value */ + wmove(dialog, box_y, box_x); + wattrset(dialog, dlg.inputbox.atr); - len = strlen(instr); - pos = len; + len = strlen(instr); + pos = len; - if (len >= box_width) { - show_x = len - box_width + 1; - input_x = box_width - 1; - for (i = 0; i < box_width - 1; i++) - waddch(dialog, instr[show_x + i]); - } else { - show_x = 0; - input_x = len; - waddstr(dialog, instr); - } + if ( len >= box_width ) + { + show_x = len - box_width + 1; + input_x = box_width - 1; + for ( i = 0; i < box_width - 1; i++ ) + waddch(dialog, instr[show_x + i]); + } + else + { + show_x = 0; + input_x = len; + waddstr(dialog, instr); + } - wmove(dialog, box_y, box_x + input_x); + wmove(dialog, box_y, box_x + input_x); - wrefresh(dialog); + wrefresh(dialog); - while (key != KEY_ESC) { - key = wgetch(dialog); + while ( key != KEY_ESC ) + { + key = wgetch(dialog); - if (button == -1) { /* Input box selected */ - switch (key) { - case TAB: - case KEY_UP: - case KEY_DOWN: - break; - case KEY_BACKSPACE: - case 127: - if (pos) { - wattrset(dialog, dlg.inputbox.atr); - if (input_x == 0) { - show_x--; - } else - input_x--; + if ( button == -1 ) + { /* Input box selected */ + switch (key) + { + case TAB: + case KEY_UP: + case KEY_DOWN: + break; + case KEY_BACKSPACE: + case 127: + if ( pos ) + { + wattrset(dialog, dlg.inputbox.atr); + if ( input_x == 0 ) + { + show_x--; + } + else + input_x--; - if (pos < len) { - for (i = pos - 1; i < len; i++) { - instr[i] = instr[i+1]; - } - } + if ( pos < len ) + { + for ( i = pos - 1; i < len; i++ ) + { + instr[i] = instr[i + 1]; + } + } - pos--; - len--; - instr[len] = '\0'; - wmove(dialog, box_y, box_x); - for (i = 0; i < box_width; i++) { - if (!instr[show_x + i]) { - waddch(dialog, ' '); - break; - } - waddch(dialog, instr[show_x + i]); - } - wmove(dialog, box_y, input_x + box_x); - wrefresh(dialog); - } - continue; - case KEY_LEFT: - if (pos > 0) { - if (input_x > 0) { - wmove(dialog, box_y, --input_x + box_x); - } else if (input_x == 0) { - show_x--; - wmove(dialog, box_y, box_x); - for (i = 0; i < box_width; i++) { - if (!instr[show_x + i]) { - waddch(dialog, ' '); - break; - } - waddch(dialog, instr[show_x + i]); - } - wmove(dialog, box_y, box_x); - } - pos--; - } - continue; - case KEY_RIGHT: - if (pos < len) { - if (input_x < box_width - 1) { - wmove(dialog, box_y, ++input_x + box_x); - } else if (input_x == box_width - 1) { - show_x++; - wmove(dialog, box_y, box_x); - for (i = 0; i < box_width; i++) { - if (!instr[show_x + i]) { - waddch(dialog, ' '); - break; - } - waddch(dialog, instr[show_x + i]); - } - wmove(dialog, box_y, input_x + box_x); - } - pos++; - } - continue; - default: - if (key < 0x100 && isprint(key)) { - if (len < MAX_LEN) { - wattrset(dialog, dlg.inputbox.atr); - if (pos < len) { - for (i = len; i > pos; i--) - instr[i] = instr[i-1]; - instr[pos] = key; - } else { - instr[len] = key; - } - pos++; - len++; - instr[len] = '\0'; + pos--; + len--; + instr[len] = '\0'; + wmove(dialog, box_y, box_x); + for ( i = 0; i < box_width; i++ ) + { + if ( !instr[show_x + i] ) + { + waddch(dialog, ' '); + 
break; + } + waddch(dialog, instr[show_x + i]); + } + wmove(dialog, box_y, input_x + box_x); + wrefresh(dialog); + } + continue; + case KEY_LEFT: + if ( pos > 0 ) + { + if ( input_x > 0 ) + { + wmove(dialog, box_y, --input_x + box_x); + } + else if ( input_x == 0 ) + { + show_x--; + wmove(dialog, box_y, box_x); + for ( i = 0; i < box_width; i++ ) + { + if ( !instr[show_x + i] ) + { + waddch(dialog, ' '); + break; + } + waddch(dialog, instr[show_x + i]); + } + wmove(dialog, box_y, box_x); + } + pos--; + } + continue; + case KEY_RIGHT: + if ( pos < len ) + { + if ( input_x < box_width - 1 ) + { + wmove(dialog, box_y, ++input_x + box_x); + } + else if ( input_x == box_width - 1 ) + { + show_x++; + wmove(dialog, box_y, box_x); + for ( i = 0; i < box_width; i++ ) + { + if ( !instr[show_x + i] ) + { + waddch(dialog, ' '); + break; + } + waddch(dialog, instr[show_x + i]); + } + wmove(dialog, box_y, input_x + box_x); + } + pos++; + } + continue; + default: + if ( key < 0x100 && isprint(key) ) + { + if ( len < MAX_LEN ) + { + wattrset(dialog, dlg.inputbox.atr); + if ( pos < len ) + { + for ( i = len; i > pos; i-- ) + instr[i] = instr[i - 1]; + instr[pos] = key; + } + else + { + instr[len] = key; + } + pos++; + len++; + instr[len] = '\0'; - if (input_x == box_width - 1) { - show_x++; - } else { - input_x++; - } + if ( input_x == box_width - 1 ) + { + show_x++; + } + else + { + input_x++; + } - wmove(dialog, box_y, box_x); - for (i = 0; i < box_width; i++) { - if (!instr[show_x + i]) { - waddch(dialog, ' '); - break; - } - waddch(dialog, instr[show_x + i]); - } - wmove(dialog, box_y, input_x + box_x); - wrefresh(dialog); - } else - flash(); /* Alarm user about overflow */ - continue; - } - } - } - switch (key) { - case 'O': - case 'o': - delwin(dialog); - return 0; - case 'H': - case 'h': - delwin(dialog); - return 1; - case KEY_UP: - case KEY_LEFT: - switch (button) { - case -1: - button = 1; /* Indicates "Help" button is selected */ - print_buttons(dialog, height, width, 1); - break; - case 0: - button = -1; /* Indicates input box is selected */ - print_buttons(dialog, height, width, 0); - wmove(dialog, box_y, box_x + input_x); - wrefresh(dialog); - break; - case 1: - button = 0; /* Indicates "OK" button is selected */ - print_buttons(dialog, height, width, 0); - break; - } - break; - case TAB: - case KEY_DOWN: - case KEY_RIGHT: - switch (button) { - case -1: - button = 0; /* Indicates "OK" button is selected */ - print_buttons(dialog, height, width, 0); - break; - case 0: - button = 1; /* Indicates "Help" button is selected */ - print_buttons(dialog, height, width, 1); - break; - case 1: - button = -1; /* Indicates input box is selected */ - print_buttons(dialog, height, width, 0); - wmove(dialog, box_y, box_x + input_x); - wrefresh(dialog); - break; - } - break; - case ' ': - case '\n': - delwin(dialog); - return (button == -1 ? 
0 : button); - case 'X': - case 'x': - key = KEY_ESC; - break; - case KEY_ESC: - key = on_key_esc(dialog); - break; - case KEY_RESIZE: - delwin(dialog); - on_key_resize(); - goto do_resize; - } - } + wmove(dialog, box_y, box_x); + for ( i = 0; i < box_width; i++ ) + { + if ( !instr[show_x + i] ) + { + waddch(dialog, ' '); + break; + } + waddch(dialog, instr[show_x + i]); + } + wmove(dialog, box_y, input_x + box_x); + wrefresh(dialog); + } + else + flash(); /* Alarm user about overflow */ + continue; + } + } + } + switch (key) + { + case 'O': + case 'o': + delwin(dialog); + return 0; + case 'H': + case 'h': + delwin(dialog); + return 1; + case KEY_UP: + case KEY_LEFT: + switch (button) + { + case -1: + button = 1; /* Indicates "Help" button is selected */ + print_buttons(dialog, height, width, 1); + break; + case 0: + button = -1; /* Indicates input box is selected */ + print_buttons(dialog, height, width, 0); + wmove(dialog, box_y, box_x + input_x); + wrefresh(dialog); + break; + case 1: + button = 0; /* Indicates "OK" button is selected */ + print_buttons(dialog, height, width, 0); + break; + } + break; + case TAB: + case KEY_DOWN: + case KEY_RIGHT: + switch (button) + { + case -1: + button = 0; /* Indicates "OK" button is selected */ + print_buttons(dialog, height, width, 0); + break; + case 0: + button = 1; /* Indicates "Help" button is selected */ + print_buttons(dialog, height, width, 1); + break; + case 1: + button = -1; /* Indicates input box is selected */ + print_buttons(dialog, height, width, 0); + wmove(dialog, box_y, box_x + input_x); + wrefresh(dialog); + break; + } + break; + case ' ': + case '\n': + delwin(dialog); + return (button == -1 ? 0 : button); + case 'X': + case 'x': + key = KEY_ESC; + break; + case KEY_ESC: + key = on_key_esc(dialog); + break; + case KEY_RESIZE: + delwin(dialog); + on_key_resize(); + goto do_resize; + } + } - delwin(dialog); - return KEY_ESC; /* ESC pressed */ + delwin(dialog); + return KEY_ESC; /* ESC pressed */ } diff --git a/xen/tools/kconfig/lxdialog/menubox.c b/xen/tools/kconfig/lxdialog/menubox.c index 11ae9ad7ac..7d2fba39ff 100644 --- a/xen/tools/kconfig/lxdialog/menubox.c +++ b/xen/tools/kconfig/lxdialog/menubox.c @@ -63,375 +63,409 @@ static int menu_width, item_x; /* * Print menu item */ -static void do_print_item(WINDOW * win, const char *item, int line_y, - int selected, int hotkey) +static void do_print_item(WINDOW *win, const char *item, int line_y, + int selected, int hotkey) { - int j; - char *menu_item = malloc(menu_width + 1); + int j; + char *menu_item = malloc(menu_width + 1); - strncpy(menu_item, item, menu_width - item_x); - menu_item[menu_width - item_x] = '\0'; - j = first_alpha(menu_item, "YyNnMmHh"); + strncpy(menu_item, item, menu_width - item_x); + menu_item[menu_width - item_x] = '\0'; + j = first_alpha(menu_item, "YyNnMmHh"); - /* Clear 'residue' of last item */ - wattrset(win, dlg.menubox.atr); - wmove(win, line_y, 0); + /* Clear 'residue' of last item */ + wattrset(win, dlg.menubox.atr); + wmove(win, line_y, 0); #if OLD_NCURSES - { - int i; - for (i = 0; i < menu_width; i++) - waddch(win, ' '); - } + { + int i; + for ( i = 0; i < menu_width; i++ ) + waddch(win, ' '); + } #else - wclrtoeol(win); + wclrtoeol(win); #endif - wattrset(win, selected ? dlg.item_selected.atr : dlg.item.atr); - mvwaddstr(win, line_y, item_x, menu_item); - if (hotkey) { - wattrset(win, selected ? 
dlg.tag_key_selected.atr - : dlg.tag_key.atr); - mvwaddch(win, line_y, item_x + j, menu_item[j]); - } - if (selected) { - wmove(win, line_y, item_x + 1); - } - free(menu_item); - wrefresh(win); + wattrset(win, selected ? dlg.item_selected.atr : dlg.item.atr); + mvwaddstr(win, line_y, item_x, menu_item); + if ( hotkey ) + { + wattrset(win, selected ? dlg.tag_key_selected.atr : dlg.tag_key.atr); + mvwaddch(win, line_y, item_x + j, menu_item[j]); + } + if ( selected ) + { + wmove(win, line_y, item_x + 1); + } + free(menu_item); + wrefresh(win); } -#define print_item(index, choice, selected) \ -do { \ - item_set(index); \ - do_print_item(menu, item_str(), choice, selected, !item_is_tag(':')); \ -} while (0) +#define print_item(index, choice, selected) \ + do { \ + item_set(index); \ + do_print_item(menu, item_str(), choice, selected, !item_is_tag(':')); \ + } while ( 0 ) /* * Print the scroll indicators. */ -static void print_arrows(WINDOW * win, int item_no, int scroll, int y, int x, - int height) +static void print_arrows(WINDOW *win, int item_no, int scroll, int y, int x, + int height) { - int cur_y, cur_x; - - getyx(win, cur_y, cur_x); - - wmove(win, y, x); - - if (scroll > 0) { - wattrset(win, dlg.uarrow.atr); - waddch(win, ACS_UARROW); - waddstr(win, "(-)"); - } else { - wattrset(win, dlg.menubox.atr); - waddch(win, ACS_HLINE); - waddch(win, ACS_HLINE); - waddch(win, ACS_HLINE); - waddch(win, ACS_HLINE); - } - - y = y + height + 1; - wmove(win, y, x); - wrefresh(win); - - if ((height < item_no) && (scroll + height < item_no)) { - wattrset(win, dlg.darrow.atr); - waddch(win, ACS_DARROW); - waddstr(win, "(+)"); - } else { - wattrset(win, dlg.menubox_border.atr); - waddch(win, ACS_HLINE); - waddch(win, ACS_HLINE); - waddch(win, ACS_HLINE); - waddch(win, ACS_HLINE); - } - - wmove(win, cur_y, cur_x); - wrefresh(win); + int cur_y, cur_x; + + getyx(win, cur_y, cur_x); + + wmove(win, y, x); + + if ( scroll > 0 ) + { + wattrset(win, dlg.uarrow.atr); + waddch(win, ACS_UARROW); + waddstr(win, "(-)"); + } + else + { + wattrset(win, dlg.menubox.atr); + waddch(win, ACS_HLINE); + waddch(win, ACS_HLINE); + waddch(win, ACS_HLINE); + waddch(win, ACS_HLINE); + } + + y = y + height + 1; + wmove(win, y, x); + wrefresh(win); + + if ( (height < item_no) && (scroll + height < item_no) ) + { + wattrset(win, dlg.darrow.atr); + waddch(win, ACS_DARROW); + waddstr(win, "(+)"); + } + else + { + wattrset(win, dlg.menubox_border.atr); + waddch(win, ACS_HLINE); + waddch(win, ACS_HLINE); + waddch(win, ACS_HLINE); + waddch(win, ACS_HLINE); + } + + wmove(win, cur_y, cur_x); + wrefresh(win); } /* * Display the termination buttons. 
*/ -static void print_buttons(WINDOW * win, int height, int width, int selected) +static void print_buttons(WINDOW *win, int height, int width, int selected) { - int x = width / 2 - 28; - int y = height - 2; + int x = width / 2 - 28; + int y = height - 2; - print_button(win, gettext("Select"), y, x, selected == 0); - print_button(win, gettext(" Exit "), y, x + 12, selected == 1); - print_button(win, gettext(" Help "), y, x + 24, selected == 2); - print_button(win, gettext(" Save "), y, x + 36, selected == 3); - print_button(win, gettext(" Load "), y, x + 48, selected == 4); + print_button(win, gettext("Select"), y, x, selected == 0); + print_button(win, gettext(" Exit "), y, x + 12, selected == 1); + print_button(win, gettext(" Help "), y, x + 24, selected == 2); + print_button(win, gettext(" Save "), y, x + 36, selected == 3); + print_button(win, gettext(" Load "), y, x + 48, selected == 4); - wmove(win, y, x + 1 + 12 * selected); - wrefresh(win); + wmove(win, y, x + 1 + 12 * selected); + wrefresh(win); } /* scroll up n lines (n may be negative) */ static void do_scroll(WINDOW *win, int *scroll, int n) { - /* Scroll menu up */ - scrollok(win, TRUE); - wscrl(win, n); - scrollok(win, FALSE); - *scroll = *scroll + n; - wrefresh(win); + /* Scroll menu up */ + scrollok(win, TRUE); + wscrl(win, n); + scrollok(win, FALSE); + *scroll = *scroll + n; + wrefresh(win); } /* * Display a menu for choosing among a number of options */ -int dialog_menu(const char *title, const char *prompt, - const void *selected, int *s_scroll) +int dialog_menu(const char *title, const char *prompt, const void *selected, + int *s_scroll) { - int i, j, x, y, box_x, box_y; - int height, width, menu_height; - int key = 0, button = 0, scroll = 0, choice = 0; - int first_item = 0, max_choice; - WINDOW *dialog, *menu; + int i, j, x, y, box_x, box_y; + int height, width, menu_height; + int key = 0, button = 0, scroll = 0, choice = 0; + int first_item = 0, max_choice; + WINDOW *dialog, *menu; do_resize: - height = getmaxy(stdscr); - width = getmaxx(stdscr); - if (height < MENUBOX_HEIGTH_MIN || width < MENUBOX_WIDTH_MIN) - return -ERRDISPLAYTOOSMALL; - - height -= 4; - width -= 5; - menu_height = height - 10; - - max_choice = MIN(menu_height, item_count()); - - /* center dialog box on screen */ - x = (getmaxx(stdscr) - width) / 2; - y = (getmaxy(stdscr) - height) / 2; - - draw_shadow(stdscr, y, x, height, width); - - dialog = newwin(height, width, y, x); - keypad(dialog, TRUE); - - draw_box(dialog, 0, 0, height, width, - dlg.dialog.atr, dlg.border.atr); - wattrset(dialog, dlg.border.atr); - mvwaddch(dialog, height - 3, 0, ACS_LTEE); - for (i = 0; i < width - 2; i++) - waddch(dialog, ACS_HLINE); - wattrset(dialog, dlg.dialog.atr); - wbkgdset(dialog, dlg.dialog.atr & A_COLOR); - waddch(dialog, ACS_RTEE); - - print_title(dialog, title, width); - - wattrset(dialog, dlg.dialog.atr); - print_autowrap(dialog, prompt, width - 2, 1, 3); - - menu_width = width - 6; - box_y = height - menu_height - 5; - box_x = (width - menu_width) / 2 - 1; - - /* create new window for the menu */ - menu = subwin(dialog, menu_height, menu_width, - y + box_y + 1, x + box_x + 1); - keypad(menu, TRUE); - - /* draw a box around the menu items */ - draw_box(dialog, box_y, box_x, menu_height + 2, menu_width + 2, - dlg.menubox_border.atr, dlg.menubox.atr); - - if (menu_width >= 80) - item_x = (menu_width - 70) / 2; - else - item_x = 4; - - /* Set choice to default item */ - item_foreach() - if (selected && (selected == item_data())) - choice = item_n(); - /* get 
the saved scroll info */ - scroll = *s_scroll; - if ((scroll <= choice) && (scroll + max_choice > choice) && - (scroll >= 0) && (scroll + max_choice <= item_count())) { - first_item = scroll; - choice = choice - scroll; - } else { - scroll = 0; - } - if ((choice >= max_choice)) { - if (choice >= item_count() - max_choice / 2) - scroll = first_item = item_count() - max_choice; - else - scroll = first_item = choice - max_choice / 2; - choice = choice - scroll; - } - - /* Print the menu */ - for (i = 0; i < max_choice; i++) { - print_item(first_item + i, i, i == choice); - } - - wnoutrefresh(menu); - - print_arrows(dialog, item_count(), scroll, - box_y, box_x + item_x + 1, menu_height); - - print_buttons(dialog, height, width, 0); - wmove(menu, choice, item_x + 1); - wrefresh(menu); - - while (key != KEY_ESC) { - key = wgetch(menu); - - if (key < 256 && isalpha(key)) - key = tolower(key); - - if (strchr("ynmh", key)) - i = max_choice; - else { - for (i = choice + 1; i < max_choice; i++) { - item_set(scroll + i); - j = first_alpha(item_str(), "YyNnMmHh"); - if (key == tolower(item_str()[j])) - break; - } - if (i == max_choice) - for (i = 0; i < max_choice; i++) { - item_set(scroll + i); - j = first_alpha(item_str(), "YyNnMmHh"); - if (key == tolower(item_str()[j])) - break; - } - } - - if (item_count() != 0 && - (i < max_choice || - key == KEY_UP || key == KEY_DOWN || - key == '-' || key == '+' || - key == KEY_PPAGE || key == KEY_NPAGE)) { - /* Remove highligt of current item */ - print_item(scroll + choice, choice, FALSE); - - if (key == KEY_UP || key == '-') { - if (choice < 2 && scroll) { - /* Scroll menu down */ - do_scroll(menu, &scroll, -1); - - print_item(scroll, 0, FALSE); - } else - choice = MAX(choice - 1, 0); - - } else if (key == KEY_DOWN || key == '+') { - print_item(scroll+choice, choice, FALSE); - - if ((choice > max_choice - 3) && - (scroll + max_choice < item_count())) { - /* Scroll menu up */ - do_scroll(menu, &scroll, 1); - - print_item(scroll+max_choice - 1, - max_choice - 1, FALSE); - } else - choice = MIN(choice + 1, max_choice - 1); - - } else if (key == KEY_PPAGE) { - scrollok(menu, TRUE); - for (i = 0; (i < max_choice); i++) { - if (scroll > 0) { - do_scroll(menu, &scroll, -1); - print_item(scroll, 0, FALSE); - } else { - if (choice > 0) - choice--; - } - } - - } else if (key == KEY_NPAGE) { - for (i = 0; (i < max_choice); i++) { - if (scroll + max_choice < item_count()) { - do_scroll(menu, &scroll, 1); - print_item(scroll+max_choice-1, - max_choice - 1, FALSE); - } else { - if (choice + 1 < max_choice) - choice++; - } - } - } else - choice = i; - - print_item(scroll + choice, choice, TRUE); - - print_arrows(dialog, item_count(), scroll, - box_y, box_x + item_x + 1, menu_height); - - wnoutrefresh(dialog); - wrefresh(menu); - - continue; /* wait for another key press */ - } - - switch (key) { - case KEY_LEFT: - case TAB: - case KEY_RIGHT: - button = ((key == KEY_LEFT ? --button : ++button) < 0) - ? 4 : (button > 4 ? 
0 : button); - - print_buttons(dialog, height, width, button); - wrefresh(menu); - break; - case ' ': - case 's': - case 'y': - case 'n': - case 'm': - case '/': - case 'h': - case '?': - case 'z': - case '\n': - /* save scroll info */ - *s_scroll = scroll; - delwin(menu); - delwin(dialog); - item_set(scroll + choice); - item_set_selected(1); - switch (key) { - case 'h': - case '?': - return 2; - case 's': - case 'y': - return 5; - case 'n': - return 6; - case 'm': - return 7; - case ' ': - return 8; - case '/': - return 9; - case 'z': - return 10; - case '\n': - return button; - } - return 0; - case 'e': - case 'x': - key = KEY_ESC; - break; - case KEY_ESC: - key = on_key_esc(menu); - break; - case KEY_RESIZE: - on_key_resize(); - delwin(menu); - delwin(dialog); - goto do_resize; - } - } - delwin(menu); - delwin(dialog); - return key; /* ESC pressed */ + height = getmaxy(stdscr); + width = getmaxx(stdscr); + if ( height < MENUBOX_HEIGTH_MIN || width < MENUBOX_WIDTH_MIN ) + return -ERRDISPLAYTOOSMALL; + + height -= 4; + width -= 5; + menu_height = height - 10; + + max_choice = MIN(menu_height, item_count()); + + /* center dialog box on screen */ + x = (getmaxx(stdscr) - width) / 2; + y = (getmaxy(stdscr) - height) / 2; + + draw_shadow(stdscr, y, x, height, width); + + dialog = newwin(height, width, y, x); + keypad(dialog, TRUE); + + draw_box(dialog, 0, 0, height, width, dlg.dialog.atr, dlg.border.atr); + wattrset(dialog, dlg.border.atr); + mvwaddch(dialog, height - 3, 0, ACS_LTEE); + for ( i = 0; i < width - 2; i++ ) + waddch(dialog, ACS_HLINE); + wattrset(dialog, dlg.dialog.atr); + wbkgdset(dialog, dlg.dialog.atr & A_COLOR); + waddch(dialog, ACS_RTEE); + + print_title(dialog, title, width); + + wattrset(dialog, dlg.dialog.atr); + print_autowrap(dialog, prompt, width - 2, 1, 3); + + menu_width = width - 6; + box_y = height - menu_height - 5; + box_x = (width - menu_width) / 2 - 1; + + /* create new window for the menu */ + menu = + subwin(dialog, menu_height, menu_width, y + box_y + 1, x + box_x + 1); + keypad(menu, TRUE); + + /* draw a box around the menu items */ + draw_box(dialog, box_y, box_x, menu_height + 2, menu_width + 2, + dlg.menubox_border.atr, dlg.menubox.atr); + + if ( menu_width >= 80 ) + item_x = (menu_width - 70) / 2; + else + item_x = 4; + + /* Set choice to default item */ + item_foreach () + if ( selected && (selected == item_data()) ) + choice = item_n(); + /* get the saved scroll info */ + scroll = *s_scroll; + if ( (scroll <= choice) && (scroll + max_choice > choice) && + (scroll >= 0) && (scroll + max_choice <= item_count()) ) + { + first_item = scroll; + choice = choice - scroll; + } + else + { + scroll = 0; + } + if ( (choice >= max_choice) ) + { + if ( choice >= item_count() - max_choice / 2 ) + scroll = first_item = item_count() - max_choice; + else + scroll = first_item = choice - max_choice / 2; + choice = choice - scroll; + } + + /* Print the menu */ + for ( i = 0; i < max_choice; i++ ) + { + print_item(first_item + i, i, i == choice); + } + + wnoutrefresh(menu); + + print_arrows(dialog, item_count(), scroll, box_y, box_x + item_x + 1, + menu_height); + + print_buttons(dialog, height, width, 0); + wmove(menu, choice, item_x + 1); + wrefresh(menu); + + while ( key != KEY_ESC ) + { + key = wgetch(menu); + + if ( key < 256 && isalpha(key) ) + key = tolower(key); + + if ( strchr("ynmh", key) ) + i = max_choice; + else + { + for ( i = choice + 1; i < max_choice; i++ ) + { + item_set(scroll + i); + j = first_alpha(item_str(), "YyNnMmHh"); + if ( key == 
tolower(item_str()[j]) ) + break; + } + if ( i == max_choice ) + for ( i = 0; i < max_choice; i++ ) + { + item_set(scroll + i); + j = first_alpha(item_str(), "YyNnMmHh"); + if ( key == tolower(item_str()[j]) ) + break; + } + } + + if ( item_count() != 0 && + (i < max_choice || key == KEY_UP || key == KEY_DOWN || + key == '-' || key == '+' || key == KEY_PPAGE || + key == KEY_NPAGE) ) + { + /* Remove highligt of current item */ + print_item(scroll + choice, choice, FALSE); + + if ( key == KEY_UP || key == '-' ) + { + if ( choice < 2 && scroll ) + { + /* Scroll menu down */ + do_scroll(menu, &scroll, -1); + + print_item(scroll, 0, FALSE); + } + else + choice = MAX(choice - 1, 0); + } + else if ( key == KEY_DOWN || key == '+' ) + { + print_item(scroll + choice, choice, FALSE); + + if ( (choice > max_choice - 3) && + (scroll + max_choice < item_count()) ) + { + /* Scroll menu up */ + do_scroll(menu, &scroll, 1); + + print_item(scroll + max_choice - 1, max_choice - 1, FALSE); + } + else + choice = MIN(choice + 1, max_choice - 1); + } + else if ( key == KEY_PPAGE ) + { + scrollok(menu, TRUE); + for ( i = 0; (i < max_choice); i++ ) + { + if ( scroll > 0 ) + { + do_scroll(menu, &scroll, -1); + print_item(scroll, 0, FALSE); + } + else + { + if ( choice > 0 ) + choice--; + } + } + } + else if ( key == KEY_NPAGE ) + { + for ( i = 0; (i < max_choice); i++ ) + { + if ( scroll + max_choice < item_count() ) + { + do_scroll(menu, &scroll, 1); + print_item(scroll + max_choice - 1, max_choice - 1, + FALSE); + } + else + { + if ( choice + 1 < max_choice ) + choice++; + } + } + } + else + choice = i; + + print_item(scroll + choice, choice, TRUE); + + print_arrows(dialog, item_count(), scroll, box_y, + box_x + item_x + 1, menu_height); + + wnoutrefresh(dialog); + wrefresh(menu); + + continue; /* wait for another key press */ + } + + switch (key) + { + case KEY_LEFT: + case TAB: + case KEY_RIGHT: + button = ((key == KEY_LEFT ? --button : ++button) < 0) + ? 4 + : (button > 4 ? 
0 : button); + + print_buttons(dialog, height, width, button); + wrefresh(menu); + break; + case ' ': + case 's': + case 'y': + case 'n': + case 'm': + case '/': + case 'h': + case '?': + case 'z': + case '\n': + /* save scroll info */ + *s_scroll = scroll; + delwin(menu); + delwin(dialog); + item_set(scroll + choice); + item_set_selected(1); + switch (key) + { + case 'h': + case '?': + return 2; + case 's': + case 'y': + return 5; + case 'n': + return 6; + case 'm': + return 7; + case ' ': + return 8; + case '/': + return 9; + case 'z': + return 10; + case '\n': + return button; + } + return 0; + case 'e': + case 'x': + key = KEY_ESC; + break; + case KEY_ESC: + key = on_key_esc(menu); + break; + case KEY_RESIZE: + on_key_resize(); + delwin(menu); + delwin(dialog); + goto do_resize; + } + } + delwin(menu); + delwin(dialog); + return key; /* ESC pressed */ } diff --git a/xen/tools/kconfig/lxdialog/textbox.c b/xen/tools/kconfig/lxdialog/textbox.c index 1773319b95..40868d1e7e 100644 --- a/xen/tools/kconfig/lxdialog/textbox.c +++ b/xen/tools/kconfig/lxdialog/textbox.c @@ -22,11 +22,11 @@ #include "dialog.h" static void back_lines(int n); -static void print_page(WINDOW *win, int height, int width, update_text_fn - update_text, void *data); +static void print_page(WINDOW *win, int height, int width, + update_text_fn update_text, void *data); static void print_line(WINDOW *win, int row, int width); static char *get_line(void); -static void print_position(WINDOW * win); +static void print_position(WINDOW *win); static int hscroll; static int begin_reached, end_reached, page_length; @@ -37,16 +37,15 @@ static char *page; * refresh window content */ static void refresh_text_box(WINDOW *dialog, WINDOW *box, int boxh, int boxw, - int cur_y, int cur_x, update_text_fn update_text, - void *data) + int cur_y, int cur_x, update_text_fn update_text, + void *data) { - print_page(box, boxh, boxw, update_text, data); - print_position(dialog); - wmove(dialog, cur_y, cur_x); /* Restore cursor position */ - wrefresh(dialog); + print_page(box, boxh, boxw, update_text, data); + print_position(dialog); + wmove(dialog, cur_y, cur_x); /* Restore cursor position */ + wrefresh(dialog); } - /* * Display text from a file in a dialog box. 
* @@ -54,223 +53,226 @@ static void refresh_text_box(WINDOW *dialog, WINDOW *box, int boxh, int boxw, * update_text() may not add or remove any '\n' or '\0' in tbuf */ int dialog_textbox(const char *title, char *tbuf, int initial_height, - int initial_width, int *keys, int *_vscroll, int *_hscroll, - update_text_fn update_text, void *data) + int initial_width, int *keys, int *_vscroll, int *_hscroll, + update_text_fn update_text, void *data) { - int i, x, y, cur_x, cur_y, key = 0; - int height, width, boxh, boxw; - WINDOW *dialog, *box; - bool done = false; - - begin_reached = 1; - end_reached = 0; - page_length = 0; - hscroll = 0; - buf = tbuf; - page = buf; /* page is pointer to start of page to be displayed */ - - if (_vscroll && *_vscroll) { - begin_reached = 0; - - for (i = 0; i < *_vscroll; i++) - get_line(); - } - if (_hscroll) - hscroll = *_hscroll; + int i, x, y, cur_x, cur_y, key = 0; + int height, width, boxh, boxw; + WINDOW *dialog, *box; + bool done = false; + + begin_reached = 1; + end_reached = 0; + page_length = 0; + hscroll = 0; + buf = tbuf; + page = buf; /* page is pointer to start of page to be displayed */ + + if ( _vscroll && *_vscroll ) + { + begin_reached = 0; + + for ( i = 0; i < *_vscroll; i++ ) + get_line(); + } + if ( _hscroll ) + hscroll = *_hscroll; do_resize: - getmaxyx(stdscr, height, width); - if (height < TEXTBOX_HEIGTH_MIN || width < TEXTBOX_WIDTH_MIN) - return -ERRDISPLAYTOOSMALL; - if (initial_height != 0) - height = initial_height; - else - if (height > 4) - height -= 4; - else - height = 0; - if (initial_width != 0) - width = initial_width; - else - if (width > 5) - width -= 5; - else - width = 0; - - /* center dialog box on screen */ - x = (getmaxx(stdscr) - width) / 2; - y = (getmaxy(stdscr) - height) / 2; - - draw_shadow(stdscr, y, x, height, width); - - dialog = newwin(height, width, y, x); - keypad(dialog, TRUE); - - /* Create window for box region, used for scrolling text */ - boxh = height - 4; - boxw = width - 2; - box = subwin(dialog, boxh, boxw, y + 1, x + 1); - wattrset(box, dlg.dialog.atr); - wbkgdset(box, dlg.dialog.atr & A_COLOR); - - keypad(box, TRUE); - - /* register the new window, along with its borders */ - draw_box(dialog, 0, 0, height, width, - dlg.dialog.atr, dlg.border.atr); - - wattrset(dialog, dlg.border.atr); - mvwaddch(dialog, height - 3, 0, ACS_LTEE); - for (i = 0; i < width - 2; i++) - waddch(dialog, ACS_HLINE); - wattrset(dialog, dlg.dialog.atr); - wbkgdset(dialog, dlg.dialog.atr & A_COLOR); - waddch(dialog, ACS_RTEE); - - print_title(dialog, title, width); - - print_button(dialog, gettext(" Exit "), height - 2, width / 2 - 4, TRUE); - wnoutrefresh(dialog); - getyx(dialog, cur_y, cur_x); /* Save cursor position */ - - /* Print first page of text */ - attr_clear(box, boxh, boxw, dlg.dialog.atr); - refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, - data); - - while (!done) { - key = wgetch(dialog); - switch (key) { - case 'E': /* Exit */ - case 'e': - case 'X': - case 'x': - case 'q': - case '\n': - done = true; - break; - case 'g': /* First page */ - case KEY_HOME: - if (!begin_reached) { - begin_reached = 1; - page = buf; - refresh_text_box(dialog, box, boxh, boxw, - cur_y, cur_x, update_text, - data); - } - break; - case 'G': /* Last page */ - case KEY_END: - - end_reached = 1; - /* point to last char in buf */ - page = buf + strlen(buf); - back_lines(boxh); - refresh_text_box(dialog, box, boxh, boxw, cur_y, - cur_x, update_text, data); - break; - case 'K': /* Previous line */ - case 'k': - case 
KEY_UP: - if (begin_reached) - break; - - back_lines(page_length + 1); - refresh_text_box(dialog, box, boxh, boxw, cur_y, - cur_x, update_text, data); - break; - case 'B': /* Previous page */ - case 'b': - case 'u': - case KEY_PPAGE: - if (begin_reached) - break; - back_lines(page_length + boxh); - refresh_text_box(dialog, box, boxh, boxw, cur_y, - cur_x, update_text, data); - break; - case 'J': /* Next line */ - case 'j': - case KEY_DOWN: - if (end_reached) - break; - - back_lines(page_length - 1); - refresh_text_box(dialog, box, boxh, boxw, cur_y, - cur_x, update_text, data); - break; - case KEY_NPAGE: /* Next page */ - case ' ': - case 'd': - if (end_reached) - break; - - begin_reached = 0; - refresh_text_box(dialog, box, boxh, boxw, cur_y, - cur_x, update_text, data); - break; - case '0': /* Beginning of line */ - case 'H': /* Scroll left */ - case 'h': - case KEY_LEFT: - if (hscroll <= 0) - break; - - if (key == '0') - hscroll = 0; - else - hscroll--; - /* Reprint current page to scroll horizontally */ - back_lines(page_length); - refresh_text_box(dialog, box, boxh, boxw, cur_y, - cur_x, update_text, data); - break; - case 'L': /* Scroll right */ - case 'l': - case KEY_RIGHT: - if (hscroll >= MAX_LEN) - break; - hscroll++; - /* Reprint current page to scroll horizontally */ - back_lines(page_length); - refresh_text_box(dialog, box, boxh, boxw, cur_y, - cur_x, update_text, data); - break; - case KEY_ESC: - if (on_key_esc(dialog) == KEY_ESC) - done = true; - break; - case KEY_RESIZE: - back_lines(height); - delwin(box); - delwin(dialog); - on_key_resize(); - goto do_resize; - default: - for (i = 0; keys[i]; i++) { - if (key == keys[i]) { - done = true; - break; - } - } - } - } - delwin(box); - delwin(dialog); - if (_vscroll) { - const char *s; - - s = buf; - *_vscroll = 0; - back_lines(page_length); - while (s < page && (s = strchr(s, '\n'))) { - (*_vscroll)++; - s++; - } - } - if (_hscroll) - *_hscroll = hscroll; - return key; + getmaxyx(stdscr, height, width); + if ( height < TEXTBOX_HEIGTH_MIN || width < TEXTBOX_WIDTH_MIN ) + return -ERRDISPLAYTOOSMALL; + if ( initial_height != 0 ) + height = initial_height; + else if ( height > 4 ) + height -= 4; + else + height = 0; + if ( initial_width != 0 ) + width = initial_width; + else if ( width > 5 ) + width -= 5; + else + width = 0; + + /* center dialog box on screen */ + x = (getmaxx(stdscr) - width) / 2; + y = (getmaxy(stdscr) - height) / 2; + + draw_shadow(stdscr, y, x, height, width); + + dialog = newwin(height, width, y, x); + keypad(dialog, TRUE); + + /* Create window for box region, used for scrolling text */ + boxh = height - 4; + boxw = width - 2; + box = subwin(dialog, boxh, boxw, y + 1, x + 1); + wattrset(box, dlg.dialog.atr); + wbkgdset(box, dlg.dialog.atr & A_COLOR); + + keypad(box, TRUE); + + /* register the new window, along with its borders */ + draw_box(dialog, 0, 0, height, width, dlg.dialog.atr, dlg.border.atr); + + wattrset(dialog, dlg.border.atr); + mvwaddch(dialog, height - 3, 0, ACS_LTEE); + for ( i = 0; i < width - 2; i++ ) + waddch(dialog, ACS_HLINE); + wattrset(dialog, dlg.dialog.atr); + wbkgdset(dialog, dlg.dialog.atr & A_COLOR); + waddch(dialog, ACS_RTEE); + + print_title(dialog, title, width); + + print_button(dialog, gettext(" Exit "), height - 2, width / 2 - 4, TRUE); + wnoutrefresh(dialog); + getyx(dialog, cur_y, cur_x); /* Save cursor position */ + + /* Print first page of text */ + attr_clear(box, boxh, boxw, dlg.dialog.atr); + refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); + + 
while ( !done ) + { + key = wgetch(dialog); + switch (key) + { + case 'E': /* Exit */ + case 'e': + case 'X': + case 'x': + case 'q': + case '\n': + done = true; + break; + case 'g': /* First page */ + case KEY_HOME: + if ( !begin_reached ) + { + begin_reached = 1; + page = buf; + refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, + update_text, data); + } + break; + case 'G': /* Last page */ + case KEY_END: + + end_reached = 1; + /* point to last char in buf */ + page = buf + strlen(buf); + back_lines(boxh); + refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, + data); + break; + case 'K': /* Previous line */ + case 'k': + case KEY_UP: + if ( begin_reached ) + break; + + back_lines(page_length + 1); + refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, + data); + break; + case 'B': /* Previous page */ + case 'b': + case 'u': + case KEY_PPAGE: + if ( begin_reached ) + break; + back_lines(page_length + boxh); + refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, + data); + break; + case 'J': /* Next line */ + case 'j': + case KEY_DOWN: + if ( end_reached ) + break; + + back_lines(page_length - 1); + refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, + data); + break; + case KEY_NPAGE: /* Next page */ + case ' ': + case 'd': + if ( end_reached ) + break; + + begin_reached = 0; + refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, + data); + break; + case '0': /* Beginning of line */ + case 'H': /* Scroll left */ + case 'h': + case KEY_LEFT: + if ( hscroll <= 0 ) + break; + + if ( key == '0' ) + hscroll = 0; + else + hscroll--; + /* Reprint current page to scroll horizontally */ + back_lines(page_length); + refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, + data); + break; + case 'L': /* Scroll right */ + case 'l': + case KEY_RIGHT: + if ( hscroll >= MAX_LEN ) + break; + hscroll++; + /* Reprint current page to scroll horizontally */ + back_lines(page_length); + refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, + data); + break; + case KEY_ESC: + if ( on_key_esc(dialog) == KEY_ESC ) + done = true; + break; + case KEY_RESIZE: + back_lines(height); + delwin(box); + delwin(dialog); + on_key_resize(); + goto do_resize; + default: + for ( i = 0; keys[i]; i++ ) + { + if ( key == keys[i] ) + { + done = true; + break; + } + } + } + } + delwin(box); + delwin(dialog); + if ( _vscroll ) + { + const char *s; + + s = buf; + *_vscroll = 0; + back_lines(page_length); + while ( s < page && (s = strchr(s, '\n')) ) + { + (*_vscroll)++; + s++; + } + } + if ( _hscroll ) + *_hscroll = hscroll; + return key; } /* @@ -279,85 +281,92 @@ do_resize: */ static void back_lines(int n) { - int i; - - begin_reached = 0; - /* Go back 'n' lines */ - for (i = 0; i < n; i++) { - if (*page == '\0') { - if (end_reached) { - end_reached = 0; - continue; - } - } - if (page == buf) { - begin_reached = 1; - return; - } - page--; - do { - if (page == buf) { - begin_reached = 1; - return; - } - page--; - } while (*page != '\n'); - page++; - } + int i; + + begin_reached = 0; + /* Go back 'n' lines */ + for ( i = 0; i < n; i++ ) + { + if ( *page == '\0' ) + { + if ( end_reached ) + { + end_reached = 0; + continue; + } + } + if ( page == buf ) + { + begin_reached = 1; + return; + } + page--; + do { + if ( page == buf ) + { + begin_reached = 1; + return; + } + page--; + } while ( *page != '\n' ); + page++; + } } /* * Print a new page of text. 
*/ -static void print_page(WINDOW *win, int height, int width, update_text_fn - update_text, void *data) +static void print_page(WINDOW *win, int height, int width, + update_text_fn update_text, void *data) { - int i, passed_end = 0; - - if (update_text) { - char *end; - - for (i = 0; i < height; i++) - get_line(); - end = page; - back_lines(height); - update_text(buf, page - buf, end - buf, data); - } - - page_length = 0; - for (i = 0; i < height; i++) { - print_line(win, i, width); - if (!passed_end) - page_length++; - if (end_reached && !passed_end) - passed_end = 1; - } - wnoutrefresh(win); + int i, passed_end = 0; + + if ( update_text ) + { + char *end; + + for ( i = 0; i < height; i++ ) + get_line(); + end = page; + back_lines(height); + update_text(buf, page - buf, end - buf, data); + } + + page_length = 0; + for ( i = 0; i < height; i++ ) + { + print_line(win, i, width); + if ( !passed_end ) + page_length++; + if ( end_reached && !passed_end ) + passed_end = 1; + } + wnoutrefresh(win); } /* * Print a new line of text. */ -static void print_line(WINDOW * win, int row, int width) +static void print_line(WINDOW *win, int row, int width) { - char *line; + char *line; - line = get_line(); - line += MIN(strlen(line), hscroll); /* Scroll horizontally */ - wmove(win, row, 0); /* move cursor to correct line */ - waddch(win, ' '); - waddnstr(win, line, MIN(strlen(line), width - 2)); + line = get_line(); + line += MIN(strlen(line), hscroll); /* Scroll horizontally */ + wmove(win, row, 0); /* move cursor to correct line */ + waddch(win, ' '); + waddnstr(win, line, MIN(strlen(line), width - 2)); - /* Clear 'residue' of previous line */ + /* Clear 'residue' of previous line */ #if OLD_NCURSES - { - int x = getcurx(win); - int i; - for (i = 0; i < width - x; i++) - waddch(win, ' '); - } + { + int x = getcurx(win); + int i; + for ( i = 0; i < width - x; i++ ) + waddch(win, ' '); + } #else - wclrtoeol(win); + wclrtoeol(win); #endif } @@ -368,41 +377,45 @@ static void print_line(WINDOW * win, int row, int width) */ static char *get_line(void) { - int i = 0; - static char line[MAX_LEN + 1]; - - end_reached = 0; - while (*page != '\n') { - if (*page == '\0') { - end_reached = 1; - break; - } else if (i < MAX_LEN) - line[i++] = *(page++); - else { - /* Truncate lines longer than MAX_LEN characters */ - if (i == MAX_LEN) - line[i++] = '\0'; - page++; - } - } - if (i <= MAX_LEN) - line[i] = '\0'; - if (!end_reached) - page++; /* move past '\n' */ - - return line; + int i = 0; + static char line[MAX_LEN + 1]; + + end_reached = 0; + while ( *page != '\n' ) + { + if ( *page == '\0' ) + { + end_reached = 1; + break; + } + else if ( i < MAX_LEN ) + line[i++] = *(page++); + else + { + /* Truncate lines longer than MAX_LEN characters */ + if ( i == MAX_LEN ) + line[i++] = '\0'; + page++; + } + } + if ( i <= MAX_LEN ) + line[i] = '\0'; + if ( !end_reached ) + page++; /* move past '\n' */ + + return line; } /* * Print current position */ -static void print_position(WINDOW * win) +static void print_position(WINDOW *win) { - int percent; + int percent; - wattrset(win, dlg.position_indicator.atr); - wbkgdset(win, dlg.position_indicator.atr & A_COLOR); - percent = (page - buf) * 100 / strlen(buf); - wmove(win, getmaxy(win) - 3, getmaxx(win) - 9); - wprintw(win, "(%3d%%)", percent); + wattrset(win, dlg.position_indicator.atr); + wbkgdset(win, dlg.position_indicator.atr & A_COLOR); + percent = (page - buf) * 100 / strlen(buf); + wmove(win, getmaxy(win) - 3, getmaxx(win) - 9); + wprintw(win, "(%3d%%)", percent); } diff 
--git a/xen/tools/kconfig/lxdialog/util.c b/xen/tools/kconfig/lxdialog/util.c index f7abdeb92a..4c0d72c8e4 100644 --- a/xen/tools/kconfig/lxdialog/util.c +++ b/xen/tools/kconfig/lxdialog/util.c @@ -30,130 +30,129 @@ struct dialog_info dlg; static void set_mono_theme(void) { - dlg.screen.atr = A_NORMAL; - dlg.shadow.atr = A_NORMAL; - dlg.dialog.atr = A_NORMAL; - dlg.title.atr = A_BOLD; - dlg.border.atr = A_NORMAL; - dlg.button_active.atr = A_REVERSE; - dlg.button_inactive.atr = A_DIM; - dlg.button_key_active.atr = A_REVERSE; - dlg.button_key_inactive.atr = A_BOLD; - dlg.button_label_active.atr = A_REVERSE; - dlg.button_label_inactive.atr = A_NORMAL; - dlg.inputbox.atr = A_NORMAL; - dlg.inputbox_border.atr = A_NORMAL; - dlg.searchbox.atr = A_NORMAL; - dlg.searchbox_title.atr = A_BOLD; - dlg.searchbox_border.atr = A_NORMAL; - dlg.position_indicator.atr = A_BOLD; - dlg.menubox.atr = A_NORMAL; - dlg.menubox_border.atr = A_NORMAL; - dlg.item.atr = A_NORMAL; - dlg.item_selected.atr = A_REVERSE; - dlg.tag.atr = A_BOLD; - dlg.tag_selected.atr = A_REVERSE; - dlg.tag_key.atr = A_BOLD; - dlg.tag_key_selected.atr = A_REVERSE; - dlg.check.atr = A_BOLD; - dlg.check_selected.atr = A_REVERSE; - dlg.uarrow.atr = A_BOLD; - dlg.darrow.atr = A_BOLD; + dlg.screen.atr = A_NORMAL; + dlg.shadow.atr = A_NORMAL; + dlg.dialog.atr = A_NORMAL; + dlg.title.atr = A_BOLD; + dlg.border.atr = A_NORMAL; + dlg.button_active.atr = A_REVERSE; + dlg.button_inactive.atr = A_DIM; + dlg.button_key_active.atr = A_REVERSE; + dlg.button_key_inactive.atr = A_BOLD; + dlg.button_label_active.atr = A_REVERSE; + dlg.button_label_inactive.atr = A_NORMAL; + dlg.inputbox.atr = A_NORMAL; + dlg.inputbox_border.atr = A_NORMAL; + dlg.searchbox.atr = A_NORMAL; + dlg.searchbox_title.atr = A_BOLD; + dlg.searchbox_border.atr = A_NORMAL; + dlg.position_indicator.atr = A_BOLD; + dlg.menubox.atr = A_NORMAL; + dlg.menubox_border.atr = A_NORMAL; + dlg.item.atr = A_NORMAL; + dlg.item_selected.atr = A_REVERSE; + dlg.tag.atr = A_BOLD; + dlg.tag_selected.atr = A_REVERSE; + dlg.tag_key.atr = A_BOLD; + dlg.tag_key_selected.atr = A_REVERSE; + dlg.check.atr = A_BOLD; + dlg.check_selected.atr = A_REVERSE; + dlg.uarrow.atr = A_BOLD; + dlg.darrow.atr = A_BOLD; } #define DLG_COLOR(dialog, f, b, h) \ -do { \ - dlg.dialog.fg = (f); \ - dlg.dialog.bg = (b); \ - dlg.dialog.hl = (h); \ -} while (0) + do { \ + dlg.dialog.fg = (f); \ + dlg.dialog.bg = (b); \ + dlg.dialog.hl = (h); \ + } while ( 0 ) static void set_classic_theme(void) { - DLG_COLOR(screen, COLOR_CYAN, COLOR_BLUE, true); - DLG_COLOR(shadow, COLOR_BLACK, COLOR_BLACK, true); - DLG_COLOR(dialog, COLOR_BLACK, COLOR_WHITE, false); - DLG_COLOR(title, COLOR_YELLOW, COLOR_WHITE, true); - DLG_COLOR(border, COLOR_WHITE, COLOR_WHITE, true); - DLG_COLOR(button_active, COLOR_WHITE, COLOR_BLUE, true); - DLG_COLOR(button_inactive, COLOR_BLACK, COLOR_WHITE, false); - DLG_COLOR(button_key_active, COLOR_WHITE, COLOR_BLUE, true); - DLG_COLOR(button_key_inactive, COLOR_RED, COLOR_WHITE, false); - DLG_COLOR(button_label_active, COLOR_YELLOW, COLOR_BLUE, true); - DLG_COLOR(button_label_inactive, COLOR_BLACK, COLOR_WHITE, true); - DLG_COLOR(inputbox, COLOR_BLACK, COLOR_WHITE, false); - DLG_COLOR(inputbox_border, COLOR_BLACK, COLOR_WHITE, false); - DLG_COLOR(searchbox, COLOR_BLACK, COLOR_WHITE, false); - DLG_COLOR(searchbox_title, COLOR_YELLOW, COLOR_WHITE, true); - DLG_COLOR(searchbox_border, COLOR_WHITE, COLOR_WHITE, true); - DLG_COLOR(position_indicator, COLOR_YELLOW, COLOR_WHITE, true); - DLG_COLOR(menubox, COLOR_BLACK, 
COLOR_WHITE, false); - DLG_COLOR(menubox_border, COLOR_WHITE, COLOR_WHITE, true); - DLG_COLOR(item, COLOR_BLACK, COLOR_WHITE, false); - DLG_COLOR(item_selected, COLOR_WHITE, COLOR_BLUE, true); - DLG_COLOR(tag, COLOR_YELLOW, COLOR_WHITE, true); - DLG_COLOR(tag_selected, COLOR_YELLOW, COLOR_BLUE, true); - DLG_COLOR(tag_key, COLOR_YELLOW, COLOR_WHITE, true); - DLG_COLOR(tag_key_selected, COLOR_YELLOW, COLOR_BLUE, true); - DLG_COLOR(check, COLOR_BLACK, COLOR_WHITE, false); - DLG_COLOR(check_selected, COLOR_WHITE, COLOR_BLUE, true); - DLG_COLOR(uarrow, COLOR_GREEN, COLOR_WHITE, true); - DLG_COLOR(darrow, COLOR_GREEN, COLOR_WHITE, true); + DLG_COLOR(screen, COLOR_CYAN, COLOR_BLUE, true); + DLG_COLOR(shadow, COLOR_BLACK, COLOR_BLACK, true); + DLG_COLOR(dialog, COLOR_BLACK, COLOR_WHITE, false); + DLG_COLOR(title, COLOR_YELLOW, COLOR_WHITE, true); + DLG_COLOR(border, COLOR_WHITE, COLOR_WHITE, true); + DLG_COLOR(button_active, COLOR_WHITE, COLOR_BLUE, true); + DLG_COLOR(button_inactive, COLOR_BLACK, COLOR_WHITE, false); + DLG_COLOR(button_key_active, COLOR_WHITE, COLOR_BLUE, true); + DLG_COLOR(button_key_inactive, COLOR_RED, COLOR_WHITE, false); + DLG_COLOR(button_label_active, COLOR_YELLOW, COLOR_BLUE, true); + DLG_COLOR(button_label_inactive, COLOR_BLACK, COLOR_WHITE, true); + DLG_COLOR(inputbox, COLOR_BLACK, COLOR_WHITE, false); + DLG_COLOR(inputbox_border, COLOR_BLACK, COLOR_WHITE, false); + DLG_COLOR(searchbox, COLOR_BLACK, COLOR_WHITE, false); + DLG_COLOR(searchbox_title, COLOR_YELLOW, COLOR_WHITE, true); + DLG_COLOR(searchbox_border, COLOR_WHITE, COLOR_WHITE, true); + DLG_COLOR(position_indicator, COLOR_YELLOW, COLOR_WHITE, true); + DLG_COLOR(menubox, COLOR_BLACK, COLOR_WHITE, false); + DLG_COLOR(menubox_border, COLOR_WHITE, COLOR_WHITE, true); + DLG_COLOR(item, COLOR_BLACK, COLOR_WHITE, false); + DLG_COLOR(item_selected, COLOR_WHITE, COLOR_BLUE, true); + DLG_COLOR(tag, COLOR_YELLOW, COLOR_WHITE, true); + DLG_COLOR(tag_selected, COLOR_YELLOW, COLOR_BLUE, true); + DLG_COLOR(tag_key, COLOR_YELLOW, COLOR_WHITE, true); + DLG_COLOR(tag_key_selected, COLOR_YELLOW, COLOR_BLUE, true); + DLG_COLOR(check, COLOR_BLACK, COLOR_WHITE, false); + DLG_COLOR(check_selected, COLOR_WHITE, COLOR_BLUE, true); + DLG_COLOR(uarrow, COLOR_GREEN, COLOR_WHITE, true); + DLG_COLOR(darrow, COLOR_GREEN, COLOR_WHITE, true); } static void set_blackbg_theme(void) { - DLG_COLOR(screen, COLOR_RED, COLOR_BLACK, true); - DLG_COLOR(shadow, COLOR_BLACK, COLOR_BLACK, false); - DLG_COLOR(dialog, COLOR_WHITE, COLOR_BLACK, false); - DLG_COLOR(title, COLOR_RED, COLOR_BLACK, false); - DLG_COLOR(border, COLOR_BLACK, COLOR_BLACK, true); + DLG_COLOR(screen, COLOR_RED, COLOR_BLACK, true); + DLG_COLOR(shadow, COLOR_BLACK, COLOR_BLACK, false); + DLG_COLOR(dialog, COLOR_WHITE, COLOR_BLACK, false); + DLG_COLOR(title, COLOR_RED, COLOR_BLACK, false); + DLG_COLOR(border, COLOR_BLACK, COLOR_BLACK, true); - DLG_COLOR(button_active, COLOR_YELLOW, COLOR_RED, false); - DLG_COLOR(button_inactive, COLOR_YELLOW, COLOR_BLACK, false); - DLG_COLOR(button_key_active, COLOR_YELLOW, COLOR_RED, true); - DLG_COLOR(button_key_inactive, COLOR_RED, COLOR_BLACK, false); - DLG_COLOR(button_label_active, COLOR_WHITE, COLOR_RED, false); - DLG_COLOR(button_label_inactive, COLOR_BLACK, COLOR_BLACK, true); + DLG_COLOR(button_active, COLOR_YELLOW, COLOR_RED, false); + DLG_COLOR(button_inactive, COLOR_YELLOW, COLOR_BLACK, false); + DLG_COLOR(button_key_active, COLOR_YELLOW, COLOR_RED, true); + DLG_COLOR(button_key_inactive, COLOR_RED, COLOR_BLACK, false); + 
DLG_COLOR(button_label_active, COLOR_WHITE, COLOR_RED, false); + DLG_COLOR(button_label_inactive, COLOR_BLACK, COLOR_BLACK, true); - DLG_COLOR(inputbox, COLOR_YELLOW, COLOR_BLACK, false); - DLG_COLOR(inputbox_border, COLOR_YELLOW, COLOR_BLACK, false); + DLG_COLOR(inputbox, COLOR_YELLOW, COLOR_BLACK, false); + DLG_COLOR(inputbox_border, COLOR_YELLOW, COLOR_BLACK, false); - DLG_COLOR(searchbox, COLOR_YELLOW, COLOR_BLACK, false); - DLG_COLOR(searchbox_title, COLOR_YELLOW, COLOR_BLACK, true); - DLG_COLOR(searchbox_border, COLOR_BLACK, COLOR_BLACK, true); + DLG_COLOR(searchbox, COLOR_YELLOW, COLOR_BLACK, false); + DLG_COLOR(searchbox_title, COLOR_YELLOW, COLOR_BLACK, true); + DLG_COLOR(searchbox_border, COLOR_BLACK, COLOR_BLACK, true); - DLG_COLOR(position_indicator, COLOR_RED, COLOR_BLACK, false); + DLG_COLOR(position_indicator, COLOR_RED, COLOR_BLACK, false); - DLG_COLOR(menubox, COLOR_YELLOW, COLOR_BLACK, false); - DLG_COLOR(menubox_border, COLOR_BLACK, COLOR_BLACK, true); + DLG_COLOR(menubox, COLOR_YELLOW, COLOR_BLACK, false); + DLG_COLOR(menubox_border, COLOR_BLACK, COLOR_BLACK, true); - DLG_COLOR(item, COLOR_WHITE, COLOR_BLACK, false); - DLG_COLOR(item_selected, COLOR_WHITE, COLOR_RED, false); + DLG_COLOR(item, COLOR_WHITE, COLOR_BLACK, false); + DLG_COLOR(item_selected, COLOR_WHITE, COLOR_RED, false); - DLG_COLOR(tag, COLOR_RED, COLOR_BLACK, false); - DLG_COLOR(tag_selected, COLOR_YELLOW, COLOR_RED, true); - DLG_COLOR(tag_key, COLOR_RED, COLOR_BLACK, false); - DLG_COLOR(tag_key_selected, COLOR_YELLOW, COLOR_RED, true); + DLG_COLOR(tag, COLOR_RED, COLOR_BLACK, false); + DLG_COLOR(tag_selected, COLOR_YELLOW, COLOR_RED, true); + DLG_COLOR(tag_key, COLOR_RED, COLOR_BLACK, false); + DLG_COLOR(tag_key_selected, COLOR_YELLOW, COLOR_RED, true); - DLG_COLOR(check, COLOR_YELLOW, COLOR_BLACK, false); - DLG_COLOR(check_selected, COLOR_YELLOW, COLOR_RED, true); + DLG_COLOR(check, COLOR_YELLOW, COLOR_BLACK, false); + DLG_COLOR(check_selected, COLOR_YELLOW, COLOR_RED, true); - DLG_COLOR(uarrow, COLOR_RED, COLOR_BLACK, false); - DLG_COLOR(darrow, COLOR_RED, COLOR_BLACK, false); + DLG_COLOR(uarrow, COLOR_RED, COLOR_BLACK, false); + DLG_COLOR(darrow, COLOR_RED, COLOR_BLACK, false); } static void set_bluetitle_theme(void) { - set_classic_theme(); - DLG_COLOR(title, COLOR_BLUE, COLOR_WHITE, true); - DLG_COLOR(button_key_active, COLOR_YELLOW, COLOR_BLUE, true); - DLG_COLOR(button_label_active, COLOR_WHITE, COLOR_BLUE, true); - DLG_COLOR(searchbox_title, COLOR_BLUE, COLOR_WHITE, true); - DLG_COLOR(position_indicator, COLOR_BLUE, COLOR_WHITE, true); - DLG_COLOR(tag, COLOR_BLUE, COLOR_WHITE, true); - DLG_COLOR(tag_key, COLOR_BLUE, COLOR_WHITE, true); - + set_classic_theme(); + DLG_COLOR(title, COLOR_BLUE, COLOR_WHITE, true); + DLG_COLOR(button_key_active, COLOR_YELLOW, COLOR_BLUE, true); + DLG_COLOR(button_label_active, COLOR_WHITE, COLOR_BLUE, true); + DLG_COLOR(searchbox_title, COLOR_BLUE, COLOR_WHITE, true); + DLG_COLOR(position_indicator, COLOR_BLUE, COLOR_WHITE, true); + DLG_COLOR(tag, COLOR_BLUE, COLOR_WHITE, true); + DLG_COLOR(tag_key, COLOR_BLUE, COLOR_WHITE, true); } /* @@ -161,64 +160,64 @@ static void set_bluetitle_theme(void) */ static int set_theme(const char *theme) { - int use_color = 1; - if (!theme) - set_bluetitle_theme(); - else if (strcmp(theme, "classic") == 0) - set_classic_theme(); - else if (strcmp(theme, "bluetitle") == 0) - set_bluetitle_theme(); - else if (strcmp(theme, "blackbg") == 0) - set_blackbg_theme(); - else if (strcmp(theme, "mono") == 0) - use_color = 0; + int use_color = 1; 
+ if ( !theme ) + set_bluetitle_theme(); + else if ( strcmp(theme, "classic") == 0 ) + set_classic_theme(); + else if ( strcmp(theme, "bluetitle") == 0 ) + set_bluetitle_theme(); + else if ( strcmp(theme, "blackbg") == 0 ) + set_blackbg_theme(); + else if ( strcmp(theme, "mono") == 0 ) + use_color = 0; - return use_color; + return use_color; } static void init_one_color(struct dialog_color *color) { - static int pair = 0; + static int pair = 0; - pair++; - init_pair(pair, color->fg, color->bg); - if (color->hl) - color->atr = A_BOLD | COLOR_PAIR(pair); - else - color->atr = COLOR_PAIR(pair); + pair++; + init_pair(pair, color->fg, color->bg); + if ( color->hl ) + color->atr = A_BOLD | COLOR_PAIR(pair); + else + color->atr = COLOR_PAIR(pair); } static void init_dialog_colors(void) { - init_one_color(&dlg.screen); - init_one_color(&dlg.shadow); - init_one_color(&dlg.dialog); - init_one_color(&dlg.title); - init_one_color(&dlg.border); - init_one_color(&dlg.button_active); - init_one_color(&dlg.button_inactive); - init_one_color(&dlg.button_key_active); - init_one_color(&dlg.button_key_inactive); - init_one_color(&dlg.button_label_active); - init_one_color(&dlg.button_label_inactive); - init_one_color(&dlg.inputbox); - init_one_color(&dlg.inputbox_border); - init_one_color(&dlg.searchbox); - init_one_color(&dlg.searchbox_title); - init_one_color(&dlg.searchbox_border); - init_one_color(&dlg.position_indicator); - init_one_color(&dlg.menubox); - init_one_color(&dlg.menubox_border); - init_one_color(&dlg.item); - init_one_color(&dlg.item_selected); - init_one_color(&dlg.tag); - init_one_color(&dlg.tag_selected); - init_one_color(&dlg.tag_key); - init_one_color(&dlg.tag_key_selected); - init_one_color(&dlg.check); - init_one_color(&dlg.check_selected); - init_one_color(&dlg.uarrow); - init_one_color(&dlg.darrow); + init_one_color(&dlg.screen); + init_one_color(&dlg.shadow); + init_one_color(&dlg.dialog); + init_one_color(&dlg.title); + init_one_color(&dlg.border); + init_one_color(&dlg.button_active); + init_one_color(&dlg.button_inactive); + init_one_color(&dlg.button_key_active); + init_one_color(&dlg.button_key_inactive); + init_one_color(&dlg.button_label_active); + init_one_color(&dlg.button_label_inactive); + init_one_color(&dlg.inputbox); + init_one_color(&dlg.inputbox_border); + init_one_color(&dlg.searchbox); + init_one_color(&dlg.searchbox_title); + init_one_color(&dlg.searchbox_border); + init_one_color(&dlg.position_indicator); + init_one_color(&dlg.menubox); + init_one_color(&dlg.menubox_border); + init_one_color(&dlg.item); + init_one_color(&dlg.item_selected); + init_one_color(&dlg.tag); + init_one_color(&dlg.tag_selected); + init_one_color(&dlg.tag_key); + init_one_color(&dlg.tag_key_selected); + init_one_color(&dlg.check); + init_one_color(&dlg.check_selected); + init_one_color(&dlg.uarrow); + init_one_color(&dlg.darrow); } /* @@ -226,87 +225,96 @@ static void init_dialog_colors(void) */ static void color_setup(const char *theme) { - int use_color; + int use_color; - use_color = set_theme(theme); - if (use_color && has_colors()) { - start_color(); - init_dialog_colors(); - } else - set_mono_theme(); + use_color = set_theme(theme); + if ( use_color && has_colors() ) + { + start_color(); + init_dialog_colors(); + } + else + set_mono_theme(); } /* * Set window to attribute 'attr' */ -void attr_clear(WINDOW * win, int height, int width, chtype attr) +void attr_clear(WINDOW *win, int height, int width, chtype attr) { - int i, j; + int i, j; - wattrset(win, attr); - for (i = 0; i < 
height; i++) { - wmove(win, i, 0); - for (j = 0; j < width; j++) - waddch(win, ' '); - } - touchwin(win); + wattrset(win, attr); + for ( i = 0; i < height; i++ ) + { + wmove(win, i, 0); + for ( j = 0; j < width; j++ ) + waddch(win, ' '); + } + touchwin(win); } void dialog_clear(void) { - int lines, columns; - - lines = getmaxy(stdscr); - columns = getmaxx(stdscr); - - attr_clear(stdscr, lines, columns, dlg.screen.atr); - /* Display background title if it exists ... - SLH */ - if (dlg.backtitle != NULL) { - int i, len = 0, skip = 0; - struct subtitle_list *pos; - - wattrset(stdscr, dlg.screen.atr); - mvwaddstr(stdscr, 0, 1, (char *)dlg.backtitle); - - for (pos = dlg.subtitles; pos != NULL; pos = pos->next) { - /* 3 is for the arrow and spaces */ - len += strlen(pos->text) + 3; - } - - wmove(stdscr, 1, 1); - if (len > columns - 2) { - const char *ellipsis = "[...] "; - waddstr(stdscr, ellipsis); - skip = len - (columns - 2 - strlen(ellipsis)); - } - - for (pos = dlg.subtitles; pos != NULL; pos = pos->next) { - if (skip == 0) - waddch(stdscr, ACS_RARROW); - else - skip--; - - if (skip == 0) - waddch(stdscr, ' '); - else - skip--; - - if (skip < strlen(pos->text)) { - waddstr(stdscr, pos->text + skip); - skip = 0; - } else - skip -= strlen(pos->text); - - if (skip == 0) - waddch(stdscr, ' '); - else - skip--; - } - - for (i = len + 1; i < columns - 1; i++) - waddch(stdscr, ACS_HLINE); - } - wnoutrefresh(stdscr); + int lines, columns; + + lines = getmaxy(stdscr); + columns = getmaxx(stdscr); + + attr_clear(stdscr, lines, columns, dlg.screen.atr); + /* Display background title if it exists ... - SLH */ + if ( dlg.backtitle != NULL ) + { + int i, len = 0, skip = 0; + struct subtitle_list *pos; + + wattrset(stdscr, dlg.screen.atr); + mvwaddstr(stdscr, 0, 1, (char *)dlg.backtitle); + + for ( pos = dlg.subtitles; pos != NULL; pos = pos->next ) + { + /* 3 is for the arrow and spaces */ + len += strlen(pos->text) + 3; + } + + wmove(stdscr, 1, 1); + if ( len > columns - 2 ) + { + const char *ellipsis = "[...] 
"; + waddstr(stdscr, ellipsis); + skip = len - (columns - 2 - strlen(ellipsis)); + } + + for ( pos = dlg.subtitles; pos != NULL; pos = pos->next ) + { + if ( skip == 0 ) + waddch(stdscr, ACS_RARROW); + else + skip--; + + if ( skip == 0 ) + waddch(stdscr, ' '); + else + skip--; + + if ( skip < strlen(pos->text) ) + { + waddstr(stdscr, pos->text + skip); + skip = 0; + } + else + skip -= strlen(pos->text); + + if ( skip == 0 ) + waddch(stdscr, ' '); + else + skip--; + } + + for ( i = len + 1; i < columns - 1; i++ ) + waddch(stdscr, ACS_HLINE); + } + wnoutrefresh(stdscr); } /* @@ -314,38 +322,39 @@ void dialog_clear(void) */ int init_dialog(const char *backtitle) { - int height, width; + int height, width; - initscr(); /* Init curses */ + initscr(); /* Init curses */ - /* Get current cursor position for signal handler in mconf.c */ - getyx(stdscr, saved_y, saved_x); + /* Get current cursor position for signal handler in mconf.c */ + getyx(stdscr, saved_y, saved_x); - getmaxyx(stdscr, height, width); - if (height < WINDOW_HEIGTH_MIN || width < WINDOW_WIDTH_MIN) { - endwin(); - return -ERRDISPLAYTOOSMALL; - } + getmaxyx(stdscr, height, width); + if ( height < WINDOW_HEIGTH_MIN || width < WINDOW_WIDTH_MIN ) + { + endwin(); + return -ERRDISPLAYTOOSMALL; + } - dlg.backtitle = backtitle; - color_setup(getenv("MENUCONFIG_COLOR")); + dlg.backtitle = backtitle; + color_setup(getenv("MENUCONFIG_COLOR")); - keypad(stdscr, TRUE); - cbreak(); - noecho(); - dialog_clear(); + keypad(stdscr, TRUE); + cbreak(); + noecho(); + dialog_clear(); - return 0; + return 0; } void set_dialog_backtitle(const char *backtitle) { - dlg.backtitle = backtitle; + dlg.backtitle = backtitle; } void set_dialog_subtitles(struct subtitle_list *subtitles) { - dlg.subtitles = subtitles; + dlg.subtitles = subtitles; } /* @@ -353,10 +362,10 @@ void set_dialog_subtitles(struct subtitle_list *subtitles) */ void end_dialog(int x, int y) { - /* move cursor back to original position */ - move(y, x); - refresh(); - endwin(); + /* move cursor back to original position */ + move(y, x); + refresh(); + endwin(); } /* Print the title of the dialog. Center the title and truncate @@ -364,13 +373,14 @@ void end_dialog(int x, int y) **/ void print_title(WINDOW *dialog, const char *title, int width) { - if (title) { - int tlen = MIN(width - 2, strlen(title)); - wattrset(dialog, dlg.title.atr); - mvwaddch(dialog, 0, (width - tlen) / 2 - 1, ' '); - mvwaddnstr(dialog, 0, (width - tlen)/2, title, tlen); - waddch(dialog, ' '); - } + if ( title ) + { + int tlen = MIN(width - 2, strlen(title)); + wattrset(dialog, dlg.title.atr); + mvwaddch(dialog, 0, (width - tlen) / 2 - 1, ' '); + mvwaddnstr(dialog, 0, (width - tlen) / 2, title, tlen); + waddch(dialog, ' '); + } } /* @@ -379,151 +389,160 @@ void print_title(WINDOW *dialog, const char *title, int width) * characters '\n' are propperly processed. We start on a new line * if there is no room for at least 4 nonblanks following a double-space. 
*/ -void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x) -{ - int newl, cur_x, cur_y; - int prompt_len, room, wlen; - char tempstr[MAX_LEN + 1], *word, *sp, *sp2, *newline_separator = 0; - - strcpy(tempstr, prompt); - - prompt_len = strlen(tempstr); - - if (prompt_len <= width - x * 2) { /* If prompt is short */ - wmove(win, y, (width - prompt_len) / 2); - waddstr(win, tempstr); - } else { - cur_x = x; - cur_y = y; - newl = 1; - word = tempstr; - while (word && *word) { - sp = strpbrk(word, "\n "); - if (sp && *sp == '\n') - newline_separator = sp; - - if (sp) - *sp++ = 0; - - /* Wrap to next line if either the word does not fit, - or it is the first word of a new sentence, and it is - short, and the next word does not fit. */ - room = width - cur_x; - wlen = strlen(word); - if (wlen > room || - (newl && wlen < 4 && sp - && wlen + 1 + strlen(sp) > room - && (!(sp2 = strpbrk(sp, "\n ")) - || wlen + 1 + (sp2 - sp) > room))) { - cur_y++; - cur_x = x; - } - wmove(win, cur_y, cur_x); - waddstr(win, word); - getyx(win, cur_y, cur_x); - - /* Move to the next line if the word separator was a newline */ - if (newline_separator) { - cur_y++; - cur_x = x; - newline_separator = 0; - } else - cur_x++; - - if (sp && *sp == ' ') { - cur_x++; /* double space */ - while (*++sp == ' ') ; - newl = 1; - } else - newl = 0; - word = sp; - } - } +void print_autowrap(WINDOW *win, const char *prompt, int width, int y, int x) +{ + int newl, cur_x, cur_y; + int prompt_len, room, wlen; + char tempstr[MAX_LEN + 1], *word, *sp, *sp2, *newline_separator = 0; + + strcpy(tempstr, prompt); + + prompt_len = strlen(tempstr); + + if ( prompt_len <= width - x * 2 ) + { /* If prompt is short */ + wmove(win, y, (width - prompt_len) / 2); + waddstr(win, tempstr); + } + else + { + cur_x = x; + cur_y = y; + newl = 1; + word = tempstr; + while ( word && *word ) + { + sp = strpbrk(word, "\n "); + if ( sp && *sp == '\n' ) + newline_separator = sp; + + if ( sp ) + *sp++ = 0; + + /* Wrap to next line if either the word does not fit, + or it is the first word of a new sentence, and it is + short, and the next word does not fit. */ + room = width - cur_x; + wlen = strlen(word); + if ( wlen > room || + (newl && wlen < 4 && sp && wlen + 1 + strlen(sp) > room && + (!(sp2 = strpbrk(sp, "\n ")) || + wlen + 1 + (sp2 - sp) > room)) ) + { + cur_y++; + cur_x = x; + } + wmove(win, cur_y, cur_x); + waddstr(win, word); + getyx(win, cur_y, cur_x); + + /* Move to the next line if the word separator was a newline */ + if ( newline_separator ) + { + cur_y++; + cur_x = x; + newline_separator = 0; + } + else + cur_x++; + + if ( sp && *sp == ' ' ) + { + cur_x++; /* double space */ + while ( *++sp == ' ' ) + ; + newl = 1; + } + else + newl = 0; + word = sp; + } + } } /* * Print a button */ -void print_button(WINDOW * win, const char *label, int y, int x, int selected) -{ - int i, temp; - - wmove(win, y, x); - wattrset(win, selected ? dlg.button_active.atr - : dlg.button_inactive.atr); - waddstr(win, "<"); - temp = strspn(label, " "); - label += temp; - wattrset(win, selected ? dlg.button_label_active.atr - : dlg.button_label_inactive.atr); - for (i = 0; i < temp; i++) - waddch(win, ' '); - wattrset(win, selected ? dlg.button_key_active.atr - : dlg.button_key_inactive.atr); - waddch(win, label[0]); - wattrset(win, selected ? dlg.button_label_active.atr - : dlg.button_label_inactive.atr); - waddstr(win, (char *)label + 1); - wattrset(win, selected ? 
dlg.button_active.atr - : dlg.button_inactive.atr); - waddstr(win, ">"); - wmove(win, y, x + temp + 1); +void print_button(WINDOW *win, const char *label, int y, int x, int selected) +{ + int i, temp; + + wmove(win, y, x); + wattrset(win, selected ? dlg.button_active.atr : dlg.button_inactive.atr); + waddstr(win, "<"); + temp = strspn(label, " "); + label += temp; + wattrset(win, selected ? dlg.button_label_active.atr + : dlg.button_label_inactive.atr); + for ( i = 0; i < temp; i++ ) + waddch(win, ' '); + wattrset(win, selected ? dlg.button_key_active.atr + : dlg.button_key_inactive.atr); + waddch(win, label[0]); + wattrset(win, selected ? dlg.button_label_active.atr + : dlg.button_label_inactive.atr); + waddstr(win, (char *)label + 1); + wattrset(win, selected ? dlg.button_active.atr : dlg.button_inactive.atr); + waddstr(win, ">"); + wmove(win, y, x + temp + 1); } /* * Draw a rectangular box with line drawing characters */ -void -draw_box(WINDOW * win, int y, int x, int height, int width, - chtype box, chtype border) -{ - int i, j; - - wattrset(win, 0); - for (i = 0; i < height; i++) { - wmove(win, y + i, x); - for (j = 0; j < width; j++) - if (!i && !j) - waddch(win, border | ACS_ULCORNER); - else if (i == height - 1 && !j) - waddch(win, border | ACS_LLCORNER); - else if (!i && j == width - 1) - waddch(win, box | ACS_URCORNER); - else if (i == height - 1 && j == width - 1) - waddch(win, box | ACS_LRCORNER); - else if (!i) - waddch(win, border | ACS_HLINE); - else if (i == height - 1) - waddch(win, box | ACS_HLINE); - else if (!j) - waddch(win, border | ACS_VLINE); - else if (j == width - 1) - waddch(win, box | ACS_VLINE); - else - waddch(win, box | ' '); - } +void draw_box(WINDOW *win, int y, int x, int height, int width, chtype box, + chtype border) +{ + int i, j; + + wattrset(win, 0); + for ( i = 0; i < height; i++ ) + { + wmove(win, y + i, x); + for ( j = 0; j < width; j++ ) + if ( !i && !j ) + waddch(win, border | ACS_ULCORNER); + else if ( i == height - 1 && !j ) + waddch(win, border | ACS_LLCORNER); + else if ( !i && j == width - 1 ) + waddch(win, box | ACS_URCORNER); + else if ( i == height - 1 && j == width - 1 ) + waddch(win, box | ACS_LRCORNER); + else if ( !i ) + waddch(win, border | ACS_HLINE); + else if ( i == height - 1 ) + waddch(win, box | ACS_HLINE); + else if ( !j ) + waddch(win, border | ACS_VLINE); + else if ( j == width - 1 ) + waddch(win, box | ACS_VLINE); + else + waddch(win, box | ' '); + } } /* * Draw shadows along the right and bottom edge to give a more 3D look * to the boxes */ -void draw_shadow(WINDOW * win, int y, int x, int height, int width) -{ - int i; - - if (has_colors()) { /* Whether terminal supports color? */ - wattrset(win, dlg.shadow.atr); - wmove(win, y + height, x + 2); - for (i = 0; i < width; i++) - waddch(win, winch(win) & A_CHARTEXT); - for (i = y + 1; i < y + height + 1; i++) { - wmove(win, i, x + width); - waddch(win, winch(win) & A_CHARTEXT); - waddch(win, winch(win) & A_CHARTEXT); - } - wnoutrefresh(win); - } +void draw_shadow(WINDOW *win, int y, int x, int height, int width) +{ + int i; + + if ( has_colors() ) + { /* Whether terminal supports color? 
*/ + wattrset(win, dlg.shadow.atr); + wmove(win, y + height, x + 2); + for ( i = 0; i < width; i++ ) + waddch(win, winch(win) & A_CHARTEXT); + for ( i = y + 1; i < y + height + 1; i++ ) + { + wmove(win, i, x + width); + waddch(win, winch(win) & A_CHARTEXT); + waddch(win, winch(win) & A_CHARTEXT); + } + wnoutrefresh(win); + } } /* @@ -531,21 +550,22 @@ void draw_shadow(WINDOW * win, int y, int x, int height, int width) */ int first_alpha(const char *string, const char *exempt) { - int i, in_paren = 0, c; + int i, in_paren = 0, c; - for (i = 0; i < strlen(string); i++) { - c = tolower(string[i]); + for ( i = 0; i < strlen(string); i++ ) + { + c = tolower(string[i]); - if (strchr("<[(", c)) - ++in_paren; - if (strchr(">])", c) && in_paren > 0) - --in_paren; + if ( strchr("<[(", c) ) + ++in_paren; + if ( strchr(">])", c) && in_paren > 0 ) + --in_paren; - if ((!in_paren) && isalpha(c) && strchr(exempt, c) == 0) - return i; - } + if ( (!in_paren) && isalpha(c) && strchr(exempt, c) == 0 ) + return i; + } - return 0; + return 0; } /* @@ -559,32 +579,32 @@ int first_alpha(const char *string, const char *exempt) */ int on_key_esc(WINDOW *win) { - int key; - int key2; - int key3; + int key; + int key2; + int key3; - nodelay(win, TRUE); - keypad(win, FALSE); - key = wgetch(win); - key2 = wgetch(win); - do { - key3 = wgetch(win); - } while (key3 != ERR); - nodelay(win, FALSE); - keypad(win, TRUE); - if (key == KEY_ESC && key2 == ERR) - return KEY_ESC; - else if (key != ERR && key != KEY_ESC && key2 == ERR) - ungetch(key); + nodelay(win, TRUE); + keypad(win, FALSE); + key = wgetch(win); + key2 = wgetch(win); + do { + key3 = wgetch(win); + } while ( key3 != ERR ); + nodelay(win, FALSE); + keypad(win, TRUE); + if ( key == KEY_ESC && key2 == ERR ) + return KEY_ESC; + else if ( key != ERR && key != KEY_ESC && key2 == ERR ) + ungetch(key); - return -1; + return -1; } /* redraw screen in new size */ int on_key_resize(void) { - dialog_clear(); - return KEY_RESIZE; + dialog_clear(); + return KEY_RESIZE; } struct dialog_list *item_cur; @@ -593,121 +613,122 @@ struct dialog_list *item_head; void item_reset(void) { - struct dialog_list *p, *next; + struct dialog_list *p, *next; - for (p = item_head; p; p = next) { - next = p->next; - free(p); - } - item_head = NULL; - item_cur = &item_nil; + for ( p = item_head; p; p = next ) + { + next = p->next; + free(p); + } + item_head = NULL; + item_cur = &item_nil; } void item_make(const char *fmt, ...) { - va_list ap; - struct dialog_list *p = malloc(sizeof(*p)); + va_list ap; + struct dialog_list *p = malloc(sizeof(*p)); - if (item_head) - item_cur->next = p; - else - item_head = p; - item_cur = p; - memset(p, 0, sizeof(*p)); + if ( item_head ) + item_cur->next = p; + else + item_head = p; + item_cur = p; + memset(p, 0, sizeof(*p)); - va_start(ap, fmt); - vsnprintf(item_cur->node.str, sizeof(item_cur->node.str), fmt, ap); - va_end(ap); + va_start(ap, fmt); + vsnprintf(item_cur->node.str, sizeof(item_cur->node.str), fmt, ap); + va_end(ap); } void item_add_str(const char *fmt, ...) 
{ - va_list ap; - size_t avail; + va_list ap; + size_t avail; - avail = sizeof(item_cur->node.str) - strlen(item_cur->node.str); + avail = sizeof(item_cur->node.str) - strlen(item_cur->node.str); - va_start(ap, fmt); - vsnprintf(item_cur->node.str + strlen(item_cur->node.str), - avail, fmt, ap); - item_cur->node.str[sizeof(item_cur->node.str) - 1] = '\0'; - va_end(ap); + va_start(ap, fmt); + vsnprintf(item_cur->node.str + strlen(item_cur->node.str), avail, fmt, ap); + item_cur->node.str[sizeof(item_cur->node.str) - 1] = '\0'; + va_end(ap); } void item_set_tag(char tag) { - item_cur->node.tag = tag; + item_cur->node.tag = tag; } void item_set_data(void *ptr) { - item_cur->node.data = ptr; + item_cur->node.data = ptr; } void item_set_selected(int val) { - item_cur->node.selected = val; + item_cur->node.selected = val; } int item_activate_selected(void) { - item_foreach() - if (item_is_selected()) - return 1; - return 0; + item_foreach () + if ( item_is_selected() ) + return 1; + return 0; } void *item_data(void) { - return item_cur->node.data; + return item_cur->node.data; } char item_tag(void) { - return item_cur->node.tag; + return item_cur->node.tag; } int item_count(void) { - int n = 0; - struct dialog_list *p; + int n = 0; + struct dialog_list *p; - for (p = item_head; p; p = p->next) - n++; - return n; + for ( p = item_head; p; p = p->next ) + n++; + return n; } void item_set(int n) { - int i = 0; - item_foreach() - if (i++ == n) - return; + int i = 0; + item_foreach () + if ( i++ == n ) + return; } int item_n(void) { - int n = 0; - struct dialog_list *p; + int n = 0; + struct dialog_list *p; - for (p = item_head; p; p = p->next) { - if (p == item_cur) - return n; - n++; - } - return 0; + for ( p = item_head; p; p = p->next ) + { + if ( p == item_cur ) + return n; + n++; + } + return 0; } const char *item_str(void) { - return item_cur->node.str; + return item_cur->node.str; } int item_is_selected(void) { - return (item_cur->node.selected != 0); + return (item_cur->node.selected != 0); } int item_is_tag(char tag) { - return (item_cur->node.tag == tag); + return (item_cur->node.tag == tag); } diff --git a/xen/tools/kconfig/lxdialog/yesno.c b/xen/tools/kconfig/lxdialog/yesno.c index 676fb2f824..64fa9d5202 100644 --- a/xen/tools/kconfig/lxdialog/yesno.c +++ b/xen/tools/kconfig/lxdialog/yesno.c @@ -24,16 +24,16 @@ /* * Display termination buttons */ -static void print_buttons(WINDOW * dialog, int height, int width, int selected) +static void print_buttons(WINDOW *dialog, int height, int width, int selected) { - int x = width / 2 - 10; - int y = height - 2; + int x = width / 2 - 10; + int y = height - 2; - print_button(dialog, gettext(" Yes "), y, x, selected == 0); - print_button(dialog, gettext(" No "), y, x + 13, selected == 1); + print_button(dialog, gettext(" Yes "), y, x, selected == 0); + print_button(dialog, gettext(" No "), y, x + 13, selected == 1); - wmove(dialog, y, x + 1 + 13 * selected); - wrefresh(dialog); + wmove(dialog, y, x + 1 + 13 * selected); + wrefresh(dialog); } /* @@ -41,74 +41,77 @@ static void print_buttons(WINDOW * dialog, int height, int width, int selected) */ int dialog_yesno(const char *title, const char *prompt, int height, int width) { - int i, x, y, key = 0, button = 0; - WINDOW *dialog; + int i, x, y, key = 0, button = 0; + WINDOW *dialog; do_resize: - if (getmaxy(stdscr) < (height + YESNO_HEIGTH_MIN)) - return -ERRDISPLAYTOOSMALL; - if (getmaxx(stdscr) < (width + YESNO_WIDTH_MIN)) - return -ERRDISPLAYTOOSMALL; - - /* center dialog box on screen */ - x = 
(getmaxx(stdscr) - width) / 2; - y = (getmaxy(stdscr) - height) / 2; - - draw_shadow(stdscr, y, x, height, width); - - dialog = newwin(height, width, y, x); - keypad(dialog, TRUE); - - draw_box(dialog, 0, 0, height, width, - dlg.dialog.atr, dlg.border.atr); - wattrset(dialog, dlg.border.atr); - mvwaddch(dialog, height - 3, 0, ACS_LTEE); - for (i = 0; i < width - 2; i++) - waddch(dialog, ACS_HLINE); - wattrset(dialog, dlg.dialog.atr); - waddch(dialog, ACS_RTEE); - - print_title(dialog, title, width); - - wattrset(dialog, dlg.dialog.atr); - print_autowrap(dialog, prompt, width - 2, 1, 3); - - print_buttons(dialog, height, width, 0); - - while (key != KEY_ESC) { - key = wgetch(dialog); - switch (key) { - case 'Y': - case 'y': - delwin(dialog); - return 0; - case 'N': - case 'n': - delwin(dialog); - return 1; - - case TAB: - case KEY_LEFT: - case KEY_RIGHT: - button = ((key == KEY_LEFT ? --button : ++button) < 0) ? 1 : (button > 1 ? 0 : button); - - print_buttons(dialog, height, width, button); - wrefresh(dialog); - break; - case ' ': - case '\n': - delwin(dialog); - return button; - case KEY_ESC: - key = on_key_esc(dialog); - break; - case KEY_RESIZE: - delwin(dialog); - on_key_resize(); - goto do_resize; - } - } - - delwin(dialog); - return key; /* ESC pressed */ + if ( getmaxy(stdscr) < (height + YESNO_HEIGTH_MIN) ) + return -ERRDISPLAYTOOSMALL; + if ( getmaxx(stdscr) < (width + YESNO_WIDTH_MIN) ) + return -ERRDISPLAYTOOSMALL; + + /* center dialog box on screen */ + x = (getmaxx(stdscr) - width) / 2; + y = (getmaxy(stdscr) - height) / 2; + + draw_shadow(stdscr, y, x, height, width); + + dialog = newwin(height, width, y, x); + keypad(dialog, TRUE); + + draw_box(dialog, 0, 0, height, width, dlg.dialog.atr, dlg.border.atr); + wattrset(dialog, dlg.border.atr); + mvwaddch(dialog, height - 3, 0, ACS_LTEE); + for ( i = 0; i < width - 2; i++ ) + waddch(dialog, ACS_HLINE); + wattrset(dialog, dlg.dialog.atr); + waddch(dialog, ACS_RTEE); + + print_title(dialog, title, width); + + wattrset(dialog, dlg.dialog.atr); + print_autowrap(dialog, prompt, width - 2, 1, 3); + + print_buttons(dialog, height, width, 0); + + while ( key != KEY_ESC ) + { + key = wgetch(dialog); + switch (key) + { + case 'Y': + case 'y': + delwin(dialog); + return 0; + case 'N': + case 'n': + delwin(dialog); + return 1; + + case TAB: + case KEY_LEFT: + case KEY_RIGHT: + button = ((key == KEY_LEFT ? --button : ++button) < 0) + ? 1 + : (button > 1 ? 0 : button); + + print_buttons(dialog, height, width, button); + wrefresh(dialog); + break; + case ' ': + case '\n': + delwin(dialog); + return button; + case KEY_ESC: + key = on_key_esc(dialog); + break; + case KEY_RESIZE: + delwin(dialog); + on_key_resize(); + goto do_resize; + } + } + + delwin(dialog); + return key; /* ESC pressed */ } diff --git a/xen/tools/kconfig/mconf.c b/xen/tools/kconfig/mconf.c index 315ce2c7cb..f5c8b88382 100644 --- a/xen/tools/kconfig/mconf.c +++ b/xen/tools/kconfig/mconf.c @@ -22,256 +22,271 @@ #include "lkc.h" #include "lxdialog/dialog.h" -static const char mconf_readme[] = N_( -"Overview\n" -"--------\n" -"This interface lets you select features and parameters for the build.\n" -"Features can either be built-in, modularized, or ignored. 
Parameters\n" -"must be entered in as decimal or hexadecimal numbers or text.\n" -"\n" -"Menu items beginning with following braces represent features that\n" -" [ ] can be built in or removed\n" -" < > can be built in, modularized or removed\n" -" { } can be built in or modularized (selected by other feature)\n" -" - - are selected by other feature,\n" -"while *, M or whitespace inside braces means to build in, build as\n" -"a module or to exclude the feature respectively.\n" -"\n" -"To change any of these features, highlight it with the cursor\n" -"keys and press to build it in, to make it a module or\n" -" to remove it. You may also press the to cycle\n" -"through the available options (i.e. Y->N->M->Y).\n" -"\n" -"Some additional keyboard hints:\n" -"\n" -"Menus\n" -"----------\n" -"o Use the Up/Down arrow keys (cursor keys) to highlight the item you\n" -" wish to change or the submenu you wish to select and press .\n" -" Submenus are designated by \"--->\", empty ones by \"----\".\n" -"\n" -" Shortcut: Press the option's highlighted letter (hotkey).\n" -" Pressing a hotkey more than once will sequence\n" -" through all visible items which use that hotkey.\n" -"\n" -" You may also use the and keys to scroll\n" -" unseen options into view.\n" -"\n" -"o To exit a menu use the cursor keys to highlight the button\n" -" and press .\n" -"\n" -" Shortcut: Press or or if there is no hotkey\n" -" using those letters. You may press a single , but\n" -" there is a delayed response which you may find annoying.\n" -"\n" -" Also, the and cursor keys will cycle between and\n" -" \n" -"\n" -"\n" -"Data Entry\n" -"-----------\n" -"o Enter the requested information and press \n" -" If you are entering hexadecimal values, it is not necessary to\n" -" add the '0x' prefix to the entry.\n" -"\n" -"o For help, use the or cursor keys to highlight the help option\n" -" and press . You can try as well.\n" -"\n" -"\n" -"Text Box (Help Window)\n" -"--------\n" -"o Use the cursor keys to scroll up/down/left/right. The VI editor\n" -" keys h,j,k,l function here as do , , and for\n" -" those who are familiar with less and lynx.\n" -"\n" -"o Press , , , or to exit.\n" -"\n" -"\n" -"Alternate Configuration Files\n" -"-----------------------------\n" -"Menuconfig supports the use of alternate configuration files for\n" -"those who, for various reasons, find it necessary to switch\n" -"between different configurations.\n" -"\n" -"The button will let you save the current configuration to\n" -"a file of your choosing. Use the button to load a previously\n" -"saved alternate configuration.\n" -"\n" -"Even if you don't use alternate configuration files, but you find\n" -"during a Menuconfig session that you have completely messed up your\n" -"settings, you may use the button to restore your previously\n" -"saved settings from \".config\" without restarting Menuconfig.\n" -"\n" -"Other information\n" -"-----------------\n" -"If you use Menuconfig in an XTERM window, make sure you have your\n" -"$TERM variable set to point to an xterm definition which supports\n" -"color. Otherwise, Menuconfig will look rather bad. Menuconfig will\n" -"not display correctly in an RXVT window because rxvt displays only one\n" -"intensity of color, bright.\n" -"\n" -"Menuconfig will display larger menus on screens or xterms which are\n" -"set to display more than the standard 25 row by 80 column geometry.\n" -"In order for this to work, the \"stty size\" command must be able to\n" -"display the screen's current row and column geometry. 
I STRONGLY\n" -"RECOMMEND that you make sure you do NOT have the shell variables\n" -"LINES and COLUMNS exported into your environment. Some distributions\n" -"export those variables via /etc/profile. Some ncurses programs can\n" -"become confused when those variables (LINES & COLUMNS) don't reflect\n" -"the true screen size.\n" -"\n" -"Optional personality available\n" -"------------------------------\n" -"If you prefer to have all of the options listed in a single menu,\n" -"rather than the default multimenu hierarchy, run the menuconfig with\n" -"MENUCONFIG_MODE environment variable set to single_menu. Example:\n" -"\n" -"make MENUCONFIG_MODE=single_menu menuconfig\n" -"\n" -" will then unroll the appropriate category, or enfold it if it\n" -"is already unrolled.\n" -"\n" -"Note that this mode can eventually be a little more CPU expensive\n" -"(especially with a larger number of unrolled categories) than the\n" -"default mode.\n" -"\n" -"Different color themes available\n" -"--------------------------------\n" -"It is possible to select different color themes using the variable\n" -"MENUCONFIG_COLOR. To select a theme use:\n" -"\n" -"make MENUCONFIG_COLOR= menuconfig\n" -"\n" -"Available themes are\n" -" mono => selects colors suitable for monochrome displays\n" -" blackbg => selects a color scheme with black background\n" -" classic => theme with blue background. The classic look\n" -" bluetitle => an LCD friendly version of classic. (default)\n" -"\n"), -menu_instructions[] = N_( - "Arrow keys navigate the menu. " - " selects submenus ---> (or empty submenus ----). " - "Highlighted letters are hotkeys. " - "Pressing includes, excludes, modularizes features. " - "Press to exit, for Help, for Search. " - "Legend: [*] built-in [ ] excluded module < > module capable"), -radiolist_instructions[] = N_( - "Use the arrow keys to navigate this window or " - "press the hotkey of the item you wish to select " - "followed by the . " - "Press for additional information about this option."), -inputbox_instructions_int[] = N_( - "Please enter a decimal value. " - "Fractions will not be accepted. " - "Use the key to move from the input field to the buttons below it."), -inputbox_instructions_hex[] = N_( - "Please enter a hexadecimal value. " - "Use the key to move from the input field to the buttons below it."), -inputbox_instructions_string[] = N_( - "Please enter a string value. " - "Use the key to move from the input field to the buttons below it."), -setmod_text[] = N_( - "This feature depends on another which has been configured as a module.\n" - "As a result, this feature will be built as a module."), -load_config_text[] = N_( - "Enter the name of the configuration file you wish to load. " - "Accept the name shown to restore the configuration you " - "last retrieved. Leave blank to abort."), -load_config_help[] = N_( - "\n" - "For various reasons, one may wish to keep several different\n" - "configurations available on a single machine.\n" - "\n" - "If you have saved a previous configuration in a file other than the\n" - "default one, entering its name here will allow you to modify that\n" - "configuration.\n" - "\n" - "If you are uncertain, then you have probably never used alternate\n" - "configuration files. You should therefore leave this blank to abort.\n"), -save_config_text[] = N_( - "Enter a filename to which this configuration should be saved " - "as an alternate. 
Leave blank to abort."), -save_config_help[] = N_( - "\n" - "For various reasons, one may wish to keep different configurations\n" - "available on a single machine.\n" - "\n" - "Entering a file name here will allow you to later retrieve, modify\n" - "and use the current configuration as an alternate to whatever\n" - "configuration options you have selected at that time.\n" - "\n" - "If you are uncertain what all this means then you should probably\n" - "leave this blank.\n"), -search_help[] = N_( - "\n" - "Search for symbols and display their relations.\n" - "Regular expressions are allowed.\n" - "Example: search for \"^FOO\"\n" - "Result:\n" - "-----------------------------------------------------------------\n" - "Symbol: FOO [=m]\n" - "Type : tristate\n" - "Prompt: Foo bus is used to drive the bar HW\n" - " Location:\n" - " -> Bus options (PCI, PCMCIA, EISA, ISA)\n" - " -> PCI support (PCI [=y])\n" - "(1) -> PCI access mode ( [=y])\n" - " Defined at drivers/pci/Kconfig:47\n" - " Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n" - " Selects: LIBCRC32\n" - " Selected by: BAR [=n]\n" - "-----------------------------------------------------------------\n" - "o The line 'Type:' shows the type of the configuration option for\n" - " this symbol (boolean, tristate, string, ...)\n" - "o The line 'Prompt:' shows the text used in the menu structure for\n" - " this symbol\n" - "o The 'Defined at' line tells at what file / line number the symbol\n" - " is defined\n" - "o The 'Depends on:' line tells what symbols need to be defined for\n" - " this symbol to be visible in the menu (selectable)\n" - "o The 'Location:' lines tells where in the menu structure this symbol\n" - " is located\n" - " A location followed by a [=y] indicates that this is a\n" - " selectable menu item - and the current value is displayed inside\n" - " brackets.\n" - " Press the key in the (#) prefix to jump directly to that\n" - " location. You will be returned to the current search results\n" - " after exiting this new menu.\n" - "o The 'Selects:' line tells what symbols will be automatically\n" - " selected if this symbol is selected (y or m)\n" - "o The 'Selected by' line tells what symbol has selected this symbol\n" - "\n" - "Only relevant lines are shown.\n" - "\n\n" - "Search examples:\n" - "Examples: USB => find all symbols containing USB\n" - " ^USB => find all symbols starting with USB\n" - " USB$ => find all symbols ending with USB\n" - "\n"); +static const char + mconf_readme[] = N_( + "Overview\n" + "--------\n" + "This interface lets you select features and parameters for the " + "build.\n" + "Features can either be built-in, modularized, or ignored. Parameters\n" + "must be entered in as decimal or hexadecimal numbers or text.\n" + "\n" + "Menu items beginning with following braces represent features that\n" + " [ ] can be built in or removed\n" + " < > can be built in, modularized or removed\n" + " { } can be built in or modularized (selected by other feature)\n" + " - - are selected by other feature,\n" + "while *, M or whitespace inside braces means to build in, build as\n" + "a module or to exclude the feature respectively.\n" + "\n" + "To change any of these features, highlight it with the cursor\n" + "keys and press to build it in, to make it a module or\n" + " to remove it. You may also press the to cycle\n" + "through the available options (i.e. 
Y->N->M->Y).\n" + "\n" + "Some additional keyboard hints:\n" + "\n" + "Menus\n" + "----------\n" + "o Use the Up/Down arrow keys (cursor keys) to highlight the item " + "you\n" + " wish to change or the submenu you wish to select and press " + ".\n" + " Submenus are designated by \"--->\", empty ones by \"----\".\n" + "\n" + " Shortcut: Press the option's highlighted letter (hotkey).\n" + " Pressing a hotkey more than once will sequence\n" + " through all visible items which use that hotkey.\n" + "\n" + " You may also use the and keys to scroll\n" + " unseen options into view.\n" + "\n" + "o To exit a menu use the cursor keys to highlight the button\n" + " and press .\n" + "\n" + " Shortcut: Press or or if there is no hotkey\n" + " using those letters. You may press a single , but\n" + " there is a delayed response which you may find " + "annoying.\n" + "\n" + " Also, the and cursor keys will cycle between and\n" + " \n" + "\n" + "\n" + "Data Entry\n" + "-----------\n" + "o Enter the requested information and press \n" + " If you are entering hexadecimal values, it is not necessary to\n" + " add the '0x' prefix to the entry.\n" + "\n" + "o For help, use the or cursor keys to highlight the help " + "option\n" + " and press . You can try as well.\n" + "\n" + "\n" + "Text Box (Help Window)\n" + "--------\n" + "o Use the cursor keys to scroll up/down/left/right. The VI editor\n" + " keys h,j,k,l function here as do , , and " + "for\n" + " those who are familiar with less and lynx.\n" + "\n" + "o Press , , , or to exit.\n" + "\n" + "\n" + "Alternate Configuration Files\n" + "-----------------------------\n" + "Menuconfig supports the use of alternate configuration files for\n" + "those who, for various reasons, find it necessary to switch\n" + "between different configurations.\n" + "\n" + "The button will let you save the current configuration to\n" + "a file of your choosing. Use the button to load a previously\n" + "saved alternate configuration.\n" + "\n" + "Even if you don't use alternate configuration files, but you find\n" + "during a Menuconfig session that you have completely messed up your\n" + "settings, you may use the button to restore your previously\n" + "saved settings from \".config\" without restarting Menuconfig.\n" + "\n" + "Other information\n" + "-----------------\n" + "If you use Menuconfig in an XTERM window, make sure you have your\n" + "$TERM variable set to point to an xterm definition which supports\n" + "color. Otherwise, Menuconfig will look rather bad. Menuconfig will\n" + "not display correctly in an RXVT window because rxvt displays only " + "one\n" + "intensity of color, bright.\n" + "\n" + "Menuconfig will display larger menus on screens or xterms which are\n" + "set to display more than the standard 25 row by 80 column geometry.\n" + "In order for this to work, the \"stty size\" command must be able to\n" + "display the screen's current row and column geometry. I STRONGLY\n" + "RECOMMEND that you make sure you do NOT have the shell variables\n" + "LINES and COLUMNS exported into your environment. Some " + "distributions\n" + "export those variables via /etc/profile. 
Some ncurses programs can\n" + "become confused when those variables (LINES & COLUMNS) don't reflect\n" + "the true screen size.\n" + "\n" + "Optional personality available\n" + "------------------------------\n" + "If you prefer to have all of the options listed in a single menu,\n" + "rather than the default multimenu hierarchy, run the menuconfig with\n" + "MENUCONFIG_MODE environment variable set to single_menu. Example:\n" + "\n" + "make MENUCONFIG_MODE=single_menu menuconfig\n" + "\n" + " will then unroll the appropriate category, or enfold it if " + "it\n" + "is already unrolled.\n" + "\n" + "Note that this mode can eventually be a little more CPU expensive\n" + "(especially with a larger number of unrolled categories) than the\n" + "default mode.\n" + "\n" + "Different color themes available\n" + "--------------------------------\n" + "It is possible to select different color themes using the variable\n" + "MENUCONFIG_COLOR. To select a theme use:\n" + "\n" + "make MENUCONFIG_COLOR= menuconfig\n" + "\n" + "Available themes are\n" + " mono => selects colors suitable for monochrome displays\n" + " blackbg => selects a color scheme with black background\n" + " classic => theme with blue background. The classic look\n" + " bluetitle => an LCD friendly version of classic. (default)\n" + "\n"), + menu_instructions[] = N_( + "Arrow keys navigate the menu. " + " selects submenus ---> (or empty submenus ----). " + "Highlighted letters are hotkeys. " + "Pressing includes, excludes, modularizes features. " + "Press to exit, for Help, for Search. " + "Legend: [*] built-in [ ] excluded module < > module capable"), + radiolist_instructions[] = + N_("Use the arrow keys to navigate this window or " + "press the hotkey of the item you wish to select " + "followed by the . " + "Press for additional information about this option."), + inputbox_instructions_int[] = N_("Please enter a decimal value. " + "Fractions will not be accepted. " + "Use the key to move from the input " + "field to the buttons below it."), + inputbox_instructions_hex[] = N_("Please enter a hexadecimal value. " + "Use the key to move from the input " + "field to the buttons below it."), + inputbox_instructions_string[] = N_("Please enter a string value. " + "Use the key to move from the " + "input field to the buttons below it."), + setmod_text[] = N_("This feature depends on another which has been " + "configured as a module.\n" + "As a result, this feature will be built as a module."), + load_config_text[] = + N_("Enter the name of the configuration file you wish to load. " + "Accept the name shown to restore the configuration you " + "last retrieved. Leave blank to abort."), + load_config_help[] = N_( + "\n" + "For various reasons, one may wish to keep several different\n" + "configurations available on a single machine.\n" + "\n" + "If you have saved a previous configuration in a file other than the\n" + "default one, entering its name here will allow you to modify that\n" + "configuration.\n" + "\n" + "If you are uncertain, then you have probably never used alternate\n" + "configuration files. You should therefore leave this blank to " + "abort.\n"), + save_config_text[] = + N_("Enter a filename to which this configuration should be saved " + "as an alternate. 
Leave blank to abort."), + save_config_help[] = N_( + "\n" + "For various reasons, one may wish to keep different configurations\n" + "available on a single machine.\n" + "\n" + "Entering a file name here will allow you to later retrieve, modify\n" + "and use the current configuration as an alternate to whatever\n" + "configuration options you have selected at that time.\n" + "\n" + "If you are uncertain what all this means then you should probably\n" + "leave this blank.\n"), + search_help[] = N_( + "\n" + "Search for symbols and display their relations.\n" + "Regular expressions are allowed.\n" + "Example: search for \"^FOO\"\n" + "Result:\n" + "-----------------------------------------------------------------\n" + "Symbol: FOO [=m]\n" + "Type : tristate\n" + "Prompt: Foo bus is used to drive the bar HW\n" + " Location:\n" + " -> Bus options (PCI, PCMCIA, EISA, ISA)\n" + " -> PCI support (PCI [=y])\n" + "(1) -> PCI access mode ( [=y])\n" + " Defined at drivers/pci/Kconfig:47\n" + " Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n" + " Selects: LIBCRC32\n" + " Selected by: BAR [=n]\n" + "-----------------------------------------------------------------\n" + "o The line 'Type:' shows the type of the configuration option for\n" + " this symbol (boolean, tristate, string, ...)\n" + "o The line 'Prompt:' shows the text used in the menu structure for\n" + " this symbol\n" + "o The 'Defined at' line tells at what file / line number the symbol\n" + " is defined\n" + "o The 'Depends on:' line tells what symbols need to be defined for\n" + " this symbol to be visible in the menu (selectable)\n" + "o The 'Location:' lines tells where in the menu structure this " + "symbol\n" + " is located\n" + " A location followed by a [=y] indicates that this is a\n" + " selectable menu item - and the current value is displayed inside\n" + " brackets.\n" + " Press the key in the (#) prefix to jump directly to that\n" + " location. 
You will be returned to the current search results\n" + " after exiting this new menu.\n" + "o The 'Selects:' line tells what symbols will be automatically\n" + " selected if this symbol is selected (y or m)\n" + "o The 'Selected by' line tells what symbol has selected this symbol\n" + "\n" + "Only relevant lines are shown.\n" + "\n\n" + "Search examples:\n" + "Examples: USB => find all symbols containing USB\n" + " ^USB => find all symbols starting with USB\n" + " USB$ => find all symbols ending with USB\n" + "\n"); static int indent; static struct menu *current_menu; @@ -287,761 +302,843 @@ static void conf_string(struct menu *menu); static void conf_load(void); static void conf_save(void); static int show_textbox_ext(const char *title, char *text, int r, int c, - int *keys, int *vscroll, int *hscroll, - update_text_fn update_text, void *data); + int *keys, int *vscroll, int *hscroll, + update_text_fn update_text, void *data); static void show_textbox(const char *title, const char *text, int r, int c); static void show_helptext(const char *title, const char *text); static void show_help(struct menu *menu); -static char filename[PATH_MAX+1]; +static char filename[PATH_MAX + 1]; static void set_config_filename(const char *config_filename) { - static char menu_backtitle[PATH_MAX+128]; - int size; - - size = snprintf(menu_backtitle, sizeof(menu_backtitle), - "%s - %s", config_filename, rootmenu.prompt->text); - if (size >= sizeof(menu_backtitle)) - menu_backtitle[sizeof(menu_backtitle)-1] = '\0'; - set_dialog_backtitle(menu_backtitle); - - size = snprintf(filename, sizeof(filename), "%s", config_filename); - if (size >= sizeof(filename)) - filename[sizeof(filename)-1] = '\0'; + static char menu_backtitle[PATH_MAX + 128]; + int size; + + size = snprintf(menu_backtitle, sizeof(menu_backtitle), "%s - %s", + config_filename, rootmenu.prompt->text); + if ( size >= sizeof(menu_backtitle) ) + menu_backtitle[sizeof(menu_backtitle) - 1] = '\0'; + set_dialog_backtitle(menu_backtitle); + + size = snprintf(filename, sizeof(filename), "%s", config_filename); + if ( size >= sizeof(filename) ) + filename[sizeof(filename) - 1] = '\0'; } -struct subtitle_part { - struct list_head entries; - const char *text; +struct subtitle_part +{ + struct list_head entries; + const char *text; }; static LIST_HEAD(trail); static struct subtitle_list *subtitles; static void set_subtitle(void) { - struct subtitle_part *sp; - struct subtitle_list *pos, *tmp; - - for (pos = subtitles; pos != NULL; pos = tmp) { - tmp = pos->next; - free(pos); - } - - subtitles = NULL; - list_for_each_entry(sp, &trail, entries) { - if (sp->text) { - if (pos) { - pos->next = xcalloc(1, sizeof(*pos)); - pos = pos->next; - } else { - subtitles = pos = xcalloc(1, sizeof(*pos)); - } - pos->text = sp->text; - } - } - - set_dialog_subtitles(subtitles); + struct subtitle_part *sp; + struct subtitle_list *pos, *tmp; + + for ( pos = subtitles; pos != NULL; pos = tmp ) + { + tmp = pos->next; + free(pos); + } + + subtitles = NULL; + list_for_each_entry (sp, &trail, entries) + { + if ( sp->text ) + { + if ( pos ) + { + pos->next = xcalloc(1, sizeof(*pos)); + pos = pos->next; + } + else + { + subtitles = pos = xcalloc(1, sizeof(*pos)); + } + pos->text = sp->text; + } + } + + set_dialog_subtitles(subtitles); } static void reset_subtitle(void) { - struct subtitle_list *pos, *tmp; - - for (pos = subtitles; pos != NULL; pos = tmp) { - tmp = pos->next; - free(pos); - } - subtitles = NULL; - set_dialog_subtitles(subtitles); + struct subtitle_list *pos, *tmp; + + 
for ( pos = subtitles; pos != NULL; pos = tmp ) + { + tmp = pos->next; + free(pos); + } + subtitles = NULL; + set_dialog_subtitles(subtitles); } -struct search_data { - struct list_head *head; - struct menu **targets; - int *keys; +struct search_data +{ + struct list_head *head; + struct menu **targets; + int *keys; }; static void update_text(char *buf, size_t start, size_t end, void *_data) { - struct search_data *data = _data; - struct jump_key *pos; - int k = 0; - - list_for_each_entry(pos, data->head, entries) { - if (pos->offset >= start && pos->offset < end) { - char header[4]; - - if (k < JUMP_NB) { - int key = '0' + (pos->index % JUMP_NB) + 1; - - sprintf(header, "(%c)", key); - data->keys[k] = key; - data->targets[k] = pos->target; - k++; - } else { - sprintf(header, " "); - } - - memcpy(buf + pos->offset, header, sizeof(header) - 1); - } - } - data->keys[k] = 0; + struct search_data *data = _data; + struct jump_key *pos; + int k = 0; + + list_for_each_entry (pos, data->head, entries) + { + if ( pos->offset >= start && pos->offset < end ) + { + char header[4]; + + if ( k < JUMP_NB ) + { + int key = '0' + (pos->index % JUMP_NB) + 1; + + sprintf(header, "(%c)", key); + data->keys[k] = key; + data->targets[k] = pos->target; + k++; + } + else + { + sprintf(header, " "); + } + + memcpy(buf + pos->offset, header, sizeof(header) - 1); + } + } + data->keys[k] = 0; } static void search_conf(void) { - struct symbol **sym_arr; - struct gstr res; - struct gstr title; - char *dialog_input; - int dres, vscroll = 0, hscroll = 0; - bool again; - struct gstr sttext; - struct subtitle_part stpart; - - title = str_new(); - str_printf( &title, _("Enter (sub)string or regexp to search for " - "(with or without \"%s\")"), CONFIG_); + struct symbol **sym_arr; + struct gstr res; + struct gstr title; + char *dialog_input; + int dres, vscroll = 0, hscroll = 0; + bool again; + struct gstr sttext; + struct subtitle_part stpart; + + title = str_new(); + str_printf(&title, + _("Enter (sub)string or regexp to search for " + "(with or without \"%s\")"), + CONFIG_); again: - dialog_clear(); - dres = dialog_inputbox(_("Search Configuration Parameter"), - str_get(&title), - 10, 75, ""); - switch (dres) { - case 0: - break; - case 1: - show_helptext(_("Search Configuration"), search_help); - goto again; - default: - str_free(&title); - return; - } - - /* strip the prefix if necessary */ - dialog_input = dialog_input_result; - if (strncasecmp(dialog_input_result, CONFIG_, strlen(CONFIG_)) == 0) - dialog_input += strlen(CONFIG_); - - sttext = str_new(); - str_printf(&sttext, "Search (%s)", dialog_input_result); - stpart.text = str_get(&sttext); - list_add_tail(&stpart.entries, &trail); - - sym_arr = sym_re_search(dialog_input); - do { - LIST_HEAD(head); - struct menu *targets[JUMP_NB]; - int keys[JUMP_NB + 1], i; - struct search_data data = { - .head = &head, - .targets = targets, - .keys = keys, - }; - struct jump_key *pos, *tmp; - - res = get_relations_str(sym_arr, &head); - set_subtitle(); - dres = show_textbox_ext(_("Search Results"), (char *) - str_get(&res), 0, 0, keys, &vscroll, - &hscroll, &update_text, (void *) - &data); - again = false; - for (i = 0; i < JUMP_NB && keys[i]; i++) - if (dres == keys[i]) { - conf(targets[i]->parent, targets[i]); - again = true; - } - str_free(&res); - list_for_each_entry_safe(pos, tmp, &head, entries) - free(pos); - } while (again); - free(sym_arr); - str_free(&title); - list_del(trail.prev); - str_free(&sttext); + dialog_clear(); + dres = dialog_inputbox(_("Search 
Configuration Parameter"), str_get(&title), + 10, 75, ""); + switch (dres) + { + case 0: + break; + case 1: + show_helptext(_("Search Configuration"), search_help); + goto again; + default: + str_free(&title); + return; + } + + /* strip the prefix if necessary */ + dialog_input = dialog_input_result; + if ( strncasecmp(dialog_input_result, CONFIG_, strlen(CONFIG_)) == 0 ) + dialog_input += strlen(CONFIG_); + + sttext = str_new(); + str_printf(&sttext, "Search (%s)", dialog_input_result); + stpart.text = str_get(&sttext); + list_add_tail(&stpart.entries, &trail); + + sym_arr = sym_re_search(dialog_input); + do { + LIST_HEAD(head); + struct menu *targets[JUMP_NB]; + int keys[JUMP_NB + 1], i; + struct search_data data = { + .head = &head, + .targets = targets, + .keys = keys, + }; + struct jump_key *pos, *tmp; + + res = get_relations_str(sym_arr, &head); + set_subtitle(); + dres = show_textbox_ext(_("Search Results"), (char *)str_get(&res), 0, + 0, keys, &vscroll, &hscroll, &update_text, + (void *)&data); + again = false; + for ( i = 0; i < JUMP_NB && keys[i]; i++ ) + if ( dres == keys[i] ) + { + conf(targets[i]->parent, targets[i]); + again = true; + } + str_free(&res); + list_for_each_entry_safe(pos, tmp, &head, entries) free(pos); + } while ( again ); + free(sym_arr); + str_free(&title); + list_del(trail.prev); + str_free(&sttext); } static void build_conf(struct menu *menu) { - struct symbol *sym; - struct property *prop; - struct menu *child; - int type, tmp, doint = 2; - tristate val; - char ch; - bool visible; - - /* - * note: menu_is_visible() has side effect that it will - * recalc the value of the symbol. - */ - visible = menu_is_visible(menu); - if (show_all_options && !menu_has_prompt(menu)) - return; - else if (!show_all_options && !visible) - return; - - sym = menu->sym; - prop = menu->prompt; - if (!sym) { - if (prop && menu != current_menu) { - const char *prompt = menu_get_prompt(menu); - switch (prop->type) { - case P_MENU: - child_count++; - prompt = _(prompt); - if (single_menu_mode) { - item_make("%s%*c%s", - menu->data ? "-->" : "++>", - indent + 1, ' ', prompt); - } else - item_make(" %*c%s %s", - indent + 1, ' ', prompt, - menu_is_empty(menu) ? "----" : "--->"); - item_set_tag('m'); - item_set_data(menu); - if (single_menu_mode && menu->data) - goto conf_childs; - return; - case P_COMMENT: - if (prompt) { - child_count++; - item_make(" %*c*** %s ***", indent + 1, ' ', _(prompt)); - item_set_tag(':'); - item_set_data(menu); - } - break; - default: - if (prompt) { - child_count++; - item_make("---%*c%s", indent + 1, ' ', _(prompt)); - item_set_tag(':'); - item_set_data(menu); - } - } - } else - doint = 0; - goto conf_childs; - } - - type = sym_get_type(sym); - if (sym_is_choice(sym)) { - struct symbol *def_sym = sym_get_choice_value(sym); - struct menu *def_menu = NULL; - - child_count++; - for (child = menu->list; child; child = child->next) { - if (menu_is_visible(child) && child->sym == def_sym) - def_menu = child; - } - - val = sym_get_tristate_value(sym); - if (sym_is_changable(sym)) { - switch (type) { - case S_BOOLEAN: - item_make("[%c]", val == no ? ' ' : '*'); - break; - case S_TRISTATE: - switch (val) { - case yes: ch = '*'; break; - case mod: ch = 'M'; break; - default: ch = ' '; break; - } - item_make("<%c>", ch); - break; - } - item_set_tag('t'); - item_set_data(menu); - } else { - item_make(" "); - item_set_tag(def_menu ? 
't' : ':'); - item_set_data(menu); - } - - item_add_str("%*c%s", indent + 1, ' ', _(menu_get_prompt(menu))); - if (val == yes) { - if (def_menu) { - item_add_str(" (%s)", _(menu_get_prompt(def_menu))); - item_add_str(" --->"); - if (def_menu->list) { - indent += 2; - build_conf(def_menu); - indent -= 2; - } - } - return; - } - } else { - if (menu == current_menu) { - item_make("---%*c%s", indent + 1, ' ', _(menu_get_prompt(menu))); - item_set_tag(':'); - item_set_data(menu); - goto conf_childs; - } - child_count++; - val = sym_get_tristate_value(sym); - if (sym_is_choice_value(sym) && val == yes) { - item_make(" "); - item_set_tag(':'); - item_set_data(menu); - } else { - switch (type) { - case S_BOOLEAN: - if (sym_is_changable(sym)) - item_make("[%c]", val == no ? ' ' : '*'); - else - item_make("-%c-", val == no ? ' ' : '*'); - item_set_tag('t'); - item_set_data(menu); - break; - case S_TRISTATE: - switch (val) { - case yes: ch = '*'; break; - case mod: ch = 'M'; break; - default: ch = ' '; break; - } - if (sym_is_changable(sym)) { - if (sym->rev_dep.tri == mod) - item_make("{%c}", ch); - else - item_make("<%c>", ch); - } else - item_make("-%c-", ch); - item_set_tag('t'); - item_set_data(menu); - break; - default: - tmp = 2 + strlen(sym_get_string_value(sym)); /* () = 2 */ - item_make("(%s)", sym_get_string_value(sym)); - tmp = indent - tmp + 4; - if (tmp < 0) - tmp = 0; - item_add_str("%*c%s%s", tmp, ' ', _(menu_get_prompt(menu)), - (sym_has_value(sym) || !sym_is_changable(sym)) ? - "" : _(" (NEW)")); - item_set_tag('s'); - item_set_data(menu); - goto conf_childs; - } - } - item_add_str("%*c%s%s", indent + 1, ' ', _(menu_get_prompt(menu)), - (sym_has_value(sym) || !sym_is_changable(sym)) ? - "" : _(" (NEW)")); - if (menu->prompt->type == P_MENU) { - item_add_str(" %s", menu_is_empty(menu) ? "----" : "--->"); - return; - } - } + struct symbol *sym; + struct property *prop; + struct menu *child; + int type, tmp, doint = 2; + tristate val; + char ch; + bool visible; + + /* + * note: menu_is_visible() has side effect that it will + * recalc the value of the symbol. + */ + visible = menu_is_visible(menu); + if ( show_all_options && !menu_has_prompt(menu) ) + return; + else if ( !show_all_options && !visible ) + return; + + sym = menu->sym; + prop = menu->prompt; + if ( !sym ) + { + if ( prop && menu != current_menu ) + { + const char *prompt = menu_get_prompt(menu); + switch (prop->type) + { + case P_MENU: + child_count++; + prompt = _(prompt); + if ( single_menu_mode ) + { + item_make("%s%*c%s", menu->data ? "-->" : "++>", indent + 1, + ' ', prompt); + } + else + item_make(" %*c%s %s", indent + 1, ' ', prompt, + menu_is_empty(menu) ? 
"----" : "--->"); + item_set_tag('m'); + item_set_data(menu); + if ( single_menu_mode && menu->data ) + goto conf_childs; + return; + case P_COMMENT: + if ( prompt ) + { + child_count++; + item_make(" %*c*** %s ***", indent + 1, ' ', _(prompt)); + item_set_tag(':'); + item_set_data(menu); + } + break; + default: + if ( prompt ) + { + child_count++; + item_make("---%*c%s", indent + 1, ' ', _(prompt)); + item_set_tag(':'); + item_set_data(menu); + } + } + } + else + doint = 0; + goto conf_childs; + } + + type = sym_get_type(sym); + if ( sym_is_choice(sym) ) + { + struct symbol *def_sym = sym_get_choice_value(sym); + struct menu *def_menu = NULL; + + child_count++; + for ( child = menu->list; child; child = child->next ) + { + if ( menu_is_visible(child) && child->sym == def_sym ) + def_menu = child; + } + + val = sym_get_tristate_value(sym); + if ( sym_is_changable(sym) ) + { + switch (type) + { + case S_BOOLEAN: + item_make("[%c]", val == no ? ' ' : '*'); + break; + case S_TRISTATE: + switch (val) + { + case yes: + ch = '*'; + break; + case mod: + ch = 'M'; + break; + default: + ch = ' '; + break; + } + item_make("<%c>", ch); + break; + } + item_set_tag('t'); + item_set_data(menu); + } + else + { + item_make(" "); + item_set_tag(def_menu ? 't' : ':'); + item_set_data(menu); + } + + item_add_str("%*c%s", indent + 1, ' ', _(menu_get_prompt(menu))); + if ( val == yes ) + { + if ( def_menu ) + { + item_add_str(" (%s)", _(menu_get_prompt(def_menu))); + item_add_str(" --->"); + if ( def_menu->list ) + { + indent += 2; + build_conf(def_menu); + indent -= 2; + } + } + return; + } + } + else + { + if ( menu == current_menu ) + { + item_make("---%*c%s", indent + 1, ' ', _(menu_get_prompt(menu))); + item_set_tag(':'); + item_set_data(menu); + goto conf_childs; + } + child_count++; + val = sym_get_tristate_value(sym); + if ( sym_is_choice_value(sym) && val == yes ) + { + item_make(" "); + item_set_tag(':'); + item_set_data(menu); + } + else + { + switch (type) + { + case S_BOOLEAN: + if ( sym_is_changable(sym) ) + item_make("[%c]", val == no ? ' ' : '*'); + else + item_make("-%c-", val == no ? ' ' : '*'); + item_set_tag('t'); + item_set_data(menu); + break; + case S_TRISTATE: + switch (val) + { + case yes: + ch = '*'; + break; + case mod: + ch = 'M'; + break; + default: + ch = ' '; + break; + } + if ( sym_is_changable(sym) ) + { + if ( sym->rev_dep.tri == mod ) + item_make("{%c}", ch); + else + item_make("<%c>", ch); + } + else + item_make("-%c-", ch); + item_set_tag('t'); + item_set_data(menu); + break; + default: + tmp = 2 + strlen(sym_get_string_value(sym)); /* () = 2 */ + item_make("(%s)", sym_get_string_value(sym)); + tmp = indent - tmp + 4; + if ( tmp < 0 ) + tmp = 0; + item_add_str("%*c%s%s", tmp, ' ', _(menu_get_prompt(menu)), + (sym_has_value(sym) || !sym_is_changable(sym)) + ? "" + : _(" (NEW)")); + item_set_tag('s'); + item_set_data(menu); + goto conf_childs; + } + } + item_add_str( + "%*c%s%s", indent + 1, ' ', _(menu_get_prompt(menu)), + (sym_has_value(sym) || !sym_is_changable(sym)) ? "" : _(" (NEW)")); + if ( menu->prompt->type == P_MENU ) + { + item_add_str(" %s", menu_is_empty(menu) ? 
"----" : "--->"); + return; + } + } conf_childs: - indent += doint; - for (child = menu->list; child; child = child->next) - build_conf(child); - indent -= doint; + indent += doint; + for ( child = menu->list; child; child = child->next ) + build_conf(child); + indent -= doint; } static void conf(struct menu *menu, struct menu *active_menu) { - struct menu *submenu; - const char *prompt = menu_get_prompt(menu); - struct subtitle_part stpart; - struct symbol *sym; - int res; - int s_scroll = 0; - - if (menu != &rootmenu) - stpart.text = menu_get_prompt(menu); - else - stpart.text = NULL; - list_add_tail(&stpart.entries, &trail); - - while (1) { - item_reset(); - current_menu = menu; - build_conf(menu); - if (!child_count) - break; - set_subtitle(); - dialog_clear(); - res = dialog_menu(prompt ? _(prompt) : _("Main Menu"), - _(menu_instructions), - active_menu, &s_scroll); - if (res == 1 || res == KEY_ESC || res == -ERRDISPLAYTOOSMALL) - break; - if (item_count() != 0) { - if (!item_activate_selected()) - continue; - if (!item_tag()) - continue; - } - submenu = item_data(); - active_menu = item_data(); - if (submenu) - sym = submenu->sym; - else - sym = NULL; - - switch (res) { - case 0: - switch (item_tag()) { - case 'm': - if (single_menu_mode) - submenu->data = (void *) (long) !submenu->data; - else - conf(submenu, NULL); - break; - case 't': - if (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes) - conf_choice(submenu); - else if (submenu->prompt->type == P_MENU) - conf(submenu, NULL); - break; - case 's': - conf_string(submenu); - break; - } - break; - case 2: - if (sym) - show_help(submenu); - else { - reset_subtitle(); - show_helptext(_("README"), _(mconf_readme)); - } - break; - case 3: - reset_subtitle(); - conf_save(); - break; - case 4: - reset_subtitle(); - conf_load(); - break; - case 5: - if (item_is_tag('t')) { - if (sym_set_tristate_value(sym, yes)) - break; - if (sym_set_tristate_value(sym, mod)) - show_textbox(NULL, setmod_text, 6, 74); - } - break; - case 6: - if (item_is_tag('t')) - sym_set_tristate_value(sym, no); - break; - case 7: - if (item_is_tag('t')) - sym_set_tristate_value(sym, mod); - break; - case 8: - if (item_is_tag('t')) - sym_toggle_tristate_value(sym); - else if (item_is_tag('m')) - conf(submenu, NULL); - break; - case 9: - search_conf(); - break; - case 10: - show_all_options = !show_all_options; - break; - } - } - - list_del(trail.prev); + struct menu *submenu; + const char *prompt = menu_get_prompt(menu); + struct subtitle_part stpart; + struct symbol *sym; + int res; + int s_scroll = 0; + + if ( menu != &rootmenu ) + stpart.text = menu_get_prompt(menu); + else + stpart.text = NULL; + list_add_tail(&stpart.entries, &trail); + + while ( 1 ) + { + item_reset(); + current_menu = menu; + build_conf(menu); + if ( !child_count ) + break; + set_subtitle(); + dialog_clear(); + res = dialog_menu(prompt ? 
_(prompt) : _("Main Menu"), + _(menu_instructions), active_menu, &s_scroll); + if ( res == 1 || res == KEY_ESC || res == -ERRDISPLAYTOOSMALL ) + break; + if ( item_count() != 0 ) + { + if ( !item_activate_selected() ) + continue; + if ( !item_tag() ) + continue; + } + submenu = item_data(); + active_menu = item_data(); + if ( submenu ) + sym = submenu->sym; + else + sym = NULL; + + switch (res) + { + case 0: + switch (item_tag()) + { + case 'm': + if ( single_menu_mode ) + submenu->data = (void *)(long)!submenu->data; + else + conf(submenu, NULL); + break; + case 't': + if ( sym_is_choice(sym) && sym_get_tristate_value(sym) == yes ) + conf_choice(submenu); + else if ( submenu->prompt->type == P_MENU ) + conf(submenu, NULL); + break; + case 's': + conf_string(submenu); + break; + } + break; + case 2: + if ( sym ) + show_help(submenu); + else + { + reset_subtitle(); + show_helptext(_("README"), _(mconf_readme)); + } + break; + case 3: + reset_subtitle(); + conf_save(); + break; + case 4: + reset_subtitle(); + conf_load(); + break; + case 5: + if ( item_is_tag('t') ) + { + if ( sym_set_tristate_value(sym, yes) ) + break; + if ( sym_set_tristate_value(sym, mod) ) + show_textbox(NULL, setmod_text, 6, 74); + } + break; + case 6: + if ( item_is_tag('t') ) + sym_set_tristate_value(sym, no); + break; + case 7: + if ( item_is_tag('t') ) + sym_set_tristate_value(sym, mod); + break; + case 8: + if ( item_is_tag('t') ) + sym_toggle_tristate_value(sym); + else if ( item_is_tag('m') ) + conf(submenu, NULL); + break; + case 9: + search_conf(); + break; + case 10: + show_all_options = !show_all_options; + break; + } + } + + list_del(trail.prev); } -static int show_textbox_ext(const char *title, char *text, int r, int c, int - *keys, int *vscroll, int *hscroll, update_text_fn - update_text, void *data) +static int show_textbox_ext(const char *title, char *text, int r, int c, + int *keys, int *vscroll, int *hscroll, + update_text_fn update_text, void *data) { - dialog_clear(); - return dialog_textbox(title, text, r, c, keys, vscroll, hscroll, - update_text, data); + dialog_clear(); + return dialog_textbox(title, text, r, c, keys, vscroll, hscroll, + update_text, data); } static void show_textbox(const char *title, const char *text, int r, int c) { - show_textbox_ext(title, (char *) text, r, c, (int []) {0}, NULL, NULL, - NULL, NULL); + show_textbox_ext(title, (char *)text, r, c, (int[]){0}, NULL, NULL, NULL, + NULL); } static void show_helptext(const char *title, const char *text) { - show_textbox(title, text, 0, 0); + show_textbox(title, text, 0, 0); } static void conf_message_callback(const char *fmt, va_list ap) { - char buf[PATH_MAX+1]; - - vsnprintf(buf, sizeof(buf), fmt, ap); - if (save_and_exit) { - if (!silent) - printf("%s", buf); - } else { - show_textbox(NULL, buf, 6, 60); - } + char buf[PATH_MAX + 1]; + + vsnprintf(buf, sizeof(buf), fmt, ap); + if ( save_and_exit ) + { + if ( !silent ) + printf("%s", buf); + } + else + { + show_textbox(NULL, buf, 6, 60); + } } static void show_help(struct menu *menu) { - struct gstr help = str_new(); + struct gstr help = str_new(); - help.max_width = getmaxx(stdscr) - 10; - menu_get_ext_help(menu, &help); + help.max_width = getmaxx(stdscr) - 10; + menu_get_ext_help(menu, &help); - show_helptext(_(menu_get_prompt(menu)), str_get(&help)); - str_free(&help); + show_helptext(_(menu_get_prompt(menu)), str_get(&help)); + str_free(&help); } static void conf_choice(struct menu *menu) { - const char *prompt = _(menu_get_prompt(menu)); - struct menu *child; - struct symbol 
*active; - - active = sym_get_choice_value(menu->sym); - while (1) { - int res; - int selected; - item_reset(); - - current_menu = menu; - for (child = menu->list; child; child = child->next) { - if (!menu_is_visible(child)) - continue; - if (child->sym) - item_make("%s", _(menu_get_prompt(child))); - else { - item_make("*** %s ***", _(menu_get_prompt(child))); - item_set_tag(':'); - } - item_set_data(child); - if (child->sym == active) - item_set_selected(1); - if (child->sym == sym_get_choice_value(menu->sym)) - item_set_tag('X'); - } - dialog_clear(); - res = dialog_checklist(prompt ? _(prompt) : _("Main Menu"), - _(radiolist_instructions), - MENUBOX_HEIGTH_MIN, - MENUBOX_WIDTH_MIN, - CHECKLIST_HEIGTH_MIN); - selected = item_activate_selected(); - switch (res) { - case 0: - if (selected) { - child = item_data(); - if (!child->sym) - break; - - sym_set_tristate_value(child->sym, yes); - } - return; - case 1: - if (selected) { - child = item_data(); - show_help(child); - active = child->sym; - } else - show_help(menu); - break; - case KEY_ESC: - return; - case -ERRDISPLAYTOOSMALL: - return; - } - } + const char *prompt = _(menu_get_prompt(menu)); + struct menu *child; + struct symbol *active; + + active = sym_get_choice_value(menu->sym); + while ( 1 ) + { + int res; + int selected; + item_reset(); + + current_menu = menu; + for ( child = menu->list; child; child = child->next ) + { + if ( !menu_is_visible(child) ) + continue; + if ( child->sym ) + item_make("%s", _(menu_get_prompt(child))); + else + { + item_make("*** %s ***", _(menu_get_prompt(child))); + item_set_tag(':'); + } + item_set_data(child); + if ( child->sym == active ) + item_set_selected(1); + if ( child->sym == sym_get_choice_value(menu->sym) ) + item_set_tag('X'); + } + dialog_clear(); + res = dialog_checklist(prompt ? _(prompt) : _("Main Menu"), + _(radiolist_instructions), MENUBOX_HEIGTH_MIN, + MENUBOX_WIDTH_MIN, CHECKLIST_HEIGTH_MIN); + selected = item_activate_selected(); + switch (res) + { + case 0: + if ( selected ) + { + child = item_data(); + if ( !child->sym ) + break; + + sym_set_tristate_value(child->sym, yes); + } + return; + case 1: + if ( selected ) + { + child = item_data(); + show_help(child); + active = child->sym; + } + else + show_help(menu); + break; + case KEY_ESC: + return; + case -ERRDISPLAYTOOSMALL: + return; + } + } } static void conf_string(struct menu *menu) { - const char *prompt = menu_get_prompt(menu); - - while (1) { - int res; - const char *heading; - - switch (sym_get_type(menu->sym)) { - case S_INT: - heading = _(inputbox_instructions_int); - break; - case S_HEX: - heading = _(inputbox_instructions_hex); - break; - case S_STRING: - heading = _(inputbox_instructions_string); - break; - default: - heading = _("Internal mconf error!"); - } - dialog_clear(); - res = dialog_inputbox(prompt ? 
_(prompt) : _("Main Menu"), - heading, 10, 75, - sym_get_string_value(menu->sym)); - switch (res) { - case 0: - if (sym_set_string_value(menu->sym, dialog_input_result)) - return; - show_textbox(NULL, _("You have made an invalid entry."), 5, 43); - break; - case 1: - show_help(menu); - break; - case KEY_ESC: - return; - } - } + const char *prompt = menu_get_prompt(menu); + + while ( 1 ) + { + int res; + const char *heading; + + switch (sym_get_type(menu->sym)) + { + case S_INT: + heading = _(inputbox_instructions_int); + break; + case S_HEX: + heading = _(inputbox_instructions_hex); + break; + case S_STRING: + heading = _(inputbox_instructions_string); + break; + default: + heading = _("Internal mconf error!"); + } + dialog_clear(); + res = dialog_inputbox(prompt ? _(prompt) : _("Main Menu"), heading, 10, + 75, sym_get_string_value(menu->sym)); + switch (res) + { + case 0: + if ( sym_set_string_value(menu->sym, dialog_input_result) ) + return; + show_textbox(NULL, _("You have made an invalid entry."), 5, 43); + break; + case 1: + show_help(menu); + break; + case KEY_ESC: + return; + } + } } static void conf_load(void) { - - while (1) { - int res; - dialog_clear(); - res = dialog_inputbox(NULL, load_config_text, - 11, 55, filename); - switch(res) { - case 0: - if (!dialog_input_result[0]) - return; - if (!conf_read(dialog_input_result)) { - set_config_filename(dialog_input_result); - sym_set_change_count(1); - return; - } - show_textbox(NULL, _("File does not exist!"), 5, 38); - break; - case 1: - show_helptext(_("Load Alternate Configuration"), load_config_help); - break; - case KEY_ESC: - return; - } - } + while ( 1 ) + { + int res; + dialog_clear(); + res = dialog_inputbox(NULL, load_config_text, 11, 55, filename); + switch (res) + { + case 0: + if ( !dialog_input_result[0] ) + return; + if ( !conf_read(dialog_input_result) ) + { + set_config_filename(dialog_input_result); + sym_set_change_count(1); + return; + } + show_textbox(NULL, _("File does not exist!"), 5, 38); + break; + case 1: + show_helptext(_("Load Alternate Configuration"), load_config_help); + break; + case KEY_ESC: + return; + } + } } static void conf_save(void) { - while (1) { - int res; - dialog_clear(); - res = dialog_inputbox(NULL, save_config_text, - 11, 55, filename); - switch(res) { - case 0: - if (!dialog_input_result[0]) - return; - if (!conf_write(dialog_input_result)) { - set_config_filename(dialog_input_result); - return; - } - show_textbox(NULL, _("Can't create file! Probably a nonexistent directory."), 5, 60); - break; - case 1: - show_helptext(_("Save Alternate Configuration"), save_config_help); - break; - case KEY_ESC: - return; - } - } + while ( 1 ) + { + int res; + dialog_clear(); + res = dialog_inputbox(NULL, save_config_text, 11, 55, filename); + switch (res) + { + case 0: + if ( !dialog_input_result[0] ) + return; + if ( !conf_write(dialog_input_result) ) + { + set_config_filename(dialog_input_result); + return; + } + show_textbox( + NULL, + _("Can't create file! 
Probably a nonexistent directory."), 5, + 60); + break; + case 1: + show_helptext(_("Save Alternate Configuration"), save_config_help); + break; + case KEY_ESC: + return; + } + } } static int handle_exit(void) { - int res; - - save_and_exit = 1; - reset_subtitle(); - dialog_clear(); - if (conf_get_changed()) - res = dialog_yesno(NULL, - _("Do you wish to save your new configuration?\n" - "(Press to continue kernel configuration.)"), - 6, 60); - else - res = -1; - - end_dialog(saved_x, saved_y); - - switch (res) { - case 0: - if (conf_write(filename)) { - fprintf(stderr, _("\n\n" - "Error while writing of the configuration.\n" - "Your configuration changes were NOT saved." - "\n\n")); - return 1; - } - /* fall through */ - case -1: - if (!silent) - printf(_("\n\n" - "*** End of the configuration.\n" - "*** Execute 'make' to start the build or try 'make help'." - "\n\n")); - res = 0; - break; - default: - if (!silent) - fprintf(stderr, _("\n\n" - "Your configuration changes were NOT saved." - "\n\n")); - if (res != KEY_ESC) - res = 0; - } - - return res; + int res; + + save_and_exit = 1; + reset_subtitle(); + dialog_clear(); + if ( conf_get_changed() ) + res = dialog_yesno( + NULL, + _("Do you wish to save your new configuration?\n" + "(Press to continue kernel configuration.)"), + 6, 60); + else + res = -1; + + end_dialog(saved_x, saved_y); + + switch (res) + { + case 0: + if ( conf_write(filename) ) + { + fprintf(stderr, _("\n\n" + "Error while writing of the configuration.\n" + "Your configuration changes were NOT saved." + "\n\n")); + return 1; + } + /* fall through */ + case -1: + if ( !silent ) + printf(_("\n\n" + "*** End of the configuration.\n" + "*** Execute 'make' to start the build or try 'make help'." + "\n\n")); + res = 0; + break; + default: + if ( !silent ) + fprintf(stderr, _("\n\n" + "Your configuration changes were NOT saved." 
+ "\n\n")); + if ( res != KEY_ESC ) + res = 0; + } + + return res; } static void sig_handler(int signo) { - exit(handle_exit()); + exit(handle_exit()); } int main(int ac, char **av) { - char *mode; - int res; - - setlocale(LC_ALL, ""); - bindtextdomain(PACKAGE, LOCALEDIR); - textdomain(PACKAGE); - - signal(SIGINT, sig_handler); - - if (ac > 1 && strcmp(av[1], "-s") == 0) { - silent = 1; - /* Silence conf_read() until the real callback is set up */ - conf_set_message_callback(NULL); - av++; - } - conf_parse(av[1]); - conf_read(NULL); - - mode = getenv("MENUCONFIG_MODE"); - if (mode) { - if (!strcasecmp(mode, "single_menu")) - single_menu_mode = 1; - } - - if (init_dialog(NULL)) { - fprintf(stderr, N_("Your display is too small to run Menuconfig!\n")); - fprintf(stderr, N_("It must be at least 19 lines by 80 columns.\n")); - return 1; - } - - set_config_filename(conf_get_configname()); - conf_set_message_callback(conf_message_callback); - do { - conf(&rootmenu, NULL); - res = handle_exit(); - } while (res == KEY_ESC); - - return res; + char *mode; + int res; + + setlocale(LC_ALL, ""); + bindtextdomain(PACKAGE, LOCALEDIR); + textdomain(PACKAGE); + + signal(SIGINT, sig_handler); + + if ( ac > 1 && strcmp(av[1], "-s") == 0 ) + { + silent = 1; + /* Silence conf_read() until the real callback is set up */ + conf_set_message_callback(NULL); + av++; + } + conf_parse(av[1]); + conf_read(NULL); + + mode = getenv("MENUCONFIG_MODE"); + if ( mode ) + { + if ( !strcasecmp(mode, "single_menu") ) + single_menu_mode = 1; + } + + if ( init_dialog(NULL) ) + { + fprintf(stderr, N_("Your display is too small to run Menuconfig!\n")); + fprintf(stderr, N_("It must be at least 19 lines by 80 columns.\n")); + return 1; + } + + set_config_filename(conf_get_configname()); + conf_set_message_callback(conf_message_callback); + do { + conf(&rootmenu, NULL); + res = handle_exit(); + } while ( res == KEY_ESC ); + + return res; } diff --git a/xen/tools/kconfig/menu.c b/xen/tools/kconfig/menu.c index b05cc3d4a9..8abafd3a3d 100644 --- a/xen/tools/kconfig/menu.c +++ b/xen/tools/kconfig/menu.c @@ -20,46 +20,46 @@ struct file *current_file; void menu_warn(struct menu *menu, const char *fmt, ...) { - va_list ap; - va_start(ap, fmt); - fprintf(stderr, "%s:%d:warning: ", menu->file->name, menu->lineno); - vfprintf(stderr, fmt, ap); - fprintf(stderr, "\n"); - va_end(ap); + va_list ap; + va_start(ap, fmt); + fprintf(stderr, "%s:%d:warning: ", menu->file->name, menu->lineno); + vfprintf(stderr, fmt, ap); + fprintf(stderr, "\n"); + va_end(ap); } static void prop_warn(struct property *prop, const char *fmt, ...) 
{ - va_list ap; - va_start(ap, fmt); - fprintf(stderr, "%s:%d:warning: ", prop->file->name, prop->lineno); - vfprintf(stderr, fmt, ap); - fprintf(stderr, "\n"); - va_end(ap); + va_list ap; + va_start(ap, fmt); + fprintf(stderr, "%s:%d:warning: ", prop->file->name, prop->lineno); + vfprintf(stderr, fmt, ap); + fprintf(stderr, "\n"); + va_end(ap); } void _menu_init(void) { - current_entry = current_menu = &rootmenu; - last_entry_ptr = &rootmenu.list; + current_entry = current_menu = &rootmenu; + last_entry_ptr = &rootmenu.list; } void menu_add_entry(struct symbol *sym) { - struct menu *menu; + struct menu *menu; - menu = xmalloc(sizeof(*menu)); - memset(menu, 0, sizeof(*menu)); - menu->sym = sym; - menu->parent = current_menu; - menu->file = current_file; - menu->lineno = zconf_lineno(); + menu = xmalloc(sizeof(*menu)); + memset(menu, 0, sizeof(*menu)); + menu->sym = sym; + menu->parent = current_menu; + menu->file = current_file; + menu->lineno = zconf_lineno(); - *last_entry_ptr = menu; - last_entry_ptr = &menu->next; - current_entry = menu; - if (sym) - menu_add_symbol(P_SYMBOL, sym, NULL); + *last_entry_ptr = menu; + last_entry_ptr = &menu->next; + current_entry = menu; + if ( sym ) + menu_add_symbol(P_SYMBOL, sym, NULL); } void menu_end_entry(void) @@ -68,386 +68,421 @@ void menu_end_entry(void) struct menu *menu_add_menu(void) { - menu_end_entry(); - last_entry_ptr = &current_entry->list; - return current_menu = current_entry; + menu_end_entry(); + last_entry_ptr = &current_entry->list; + return current_menu = current_entry; } void menu_end_menu(void) { - last_entry_ptr = &current_menu->next; - current_menu = current_menu->parent; + last_entry_ptr = &current_menu->next; + current_menu = current_menu->parent; } static struct expr *menu_check_dep(struct expr *e) { - if (!e) - return e; - - switch (e->type) { - case E_NOT: - e->left.expr = menu_check_dep(e->left.expr); - break; - case E_OR: - case E_AND: - e->left.expr = menu_check_dep(e->left.expr); - e->right.expr = menu_check_dep(e->right.expr); - break; - case E_SYMBOL: - /* change 'm' into 'm' && MODULES */ - if (e->left.sym == &symbol_mod) - return expr_alloc_and(e, expr_alloc_symbol(modules_sym)); - break; - default: - break; - } - return e; + if ( !e ) + return e; + + switch (e->type) + { + case E_NOT: + e->left.expr = menu_check_dep(e->left.expr); + break; + case E_OR: + case E_AND: + e->left.expr = menu_check_dep(e->left.expr); + e->right.expr = menu_check_dep(e->right.expr); + break; + case E_SYMBOL: + /* change 'm' into 'm' && MODULES */ + if ( e->left.sym == &symbol_mod ) + return expr_alloc_and(e, expr_alloc_symbol(modules_sym)); + break; + default: + break; + } + return e; } void menu_add_dep(struct expr *dep) { - current_entry->dep = expr_alloc_and(current_entry->dep, menu_check_dep(dep)); + current_entry->dep = + expr_alloc_and(current_entry->dep, menu_check_dep(dep)); } void menu_set_type(int type) { - struct symbol *sym = current_entry->sym; - - if (sym->type == type) - return; - if (sym->type == S_UNKNOWN) { - sym->type = type; - return; - } - menu_warn(current_entry, - "ignoring type redefinition of '%s' from '%s' to '%s'", - sym->name ?
sym->name : "", - sym_type_name(sym->type), sym_type_name(type)); -} - -static struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *expr, struct expr *dep) -{ - struct property *prop = prop_alloc(type, current_entry->sym); - - prop->menu = current_entry; - prop->expr = expr; - prop->visible.expr = menu_check_dep(dep); - - if (prompt) { - if (isspace(*prompt)) { - prop_warn(prop, "leading whitespace ignored"); - while (isspace(*prompt)) - prompt++; - } - if (current_entry->prompt && current_entry != &rootmenu) - prop_warn(prop, "prompt redefined"); - - /* Apply all upper menus' visibilities to actual prompts. */ - if(type == P_PROMPT) { - struct menu *menu = current_entry; - - while ((menu = menu->parent) != NULL) { - struct expr *dup_expr; - - if (!menu->visibility) - continue; - /* - * Do not add a reference to the - * menu's visibility expression but - * use a copy of it. Otherwise the - * expression reduction functions - * will modify expressions that have - * multiple references which can - * cause unwanted side effects. - */ - dup_expr = expr_copy(menu->visibility); - - prop->visible.expr - = expr_alloc_and(prop->visible.expr, - dup_expr); - } - } - - current_entry->prompt = prop; - } - prop->text = prompt; - - return prop; -} - -struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr *dep) -{ - return menu_add_prop(type, prompt, NULL, dep); + struct symbol *sym = current_entry->sym; + + if ( sym->type == type ) + return; + if ( sym->type == S_UNKNOWN ) + { + sym->type = type; + return; + } + menu_warn(current_entry, + "ignoring type redefinition of '%s' from '%s' to '%s'", + sym->name ? sym->name : "", sym_type_name(sym->type), + sym_type_name(type)); +} + +static struct property *menu_add_prop(enum prop_type type, char *prompt, + struct expr *expr, struct expr *dep) +{ + struct property *prop = prop_alloc(type, current_entry->sym); + + prop->menu = current_entry; + prop->expr = expr; + prop->visible.expr = menu_check_dep(dep); + + if ( prompt ) + { + if ( isspace(*prompt) ) + { + prop_warn(prop, "leading whitespace ignored"); + while ( isspace(*prompt) ) + prompt++; + } + if ( current_entry->prompt && current_entry != &rootmenu ) + prop_warn(prop, "prompt redefined"); + + /* Apply all upper menus' visibilities to actual prompts. */ + if ( type == P_PROMPT ) + { + struct menu *menu = current_entry; + + while ( (menu = menu->parent) != NULL ) + { + struct expr *dup_expr; + + if ( !menu->visibility ) + continue; + /* + * Do not add a reference to the + * menu's visibility expression but + * use a copy of it. Otherwise the + * expression reduction functions + * will modify expressions that have + * multiple references which can + * cause unwanted side effects. 
+ */ + dup_expr = expr_copy(menu->visibility); + + prop->visible.expr = + expr_alloc_and(prop->visible.expr, dup_expr); + } + } + + current_entry->prompt = prop; + } + prop->text = prompt; + + return prop; +} + +struct property *menu_add_prompt(enum prop_type type, char *prompt, + struct expr *dep) +{ + return menu_add_prop(type, prompt, NULL, dep); } void menu_add_visibility(struct expr *expr) { - current_entry->visibility = expr_alloc_and(current_entry->visibility, - expr); + current_entry->visibility = expr_alloc_and(current_entry->visibility, expr); } void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep) { - menu_add_prop(type, NULL, expr, dep); + menu_add_prop(type, NULL, expr, dep); } void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep) { - menu_add_prop(type, NULL, expr_alloc_symbol(sym), dep); + menu_add_prop(type, NULL, expr_alloc_symbol(sym), dep); } void menu_add_option(int token, char *arg) { - switch (token) { - case T_OPT_MODULES: - if (modules_sym) - zconf_error("symbol '%s' redefines option 'modules'" - " already defined by symbol '%s'", - current_entry->sym->name, - modules_sym->name - ); - modules_sym = current_entry->sym; - break; - case T_OPT_DEFCONFIG_LIST: - if (!sym_defconfig_list) - sym_defconfig_list = current_entry->sym; - else if (sym_defconfig_list != current_entry->sym) - zconf_error("trying to redefine defconfig symbol"); - break; - case T_OPT_ENV: - prop_add_env(arg); - break; - case T_OPT_ALLNOCONFIG_Y: - current_entry->sym->flags |= SYMBOL_ALLNOCONFIG_Y; - break; - } + switch (token) + { + case T_OPT_MODULES: + if ( modules_sym ) + zconf_error("symbol '%s' redefines option 'modules'" + " already defined by symbol '%s'", + current_entry->sym->name, modules_sym->name); + modules_sym = current_entry->sym; + break; + case T_OPT_DEFCONFIG_LIST: + if ( !sym_defconfig_list ) + sym_defconfig_list = current_entry->sym; + else if ( sym_defconfig_list != current_entry->sym ) + zconf_error("trying to redefine defconfig symbol"); + break; + case T_OPT_ENV: + prop_add_env(arg); + break; + case T_OPT_ALLNOCONFIG_Y: + current_entry->sym->flags |= SYMBOL_ALLNOCONFIG_Y; + break; + } } static int menu_validate_number(struct symbol *sym, struct symbol *sym2) { - return sym2->type == S_INT || sym2->type == S_HEX || - (sym2->type == S_UNKNOWN && sym_string_valid(sym, sym2->name)); + return sym2->type == S_INT || sym2->type == S_HEX || + (sym2->type == S_UNKNOWN && sym_string_valid(sym, sym2->name)); } static void sym_check_prop(struct symbol *sym) { - struct property *prop; - struct symbol *sym2; - for (prop = sym->prop; prop; prop = prop->next) { - switch (prop->type) { - case P_DEFAULT: - if ((sym->type == S_STRING || sym->type == S_INT || sym->type == S_HEX) && - prop->expr->type != E_SYMBOL) - prop_warn(prop, - "default for config symbol '%s'" - " must be a single symbol", sym->name); - if (prop->expr->type != E_SYMBOL) - break; - sym2 = prop_get_symbol(prop); - if (sym->type == S_HEX || sym->type == S_INT) { - if (!menu_validate_number(sym, sym2)) - prop_warn(prop, - "'%s': number is invalid", - sym->name); - } - break; - case P_SELECT: - sym2 = prop_get_symbol(prop); - if (sym->type != S_BOOLEAN && sym->type != S_TRISTATE) - prop_warn(prop, - "config symbol '%s' uses select, but is " - "not boolean or tristate", sym->name); - else if (sym2->type != S_UNKNOWN && - sym2->type != S_BOOLEAN && - sym2->type != S_TRISTATE) - prop_warn(prop, - "'%s' has wrong type. 
'select' only " - "accept arguments of boolean and " - "tristate type", sym2->name); - break; - case P_RANGE: - if (sym->type != S_INT && sym->type != S_HEX) - prop_warn(prop, "range is only allowed " - "for int or hex symbols"); - if (!menu_validate_number(sym, prop->expr->left.sym) || - !menu_validate_number(sym, prop->expr->right.sym)) - prop_warn(prop, "range is invalid"); - break; - default: - ; - } - } + struct property *prop; + struct symbol *sym2; + for ( prop = sym->prop; prop; prop = prop->next ) + { + switch (prop->type) + { + case P_DEFAULT: + if ( (sym->type == S_STRING || sym->type == S_INT || + sym->type == S_HEX) && + prop->expr->type != E_SYMBOL ) + prop_warn(prop, + "default for config symbol '%s'" + " must be a single symbol", + sym->name); + if ( prop->expr->type != E_SYMBOL ) + break; + sym2 = prop_get_symbol(prop); + if ( sym->type == S_HEX || sym->type == S_INT ) + { + if ( !menu_validate_number(sym, sym2) ) + prop_warn(prop, "'%s': number is invalid", sym->name); + } + break; + case P_SELECT: + sym2 = prop_get_symbol(prop); + if ( sym->type != S_BOOLEAN && sym->type != S_TRISTATE ) + prop_warn(prop, + "config symbol '%s' uses select, but is " + "not boolean or tristate", + sym->name); + else if ( sym2->type != S_UNKNOWN && sym2->type != S_BOOLEAN && + sym2->type != S_TRISTATE ) + prop_warn(prop, + "'%s' has wrong type. 'select' only " + "accept arguments of boolean and " + "tristate type", + sym2->name); + break; + case P_RANGE: + if ( sym->type != S_INT && sym->type != S_HEX ) + prop_warn(prop, "range is only allowed " + "for int or hex symbols"); + if ( !menu_validate_number(sym, prop->expr->left.sym) || + !menu_validate_number(sym, prop->expr->right.sym) ) + prop_warn(prop, "range is invalid"); + break; + default:; + } + } } void menu_finalize(struct menu *parent) { - struct menu *menu, *last_menu; - struct symbol *sym; - struct property *prop; - struct expr *parentdep, *basedep, *dep, *dep2, **ep; - - sym = parent->sym; - if (parent->list) { - if (sym && sym_is_choice(sym)) { - if (sym->type == S_UNKNOWN) { - /* find the first choice value to find out choice type */ - current_entry = parent; - for (menu = parent->list; menu; menu = menu->next) { - if (menu->sym && menu->sym->type != S_UNKNOWN) { - menu_set_type(menu->sym->type); - break; - } - } - } - /* set the type of the remaining choice values */ - for (menu = parent->list; menu; menu = menu->next) { - current_entry = menu; - if (menu->sym && menu->sym->type == S_UNKNOWN) - menu_set_type(sym->type); - } - parentdep = expr_alloc_symbol(sym); - } else if (parent->prompt) - parentdep = parent->prompt->visible.expr; - else - parentdep = parent->dep; - - for (menu = parent->list; menu; menu = menu->next) { - basedep = expr_transform(menu->dep); - basedep = expr_alloc_and(expr_copy(parentdep), basedep); - basedep = expr_eliminate_dups(basedep); - menu->dep = basedep; - if (menu->sym) - prop = menu->sym->prop; - else - prop = menu->prompt; - for (; prop; prop = prop->next) { - if (prop->menu != menu) - continue; - dep = expr_transform(prop->visible.expr); - dep = expr_alloc_and(expr_copy(basedep), dep); - dep = expr_eliminate_dups(dep); - if (menu->sym && menu->sym->type != S_TRISTATE) - dep = expr_trans_bool(dep); - prop->visible.expr = dep; - if (prop->type == P_SELECT) { - struct symbol *es = prop_get_symbol(prop); - es->rev_dep.expr = expr_alloc_or(es->rev_dep.expr, - expr_alloc_and(expr_alloc_symbol(menu->sym), expr_copy(dep))); - } - } - } - for (menu = parent->list; menu; menu = menu->next) - 
menu_finalize(menu); - } else if (sym) { - basedep = parent->prompt ? parent->prompt->visible.expr : NULL; - basedep = expr_trans_compare(basedep, E_UNEQUAL, &symbol_no); - basedep = expr_eliminate_dups(expr_transform(basedep)); - last_menu = NULL; - for (menu = parent->next; menu; menu = menu->next) { - dep = menu->prompt ? menu->prompt->visible.expr : menu->dep; - if (!expr_contains_symbol(dep, sym)) - break; - if (expr_depends_symbol(dep, sym)) - goto next; - dep = expr_trans_compare(dep, E_UNEQUAL, &symbol_no); - dep = expr_eliminate_dups(expr_transform(dep)); - dep2 = expr_copy(basedep); - expr_eliminate_eq(&dep, &dep2); - expr_free(dep); - if (!expr_is_yes(dep2)) { - expr_free(dep2); - break; - } - expr_free(dep2); - next: - menu_finalize(menu); - menu->parent = parent; - last_menu = menu; - } - if (last_menu) { - parent->list = parent->next; - parent->next = last_menu->next; - last_menu->next = NULL; - } - - sym->dir_dep.expr = expr_alloc_or(sym->dir_dep.expr, parent->dep); - } - for (menu = parent->list; menu; menu = menu->next) { - if (sym && sym_is_choice(sym) && - menu->sym && !sym_is_choice_value(menu->sym)) { - current_entry = menu; - menu->sym->flags |= SYMBOL_CHOICEVAL; - if (!menu->prompt) - menu_warn(menu, "choice value must have a prompt"); - for (prop = menu->sym->prop; prop; prop = prop->next) { - if (prop->type == P_DEFAULT) - prop_warn(prop, "defaults for choice " - "values not supported"); - if (prop->menu == menu) - continue; - if (prop->type == P_PROMPT && - prop->menu->parent->sym != sym) - prop_warn(prop, "choice value used outside its choice group"); - } - /* Non-tristate choice values of tristate choices must - * depend on the choice being set to Y. The choice - * values' dependencies were propagated to their - * properties above, so the change here must be re- - * propagated. 
- */ - if (sym->type == S_TRISTATE && menu->sym->type != S_TRISTATE) { - basedep = expr_alloc_comp(E_EQUAL, sym, &symbol_yes); - menu->dep = expr_alloc_and(basedep, menu->dep); - for (prop = menu->sym->prop; prop; prop = prop->next) { - if (prop->menu != menu) - continue; - prop->visible.expr = expr_alloc_and(expr_copy(basedep), - prop->visible.expr); - } - } - menu_add_symbol(P_CHOICE, sym, NULL); - prop = sym_get_choice_prop(sym); - for (ep = &prop->expr; *ep; ep = &(*ep)->left.expr) - ; - *ep = expr_alloc_one(E_LIST, NULL); - (*ep)->right.sym = menu->sym; - } - if (menu->list && (!menu->prompt || !menu->prompt->text)) { - for (last_menu = menu->list; ; last_menu = last_menu->next) { - last_menu->parent = parent; - if (!last_menu->next) - break; - } - last_menu->next = menu->next; - menu->next = menu->list; - menu->list = NULL; - } - } - - if (sym && !(sym->flags & SYMBOL_WARNED)) { - if (sym->type == S_UNKNOWN) - menu_warn(parent, "config symbol defined without type"); - - if (sym_is_choice(sym) && !parent->prompt) - menu_warn(parent, "choice must have a prompt"); - - /* Check properties connected to this symbol */ - sym_check_prop(sym); - sym->flags |= SYMBOL_WARNED; - } - - if (sym && !sym_is_optional(sym) && parent->prompt) { - sym->rev_dep.expr = expr_alloc_or(sym->rev_dep.expr, - expr_alloc_and(parent->prompt->visible.expr, - expr_alloc_symbol(&symbol_mod))); - } + struct menu *menu, *last_menu; + struct symbol *sym; + struct property *prop; + struct expr *parentdep, *basedep, *dep, *dep2, **ep; + + sym = parent->sym; + if ( parent->list ) + { + if ( sym && sym_is_choice(sym) ) + { + if ( sym->type == S_UNKNOWN ) + { + /* find the first choice value to find out choice type */ + current_entry = parent; + for ( menu = parent->list; menu; menu = menu->next ) + { + if ( menu->sym && menu->sym->type != S_UNKNOWN ) + { + menu_set_type(menu->sym->type); + break; + } + } + } + /* set the type of the remaining choice values */ + for ( menu = parent->list; menu; menu = menu->next ) + { + current_entry = menu; + if ( menu->sym && menu->sym->type == S_UNKNOWN ) + menu_set_type(sym->type); + } + parentdep = expr_alloc_symbol(sym); + } + else if ( parent->prompt ) + parentdep = parent->prompt->visible.expr; + else + parentdep = parent->dep; + + for ( menu = parent->list; menu; menu = menu->next ) + { + basedep = expr_transform(menu->dep); + basedep = expr_alloc_and(expr_copy(parentdep), basedep); + basedep = expr_eliminate_dups(basedep); + menu->dep = basedep; + if ( menu->sym ) + prop = menu->sym->prop; + else + prop = menu->prompt; + for ( ; prop; prop = prop->next ) + { + if ( prop->menu != menu ) + continue; + dep = expr_transform(prop->visible.expr); + dep = expr_alloc_and(expr_copy(basedep), dep); + dep = expr_eliminate_dups(dep); + if ( menu->sym && menu->sym->type != S_TRISTATE ) + dep = expr_trans_bool(dep); + prop->visible.expr = dep; + if ( prop->type == P_SELECT ) + { + struct symbol *es = prop_get_symbol(prop); + es->rev_dep.expr = expr_alloc_or( + es->rev_dep.expr, + expr_alloc_and(expr_alloc_symbol(menu->sym), + expr_copy(dep))); + } + } + } + for ( menu = parent->list; menu; menu = menu->next ) + menu_finalize(menu); + } + else if ( sym ) + { + basedep = parent->prompt ? parent->prompt->visible.expr : NULL; + basedep = expr_trans_compare(basedep, E_UNEQUAL, &symbol_no); + basedep = expr_eliminate_dups(expr_transform(basedep)); + last_menu = NULL; + for ( menu = parent->next; menu; menu = menu->next ) + { + dep = menu->prompt ? 
menu->prompt->visible.expr : menu->dep; + if ( !expr_contains_symbol(dep, sym) ) + break; + if ( expr_depends_symbol(dep, sym) ) + goto next; + dep = expr_trans_compare(dep, E_UNEQUAL, &symbol_no); + dep = expr_eliminate_dups(expr_transform(dep)); + dep2 = expr_copy(basedep); + expr_eliminate_eq(&dep, &dep2); + expr_free(dep); + if ( !expr_is_yes(dep2) ) + { + expr_free(dep2); + break; + } + expr_free(dep2); + next: + menu_finalize(menu); + menu->parent = parent; + last_menu = menu; + } + if ( last_menu ) + { + parent->list = parent->next; + parent->next = last_menu->next; + last_menu->next = NULL; + } + + sym->dir_dep.expr = expr_alloc_or(sym->dir_dep.expr, parent->dep); + } + for ( menu = parent->list; menu; menu = menu->next ) + { + if ( sym && sym_is_choice(sym) && menu->sym && + !sym_is_choice_value(menu->sym) ) + { + current_entry = menu; + menu->sym->flags |= SYMBOL_CHOICEVAL; + if ( !menu->prompt ) + menu_warn(menu, "choice value must have a prompt"); + for ( prop = menu->sym->prop; prop; prop = prop->next ) + { + if ( prop->type == P_DEFAULT ) + prop_warn(prop, "defaults for choice " + "values not supported"); + if ( prop->menu == menu ) + continue; + if ( prop->type == P_PROMPT && prop->menu->parent->sym != sym ) + prop_warn(prop, + "choice value used outside its choice group"); + } + /* Non-tristate choice values of tristate choices must + * depend on the choice being set to Y. The choice + * values' dependencies were propagated to their + * properties above, so the change here must be re- + * propagated. + */ + if ( sym->type == S_TRISTATE && menu->sym->type != S_TRISTATE ) + { + basedep = expr_alloc_comp(E_EQUAL, sym, &symbol_yes); + menu->dep = expr_alloc_and(basedep, menu->dep); + for ( prop = menu->sym->prop; prop; prop = prop->next ) + { + if ( prop->menu != menu ) + continue; + prop->visible.expr = + expr_alloc_and(expr_copy(basedep), prop->visible.expr); + } + } + menu_add_symbol(P_CHOICE, sym, NULL); + prop = sym_get_choice_prop(sym); + for ( ep = &prop->expr; *ep; ep = &(*ep)->left.expr ) + ; + *ep = expr_alloc_one(E_LIST, NULL); + (*ep)->right.sym = menu->sym; + } + if ( menu->list && (!menu->prompt || !menu->prompt->text) ) + { + for ( last_menu = menu->list;; last_menu = last_menu->next ) + { + last_menu->parent = parent; + if ( !last_menu->next ) + break; + } + last_menu->next = menu->next; + menu->next = menu->list; + menu->list = NULL; + } + } + + if ( sym && !(sym->flags & SYMBOL_WARNED) ) + { + if ( sym->type == S_UNKNOWN ) + menu_warn(parent, "config symbol defined without type"); + + if ( sym_is_choice(sym) && !parent->prompt ) + menu_warn(parent, "choice must have a prompt"); + + /* Check properties connected to this symbol */ + sym_check_prop(sym); + sym->flags |= SYMBOL_WARNED; + } + + if ( sym && !sym_is_optional(sym) && parent->prompt ) + { + sym->rev_dep.expr = expr_alloc_or( + sym->rev_dep.expr, expr_alloc_and(parent->prompt->visible.expr, + expr_alloc_symbol(&symbol_mod))); + } } bool menu_has_prompt(struct menu *menu) { - if (!menu->prompt) - return false; - return true; + if ( !menu->prompt ) + return false; + return true; } /* @@ -457,147 +492,161 @@ bool menu_has_prompt(struct menu *menu) */ bool menu_is_empty(struct menu *menu) { - struct menu *child; + struct menu *child; - for (child = menu->list; child; child = child->next) { - if (menu_is_visible(child)) - return(false); - } - return(true); + for ( child = menu->list; child; child = child->next ) + { + if ( menu_is_visible(child) ) + return (false); + } + return (true); } bool 
menu_is_visible(struct menu *menu) { - struct menu *child; - struct symbol *sym; - tristate visible; + struct menu *child; + struct symbol *sym; + tristate visible; - if (!menu->prompt) - return false; + if ( !menu->prompt ) + return false; - if (menu->visibility) { - if (expr_calc_value(menu->visibility) == no) - return no; - } + if ( menu->visibility ) + { + if ( expr_calc_value(menu->visibility) == no ) + return no; + } - sym = menu->sym; - if (sym) { - sym_calc_value(sym); - visible = menu->prompt->visible.tri; - } else - visible = menu->prompt->visible.tri = expr_calc_value(menu->prompt->visible.expr); + sym = menu->sym; + if ( sym ) + { + sym_calc_value(sym); + visible = menu->prompt->visible.tri; + } + else + visible = menu->prompt->visible.tri = + expr_calc_value(menu->prompt->visible.expr); - if (visible != no) - return true; + if ( visible != no ) + return true; - if (!sym || sym_get_tristate_value(menu->sym) == no) - return false; + if ( !sym || sym_get_tristate_value(menu->sym) == no ) + return false; - for (child = menu->list; child; child = child->next) { - if (menu_is_visible(child)) { - if (sym) - sym->flags |= SYMBOL_DEF_USER; - return true; - } - } + for ( child = menu->list; child; child = child->next ) + { + if ( menu_is_visible(child) ) + { + if ( sym ) + sym->flags |= SYMBOL_DEF_USER; + return true; + } + } - return false; + return false; } const char *menu_get_prompt(struct menu *menu) { - if (menu->prompt) - return menu->prompt->text; - else if (menu->sym) - return menu->sym->name; - return NULL; + if ( menu->prompt ) + return menu->prompt->text; + else if ( menu->sym ) + return menu->sym->name; + return NULL; } struct menu *menu_get_root_menu(struct menu *menu) { - return &rootmenu; + return &rootmenu; } struct menu *menu_get_parent_menu(struct menu *menu) { - enum prop_type type; + enum prop_type type; - for (; menu != &rootmenu; menu = menu->parent) { - type = menu->prompt ? menu->prompt->type : 0; - if (type == P_MENU) - break; - } - return menu; + for ( ; menu != &rootmenu; menu = menu->parent ) + { + type = menu->prompt ? menu->prompt->type : 0; + if ( type == P_MENU ) + break; + } + return menu; } bool menu_has_help(struct menu *menu) { - return menu->help != NULL; + return menu->help != NULL; } const char *menu_get_help(struct menu *menu) { - if (menu->help) - return menu->help; - else - return ""; + if ( menu->help ) + return menu->help; + else + return ""; } static void get_prompt_str(struct gstr *r, struct property *prop, - struct list_head *head) -{ - int i, j; - struct menu *submenu[8], *menu, *location = NULL; - struct jump_key *jump = NULL; - - str_printf(r, _("Prompt: %s\n"), _(prop->text)); - menu = prop->menu->parent; - for (i = 0; menu != &rootmenu && i < 8; menu = menu->parent) { - bool accessible = menu_is_visible(menu); - - submenu[i++] = menu; - if (location == NULL && accessible) - location = menu; - } - if (head && location) { - jump = xmalloc(sizeof(struct jump_key)); - - if (menu_is_visible(prop->menu)) { - /* - * There is not enough room to put the hint at the - * beginning of the "Prompt" line. Put the hint on the - * last "Location" line even when it would belong on - * the former. 
- */ - jump->target = prop->menu; - } else - jump->target = location; - - if (list_empty(head)) - jump->index = 0; - else - jump->index = list_entry(head->prev, struct jump_key, - entries)->index + 1; - - list_add_tail(&jump->entries, head); - } - - if (i > 0) { - str_printf(r, _(" Location:\n")); - for (j = 4; --i >= 0; j += 2) { - menu = submenu[i]; - if (jump && menu == location) - jump->offset = strlen(r->s); - str_printf(r, "%*c-> %s", j, ' ', - _(menu_get_prompt(menu))); - if (menu->sym) { - str_printf(r, " (%s [=%s])", menu->sym->name ? - menu->sym->name : _(""), - sym_get_string_value(menu->sym)); - } - str_append(r, "\n"); - } - } + struct list_head *head) +{ + int i, j; + struct menu *submenu[8], *menu, *location = NULL; + struct jump_key *jump = NULL; + + str_printf(r, _("Prompt: %s\n"), _(prop->text)); + menu = prop->menu->parent; + for ( i = 0; menu != &rootmenu && i < 8; menu = menu->parent ) + { + bool accessible = menu_is_visible(menu); + + submenu[i++] = menu; + if ( location == NULL && accessible ) + location = menu; + } + if ( head && location ) + { + jump = xmalloc(sizeof(struct jump_key)); + + if ( menu_is_visible(prop->menu) ) + { + /* + * There is not enough room to put the hint at the + * beginning of the "Prompt" line. Put the hint on the + * last "Location" line even when it would belong on + * the former. + */ + jump->target = prop->menu; + } + else + jump->target = location; + + if ( list_empty(head) ) + jump->index = 0; + else + jump->index = + list_entry(head->prev, struct jump_key, entries)->index + 1; + + list_add_tail(&jump->entries, head); + } + + if ( i > 0 ) + { + str_printf(r, _(" Location:\n")); + for ( j = 4; --i >= 0; j += 2 ) + { + menu = submenu[i]; + if ( jump && menu == location ) + jump->offset = strlen(r->s); + str_printf(r, "%*c-> %s", j, ' ', _(menu_get_prompt(menu))); + if ( menu->sym ) + { + str_printf(r, " (%s [=%s])", + menu->sym->name ? 
menu->sym->name : _(""), + sym_get_string_value(menu->sym)); + } + str_append(r, "\n"); + } + } } /* @@ -605,93 +654,100 @@ static void get_prompt_str(struct gstr *r, struct property *prop, */ static struct property *get_symbol_prop(struct symbol *sym) { - struct property *prop = NULL; + struct property *prop = NULL; - for_all_properties(sym, prop, P_SYMBOL) - break; - return prop; + for_all_properties(sym, prop, P_SYMBOL) break; + return prop; } /* * head is optional and may be NULL */ static void get_symbol_str(struct gstr *r, struct symbol *sym, - struct list_head *head) -{ - bool hit; - struct property *prop; - - if (sym && sym->name) { - str_printf(r, "Symbol: %s [=%s]\n", sym->name, - sym_get_string_value(sym)); - str_printf(r, "Type : %s\n", sym_type_name(sym->type)); - if (sym->type == S_INT || sym->type == S_HEX) { - prop = sym_get_range_prop(sym); - if (prop) { - str_printf(r, "Range : "); - expr_gstr_print(prop->expr, r); - str_append(r, "\n"); - } - } - } - for_all_prompts(sym, prop) - get_prompt_str(r, prop, head); - - prop = get_symbol_prop(sym); - if (prop) { - str_printf(r, _(" Defined at %s:%d\n"), prop->menu->file->name, - prop->menu->lineno); - if (!expr_is_yes(prop->visible.expr)) { - str_append(r, _(" Depends on: ")); - expr_gstr_print(prop->visible.expr, r); - str_append(r, "\n"); - } - } - - hit = false; - for_all_properties(sym, prop, P_SELECT) { - if (!hit) { - str_append(r, " Selects: "); - hit = true; - } else - str_printf(r, " && "); - expr_gstr_print(prop->expr, r); - } - if (hit) - str_append(r, "\n"); - if (sym->rev_dep.expr) { - str_append(r, _(" Selected by: ")); - expr_gstr_print(sym->rev_dep.expr, r); - str_append(r, "\n"); - } - str_append(r, "\n\n"); + struct list_head *head) +{ + bool hit; + struct property *prop; + + if ( sym && sym->name ) + { + str_printf(r, "Symbol: %s [=%s]\n", sym->name, + sym_get_string_value(sym)); + str_printf(r, "Type : %s\n", sym_type_name(sym->type)); + if ( sym->type == S_INT || sym->type == S_HEX ) + { + prop = sym_get_range_prop(sym); + if ( prop ) + { + str_printf(r, "Range : "); + expr_gstr_print(prop->expr, r); + str_append(r, "\n"); + } + } + } + for_all_prompts(sym, prop) get_prompt_str(r, prop, head); + + prop = get_symbol_prop(sym); + if ( prop ) + { + str_printf(r, _(" Defined at %s:%d\n"), prop->menu->file->name, + prop->menu->lineno); + if ( !expr_is_yes(prop->visible.expr) ) + { + str_append(r, _(" Depends on: ")); + expr_gstr_print(prop->visible.expr, r); + str_append(r, "\n"); + } + } + + hit = false; + for_all_properties(sym, prop, P_SELECT) + { + if ( !hit ) + { + str_append(r, " Selects: "); + hit = true; + } + else + str_printf(r, " && "); + expr_gstr_print(prop->expr, r); + } + if ( hit ) + str_append(r, "\n"); + if ( sym->rev_dep.expr ) + { + str_append(r, _(" Selected by: ")); + expr_gstr_print(sym->rev_dep.expr, r); + str_append(r, "\n"); + } + str_append(r, "\n\n"); } struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head) { - struct symbol *sym; - struct gstr res = str_new(); - int i; + struct symbol *sym; + struct gstr res = str_new(); + int i; - for (i = 0; sym_arr && (sym = sym_arr[i]); i++) - get_symbol_str(&res, sym, head); - if (!i) - str_append(&res, _("No matches found.\n")); - return res; + for ( i = 0; sym_arr && (sym = sym_arr[i]); i++ ) + get_symbol_str(&res, sym, head); + if ( !i ) + str_append(&res, _("No matches found.\n")); + return res; } - void menu_get_ext_help(struct menu *menu, struct gstr *help) { - struct symbol *sym = menu->sym; - const char *help_text 
= nohelp_text; - - if (menu_has_help(menu)) { - if (sym->name) - str_printf(help, "%s%s:\n\n", CONFIG_, sym->name); - help_text = menu_get_help(menu); - } - str_printf(help, "%s\n", _(help_text)); - if (sym) - get_symbol_str(help, sym, NULL); + struct symbol *sym = menu->sym; + const char *help_text = nohelp_text; + + if ( menu_has_help(menu) ) + { + if ( sym->name ) + str_printf(help, "%s%s:\n\n", CONFIG_, sym->name); + help_text = menu_get_help(menu); + } + str_printf(help, "%s\n", _(help_text)); + if ( sym ) + get_symbol_str(help, sym, NULL); } diff --git a/xen/tools/kconfig/nconf.c b/xen/tools/kconfig/nconf.c index d42d534a66..decfb2b3d4 100644 --- a/xen/tools/kconfig/nconf.c +++ b/xen/tools/kconfig/nconf.c @@ -13,242 +13,274 @@ #include "nconf.h" #include -static const char nconf_global_help[] = N_( -"Help windows\n" -"------------\n" -"o Global help: Unless in a data entry window, pressing will give \n" -" you the global help window, which you are just reading.\n" -"\n" -"o A short version of the global help is available by pressing .\n" -"\n" -"o Local help: To get help related to the current menu entry, use any\n" -" of , or if in a data entry window then press .\n" -"\n" -"\n" -"Menu entries\n" -"------------\n" -"This interface lets you select features and parameters for the kernel\n" -"build. Kernel features can either be built-in, modularized, or removed.\n" -"Parameters must be entered as text or decimal or hexadecimal numbers.\n" -"\n" -"Menu entries beginning with following braces represent features that\n" -" [ ] can be built in or removed\n" -" < > can be built in, modularized or removed\n" -" { } can be built in or modularized, are selected by another feature\n" -" - - are selected by another feature\n" -" XXX cannot be selected. Symbol Info tells you why.\n" -"*, M or whitespace inside braces means to build in, build as a module\n" -"or to exclude the feature respectively.\n" -"\n" -"To change any of these features, highlight it with the movement keys\n" -"listed below and press to build it in, to make it a module or\n" -" to remove it. You may press the key to cycle through the\n" -"available options.\n" -"\n" -"A trailing \"--->\" designates a submenu, a trailing \"----\" an\n" -"empty submenu.\n" -"\n" -"Menu navigation keys\n" -"----------------------------------------------------------------------\n" -"Linewise up \n" -"Linewise down \n" -"Pagewise up \n" -"Pagewise down \n" -"First entry \n" -"Last entry \n" -"Enter a submenu \n" -"Go back to parent menu \n" -"Close a help window \n" -"Close entry window, apply \n" -"Close entry window, forget \n" -"Start incremental, case-insensitive search for STRING in menu entries,\n" -" no regex support, STRING is displayed in upper left corner\n" -" STRING\n" -" Remove last character \n" -" Jump to next hit \n" -" Jump to previous hit \n" -"Exit menu search mode \n" -"Search for configuration variables with or without leading CONFIG_\n" -" RegExpr\n" -"Verbose search help \n" -"----------------------------------------------------------------------\n" -"\n" -"Unless in a data entry window, key <1> may be used instead of ,\n" -"<2> instead of , etc.\n" -"\n" -"\n" -"Radiolist (Choice list)\n" -"-----------------------\n" -"Use the movement keys listed above to select the option you wish to set\n" -"and press .\n" -"\n" -"\n" -"Data entry\n" -"----------\n" -"Enter the requested information and press . 
Hexadecimal values\n" -"may be entered without the \"0x\" prefix.\n" -"\n" -"\n" -"Text Box (Help Window)\n" -"----------------------\n" -"Use movement keys as listed in table above.\n" -"\n" -"Press any of to exit.\n" -"\n" -"\n" -"Alternate configuration files\n" -"-----------------------------\n" -"nconfig supports switching between different configurations.\n" -"Press to save your current configuration. Press and enter\n" -"a file name to load a previously saved configuration.\n" -"\n" -"\n" -"Terminal configuration\n" -"----------------------\n" -"If you use nconfig in a xterm window, make sure your TERM environment\n" -"variable specifies a terminal configuration which supports at least\n" -"16 colors. Otherwise nconfig will look rather bad.\n" -"\n" -"If the \"stty size\" command reports the current terminalsize correctly,\n" -"nconfig will adapt to sizes larger than the traditional 80x25 \"standard\"\n" -"and display longer menus properly.\n" -"\n" -"\n" -"Single menu mode\n" -"----------------\n" -"If you prefer to have all of the menu entries listed in a single menu,\n" -"rather than the default multimenu hierarchy, run nconfig with\n" -"NCONFIG_MODE environment variable set to single_menu. Example:\n" -"\n" -"make NCONFIG_MODE=single_menu nconfig\n" -"\n" -" will then unfold the appropriate category, or fold it if it\n" -"is already unfolded. Folded menu entries will be designated by a\n" -"leading \"++>\" and unfolded entries by a leading \"-->\".\n" -"\n" -"Note that this mode can eventually be a little more CPU expensive than\n" -"the default mode, especially with a larger number of unfolded submenus.\n" -"\n"), -menu_no_f_instructions[] = N_( -"Legend: [*] built-in [ ] excluded module < > module capable.\n" -"Submenus are designated by a trailing \"--->\", empty ones by \"----\".\n" -"\n" -"Use the following keys to navigate the menus:\n" -"Move up or down with and .\n" -"Enter a submenu with or .\n" -"Exit a submenu to its parent menu with or .\n" -"Pressing includes, excludes, modularizes features.\n" -"Pressing cycles through the available options.\n" -"To search for menu entries press .\n" -" always leaves the current window.\n" -"\n" -"You do not have function keys support.\n" -"Press <1> instead of , <2> instead of , etc.\n" -"For verbose global help use key <1>.\n" -"For help related to the current menu entry press or .\n"), -menu_instructions[] = N_( -"Legend: [*] built-in [ ] excluded module < > module capable.\n" -"Submenus are designated by a trailing \"--->\", empty ones by \"----\".\n" -"\n" -"Use the following keys to navigate the menus:\n" -"Move up or down with or .\n" -"Enter a submenu with or .\n" -"Exit a submenu to its parent menu with or .\n" -"Pressing includes, excludes, modularizes features.\n" -"Pressing cycles through the available options.\n" -"To search for menu entries press .\n" -" always leaves the current window.\n" -"\n" -"Pressing <1> may be used instead of , <2> instead of , etc.\n" -"For verbose global help press .\n" -"For help related to the current menu entry press or .\n"), -radiolist_instructions[] = N_( -"Press , , or to navigate a radiolist, select\n" -"with .\n" -"For help related to the current entry press or .\n" -"For global help press .\n"), -inputbox_instructions_int[] = N_( -"Please enter a decimal value.\n" -"Fractions will not be accepted.\n" -"Press to apply, to cancel."), -inputbox_instructions_hex[] = N_( -"Please enter a hexadecimal value.\n" -"Press to apply, to cancel."), -inputbox_instructions_string[] = N_( -"Please 
enter a string value.\n" -"Press to apply, to cancel."), -setmod_text[] = N_( -"This feature depends on another feature which has been configured as a\n" -"module. As a result, the current feature will be built as a module too."), -load_config_text[] = N_( -"Enter the name of the configuration file you wish to load.\n" -"Accept the name shown to restore the configuration you last\n" -"retrieved. Leave empty to abort."), -load_config_help[] = N_( -"For various reasons, one may wish to keep several different\n" -"configurations available on a single machine.\n" -"\n" -"If you have saved a previous configuration in a file other than the\n" -"default one, entering its name here will allow you to load and modify\n" -"that configuration.\n" -"\n" -"Leave empty to abort.\n"), -save_config_text[] = N_( -"Enter a filename to which this configuration should be saved\n" -"as an alternate. Leave empty to abort."), -save_config_help[] = N_( -"For various reasons, one may wish to keep several different\n" -"configurations available on a single machine.\n" -"\n" -"Entering a file name here will allow you to later retrieve, modify\n" -"and use the current configuration as an alternate to whatever\n" -"configuration options you have selected at that time.\n" -"\n" -"Leave empty to abort.\n"), -search_help[] = N_( -"Search for symbols (configuration variable names CONFIG_*) and display\n" -"their relations. Regular expressions are supported.\n" -"Example: Search for \"^FOO\".\n" -"Result:\n" -"-----------------------------------------------------------------\n" -"Symbol: FOO [ = m]\n" -"Prompt: Foo bus is used to drive the bar HW\n" -"Defined at drivers/pci/Kconfig:47\n" -"Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n" -"Location:\n" -" -> Bus options (PCI, PCMCIA, EISA, ISA)\n" -" -> PCI support (PCI [ = y])\n" -" -> PCI access mode ( [ = y])\n" -"Selects: LIBCRC32\n" -"Selected by: BAR\n" -"-----------------------------------------------------------------\n" -"o The line 'Prompt:' shows the text displayed for this symbol in\n" -" the menu hierarchy.\n" -"o The 'Defined at' line tells at what file / line number the symbol is\n" -" defined.\n" -"o The 'Depends on:' line lists symbols that need to be defined for\n" -" this symbol to be visible and selectable in the menu.\n" -"o The 'Location:' lines tell, where in the menu structure this symbol\n" -" is located. 
A location followed by a [ = y] indicates that this is\n" -" a selectable menu item, and the current value is displayed inside\n" -" brackets.\n" -"o The 'Selects:' line tells, what symbol will be automatically selected\n" -" if this symbol is selected (y or m).\n" -"o The 'Selected by' line tells what symbol has selected this symbol.\n" -"\n" -"Only relevant lines are shown.\n" -"\n\n" -"Search examples:\n" -"USB => find all symbols containing USB\n" -"^USB => find all symbols starting with USB\n" -"USB$ => find all symbols ending with USB\n" -"\n"); - -struct mitem { - char str[256]; - char tag; - void *usrptr; - int is_visible; +static const char + nconf_global_help[] = N_( + "Help windows\n" + "------------\n" + "o Global help: Unless in a data entry window, pressing will " + "give \n" + " you the global help window, which you are just reading.\n" + "\n" + "o A short version of the global help is available by pressing .\n" + "\n" + "o Local help: To get help related to the current menu entry, use " + "any\n" + " of , or if in a data entry window then press .\n" + "\n" + "\n" + "Menu entries\n" + "------------\n" + "This interface lets you select features and parameters for the " + "kernel\n" + "build. Kernel features can either be built-in, modularized, or " + "removed.\n" + "Parameters must be entered as text or decimal or hexadecimal " + "numbers.\n" + "\n" + "Menu entries beginning with following braces represent features that\n" + " [ ] can be built in or removed\n" + " < > can be built in, modularized or removed\n" + " { } can be built in or modularized, are selected by another " + "feature\n" + " - - are selected by another feature\n" + " XXX cannot be selected. Symbol Info tells you why.\n" + "*, M or whitespace inside braces means to build in, build as a " + "module\n" + "or to exclude the feature respectively.\n" + "\n" + "To change any of these features, highlight it with the movement keys\n" + "listed below and press to build it in, to make it a module " + "or\n" + " to remove it. You may press the key to cycle through " + "the\n" + "available options.\n" + "\n" + "A trailing \"--->\" designates a submenu, a trailing \"----\" an\n" + "empty submenu.\n" + "\n" + "Menu navigation keys\n" + "----------------------------------------------------------------------" + "\n" + "Linewise up \n" + "Linewise down \n" + "Pagewise up \n" + "Pagewise down \n" + "First entry \n" + "Last entry \n" + "Enter a submenu \n" + "Go back to parent menu \n" + "Close a help window \n" + "Close entry window, apply \n" + "Close entry window, forget \n" + "Start incremental, case-insensitive search for STRING in menu " + "entries,\n" + " no regex support, STRING is displayed in upper left corner\n" + " STRING\n" + " Remove last character \n" + " Jump to next hit \n" + " Jump to previous hit \n" + "Exit menu search mode \n" + "Search for configuration variables with or without leading CONFIG_\n" + " RegExpr\n" + "Verbose search help \n" + "----------------------------------------------------------------------" + "\n" + "\n" + "Unless in a data entry window, key <1> may be used instead of ,\n" + "<2> instead of , etc.\n" + "\n" + "\n" + "Radiolist (Choice list)\n" + "-----------------------\n" + "Use the movement keys listed above to select the option you wish to " + "set\n" + "and press .\n" + "\n" + "\n" + "Data entry\n" + "----------\n" + "Enter the requested information and press . 
Hexadecimal " + "values\n" + "may be entered without the \"0x\" prefix.\n" + "\n" + "\n" + "Text Box (Help Window)\n" + "----------------------\n" + "Use movement keys as listed in table above.\n" + "\n" + "Press any of to exit.\n" + "\n" + "\n" + "Alternate configuration files\n" + "-----------------------------\n" + "nconfig supports switching between different configurations.\n" + "Press to save your current configuration. Press and enter\n" + "a file name to load a previously saved configuration.\n" + "\n" + "\n" + "Terminal configuration\n" + "----------------------\n" + "If you use nconfig in a xterm window, make sure your TERM " + "environment\n" + "variable specifies a terminal configuration which supports at least\n" + "16 colors. Otherwise nconfig will look rather bad.\n" + "\n" + "If the \"stty size\" command reports the current terminalsize " + "correctly,\n" + "nconfig will adapt to sizes larger than the traditional 80x25 " + "\"standard\"\n" + "and display longer menus properly.\n" + "\n" + "\n" + "Single menu mode\n" + "----------------\n" + "If you prefer to have all of the menu entries listed in a single " + "menu,\n" + "rather than the default multimenu hierarchy, run nconfig with\n" + "NCONFIG_MODE environment variable set to single_menu. Example:\n" + "\n" + "make NCONFIG_MODE=single_menu nconfig\n" + "\n" + " will then unfold the appropriate category, or fold it if it\n" + "is already unfolded. Folded menu entries will be designated by a\n" + "leading \"++>\" and unfolded entries by a leading \"-->\".\n" + "\n" + "Note that this mode can eventually be a little more CPU expensive " + "than\n" + "the default mode, especially with a larger number of unfolded " + "submenus.\n" + "\n"), + menu_no_f_instructions[] = N_( + "Legend: [*] built-in [ ] excluded module < > module capable.\n" + "Submenus are designated by a trailing \"--->\", empty ones by " + "\"----\".\n" + "\n" + "Use the following keys to navigate the menus:\n" + "Move up or down with and .\n" + "Enter a submenu with or .\n" + "Exit a submenu to its parent menu with or .\n" + "Pressing includes, excludes, modularizes features.\n" + "Pressing cycles through the available options.\n" + "To search for menu entries press .\n" + " always leaves the current window.\n" + "\n" + "You do not have function keys support.\n" + "Press <1> instead of , <2> instead of , etc.\n" + "For verbose global help use key <1>.\n" + "For help related to the current menu entry press or .\n"), + menu_instructions[] = N_( + "Legend: [*] built-in [ ] excluded module < > module capable.\n" + "Submenus are designated by a trailing \"--->\", empty ones by " + "\"----\".\n" + "\n" + "Use the following keys to navigate the menus:\n" + "Move up or down with or .\n" + "Enter a submenu with or .\n" + "Exit a submenu to its parent menu with or .\n" + "Pressing includes, excludes, modularizes features.\n" + "Pressing cycles through the available options.\n" + "To search for menu entries press .\n" + " always leaves the current window.\n" + "\n" + "Pressing <1> may be used instead of , <2> instead of , etc.\n" + "For verbose global help press .\n" + "For help related to the current menu entry press or .\n"), + radiolist_instructions[] = N_( + "Press , , or to navigate a radiolist, select\n" + "with .\n" + "For help related to the current entry press or .\n" + "For global help press .\n"), + inputbox_instructions_int[] = + N_("Please enter a decimal value.\n" + "Fractions will not be accepted.\n" + "Press to apply, to cancel."), + 
inputbox_instructions_hex[] = + N_("Please enter a hexadecimal value.\n" + "Press to apply, to cancel."), + inputbox_instructions_string[] = + N_("Please enter a string value.\n" + "Press to apply, to cancel."), + setmod_text[] = N_("This feature depends on another feature which has been " + "configured as a\n" + "module. As a result, the current feature will be " + "built as a module too."), + load_config_text[] = + N_("Enter the name of the configuration file you wish to load.\n" + "Accept the name shown to restore the configuration you last\n" + "retrieved. Leave empty to abort."), + load_config_help[] = N_( + "For various reasons, one may wish to keep several different\n" + "configurations available on a single machine.\n" + "\n" + "If you have saved a previous configuration in a file other than the\n" + "default one, entering its name here will allow you to load and " + "modify\n" + "that configuration.\n" + "\n" + "Leave empty to abort.\n"), + save_config_text[] = + N_("Enter a filename to which this configuration should be saved\n" + "as an alternate. Leave empty to abort."), + save_config_help[] = N_( + "For various reasons, one may wish to keep several different\n" + "configurations available on a single machine.\n" + "\n" + "Entering a file name here will allow you to later retrieve, modify\n" + "and use the current configuration as an alternate to whatever\n" + "configuration options you have selected at that time.\n" + "\n" + "Leave empty to abort.\n"), + search_help[] = N_( + "Search for symbols (configuration variable names CONFIG_*) and " + "display\n" + "their relations. Regular expressions are supported.\n" + "Example: Search for \"^FOO\".\n" + "Result:\n" + "-----------------------------------------------------------------\n" + "Symbol: FOO [ = m]\n" + "Prompt: Foo bus is used to drive the bar HW\n" + "Defined at drivers/pci/Kconfig:47\n" + "Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n" + "Location:\n" + " -> Bus options (PCI, PCMCIA, EISA, ISA)\n" + " -> PCI support (PCI [ = y])\n" + " -> PCI access mode ( [ = y])\n" + "Selects: LIBCRC32\n" + "Selected by: BAR\n" + "-----------------------------------------------------------------\n" + "o The line 'Prompt:' shows the text displayed for this symbol in\n" + " the menu hierarchy.\n" + "o The 'Defined at' line tells at what file / line number the symbol " + "is\n" + " defined.\n" + "o The 'Depends on:' line lists symbols that need to be defined for\n" + " this symbol to be visible and selectable in the menu.\n" + "o The 'Location:' lines tell, where in the menu structure this " + "symbol\n" + " is located. 
A location followed by a [ = y] indicates that this " + "is\n" + " a selectable menu item, and the current value is displayed inside\n" + " brackets.\n" + "o The 'Selects:' line tells, what symbol will be automatically " + "selected\n" + " if this symbol is selected (y or m).\n" + "o The 'Selected by' line tells what symbol has selected this " + "symbol.\n" + "\n" + "Only relevant lines are shown.\n" + "\n\n" + "Search examples:\n" + "USB => find all symbols containing USB\n" + "^USB => find all symbols starting with USB\n" + "USB$ => find all symbols ending with USB\n" + "\n"); + +struct mitem +{ + char str[256]; + char tag; + void *usrptr; + int is_visible; }; #define MAX_MENU_ITEMS 4096 @@ -295,349 +327,341 @@ static void handle_f7(int *key, struct menu *current_item); static void handle_f8(int *key, struct menu *current_item); static void handle_f9(int *key, struct menu *current_item); -struct function_keys { - const char *key_str; - const char *func; - function_key key; - function_key_handler_t handler; +struct function_keys +{ + const char *key_str; + const char *func; + function_key key; + function_key_handler_t handler; }; static const int function_keys_num = 9; struct function_keys function_keys[] = { - { - .key_str = "F1", - .func = "Help", - .key = F_HELP, - .handler = handle_f1, - }, - { - .key_str = "F2", - .func = "SymInfo", - .key = F_SYMBOL, - .handler = handle_f2, - }, - { - .key_str = "F3", - .func = "Help 2", - .key = F_INSTS, - .handler = handle_f3, - }, - { - .key_str = "F4", - .func = "ShowAll", - .key = F_CONF, - .handler = handle_f4, - }, - { - .key_str = "F5", - .func = "Back", - .key = F_BACK, - .handler = handle_f5, - }, - { - .key_str = "F6", - .func = "Save", - .key = F_SAVE, - .handler = handle_f6, - }, - { - .key_str = "F7", - .func = "Load", - .key = F_LOAD, - .handler = handle_f7, - }, - { - .key_str = "F8", - .func = "SymSearch", - .key = F_SEARCH, - .handler = handle_f8, - }, - { - .key_str = "F9", - .func = "Exit", - .key = F_EXIT, - .handler = handle_f9, - }, + { + .key_str = "F1", + .func = "Help", + .key = F_HELP, + .handler = handle_f1, + }, + { + .key_str = "F2", + .func = "SymInfo", + .key = F_SYMBOL, + .handler = handle_f2, + }, + { + .key_str = "F3", + .func = "Help 2", + .key = F_INSTS, + .handler = handle_f3, + }, + { + .key_str = "F4", + .func = "ShowAll", + .key = F_CONF, + .handler = handle_f4, + }, + { + .key_str = "F5", + .func = "Back", + .key = F_BACK, + .handler = handle_f5, + }, + { + .key_str = "F6", + .func = "Save", + .key = F_SAVE, + .handler = handle_f6, + }, + { + .key_str = "F7", + .func = "Load", + .key = F_LOAD, + .handler = handle_f7, + }, + { + .key_str = "F8", + .func = "SymSearch", + .key = F_SEARCH, + .handler = handle_f8, + }, + { + .key_str = "F9", + .func = "Exit", + .key = F_EXIT, + .handler = handle_f9, + }, }; static void print_function_line(void) { - int i; - int offset = 1; - const int skip = 1; - int lines = getmaxy(stdscr); - - for (i = 0; i < function_keys_num; i++) { - (void) wattrset(main_window, attributes[FUNCTION_HIGHLIGHT]); - mvwprintw(main_window, lines-3, offset, - "%s", - function_keys[i].key_str); - (void) wattrset(main_window, attributes[FUNCTION_TEXT]); - offset += strlen(function_keys[i].key_str); - mvwprintw(main_window, lines-3, - offset, "%s", - function_keys[i].func); - offset += strlen(function_keys[i].func) + skip; - } - (void) wattrset(main_window, attributes[NORMAL]); + int i; + int offset = 1; + const int skip = 1; + int lines = getmaxy(stdscr); + + for ( i = 0; i < function_keys_num; i++ 
) + { + (void)wattrset(main_window, attributes[FUNCTION_HIGHLIGHT]); + mvwprintw(main_window, lines - 3, offset, "%s", + function_keys[i].key_str); + (void)wattrset(main_window, attributes[FUNCTION_TEXT]); + offset += strlen(function_keys[i].key_str); + mvwprintw(main_window, lines - 3, offset, "%s", function_keys[i].func); + offset += strlen(function_keys[i].func) + skip; + } + (void)wattrset(main_window, attributes[NORMAL]); } /* help */ static void handle_f1(int *key, struct menu *current_item) { - show_scroll_win(main_window, - _("Global help"), _(nconf_global_help)); - return; + show_scroll_win(main_window, _("Global help"), _(nconf_global_help)); + return; } /* symbole help */ static void handle_f2(int *key, struct menu *current_item) { - show_help(current_item); - return; + show_help(current_item); + return; } /* instructions */ static void handle_f3(int *key, struct menu *current_item) { - show_scroll_win(main_window, - _("Short help"), - _(current_instructions)); - return; + show_scroll_win(main_window, _("Short help"), _(current_instructions)); + return; } /* config */ static void handle_f4(int *key, struct menu *current_item) { - int res = btn_dialog(main_window, - _("Show all symbols?"), - 2, - " ", - ""); - if (res == 0) - show_all_items = 1; - else if (res == 1) - show_all_items = 0; - - return; + int res = btn_dialog(main_window, _("Show all symbols?"), 2, + " ", ""); + if ( res == 0 ) + show_all_items = 1; + else if ( res == 1 ) + show_all_items = 0; + + return; } /* back */ static void handle_f5(int *key, struct menu *current_item) { - *key = KEY_LEFT; - return; + *key = KEY_LEFT; + return; } /* save */ static void handle_f6(int *key, struct menu *current_item) { - conf_save(); - return; + conf_save(); + return; } /* load */ static void handle_f7(int *key, struct menu *current_item) { - conf_load(); - return; + conf_load(); + return; } /* search */ static void handle_f8(int *key, struct menu *current_item) { - search_conf(); - return; + search_conf(); + return; } /* exit */ static void handle_f9(int *key, struct menu *current_item) { - do_exit(); - return; + do_exit(); + return; } /* return != 0 to indicate the key was handles */ static int process_special_keys(int *key, struct menu *menu) { - int i; - - if (*key == KEY_RESIZE) { - setup_windows(); - return 1; - } - - for (i = 0; i < function_keys_num; i++) { - if (*key == KEY_F(function_keys[i].key) || - *key == '0' + function_keys[i].key){ - function_keys[i].handler(key, menu); - return 1; - } - } - - return 0; + int i; + + if ( *key == KEY_RESIZE ) + { + setup_windows(); + return 1; + } + + for ( i = 0; i < function_keys_num; i++ ) + { + if ( *key == KEY_F(function_keys[i].key) || + *key == '0' + function_keys[i].key ) + { + function_keys[i].handler(key, menu); + return 1; + } + } + + return 0; } static void clean_items(void) { - int i; - for (i = 0; curses_menu_items[i]; i++) - free_item(curses_menu_items[i]); - bzero(curses_menu_items, sizeof(curses_menu_items)); - bzero(k_menu_items, sizeof(k_menu_items)); - items_num = 0; + int i; + for ( i = 0; curses_menu_items[i]; i++ ) + free_item(curses_menu_items[i]); + bzero(curses_menu_items, sizeof(curses_menu_items)); + bzero(k_menu_items, sizeof(k_menu_items)); + items_num = 0; } -typedef enum {MATCH_TINKER_PATTERN_UP, MATCH_TINKER_PATTERN_DOWN, - FIND_NEXT_MATCH_DOWN, FIND_NEXT_MATCH_UP} match_f; +typedef enum +{ + MATCH_TINKER_PATTERN_UP, + MATCH_TINKER_PATTERN_DOWN, + FIND_NEXT_MATCH_DOWN, + FIND_NEXT_MATCH_UP +} match_f; /* return the index of the matched item, or -1 
if no such item exists */ static int get_mext_match(const char *match_str, match_f flag) { - int match_start = item_index(current_item(curses_menu)); - int index; - - if (flag == FIND_NEXT_MATCH_DOWN) - ++match_start; - else if (flag == FIND_NEXT_MATCH_UP) - --match_start; - - index = match_start; - index = (index + items_num) % items_num; - while (true) { - char *str = k_menu_items[index].str; - if (strcasestr(str, match_str) != 0) - return index; - if (flag == FIND_NEXT_MATCH_UP || - flag == MATCH_TINKER_PATTERN_UP) - --index; - else - ++index; - index = (index + items_num) % items_num; - if (index == match_start) - return -1; - } + int match_start = item_index(current_item(curses_menu)); + int index; + + if ( flag == FIND_NEXT_MATCH_DOWN ) + ++match_start; + else if ( flag == FIND_NEXT_MATCH_UP ) + --match_start; + + index = match_start; + index = (index + items_num) % items_num; + while ( true ) + { + char *str = k_menu_items[index].str; + if ( strcasestr(str, match_str) != 0 ) + return index; + if ( flag == FIND_NEXT_MATCH_UP || flag == MATCH_TINKER_PATTERN_UP ) + --index; + else + ++index; + index = (index + items_num) % items_num; + if ( index == match_start ) + return -1; + } } /* Make a new item. */ static void item_make(struct menu *menu, char tag, const char *fmt, ...) { - va_list ap; - - if (items_num > MAX_MENU_ITEMS-1) - return; - - bzero(&k_menu_items[items_num], sizeof(k_menu_items[0])); - k_menu_items[items_num].tag = tag; - k_menu_items[items_num].usrptr = menu; - if (menu != NULL) - k_menu_items[items_num].is_visible = - menu_is_visible(menu); - else - k_menu_items[items_num].is_visible = 1; - - va_start(ap, fmt); - vsnprintf(k_menu_items[items_num].str, - sizeof(k_menu_items[items_num].str), - fmt, ap); - va_end(ap); - - if (!k_menu_items[items_num].is_visible) - memcpy(k_menu_items[items_num].str, "XXX", 3); - - curses_menu_items[items_num] = new_item( - k_menu_items[items_num].str, - k_menu_items[items_num].str); - set_item_userptr(curses_menu_items[items_num], - &k_menu_items[items_num]); - /* - if (!k_menu_items[items_num].is_visible) - item_opts_off(curses_menu_items[items_num], O_SELECTABLE); - */ - - items_num++; - curses_menu_items[items_num] = NULL; + va_list ap; + + if ( items_num > MAX_MENU_ITEMS - 1 ) + return; + + bzero(&k_menu_items[items_num], sizeof(k_menu_items[0])); + k_menu_items[items_num].tag = tag; + k_menu_items[items_num].usrptr = menu; + if ( menu != NULL ) + k_menu_items[items_num].is_visible = menu_is_visible(menu); + else + k_menu_items[items_num].is_visible = 1; + + va_start(ap, fmt); + vsnprintf(k_menu_items[items_num].str, sizeof(k_menu_items[items_num].str), + fmt, ap); + va_end(ap); + + if ( !k_menu_items[items_num].is_visible ) + memcpy(k_menu_items[items_num].str, "XXX", 3); + + curses_menu_items[items_num] = + new_item(k_menu_items[items_num].str, k_menu_items[items_num].str); + set_item_userptr(curses_menu_items[items_num], &k_menu_items[items_num]); + /* + if (!k_menu_items[items_num].is_visible) + item_opts_off(curses_menu_items[items_num], O_SELECTABLE); + */ + + items_num++; + curses_menu_items[items_num] = NULL; } /* very hackish. adds a string to the last item added */ static void item_add_str(const char *fmt, ...) 
{ - va_list ap; - int index = items_num-1; - char new_str[256]; - char tmp_str[256]; - - if (index < 0) - return; - - va_start(ap, fmt); - vsnprintf(new_str, sizeof(new_str), fmt, ap); - va_end(ap); - snprintf(tmp_str, sizeof(tmp_str), "%s%s", - k_menu_items[index].str, new_str); - strncpy(k_menu_items[index].str, - tmp_str, - sizeof(k_menu_items[index].str)); - - free_item(curses_menu_items[index]); - curses_menu_items[index] = new_item( - k_menu_items[index].str, - k_menu_items[index].str); - set_item_userptr(curses_menu_items[index], - &k_menu_items[index]); + va_list ap; + int index = items_num - 1; + char new_str[256]; + char tmp_str[256]; + + if ( index < 0 ) + return; + + va_start(ap, fmt); + vsnprintf(new_str, sizeof(new_str), fmt, ap); + va_end(ap); + snprintf(tmp_str, sizeof(tmp_str), "%s%s", k_menu_items[index].str, + new_str); + strncpy(k_menu_items[index].str, tmp_str, sizeof(k_menu_items[index].str)); + + free_item(curses_menu_items[index]); + curses_menu_items[index] = + new_item(k_menu_items[index].str, k_menu_items[index].str); + set_item_userptr(curses_menu_items[index], &k_menu_items[index]); } /* get the tag of the currently selected item */ static char item_tag(void) { - ITEM *cur; - struct mitem *mcur; - - cur = current_item(curses_menu); - if (cur == NULL) - return 0; - mcur = (struct mitem *) item_userptr(cur); - return mcur->tag; + ITEM *cur; + struct mitem *mcur; + + cur = current_item(curses_menu); + if ( cur == NULL ) + return 0; + mcur = (struct mitem *)item_userptr(cur); + return mcur->tag; } static int curses_item_index(void) { - return item_index(current_item(curses_menu)); + return item_index(current_item(curses_menu)); } static void *item_data(void) { - ITEM *cur; - struct mitem *mcur; - - cur = current_item(curses_menu); - if (!cur) - return NULL; - mcur = (struct mitem *) item_userptr(cur); - return mcur->usrptr; - + ITEM *cur; + struct mitem *mcur; + + cur = current_item(curses_menu); + if ( !cur ) + return NULL; + mcur = (struct mitem *)item_userptr(cur); + return mcur->usrptr; } static int item_is_tag(char tag) { - return item_tag() == tag; + return item_tag() == tag; } -static char filename[PATH_MAX+1]; -static char menu_backtitle[PATH_MAX+128]; +static char filename[PATH_MAX + 1]; +static char menu_backtitle[PATH_MAX + 128]; static const char *set_config_filename(const char *config_filename) { - int size; + int size; - size = snprintf(menu_backtitle, sizeof(menu_backtitle), - "%s - %s", config_filename, rootmenu.prompt->text); - if (size >= sizeof(menu_backtitle)) - menu_backtitle[sizeof(menu_backtitle)-1] = '\0'; + size = snprintf(menu_backtitle, sizeof(menu_backtitle), "%s - %s", + config_filename, rootmenu.prompt->text); + if ( size >= sizeof(menu_backtitle) ) + menu_backtitle[sizeof(menu_backtitle) - 1] = '\0'; - size = snprintf(filename, sizeof(filename), "%s", config_filename); - if (size >= sizeof(filename)) - filename[sizeof(filename)-1] = '\0'; - return menu_backtitle; + size = snprintf(filename, sizeof(filename), "%s", config_filename); + if ( size >= sizeof(filename) ) + filename[sizeof(filename) - 1] = '\0'; + return menu_backtitle; } /* return = 0 means we are successful. 
@@ -645,371 +669,369 @@ static const char *set_config_filename(const char *config_filename) */ static int do_exit(void) { - int res; - if (!conf_get_changed()) { - global_exit = 1; - return 0; - } - res = btn_dialog(main_window, - _("Do you wish to save your new configuration?\n" - " to cancel and resume nconfig."), - 2, - " ", - ""); - if (res == KEY_EXIT) { - global_exit = 0; - return -1; - } - - /* if we got here, the user really wants to exit */ - switch (res) { - case 0: - res = conf_write(filename); - if (res) - btn_dialog( - main_window, - _("Error during writing of configuration.\n" - "Your configuration changes were NOT saved."), - 1, - ""); - break; - default: - btn_dialog( - main_window, - _("Your configuration changes were NOT saved."), - 1, - ""); - break; - } - global_exit = 1; - return 0; + int res; + if ( !conf_get_changed() ) + { + global_exit = 1; + return 0; + } + res = btn_dialog(main_window, + _("Do you wish to save your new configuration?\n" + " to cancel and resume nconfig."), + 2, " ", ""); + if ( res == KEY_EXIT ) + { + global_exit = 0; + return -1; + } + + /* if we got here, the user really wants to exit */ + switch (res) + { + case 0: + res = conf_write(filename); + if ( res ) + btn_dialog(main_window, + _("Error during writing of configuration.\n" + "Your configuration changes were NOT saved."), + 1, ""); + break; + default: + btn_dialog(main_window, _("Your configuration changes were NOT saved."), + 1, ""); + break; + } + global_exit = 1; + return 0; } - static void search_conf(void) { - struct symbol **sym_arr; - struct gstr res; - struct gstr title; - char *dialog_input; - int dres; - - title = str_new(); - str_printf( &title, _("Enter (sub)string or regexp to search for " - "(with or without \"%s\")"), CONFIG_); + struct symbol **sym_arr; + struct gstr res; + struct gstr title; + char *dialog_input; + int dres; + + title = str_new(); + str_printf(&title, + _("Enter (sub)string or regexp to search for " + "(with or without \"%s\")"), + CONFIG_); again: - dres = dialog_inputbox(main_window, - _("Search Configuration Parameter"), - str_get(&title), - "", &dialog_input_result, &dialog_input_result_len); - switch (dres) { - case 0: - break; - case 1: - show_scroll_win(main_window, - _("Search Configuration"), search_help); - goto again; - default: - str_free(&title); - return; - } - - /* strip the prefix if necessary */ - dialog_input = dialog_input_result; - if (strncasecmp(dialog_input_result, CONFIG_, strlen(CONFIG_)) == 0) - dialog_input += strlen(CONFIG_); - - sym_arr = sym_re_search(dialog_input); - res = get_relations_str(sym_arr, NULL); - free(sym_arr); - show_scroll_win(main_window, - _("Search Results"), str_get(&res)); - str_free(&res); - str_free(&title); + dres = dialog_inputbox(main_window, _("Search Configuration Parameter"), + str_get(&title), "", &dialog_input_result, + &dialog_input_result_len); + switch (dres) + { + case 0: + break; + case 1: + show_scroll_win(main_window, _("Search Configuration"), search_help); + goto again; + default: + str_free(&title); + return; + } + + /* strip the prefix if necessary */ + dialog_input = dialog_input_result; + if ( strncasecmp(dialog_input_result, CONFIG_, strlen(CONFIG_)) == 0 ) + dialog_input += strlen(CONFIG_); + + sym_arr = sym_re_search(dialog_input); + res = get_relations_str(sym_arr, NULL); + free(sym_arr); + show_scroll_win(main_window, _("Search Results"), str_get(&res)); + str_free(&res); + str_free(&title); } - static void build_conf(struct menu *menu) { - struct symbol *sym; - struct property 
*prop; - struct menu *child; - int type, tmp, doint = 2; - tristate val; - char ch; - - if (!menu || (!show_all_items && !menu_is_visible(menu))) - return; - - sym = menu->sym; - prop = menu->prompt; - if (!sym) { - if (prop && menu != current_menu) { - const char *prompt = menu_get_prompt(menu); - enum prop_type ptype; - ptype = menu->prompt ? menu->prompt->type : P_UNKNOWN; - switch (ptype) { - case P_MENU: - child_count++; - prompt = _(prompt); - if (single_menu_mode) { - item_make(menu, 'm', - "%s%*c%s", - menu->data ? "-->" : "++>", - indent + 1, ' ', prompt); - } else - item_make(menu, 'm', - " %*c%s %s", - indent + 1, ' ', prompt, - menu_is_empty(menu) ? "----" : "--->"); - - if (single_menu_mode && menu->data) - goto conf_childs; - return; - case P_COMMENT: - if (prompt) { - child_count++; - item_make(menu, ':', - " %*c*** %s ***", - indent + 1, ' ', - _(prompt)); - } - break; - default: - if (prompt) { - child_count++; - item_make(menu, ':', "---%*c%s", - indent + 1, ' ', - _(prompt)); - } - } - } else - doint = 0; - goto conf_childs; - } - - type = sym_get_type(sym); - if (sym_is_choice(sym)) { - struct symbol *def_sym = sym_get_choice_value(sym); - struct menu *def_menu = NULL; - - child_count++; - for (child = menu->list; child; child = child->next) { - if (menu_is_visible(child) && child->sym == def_sym) - def_menu = child; - } - - val = sym_get_tristate_value(sym); - if (sym_is_changable(sym)) { - switch (type) { - case S_BOOLEAN: - item_make(menu, 't', "[%c]", - val == no ? ' ' : '*'); - break; - case S_TRISTATE: - switch (val) { - case yes: - ch = '*'; - break; - case mod: - ch = 'M'; - break; - default: - ch = ' '; - break; - } - item_make(menu, 't', "<%c>", ch); - break; - } - } else { - item_make(menu, def_menu ? 't' : ':', " "); - } - - item_add_str("%*c%s", indent + 1, - ' ', _(menu_get_prompt(menu))); - if (val == yes) { - if (def_menu) { - item_add_str(" (%s)", - _(menu_get_prompt(def_menu))); - item_add_str(" --->"); - if (def_menu->list) { - indent += 2; - build_conf(def_menu); - indent -= 2; - } - } - return; - } - } else { - if (menu == current_menu) { - item_make(menu, ':', - "---%*c%s", indent + 1, - ' ', _(menu_get_prompt(menu))); - goto conf_childs; - } - child_count++; - val = sym_get_tristate_value(sym); - if (sym_is_choice_value(sym) && val == yes) { - item_make(menu, ':', " "); - } else { - switch (type) { - case S_BOOLEAN: - if (sym_is_changable(sym)) - item_make(menu, 't', "[%c]", - val == no ? ' ' : '*'); - else - item_make(menu, 't', "-%c-", - val == no ? ' ' : '*'); - break; - case S_TRISTATE: - switch (val) { - case yes: - ch = '*'; - break; - case mod: - ch = 'M'; - break; - default: - ch = ' '; - break; - } - if (sym_is_changable(sym)) { - if (sym->rev_dep.tri == mod) - item_make(menu, - 't', "{%c}", ch); - else - item_make(menu, - 't', "<%c>", ch); - } else - item_make(menu, 't', "-%c-", ch); - break; - default: - tmp = 2 + strlen(sym_get_string_value(sym)); - item_make(menu, 's', " (%s)", - sym_get_string_value(sym)); - tmp = indent - tmp + 4; - if (tmp < 0) - tmp = 0; - item_add_str("%*c%s%s", tmp, ' ', - _(menu_get_prompt(menu)), - (sym_has_value(sym) || - !sym_is_changable(sym)) ? "" : - _(" (NEW)")); - goto conf_childs; - } - } - item_add_str("%*c%s%s", indent + 1, ' ', - _(menu_get_prompt(menu)), - (sym_has_value(sym) || !sym_is_changable(sym)) ? - "" : _(" (NEW)")); - if (menu->prompt && menu->prompt->type == P_MENU) { - item_add_str(" %s", menu_is_empty(menu) ? 
"----" : "--->"); - return; - } - } + struct symbol *sym; + struct property *prop; + struct menu *child; + int type, tmp, doint = 2; + tristate val; + char ch; + + if ( !menu || (!show_all_items && !menu_is_visible(menu)) ) + return; + + sym = menu->sym; + prop = menu->prompt; + if ( !sym ) + { + if ( prop && menu != current_menu ) + { + const char *prompt = menu_get_prompt(menu); + enum prop_type ptype; + ptype = menu->prompt ? menu->prompt->type : P_UNKNOWN; + switch (ptype) + { + case P_MENU: + child_count++; + prompt = _(prompt); + if ( single_menu_mode ) + { + item_make(menu, 'm', "%s%*c%s", menu->data ? "-->" : "++>", + indent + 1, ' ', prompt); + } + else + item_make(menu, 'm', " %*c%s %s", indent + 1, ' ', + prompt, menu_is_empty(menu) ? "----" : "--->"); + + if ( single_menu_mode && menu->data ) + goto conf_childs; + return; + case P_COMMENT: + if ( prompt ) + { + child_count++; + item_make(menu, ':', " %*c*** %s ***", indent + 1, ' ', + _(prompt)); + } + break; + default: + if ( prompt ) + { + child_count++; + item_make(menu, ':', "---%*c%s", indent + 1, ' ', + _(prompt)); + } + } + } + else + doint = 0; + goto conf_childs; + } + + type = sym_get_type(sym); + if ( sym_is_choice(sym) ) + { + struct symbol *def_sym = sym_get_choice_value(sym); + struct menu *def_menu = NULL; + + child_count++; + for ( child = menu->list; child; child = child->next ) + { + if ( menu_is_visible(child) && child->sym == def_sym ) + def_menu = child; + } + + val = sym_get_tristate_value(sym); + if ( sym_is_changable(sym) ) + { + switch (type) + { + case S_BOOLEAN: + item_make(menu, 't', "[%c]", val == no ? ' ' : '*'); + break; + case S_TRISTATE: + switch (val) + { + case yes: + ch = '*'; + break; + case mod: + ch = 'M'; + break; + default: + ch = ' '; + break; + } + item_make(menu, 't', "<%c>", ch); + break; + } + } + else + { + item_make(menu, def_menu ? 't' : ':', " "); + } + + item_add_str("%*c%s", indent + 1, ' ', _(menu_get_prompt(menu))); + if ( val == yes ) + { + if ( def_menu ) + { + item_add_str(" (%s)", _(menu_get_prompt(def_menu))); + item_add_str(" --->"); + if ( def_menu->list ) + { + indent += 2; + build_conf(def_menu); + indent -= 2; + } + } + return; + } + } + else + { + if ( menu == current_menu ) + { + item_make(menu, ':', "---%*c%s", indent + 1, ' ', + _(menu_get_prompt(menu))); + goto conf_childs; + } + child_count++; + val = sym_get_tristate_value(sym); + if ( sym_is_choice_value(sym) && val == yes ) + { + item_make(menu, ':', " "); + } + else + { + switch (type) + { + case S_BOOLEAN: + if ( sym_is_changable(sym) ) + item_make(menu, 't', "[%c]", val == no ? ' ' : '*'); + else + item_make(menu, 't', "-%c-", val == no ? ' ' : '*'); + break; + case S_TRISTATE: + switch (val) + { + case yes: + ch = '*'; + break; + case mod: + ch = 'M'; + break; + default: + ch = ' '; + break; + } + if ( sym_is_changable(sym) ) + { + if ( sym->rev_dep.tri == mod ) + item_make(menu, 't', "{%c}", ch); + else + item_make(menu, 't', "<%c>", ch); + } + else + item_make(menu, 't', "-%c-", ch); + break; + default: + tmp = 2 + strlen(sym_get_string_value(sym)); + item_make(menu, 's', " (%s)", sym_get_string_value(sym)); + tmp = indent - tmp + 4; + if ( tmp < 0 ) + tmp = 0; + item_add_str("%*c%s%s", tmp, ' ', _(menu_get_prompt(menu)), + (sym_has_value(sym) || !sym_is_changable(sym)) + ? "" + : _(" (NEW)")); + goto conf_childs; + } + } + item_add_str( + "%*c%s%s", indent + 1, ' ', _(menu_get_prompt(menu)), + (sym_has_value(sym) || !sym_is_changable(sym)) ? 
"" : _(" (NEW)")); + if ( menu->prompt && menu->prompt->type == P_MENU ) + { + item_add_str(" %s", menu_is_empty(menu) ? "----" : "--->"); + return; + } + } conf_childs: - indent += doint; - for (child = menu->list; child; child = child->next) - build_conf(child); - indent -= doint; + indent += doint; + for ( child = menu->list; child; child = child->next ) + build_conf(child); + indent -= doint; } static void reset_menu(void) { - unpost_menu(curses_menu); - clean_items(); + unpost_menu(curses_menu); + clean_items(); } /* adjust the menu to show this item. * prefer not to scroll the menu if possible*/ static void center_item(int selected_index, int *last_top_row) { - int toprow; - - set_top_row(curses_menu, *last_top_row); - toprow = top_row(curses_menu); - if (selected_index < toprow || - selected_index >= toprow+mwin_max_lines) { - toprow = max(selected_index-mwin_max_lines/2, 0); - if (toprow >= item_count(curses_menu)-mwin_max_lines) - toprow = item_count(curses_menu)-mwin_max_lines; - set_top_row(curses_menu, toprow); - } - set_current_item(curses_menu, - curses_menu_items[selected_index]); - *last_top_row = toprow; - post_menu(curses_menu); - refresh_all_windows(main_window); + int toprow; + + set_top_row(curses_menu, *last_top_row); + toprow = top_row(curses_menu); + if ( selected_index < toprow || selected_index >= toprow + mwin_max_lines ) + { + toprow = max(selected_index - mwin_max_lines / 2, 0); + if ( toprow >= item_count(curses_menu) - mwin_max_lines ) + toprow = item_count(curses_menu) - mwin_max_lines; + set_top_row(curses_menu, toprow); + } + set_current_item(curses_menu, curses_menu_items[selected_index]); + *last_top_row = toprow; + post_menu(curses_menu); + refresh_all_windows(main_window); } /* this function assumes reset_menu has been called before */ static void show_menu(const char *prompt, const char *instructions, - int selected_index, int *last_top_row) + int selected_index, int *last_top_row) { - int maxx, maxy; - WINDOW *menu_window; - - current_instructions = instructions; - - clear(); - (void) wattrset(main_window, attributes[NORMAL]); - print_in_middle(stdscr, 1, 0, getmaxx(stdscr), - menu_backtitle, - attributes[MAIN_HEADING]); - - (void) wattrset(main_window, attributes[MAIN_MENU_BOX]); - box(main_window, 0, 0); - (void) wattrset(main_window, attributes[MAIN_MENU_HEADING]); - mvwprintw(main_window, 0, 3, " %s ", prompt); - (void) wattrset(main_window, attributes[NORMAL]); - - set_menu_items(curses_menu, curses_menu_items); - - /* position the menu at the middle of the screen */ - scale_menu(curses_menu, &maxy, &maxx); - maxx = min(maxx, mwin_max_cols-2); - maxy = mwin_max_lines; - menu_window = derwin(main_window, - maxy, - maxx, - 2, - (mwin_max_cols-maxx)/2); - keypad(menu_window, TRUE); - set_menu_win(curses_menu, menu_window); - set_menu_sub(curses_menu, menu_window); - - /* must reassert this after changing items, otherwise returns to a - * default of 16 - */ - set_menu_format(curses_menu, maxy, 1); - center_item(selected_index, last_top_row); - set_menu_format(curses_menu, maxy, 1); - - print_function_line(); - - /* Post the menu */ - post_menu(curses_menu); - refresh_all_windows(main_window); + int maxx, maxy; + WINDOW *menu_window; + + current_instructions = instructions; + + clear(); + (void)wattrset(main_window, attributes[NORMAL]); + print_in_middle(stdscr, 1, 0, getmaxx(stdscr), menu_backtitle, + attributes[MAIN_HEADING]); + + (void)wattrset(main_window, attributes[MAIN_MENU_BOX]); + box(main_window, 0, 0); + (void)wattrset(main_window, 
attributes[MAIN_MENU_HEADING]); + mvwprintw(main_window, 0, 3, " %s ", prompt); + (void)wattrset(main_window, attributes[NORMAL]); + + set_menu_items(curses_menu, curses_menu_items); + + /* position the menu at the middle of the screen */ + scale_menu(curses_menu, &maxy, &maxx); + maxx = min(maxx, mwin_max_cols - 2); + maxy = mwin_max_lines; + menu_window = + derwin(main_window, maxy, maxx, 2, (mwin_max_cols - maxx) / 2); + keypad(menu_window, TRUE); + set_menu_win(curses_menu, menu_window); + set_menu_sub(curses_menu, menu_window); + + /* must reassert this after changing items, otherwise returns to a + * default of 16 + */ + set_menu_format(curses_menu, maxy, 1); + center_item(selected_index, last_top_row); + set_menu_format(curses_menu, maxy, 1); + + print_function_line(); + + /* Post the menu */ + post_menu(curses_menu); + refresh_all_windows(main_window); } static void adj_match_dir(match_f *match_direction) { - if (*match_direction == FIND_NEXT_MATCH_DOWN) - *match_direction = - MATCH_TINKER_PATTERN_DOWN; - else if (*match_direction == FIND_NEXT_MATCH_UP) - *match_direction = - MATCH_TINKER_PATTERN_UP; - /* else, do no change.. */ + if ( *match_direction == FIND_NEXT_MATCH_DOWN ) + *match_direction = MATCH_TINKER_PATTERN_DOWN; + else if ( *match_direction == FIND_NEXT_MATCH_UP ) + *match_direction = MATCH_TINKER_PATTERN_UP; + /* else, do no change.. */ } struct match_state { - int in_search; - match_f match_direction; - char pattern[256]; + int in_search; + match_f match_direction; + char pattern[256]; }; /* Return 0 means I have handled the key. In such a case, ans should hold the @@ -1018,544 +1040,552 @@ struct match_state */ static int do_match(int key, struct match_state *state, int *ans) { - char c = (char) key; - int terminate_search = 0; - *ans = -1; - if (key == '/' || (state->in_search && key == 27)) { - move(0, 0); - refresh(); - clrtoeol(); - state->in_search = 1-state->in_search; - bzero(state->pattern, sizeof(state->pattern)); - state->match_direction = MATCH_TINKER_PATTERN_DOWN; - return 0; - } else if (!state->in_search) - return 1; - - if (isalnum(c) || isgraph(c) || c == ' ') { - state->pattern[strlen(state->pattern)] = c; - state->pattern[strlen(state->pattern)] = '\0'; - adj_match_dir(&state->match_direction); - *ans = get_mext_match(state->pattern, - state->match_direction); - } else if (key == KEY_DOWN) { - state->match_direction = FIND_NEXT_MATCH_DOWN; - *ans = get_mext_match(state->pattern, - state->match_direction); - } else if (key == KEY_UP) { - state->match_direction = FIND_NEXT_MATCH_UP; - *ans = get_mext_match(state->pattern, - state->match_direction); - } else if (key == KEY_BACKSPACE || key == 127) { - state->pattern[strlen(state->pattern)-1] = '\0'; - adj_match_dir(&state->match_direction); - } else - terminate_search = 1; - - if (terminate_search) { - state->in_search = 0; - bzero(state->pattern, sizeof(state->pattern)); - move(0, 0); - refresh(); - clrtoeol(); - return -1; - } - return 0; + char c = (char)key; + int terminate_search = 0; + *ans = -1; + if ( key == '/' || (state->in_search && key == 27) ) + { + move(0, 0); + refresh(); + clrtoeol(); + state->in_search = 1 - state->in_search; + bzero(state->pattern, sizeof(state->pattern)); + state->match_direction = MATCH_TINKER_PATTERN_DOWN; + return 0; + } + else if ( !state->in_search ) + return 1; + + if ( isalnum(c) || isgraph(c) || c == ' ' ) + { + state->pattern[strlen(state->pattern)] = c; + state->pattern[strlen(state->pattern)] = '\0'; + adj_match_dir(&state->match_direction); + *ans = 
get_mext_match(state->pattern, state->match_direction); + } + else if ( key == KEY_DOWN ) + { + state->match_direction = FIND_NEXT_MATCH_DOWN; + *ans = get_mext_match(state->pattern, state->match_direction); + } + else if ( key == KEY_UP ) + { + state->match_direction = FIND_NEXT_MATCH_UP; + *ans = get_mext_match(state->pattern, state->match_direction); + } + else if ( key == KEY_BACKSPACE || key == 127 ) + { + state->pattern[strlen(state->pattern) - 1] = '\0'; + adj_match_dir(&state->match_direction); + } + else + terminate_search = 1; + + if ( terminate_search ) + { + state->in_search = 0; + bzero(state->pattern, sizeof(state->pattern)); + move(0, 0); + refresh(); + clrtoeol(); + return -1; + } + return 0; } static void conf(struct menu *menu) { - struct menu *submenu = 0; - const char *prompt = menu_get_prompt(menu); - struct symbol *sym; - int res; - int current_index = 0; - int last_top_row = 0; - struct match_state match_state = { - .in_search = 0, - .match_direction = MATCH_TINKER_PATTERN_DOWN, - .pattern = "", - }; - - while (!global_exit) { - reset_menu(); - current_menu = menu; - build_conf(menu); - if (!child_count) - break; - - show_menu(prompt ? _(prompt) : _("Main Menu"), - _(menu_instructions), - current_index, &last_top_row); - keypad((menu_win(curses_menu)), TRUE); - while (!global_exit) { - if (match_state.in_search) { - mvprintw(0, 0, - "searching: %s", match_state.pattern); - clrtoeol(); - } - refresh_all_windows(main_window); - res = wgetch(menu_win(curses_menu)); - if (!res) - break; - if (do_match(res, &match_state, ¤t_index) == 0) { - if (current_index != -1) - center_item(current_index, - &last_top_row); - continue; - } - if (process_special_keys(&res, - (struct menu *) item_data())) - break; - switch (res) { - case KEY_DOWN: - menu_driver(curses_menu, REQ_DOWN_ITEM); - break; - case KEY_UP: - menu_driver(curses_menu, REQ_UP_ITEM); - break; - case KEY_NPAGE: - menu_driver(curses_menu, REQ_SCR_DPAGE); - break; - case KEY_PPAGE: - menu_driver(curses_menu, REQ_SCR_UPAGE); - break; - case KEY_HOME: - menu_driver(curses_menu, REQ_FIRST_ITEM); - break; - case KEY_END: - menu_driver(curses_menu, REQ_LAST_ITEM); - break; - case 'h': - case '?': - show_help((struct menu *) item_data()); - break; - } - if (res == 10 || res == 27 || - res == 32 || res == 'n' || res == 'y' || - res == KEY_LEFT || res == KEY_RIGHT || - res == 'm') - break; - refresh_all_windows(main_window); - } - - refresh_all_windows(main_window); - /* if ESC or left*/ - if (res == 27 || (menu != &rootmenu && res == KEY_LEFT)) - break; - - /* remember location in the menu */ - last_top_row = top_row(curses_menu); - current_index = curses_item_index(); - - if (!item_tag()) - continue; - - submenu = (struct menu *) item_data(); - if (!submenu || !menu_is_visible(submenu)) - continue; - sym = submenu->sym; - - switch (res) { - case ' ': - if (item_is_tag('t')) - sym_toggle_tristate_value(sym); - else if (item_is_tag('m')) - conf(submenu); - break; - case KEY_RIGHT: - case 10: /* ENTER WAS PRESSED */ - switch (item_tag()) { - case 'm': - if (single_menu_mode) - submenu->data = - (void *) (long) !submenu->data; - else - conf(submenu); - break; - case 't': - if (sym_is_choice(sym) && - sym_get_tristate_value(sym) == yes) - conf_choice(submenu); - else if (submenu->prompt && - submenu->prompt->type == P_MENU) - conf(submenu); - else if (res == 10) - sym_toggle_tristate_value(sym); - break; - case 's': - conf_string(submenu); - break; - } - break; - case 'y': - if (item_is_tag('t')) { - if 
(sym_set_tristate_value(sym, yes)) - break; - if (sym_set_tristate_value(sym, mod)) - btn_dialog(main_window, setmod_text, 0); - } - break; - case 'n': - if (item_is_tag('t')) - sym_set_tristate_value(sym, no); - break; - case 'm': - if (item_is_tag('t')) - sym_set_tristate_value(sym, mod); - break; - } - } + struct menu *submenu = 0; + const char *prompt = menu_get_prompt(menu); + struct symbol *sym; + int res; + int current_index = 0; + int last_top_row = 0; + struct match_state match_state = { + .in_search = 0, + .match_direction = MATCH_TINKER_PATTERN_DOWN, + .pattern = "", + }; + + while ( !global_exit ) + { + reset_menu(); + current_menu = menu; + build_conf(menu); + if ( !child_count ) + break; + + show_menu(prompt ? _(prompt) : _("Main Menu"), _(menu_instructions), + current_index, &last_top_row); + keypad((menu_win(curses_menu)), TRUE); + while ( !global_exit ) + { + if ( match_state.in_search ) + { + mvprintw(0, 0, "searching: %s", match_state.pattern); + clrtoeol(); + } + refresh_all_windows(main_window); + res = wgetch(menu_win(curses_menu)); + if ( !res ) + break; + if ( do_match(res, &match_state, ¤t_index) == 0 ) + { + if ( current_index != -1 ) + center_item(current_index, &last_top_row); + continue; + } + if ( process_special_keys(&res, (struct menu *)item_data()) ) + break; + switch (res) + { + case KEY_DOWN: + menu_driver(curses_menu, REQ_DOWN_ITEM); + break; + case KEY_UP: + menu_driver(curses_menu, REQ_UP_ITEM); + break; + case KEY_NPAGE: + menu_driver(curses_menu, REQ_SCR_DPAGE); + break; + case KEY_PPAGE: + menu_driver(curses_menu, REQ_SCR_UPAGE); + break; + case KEY_HOME: + menu_driver(curses_menu, REQ_FIRST_ITEM); + break; + case KEY_END: + menu_driver(curses_menu, REQ_LAST_ITEM); + break; + case 'h': + case '?': + show_help((struct menu *)item_data()); + break; + } + if ( res == 10 || res == 27 || res == 32 || res == 'n' || + res == 'y' || res == KEY_LEFT || res == KEY_RIGHT || + res == 'm' ) + break; + refresh_all_windows(main_window); + } + + refresh_all_windows(main_window); + /* if ESC or left*/ + if ( res == 27 || (menu != &rootmenu && res == KEY_LEFT) ) + break; + + /* remember location in the menu */ + last_top_row = top_row(curses_menu); + current_index = curses_item_index(); + + if ( !item_tag() ) + continue; + + submenu = (struct menu *)item_data(); + if ( !submenu || !menu_is_visible(submenu) ) + continue; + sym = submenu->sym; + + switch (res) + { + case ' ': + if ( item_is_tag('t') ) + sym_toggle_tristate_value(sym); + else if ( item_is_tag('m') ) + conf(submenu); + break; + case KEY_RIGHT: + case 10: /* ENTER WAS PRESSED */ + switch (item_tag()) + { + case 'm': + if ( single_menu_mode ) + submenu->data = (void *)(long)!submenu->data; + else + conf(submenu); + break; + case 't': + if ( sym_is_choice(sym) && sym_get_tristate_value(sym) == yes ) + conf_choice(submenu); + else if ( submenu->prompt && submenu->prompt->type == P_MENU ) + conf(submenu); + else if ( res == 10 ) + sym_toggle_tristate_value(sym); + break; + case 's': + conf_string(submenu); + break; + } + break; + case 'y': + if ( item_is_tag('t') ) + { + if ( sym_set_tristate_value(sym, yes) ) + break; + if ( sym_set_tristate_value(sym, mod) ) + btn_dialog(main_window, setmod_text, 0); + } + break; + case 'n': + if ( item_is_tag('t') ) + sym_set_tristate_value(sym, no); + break; + case 'm': + if ( item_is_tag('t') ) + sym_set_tristate_value(sym, mod); + break; + } + } } static void conf_message_callback(const char *fmt, va_list ap) { - char buf[1024]; + char buf[1024]; - vsnprintf(buf, 
sizeof(buf), fmt, ap); - btn_dialog(main_window, buf, 1, ""); + vsnprintf(buf, sizeof(buf), fmt, ap); + btn_dialog(main_window, buf, 1, ""); } static void show_help(struct menu *menu) { - struct gstr help; + struct gstr help; - if (!menu) - return; + if ( !menu ) + return; - help = str_new(); - menu_get_ext_help(menu, &help); - show_scroll_win(main_window, _(menu_get_prompt(menu)), str_get(&help)); - str_free(&help); + help = str_new(); + menu_get_ext_help(menu, &help); + show_scroll_win(main_window, _(menu_get_prompt(menu)), str_get(&help)); + str_free(&help); } static void conf_choice(struct menu *menu) { - const char *prompt = _(menu_get_prompt(menu)); - struct menu *child = 0; - struct symbol *active; - int selected_index = 0; - int last_top_row = 0; - int res, i = 0; - struct match_state match_state = { - .in_search = 0, - .match_direction = MATCH_TINKER_PATTERN_DOWN, - .pattern = "", - }; - - active = sym_get_choice_value(menu->sym); - /* this is mostly duplicated from the conf() function. */ - while (!global_exit) { - reset_menu(); - - for (i = 0, child = menu->list; child; child = child->next) { - if (!show_all_items && !menu_is_visible(child)) - continue; - - if (child->sym == sym_get_choice_value(menu->sym)) - item_make(child, ':', " %s", - _(menu_get_prompt(child))); - else if (child->sym) - item_make(child, ':', " %s", - _(menu_get_prompt(child))); - else - item_make(child, ':', "*** %s ***", - _(menu_get_prompt(child))); - - if (child->sym == active){ - last_top_row = top_row(curses_menu); - selected_index = i; - } - i++; - } - show_menu(prompt ? _(prompt) : _("Choice Menu"), - _(radiolist_instructions), - selected_index, - &last_top_row); - while (!global_exit) { - if (match_state.in_search) { - mvprintw(0, 0, "searching: %s", - match_state.pattern); - clrtoeol(); - } - refresh_all_windows(main_window); - res = wgetch(menu_win(curses_menu)); - if (!res) - break; - if (do_match(res, &match_state, &selected_index) == 0) { - if (selected_index != -1) - center_item(selected_index, - &last_top_row); - continue; - } - if (process_special_keys( - &res, - (struct menu *) item_data())) - break; - switch (res) { - case KEY_DOWN: - menu_driver(curses_menu, REQ_DOWN_ITEM); - break; - case KEY_UP: - menu_driver(curses_menu, REQ_UP_ITEM); - break; - case KEY_NPAGE: - menu_driver(curses_menu, REQ_SCR_DPAGE); - break; - case KEY_PPAGE: - menu_driver(curses_menu, REQ_SCR_UPAGE); - break; - case KEY_HOME: - menu_driver(curses_menu, REQ_FIRST_ITEM); - break; - case KEY_END: - menu_driver(curses_menu, REQ_LAST_ITEM); - break; - case 'h': - case '?': - show_help((struct menu *) item_data()); - break; - } - if (res == 10 || res == 27 || res == ' ' || - res == KEY_LEFT){ - break; - } - refresh_all_windows(main_window); - } - /* if ESC or left */ - if (res == 27 || res == KEY_LEFT) - break; - - child = item_data(); - if (!child || !menu_is_visible(child) || !child->sym) - continue; - switch (res) { - case ' ': - case 10: - case KEY_RIGHT: - sym_set_tristate_value(child->sym, yes); - return; - case 'h': - case '?': - show_help(child); - active = child->sym; - break; - case KEY_EXIT: - return; - } - } + const char *prompt = _(menu_get_prompt(menu)); + struct menu *child = 0; + struct symbol *active; + int selected_index = 0; + int last_top_row = 0; + int res, i = 0; + struct match_state match_state = { + .in_search = 0, + .match_direction = MATCH_TINKER_PATTERN_DOWN, + .pattern = "", + }; + + active = sym_get_choice_value(menu->sym); + /* this is mostly duplicated from the conf() function. 
*/ + while ( !global_exit ) + { + reset_menu(); + + for ( i = 0, child = menu->list; child; child = child->next ) + { + if ( !show_all_items && !menu_is_visible(child) ) + continue; + + if ( child->sym == sym_get_choice_value(menu->sym) ) + item_make(child, ':', " %s", _(menu_get_prompt(child))); + else if ( child->sym ) + item_make(child, ':', " %s", _(menu_get_prompt(child))); + else + item_make(child, ':', "*** %s ***", _(menu_get_prompt(child))); + + if ( child->sym == active ) + { + last_top_row = top_row(curses_menu); + selected_index = i; + } + i++; + } + show_menu(prompt ? _(prompt) : _("Choice Menu"), + _(radiolist_instructions), selected_index, &last_top_row); + while ( !global_exit ) + { + if ( match_state.in_search ) + { + mvprintw(0, 0, "searching: %s", match_state.pattern); + clrtoeol(); + } + refresh_all_windows(main_window); + res = wgetch(menu_win(curses_menu)); + if ( !res ) + break; + if ( do_match(res, &match_state, &selected_index) == 0 ) + { + if ( selected_index != -1 ) + center_item(selected_index, &last_top_row); + continue; + } + if ( process_special_keys(&res, (struct menu *)item_data()) ) + break; + switch (res) + { + case KEY_DOWN: + menu_driver(curses_menu, REQ_DOWN_ITEM); + break; + case KEY_UP: + menu_driver(curses_menu, REQ_UP_ITEM); + break; + case KEY_NPAGE: + menu_driver(curses_menu, REQ_SCR_DPAGE); + break; + case KEY_PPAGE: + menu_driver(curses_menu, REQ_SCR_UPAGE); + break; + case KEY_HOME: + menu_driver(curses_menu, REQ_FIRST_ITEM); + break; + case KEY_END: + menu_driver(curses_menu, REQ_LAST_ITEM); + break; + case 'h': + case '?': + show_help((struct menu *)item_data()); + break; + } + if ( res == 10 || res == 27 || res == ' ' || res == KEY_LEFT ) + { + break; + } + refresh_all_windows(main_window); + } + /* if ESC or left */ + if ( res == 27 || res == KEY_LEFT ) + break; + + child = item_data(); + if ( !child || !menu_is_visible(child) || !child->sym ) + continue; + switch (res) + { + case ' ': + case 10: + case KEY_RIGHT: + sym_set_tristate_value(child->sym, yes); + return; + case 'h': + case '?': + show_help(child); + active = child->sym; + break; + case KEY_EXIT: + return; + } + } } static void conf_string(struct menu *menu) { - const char *prompt = menu_get_prompt(menu); - - while (1) { - int res; - const char *heading; - - switch (sym_get_type(menu->sym)) { - case S_INT: - heading = _(inputbox_instructions_int); - break; - case S_HEX: - heading = _(inputbox_instructions_hex); - break; - case S_STRING: - heading = _(inputbox_instructions_string); - break; - default: - heading = _("Internal nconf error!"); - } - res = dialog_inputbox(main_window, - prompt ? _(prompt) : _("Main Menu"), - heading, - sym_get_string_value(menu->sym), - &dialog_input_result, - &dialog_input_result_len); - switch (res) { - case 0: - if (sym_set_string_value(menu->sym, - dialog_input_result)) - return; - btn_dialog(main_window, - _("You have made an invalid entry."), 0); - break; - case 1: - show_help(menu); - break; - case KEY_EXIT: - return; - } - } + const char *prompt = menu_get_prompt(menu); + + while ( 1 ) + { + int res; + const char *heading; + + switch (sym_get_type(menu->sym)) + { + case S_INT: + heading = _(inputbox_instructions_int); + break; + case S_HEX: + heading = _(inputbox_instructions_hex); + break; + case S_STRING: + heading = _(inputbox_instructions_string); + break; + default: + heading = _("Internal nconf error!"); + } + res = dialog_inputbox(main_window, prompt ? 
_(prompt) : _("Main Menu"), + heading, sym_get_string_value(menu->sym), + &dialog_input_result, &dialog_input_result_len); + switch (res) + { + case 0: + if ( sym_set_string_value(menu->sym, dialog_input_result) ) + return; + btn_dialog(main_window, _("You have made an invalid entry."), 0); + break; + case 1: + show_help(menu); + break; + case KEY_EXIT: + return; + } + } } static void conf_load(void) { - while (1) { - int res; - res = dialog_inputbox(main_window, - NULL, load_config_text, - filename, - &dialog_input_result, - &dialog_input_result_len); - switch (res) { - case 0: - if (!dialog_input_result[0]) - return; - if (!conf_read(dialog_input_result)) { - set_config_filename(dialog_input_result); - sym_set_change_count(1); - return; - } - btn_dialog(main_window, _("File does not exist!"), 0); - break; - case 1: - show_scroll_win(main_window, - _("Load Alternate Configuration"), - load_config_help); - break; - case KEY_EXIT: - return; - } - } + while ( 1 ) + { + int res; + res = dialog_inputbox(main_window, NULL, load_config_text, filename, + &dialog_input_result, &dialog_input_result_len); + switch (res) + { + case 0: + if ( !dialog_input_result[0] ) + return; + if ( !conf_read(dialog_input_result) ) + { + set_config_filename(dialog_input_result); + sym_set_change_count(1); + return; + } + btn_dialog(main_window, _("File does not exist!"), 0); + break; + case 1: + show_scroll_win(main_window, _("Load Alternate Configuration"), + load_config_help); + break; + case KEY_EXIT: + return; + } + } } static void conf_save(void) { - while (1) { - int res; - res = dialog_inputbox(main_window, - NULL, save_config_text, - filename, - &dialog_input_result, - &dialog_input_result_len); - switch (res) { - case 0: - if (!dialog_input_result[0]) - return; - res = conf_write(dialog_input_result); - if (!res) { - set_config_filename(dialog_input_result); - return; - } - btn_dialog(main_window, _("Can't create file! " - "Probably a nonexistent directory."), - 1, ""); - break; - case 1: - show_scroll_win(main_window, - _("Save Alternate Configuration"), - save_config_help); - break; - case KEY_EXIT: - return; - } - } + while ( 1 ) + { + int res; + res = dialog_inputbox(main_window, NULL, save_config_text, filename, + &dialog_input_result, &dialog_input_result_len); + switch (res) + { + case 0: + if ( !dialog_input_result[0] ) + return; + res = conf_write(dialog_input_result); + if ( !res ) + { + set_config_filename(dialog_input_result); + return; + } + btn_dialog(main_window, + _("Can't create file! 
" + "Probably a nonexistent directory."), + 1, ""); + break; + case 1: + show_scroll_win(main_window, _("Save Alternate Configuration"), + save_config_help); + break; + case KEY_EXIT: + return; + } + } } void setup_windows(void) { - int lines, columns; + int lines, columns; - getmaxyx(stdscr, lines, columns); + getmaxyx(stdscr, lines, columns); - if (main_window != NULL) - delwin(main_window); + if ( main_window != NULL ) + delwin(main_window); - /* set up the menu and menu window */ - main_window = newwin(lines-2, columns-2, 2, 1); - keypad(main_window, TRUE); - mwin_max_lines = lines-7; - mwin_max_cols = columns-6; + /* set up the menu and menu window */ + main_window = newwin(lines - 2, columns - 2, 2, 1); + keypad(main_window, TRUE); + mwin_max_lines = lines - 7; + mwin_max_cols = columns - 6; - /* panels order is from bottom to top */ - new_panel(main_window); + /* panels order is from bottom to top */ + new_panel(main_window); } int main(int ac, char **av) { - int lines, columns; - char *mode; - - setlocale(LC_ALL, ""); - bindtextdomain(PACKAGE, LOCALEDIR); - textdomain(PACKAGE); - - if (ac > 1 && strcmp(av[1], "-s") == 0) { - /* Silence conf_read() until the real callback is set up */ - conf_set_message_callback(NULL); - av++; - } - conf_parse(av[1]); - conf_read(NULL); - - mode = getenv("NCONFIG_MODE"); - if (mode) { - if (!strcasecmp(mode, "single_menu")) - single_menu_mode = 1; - } - - /* Initialize curses */ - initscr(); - /* set color theme */ - set_colors(); - - cbreak(); - noecho(); - keypad(stdscr, TRUE); - curs_set(0); - - getmaxyx(stdscr, lines, columns); - if (columns < 75 || lines < 20) { - endwin(); - printf("Your terminal should have at " - "least 20 lines and 75 columns\n"); - return 1; - } - - notimeout(stdscr, FALSE); + int lines, columns; + char *mode; + + setlocale(LC_ALL, ""); + bindtextdomain(PACKAGE, LOCALEDIR); + textdomain(PACKAGE); + + if ( ac > 1 && strcmp(av[1], "-s") == 0 ) + { + /* Silence conf_read() until the real callback is set up */ + conf_set_message_callback(NULL); + av++; + } + conf_parse(av[1]); + conf_read(NULL); + + mode = getenv("NCONFIG_MODE"); + if ( mode ) + { + if ( !strcasecmp(mode, "single_menu") ) + single_menu_mode = 1; + } + + /* Initialize curses */ + initscr(); + /* set color theme */ + set_colors(); + + cbreak(); + noecho(); + keypad(stdscr, TRUE); + curs_set(0); + + getmaxyx(stdscr, lines, columns); + if ( columns < 75 || lines < 20 ) + { + endwin(); + printf("Your terminal should have at " + "least 20 lines and 75 columns\n"); + return 1; + } + + notimeout(stdscr, FALSE); #if NCURSES_REENTRANT - set_escdelay(1); + set_escdelay(1); #else - ESCDELAY = 1; + ESCDELAY = 1; #endif - /* set btns menu */ - curses_menu = new_menu(curses_menu_items); - menu_opts_off(curses_menu, O_SHOWDESC); - menu_opts_on(curses_menu, O_SHOWMATCH); - menu_opts_on(curses_menu, O_ONEVALUE); - menu_opts_on(curses_menu, O_NONCYCLIC); - menu_opts_on(curses_menu, O_IGNORECASE); - set_menu_mark(curses_menu, " "); - set_menu_fore(curses_menu, attributes[MAIN_MENU_FORE]); - set_menu_back(curses_menu, attributes[MAIN_MENU_BACK]); - set_menu_grey(curses_menu, attributes[MAIN_MENU_GREY]); - - set_config_filename(conf_get_configname()); - setup_windows(); - - /* check for KEY_FUNC(1) */ - if (has_key(KEY_F(1)) == FALSE) { - show_scroll_win(main_window, - _("Instructions"), - _(menu_no_f_instructions)); - } - - conf_set_message_callback(conf_message_callback); - /* do the work */ - while (!global_exit) { - conf(&rootmenu); - if (!global_exit && do_exit() == 0) - 
break; - } - /* ok, we are done */ - unpost_menu(curses_menu); - free_menu(curses_menu); - delwin(main_window); - clear(); - refresh(); - endwin(); - return 0; + /* set btns menu */ + curses_menu = new_menu(curses_menu_items); + menu_opts_off(curses_menu, O_SHOWDESC); + menu_opts_on(curses_menu, O_SHOWMATCH); + menu_opts_on(curses_menu, O_ONEVALUE); + menu_opts_on(curses_menu, O_NONCYCLIC); + menu_opts_on(curses_menu, O_IGNORECASE); + set_menu_mark(curses_menu, " "); + set_menu_fore(curses_menu, attributes[MAIN_MENU_FORE]); + set_menu_back(curses_menu, attributes[MAIN_MENU_BACK]); + set_menu_grey(curses_menu, attributes[MAIN_MENU_GREY]); + + set_config_filename(conf_get_configname()); + setup_windows(); + + /* check for KEY_FUNC(1) */ + if ( has_key(KEY_F(1)) == FALSE ) + { + show_scroll_win(main_window, _("Instructions"), + _(menu_no_f_instructions)); + } + + conf_set_message_callback(conf_message_callback); + /* do the work */ + while ( !global_exit ) + { + conf(&rootmenu); + if ( !global_exit && do_exit() == 0 ) + break; + } + /* ok, we are done */ + unpost_menu(curses_menu); + free_menu(curses_menu); + delwin(main_window); + clear(); + refresh(); + endwin(); + return 0; } diff --git a/xen/tools/kconfig/nconf.gui.c b/xen/tools/kconfig/nconf.gui.c index 8275f0e551..23ffa2a062 100644 --- a/xen/tools/kconfig/nconf.gui.c +++ b/xen/tools/kconfig/nconf.gui.c @@ -8,7 +8,7 @@ #include "nconf.h" /* a list of all the different widgets we use */ -attributes_t attributes[ATTR_MAX+1] = {0}; +attributes_t attributes[ATTR_MAX + 1] = {0}; /* available colors: COLOR_BLACK 0 @@ -22,33 +22,33 @@ attributes_t attributes[ATTR_MAX+1] = {0}; */ static void set_normal_colors(void) { - init_pair(NORMAL, -1, -1); - init_pair(MAIN_HEADING, COLOR_MAGENTA, -1); - - /* FORE is for the selected item */ - init_pair(MAIN_MENU_FORE, -1, -1); - /* BACK for all the rest */ - init_pair(MAIN_MENU_BACK, -1, -1); - init_pair(MAIN_MENU_GREY, -1, -1); - init_pair(MAIN_MENU_HEADING, COLOR_GREEN, -1); - init_pair(MAIN_MENU_BOX, COLOR_YELLOW, -1); - - init_pair(SCROLLWIN_TEXT, -1, -1); - init_pair(SCROLLWIN_HEADING, COLOR_GREEN, -1); - init_pair(SCROLLWIN_BOX, COLOR_YELLOW, -1); - - init_pair(DIALOG_TEXT, -1, -1); - init_pair(DIALOG_BOX, COLOR_YELLOW, -1); - init_pair(DIALOG_MENU_BACK, COLOR_YELLOW, -1); - init_pair(DIALOG_MENU_FORE, COLOR_RED, -1); - - init_pair(INPUT_BOX, COLOR_YELLOW, -1); - init_pair(INPUT_HEADING, COLOR_GREEN, -1); - init_pair(INPUT_TEXT, -1, -1); - init_pair(INPUT_FIELD, -1, -1); - - init_pair(FUNCTION_HIGHLIGHT, -1, -1); - init_pair(FUNCTION_TEXT, COLOR_YELLOW, -1); + init_pair(NORMAL, -1, -1); + init_pair(MAIN_HEADING, COLOR_MAGENTA, -1); + + /* FORE is for the selected item */ + init_pair(MAIN_MENU_FORE, -1, -1); + /* BACK for all the rest */ + init_pair(MAIN_MENU_BACK, -1, -1); + init_pair(MAIN_MENU_GREY, -1, -1); + init_pair(MAIN_MENU_HEADING, COLOR_GREEN, -1); + init_pair(MAIN_MENU_BOX, COLOR_YELLOW, -1); + + init_pair(SCROLLWIN_TEXT, -1, -1); + init_pair(SCROLLWIN_HEADING, COLOR_GREEN, -1); + init_pair(SCROLLWIN_BOX, COLOR_YELLOW, -1); + + init_pair(DIALOG_TEXT, -1, -1); + init_pair(DIALOG_BOX, COLOR_YELLOW, -1); + init_pair(DIALOG_MENU_BACK, COLOR_YELLOW, -1); + init_pair(DIALOG_MENU_FORE, COLOR_RED, -1); + + init_pair(INPUT_BOX, COLOR_YELLOW, -1); + init_pair(INPUT_HEADING, COLOR_GREEN, -1); + init_pair(INPUT_TEXT, -1, -1); + init_pair(INPUT_FIELD, -1, -1); + + init_pair(FUNCTION_HIGHLIGHT, -1, -1); + init_pair(FUNCTION_TEXT, COLOR_YELLOW, -1); } /* available attributes: @@ -67,167 +67,172 @@ static 
void set_normal_colors(void) */ static void normal_color_theme(void) { - /* automatically add color... */ -#define mkattr(name, attr) do { \ -attributes[name] = attr | COLOR_PAIR(name); } while (0) - mkattr(NORMAL, NORMAL); - mkattr(MAIN_HEADING, A_BOLD | A_UNDERLINE); - - mkattr(MAIN_MENU_FORE, A_REVERSE); - mkattr(MAIN_MENU_BACK, A_NORMAL); - mkattr(MAIN_MENU_GREY, A_NORMAL); - mkattr(MAIN_MENU_HEADING, A_BOLD); - mkattr(MAIN_MENU_BOX, A_NORMAL); - - mkattr(SCROLLWIN_TEXT, A_NORMAL); - mkattr(SCROLLWIN_HEADING, A_BOLD); - mkattr(SCROLLWIN_BOX, A_BOLD); - - mkattr(DIALOG_TEXT, A_BOLD); - mkattr(DIALOG_BOX, A_BOLD); - mkattr(DIALOG_MENU_FORE, A_STANDOUT); - mkattr(DIALOG_MENU_BACK, A_NORMAL); - - mkattr(INPUT_BOX, A_NORMAL); - mkattr(INPUT_HEADING, A_BOLD); - mkattr(INPUT_TEXT, A_NORMAL); - mkattr(INPUT_FIELD, A_UNDERLINE); - - mkattr(FUNCTION_HIGHLIGHT, A_BOLD); - mkattr(FUNCTION_TEXT, A_REVERSE); + /* automatically add color... */ +#define mkattr(name, attr) \ + do { \ + attributes[name] = attr | COLOR_PAIR(name); \ + } while ( 0 ) + mkattr(NORMAL, NORMAL); + mkattr(MAIN_HEADING, A_BOLD | A_UNDERLINE); + + mkattr(MAIN_MENU_FORE, A_REVERSE); + mkattr(MAIN_MENU_BACK, A_NORMAL); + mkattr(MAIN_MENU_GREY, A_NORMAL); + mkattr(MAIN_MENU_HEADING, A_BOLD); + mkattr(MAIN_MENU_BOX, A_NORMAL); + + mkattr(SCROLLWIN_TEXT, A_NORMAL); + mkattr(SCROLLWIN_HEADING, A_BOLD); + mkattr(SCROLLWIN_BOX, A_BOLD); + + mkattr(DIALOG_TEXT, A_BOLD); + mkattr(DIALOG_BOX, A_BOLD); + mkattr(DIALOG_MENU_FORE, A_STANDOUT); + mkattr(DIALOG_MENU_BACK, A_NORMAL); + + mkattr(INPUT_BOX, A_NORMAL); + mkattr(INPUT_HEADING, A_BOLD); + mkattr(INPUT_TEXT, A_NORMAL); + mkattr(INPUT_FIELD, A_UNDERLINE); + + mkattr(FUNCTION_HIGHLIGHT, A_BOLD); + mkattr(FUNCTION_TEXT, A_REVERSE); } static void no_colors_theme(void) { - /* automatically add highlight, no color */ -#define mkattrn(name, attr) { attributes[name] = attr; } - - mkattrn(NORMAL, NORMAL); - mkattrn(MAIN_HEADING, A_BOLD | A_UNDERLINE); - - mkattrn(MAIN_MENU_FORE, A_STANDOUT); - mkattrn(MAIN_MENU_BACK, A_NORMAL); - mkattrn(MAIN_MENU_GREY, A_NORMAL); - mkattrn(MAIN_MENU_HEADING, A_BOLD); - mkattrn(MAIN_MENU_BOX, A_NORMAL); - - mkattrn(SCROLLWIN_TEXT, A_NORMAL); - mkattrn(SCROLLWIN_HEADING, A_BOLD); - mkattrn(SCROLLWIN_BOX, A_BOLD); - - mkattrn(DIALOG_TEXT, A_NORMAL); - mkattrn(DIALOG_BOX, A_BOLD); - mkattrn(DIALOG_MENU_FORE, A_STANDOUT); - mkattrn(DIALOG_MENU_BACK, A_NORMAL); - - mkattrn(INPUT_BOX, A_BOLD); - mkattrn(INPUT_HEADING, A_BOLD); - mkattrn(INPUT_TEXT, A_NORMAL); - mkattrn(INPUT_FIELD, A_UNDERLINE); - - mkattrn(FUNCTION_HIGHLIGHT, A_BOLD); - mkattrn(FUNCTION_TEXT, A_REVERSE); + /* automatically add highlight, no color */ +#define mkattrn(name, attr) \ + { \ + attributes[name] = attr; \ + } + + mkattrn(NORMAL, NORMAL); + mkattrn(MAIN_HEADING, A_BOLD | A_UNDERLINE); + + mkattrn(MAIN_MENU_FORE, A_STANDOUT); + mkattrn(MAIN_MENU_BACK, A_NORMAL); + mkattrn(MAIN_MENU_GREY, A_NORMAL); + mkattrn(MAIN_MENU_HEADING, A_BOLD); + mkattrn(MAIN_MENU_BOX, A_NORMAL); + + mkattrn(SCROLLWIN_TEXT, A_NORMAL); + mkattrn(SCROLLWIN_HEADING, A_BOLD); + mkattrn(SCROLLWIN_BOX, A_BOLD); + + mkattrn(DIALOG_TEXT, A_NORMAL); + mkattrn(DIALOG_BOX, A_BOLD); + mkattrn(DIALOG_MENU_FORE, A_STANDOUT); + mkattrn(DIALOG_MENU_BACK, A_NORMAL); + + mkattrn(INPUT_BOX, A_BOLD); + mkattrn(INPUT_HEADING, A_BOLD); + mkattrn(INPUT_TEXT, A_NORMAL); + mkattrn(INPUT_FIELD, A_UNDERLINE); + + mkattrn(FUNCTION_HIGHLIGHT, A_BOLD); + mkattrn(FUNCTION_TEXT, A_REVERSE); } void set_colors() { - start_color(); - 
use_default_colors(); - set_normal_colors(); - if (has_colors()) { - normal_color_theme(); - } else { - /* give defaults */ - no_colors_theme(); - } + start_color(); + use_default_colors(); + set_normal_colors(); + if ( has_colors() ) + { + normal_color_theme(); + } + else + { + /* give defaults */ + no_colors_theme(); + } } - /* this changes the windows attributes !!! */ -void print_in_middle(WINDOW *win, - int starty, - int startx, - int width, - const char *string, - chtype color) -{ int length, x, y; - float temp; - - - if (win == NULL) - win = stdscr; - getyx(win, y, x); - if (startx != 0) - x = startx; - if (starty != 0) - y = starty; - if (width == 0) - width = 80; - - length = strlen(string); - temp = (width - length) / 2; - x = startx + (int)temp; - (void) wattrset(win, color); - mvwprintw(win, y, x, "%s", string); - refresh(); +void print_in_middle(WINDOW *win, int starty, int startx, int width, + const char *string, chtype color) +{ + int length, x, y; + float temp; + + if ( win == NULL ) + win = stdscr; + getyx(win, y, x); + if ( startx != 0 ) + x = startx; + if ( starty != 0 ) + y = starty; + if ( width == 0 ) + width = 80; + + length = strlen(string); + temp = (width - length) / 2; + x = startx + (int)temp; + (void)wattrset(win, color); + mvwprintw(win, y, x, "%s", string); + refresh(); } int get_line_no(const char *text) { - int i; - int total = 1; + int i; + int total = 1; - if (!text) - return 0; + if ( !text ) + return 0; - for (i = 0; text[i] != '\0'; i++) - if (text[i] == '\n') - total++; - return total; + for ( i = 0; text[i] != '\0'; i++ ) + if ( text[i] == '\n' ) + total++; + return total; } const char *get_line(const char *text, int line_no) { - int i; - int lines = 0; + int i; + int lines = 0; - if (!text) - return 0; + if ( !text ) + return 0; - for (i = 0; text[i] != '\0' && lines < line_no; i++) - if (text[i] == '\n') - lines++; - return text+i; + for ( i = 0; text[i] != '\0' && lines < line_no; i++ ) + if ( text[i] == '\n' ) + lines++; + return text + i; } int get_line_length(const char *line) { - int res = 0; - while (*line != '\0' && *line != '\n') { - line++; - res++; - } - return res; + int res = 0; + while ( *line != '\0' && *line != '\n' ) + { + line++; + res++; + } + return res; } /* print all lines to the window. */ void fill_window(WINDOW *win, const char *text) { - int x, y; - int total_lines = get_line_no(text); - int i; - - getmaxyx(win, y, x); - /* do not go over end of line */ - total_lines = min(total_lines, y); - for (i = 0; i < total_lines; i++) { - char tmp[x+10]; - const char *line = get_line(text, i); - int len = get_line_length(line); - strncpy(tmp, line, min(len, x)); - tmp[len] = '\0'; - mvwprintw(win, i, 0, "%s", tmp); - } + int x, y; + int total_lines = get_line_no(text); + int i; + + getmaxyx(win, y, x); + /* do not go over end of line */ + total_lines = min(total_lines, y); + for ( i = 0; i < total_lines; i++ ) + { + char tmp[x + 10]; + const char *line = get_line(text, i); + int len = get_line_length(line); + strncpy(tmp, line, min(len, x)); + tmp[len] = '\0'; + mvwprintw(win, i, 0, "%s", tmp); + } } /* get the message, and buttons. @@ -240,417 +245,423 @@ void fill_window(WINDOW *win, const char *text) */ int btn_dialog(WINDOW *main_window, const char *msg, int btn_num, ...) 
{ - va_list ap; - char *btn; - int btns_width = 0; - int msg_lines = 0; - int msg_width = 0; - int total_width; - int win_rows = 0; - WINDOW *win; - WINDOW *msg_win; - WINDOW *menu_win; - MENU *menu; - ITEM *btns[btn_num+1]; - int i, x, y; - int res = -1; - - - va_start(ap, btn_num); - for (i = 0; i < btn_num; i++) { - btn = va_arg(ap, char *); - btns[i] = new_item(btn, ""); - btns_width += strlen(btn)+1; - } - va_end(ap); - btns[btn_num] = NULL; - - /* find the widest line of msg: */ - msg_lines = get_line_no(msg); - for (i = 0; i < msg_lines; i++) { - const char *line = get_line(msg, i); - int len = get_line_length(line); - if (msg_width < len) - msg_width = len; - } - - total_width = max(msg_width, btns_width); - /* place dialog in middle of screen */ - y = (getmaxy(stdscr)-(msg_lines+4))/2; - x = (getmaxx(stdscr)-(total_width+4))/2; - - - /* create the windows */ - if (btn_num > 0) - win_rows = msg_lines+4; - else - win_rows = msg_lines+2; - - win = newwin(win_rows, total_width+4, y, x); - keypad(win, TRUE); - menu_win = derwin(win, 1, btns_width, win_rows-2, - 1+(total_width+2-btns_width)/2); - menu = new_menu(btns); - msg_win = derwin(win, win_rows-2, msg_width, 1, - 1+(total_width+2-msg_width)/2); - - set_menu_fore(menu, attributes[DIALOG_MENU_FORE]); - set_menu_back(menu, attributes[DIALOG_MENU_BACK]); - - (void) wattrset(win, attributes[DIALOG_BOX]); - box(win, 0, 0); - - /* print message */ - (void) wattrset(msg_win, attributes[DIALOG_TEXT]); - fill_window(msg_win, msg); - - set_menu_win(menu, win); - set_menu_sub(menu, menu_win); - set_menu_format(menu, 1, btn_num); - menu_opts_off(menu, O_SHOWDESC); - menu_opts_off(menu, O_SHOWMATCH); - menu_opts_on(menu, O_ONEVALUE); - menu_opts_on(menu, O_NONCYCLIC); - set_menu_mark(menu, ""); - post_menu(menu); - - - touchwin(win); - refresh_all_windows(main_window); - while ((res = wgetch(win))) { - switch (res) { - case KEY_LEFT: - menu_driver(menu, REQ_LEFT_ITEM); - break; - case KEY_RIGHT: - menu_driver(menu, REQ_RIGHT_ITEM); - break; - case 10: /* ENTER */ - case 27: /* ESCAPE */ - case ' ': - case KEY_F(F_BACK): - case KEY_F(F_EXIT): - break; - } - touchwin(win); - refresh_all_windows(main_window); - - if (res == 10 || res == ' ') { - res = item_index(current_item(menu)); - break; - } else if (res == 27 || res == KEY_F(F_BACK) || - res == KEY_F(F_EXIT)) { - res = KEY_EXIT; - break; - } - } - - unpost_menu(menu); - free_menu(menu); - for (i = 0; i < btn_num; i++) - free_item(btns[i]); - - delwin(win); - return res; + va_list ap; + char *btn; + int btns_width = 0; + int msg_lines = 0; + int msg_width = 0; + int total_width; + int win_rows = 0; + WINDOW *win; + WINDOW *msg_win; + WINDOW *menu_win; + MENU *menu; + ITEM *btns[btn_num + 1]; + int i, x, y; + int res = -1; + + va_start(ap, btn_num); + for ( i = 0; i < btn_num; i++ ) + { + btn = va_arg(ap, char *); + btns[i] = new_item(btn, ""); + btns_width += strlen(btn) + 1; + } + va_end(ap); + btns[btn_num] = NULL; + + /* find the widest line of msg: */ + msg_lines = get_line_no(msg); + for ( i = 0; i < msg_lines; i++ ) + { + const char *line = get_line(msg, i); + int len = get_line_length(line); + if ( msg_width < len ) + msg_width = len; + } + + total_width = max(msg_width, btns_width); + /* place dialog in middle of screen */ + y = (getmaxy(stdscr) - (msg_lines + 4)) / 2; + x = (getmaxx(stdscr) - (total_width + 4)) / 2; + + /* create the windows */ + if ( btn_num > 0 ) + win_rows = msg_lines + 4; + else + win_rows = msg_lines + 2; + + win = newwin(win_rows, total_width + 4, y, x); + 
keypad(win, TRUE); + menu_win = derwin(win, 1, btns_width, win_rows - 2, + 1 + (total_width + 2 - btns_width) / 2); + menu = new_menu(btns); + msg_win = derwin(win, win_rows - 2, msg_width, 1, + 1 + (total_width + 2 - msg_width) / 2); + + set_menu_fore(menu, attributes[DIALOG_MENU_FORE]); + set_menu_back(menu, attributes[DIALOG_MENU_BACK]); + + (void)wattrset(win, attributes[DIALOG_BOX]); + box(win, 0, 0); + + /* print message */ + (void)wattrset(msg_win, attributes[DIALOG_TEXT]); + fill_window(msg_win, msg); + + set_menu_win(menu, win); + set_menu_sub(menu, menu_win); + set_menu_format(menu, 1, btn_num); + menu_opts_off(menu, O_SHOWDESC); + menu_opts_off(menu, O_SHOWMATCH); + menu_opts_on(menu, O_ONEVALUE); + menu_opts_on(menu, O_NONCYCLIC); + set_menu_mark(menu, ""); + post_menu(menu); + + touchwin(win); + refresh_all_windows(main_window); + while ( (res = wgetch(win)) ) + { + switch (res) + { + case KEY_LEFT: + menu_driver(menu, REQ_LEFT_ITEM); + break; + case KEY_RIGHT: + menu_driver(menu, REQ_RIGHT_ITEM); + break; + case 10: /* ENTER */ + case 27: /* ESCAPE */ + case ' ': + case KEY_F(F_BACK): + case KEY_F(F_EXIT): + break; + } + touchwin(win); + refresh_all_windows(main_window); + + if ( res == 10 || res == ' ' ) + { + res = item_index(current_item(menu)); + break; + } + else if ( res == 27 || res == KEY_F(F_BACK) || res == KEY_F(F_EXIT) ) + { + res = KEY_EXIT; + break; + } + } + + unpost_menu(menu); + free_menu(menu); + for ( i = 0; i < btn_num; i++ ) + free_item(btns[i]); + + delwin(win); + return res; } -int dialog_inputbox(WINDOW *main_window, - const char *title, const char *prompt, - const char *init, char **resultp, int *result_len) +int dialog_inputbox(WINDOW *main_window, const char *title, const char *prompt, + const char *init, char **resultp, int *result_len) { - int prompt_lines = 0; - int prompt_width = 0; - WINDOW *win; - WINDOW *prompt_win; - WINDOW *form_win; - PANEL *panel; - int i, x, y; - int res = -1; - int cursor_position = strlen(init); - int cursor_form_win; - char *result = *resultp; - - if (strlen(init)+1 > *result_len) { - *result_len = strlen(init)+1; - *resultp = result = realloc(result, *result_len); - } - - /* find the widest line of msg: */ - prompt_lines = get_line_no(prompt); - for (i = 0; i < prompt_lines; i++) { - const char *line = get_line(prompt, i); - int len = get_line_length(line); - prompt_width = max(prompt_width, len); - } - - if (title) - prompt_width = max(prompt_width, strlen(title)); - - /* place dialog in middle of screen */ - y = (getmaxy(stdscr)-(prompt_lines+4))/2; - x = (getmaxx(stdscr)-(prompt_width+4))/2; - - strncpy(result, init, *result_len); - - /* create the windows */ - win = newwin(prompt_lines+6, prompt_width+7, y, x); - prompt_win = derwin(win, prompt_lines+1, prompt_width, 2, 2); - form_win = derwin(win, 1, prompt_width, prompt_lines+3, 2); - keypad(form_win, TRUE); - - (void) wattrset(form_win, attributes[INPUT_FIELD]); - - (void) wattrset(win, attributes[INPUT_BOX]); - box(win, 0, 0); - (void) wattrset(win, attributes[INPUT_HEADING]); - if (title) - mvwprintw(win, 0, 3, "%s", title); - - /* print message */ - (void) wattrset(prompt_win, attributes[INPUT_TEXT]); - fill_window(prompt_win, prompt); - - mvwprintw(form_win, 0, 0, "%*s", prompt_width, " "); - cursor_form_win = min(cursor_position, prompt_width-1); - mvwprintw(form_win, 0, 0, "%s", - result + cursor_position-cursor_form_win); - - /* create panels */ - panel = new_panel(win); - - /* show the cursor */ - curs_set(1); - - touchwin(win); - 
refresh_all_windows(main_window); - while ((res = wgetch(form_win))) { - int len = strlen(result); - switch (res) { - case 10: /* ENTER */ - case 27: /* ESCAPE */ - case KEY_F(F_HELP): - case KEY_F(F_EXIT): - case KEY_F(F_BACK): - break; - case 127: - case KEY_BACKSPACE: - if (cursor_position > 0) { - memmove(&result[cursor_position-1], - &result[cursor_position], - len-cursor_position+1); - cursor_position--; - cursor_form_win--; - len--; - } - break; - case KEY_DC: - if (cursor_position >= 0 && cursor_position < len) { - memmove(&result[cursor_position], - &result[cursor_position+1], - len-cursor_position+1); - len--; - } - break; - case KEY_UP: - case KEY_RIGHT: - if (cursor_position < len) { - cursor_position++; - cursor_form_win++; - } - break; - case KEY_DOWN: - case KEY_LEFT: - if (cursor_position > 0) { - cursor_position--; - cursor_form_win--; - } - break; - case KEY_HOME: - cursor_position = 0; - cursor_form_win = 0; - break; - case KEY_END: - cursor_position = len; - cursor_form_win = min(cursor_position, prompt_width-1); - break; - default: - if ((isgraph(res) || isspace(res))) { - /* one for new char, one for '\0' */ - if (len+2 > *result_len) { - *result_len = len+2; - *resultp = result = realloc(result, - *result_len); - } - /* insert the char at the proper position */ - memmove(&result[cursor_position+1], - &result[cursor_position], - len-cursor_position+1); - result[cursor_position] = res; - cursor_position++; - cursor_form_win++; - len++; - } else { - mvprintw(0, 0, "unknown key: %d\n", res); - } - break; - } - if (cursor_form_win < 0) - cursor_form_win = 0; - else if (cursor_form_win > prompt_width-1) - cursor_form_win = prompt_width-1; - - wmove(form_win, 0, 0); - wclrtoeol(form_win); - mvwprintw(form_win, 0, 0, "%*s", prompt_width, " "); - mvwprintw(form_win, 0, 0, "%s", - result + cursor_position-cursor_form_win); - wmove(form_win, 0, cursor_form_win); - touchwin(win); - refresh_all_windows(main_window); - - if (res == 10) { - res = 0; - break; - } else if (res == 27 || res == KEY_F(F_BACK) || - res == KEY_F(F_EXIT)) { - res = KEY_EXIT; - break; - } else if (res == KEY_F(F_HELP)) { - res = 1; - break; - } - } - - /* hide the cursor */ - curs_set(0); - del_panel(panel); - delwin(prompt_win); - delwin(form_win); - delwin(win); - return res; + int prompt_lines = 0; + int prompt_width = 0; + WINDOW *win; + WINDOW *prompt_win; + WINDOW *form_win; + PANEL *panel; + int i, x, y; + int res = -1; + int cursor_position = strlen(init); + int cursor_form_win; + char *result = *resultp; + + if ( strlen(init) + 1 > *result_len ) + { + *result_len = strlen(init) + 1; + *resultp = result = realloc(result, *result_len); + } + + /* find the widest line of msg: */ + prompt_lines = get_line_no(prompt); + for ( i = 0; i < prompt_lines; i++ ) + { + const char *line = get_line(prompt, i); + int len = get_line_length(line); + prompt_width = max(prompt_width, len); + } + + if ( title ) + prompt_width = max(prompt_width, strlen(title)); + + /* place dialog in middle of screen */ + y = (getmaxy(stdscr) - (prompt_lines + 4)) / 2; + x = (getmaxx(stdscr) - (prompt_width + 4)) / 2; + + strncpy(result, init, *result_len); + + /* create the windows */ + win = newwin(prompt_lines + 6, prompt_width + 7, y, x); + prompt_win = derwin(win, prompt_lines + 1, prompt_width, 2, 2); + form_win = derwin(win, 1, prompt_width, prompt_lines + 3, 2); + keypad(form_win, TRUE); + + (void)wattrset(form_win, attributes[INPUT_FIELD]); + + (void)wattrset(win, attributes[INPUT_BOX]); + box(win, 0, 0); + 
(void)wattrset(win, attributes[INPUT_HEADING]); + if ( title ) + mvwprintw(win, 0, 3, "%s", title); + + /* print message */ + (void)wattrset(prompt_win, attributes[INPUT_TEXT]); + fill_window(prompt_win, prompt); + + mvwprintw(form_win, 0, 0, "%*s", prompt_width, " "); + cursor_form_win = min(cursor_position, prompt_width - 1); + mvwprintw(form_win, 0, 0, "%s", result + cursor_position - cursor_form_win); + + /* create panels */ + panel = new_panel(win); + + /* show the cursor */ + curs_set(1); + + touchwin(win); + refresh_all_windows(main_window); + while ( (res = wgetch(form_win)) ) + { + int len = strlen(result); + switch (res) + { + case 10: /* ENTER */ + case 27: /* ESCAPE */ + case KEY_F(F_HELP): + case KEY_F(F_EXIT): + case KEY_F(F_BACK): + break; + case 127: + case KEY_BACKSPACE: + if ( cursor_position > 0 ) + { + memmove(&result[cursor_position - 1], &result[cursor_position], + len - cursor_position + 1); + cursor_position--; + cursor_form_win--; + len--; + } + break; + case KEY_DC: + if ( cursor_position >= 0 && cursor_position < len ) + { + memmove(&result[cursor_position], &result[cursor_position + 1], + len - cursor_position + 1); + len--; + } + break; + case KEY_UP: + case KEY_RIGHT: + if ( cursor_position < len ) + { + cursor_position++; + cursor_form_win++; + } + break; + case KEY_DOWN: + case KEY_LEFT: + if ( cursor_position > 0 ) + { + cursor_position--; + cursor_form_win--; + } + break; + case KEY_HOME: + cursor_position = 0; + cursor_form_win = 0; + break; + case KEY_END: + cursor_position = len; + cursor_form_win = min(cursor_position, prompt_width - 1); + break; + default: + if ( (isgraph(res) || isspace(res)) ) + { + /* one for new char, one for '\0' */ + if ( len + 2 > *result_len ) + { + *result_len = len + 2; + *resultp = result = realloc(result, *result_len); + } + /* insert the char at the proper position */ + memmove(&result[cursor_position + 1], &result[cursor_position], + len - cursor_position + 1); + result[cursor_position] = res; + cursor_position++; + cursor_form_win++; + len++; + } + else + { + mvprintw(0, 0, "unknown key: %d\n", res); + } + break; + } + if ( cursor_form_win < 0 ) + cursor_form_win = 0; + else if ( cursor_form_win > prompt_width - 1 ) + cursor_form_win = prompt_width - 1; + + wmove(form_win, 0, 0); + wclrtoeol(form_win); + mvwprintw(form_win, 0, 0, "%*s", prompt_width, " "); + mvwprintw(form_win, 0, 0, "%s", + result + cursor_position - cursor_form_win); + wmove(form_win, 0, cursor_form_win); + touchwin(win); + refresh_all_windows(main_window); + + if ( res == 10 ) + { + res = 0; + break; + } + else if ( res == 27 || res == KEY_F(F_BACK) || res == KEY_F(F_EXIT) ) + { + res = KEY_EXIT; + break; + } + else if ( res == KEY_F(F_HELP) ) + { + res = 1; + break; + } + } + + /* hide the cursor */ + curs_set(0); + del_panel(panel); + delwin(prompt_win); + delwin(form_win); + delwin(win); + return res; } /* refresh all windows in the correct order */ void refresh_all_windows(WINDOW *main_window) { - update_panels(); - touchwin(main_window); - refresh(); + update_panels(); + touchwin(main_window); + refresh(); } /* layman's scrollable window... 
*/ -void show_scroll_win(WINDOW *main_window, - const char *title, - const char *text) +void show_scroll_win(WINDOW *main_window, const char *title, const char *text) { - int res; - int total_lines = get_line_no(text); - int x, y, lines, columns; - int start_x = 0, start_y = 0; - int text_lines = 0, text_cols = 0; - int total_cols = 0; - int win_cols = 0; - int win_lines = 0; - int i = 0; - WINDOW *win; - WINDOW *pad; - PANEL *panel; - - getmaxyx(stdscr, lines, columns); - - /* find the widest line of msg: */ - total_lines = get_line_no(text); - for (i = 0; i < total_lines; i++) { - const char *line = get_line(text, i); - int len = get_line_length(line); - total_cols = max(total_cols, len+2); - } - - /* create the pad */ - pad = newpad(total_lines+10, total_cols+10); - (void) wattrset(pad, attributes[SCROLLWIN_TEXT]); - fill_window(pad, text); - - win_lines = min(total_lines+4, lines-2); - win_cols = min(total_cols+2, columns-2); - text_lines = max(win_lines-4, 0); - text_cols = max(win_cols-2, 0); - - /* place window in middle of screen */ - y = (lines-win_lines)/2; - x = (columns-win_cols)/2; - - win = newwin(win_lines, win_cols, y, x); - keypad(win, TRUE); - /* show the help in the help window, and show the help panel */ - (void) wattrset(win, attributes[SCROLLWIN_BOX]); - box(win, 0, 0); - (void) wattrset(win, attributes[SCROLLWIN_HEADING]); - mvwprintw(win, 0, 3, " %s ", title); - panel = new_panel(win); - - /* handle scrolling */ - do { - - copywin(pad, win, start_y, start_x, 2, 2, text_lines, - text_cols, 0); - print_in_middle(win, - text_lines+2, - 0, - text_cols, - "", - attributes[DIALOG_MENU_FORE]); - wrefresh(win); - - res = wgetch(win); - switch (res) { - case KEY_NPAGE: - case ' ': - case 'd': - start_y += text_lines-2; - break; - case KEY_PPAGE: - case 'u': - start_y -= text_lines+2; - break; - case KEY_HOME: - start_y = 0; - break; - case KEY_END: - start_y = total_lines-text_lines; - break; - case KEY_DOWN: - case 'j': - start_y++; - break; - case KEY_UP: - case 'k': - start_y--; - break; - case KEY_LEFT: - case 'h': - start_x--; - break; - case KEY_RIGHT: - case 'l': - start_x++; - break; - } - if (res == 10 || res == 27 || res == 'q' || - res == KEY_F(F_HELP) || res == KEY_F(F_BACK) || - res == KEY_F(F_EXIT)) - break; - if (start_y < 0) - start_y = 0; - if (start_y >= total_lines-text_lines) - start_y = total_lines-text_lines; - if (start_x < 0) - start_x = 0; - if (start_x >= total_cols-text_cols) - start_x = total_cols-text_cols; - } while (res); - - del_panel(panel); - delwin(win); - refresh_all_windows(main_window); + int res; + int total_lines = get_line_no(text); + int x, y, lines, columns; + int start_x = 0, start_y = 0; + int text_lines = 0, text_cols = 0; + int total_cols = 0; + int win_cols = 0; + int win_lines = 0; + int i = 0; + WINDOW *win; + WINDOW *pad; + PANEL *panel; + + getmaxyx(stdscr, lines, columns); + + /* find the widest line of msg: */ + total_lines = get_line_no(text); + for ( i = 0; i < total_lines; i++ ) + { + const char *line = get_line(text, i); + int len = get_line_length(line); + total_cols = max(total_cols, len + 2); + } + + /* create the pad */ + pad = newpad(total_lines + 10, total_cols + 10); + (void)wattrset(pad, attributes[SCROLLWIN_TEXT]); + fill_window(pad, text); + + win_lines = min(total_lines + 4, lines - 2); + win_cols = min(total_cols + 2, columns - 2); + text_lines = max(win_lines - 4, 0); + text_cols = max(win_cols - 2, 0); + + /* place window in middle of screen */ + y = (lines - win_lines) / 2; + x = (columns - win_cols) / 
2; + + win = newwin(win_lines, win_cols, y, x); + keypad(win, TRUE); + /* show the help in the help window, and show the help panel */ + (void)wattrset(win, attributes[SCROLLWIN_BOX]); + box(win, 0, 0); + (void)wattrset(win, attributes[SCROLLWIN_HEADING]); + mvwprintw(win, 0, 3, " %s ", title); + panel = new_panel(win); + + /* handle scrolling */ + do { + copywin(pad, win, start_y, start_x, 2, 2, text_lines, text_cols, 0); + print_in_middle(win, text_lines + 2, 0, text_cols, "", + attributes[DIALOG_MENU_FORE]); + wrefresh(win); + + res = wgetch(win); + switch (res) + { + case KEY_NPAGE: + case ' ': + case 'd': + start_y += text_lines - 2; + break; + case KEY_PPAGE: + case 'u': + start_y -= text_lines + 2; + break; + case KEY_HOME: + start_y = 0; + break; + case KEY_END: + start_y = total_lines - text_lines; + break; + case KEY_DOWN: + case 'j': + start_y++; + break; + case KEY_UP: + case 'k': + start_y--; + break; + case KEY_LEFT: + case 'h': + start_x--; + break; + case KEY_RIGHT: + case 'l': + start_x++; + break; + } + if ( res == 10 || res == 27 || res == 'q' || res == KEY_F(F_HELP) || + res == KEY_F(F_BACK) || res == KEY_F(F_EXIT) ) + break; + if ( start_y < 0 ) + start_y = 0; + if ( start_y >= total_lines - text_lines ) + start_y = total_lines - text_lines; + if ( start_x < 0 ) + start_x = 0; + if ( start_x >= total_cols - text_cols ) + start_x = total_cols - text_cols; + } while ( res ); + + del_panel(panel); + delwin(win); + refresh_all_windows(main_window); } diff --git a/xen/tools/kconfig/symbol.c b/xen/tools/kconfig/symbol.c index 50878dc025..ffd3d05795 100644 --- a/xen/tools/kconfig/symbol.c +++ b/xen/tools/kconfig/symbol.c @@ -11,22 +11,28 @@ #include "lkc.h" -struct symbol symbol_yes = { - .name = "y", - .curr = { "y", yes }, - .flags = SYMBOL_CONST|SYMBOL_VALID, -}, symbol_mod = { - .name = "m", - .curr = { "m", mod }, - .flags = SYMBOL_CONST|SYMBOL_VALID, -}, symbol_no = { - .name = "n", - .curr = { "n", no }, - .flags = SYMBOL_CONST|SYMBOL_VALID, -}, symbol_empty = { - .name = "", - .curr = { "", no }, - .flags = SYMBOL_VALID, +struct symbol symbol_yes = + { + .name = "y", + .curr = {"y", yes}, + .flags = SYMBOL_CONST | SYMBOL_VALID, +}, + symbol_mod = + { + .name = "m", + .curr = {"m", mod}, + .flags = SYMBOL_CONST | SYMBOL_VALID, +}, + symbol_no = + { + .name = "n", + .curr = {"n", no}, + .flags = SYMBOL_CONST | SYMBOL_VALID, +}, + symbol_empty = { + .name = "", + .curr = {"", no}, + .flags = SYMBOL_VALID, }; struct symbol *sym_defconfig_list; @@ -37,213 +43,222 @@ struct expr *sym_env_list; static void sym_add_default(struct symbol *sym, const char *def) { - struct property *prop = prop_alloc(P_DEFAULT, sym); + struct property *prop = prop_alloc(P_DEFAULT, sym); - prop->expr = expr_alloc_symbol(sym_lookup(def, SYMBOL_CONST)); + prop->expr = expr_alloc_symbol(sym_lookup(def, SYMBOL_CONST)); } void sym_init(void) { - struct symbol *sym; - struct utsname uts; - static bool inited = false; + struct symbol *sym; + struct utsname uts; + static bool inited = false; - if (inited) - return; - inited = true; + if ( inited ) + return; + inited = true; - uname(&uts); + uname(&uts); - sym = sym_lookup("UNAME_RELEASE", 0); - sym->type = S_STRING; - sym->flags |= SYMBOL_AUTO; - sym_add_default(sym, uts.release); + sym = sym_lookup("UNAME_RELEASE", 0); + sym->type = S_STRING; + sym->flags |= SYMBOL_AUTO; + sym_add_default(sym, uts.release); } enum symbol_type sym_get_type(struct symbol *sym) { - enum symbol_type type = sym->type; - - if (type == S_TRISTATE) { - if 
(sym_is_choice_value(sym) && sym->visible == yes) - type = S_BOOLEAN; - else if (modules_val == no) - type = S_BOOLEAN; - } - return type; + enum symbol_type type = sym->type; + + if ( type == S_TRISTATE ) + { + if ( sym_is_choice_value(sym) && sym->visible == yes ) + type = S_BOOLEAN; + else if ( modules_val == no ) + type = S_BOOLEAN; + } + return type; } const char *sym_type_name(enum symbol_type type) { - switch (type) { - case S_BOOLEAN: - return "boolean"; - case S_TRISTATE: - return "tristate"; - case S_INT: - return "integer"; - case S_HEX: - return "hex"; - case S_STRING: - return "string"; - case S_UNKNOWN: - return "unknown"; - case S_OTHER: - break; - } - return "???"; + switch (type) + { + case S_BOOLEAN: + return "boolean"; + case S_TRISTATE: + return "tristate"; + case S_INT: + return "integer"; + case S_HEX: + return "hex"; + case S_STRING: + return "string"; + case S_UNKNOWN: + return "unknown"; + case S_OTHER: + break; + } + return "???"; } struct property *sym_get_choice_prop(struct symbol *sym) { - struct property *prop; + struct property *prop; - for_all_choices(sym, prop) - return prop; - return NULL; + for_all_choices(sym, prop) return prop; + return NULL; } struct property *sym_get_env_prop(struct symbol *sym) { - struct property *prop; + struct property *prop; - for_all_properties(sym, prop, P_ENV) - return prop; - return NULL; + for_all_properties(sym, prop, P_ENV) return prop; + return NULL; } static struct property *sym_get_default_prop(struct symbol *sym) { - struct property *prop; - - for_all_defaults(sym, prop) { - prop->visible.tri = expr_calc_value(prop->visible.expr); - if (prop->visible.tri != no) - return prop; - } - return NULL; + struct property *prop; + + for_all_defaults(sym, prop) + { + prop->visible.tri = expr_calc_value(prop->visible.expr); + if ( prop->visible.tri != no ) + return prop; + } + return NULL; } static struct property *sym_get_range_prop(struct symbol *sym) { - struct property *prop; - - for_all_properties(sym, prop, P_RANGE) { - prop->visible.tri = expr_calc_value(prop->visible.expr); - if (prop->visible.tri != no) - return prop; - } - return NULL; + struct property *prop; + + for_all_properties(sym, prop, P_RANGE) + { + prop->visible.tri = expr_calc_value(prop->visible.expr); + if ( prop->visible.tri != no ) + return prop; + } + return NULL; } static long long sym_get_range_val(struct symbol *sym, int base) { - sym_calc_value(sym); - switch (sym->type) { - case S_INT: - base = 10; - break; - case S_HEX: - base = 16; - break; - default: - break; - } - return strtoll(sym->curr.val, NULL, base); + sym_calc_value(sym); + switch (sym->type) + { + case S_INT: + base = 10; + break; + case S_HEX: + base = 16; + break; + default: + break; + } + return strtoll(sym->curr.val, NULL, base); } static void sym_validate_range(struct symbol *sym) { - struct property *prop; - int base; - long long val, val2; - char str[64]; - - switch (sym->type) { - case S_INT: - base = 10; - break; - case S_HEX: - base = 16; - break; - default: - return; - } - prop = sym_get_range_prop(sym); - if (!prop) - return; - val = strtoll(sym->curr.val, NULL, base); - val2 = sym_get_range_val(prop->expr->left.sym, base); - if (val >= val2) { - val2 = sym_get_range_val(prop->expr->right.sym, base); - if (val <= val2) - return; - } - if (sym->type == S_INT) - sprintf(str, "%lld", val2); - else - sprintf(str, "0x%llx", val2); - sym->curr.val = strdup(str); + struct property *prop; + int base; + long long val, val2; + char str[64]; + + switch (sym->type) + { + case S_INT: + 
base = 10; + break; + case S_HEX: + base = 16; + break; + default: + return; + } + prop = sym_get_range_prop(sym); + if ( !prop ) + return; + val = strtoll(sym->curr.val, NULL, base); + val2 = sym_get_range_val(prop->expr->left.sym, base); + if ( val >= val2 ) + { + val2 = sym_get_range_val(prop->expr->right.sym, base); + if ( val <= val2 ) + return; + } + if ( sym->type == S_INT ) + sprintf(str, "%lld", val2); + else + sprintf(str, "0x%llx", val2); + sym->curr.val = strdup(str); } static void sym_set_changed(struct symbol *sym) { - struct property *prop; - - sym->flags |= SYMBOL_CHANGED; - for (prop = sym->prop; prop; prop = prop->next) { - if (prop->menu) - prop->menu->flags |= MENU_CHANGED; - } + struct property *prop; + + sym->flags |= SYMBOL_CHANGED; + for ( prop = sym->prop; prop; prop = prop->next ) + { + if ( prop->menu ) + prop->menu->flags |= MENU_CHANGED; + } } static void sym_set_all_changed(void) { - struct symbol *sym; - int i; + struct symbol *sym; + int i; - for_all_symbols(i, sym) - sym_set_changed(sym); + for_all_symbols(i, sym) sym_set_changed(sym); } static void sym_calc_visibility(struct symbol *sym) { - struct property *prop; - tristate tri; - - /* any prompt visible? */ - tri = no; - for_all_prompts(sym, prop) { - prop->visible.tri = expr_calc_value(prop->visible.expr); - tri = EXPR_OR(tri, prop->visible.tri); - } - if (tri == mod && (sym->type != S_TRISTATE || modules_val == no)) - tri = yes; - if (sym->visible != tri) { - sym->visible = tri; - sym_set_changed(sym); - } - if (sym_is_choice_value(sym)) - return; - /* defaulting to "yes" if no explicit "depends on" are given */ - tri = yes; - if (sym->dir_dep.expr) - tri = expr_calc_value(sym->dir_dep.expr); - if (tri == mod) - tri = yes; - if (sym->dir_dep.tri != tri) { - sym->dir_dep.tri = tri; - sym_set_changed(sym); - } - tri = no; - if (sym->rev_dep.expr) - tri = expr_calc_value(sym->rev_dep.expr); - if (tri == mod && sym_get_type(sym) == S_BOOLEAN) - tri = yes; - if (sym->rev_dep.tri != tri) { - sym->rev_dep.tri = tri; - sym_set_changed(sym); - } + struct property *prop; + tristate tri; + + /* any prompt visible? */ + tri = no; + for_all_prompts(sym, prop) + { + prop->visible.tri = expr_calc_value(prop->visible.expr); + tri = EXPR_OR(tri, prop->visible.tri); + } + if ( tri == mod && (sym->type != S_TRISTATE || modules_val == no) ) + tri = yes; + if ( sym->visible != tri ) + { + sym->visible = tri; + sym_set_changed(sym); + } + if ( sym_is_choice_value(sym) ) + return; + /* defaulting to "yes" if no explicit "depends on" are given */ + tri = yes; + if ( sym->dir_dep.expr ) + tri = expr_calc_value(sym->dir_dep.expr); + if ( tri == mod ) + tri = yes; + if ( sym->dir_dep.tri != tri ) + { + sym->dir_dep.tri = tri; + sym_set_changed(sym); + } + tri = no; + if ( sym->rev_dep.expr ) + tri = expr_calc_value(sym->rev_dep.expr); + if ( tri == mod && sym_get_type(sym) == S_BOOLEAN ) + tri = yes; + if ( sym->rev_dep.tri != tri ) + { + sym->rev_dep.tri = tri; + sym_set_changed(sym); + } } /* @@ -254,432 +269,473 @@ static void sym_calc_visibility(struct symbol *sym) */ struct symbol *sym_choice_default(struct symbol *sym) { - struct symbol *def_sym; - struct property *prop; - struct expr *e; - - /* any of the defaults visible? 
*/ - for_all_defaults(sym, prop) { - prop->visible.tri = expr_calc_value(prop->visible.expr); - if (prop->visible.tri == no) - continue; - def_sym = prop_get_symbol(prop); - if (def_sym->visible != no) - return def_sym; - } - - /* just get the first visible value */ - prop = sym_get_choice_prop(sym); - expr_list_for_each_sym(prop->expr, e, def_sym) - if (def_sym->visible != no) - return def_sym; - - /* failed to locate any defaults */ - return NULL; + struct symbol *def_sym; + struct property *prop; + struct expr *e; + + /* any of the defaults visible? */ + for_all_defaults(sym, prop) + { + prop->visible.tri = expr_calc_value(prop->visible.expr); + if ( prop->visible.tri == no ) + continue; + def_sym = prop_get_symbol(prop); + if ( def_sym->visible != no ) + return def_sym; + } + + /* just get the first visible value */ + prop = sym_get_choice_prop(sym); + expr_list_for_each_sym(prop->expr, e, def_sym) if ( def_sym->visible != + no ) return def_sym; + + /* failed to locate any defaults */ + return NULL; } static struct symbol *sym_calc_choice(struct symbol *sym) { - struct symbol *def_sym; - struct property *prop; - struct expr *e; - int flags; - - /* first calculate all choice values' visibilities */ - flags = sym->flags; - prop = sym_get_choice_prop(sym); - expr_list_for_each_sym(prop->expr, e, def_sym) { - sym_calc_visibility(def_sym); - if (def_sym->visible != no) - flags &= def_sym->flags; - } - - sym->flags &= flags | ~SYMBOL_DEF_USER; - - /* is the user choice visible? */ - def_sym = sym->def[S_DEF_USER].val; - if (def_sym && def_sym->visible != no) - return def_sym; - - def_sym = sym_choice_default(sym); - - if (def_sym == NULL) - /* no choice? reset tristate value */ - sym->curr.tri = no; - - return def_sym; + struct symbol *def_sym; + struct property *prop; + struct expr *e; + int flags; + + /* first calculate all choice values' visibilities */ + flags = sym->flags; + prop = sym_get_choice_prop(sym); + expr_list_for_each_sym(prop->expr, e, def_sym) + { + sym_calc_visibility(def_sym); + if ( def_sym->visible != no ) + flags &= def_sym->flags; + } + + sym->flags &= flags | ~SYMBOL_DEF_USER; + + /* is the user choice visible? */ + def_sym = sym->def[S_DEF_USER].val; + if ( def_sym && def_sym->visible != no ) + return def_sym; + + def_sym = sym_choice_default(sym); + + if ( def_sym == NULL ) + /* no choice? reset tristate value */ + sym->curr.tri = no; + + return def_sym; } void sym_calc_value(struct symbol *sym) { - struct symbol_value newval, oldval; - struct property *prop; - struct expr *e; - - if (!sym) - return; - - if (sym->flags & SYMBOL_VALID) - return; - - if (sym_is_choice_value(sym) && - sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES) { - sym->flags &= ~SYMBOL_NEED_SET_CHOICE_VALUES; - prop = sym_get_choice_prop(sym); - sym_calc_value(prop_get_symbol(prop)); - } - - sym->flags |= SYMBOL_VALID; - - oldval = sym->curr; - - switch (sym->type) { - case S_INT: - case S_HEX: - case S_STRING: - newval = symbol_empty.curr; - break; - case S_BOOLEAN: - case S_TRISTATE: - newval = symbol_no.curr; - break; - default: - sym->curr.val = sym->name; - sym->curr.tri = no; - return; - } - if (!sym_is_choice_value(sym)) - sym->flags &= ~SYMBOL_WRITE; - - sym_calc_visibility(sym); - - /* set default if recursively called */ - sym->curr = newval; - - switch (sym_get_type(sym)) { - case S_BOOLEAN: - case S_TRISTATE: - if (sym_is_choice_value(sym) && sym->visible == yes) { - prop = sym_get_choice_prop(sym); - newval.tri = (prop_get_symbol(prop)->curr.val == sym) ? 
yes : no; - } else { - if (sym->visible != no) { - /* if the symbol is visible use the user value - * if available, otherwise try the default value - */ - sym->flags |= SYMBOL_WRITE; - if (sym_has_value(sym)) { - newval.tri = EXPR_AND(sym->def[S_DEF_USER].tri, - sym->visible); - goto calc_newval; - } - } - if (sym->rev_dep.tri != no) - sym->flags |= SYMBOL_WRITE; - if (!sym_is_choice(sym)) { - prop = sym_get_default_prop(sym); - if (prop) { - sym->flags |= SYMBOL_WRITE; - newval.tri = EXPR_AND(expr_calc_value(prop->expr), - prop->visible.tri); - } - } - calc_newval: - if (sym->dir_dep.tri == no && sym->rev_dep.tri != no) { - struct expr *e; - e = expr_simplify_unmet_dep(sym->rev_dep.expr, - sym->dir_dep.expr); - fprintf(stderr, "warning: ("); - expr_fprint(e, stderr); - fprintf(stderr, ") selects %s which has unmet direct dependencies (", - sym->name); - expr_fprint(sym->dir_dep.expr, stderr); - fprintf(stderr, ")\n"); - expr_free(e); - } - newval.tri = EXPR_OR(newval.tri, sym->rev_dep.tri); - } - if (newval.tri == mod && sym_get_type(sym) == S_BOOLEAN) - newval.tri = yes; - break; - case S_STRING: - case S_HEX: - case S_INT: - if (sym->visible != no) { - sym->flags |= SYMBOL_WRITE; - if (sym_has_value(sym)) { - newval.val = sym->def[S_DEF_USER].val; - break; - } - } - prop = sym_get_default_prop(sym); - if (prop) { - struct symbol *ds = prop_get_symbol(prop); - if (ds) { - sym->flags |= SYMBOL_WRITE; - sym_calc_value(ds); - newval.val = ds->curr.val; - } - } - break; - default: - ; - } - - sym->curr = newval; - if (sym_is_choice(sym) && newval.tri == yes) - sym->curr.val = sym_calc_choice(sym); - sym_validate_range(sym); - - if (memcmp(&oldval, &sym->curr, sizeof(oldval))) { - sym_set_changed(sym); - if (modules_sym == sym) { - sym_set_all_changed(); - modules_val = modules_sym->curr.tri; - } - } - - if (sym_is_choice(sym)) { - struct symbol *choice_sym; - - prop = sym_get_choice_prop(sym); - expr_list_for_each_sym(prop->expr, e, choice_sym) { - if ((sym->flags & SYMBOL_WRITE) && - choice_sym->visible != no) - choice_sym->flags |= SYMBOL_WRITE; - if (sym->flags & SYMBOL_CHANGED) - sym_set_changed(choice_sym); - } - } - - if (sym->flags & SYMBOL_AUTO) - sym->flags &= ~SYMBOL_WRITE; - - if (sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES) - set_all_choice_values(sym); + struct symbol_value newval, oldval; + struct property *prop; + struct expr *e; + + if ( !sym ) + return; + + if ( sym->flags & SYMBOL_VALID ) + return; + + if ( sym_is_choice_value(sym) && + sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES ) + { + sym->flags &= ~SYMBOL_NEED_SET_CHOICE_VALUES; + prop = sym_get_choice_prop(sym); + sym_calc_value(prop_get_symbol(prop)); + } + + sym->flags |= SYMBOL_VALID; + + oldval = sym->curr; + + switch (sym->type) + { + case S_INT: + case S_HEX: + case S_STRING: + newval = symbol_empty.curr; + break; + case S_BOOLEAN: + case S_TRISTATE: + newval = symbol_no.curr; + break; + default: + sym->curr.val = sym->name; + sym->curr.tri = no; + return; + } + if ( !sym_is_choice_value(sym) ) + sym->flags &= ~SYMBOL_WRITE; + + sym_calc_visibility(sym); + + /* set default if recursively called */ + sym->curr = newval; + + switch (sym_get_type(sym)) + { + case S_BOOLEAN: + case S_TRISTATE: + if ( sym_is_choice_value(sym) && sym->visible == yes ) + { + prop = sym_get_choice_prop(sym); + newval.tri = (prop_get_symbol(prop)->curr.val == sym) ? 
yes : no; + } + else + { + if ( sym->visible != no ) + { + /* if the symbol is visible use the user value + * if available, otherwise try the default value + */ + sym->flags |= SYMBOL_WRITE; + if ( sym_has_value(sym) ) + { + newval.tri = + EXPR_AND(sym->def[S_DEF_USER].tri, sym->visible); + goto calc_newval; + } + } + if ( sym->rev_dep.tri != no ) + sym->flags |= SYMBOL_WRITE; + if ( !sym_is_choice(sym) ) + { + prop = sym_get_default_prop(sym); + if ( prop ) + { + sym->flags |= SYMBOL_WRITE; + newval.tri = EXPR_AND(expr_calc_value(prop->expr), + prop->visible.tri); + } + } + calc_newval: + if ( sym->dir_dep.tri == no && sym->rev_dep.tri != no ) + { + struct expr *e; + e = expr_simplify_unmet_dep(sym->rev_dep.expr, + sym->dir_dep.expr); + fprintf(stderr, "warning: ("); + expr_fprint(e, stderr); + fprintf(stderr, + ") selects %s which has unmet direct dependencies (", + sym->name); + expr_fprint(sym->dir_dep.expr, stderr); + fprintf(stderr, ")\n"); + expr_free(e); + } + newval.tri = EXPR_OR(newval.tri, sym->rev_dep.tri); + } + if ( newval.tri == mod && sym_get_type(sym) == S_BOOLEAN ) + newval.tri = yes; + break; + case S_STRING: + case S_HEX: + case S_INT: + if ( sym->visible != no ) + { + sym->flags |= SYMBOL_WRITE; + if ( sym_has_value(sym) ) + { + newval.val = sym->def[S_DEF_USER].val; + break; + } + } + prop = sym_get_default_prop(sym); + if ( prop ) + { + struct symbol *ds = prop_get_symbol(prop); + if ( ds ) + { + sym->flags |= SYMBOL_WRITE; + sym_calc_value(ds); + newval.val = ds->curr.val; + } + } + break; + default:; + } + + sym->curr = newval; + if ( sym_is_choice(sym) && newval.tri == yes ) + sym->curr.val = sym_calc_choice(sym); + sym_validate_range(sym); + + if ( memcmp(&oldval, &sym->curr, sizeof(oldval)) ) + { + sym_set_changed(sym); + if ( modules_sym == sym ) + { + sym_set_all_changed(); + modules_val = modules_sym->curr.tri; + } + } + + if ( sym_is_choice(sym) ) + { + struct symbol *choice_sym; + + prop = sym_get_choice_prop(sym); + expr_list_for_each_sym(prop->expr, e, choice_sym) + { + if ( (sym->flags & SYMBOL_WRITE) && choice_sym->visible != no ) + choice_sym->flags |= SYMBOL_WRITE; + if ( sym->flags & SYMBOL_CHANGED ) + sym_set_changed(choice_sym); + } + } + + if ( sym->flags & SYMBOL_AUTO ) + sym->flags &= ~SYMBOL_WRITE; + + if ( sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES ) + set_all_choice_values(sym); } void sym_clear_all_valid(void) { - struct symbol *sym; - int i; + struct symbol *sym; + int i; - for_all_symbols(i, sym) - sym->flags &= ~SYMBOL_VALID; - sym_add_change_count(1); - sym_calc_value(modules_sym); + for_all_symbols(i, sym) sym->flags &= ~SYMBOL_VALID; + sym_add_change_count(1); + sym_calc_value(modules_sym); } bool sym_tristate_within_range(struct symbol *sym, tristate val) { - int type = sym_get_type(sym); + int type = sym_get_type(sym); - if (sym->visible == no) - return false; + if ( sym->visible == no ) + return false; - if (type != S_BOOLEAN && type != S_TRISTATE) - return false; + if ( type != S_BOOLEAN && type != S_TRISTATE ) + return false; - if (type == S_BOOLEAN && val == mod) - return false; - if (sym->visible <= sym->rev_dep.tri) - return false; - if (sym_is_choice_value(sym) && sym->visible == yes) - return val == yes; - return val >= sym->rev_dep.tri && val <= sym->visible; + if ( type == S_BOOLEAN && val == mod ) + return false; + if ( sym->visible <= sym->rev_dep.tri ) + return false; + if ( sym_is_choice_value(sym) && sym->visible == yes ) + return val == yes; + return val >= sym->rev_dep.tri && val <= sym->visible; } bool 
sym_set_tristate_value(struct symbol *sym, tristate val) { - tristate oldval = sym_get_tristate_value(sym); - - if (oldval != val && !sym_tristate_within_range(sym, val)) - return false; - - if (!(sym->flags & SYMBOL_DEF_USER)) { - sym->flags |= SYMBOL_DEF_USER; - sym_set_changed(sym); - } - /* - * setting a choice value also resets the new flag of the choice - * symbol and all other choice values. - */ - if (sym_is_choice_value(sym) && val == yes) { - struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym)); - struct property *prop; - struct expr *e; - - cs->def[S_DEF_USER].val = sym; - cs->flags |= SYMBOL_DEF_USER; - prop = sym_get_choice_prop(cs); - for (e = prop->expr; e; e = e->left.expr) { - if (e->right.sym->visible != no) - e->right.sym->flags |= SYMBOL_DEF_USER; - } - } - - sym->def[S_DEF_USER].tri = val; - if (oldval != val) - sym_clear_all_valid(); - - return true; + tristate oldval = sym_get_tristate_value(sym); + + if ( oldval != val && !sym_tristate_within_range(sym, val) ) + return false; + + if ( !(sym->flags & SYMBOL_DEF_USER) ) + { + sym->flags |= SYMBOL_DEF_USER; + sym_set_changed(sym); + } + /* + * setting a choice value also resets the new flag of the choice + * symbol and all other choice values. + */ + if ( sym_is_choice_value(sym) && val == yes ) + { + struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym)); + struct property *prop; + struct expr *e; + + cs->def[S_DEF_USER].val = sym; + cs->flags |= SYMBOL_DEF_USER; + prop = sym_get_choice_prop(cs); + for ( e = prop->expr; e; e = e->left.expr ) + { + if ( e->right.sym->visible != no ) + e->right.sym->flags |= SYMBOL_DEF_USER; + } + } + + sym->def[S_DEF_USER].tri = val; + if ( oldval != val ) + sym_clear_all_valid(); + + return true; } tristate sym_toggle_tristate_value(struct symbol *sym) { - tristate oldval, newval; - - oldval = newval = sym_get_tristate_value(sym); - do { - switch (newval) { - case no: - newval = mod; - break; - case mod: - newval = yes; - break; - case yes: - newval = no; - break; - } - if (sym_set_tristate_value(sym, newval)) - break; - } while (oldval != newval); - return newval; + tristate oldval, newval; + + oldval = newval = sym_get_tristate_value(sym); + do { + switch (newval) + { + case no: + newval = mod; + break; + case mod: + newval = yes; + break; + case yes: + newval = no; + break; + } + if ( sym_set_tristate_value(sym, newval) ) + break; + } while ( oldval != newval ); + return newval; } bool sym_string_valid(struct symbol *sym, const char *str) { - signed char ch; - - switch (sym->type) { - case S_STRING: - return true; - case S_INT: - ch = *str++; - if (ch == '-') - ch = *str++; - if (!isdigit(ch)) - return false; - if (ch == '0' && *str != 0) - return false; - while ((ch = *str++)) { - if (!isdigit(ch)) - return false; - } - return true; - case S_HEX: - if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) - str += 2; - ch = *str++; - do { - if (!isxdigit(ch)) - return false; - } while ((ch = *str++)); - return true; - case S_BOOLEAN: - case S_TRISTATE: - switch (str[0]) { - case 'y': case 'Y': - case 'm': case 'M': - case 'n': case 'N': - return true; - } - return false; - default: - return false; - } + signed char ch; + + switch (sym->type) + { + case S_STRING: + return true; + case S_INT: + ch = *str++; + if ( ch == '-' ) + ch = *str++; + if ( !isdigit(ch) ) + return false; + if ( ch == '0' && *str != 0 ) + return false; + while ( (ch = *str++) ) + { + if ( !isdigit(ch) ) + return false; + } + return true; + case S_HEX: + if ( str[0] == '0' && (str[1] == 'x' || 
str[1] == 'X') ) + str += 2; + ch = *str++; + do { + if ( !isxdigit(ch) ) + return false; + } while ( (ch = *str++) ); + return true; + case S_BOOLEAN: + case S_TRISTATE: + switch (str[0]) + { + case 'y': + case 'Y': + case 'm': + case 'M': + case 'n': + case 'N': + return true; + } + return false; + default: + return false; + } } bool sym_string_within_range(struct symbol *sym, const char *str) { - struct property *prop; - long long val; - - switch (sym->type) { - case S_STRING: - return sym_string_valid(sym, str); - case S_INT: - if (!sym_string_valid(sym, str)) - return false; - prop = sym_get_range_prop(sym); - if (!prop) - return true; - val = strtoll(str, NULL, 10); - return val >= sym_get_range_val(prop->expr->left.sym, 10) && - val <= sym_get_range_val(prop->expr->right.sym, 10); - case S_HEX: - if (!sym_string_valid(sym, str)) - return false; - prop = sym_get_range_prop(sym); - if (!prop) - return true; - val = strtoll(str, NULL, 16); - return val >= sym_get_range_val(prop->expr->left.sym, 16) && - val <= sym_get_range_val(prop->expr->right.sym, 16); - case S_BOOLEAN: - case S_TRISTATE: - switch (str[0]) { - case 'y': case 'Y': - return sym_tristate_within_range(sym, yes); - case 'm': case 'M': - return sym_tristate_within_range(sym, mod); - case 'n': case 'N': - return sym_tristate_within_range(sym, no); - } - return false; - default: - return false; - } + struct property *prop; + long long val; + + switch (sym->type) + { + case S_STRING: + return sym_string_valid(sym, str); + case S_INT: + if ( !sym_string_valid(sym, str) ) + return false; + prop = sym_get_range_prop(sym); + if ( !prop ) + return true; + val = strtoll(str, NULL, 10); + return val >= sym_get_range_val(prop->expr->left.sym, 10) && + val <= sym_get_range_val(prop->expr->right.sym, 10); + case S_HEX: + if ( !sym_string_valid(sym, str) ) + return false; + prop = sym_get_range_prop(sym); + if ( !prop ) + return true; + val = strtoll(str, NULL, 16); + return val >= sym_get_range_val(prop->expr->left.sym, 16) && + val <= sym_get_range_val(prop->expr->right.sym, 16); + case S_BOOLEAN: + case S_TRISTATE: + switch (str[0]) + { + case 'y': + case 'Y': + return sym_tristate_within_range(sym, yes); + case 'm': + case 'M': + return sym_tristate_within_range(sym, mod); + case 'n': + case 'N': + return sym_tristate_within_range(sym, no); + } + return false; + default: + return false; + } } bool sym_set_string_value(struct symbol *sym, const char *newval) { - const char *oldval; - char *val; - int size; - - switch (sym->type) { - case S_BOOLEAN: - case S_TRISTATE: - switch (newval[0]) { - case 'y': case 'Y': - return sym_set_tristate_value(sym, yes); - case 'm': case 'M': - return sym_set_tristate_value(sym, mod); - case 'n': case 'N': - return sym_set_tristate_value(sym, no); - } - return false; - default: - ; - } - - if (!sym_string_within_range(sym, newval)) - return false; - - if (!(sym->flags & SYMBOL_DEF_USER)) { - sym->flags |= SYMBOL_DEF_USER; - sym_set_changed(sym); - } - - oldval = sym->def[S_DEF_USER].val; - size = strlen(newval) + 1; - if (sym->type == S_HEX && (newval[0] != '0' || (newval[1] != 'x' && newval[1] != 'X'))) { - size += 2; - sym->def[S_DEF_USER].val = val = xmalloc(size); - *val++ = '0'; - *val++ = 'x'; - } else if (!oldval || strcmp(oldval, newval)) - sym->def[S_DEF_USER].val = val = xmalloc(size); - else - return true; - - strcpy(val, newval); - free((void *)oldval); - sym_clear_all_valid(); - - return true; + const char *oldval; + char *val; + int size; + + switch (sym->type) + { + case S_BOOLEAN: + 
case S_TRISTATE: + switch (newval[0]) + { + case 'y': + case 'Y': + return sym_set_tristate_value(sym, yes); + case 'm': + case 'M': + return sym_set_tristate_value(sym, mod); + case 'n': + case 'N': + return sym_set_tristate_value(sym, no); + } + return false; + default:; + } + + if ( !sym_string_within_range(sym, newval) ) + return false; + + if ( !(sym->flags & SYMBOL_DEF_USER) ) + { + sym->flags |= SYMBOL_DEF_USER; + sym_set_changed(sym); + } + + oldval = sym->def[S_DEF_USER].val; + size = strlen(newval) + 1; + if ( sym->type == S_HEX && + (newval[0] != '0' || (newval[1] != 'x' && newval[1] != 'X')) ) + { + size += 2; + sym->def[S_DEF_USER].val = val = xmalloc(size); + *val++ = '0'; + *val++ = 'x'; + } + else if ( !oldval || strcmp(oldval, newval) ) + sym->def[S_DEF_USER].val = val = xmalloc(size); + else + return true; + + strcpy(val, newval); + free((void *)oldval); + sym_clear_all_valid(); + + return true; } /* @@ -691,175 +747,197 @@ bool sym_set_string_value(struct symbol *sym, const char *newval) */ const char *sym_get_string_default(struct symbol *sym) { - struct property *prop; - struct symbol *ds; - const char *str; - tristate val; - - sym_calc_visibility(sym); - sym_calc_value(modules_sym); - val = symbol_no.curr.tri; - str = symbol_empty.curr.val; - - /* If symbol has a default value look it up */ - prop = sym_get_default_prop(sym); - if (prop != NULL) { - switch (sym->type) { - case S_BOOLEAN: - case S_TRISTATE: - /* The visibility may limit the value from yes => mod */ - val = EXPR_AND(expr_calc_value(prop->expr), prop->visible.tri); - break; - default: - /* - * The following fails to handle the situation - * where a default value is further limited by - * the valid range. - */ - ds = prop_get_symbol(prop); - if (ds != NULL) { - sym_calc_value(ds); - str = (const char *)ds->curr.val; - } - } - } - - /* Handle select statements */ - val = EXPR_OR(val, sym->rev_dep.tri); - - /* transpose mod to yes if modules are not enabled */ - if (val == mod) - if (!sym_is_choice_value(sym) && modules_sym->curr.tri == no) - val = yes; - - /* transpose mod to yes if type is bool */ - if (sym->type == S_BOOLEAN && val == mod) - val = yes; - - switch (sym->type) { - case S_BOOLEAN: - case S_TRISTATE: - switch (val) { - case no: return "n"; - case mod: return "m"; - case yes: return "y"; - } - case S_INT: - case S_HEX: - return str; - case S_STRING: - return str; - case S_OTHER: - case S_UNKNOWN: - break; - } - return ""; + struct property *prop; + struct symbol *ds; + const char *str; + tristate val; + + sym_calc_visibility(sym); + sym_calc_value(modules_sym); + val = symbol_no.curr.tri; + str = symbol_empty.curr.val; + + /* If symbol has a default value look it up */ + prop = sym_get_default_prop(sym); + if ( prop != NULL ) + { + switch (sym->type) + { + case S_BOOLEAN: + case S_TRISTATE: + /* The visibility may limit the value from yes => mod */ + val = EXPR_AND(expr_calc_value(prop->expr), prop->visible.tri); + break; + default: + /* + * The following fails to handle the situation + * where a default value is further limited by + * the valid range. 
+ */ + ds = prop_get_symbol(prop); + if ( ds != NULL ) + { + sym_calc_value(ds); + str = (const char *)ds->curr.val; + } + } + } + + /* Handle select statements */ + val = EXPR_OR(val, sym->rev_dep.tri); + + /* transpose mod to yes if modules are not enabled */ + if ( val == mod ) + if ( !sym_is_choice_value(sym) && modules_sym->curr.tri == no ) + val = yes; + + /* transpose mod to yes if type is bool */ + if ( sym->type == S_BOOLEAN && val == mod ) + val = yes; + + switch (sym->type) + { + case S_BOOLEAN: + case S_TRISTATE: + switch (val) + { + case no: + return "n"; + case mod: + return "m"; + case yes: + return "y"; + } + case S_INT: + case S_HEX: + return str; + case S_STRING: + return str; + case S_OTHER: + case S_UNKNOWN: + break; + } + return ""; } const char *sym_get_string_value(struct symbol *sym) { - tristate val; - - switch (sym->type) { - case S_BOOLEAN: - case S_TRISTATE: - val = sym_get_tristate_value(sym); - switch (val) { - case no: - return "n"; - case mod: - sym_calc_value(modules_sym); - return (modules_sym->curr.tri == no) ? "n" : "m"; - case yes: - return "y"; - } - break; - default: - ; - } - return (const char *)sym->curr.val; + tristate val; + + switch (sym->type) + { + case S_BOOLEAN: + case S_TRISTATE: + val = sym_get_tristate_value(sym); + switch (val) + { + case no: + return "n"; + case mod: + sym_calc_value(modules_sym); + return (modules_sym->curr.tri == no) ? "n" : "m"; + case yes: + return "y"; + } + break; + default:; + } + return (const char *)sym->curr.val; } bool sym_is_changable(struct symbol *sym) { - return sym->visible > sym->rev_dep.tri; + return sym->visible > sym->rev_dep.tri; } static unsigned strhash(const char *s) { - /* fnv32 hash */ - unsigned hash = 2166136261U; - for (; *s; s++) - hash = (hash ^ *s) * 0x01000193; - return hash; + /* fnv32 hash */ + unsigned hash = 2166136261U; + for ( ; *s; s++ ) + hash = (hash ^ *s) * 0x01000193; + return hash; } struct symbol *sym_lookup(const char *name, int flags) { - struct symbol *symbol; - char *new_name; - int hash; - - if (name) { - if (name[0] && !name[1]) { - switch (name[0]) { - case 'y': return &symbol_yes; - case 'm': return &symbol_mod; - case 'n': return &symbol_no; - } - } - hash = strhash(name) % SYMBOL_HASHSIZE; - - for (symbol = symbol_hash[hash]; symbol; symbol = symbol->next) { - if (symbol->name && - !strcmp(symbol->name, name) && - (flags ? symbol->flags & flags - : !(symbol->flags & (SYMBOL_CONST|SYMBOL_CHOICE)))) - return symbol; - } - new_name = strdup(name); - } else { - new_name = NULL; - hash = 0; - } - - symbol = xmalloc(sizeof(*symbol)); - memset(symbol, 0, sizeof(*symbol)); - symbol->name = new_name; - symbol->type = S_UNKNOWN; - symbol->flags |= flags; - - symbol->next = symbol_hash[hash]; - symbol_hash[hash] = symbol; - - return symbol; + struct symbol *symbol; + char *new_name; + int hash; + + if ( name ) + { + if ( name[0] && !name[1] ) + { + switch (name[0]) + { + case 'y': + return &symbol_yes; + case 'm': + return &symbol_mod; + case 'n': + return &symbol_no; + } + } + hash = strhash(name) % SYMBOL_HASHSIZE; + + for ( symbol = symbol_hash[hash]; symbol; symbol = symbol->next ) + { + if ( symbol->name && !strcmp(symbol->name, name) && + (flags ? 
symbol->flags & flags + : !(symbol->flags & (SYMBOL_CONST | SYMBOL_CHOICE))) ) + return symbol; + } + new_name = strdup(name); + } + else + { + new_name = NULL; + hash = 0; + } + + symbol = xmalloc(sizeof(*symbol)); + memset(symbol, 0, sizeof(*symbol)); + symbol->name = new_name; + symbol->type = S_UNKNOWN; + symbol->flags |= flags; + + symbol->next = symbol_hash[hash]; + symbol_hash[hash] = symbol; + + return symbol; } struct symbol *sym_find(const char *name) { - struct symbol *symbol = NULL; - int hash = 0; - - if (!name) - return NULL; - - if (name[0] && !name[1]) { - switch (name[0]) { - case 'y': return &symbol_yes; - case 'm': return &symbol_mod; - case 'n': return &symbol_no; - } - } - hash = strhash(name) % SYMBOL_HASHSIZE; - - for (symbol = symbol_hash[hash]; symbol; symbol = symbol->next) { - if (symbol->name && - !strcmp(symbol->name, name) && - !(symbol->flags & SYMBOL_CONST)) - break; - } - - return symbol; + struct symbol *symbol = NULL; + int hash = 0; + + if ( !name ) + return NULL; + + if ( name[0] && !name[1] ) + { + switch (name[0]) + { + case 'y': + return &symbol_yes; + case 'm': + return &symbol_mod; + case 'n': + return &symbol_no; + } + } + hash = strhash(name) % SYMBOL_HASHSIZE; + + for ( symbol = symbol_hash[hash]; symbol; symbol = symbol->next ) + { + if ( symbol->name && !strcmp(symbol->name, name) && + !(symbol->flags & SYMBOL_CONST) ) + break; + } + + return symbol; } /* @@ -869,94 +947,100 @@ struct symbol *sym_find(const char *name) */ const char *sym_expand_string_value(const char *in) { - const char *src; - char *res; - size_t reslen; - - reslen = strlen(in) + 1; - res = xmalloc(reslen); - res[0] = '\0'; - - while ((src = strchr(in, '$'))) { - char *p, name[SYMBOL_MAXLENGTH]; - const char *symval = ""; - struct symbol *sym; - size_t newlen; - - strncat(res, in, src - in); - src++; - - p = name; - while (isalnum(*src) || *src == '_') - *p++ = *src++; - *p = '\0'; - - sym = sym_find(name); - if (sym != NULL) { - sym_calc_value(sym); - symval = sym_get_string_value(sym); - } - - newlen = strlen(res) + strlen(symval) + strlen(src) + 1; - if (newlen > reslen) { - reslen = newlen; - res = realloc(res, reslen); - } - - strcat(res, symval); - in = src; - } - strcat(res, in); - - return res; + const char *src; + char *res; + size_t reslen; + + reslen = strlen(in) + 1; + res = xmalloc(reslen); + res[0] = '\0'; + + while ( (src = strchr(in, '$')) ) + { + char *p, name[SYMBOL_MAXLENGTH]; + const char *symval = ""; + struct symbol *sym; + size_t newlen; + + strncat(res, in, src - in); + src++; + + p = name; + while ( isalnum(*src) || *src == '_' ) + *p++ = *src++; + *p = '\0'; + + sym = sym_find(name); + if ( sym != NULL ) + { + sym_calc_value(sym); + symval = sym_get_string_value(sym); + } + + newlen = strlen(res) + strlen(symval) + strlen(src) + 1; + if ( newlen > reslen ) + { + reslen = newlen; + res = realloc(res, reslen); + } + + strcat(res, symval); + in = src; + } + strcat(res, in); + + return res; } const char *sym_escape_string_value(const char *in) { - const char *p; - size_t reslen; - char *res; - size_t l; + const char *p; + size_t reslen; + char *res; + size_t l; - reslen = strlen(in) + strlen("\"\"") + 1; + reslen = strlen(in) + strlen("\"\"") + 1; - p = in; - for (;;) { - l = strcspn(p, "\"\\"); - p += l; + p = in; + for ( ;; ) + { + l = strcspn(p, "\"\\"); + p += l; - if (p[0] == '\0') - break; + if ( p[0] == '\0' ) + break; - reslen++; - p++; - } + reslen++; + p++; + } - res = xmalloc(reslen); - res[0] = '\0'; + res = xmalloc(reslen); + res[0] = '\0'; 
- strcat(res, "\""); + strcat(res, "\""); - p = in; - for (;;) { - l = strcspn(p, "\"\\"); - strncat(res, p, l); - p += l; + p = in; + for ( ;; ) + { + l = strcspn(p, "\"\\"); + strncat(res, p, l); + p += l; - if (p[0] == '\0') - break; + if ( p[0] == '\0' ) + break; - strcat(res, "\\"); - strncat(res, p++, 1); - } + strcat(res, "\\"); + strncat(res, p++, 1); + } - strcat(res, "\""); - return res; + strcat(res, "\""); + return res; } -struct sym_match { - struct symbol *sym; - off_t so, eo; +struct sym_match +{ + struct symbol *sym; + off_t so, eo; }; /* Compare matched symbols as thus: @@ -965,81 +1049,84 @@ struct sym_match { */ static int sym_rel_comp(const void *sym1, const void *sym2) { - const struct sym_match *s1 = sym1; - const struct sym_match *s2 = sym2; - int exact1, exact2; - - /* Exact match: - * - if matched length on symbol s1 is the length of that symbol, - * then this symbol should come first; - * - if matched length on symbol s2 is the length of that symbol, - * then this symbol should come first. - * Note: since the search can be a regexp, both symbols may match - * exactly; if this is the case, we can't decide which comes first, - * and we fallback to sorting alphabetically. - */ - exact1 = (s1->eo - s1->so) == strlen(s1->sym->name); - exact2 = (s2->eo - s2->so) == strlen(s2->sym->name); - if (exact1 && !exact2) - return -1; - if (!exact1 && exact2) - return 1; - - /* As a fallback, sort symbols alphabetically */ - return strcmp(s1->sym->name, s2->sym->name); + const struct sym_match *s1 = sym1; + const struct sym_match *s2 = sym2; + int exact1, exact2; + + /* Exact match: + * - if matched length on symbol s1 is the length of that symbol, + * then this symbol should come first; + * - if matched length on symbol s2 is the length of that symbol, + * then this symbol should come first. + * Note: since the search can be a regexp, both symbols may match + * exactly; if this is the case, we can't decide which comes first, + * and we fallback to sorting alphabetically. 
+ */ + exact1 = (s1->eo - s1->so) == strlen(s1->sym->name); + exact2 = (s2->eo - s2->so) == strlen(s2->sym->name); + if ( exact1 && !exact2 ) + return -1; + if ( !exact1 && exact2 ) + return 1; + + /* As a fallback, sort symbols alphabetically */ + return strcmp(s1->sym->name, s2->sym->name); } struct symbol **sym_re_search(const char *pattern) { - struct symbol *sym, **sym_arr = NULL; - struct sym_match *sym_match_arr = NULL; - int i, cnt, size; - regex_t re; - regmatch_t match[1]; - - cnt = size = 0; - /* Skip if empty */ - if (strlen(pattern) == 0) - return NULL; - if (regcomp(&re, pattern, REG_EXTENDED|REG_ICASE)) - return NULL; - - for_all_symbols(i, sym) { - if (sym->flags & SYMBOL_CONST || !sym->name) - continue; - if (regexec(&re, sym->name, 1, match, 0)) - continue; - if (cnt >= size) { - void *tmp; - size += 16; - tmp = realloc(sym_match_arr, size * sizeof(struct sym_match)); - if (!tmp) - goto sym_re_search_free; - sym_match_arr = tmp; - } - sym_calc_value(sym); - /* As regexec returned 0, we know we have a match, so - * we can use match[0].rm_[se]o without further checks - */ - sym_match_arr[cnt].so = match[0].rm_so; - sym_match_arr[cnt].eo = match[0].rm_eo; - sym_match_arr[cnt++].sym = sym; - } - if (sym_match_arr) { - qsort(sym_match_arr, cnt, sizeof(struct sym_match), sym_rel_comp); - sym_arr = malloc((cnt+1) * sizeof(struct symbol)); - if (!sym_arr) - goto sym_re_search_free; - for (i = 0; i < cnt; i++) - sym_arr[i] = sym_match_arr[i].sym; - sym_arr[cnt] = NULL; - } + struct symbol *sym, **sym_arr = NULL; + struct sym_match *sym_match_arr = NULL; + int i, cnt, size; + regex_t re; + regmatch_t match[1]; + + cnt = size = 0; + /* Skip if empty */ + if ( strlen(pattern) == 0 ) + return NULL; + if ( regcomp(&re, pattern, REG_EXTENDED | REG_ICASE) ) + return NULL; + + for_all_symbols(i, sym) + { + if ( sym->flags & SYMBOL_CONST || !sym->name ) + continue; + if ( regexec(&re, sym->name, 1, match, 0) ) + continue; + if ( cnt >= size ) + { + void *tmp; + size += 16; + tmp = realloc(sym_match_arr, size * sizeof(struct sym_match)); + if ( !tmp ) + goto sym_re_search_free; + sym_match_arr = tmp; + } + sym_calc_value(sym); + /* As regexec returned 0, we know we have a match, so + * we can use match[0].rm_[se]o without further checks + */ + sym_match_arr[cnt].so = match[0].rm_so; + sym_match_arr[cnt].eo = match[0].rm_eo; + sym_match_arr[cnt++].sym = sym; + } + if ( sym_match_arr ) + { + qsort(sym_match_arr, cnt, sizeof(struct sym_match), sym_rel_comp); + sym_arr = malloc((cnt + 1) * sizeof(struct symbol)); + if ( !sym_arr ) + goto sym_re_search_free; + for ( i = 0; i < cnt; i++ ) + sym_arr[i] = sym_match_arr[i].sym; + sym_arr[cnt] = NULL; + } sym_re_search_free: - /* sym_match_arr can be NULL if no match, but free(NULL) is OK */ - free(sym_match_arr); - regfree(&re); + /* sym_match_arr can be NULL if no match, but free(NULL) is OK */ + free(sym_match_arr); + regfree(&re); - return sym_arr; + return sym_arr; } /* @@ -1048,28 +1135,29 @@ sym_re_search_free: * The entries are located on the call stack so no need to free memory. * Note insert() remove() must always match to properly clear the stack. 
*/ -static struct dep_stack { - struct dep_stack *prev, *next; - struct symbol *sym; - struct property *prop; - struct expr *expr; -} *check_top; +static struct dep_stack +{ + struct dep_stack *prev, *next; + struct symbol *sym; + struct property *prop; + struct expr *expr; +} * check_top; static void dep_stack_insert(struct dep_stack *stack, struct symbol *sym) { - memset(stack, 0, sizeof(*stack)); - if (check_top) - check_top->next = stack; - stack->prev = check_top; - stack->sym = sym; - check_top = stack; + memset(stack, 0, sizeof(*stack)); + if ( check_top ) + check_top->next = stack; + stack->prev = check_top; + stack->sym = sym; + check_top = stack; } static void dep_stack_remove(void) { - check_top = check_top->prev; - if (check_top) - check_top->next = NULL; + check_top = check_top->prev; + if ( check_top ) + check_top->next = NULL; } /* @@ -1079,298 +1167,323 @@ static void dep_stack_remove(void) */ static void sym_check_print_recursive(struct symbol *last_sym) { - struct dep_stack *stack; - struct symbol *sym, *next_sym; - struct menu *menu = NULL; - struct property *prop; - struct dep_stack cv_stack; - - if (sym_is_choice_value(last_sym)) { - dep_stack_insert(&cv_stack, last_sym); - last_sym = prop_get_symbol(sym_get_choice_prop(last_sym)); - } - - for (stack = check_top; stack != NULL; stack = stack->prev) - if (stack->sym == last_sym) - break; - if (!stack) { - fprintf(stderr, "unexpected recursive dependency error\n"); - return; - } - - for (; stack; stack = stack->next) { - sym = stack->sym; - next_sym = stack->next ? stack->next->sym : last_sym; - prop = stack->prop; - if (prop == NULL) - prop = stack->sym->prop; - - /* for choice values find the menu entry (used below) */ - if (sym_is_choice(sym) || sym_is_choice_value(sym)) { - for (prop = sym->prop; prop; prop = prop->next) { - menu = prop->menu; - if (prop->menu) - break; - } - } - if (stack->sym == last_sym) - fprintf(stderr, "%s:%d:error: recursive dependency detected!\n", - prop->file->name, prop->lineno); - if (stack->expr) { - fprintf(stderr, "%s:%d:\tsymbol %s %s value contains %s\n", - prop->file->name, prop->lineno, - sym->name ? sym->name : "", - prop_get_type_name(prop->type), - next_sym->name ? next_sym->name : ""); - } else if (stack->prop) { - fprintf(stderr, "%s:%d:\tsymbol %s depends on %s\n", - prop->file->name, prop->lineno, - sym->name ? sym->name : "", - next_sym->name ? next_sym->name : ""); - } else if (sym_is_choice(sym)) { - fprintf(stderr, "%s:%d:\tchoice %s contains symbol %s\n", - menu->file->name, menu->lineno, - sym->name ? sym->name : "", - next_sym->name ? next_sym->name : ""); - } else if (sym_is_choice_value(sym)) { - fprintf(stderr, "%s:%d:\tsymbol %s is part of choice %s\n", - menu->file->name, menu->lineno, - sym->name ? sym->name : "", - next_sym->name ? next_sym->name : ""); - } else { - fprintf(stderr, "%s:%d:\tsymbol %s is selected by %s\n", - prop->file->name, prop->lineno, - sym->name ? sym->name : "", - next_sym->name ? 
next_sym->name : ""); - } - } - - if (check_top == &cv_stack) - dep_stack_remove(); + struct dep_stack *stack; + struct symbol *sym, *next_sym; + struct menu *menu = NULL; + struct property *prop; + struct dep_stack cv_stack; + + if ( sym_is_choice_value(last_sym) ) + { + dep_stack_insert(&cv_stack, last_sym); + last_sym = prop_get_symbol(sym_get_choice_prop(last_sym)); + } + + for ( stack = check_top; stack != NULL; stack = stack->prev ) + if ( stack->sym == last_sym ) + break; + if ( !stack ) + { + fprintf(stderr, "unexpected recursive dependency error\n"); + return; + } + + for ( ; stack; stack = stack->next ) + { + sym = stack->sym; + next_sym = stack->next ? stack->next->sym : last_sym; + prop = stack->prop; + if ( prop == NULL ) + prop = stack->sym->prop; + + /* for choice values find the menu entry (used below) */ + if ( sym_is_choice(sym) || sym_is_choice_value(sym) ) + { + for ( prop = sym->prop; prop; prop = prop->next ) + { + menu = prop->menu; + if ( prop->menu ) + break; + } + } + if ( stack->sym == last_sym ) + fprintf(stderr, "%s:%d:error: recursive dependency detected!\n", + prop->file->name, prop->lineno); + if ( stack->expr ) + { + fprintf(stderr, "%s:%d:\tsymbol %s %s value contains %s\n", + prop->file->name, prop->lineno, + sym->name ? sym->name : "", + prop_get_type_name(prop->type), + next_sym->name ? next_sym->name : ""); + } + else if ( stack->prop ) + { + fprintf(stderr, "%s:%d:\tsymbol %s depends on %s\n", + prop->file->name, prop->lineno, + sym->name ? sym->name : "", + next_sym->name ? next_sym->name : ""); + } + else if ( sym_is_choice(sym) ) + { + fprintf(stderr, "%s:%d:\tchoice %s contains symbol %s\n", + menu->file->name, menu->lineno, + sym->name ? sym->name : "", + next_sym->name ? next_sym->name : ""); + } + else if ( sym_is_choice_value(sym) ) + { + fprintf(stderr, "%s:%d:\tsymbol %s is part of choice %s\n", + menu->file->name, menu->lineno, + sym->name ? sym->name : "", + next_sym->name ? next_sym->name : ""); + } + else + { + fprintf(stderr, "%s:%d:\tsymbol %s is selected by %s\n", + prop->file->name, prop->lineno, + sym->name ? sym->name : "", + next_sym->name ? next_sym->name : ""); + } + } + + if ( check_top == &cv_stack ) + dep_stack_remove(); } static struct symbol *sym_check_expr_deps(struct expr *e) { - struct symbol *sym; - - if (!e) - return NULL; - switch (e->type) { - case E_OR: - case E_AND: - sym = sym_check_expr_deps(e->left.expr); - if (sym) - return sym; - return sym_check_expr_deps(e->right.expr); - case E_NOT: - return sym_check_expr_deps(e->left.expr); - case E_EQUAL: - case E_GEQ: - case E_GTH: - case E_LEQ: - case E_LTH: - case E_UNEQUAL: - sym = sym_check_deps(e->left.sym); - if (sym) - return sym; - return sym_check_deps(e->right.sym); - case E_SYMBOL: - return sym_check_deps(e->left.sym); - default: - break; - } - printf("Oops! How to check %d?\n", e->type); - return NULL; + struct symbol *sym; + + if ( !e ) + return NULL; + switch (e->type) + { + case E_OR: + case E_AND: + sym = sym_check_expr_deps(e->left.expr); + if ( sym ) + return sym; + return sym_check_expr_deps(e->right.expr); + case E_NOT: + return sym_check_expr_deps(e->left.expr); + case E_EQUAL: + case E_GEQ: + case E_GTH: + case E_LEQ: + case E_LTH: + case E_UNEQUAL: + sym = sym_check_deps(e->left.sym); + if ( sym ) + return sym; + return sym_check_deps(e->right.sym); + case E_SYMBOL: + return sym_check_deps(e->left.sym); + default: + break; + } + printf("Oops! 
How to check %d?\n", e->type); + return NULL; } /* return NULL when dependencies are OK */ static struct symbol *sym_check_sym_deps(struct symbol *sym) { - struct symbol *sym2; - struct property *prop; - struct dep_stack stack; - - dep_stack_insert(&stack, sym); - - sym2 = sym_check_expr_deps(sym->rev_dep.expr); - if (sym2) - goto out; - - for (prop = sym->prop; prop; prop = prop->next) { - if (prop->type == P_CHOICE || prop->type == P_SELECT) - continue; - stack.prop = prop; - sym2 = sym_check_expr_deps(prop->visible.expr); - if (sym2) - break; - if (prop->type != P_DEFAULT || sym_is_choice(sym)) - continue; - stack.expr = prop->expr; - sym2 = sym_check_expr_deps(prop->expr); - if (sym2) - break; - stack.expr = NULL; - } + struct symbol *sym2; + struct property *prop; + struct dep_stack stack; + + dep_stack_insert(&stack, sym); + + sym2 = sym_check_expr_deps(sym->rev_dep.expr); + if ( sym2 ) + goto out; + + for ( prop = sym->prop; prop; prop = prop->next ) + { + if ( prop->type == P_CHOICE || prop->type == P_SELECT ) + continue; + stack.prop = prop; + sym2 = sym_check_expr_deps(prop->visible.expr); + if ( sym2 ) + break; + if ( prop->type != P_DEFAULT || sym_is_choice(sym) ) + continue; + stack.expr = prop->expr; + sym2 = sym_check_expr_deps(prop->expr); + if ( sym2 ) + break; + stack.expr = NULL; + } out: - dep_stack_remove(); + dep_stack_remove(); - return sym2; + return sym2; } static struct symbol *sym_check_choice_deps(struct symbol *choice) { - struct symbol *sym, *sym2; - struct property *prop; - struct expr *e; - struct dep_stack stack; - - dep_stack_insert(&stack, choice); - - prop = sym_get_choice_prop(choice); - expr_list_for_each_sym(prop->expr, e, sym) - sym->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED); - - choice->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED); - sym2 = sym_check_sym_deps(choice); - choice->flags &= ~SYMBOL_CHECK; - if (sym2) - goto out; - - expr_list_for_each_sym(prop->expr, e, sym) { - sym2 = sym_check_sym_deps(sym); - if (sym2) - break; - } + struct symbol *sym, *sym2; + struct property *prop; + struct expr *e; + struct dep_stack stack; + + dep_stack_insert(&stack, choice); + + prop = sym_get_choice_prop(choice); + expr_list_for_each_sym(prop->expr, e, sym) sym->flags |= + (SYMBOL_CHECK | SYMBOL_CHECKED); + + choice->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED); + sym2 = sym_check_sym_deps(choice); + choice->flags &= ~SYMBOL_CHECK; + if ( sym2 ) + goto out; + + expr_list_for_each_sym(prop->expr, e, sym) + { + sym2 = sym_check_sym_deps(sym); + if ( sym2 ) + break; + } out: - expr_list_for_each_sym(prop->expr, e, sym) - sym->flags &= ~SYMBOL_CHECK; + expr_list_for_each_sym(prop->expr, e, sym) sym->flags &= ~SYMBOL_CHECK; - if (sym2 && sym_is_choice_value(sym2) && - prop_get_symbol(sym_get_choice_prop(sym2)) == choice) - sym2 = choice; + if ( sym2 && sym_is_choice_value(sym2) && + prop_get_symbol(sym_get_choice_prop(sym2)) == choice ) + sym2 = choice; - dep_stack_remove(); + dep_stack_remove(); - return sym2; + return sym2; } struct symbol *sym_check_deps(struct symbol *sym) { - struct symbol *sym2; - struct property *prop; - - if (sym->flags & SYMBOL_CHECK) { - sym_check_print_recursive(sym); - return sym; - } - if (sym->flags & SYMBOL_CHECKED) - return NULL; - - if (sym_is_choice_value(sym)) { - struct dep_stack stack; - - /* for choice groups start the check with main choice symbol */ - dep_stack_insert(&stack, sym); - prop = sym_get_choice_prop(sym); - sym2 = sym_check_deps(prop_get_symbol(prop)); - dep_stack_remove(); - } else if (sym_is_choice(sym)) { - sym2 = 
sym_check_choice_deps(sym); - } else { - sym->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED); - sym2 = sym_check_sym_deps(sym); - sym->flags &= ~SYMBOL_CHECK; - } - - if (sym2 && sym2 == sym) - sym2 = NULL; - - return sym2; + struct symbol *sym2; + struct property *prop; + + if ( sym->flags & SYMBOL_CHECK ) + { + sym_check_print_recursive(sym); + return sym; + } + if ( sym->flags & SYMBOL_CHECKED ) + return NULL; + + if ( sym_is_choice_value(sym) ) + { + struct dep_stack stack; + + /* for choice groups start the check with main choice symbol */ + dep_stack_insert(&stack, sym); + prop = sym_get_choice_prop(sym); + sym2 = sym_check_deps(prop_get_symbol(prop)); + dep_stack_remove(); + } + else if ( sym_is_choice(sym) ) + { + sym2 = sym_check_choice_deps(sym); + } + else + { + sym->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED); + sym2 = sym_check_sym_deps(sym); + sym->flags &= ~SYMBOL_CHECK; + } + + if ( sym2 && sym2 == sym ) + sym2 = NULL; + + return sym2; } struct property *prop_alloc(enum prop_type type, struct symbol *sym) { - struct property *prop; - struct property **propp; - - prop = xmalloc(sizeof(*prop)); - memset(prop, 0, sizeof(*prop)); - prop->type = type; - prop->sym = sym; - prop->file = current_file; - prop->lineno = zconf_lineno(); - - /* append property to the prop list of symbol */ - if (sym) { - for (propp = &sym->prop; *propp; propp = &(*propp)->next) - ; - *propp = prop; - } - - return prop; + struct property *prop; + struct property **propp; + + prop = xmalloc(sizeof(*prop)); + memset(prop, 0, sizeof(*prop)); + prop->type = type; + prop->sym = sym; + prop->file = current_file; + prop->lineno = zconf_lineno(); + + /* append property to the prop list of symbol */ + if ( sym ) + { + for ( propp = &sym->prop; *propp; propp = &(*propp)->next ) + ; + *propp = prop; + } + + return prop; } struct symbol *prop_get_symbol(struct property *prop) { - if (prop->expr && (prop->expr->type == E_SYMBOL || - prop->expr->type == E_LIST)) - return prop->expr->left.sym; - return NULL; + if ( prop->expr && + (prop->expr->type == E_SYMBOL || prop->expr->type == E_LIST) ) + return prop->expr->left.sym; + return NULL; } const char *prop_get_type_name(enum prop_type type) { - switch (type) { - case P_PROMPT: - return "prompt"; - case P_ENV: - return "env"; - case P_COMMENT: - return "comment"; - case P_MENU: - return "menu"; - case P_DEFAULT: - return "default"; - case P_CHOICE: - return "choice"; - case P_SELECT: - return "select"; - case P_RANGE: - return "range"; - case P_SYMBOL: - return "symbol"; - case P_UNKNOWN: - break; - } - return "unknown"; + switch (type) + { + case P_PROMPT: + return "prompt"; + case P_ENV: + return "env"; + case P_COMMENT: + return "comment"; + case P_MENU: + return "menu"; + case P_DEFAULT: + return "default"; + case P_CHOICE: + return "choice"; + case P_SELECT: + return "select"; + case P_RANGE: + return "range"; + case P_SYMBOL: + return "symbol"; + case P_UNKNOWN: + break; + } + return "unknown"; } static void prop_add_env(const char *env) { - struct symbol *sym, *sym2; - struct property *prop; - char *p; - - sym = current_entry->sym; - sym->flags |= SYMBOL_AUTO; - for_all_properties(sym, prop, P_ENV) { - sym2 = prop_get_symbol(prop); - if (strcmp(sym2->name, env)) - menu_warn(current_entry, "redefining environment symbol from %s", - sym2->name); - return; - } - - prop = prop_alloc(P_ENV, sym); - prop->expr = expr_alloc_symbol(sym_lookup(env, SYMBOL_CONST)); - - sym_env_list = expr_alloc_one(E_LIST, sym_env_list); - sym_env_list->right.sym = sym; - - p = getenv(env); - if 
(p) - sym_add_default(sym, p); - else - menu_warn(current_entry, "environment variable %s undefined", env); + struct symbol *sym, *sym2; + struct property *prop; + char *p; + + sym = current_entry->sym; + sym->flags |= SYMBOL_AUTO; + for_all_properties(sym, prop, P_ENV) + { + sym2 = prop_get_symbol(prop); + if ( strcmp(sym2->name, env) ) + menu_warn(current_entry, "redefining environment symbol from %s", + sym2->name); + return; + } + + prop = prop_alloc(P_ENV, sym); + prop->expr = expr_alloc_symbol(sym_lookup(env, SYMBOL_CONST)); + + sym_env_list = expr_alloc_one(E_LIST, sym_env_list); + sym_env_list->right.sym = sym; + + p = getenv(env); + if ( p ) + sym_add_default(sym, p); + else + menu_warn(current_entry, "environment variable %s undefined", env); } diff --git a/xen/tools/kconfig/util.c b/xen/tools/kconfig/util.c index 0e76042473..28aa4b4bf8 100644 --- a/xen/tools/kconfig/util.c +++ b/xen/tools/kconfig/util.c @@ -13,135 +13,142 @@ /* file already present in list? If not add it */ struct file *file_lookup(const char *name) { - struct file *file; - const char *file_name = sym_expand_string_value(name); - - for (file = file_list; file; file = file->next) { - if (!strcmp(name, file->name)) { - free((void *)file_name); - return file; - } - } - - file = xmalloc(sizeof(*file)); - memset(file, 0, sizeof(*file)); - file->name = file_name; - file->next = file_list; - file_list = file; - return file; + struct file *file; + const char *file_name = sym_expand_string_value(name); + + for ( file = file_list; file; file = file->next ) + { + if ( !strcmp(name, file->name) ) + { + free((void *)file_name); + return file; + } + } + + file = xmalloc(sizeof(*file)); + memset(file, 0, sizeof(*file)); + file->name = file_name; + file->next = file_list; + file_list = file; + return file; } /* write a dependency file as used by kbuild to track dependencies */ int file_write_dep(const char *name) { - struct symbol *sym, *env_sym; - struct expr *e; - struct file *file; - FILE *out; - - if (!name) - name = ".kconfig.d"; - out = fopen("..config.tmp", "w"); - if (!out) - return 1; - fprintf(out, "deps_config := \\\n"); - for (file = file_list; file; file = file->next) { - if (file->next) - fprintf(out, "\t%s \\\n", file->name); - else - fprintf(out, "\t%s\n", file->name); - } - fprintf(out, "\n%s: \\\n" - "\t$(deps_config)\n\n", conf_get_autoconfig_name()); - - expr_list_for_each_sym(sym_env_list, e, sym) { - struct property *prop; - const char *value; - - prop = sym_get_env_prop(sym); - env_sym = prop_get_symbol(prop); - if (!env_sym) - continue; - value = getenv(env_sym->name); - if (!value) - value = ""; - fprintf(out, "ifneq \"$(%s)\" \"%s\"\n", env_sym->name, value); - fprintf(out, "%s: FORCE\n", conf_get_autoconfig_name()); - fprintf(out, "endif\n"); - } - - fprintf(out, "\n$(deps_config): ;\n"); - fclose(out); - rename("..config.tmp", name); - return 0; + struct symbol *sym, *env_sym; + struct expr *e; + struct file *file; + FILE *out; + + if ( !name ) + name = ".kconfig.d"; + out = fopen("..config.tmp", "w"); + if ( !out ) + return 1; + fprintf(out, "deps_config := \\\n"); + for ( file = file_list; file; file = file->next ) + { + if ( file->next ) + fprintf(out, "\t%s \\\n", file->name); + else + fprintf(out, "\t%s\n", file->name); + } + fprintf(out, + "\n%s: \\\n" + "\t$(deps_config)\n\n", + conf_get_autoconfig_name()); + + expr_list_for_each_sym(sym_env_list, e, sym) + { + struct property *prop; + const char *value; + + prop = sym_get_env_prop(sym); + env_sym = prop_get_symbol(prop); + if ( !env_sym ) + 
continue; + value = getenv(env_sym->name); + if ( !value ) + value = ""; + fprintf(out, "ifneq \"$(%s)\" \"%s\"\n", env_sym->name, value); + fprintf(out, "%s: FORCE\n", conf_get_autoconfig_name()); + fprintf(out, "endif\n"); + } + + fprintf(out, "\n$(deps_config): ;\n"); + fclose(out); + rename("..config.tmp", name); + return 0; } - /* Allocate initial growable string */ struct gstr str_new(void) { - struct gstr gs; - gs.s = xmalloc(sizeof(char) * 64); - gs.len = 64; - gs.max_width = 0; - strcpy(gs.s, "\0"); - return gs; + struct gstr gs; + gs.s = xmalloc(sizeof(char) * 64); + gs.len = 64; + gs.max_width = 0; + strcpy(gs.s, "\0"); + return gs; } /* Free storage for growable string */ void str_free(struct gstr *gs) { - if (gs->s) - free(gs->s); - gs->s = NULL; - gs->len = 0; + if ( gs->s ) + free(gs->s); + gs->s = NULL; + gs->len = 0; } /* Append to growable string */ void str_append(struct gstr *gs, const char *s) { - size_t l; - if (s) { - l = strlen(gs->s) + strlen(s) + 1; - if (l > gs->len) { - gs->s = realloc(gs->s, l); - gs->len = l; - } - strcat(gs->s, s); - } + size_t l; + if ( s ) + { + l = strlen(gs->s) + strlen(s) + 1; + if ( l > gs->len ) + { + gs->s = realloc(gs->s, l); + gs->len = l; + } + strcat(gs->s, s); + } } /* Append printf formatted string to growable string */ void str_printf(struct gstr *gs, const char *fmt, ...) { - va_list ap; - char s[10000]; /* big enough... */ - va_start(ap, fmt); - vsnprintf(s, sizeof(s), fmt, ap); - str_append(gs, s); - va_end(ap); + va_list ap; + char s[10000]; /* big enough... */ + va_start(ap, fmt); + vsnprintf(s, sizeof(s), fmt, ap); + str_append(gs, s); + va_end(ap); } /* Retrieve value of growable string */ const char *str_get(struct gstr *gs) { - return gs->s; + return gs->s; } void *xmalloc(size_t size) { - void *p = malloc(size); - if (p) - return p; - fprintf(stderr, "Out of memory.\n"); - exit(1); + void *p = malloc(size); + if ( p ) + return p; + fprintf(stderr, "Out of memory.\n"); + exit(1); } void *xcalloc(size_t nmemb, size_t size) { - void *p = calloc(nmemb, size); - if (p) - return p; - fprintf(stderr, "Out of memory.\n"); - exit(1); + void *p = calloc(nmemb, size); + if ( p ) + return p; + fprintf(stderr, "Out of memory.\n"); + exit(1); } diff --git a/xen/tools/symbols.c b/xen/tools/symbols.c index 8c5842d43f..3959680d69 100644 --- a/xen/tools/symbols.c +++ b/xen/tools/symbols.c @@ -33,23 +33,24 @@ #include #include -#define KSYM_NAME_LEN 127 +#define KSYM_NAME_LEN 127 - -struct sym_entry { - unsigned long long addr; - unsigned int len; - unsigned char *sym; - char *orig_symbol; - unsigned int addr_idx; - unsigned int stream_offset; - unsigned char type; +struct sym_entry +{ + unsigned long long addr; + unsigned int len; + unsigned char *sym; + char *orig_symbol; + unsigned int addr_idx; + unsigned int stream_offset; + unsigned char type; }; #define SYMBOL_NAME(s) ((char *)(s)->sym + 1) static struct sym_entry *table; static unsigned int table_size, table_cnt; -static unsigned long long _stext, _etext, _sinittext, _einittext, _sextratext, _eextratext; +static unsigned long long _stext, _etext, _sinittext, _einittext, _sextratext, + _eextratext; static int all_symbols = 0; static int sort_by_name = 0; static int map_only = 0; @@ -63,11 +64,11 @@ int token_profit[0x10000]; unsigned char best_table[256][2]; unsigned char best_table_len[256]; - static void usage(void) { - fprintf(stderr, "Usage: symbols [--all-symbols] [--symbol-prefix=] < in.map > out.S\n"); - exit(1); + fprintf(stderr, "Usage: symbols [--all-symbols] 
[--symbol-prefix=] < in.map > out.S\n"); + exit(1); } /* @@ -76,575 +77,612 @@ static void usage(void) */ static inline int is_arm_mapping_symbol(const char *str) { - return str[0] == '$' && strchr("atd", str[1]) - && (str[2] == '\0' || str[2] == '.'); + return str[0] == '$' && strchr("atd", str[1]) && + (str[2] == '\0' || str[2] == '.'); } static int read_symbol(FILE *in, struct sym_entry *s) { - char str[500], type[20] = ""; - char *sym, stype; - static enum { symbol, single_source, multi_source } last; - static char *filename; - int rc = -1; - - switch (input_format) { - case fmt_bsd: - rc = fscanf(in, "%llx %c %499s\n", &s->addr, &stype, str); - break; - case fmt_sysv: - while (fscanf(in, "\n") == 1) - /* nothing */; - rc = fscanf(in, "%499[^ |] |%llx | %c |", - str, &s->addr, &stype); - if (rc == 3 && fscanf(in, " %19[^ |] |", type) != 1) - *type = '\0'; - break; - } - if (rc != 3) { - if (rc != EOF) { - /* skip line */ - if (fgets(str, 500, in) == NULL) - return -1; /* must check fgets result */ - } - return -1; - } - - sym = strrchr(str, '.'); - if (strcasecmp(type, "FILE") == 0 || - (/* - * GNU nm prior to binutils commit 552e55ed06 (expected to - * appear in 2.27) doesn't produce a type for EFI binaries. - */ - input_format == fmt_sysv && !*type && stype == '?' && sym && - sym[1] && strchr("cSsoh", sym[1]) && !sym[2])) { - /* - * gas prior to binutils commit fbdf9406b0 (expected to appear - * in 2.27) outputs symbol table entries resulting from .file - * in reverse order. If we get two consecutive file symbols, - * prefer the first one if that names an object file or has a - * directory component (to cover multiply compiled files). - */ - bool multi = strchr(str, '/') || (sym && sym[1] == 'o'); - - if (multi || last != multi_source) { - free(filename); - filename = *str ? strdup(str) : NULL; - } - last = multi ? multi_source : single_source; - goto skip_tail; - } - - last = symbol; - rc = -1; - - sym = str; - /* skip prefix char */ - if (symbol_prefix_char && str[0] == symbol_prefix_char) - sym++; - - /* Ignore most absolute/undefined (?) symbols. */ - if (strcmp(sym, "_stext") == 0) - _stext = s->addr; - else if (strcmp(sym, "_etext") == 0) - _etext = s->addr; - else if (strcmp(sym, "_sinittext") == 0) - _sinittext = s->addr; - else if (strcmp(sym, "_einittext") == 0) - _einittext = s->addr; - else if (strcmp(sym, "_sextratext") == 0) - _sextratext = s->addr; - else if (strcmp(sym, "_eextratext") == 0) - _eextratext = s->addr; - else if (toupper((uint8_t)stype) == 'A') - { - /* Keep these useful absolute symbols */ - if (strcmp(sym, "__gp")) - goto skip_tail; - } - else if (toupper((uint8_t)stype) == 'U' || - toupper((uint8_t)stype) == 'N' || - is_arm_mapping_symbol(sym)) - goto skip_tail; - /* exclude also MIPS ELF local symbols ($L123 instead of .L123) */ - else if (str[0] == '$') - goto skip_tail; - - /* include the type field in the symbol name, so that it gets - * compressed together */ - s->len = strlen(str) + 1; - if (islower(stype) && filename) - s->len += strlen(filename) + 1; - s->sym = malloc(s->len + 1); - sym = SYMBOL_NAME(s); - if (islower(stype) && filename) { - sym = stpcpy(sym, filename); - *sym++ = '#'; - } - strcpy(sym, str); - if (sort_by_name || map_only) { - s->orig_symbol = strdup(SYMBOL_NAME(s)); - s->type = stype; /* As s->sym[0] ends mangled. 
*/ - } - s->sym[0] = stype; - rc = 0; - - skip_tail: - if ((input_format == fmt_sysv) && fgets(str, 500, in) == NULL) - /* ignore errors while discarding rest of line */; - - return rc; + char str[500], type[20] = ""; + char *sym, stype; + static enum { symbol, single_source, multi_source } last; + static char *filename; + int rc = -1; + + switch (input_format) + { + case fmt_bsd: + rc = fscanf(in, "%llx %c %499s\n", &s->addr, &stype, str); + break; + case fmt_sysv: + while ( fscanf(in, "\n") == 1 ) + /* nothing */; + rc = fscanf(in, "%499[^ |] |%llx | %c |", str, &s->addr, &stype); + if ( rc == 3 && fscanf(in, " %19[^ |] |", type) != 1 ) + *type = '\0'; + break; + } + if ( rc != 3 ) + { + if ( rc != EOF ) + { + /* skip line */ + if ( fgets(str, 500, in) == NULL ) + return -1; /* must check fgets result */ + } + return -1; + } + + sym = strrchr(str, '.'); + if ( strcasecmp(type, "FILE") == 0 || + (/* + * GNU nm prior to binutils commit 552e55ed06 (expected to + * appear in 2.27) doesn't produce a type for EFI binaries. + */ + input_format == fmt_sysv && !*type && stype == '?' && sym && sym[1] && + strchr("cSsoh", sym[1]) && !sym[2]) ) + { + /* + * gas prior to binutils commit fbdf9406b0 (expected to appear + * in 2.27) outputs symbol table entries resulting from .file + * in reverse order. If we get two consecutive file symbols, + * prefer the first one if that names an object file or has a + * directory component (to cover multiply compiled files). + */ + bool multi = strchr(str, '/') || (sym && sym[1] == 'o'); + + if ( multi || last != multi_source ) + { + free(filename); + filename = *str ? strdup(str) : NULL; + } + last = multi ? multi_source : single_source; + goto skip_tail; + } + + last = symbol; + rc = -1; + + sym = str; + /* skip prefix char */ + if ( symbol_prefix_char && str[0] == symbol_prefix_char ) + sym++; + + /* Ignore most absolute/undefined (?) symbols. */ + if ( strcmp(sym, "_stext") == 0 ) + _stext = s->addr; + else if ( strcmp(sym, "_etext") == 0 ) + _etext = s->addr; + else if ( strcmp(sym, "_sinittext") == 0 ) + _sinittext = s->addr; + else if ( strcmp(sym, "_einittext") == 0 ) + _einittext = s->addr; + else if ( strcmp(sym, "_sextratext") == 0 ) + _sextratext = s->addr; + else if ( strcmp(sym, "_eextratext") == 0 ) + _eextratext = s->addr; + else if ( toupper((uint8_t)stype) == 'A' ) + { + /* Keep these useful absolute symbols */ + if ( strcmp(sym, "__gp") ) + goto skip_tail; + } + else if ( toupper((uint8_t)stype) == 'U' || + toupper((uint8_t)stype) == 'N' || is_arm_mapping_symbol(sym) ) + goto skip_tail; + /* exclude also MIPS ELF local symbols ($L123 instead of .L123) */ + else if ( str[0] == '$' ) + goto skip_tail; + + /* include the type field in the symbol name, so that it gets + * compressed together */ + s->len = strlen(str) + 1; + if ( islower(stype) && filename ) + s->len += strlen(filename) + 1; + s->sym = malloc(s->len + 1); + sym = SYMBOL_NAME(s); + if ( islower(stype) && filename ) + { + sym = stpcpy(sym, filename); + *sym++ = '#'; + } + strcpy(sym, str); + if ( sort_by_name || map_only ) + { + s->orig_symbol = strdup(SYMBOL_NAME(s)); + s->type = stype; /* As s->sym[0] ends mangled. 
*/ + } + s->sym[0] = stype; + rc = 0; + +skip_tail: + if ( (input_format == fmt_sysv) && fgets(str, 500, in) == NULL ) + /* ignore errors while discarding rest of line */; + + return rc; } static int symbol_valid(struct sym_entry *s) { - int offset = 1; - - /* skip prefix char */ - if (symbol_prefix_char && *(s->sym + 1) == symbol_prefix_char) - offset++; - - /* if --all-symbols is not specified, then symbols outside the text - * and inittext sections are discarded */ - if (!all_symbols) { - if ((s->addr < _stext || s->addr > _etext) - && (s->addr < _sinittext || s->addr > _einittext) - && (s->addr < _sextratext || s->addr > _eextratext)) - return 0; - /* Corner case. Discard any symbols with the same value as - * _etext _einittext or _eextratext; they can move between pass - * 1 and 2 when the symbols data are added. If these symbols - * move then they may get dropped in pass 2, which breaks the - * symbols rules. - */ - if ((s->addr == _etext && strcmp((char*)s->sym + offset, "_etext")) || - (s->addr == _einittext && strcmp((char*)s->sym + offset, "_einittext")) || - (s->addr == _eextratext && strcmp((char*)s->sym + offset, "_eextratext"))) - return 0; - } - - /* Exclude symbols which vary between passes. */ - if (strstr((char *)s->sym + offset, "_compiled.")) - return 0; - - return 1; + int offset = 1; + + /* skip prefix char */ + if ( symbol_prefix_char && *(s->sym + 1) == symbol_prefix_char ) + offset++; + + /* if --all-symbols is not specified, then symbols outside the text + * and inittext sections are discarded */ + if ( !all_symbols ) + { + if ( (s->addr < _stext || s->addr > _etext) && + (s->addr < _sinittext || s->addr > _einittext) && + (s->addr < _sextratext || s->addr > _eextratext) ) + return 0; + /* Corner case. Discard any symbols with the same value as + * _etext _einittext or _eextratext; they can move between pass + * 1 and 2 when the symbols data are added. If these symbols + * move then they may get dropped in pass 2, which breaks the + * symbols rules. + */ + if ( (s->addr == _etext && strcmp((char *)s->sym + offset, "_etext")) || + (s->addr == _einittext && + strcmp((char *)s->sym + offset, "_einittext")) || + (s->addr == _eextratext && + strcmp((char *)s->sym + offset, "_eextratext")) ) + return 0; + } + + /* Exclude symbols which vary between passes. 
*/ + if ( strstr((char *)s->sym + offset, "_compiled.") ) + return 0; + + return 1; } static void read_map(FILE *in) { - while (!feof(in)) { - if (table_cnt >= table_size) { - table_size += 10000; - table = realloc(table, sizeof(*table) * table_size); - if (!table) { - fprintf(stderr, "out of memory\n"); - exit (1); - } - } - if (read_symbol(in, &table[table_cnt]) == 0) - table_cnt++; - } + while ( !feof(in) ) + { + if ( table_cnt >= table_size ) + { + table_size += 10000; + table = realloc(table, sizeof(*table) * table_size); + if ( !table ) + { + fprintf(stderr, "out of memory\n"); + exit(1); + } + } + if ( read_symbol(in, &table[table_cnt]) == 0 ) + table_cnt++; + } } static void output_label(char *label) { - if (symbol_prefix_char) - printf(".globl %c%s\n", symbol_prefix_char, label); - else - printf(".globl %s\n", label); - printf("\tALGN\n"); - if (symbol_prefix_char) - printf("%c%s:\n", symbol_prefix_char, label); - else - printf("%s:\n", label); + if ( symbol_prefix_char ) + printf(".globl %c%s\n", symbol_prefix_char, label); + else + printf(".globl %s\n", label); + printf("\tALGN\n"); + if ( symbol_prefix_char ) + printf("%c%s:\n", symbol_prefix_char, label); + else + printf("%s:\n", label); } /* uncompress a compressed symbol. When this function is called, the best table * might still be compressed itself, so the function needs to be recursive */ static int expand_symbol(unsigned char *data, int len, char *result) { - int c, rlen, total=0; - - while (len) { - c = *data; - /* if the table holds a single char that is the same as the one - * we are looking for, then end the search */ - if (best_table[c][0]==c && best_table_len[c]==1) { - *result++ = c; - total++; - } else { - /* if not, recurse and expand */ - rlen = expand_symbol(best_table[c], best_table_len[c], result); - total += rlen; - result += rlen; - } - data++; - len--; - } - *result=0; - - return total; + int c, rlen, total = 0; + + while ( len ) + { + c = *data; + /* if the table holds a single char that is the same as the one + * we are looking for, then end the search */ + if ( best_table[c][0] == c && best_table_len[c] == 1 ) + { + *result++ = c; + total++; + } + else + { + /* if not, recurse and expand */ + rlen = expand_symbol(best_table[c], best_table_len[c], result); + total += rlen; + result += rlen; + } + data++; + len--; + } + *result = 0; + + return total; } /* Sort by original (non mangled) symbol name, then type. 
*/ static int compare_name_orig(const void *p1, const void *p2) { - const struct sym_entry *sym1 = p1; - const struct sym_entry *sym2 = p2; - int rc; + const struct sym_entry *sym1 = p1; + const struct sym_entry *sym2 = p2; + int rc; - rc = strcmp(sym1->orig_symbol, sym2->orig_symbol); + rc = strcmp(sym1->orig_symbol, sym2->orig_symbol); - if (!rc) - rc = sym1->type - sym2->type; + if ( !rc ) + rc = sym1->type - sym2->type; - return rc; + return rc; } static void write_src(void) { - unsigned int i, k, off; - unsigned int best_idx[256]; - unsigned int *markers; - char buf[KSYM_NAME_LEN+1]; - - if (map_only) { - for (i = 0; i < table_cnt; i++) - printf("%#llx %c %s\n", table[i].addr, table[i].type, - table[i].orig_symbol); - - return; - } - printf("#include \n"); - printf("#include \n"); - printf("#if BITS_PER_LONG == 64 && !defined(SYMBOLS_ORIGIN)\n"); - printf("#define PTR .quad\n"); - printf("#define ALGN .align 8\n"); - printf("#else\n"); - printf("#define PTR .long\n"); - printf("#define ALGN .align 4\n"); - printf("#endif\n"); - - printf("\t.section .rodata, \"a\"\n"); - - printf("#ifndef SYMBOLS_ORIGIN\n"); - printf("#define SYMBOLS_ORIGIN 0\n"); - output_label("symbols_addresses"); - printf("#else\n"); - output_label("symbols_offsets"); - printf("#endif\n"); - for (i = 0; i < table_cnt; i++) { - printf("\tPTR\t%#llx - SYMBOLS_ORIGIN\n", table[i].addr); - } - printf("\n"); - - output_label("symbols_num_syms"); - printf("\t.long\t%d\n", table_cnt); - printf("\n"); - - /* table of offset markers, that give the offset in the compressed stream - * every 256 symbols */ - markers = (unsigned int *) malloc(sizeof(unsigned int) * ((table_cnt + 255) / 256)); - - output_label("symbols_names"); - off = 0; - for (i = 0; i < table_cnt; i++) { - if ((i & 0xFF) == 0) - markers[i >> 8] = off; - - printf("\t.byte 0x%02x", table[i].len); - for (k = 0; k < table[i].len; k++) - printf(", 0x%02x", table[i].sym[k]); - printf("\n"); - - table[i].stream_offset = off; - off += table[i].len + 1; - } - printf("\n"); - - output_label("symbols_markers"); - for (i = 0; i < ((table_cnt + 255) >> 8); i++) - printf("\t.long\t%d\n", markers[i]); - printf("\n"); - - - output_label("symbols_token_table"); - off = 0; - for (i = 0; i < 256; i++) { - best_idx[i] = off; - expand_symbol(best_table[i], best_table_len[i], buf); - printf("\t.asciz\t\"%s\"\n", buf); - off += strlen(buf) + 1; - } - printf("\n"); - - output_label("symbols_token_index"); - for (i = 0; i < 256; i++) - printf("\t.short\t%d\n", best_idx[i]); - printf("\n"); - - if (!sort_by_name) { - free(markers); - return; - } - - /* Sorted by original symbol names and type. */ - qsort(table, table_cnt, sizeof(*table), compare_name_orig); - - output_label("symbols_sorted_offsets"); - /* A fixed sized array with two entries: offset in the - * compressed stream (for symbol name), and offset in - * symbols_addresses (or symbols_offset). 
*/ - for (i = 0; i < table_cnt; i++) { - printf("\t.long %u, %u\n", table[i].stream_offset, table[i].addr_idx); - } - printf("\n"); - - free(markers); + unsigned int i, k, off; + unsigned int best_idx[256]; + unsigned int *markers; + char buf[KSYM_NAME_LEN + 1]; + + if ( map_only ) + { + for ( i = 0; i < table_cnt; i++ ) + printf("%#llx %c %s\n", table[i].addr, table[i].type, + table[i].orig_symbol); + + return; + } + printf("#include \n"); + printf("#include \n"); + printf("#if BITS_PER_LONG == 64 && !defined(SYMBOLS_ORIGIN)\n"); + printf("#define PTR .quad\n"); + printf("#define ALGN .align 8\n"); + printf("#else\n"); + printf("#define PTR .long\n"); + printf("#define ALGN .align 4\n"); + printf("#endif\n"); + + printf("\t.section .rodata, \"a\"\n"); + + printf("#ifndef SYMBOLS_ORIGIN\n"); + printf("#define SYMBOLS_ORIGIN 0\n"); + output_label("symbols_addresses"); + printf("#else\n"); + output_label("symbols_offsets"); + printf("#endif\n"); + for ( i = 0; i < table_cnt; i++ ) + { + printf("\tPTR\t%#llx - SYMBOLS_ORIGIN\n", table[i].addr); + } + printf("\n"); + + output_label("symbols_num_syms"); + printf("\t.long\t%d\n", table_cnt); + printf("\n"); + + /* table of offset markers, that give the offset in the compressed stream + * every 256 symbols */ + markers = (unsigned int *)malloc(sizeof(unsigned int) * + ((table_cnt + 255) / 256)); + + output_label("symbols_names"); + off = 0; + for ( i = 0; i < table_cnt; i++ ) + { + if ( (i & 0xFF) == 0 ) + markers[i >> 8] = off; + + printf("\t.byte 0x%02x", table[i].len); + for ( k = 0; k < table[i].len; k++ ) + printf(", 0x%02x", table[i].sym[k]); + printf("\n"); + + table[i].stream_offset = off; + off += table[i].len + 1; + } + printf("\n"); + + output_label("symbols_markers"); + for ( i = 0; i < ((table_cnt + 255) >> 8); i++ ) + printf("\t.long\t%d\n", markers[i]); + printf("\n"); + + output_label("symbols_token_table"); + off = 0; + for ( i = 0; i < 256; i++ ) + { + best_idx[i] = off; + expand_symbol(best_table[i], best_table_len[i], buf); + printf("\t.asciz\t\"%s\"\n", buf); + off += strlen(buf) + 1; + } + printf("\n"); + + output_label("symbols_token_index"); + for ( i = 0; i < 256; i++ ) + printf("\t.short\t%d\n", best_idx[i]); + printf("\n"); + + if ( !sort_by_name ) + { + free(markers); + return; + } + + /* Sorted by original symbol names and type. */ + qsort(table, table_cnt, sizeof(*table), compare_name_orig); + + output_label("symbols_sorted_offsets"); + /* A fixed sized array with two entries: offset in the + * compressed stream (for symbol name), and offset in + * symbols_addresses (or symbols_offset). 
*/ + for ( i = 0; i < table_cnt; i++ ) + { + printf("\t.long %u, %u\n", table[i].stream_offset, table[i].addr_idx); + } + printf("\n"); + + free(markers); } - /* table lookup compression functions */ /* count all the possible tokens in a symbol */ static void learn_symbol(unsigned char *symbol, int len) { - int i; + int i; - for (i = 0; i < len - 1; i++) - token_profit[ symbol[i] + (symbol[i + 1] << 8) ]++; + for ( i = 0; i < len - 1; i++ ) + token_profit[symbol[i] + (symbol[i + 1] << 8)]++; } /* decrease the count for all the possible tokens in a symbol */ static void forget_symbol(unsigned char *symbol, int len) { - int i; + int i; - for (i = 0; i < len - 1; i++) - token_profit[ symbol[i] + (symbol[i + 1] << 8) ]--; + for ( i = 0; i < len - 1; i++ ) + token_profit[symbol[i] + (symbol[i + 1] << 8)]--; } -/* remove all the invalid symbols from the table and do the initial token count */ +/* remove all the invalid symbols from the table and do the initial token count + */ static void build_initial_tok_table(void) { - unsigned int i, pos; - - pos = 0; - for (i = 0; i < table_cnt; i++) { - if ( symbol_valid(&table[i]) ) { - if (pos != i) - table[pos] = table[i]; - learn_symbol(table[pos].sym, table[pos].len); - pos++; - } - } - table_cnt = pos; + unsigned int i, pos; + + pos = 0; + for ( i = 0; i < table_cnt; i++ ) + { + if ( symbol_valid(&table[i]) ) + { + if ( pos != i ) + table[pos] = table[i]; + learn_symbol(table[pos].sym, table[pos].len); + pos++; + } + } + table_cnt = pos; } static void *memmem_pvt(void *h, size_t hlen, void *n, size_t nlen) { - char *p; - for (p = h; (p - (char *)h) <= (long)(hlen - nlen); p++) - if (!memcmp(p, n, nlen)) return p; - return NULL; + char *p; + for ( p = h; (p - (char *)h) <= (long)(hlen - nlen); p++ ) + if ( !memcmp(p, n, nlen) ) + return p; + return NULL; } /* replace a given token in all the valid symbols. 
Use the sampled symbols * to update the counts */ static void compress_symbols(unsigned char *str, int idx) { - unsigned int i, len, size; - unsigned char *p1, *p2; - - for (i = 0; i < table_cnt; i++) { + unsigned int i, len, size; + unsigned char *p1, *p2; - len = table[i].len; - p1 = table[i].sym; + for ( i = 0; i < table_cnt; i++ ) + { + len = table[i].len; + p1 = table[i].sym; - table[i].addr_idx = i; - /* find the token on the symbol */ - p2 = memmem_pvt(p1, len, str, 2); - if (!p2) continue; + table[i].addr_idx = i; + /* find the token on the symbol */ + p2 = memmem_pvt(p1, len, str, 2); + if ( !p2 ) + continue; - /* decrease the counts for this symbol's tokens */ - forget_symbol(table[i].sym, len); + /* decrease the counts for this symbol's tokens */ + forget_symbol(table[i].sym, len); - size = len; + size = len; - do { - *p2 = idx; - p2++; - size -= (p2 - p1); - memmove(p2, p2 + 1, size); - p1 = p2; - len--; + do { + *p2 = idx; + p2++; + size -= (p2 - p1); + memmove(p2, p2 + 1, size); + p1 = p2; + len--; - if (size < 2) break; + if ( size < 2 ) + break; - /* find the token on the symbol */ - p2 = memmem_pvt(p1, size, str, 2); + /* find the token on the symbol */ + p2 = memmem_pvt(p1, size, str, 2); - } while (p2); + } while ( p2 ); - table[i].len = len; + table[i].len = len; - /* increase the counts for this symbol's new tokens */ - learn_symbol(table[i].sym, len); - } + /* increase the counts for this symbol's new tokens */ + learn_symbol(table[i].sym, len); + } } /* search the token with the maximum profit */ static int find_best_token(void) { - int i, best, bestprofit; - - bestprofit=-10000; - best = 0; - - for (i = 0; i < 0x10000; i++) { - if (token_profit[i] > bestprofit) { - best = i; - bestprofit = token_profit[i]; - } - } - return best; + int i, best, bestprofit; + + bestprofit = -10000; + best = 0; + + for ( i = 0; i < 0x10000; i++ ) + { + if ( token_profit[i] > bestprofit ) + { + best = i; + bestprofit = token_profit[i]; + } + } + return best; } /* this is the core of the algorithm: calculate the "best" table */ static void optimize_result(void) { - int i, best; - - /* using the '\0' symbol last allows compress_symbols to use standard - * fast string functions */ - for (i = 255; i >= 0; i--) { - - /* if this table slot is empty (it is not used by an actual - * original char code */ - if (!best_table_len[i]) { - - /* find the token with the breates profit value */ - best = find_best_token(); - - /* place it in the "best" table */ - best_table_len[i] = 2; - best_table[i][0] = best & 0xFF; - best_table[i][1] = (best >> 8) & 0xFF; - - /* replace this token in all the valid symbols */ - compress_symbols(best_table[i], i); - } - } + int i, best; + + /* using the '\0' symbol last allows compress_symbols to use standard + * fast string functions */ + for ( i = 255; i >= 0; i-- ) + { + /* if this table slot is empty (it is not used by an actual + * original char code */ + if ( !best_table_len[i] ) + { + /* find the token with the breates profit value */ + best = find_best_token(); + + /* place it in the "best" table */ + best_table_len[i] = 2; + best_table[i][0] = best & 0xFF; + best_table[i][1] = (best >> 8) & 0xFF; + + /* replace this token in all the valid symbols */ + compress_symbols(best_table[i], i); + } + } } /* start by placing the symbols that are actually used on the table */ static void insert_real_symbols_in_table(void) { - unsigned int i, j, c; - - memset(best_table, 0, sizeof(best_table)); - memset(best_table_len, 0, sizeof(best_table_len)); - - for (i = 0; i < 
table_cnt; i++) { - for (j = 0; j < table[i].len; j++) { - c = table[i].sym[j]; - best_table[c][0]=c; - best_table_len[c]=1; - } - } + unsigned int i, j, c; + + memset(best_table, 0, sizeof(best_table)); + memset(best_table_len, 0, sizeof(best_table_len)); + + for ( i = 0; i < table_cnt; i++ ) + { + for ( j = 0; j < table[i].len; j++ ) + { + c = table[i].sym[j]; + best_table[c][0] = c; + best_table_len[c] = 1; + } + } } static void optimize_token_table(void) { - build_initial_tok_table(); + build_initial_tok_table(); - insert_real_symbols_in_table(); + insert_real_symbols_in_table(); - /* When valid symbol is not registered, exit to error */ - if (!table_cnt) { - fprintf(stderr, "No valid symbol.\n"); - exit(1); - } + /* When valid symbol is not registered, exit to error */ + if ( !table_cnt ) + { + fprintf(stderr, "No valid symbol.\n"); + exit(1); + } - optimize_result(); + optimize_result(); } static int compare_value(const void *p1, const void *p2) { - const struct sym_entry *sym1 = p1; - const struct sym_entry *sym2 = p2; - - if (sym1->addr < sym2->addr) - return -1; - if (sym1->addr > sym2->addr) - return +1; - /* Prefer global symbols. */ - if (isupper(*sym1->sym)) - return -1; - if (isupper(*sym2->sym)) - return +1; - return 0; + const struct sym_entry *sym1 = p1; + const struct sym_entry *sym2 = p2; + + if ( sym1->addr < sym2->addr ) + return -1; + if ( sym1->addr > sym2->addr ) + return +1; + /* Prefer global symbols. */ + if ( isupper(*sym1->sym) ) + return -1; + if ( isupper(*sym2->sym) ) + return +1; + return 0; } static int compare_name(const void *p1, const void *p2) { - const struct sym_entry *sym1 = p1; - const struct sym_entry *sym2 = p2; + const struct sym_entry *sym1 = p1; + const struct sym_entry *sym2 = p2; - return strcmp(SYMBOL_NAME(sym1), SYMBOL_NAME(sym2)); + return strcmp(SYMBOL_NAME(sym1), SYMBOL_NAME(sym2)); } int main(int argc, char **argv) { - unsigned int i; - bool unsorted = false, warn_dup = false; - - if (argc >= 2) { - for (i = 1; i < argc; i++) { - if(strcmp(argv[i], "--all-symbols") == 0) - all_symbols = 1; - else if (strncmp(argv[i], "--symbol-prefix=", 16) == 0) { - char *p = &argv[i][16]; - /* skip quote */ - if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\'')) - p++; - symbol_prefix_char = *p; - } else if (strcmp(argv[i], "--sysv") == 0) - input_format = fmt_sysv; - else if (strcmp(argv[i], "--sort") == 0) - unsorted = true; - else if (strcmp(argv[i], "--sort-by-name") == 0) - sort_by_name = 1; - else if (strcmp(argv[i], "--warn-dup") == 0) - warn_dup = true; - else if (strcmp(argv[i], "--xensyms") == 0) - map_only = true; - else - usage(); - } - } else if (argc != 1) - usage(); - - read_map(stdin); - - if (warn_dup) { - qsort(table, table_cnt, sizeof(*table), compare_name); - for (i = 1; i < table_cnt; ++i) - if (strcmp(SYMBOL_NAME(table + i - 1), - SYMBOL_NAME(table + i)) == 0 && - table[i - 1].addr != table[i].addr) - fprintf(stderr, - "Duplicate symbol '%s' (%llx != %llx)\n", - SYMBOL_NAME(table + i), - table[i].addr, table[i - 1].addr); - unsorted = true; - } - - if (unsorted) - qsort(table, table_cnt, sizeof(*table), compare_value); - - optimize_token_table(); - write_src(); - - return 0; + unsigned int i; + bool unsorted = false, warn_dup = false; + + if ( argc >= 2 ) + { + for ( i = 1; i < argc; i++ ) + { + if ( strcmp(argv[i], "--all-symbols") == 0 ) + all_symbols = 1; + else if ( strncmp(argv[i], "--symbol-prefix=", 16) == 0 ) + { + char *p = &argv[i][16]; + /* skip quote */ + if ( (*p == '"' && *(p + 2) == '"') || + (*p == 
'\'' && *(p + 2) == '\'') ) + p++; + symbol_prefix_char = *p; + } + else if ( strcmp(argv[i], "--sysv") == 0 ) + input_format = fmt_sysv; + else if ( strcmp(argv[i], "--sort") == 0 ) + unsorted = true; + else if ( strcmp(argv[i], "--sort-by-name") == 0 ) + sort_by_name = 1; + else if ( strcmp(argv[i], "--warn-dup") == 0 ) + warn_dup = true; + else if ( strcmp(argv[i], "--xensyms") == 0 ) + map_only = true; + else + usage(); + } + } + else if ( argc != 1 ) + usage(); + + read_map(stdin); + + if ( warn_dup ) + { + qsort(table, table_cnt, sizeof(*table), compare_name); + for ( i = 1; i < table_cnt; ++i ) + if ( strcmp(SYMBOL_NAME(table + i - 1), SYMBOL_NAME(table + i)) == + 0 && + table[i - 1].addr != table[i].addr ) + fprintf(stderr, "Duplicate symbol '%s' (%llx != %llx)\n", + SYMBOL_NAME(table + i), table[i].addr, + table[i - 1].addr); + unsorted = true; + } + + if ( unsorted ) + qsort(table, table_cnt, sizeof(*table), compare_value); + + optimize_token_table(); + write_src(); + + return 0; } diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c index 1fe0e746fa..a6e7727862 100644 --- a/xen/xsm/dummy.c +++ b/xen/xsm/dummy.c @@ -15,13 +15,13 @@ struct xsm_operations dummy_xsm_ops; -#define set_to_dummy_if_null(ops, function) \ - do { \ - if ( !ops->function ) \ - ops->function = xsm_##function; \ - } while (0) +#define set_to_dummy_if_null(ops, function) \ + do { \ + if ( !ops->function ) \ + ops->function = xsm_##function; \ + } while ( 0 ) -void __init xsm_fixup_ops (struct xsm_operations *ops) +void __init xsm_fixup_ops(struct xsm_operations *ops) { set_to_dummy_if_null(ops, security_domaininfo); set_to_dummy_if_null(ops, domain_create); diff --git a/xen/xsm/flask/avc.c b/xen/xsm/flask/avc.c index 640c708659..b71177c86b 100644 --- a/xen/xsm/flask/avc.c +++ b/xen/xsm/flask/avc.c @@ -13,9 +13,9 @@ * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. 
*/ - + /* Ported to Xen 3.0, George Coker, */ - + #include #include #include @@ -34,7 +34,7 @@ #include "avc_ss.h" static const struct av_perm_to_string av_perm_to_string[] = { -#define S_(c, v, s) { c, v, s }, +#define S_(c, v, s) {c, v, s}, #include "av_perm_to_string.h" #undef S_ }; @@ -52,38 +52,43 @@ const struct selinux_class_perm selinux_class_perm = { .cts_len = ARRAY_SIZE(class_to_string), }; -#define AVC_CACHE_SLOTS 512 -#define AVC_DEF_CACHE_THRESHOLD 512 -#define AVC_CACHE_RECLAIM 16 +#define AVC_CACHE_SLOTS 512 +#define AVC_DEF_CACHE_THRESHOLD 512 +#define AVC_CACHE_RECLAIM 16 #ifdef CONFIG_XSM_FLASK_AVC_STATS -#define avc_cache_stats_incr(field) \ -do { \ - __get_cpu_var(avc_cache_stats).field++; \ -} while (0) +#define avc_cache_stats_incr(field) \ + do { \ + __get_cpu_var(avc_cache_stats).field++; \ + } while ( 0 ) #else -#define avc_cache_stats_incr(field) do {} while (0) +#define avc_cache_stats_incr(field) \ + do { \ + } while ( 0 ) #endif -struct avc_entry { - u32 ssid; - u32 tsid; - u16 tclass; - struct av_decision avd; +struct avc_entry +{ + u32 ssid; + u32 tsid; + u16 tclass; + struct av_decision avd; }; -struct avc_node { - struct avc_entry ae; - struct hlist_node list; /* anchored in avc_cache->slots[i] */ - struct rcu_head rhead; +struct avc_node +{ + struct avc_entry ae; + struct hlist_node list; /* anchored in avc_cache->slots[i] */ + struct rcu_head rhead; }; -struct avc_cache { - struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */ - spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */ - atomic_t lru_hint; /* LRU hint for reclaim scan */ - atomic_t active_nodes; - u32 latest_notif; /* latest revocation notification */ +struct avc_cache +{ + struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */ + spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */ + atomic_t lru_hint; /* LRU hint for reclaim scan */ + atomic_t active_nodes; + u32 latest_notif; /* latest revocation notification */ }; /* Exported via Flask hypercall */ @@ -99,7 +104,7 @@ static DEFINE_RCU_READ_LOCK(avc_rcu_lock); static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass) { - return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1); + return (ssid ^ (tsid << 2) ^ (tclass << 4)) & (AVC_CACHE_SLOTS - 1); } /* no use making this larger than the printk buffer */ @@ -107,7 +112,8 @@ static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass) static DEFINE_SPINLOCK(avc_emerg_lock); static char avc_emerg_buf[AVC_BUF_SIZE]; -struct avc_dump_buf { +struct avc_dump_buf +{ char *start; char *pos; u32 free; @@ -118,7 +124,7 @@ static void avc_printk(struct avc_dump_buf *buf, const char *fmt, ...) 
int i; va_list args; - again: +again: va_start(args, fmt); i = vsnprintf(buf->pos, buf->free, fmt, args); va_end(args); @@ -195,7 +201,8 @@ static void avc_dump_av(struct avc_dump_buf *buf, u16 tclass, u32 av) * @tsid: target security identifier * @tclass: target security class */ -static void avc_dump_query(struct avc_dump_buf *buf, u32 ssid, u32 tsid, u16 tclass) +static void avc_dump_query(struct avc_dump_buf *buf, u32 ssid, u32 tsid, + u16 tclass) { int rc; char *scontext; @@ -259,7 +266,7 @@ int avc_get_hash_stats(struct xen_flask_hash_stats *arg) slots_used++; chain_len = 0; - hlist_for_each_entry_rcu(node, next, head, list) + hlist_for_each_entry_rcu (node, next, head, list) chain_len++; if ( chain_len > max_chain_len ) max_chain_len = chain_len; @@ -267,7 +274,7 @@ int avc_get_hash_stats(struct xen_flask_hash_stats *arg) } rcu_read_unlock(&avc_rcu_lock); - + arg->entries = atomic_read(&avc_cache.active_nodes); arg->buckets_used = slots_used; arg->buckets_total = AVC_CACHE_SLOTS; @@ -307,22 +314,24 @@ static void avc_node_replace(struct avc_node *new, struct avc_node *old) static inline int avc_reclaim_node(void) { struct avc_node *node; - int hvalue, try, ecx; + int hvalue, + try + , ecx; unsigned long flags; struct hlist_head *head; struct hlist_node *next; spinlock_t *lock; - for ( try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++ ) + for ( try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try ++) { atomic_inc(&avc_cache.lru_hint); - hvalue = atomic_read(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1); + hvalue = atomic_read(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1); head = &avc_cache.slots[hvalue]; lock = &avc_cache.slots_lock[hvalue]; spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flags); rcu_read_lock(&avc_rcu_lock); - hlist_for_each_entry(node, next, head, list) + hlist_for_each_entry (node, next, head, list) { avc_node_delete(node); avc_cache_stats_incr(reclaims); @@ -336,8 +345,8 @@ static inline int avc_reclaim_node(void) } rcu_read_unlock(&avc_rcu_lock); spin_unlock_irqrestore(lock, flags); - } - out: + } +out: return ecx; } @@ -346,7 +355,7 @@ static struct avc_node *avc_alloc_node(void) struct avc_node *node; node = xzalloc(struct avc_node); - if (!node) + if ( !node ) goto out; INIT_RCU_HEAD(&node->rhead); @@ -357,7 +366,7 @@ static struct avc_node *avc_alloc_node(void) if ( atomic_read(&avc_cache.active_nodes) > avc_cache_threshold ) avc_reclaim_node(); - out: +out: return node; } @@ -379,10 +388,9 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass) hvalue = avc_hash(ssid, tsid, tclass); head = &avc_cache.slots[hvalue]; - hlist_for_each_entry_rcu(node, next, head, list) + hlist_for_each_entry_rcu (node, next, head, list) { - if ( ssid == node->ae.ssid && - tclass == node->ae.tclass && + if ( ssid == node->ae.ssid && tclass == node->ae.tclass && tsid == node->ae.tsid ) { ret = node; @@ -433,8 +441,8 @@ static int avc_latest_notif_update(int seqno, int is_insert) { if ( seqno < avc_cache.latest_notif ) { - printk(KERN_WARNING "avc: seqno %d < latest_notif %d\n", - seqno, avc_cache.latest_notif); + printk(KERN_WARNING "avc: seqno %d < latest_notif %d\n", seqno, + avc_cache.latest_notif); ret = -EAGAIN; } } @@ -489,10 +497,9 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, lock = &avc_cache.slots_lock[hvalue]; spin_lock_irqsave(lock, flag); - hlist_for_each_entry(pos, next, head, list) + hlist_for_each_entry (pos, next, head, list) { - if ( pos->ae.ssid == ssid && - pos->ae.tsid == tsid && + if ( pos->ae.ssid == ssid && 
pos->ae.tsid == tsid && pos->ae.tclass == tclass ) { avc_node_replace(node, pos); @@ -503,7 +510,7 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, found: spin_unlock_irqrestore(lock, flag); } - out: +out: return node; } @@ -566,7 +573,8 @@ void avc_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested, if ( a && (a->sdom || a->tdom) ) { if ( a->sdom && a->tdom && a->sdom != a->tdom ) - avc_printk(&buf, "domid=%d target=%d ", a->sdom->domain_id, a->tdom->domain_id); + avc_printk(&buf, "domid=%d target=%d ", a->sdom->domain_id, + a->tdom->domain_id); else if ( a->sdom ) avc_printk(&buf, "domid=%d ", a->sdom->domain_id); else @@ -574,7 +582,8 @@ void avc_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested, } else if ( cdom ) avc_printk(&buf, "domid=%d ", cdom->domain_id); - switch ( a ? a->type : 0 ) { + switch (a ? a->type : 0) + { case AVC_AUDIT_DATA_DEV: avc_printk(&buf, "device=%#lx ", a->device); break; @@ -613,8 +622,7 @@ void avc_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested, * otherwise, this function update the AVC entry. The original AVC-entry object * will release later by RCU. */ -static int avc_update_node(u32 perms, u32 ssid, u32 tsid, u16 tclass, - u32 seqno) +static int avc_update_node(u32 perms, u32 ssid, u32 tsid, u16 tclass, u32 seqno) { int hvalue, rc = 0; unsigned long flag; @@ -622,7 +630,7 @@ static int avc_update_node(u32 perms, u32 ssid, u32 tsid, u16 tclass, struct hlist_head *head; struct hlist_node *next; spinlock_t *lock; - + node = avc_alloc_node(); if ( !node ) { @@ -630,19 +638,17 @@ static int avc_update_node(u32 perms, u32 ssid, u32 tsid, u16 tclass, goto out; } - hvalue = avc_hash(ssid, tsid, tclass); + hvalue = avc_hash(ssid, tsid, tclass); head = &avc_cache.slots[hvalue]; lock = &avc_cache.slots_lock[hvalue]; spin_lock_irqsave(lock, flag); - hlist_for_each_entry(pos, next, head, list) + hlist_for_each_entry (pos, next, head, list) { - if ( ssid == pos->ae.ssid && - tsid == pos->ae.tsid && - tclass == pos->ae.tclass && - seqno == pos->ae.avd.seqno ) + if ( ssid == pos->ae.ssid && tsid == pos->ae.tsid && + tclass == pos->ae.tclass && seqno == pos->ae.avd.seqno ) { orig = pos; break; @@ -664,9 +670,9 @@ static int avc_update_node(u32 perms, u32 ssid, u32 tsid, u16 tclass, node->ae.avd.allowed |= perms; avc_node_replace(node, orig); - out_unlock: +out_unlock: spin_unlock_irqrestore(lock, flag); - out: +out: return rc; } @@ -690,12 +696,12 @@ int avc_ss_reset(u32 seqno) spin_lock_irqsave(lock, flag); rcu_read_lock(&avc_rcu_lock); - hlist_for_each_entry(node, next, head, list) + hlist_for_each_entry (node, next, head, list) avc_node_delete(node); rcu_read_unlock(&avc_rcu_lock); spin_unlock_irqrestore(lock, flag); } - + avc_latest_notif_update(seqno, 0); return rc; } @@ -741,12 +747,14 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested, else avd = &avd_entry; - rc = security_compute_av(ssid,tsid,tclass,requested,avd); + rc = security_compute_av(ssid, tsid, tclass, requested, avd); if ( rc ) goto out; rcu_read_lock(&avc_rcu_lock); - node = avc_insert(ssid,tsid,tclass,avd); - } else { + node = avc_insert(ssid, tsid, tclass, avd); + } + else + { if ( in_avd ) memcpy(in_avd, &node->ae.avd, sizeof(*in_avd)); avd = &node->ae.avd; @@ -757,13 +765,13 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested, if ( denied ) { if ( !flask_enforcing || (avd->flags & AVD_FLAGS_PERMISSIVE) ) - avc_update_node(requested, ssid,tsid,tclass,avd->seqno); + avc_update_node(requested, ssid, tsid, tclass, avd->seqno); 
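avc_has_perm_noaudit() above grants a request only when every requested bit is present in the cached allow vector; when flask is permissive (or the domain is marked permissive) the denial is recorded but not enforced. A small sketch of that decision with made-up permission bits:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    struct av_decision { uint32_t allowed; };

    static int has_perm(const struct av_decision *avd, uint32_t requested,
                        int enforcing)
    {
        uint32_t denied = requested & ~avd->allowed;

        if ( !denied )
            return 0;                    /* everything requested is allowed */
        return enforcing ? -EACCES : 0;  /* permissive mode only logs       */
    }

    int main(void)
    {
        struct av_decision avd = { .allowed = 0x3 };

        printf("%d\n", has_perm(&avd, 0x1, 1)); /* granted           -> 0       */
        printf("%d\n", has_perm(&avd, 0x4, 1)); /* denied, enforcing -> -EACCES */
        return 0;
    }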
else rc = -EACCES; } rcu_read_unlock(&avc_rcu_lock); - out: +out: return rc; } @@ -783,8 +791,8 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested, * permissions are granted, -%EACCES if any permissions are denied, or * another -errno upon other errors. */ -int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, - u32 requested, struct avc_audit_data *auditdata) +int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, u32 requested, + struct avc_audit_data *auditdata) { struct av_decision avd; int rc; diff --git a/xen/xsm/flask/flask_op.c b/xen/xsm/flask/flask_op.c index 1c4decc6cd..b1f4e563ea 100644 --- a/xen/xsm/flask/flask_op.c +++ b/xen/xsm/flask/flask_op.c @@ -25,7 +25,8 @@ #define _copy_to_guest copy_to_guest #define _copy_from_guest copy_from_guest -enum flask_bootparam_t __read_mostly flask_bootparam = FLASK_BOOTPARAM_ENFORCING; +enum flask_bootparam_t __read_mostly flask_bootparam = + FLASK_BOOTPARAM_ENFORCING; static int parse_flask_param(const char *s); custom_param("flask", parse_flask_param); @@ -33,21 +34,12 @@ bool __read_mostly flask_enforcing = true; #define MAX_POLICY_SIZE 0x4000000 -#define FLASK_COPY_OUT \ - ( \ - 1UL<ssid; if ( !dsec ) return -EACCES; - - return avc_has_perm(dsec->sid, SECINITSID_SECURITY, SECCLASS_SECURITY, + + return avc_has_perm(dsec->sid, SECINITSID_SECURITY, SECCLASS_SECURITY, perms, NULL); } @@ -107,7 +99,8 @@ static int flask_security_create(struct xen_flask_transition *arg) if ( rv ) return rv; - rv = security_transition_sid(arg->ssid, arg->tsid, arg->tclass, &arg->newsid); + rv = security_transition_sid(arg->ssid, arg->tsid, arg->tclass, + &arg->newsid); return rv; } @@ -129,7 +122,7 @@ static int flask_security_access(struct xen_flask_access *arg) arg->audit_allow = avd.auditallow; arg->audit_deny = avd.auditdeny; arg->seqno = avd.seqno; - + return rv; } @@ -185,7 +178,7 @@ static int flask_security_context(struct xen_flask_sid_context *arg) if ( rv < 0 ) goto out; - out: +out: xfree(buf); return rv; @@ -248,7 +241,8 @@ static int flask_disable(void) return 0; } -static int flask_security_setavc_threshold(struct xen_flask_setavc_threshold *arg) +static int +flask_security_setavc_threshold(struct xen_flask_setavc_threshold *arg) { int rv = 0; @@ -260,7 +254,7 @@ static int flask_security_setavc_threshold(struct xen_flask_setavc_threshold *ar avc_cache_threshold = arg->threshold; } - out: +out: return rv; } @@ -340,7 +334,7 @@ static int flask_security_set_bool(struct xen_flask_boolean *arg) rv = 0; } - out: +out: spin_unlock(&sel_sem); return rv; } @@ -375,13 +369,13 @@ static int flask_security_get_bool(struct xen_flask_boolean *arg) if ( nameout_len > arg->size ) rv = -ERANGE; arg->size = nameout_len; - + if ( !rv && _copy_to_guest(arg->name, nameout, nameout_len) ) rv = -EFAULT; xfree(nameout); } - out: +out: spin_unlock(&sel_sem); return rv; } @@ -401,7 +395,7 @@ static int flask_security_commit_bools(void) if ( bool_pending_values ) rv = security_set_bools(bool_num, bool_pending_values); - out: +out: spin_unlock(&sel_sem); return rv; } @@ -411,9 +405,9 @@ static int flask_security_make_bools(void) int ret = 0; int num; int *values = NULL; - + xfree(bool_pending_values); - + ret = security_get_bools(&num, NULL, &values, NULL); if ( ret != 0 ) goto out; @@ -421,7 +415,7 @@ static int flask_security_make_bools(void) bool_num = num; bool_pending_values = values; - out: +out: return ret; } @@ -482,15 +476,15 @@ static int flask_security_load(struct xen_flask_load *load) if ( !is_reload ) printk(XENLOG_INFO "Flask: Policy loaded, 
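The boolean handling in flask_op.c above stages new values in bool_pending_values and only pushes the whole set into the policy when the commit operation runs. A reduced sketch of that staging/commit split; the semaphore and the real security_set_bools() back-end are replaced by stand-ins:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int bool_num;
    static int *bool_pending_values;   /* staged values, not yet live */
    static int *live_values;           /* stands in for the loaded policy */

    static int set_bool(int idx, int value)
    {
        if ( !bool_pending_values || idx < 0 || idx >= bool_num )
            return -EINVAL;
        bool_pending_values[idx] = !!value;   /* staged only */
        return 0;
    }

    static int commit_bools(void)
    {
        if ( bool_pending_values )            /* push the whole set at once */
            memcpy(live_values, bool_pending_values, bool_num * sizeof(int));
        return 0;
    }

    static int make_bools(int num)
    {
        free(bool_pending_values);
        free(live_values);
        bool_pending_values = calloc(num, sizeof(int));
        live_values = calloc(num, sizeof(int));
        if ( !bool_pending_values || !live_values )
            return -ENOMEM;
        bool_num = num;
        return 0;
    }

    int main(void)
    {
        if ( make_bools(4) )
            return 1;
        set_bool(2, 1);
        commit_bools();
        printf("live[2]=%d\n", live_values[2]);
        return 0;
    }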
continuing in %s mode.\n", - flask_enforcing ? "enforcing" : "permissive"); + flask_enforcing ? "enforcing" : "permissive"); xfree(bool_pending_values); bool_pending_values = NULL; ret = 0; - out: +out: spin_unlock(&sel_sem); - out_free: +out_free: xfree(buf); return ret; } @@ -571,7 +565,7 @@ static int flask_get_peer_sid(struct xen_flask_peersid *arg) arg->sid = dsec->sid; rv = 0; - out: +out: spin_unlock(&d->event_lock); return rv; } @@ -595,22 +589,26 @@ static int flask_relabel_domain(struct xen_flask_relabel *arg) if ( arg->domid == DOMID_SELF ) { - rc = avc_has_perm(dsec->sid, arg->sid, SECCLASS_DOMAIN2, DOMAIN2__RELABELSELF, &ad); + rc = avc_has_perm(dsec->sid, arg->sid, SECCLASS_DOMAIN2, + DOMAIN2__RELABELSELF, &ad); if ( rc ) goto out; } else { - rc = avc_has_perm(csec->sid, dsec->sid, SECCLASS_DOMAIN2, DOMAIN2__RELABELFROM, &ad); + rc = avc_has_perm(csec->sid, dsec->sid, SECCLASS_DOMAIN2, + DOMAIN2__RELABELFROM, &ad); if ( rc ) goto out; - rc = avc_has_perm(csec->sid, arg->sid, SECCLASS_DOMAIN2, DOMAIN2__RELABELTO, &ad); + rc = avc_has_perm(csec->sid, arg->sid, SECCLASS_DOMAIN2, + DOMAIN2__RELABELTO, &ad); if ( rc ) goto out; } - rc = avc_has_perm(dsec->sid, arg->sid, SECCLASS_DOMAIN, DOMAIN__TRANSITION, &ad); + rc = avc_has_perm(dsec->sid, arg->sid, SECCLASS_DOMAIN, DOMAIN__TRANSITION, + &ad); if ( rc ) goto out; @@ -625,7 +623,7 @@ static int flask_relabel_domain(struct xen_flask_relabel *arg) &dsec->target_sid); } - out: +out: rcu_unlock_domain(d); return rc; } @@ -643,7 +641,7 @@ ret_t do_flask_op(XEN_GUEST_HANDLE_PARAM(xsm_op_t) u_flask_op) if ( op.interface_version != XEN_FLASK_INTERFACE_VERSION ) return -ENOSYS; - switch ( op.cmd ) + switch (op.cmd) { case FLASK_LOAD: rv = flask_security_load(&op.u.load); @@ -695,7 +693,7 @@ ret_t do_flask_op(XEN_GUEST_HANDLE_PARAM(xsm_op_t) u_flask_op) case FLASK_MLS: rv = flask_mls_enabled; - break; + break; case FLASK_DISABLE: rv = flask_disable(); @@ -750,13 +748,13 @@ ret_t do_flask_op(XEN_GUEST_HANDLE_PARAM(xsm_op_t) u_flask_op) if ( rv < 0 ) goto out; - if ( (FLASK_COPY_OUT&(1UL<sid; } -static u32 domain_target_sid(const struct domain *src, - const struct domain *dst) +static u32 domain_target_sid(const struct domain *src, const struct domain *dst) { struct domain_security_struct *ssec = src->ssid; struct domain_security_struct *dsec = dst->ssid; - if (src == dst) + if ( src == dst ) return ssec->self_sid; - if (src->target == dst) + if ( src->target == dst ) return ssec->target_sid; return dsec->sid; } @@ -59,8 +58,7 @@ static u32 evtchn_sid(const struct evtchn *chn) return chn->ssid.flask_sid; } -static int domain_has_perm(const struct domain *dom1, - const struct domain *dom2, +static int domain_has_perm(const struct domain *dom1, const struct domain *dom2, u16 class, u32 perms) { u32 ssid, tsid; @@ -106,8 +104,10 @@ static int get_irq_sid(int irq, u32 *sid, struct avc_audit_data *ad) { if ( irq >= nr_irqs || irq < 0 ) return -EINVAL; - if ( irq < nr_static_irqs ) { - if (ad) { + if ( irq < nr_static_irqs ) + { + if ( ad ) + { AVC_AUDIT_DATA_INIT(ad, IRQ); ad->irq = irq; } @@ -116,10 +116,12 @@ static int get_irq_sid(int irq, u32 *sid, struct avc_audit_data *ad) #ifdef CONFIG_HAS_PCI { struct irq_desc *desc = irq_to_desc(irq); - if ( desc->msi_desc && desc->msi_desc->dev ) { + if ( desc->msi_desc && desc->msi_desc->dev ) + { struct pci_dev *dev = desc->msi_desc->dev; u32 sbdf = (dev->seg << 16) | (dev->bus << 8) | dev->devfn; - if (ad) { + if ( ad ) + { AVC_AUDIT_DATA_INIT(ad, DEV); ad->device = sbdf; } @@ -128,7 +130,8 @@ 
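domain_target_sid() above chooses which SID represents the target of a check: a domain acting on itself uses its self SID, a domain acting on its designated target uses the target SID, and anything else uses the destination's own SID. A compact sketch of that three-way choice on simplified structures:

    #include <stdint.h>
    #include <stdio.h>

    struct dom_sec { uint32_t sid, self_sid, target_sid; };
    struct dom     { struct dom_sec sec; struct dom *target; };

    /* Same selection as domain_target_sid(), on toy structures. */
    static uint32_t target_sid(const struct dom *src, const struct dom *dst)
    {
        if ( src == dst )
            return src->sec.self_sid;    /* domain acting on itself */
        if ( src->target == dst )
            return src->sec.target_sid;  /* acting on its set target */
        return dst->sec.sid;             /* ordinary cross-domain case */
    }

    int main(void)
    {
        struct dom a = { { 1, 2, 3 }, NULL }, b = { { 4, 5, 6 }, NULL };

        a.target = &b;
        printf("%u %u %u\n",
               target_sid(&a, &a),   /* 2: self_sid   */
               target_sid(&a, &b),   /* 3: target_sid */
               target_sid(&b, &a));  /* 1: a's own sid */
        return 0;
    }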
static int get_irq_sid(int irq, u32 *sid, struct avc_audit_data *ad) } #endif - if (ad) { + if ( ad ) + { AVC_AUDIT_DATA_INIT(ad, IRQ); ad->irq = irq; } @@ -163,7 +166,7 @@ static int flask_domain_alloc_security(struct domain *d) if ( !dsec ) return -ENOMEM; - switch ( d->domain_id ) + switch (d->domain_id) { case DOMID_IDLE: dsec->sid = SECINITSID_XEN; @@ -195,7 +198,7 @@ static void flask_domain_free_security(struct domain *d) xfree(dsec); } -static int flask_evtchn_unbound(struct domain *d1, struct evtchn *chn, +static int flask_evtchn_unbound(struct domain *d1, struct evtchn *chn, domid_t id2) { u32 sid1, sid2, newsid; @@ -223,12 +226,12 @@ static int flask_evtchn_unbound(struct domain *d1, struct evtchn *chn, chn->ssid.flask_sid = newsid; - out: +out: rcu_unlock_domain(d2); return rc; } -static int flask_evtchn_interdomain(struct domain *d1, struct evtchn *chn1, +static int flask_evtchn_interdomain(struct domain *d1, struct evtchn *chn1, struct domain *d2, struct evtchn *chn2) { u32 sid1, sid2, newsid, reverse_sid; @@ -244,8 +247,8 @@ static int flask_evtchn_interdomain(struct domain *d1, struct evtchn *chn1, rc = security_transition_sid(sid1, sid2, SECCLASS_EVENT, &newsid); if ( rc ) { - printk("security_transition_sid failed, rc=%d, Dom%d\n", - -rc, d2->domain_id); + printk("security_transition_sid failed, rc=%d, Dom%d\n", -rc, + d2->domain_id); return rc; } @@ -280,7 +283,7 @@ static int flask_evtchn_send(struct domain *d, struct evtchn *chn) { int rc; - switch ( chn->state ) + switch (chn->state) { case ECS_INTERDOMAIN: rc = domain_has_evtchn(d, chn, EVENT__SEND); @@ -310,7 +313,7 @@ static int flask_alloc_security_evtchn(struct evtchn *chn) { chn->ssid.flask_sid = SECINITSID_UNLABELED; - return 0; + return 0; } static void flask_free_security_evtchn(struct evtchn *chn) @@ -321,14 +324,15 @@ static void flask_free_security_evtchn(struct evtchn *chn) chn->ssid.flask_sid = SECINITSID_UNLABELED; } -static char *flask_show_security_evtchn(struct domain *d, const struct evtchn *chn) +static char *flask_show_security_evtchn(struct domain *d, + const struct evtchn *chn) { int irq; u32 sid = 0; char *ctx; u32 ctx_len; - switch ( chn->state ) + switch (chn->state) { case ECS_UNBOUND: case ECS_INTERDOMAIN: @@ -336,23 +340,24 @@ static char *flask_show_security_evtchn(struct domain *d, const struct evtchn *c break; case ECS_PIRQ: irq = domain_pirq_to_irq(d, chn->u.pirq.irq); - if (irq && get_irq_sid(irq, &sid, NULL)) + if ( irq && get_irq_sid(irq, &sid, NULL) ) return NULL; break; } if ( !sid ) return NULL; - if (security_sid_to_context(sid, &ctx, &ctx_len)) + if ( security_sid_to_context(sid, &ctx, &ctx_len) ) return NULL; return ctx; } static int flask_init_hardware_domain(struct domain *d) { - return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__CREATE_HARDWARE_DOMAIN); + return current_has_perm(d, SECCLASS_DOMAIN2, + DOMAIN2__CREATE_HARDWARE_DOMAIN); } -static int flask_grant_mapref(struct domain *d1, struct domain *d2, +static int flask_grant_mapref(struct domain *d1, struct domain *d2, uint32_t flags) { u32 perms = GRANT__MAP_READ; @@ -433,7 +438,7 @@ static int flask_console_io(struct domain *d, int cmd) { u32 perm; - switch ( cmd ) + switch (cmd) { case CONSOLEIO_read: perm = XEN__READCONSOLE; @@ -452,7 +457,7 @@ static int flask_profile(struct domain *d, int op) { u32 perm; - switch ( op ) + switch (op) { case XENOPROF_init: case XENOPROF_enable_virq: @@ -490,7 +495,7 @@ static int flask_schedop_shutdown(struct domain *d1, struct domain *d2) return domain_has_perm(d1, d2, 
SECCLASS_DOMAIN, DOMAIN__SHUTDOWN); } -static void flask_security_domaininfo(struct domain *d, +static void flask_security_domaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info) { info->ssidref = domain_sid(d); @@ -509,8 +514,8 @@ static int flask_domain_create(struct domain *d, u32 ssidref) } else { - rc = avc_current_has_perm(ssidref, SECCLASS_DOMAIN, - DOMAIN__CREATE, NULL); + rc = avc_current_has_perm(ssidref, SECCLASS_DOMAIN, DOMAIN__CREATE, + NULL); if ( rc ) return rc; @@ -531,7 +536,7 @@ static int flask_getdomaininfo(struct domain *d) static int flask_domctl_scheduler_op(struct domain *d, int op) { - switch ( op ) + switch (op) { case XEN_DOMCTL_SCHEDOP_putinfo: case XEN_DOMCTL_SCHEDOP_putvcpuinfo: @@ -548,7 +553,7 @@ static int flask_domctl_scheduler_op(struct domain *d, int op) static int flask_sysctl_scheduler_op(int op) { - switch ( op ) + switch (op) { case XEN_SYSCTL_SCHEDOP_putinfo: return domain_has_xen(current->domain, XEN__SETSCHEDULER); @@ -575,7 +580,8 @@ static int flask_set_target(struct domain *d, struct domain *t) if ( rc ) return rc; /* Use avc_has_perm to avoid resolving target/current SID */ - rc = avc_has_perm(dsec->sid, tsec->sid, SECCLASS_DOMAIN, DOMAIN__SET_TARGET, NULL); + rc = avc_has_perm(dsec->sid, tsec->sid, SECCLASS_DOMAIN, DOMAIN__SET_TARGET, + NULL); if ( rc ) return rc; @@ -587,7 +593,7 @@ static int flask_set_target(struct domain *d, struct domain *t) static int flask_domctl(struct domain *d, int cmd) { - switch ( cmd ) + switch (cmd) { /* These have individual XSM hooks (common/domctl.c) */ case XEN_DOMCTL_createdomain: @@ -754,7 +760,7 @@ static int flask_domctl(struct domain *d, int cmd) static int flask_sysctl(int cmd) { - switch ( cmd ) + switch (cmd) { /* These have individual XSM hooks */ case XEN_SYSCTL_readconsole: @@ -854,7 +860,7 @@ static inline u32 resource_to_perm(uint8_t access) return RESOURCE__REMOVE; } -static char *flask_show_irq_sid (int irq) +static char *flask_show_irq_sid(int irq) { u32 sid, ctx_len; char *ctx; @@ -862,19 +868,19 @@ static char *flask_show_irq_sid (int irq) if ( rc ) return NULL; - if (security_sid_to_context(sid, &ctx, &ctx_len)) + if ( security_sid_to_context(sid, &ctx, &ctx_len) ) return NULL; return ctx; } -static int flask_map_domain_pirq (struct domain *d) +static int flask_map_domain_pirq(struct domain *d) { return current_has_perm(d, SECCLASS_RESOURCE, RESOURCE__ADD); } -static int flask_map_domain_msi (struct domain *d, int irq, const void *data, - u32 *sid, struct avc_audit_data *ad) +static int flask_map_domain_msi(struct domain *d, int irq, const void *data, + u32 *sid, struct avc_audit_data *ad) { #ifdef CONFIG_HAS_PCI const struct msi_info *msi = data; @@ -902,22 +908,25 @@ static u32 flask_iommu_resource_use_perm(void) */ u32 perm = RESOURCE__USE_NOIOMMU; - if (iommu_enabled) - perm = ( iommu_intremap ? RESOURCE__USE_IOMMU : - RESOURCE__USE_IOMMU_NOINTREMAP ); + if ( iommu_enabled ) + perm = (iommu_intremap ? 
RESOURCE__USE_IOMMU + : RESOURCE__USE_IOMMU_NOINTREMAP); return perm; } -static int flask_map_domain_irq (struct domain *d, int irq, const void *data) +static int flask_map_domain_irq(struct domain *d, int irq, const void *data) { u32 sid, dsid; int rc = -EPERM; struct avc_audit_data ad; u32 dperm = flask_iommu_resource_use_perm(); - if ( irq >= nr_static_irqs && data ) { + if ( irq >= nr_static_irqs && data ) + { rc = flask_map_domain_msi(d, irq, data, &sid, &ad); - } else { + } + else + { rc = get_irq_sid(irq, &sid, &ad); } @@ -934,13 +943,13 @@ static int flask_map_domain_irq (struct domain *d, int irq, const void *data) return rc; } -static int flask_unmap_domain_pirq (struct domain *d) +static int flask_unmap_domain_pirq(struct domain *d) { return current_has_perm(d, SECCLASS_RESOURCE, RESOURCE__REMOVE); } -static int flask_unmap_domain_msi (struct domain *d, int irq, const void *data, - u32 *sid, struct avc_audit_data *ad) +static int flask_unmap_domain_msi(struct domain *d, int irq, const void *data, + u32 *sid, struct avc_audit_data *ad) { #ifdef CONFIG_HAS_PCI const struct pci_dev *pdev = data; @@ -955,25 +964,30 @@ static int flask_unmap_domain_msi (struct domain *d, int irq, const void *data, #endif } -static int flask_unmap_domain_irq (struct domain *d, int irq, const void *data) +static int flask_unmap_domain_irq(struct domain *d, int irq, const void *data) { u32 sid; int rc = -EPERM; struct avc_audit_data ad; - if ( irq >= nr_static_irqs && data ) { + if ( irq >= nr_static_irqs && data ) + { rc = flask_unmap_domain_msi(d, irq, data, &sid, &ad); - } else { + } + else + { rc = get_irq_sid(irq, &sid, &ad); } if ( rc ) return rc; - rc = avc_current_has_perm(sid, SECCLASS_RESOURCE, RESOURCE__REMOVE_IRQ, &ad); + rc = + avc_current_has_perm(sid, SECCLASS_RESOURCE, RESOURCE__REMOVE_IRQ, &ad); return rc; } -static int flask_bind_pt_irq (struct domain *d, struct xen_domctl_bind_pt_irq *bind) +static int flask_bind_pt_irq(struct domain *d, + struct xen_domctl_bind_pt_irq *bind) { u32 dsid, rsid; int rc = -EPERM; @@ -999,25 +1013,28 @@ static int flask_bind_pt_irq (struct domain *d, struct xen_domctl_bind_pt_irq *b return avc_has_perm(dsid, rsid, SECCLASS_RESOURCE, dperm, &ad); } -static int flask_unbind_pt_irq (struct domain *d, struct xen_domctl_bind_pt_irq *bind) +static int flask_unbind_pt_irq(struct domain *d, + struct xen_domctl_bind_pt_irq *bind) { return current_has_perm(d, SECCLASS_RESOURCE, RESOURCE__REMOVE); } -static int flask_irq_permission (struct domain *d, int pirq, uint8_t access) +static int flask_irq_permission(struct domain *d, int pirq, uint8_t access) { /* the PIRQ number is not useful; real IRQ is checked during mapping */ return current_has_perm(d, SECCLASS_RESOURCE, resource_to_perm(access)); } -struct iomem_has_perm_data { +struct iomem_has_perm_data +{ u32 ssid; u32 dsid; u32 perm; u32 use_perm; }; -static int _iomem_has_perm(void *v, u32 sid, unsigned long start, unsigned long end) +static int _iomem_has_perm(void *v, u32 sid, unsigned long start, + unsigned long end) { struct iomem_has_perm_data *data = v; struct avc_audit_data ad; @@ -1032,16 +1049,17 @@ static int _iomem_has_perm(void *v, u32 sid, unsigned long start, unsigned long if ( rc ) return rc; - return avc_has_perm(data->dsid, sid, SECCLASS_RESOURCE, data->use_perm, &ad); + return avc_has_perm(data->dsid, sid, SECCLASS_RESOURCE, data->use_perm, + &ad); } -static int flask_iomem_permission(struct domain *d, uint64_t start, uint64_t end, uint8_t access) +static int flask_iomem_permission(struct 
domain *d, uint64_t start, + uint64_t end, uint8_t access) { struct iomem_has_perm_data data; int rc; - rc = current_has_perm(d, SECCLASS_RESOURCE, - resource_to_perm(access)); + rc = current_has_perm(d, SECCLASS_RESOURCE, resource_to_perm(access)); if ( rc ) return rc; @@ -1057,12 +1075,15 @@ static int flask_iomem_permission(struct domain *d, uint64_t start, uint64_t end return security_iterate_iomem_sids(start, end, _iomem_has_perm, &data); } -static int flask_iomem_mapping(struct domain *d, uint64_t start, uint64_t end, uint8_t access) +static int flask_iomem_mapping(struct domain *d, uint64_t start, uint64_t end, + uint8_t access) { return flask_iomem_permission(d, start, end, access); } -static int flask_pci_config_permission(struct domain *d, uint32_t machine_bdf, uint16_t start, uint16_t end, uint8_t access) +static int flask_pci_config_permission(struct domain *d, uint32_t machine_bdf, + uint16_t start, uint16_t end, + uint8_t access) { u32 dsid, rsid; int rc = -EPERM; @@ -1080,25 +1101,27 @@ static int flask_pci_config_permission(struct domain *d, uint32_t machine_bdf, u perm = flask_iommu_resource_use_perm(); AVC_AUDIT_DATA_INIT(&ad, DEV); - ad.device = (unsigned long) machine_bdf; + ad.device = (unsigned long)machine_bdf; dsid = domain_sid(d); return avc_has_perm(dsid, rsid, SECCLASS_RESOURCE, perm, &ad); - } static int flask_resource_plug_core(void) { - return avc_current_has_perm(SECINITSID_DOMXEN, SECCLASS_RESOURCE, RESOURCE__PLUG, NULL); + return avc_current_has_perm(SECINITSID_DOMXEN, SECCLASS_RESOURCE, + RESOURCE__PLUG, NULL); } static int flask_resource_unplug_core(void) { - return avc_current_has_perm(SECINITSID_DOMXEN, SECCLASS_RESOURCE, RESOURCE__UNPLUG, NULL); + return avc_current_has_perm(SECINITSID_DOMXEN, SECCLASS_RESOURCE, + RESOURCE__UNPLUG, NULL); } static int flask_resource_use_core(void) { - return avc_current_has_perm(SECINITSID_DOMXEN, SECCLASS_RESOURCE, RESOURCE__USE, NULL); + return avc_current_has_perm(SECINITSID_DOMXEN, SECCLASS_RESOURCE, + RESOURCE__USE, NULL); } static int flask_resource_plug_pci(uint32_t machine_bdf) @@ -1112,7 +1135,7 @@ static int flask_resource_plug_pci(uint32_t machine_bdf) return rc; AVC_AUDIT_DATA_INIT(&ad, DEV); - ad.device = (unsigned long) machine_bdf; + ad.device = (unsigned long)machine_bdf; return avc_current_has_perm(rsid, SECCLASS_RESOURCE, RESOURCE__PLUG, &ad); } @@ -1127,7 +1150,7 @@ static int flask_resource_unplug_pci(uint32_t machine_bdf) return rc; AVC_AUDIT_DATA_INIT(&ad, DEV); - ad.device = (unsigned long) machine_bdf; + ad.device = (unsigned long)machine_bdf; return avc_current_has_perm(rsid, SECCLASS_RESOURCE, RESOURCE__UNPLUG, &ad); } @@ -1142,7 +1165,7 @@ static int flask_resource_setup_pci(uint32_t machine_bdf) return rc; AVC_AUDIT_DATA_INIT(&ad, DEV); - ad.device = (unsigned long) machine_bdf; + ad.device = (unsigned long)machine_bdf; return avc_current_has_perm(rsid, SECCLASS_RESOURCE, RESOURCE__SETUP, &ad); } @@ -1161,12 +1184,14 @@ static int flask_resource_setup_gsi(int gsi) static int flask_resource_setup_misc(void) { - return avc_current_has_perm(SECINITSID_XEN, SECCLASS_RESOURCE, RESOURCE__SETUP, NULL); + return avc_current_has_perm(SECINITSID_XEN, SECCLASS_RESOURCE, + RESOURCE__SETUP, NULL); } static inline int flask_page_offline(uint32_t cmd) { - switch (cmd) { + switch (cmd) + { case sysctl_page_offline: return flask_resource_unplug_core(); case sysctl_page_online: @@ -1202,7 +1227,7 @@ static int flask_hvm_param(struct domain *d, unsigned long op) { u32 perm; - switch ( op ) + switch (op) { case 
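The machine_bdf values audited above use the same segment/bus/devfn packing that get_irq_sid() builds for MSI devices, (seg << 16) | (bus << 8) | devfn. A small sketch of packing and unpacking that layout, purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t make_sbdf(uint16_t seg, uint8_t bus, uint8_t devfn)
    {
        return ((uint32_t)seg << 16) | ((uint32_t)bus << 8) | devfn;
    }

    int main(void)
    {
        uint32_t sbdf = make_sbdf(0, 0x03, (0x1f << 3) | 2); /* 0000:03:1f.2 */

        printf("%04x:%02x:%02x.%u\n",
               sbdf >> 16,            /* segment  */
               (sbdf >> 8) & 0xff,    /* bus      */
               (sbdf >> 3) & 0x1f,    /* device   */
               sbdf & 0x7);           /* function */
        return 0;
    }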
HVMOP_set_param: perm = HVM__SETPARAM; @@ -1234,7 +1259,7 @@ static int flask_hvm_altp2mhvm_op(struct domain *d, uint64_t mode, uint32_t op) * are written with the XSM_TARGET policy in mind, so add restrictions * on the domain acting on itself when forbidden by the mode. */ - switch ( mode ) + switch (mode) { case XEN_ALTP2M_mixed: break; @@ -1287,7 +1312,8 @@ static int flask_get_device_group(uint32_t machine_bdf) if ( rc ) return rc; - return avc_current_has_perm(rsid, SECCLASS_RESOURCE, RESOURCE__STAT_DEVICE, NULL); + return avc_current_has_perm(rsid, SECCLASS_RESOURCE, RESOURCE__STAT_DEVICE, + NULL); } static int flask_test_assign_device(uint32_t machine_bdf) @@ -1299,7 +1325,8 @@ static int flask_test_assign_device(uint32_t machine_bdf) if ( rc ) return rc; - return avc_current_has_perm(rsid, SECCLASS_RESOURCE, RESOURCE__STAT_DEVICE, NULL); + return avc_current_has_perm(rsid, SECCLASS_RESOURCE, RESOURCE__STAT_DEVICE, + NULL); } static int flask_assign_device(struct domain *d, uint32_t machine_bdf) @@ -1321,8 +1348,9 @@ static int flask_assign_device(struct domain *d, uint32_t machine_bdf) return rc; AVC_AUDIT_DATA_INIT(&ad, DEV); - ad.device = (unsigned long) machine_bdf; - rc = avc_current_has_perm(rsid, SECCLASS_RESOURCE, RESOURCE__ADD_DEVICE, &ad); + ad.device = (unsigned long)machine_bdf; + rc = avc_current_has_perm(rsid, SECCLASS_RESOURCE, RESOURCE__ADD_DEVICE, + &ad); if ( rc ) return rc; @@ -1343,7 +1371,8 @@ static int flask_deassign_device(struct domain *d, uint32_t machine_bdf) if ( rc ) return rc; - return avc_current_has_perm(rsid, SECCLASS_RESOURCE, RESOURCE__REMOVE_DEVICE, NULL); + return avc_current_has_perm(rsid, SECCLASS_RESOURCE, + RESOURCE__REMOVE_DEVICE, NULL); } #endif /* HAS_PASSTHROUGH && HAS_PCI */ @@ -1381,7 +1410,8 @@ static int flask_assign_dtdevice(struct domain *d, const char *dtpath) AVC_AUDIT_DATA_INIT(&ad, DTDEV); ad.dtdev = dtpath; - rc = avc_current_has_perm(rsid, SECCLASS_RESOURCE, RESOURCE__ADD_DEVICE, &ad); + rc = avc_current_has_perm(rsid, SECCLASS_RESOURCE, RESOURCE__ADD_DEVICE, + &ad); if ( rc ) return rc; @@ -1402,14 +1432,14 @@ static int flask_deassign_dtdevice(struct domain *d, const char *dtpath) if ( rc ) return rc; - return avc_current_has_perm(rsid, SECCLASS_RESOURCE, RESOURCE__REMOVE_DEVICE, - NULL); + return avc_current_has_perm(rsid, SECCLASS_RESOURCE, + RESOURCE__REMOVE_DEVICE, NULL); } #endif /* HAS_PASSTHROUGH && HAS_DEVICE_TREE */ static int flask_platform_op(uint32_t op) { - switch ( op ) + switch (op) { #ifdef CONFIG_X86 /* These operations have their own XSM hooks */ @@ -1485,7 +1515,7 @@ static int flask_shadow_control(struct domain *d, uint32_t op) { u32 perm; - switch ( op ) + switch (op) { case XEN_DOMCTL_SHADOW_OP_OFF: perm = SHADOW__DISABLE; @@ -1508,14 +1538,16 @@ static int flask_shadow_control(struct domain *d, uint32_t op) return current_has_perm(d, SECCLASS_SHADOW, perm); } -struct ioport_has_perm_data { +struct ioport_has_perm_data +{ u32 ssid; u32 dsid; u32 perm; u32 use_perm; }; -static int _ioport_has_perm(void *v, u32 sid, unsigned long start, unsigned long end) +static int _ioport_has_perm(void *v, u32 sid, unsigned long start, + unsigned long end) { struct ioport_has_perm_data *data = v; struct avc_audit_data ad; @@ -1530,16 +1562,17 @@ static int _ioport_has_perm(void *v, u32 sid, unsigned long start, unsigned long if ( rc ) return rc; - return avc_has_perm(data->dsid, sid, SECCLASS_RESOURCE, data->use_perm, &ad); + return avc_has_perm(data->dsid, sid, SECCLASS_RESOURCE, data->use_perm, + &ad); } -static int 
flask_ioport_permission(struct domain *d, uint32_t start, uint32_t end, uint8_t access) +static int flask_ioport_permission(struct domain *d, uint32_t start, + uint32_t end, uint8_t access) { int rc; struct ioport_has_perm_data data; - rc = current_has_perm(d, SECCLASS_RESOURCE, - resource_to_perm(access)); + rc = current_has_perm(d, SECCLASS_RESOURCE, resource_to_perm(access)); if ( rc ) return rc; @@ -1556,7 +1589,8 @@ static int flask_ioport_permission(struct domain *d, uint32_t start, uint32_t en return security_iterate_ioport_sids(start, end, _ioport_has_perm, &data); } -static int flask_ioport_mapping(struct domain *d, uint32_t start, uint32_t end, uint8_t access) +static int flask_ioport_mapping(struct domain *d, uint32_t start, uint32_t end, + uint8_t access) { return flask_ioport_permission(d, start, end, access); } @@ -1573,7 +1607,7 @@ static int flask_apic(struct domain *d, int cmd) { u32 perm; - switch ( cmd ) + switch (cmd) { case PHYSDEVOP_apic_read: case PHYSDEVOP_alloc_irq_vector: @@ -1591,7 +1625,8 @@ static int flask_apic(struct domain *d, int cmd) static int flask_machine_memory_map(void) { - return avc_current_has_perm(SECINITSID_XEN, SECCLASS_MMU, MMU__MEMORYMAP, NULL); + return avc_current_has_perm(SECINITSID_XEN, SECCLASS_MMU, MMU__MEMORYMAP, + NULL); } static int flask_domain_memory_map(struct domain *d) @@ -1644,24 +1679,24 @@ static int flask_priv_mapping(struct domain *d, struct domain *t) return domain_has_perm(d, t, SECCLASS_MMU, MMU__TARGET_HACK); } -static int flask_pmu_op (struct domain *d, unsigned int op) +static int flask_pmu_op(struct domain *d, unsigned int op) { u32 dsid = domain_sid(d); - switch ( op ) + switch (op) { case XENPMU_mode_set: case XENPMU_mode_get: case XENPMU_feature_set: case XENPMU_feature_get: - return avc_has_perm(dsid, SECINITSID_XEN, SECCLASS_XEN2, - XEN2__PMU_CTRL, NULL); + return avc_has_perm(dsid, SECINITSID_XEN, SECCLASS_XEN2, XEN2__PMU_CTRL, + NULL); case XENPMU_init: case XENPMU_finish: case XENPMU_lvtpc_set: case XENPMU_flush: - return avc_has_perm(dsid, SECINITSID_XEN, SECCLASS_XEN2, - XEN2__PMU_USE, NULL); + return avc_has_perm(dsid, SECINITSID_XEN, SECCLASS_XEN2, XEN2__PMU_USE, + NULL); default: return -EPERM; } @@ -1674,11 +1709,11 @@ static int flask_dm_op(struct domain *d) #endif /* CONFIG_X86 */ -static int flask_xen_version (uint32_t op) +static int flask_xen_version(uint32_t op) { u32 dsid = domain_sid(current->domain); - switch ( op ) + switch (op) { case XENVER_version: case XENVER_platform_parameters: @@ -1729,8 +1764,7 @@ static int flask_argo_enable(const struct domain *d) static int flask_argo_register_single_source(const struct domain *d, const struct domain *t) { - return domain_has_perm(d, t, SECCLASS_ARGO, - ARGO__REGISTER_SINGLE_SOURCE); + return domain_has_perm(d, t, SECCLASS_ARGO, ARGO__REGISTER_SINGLE_SOURCE); } static int flask_argo_register_any_source(const struct domain *d) @@ -1892,7 +1926,7 @@ void __init flask_init(const void *policy_buffer, size_t policy_size) { int ret = -ENOENT; - switch ( flask_bootparam ) + switch (flask_bootparam) { case FLASK_BOOTPARAM_DISABLED: printk(XENLOG_INFO "Flask: Disabled at boot.\n"); @@ -1923,7 +1957,8 @@ void __init flask_init(const void *policy_buffer, size_t policy_size) panic("Unable to load FLASK policy\n"); if ( ret ) - printk(XENLOG_INFO "Flask: Access controls disabled until policy is loaded.\n"); + printk(XENLOG_INFO + "Flask: Access controls disabled until policy is loaded.\n"); else if ( flask_enforcing ) printk(XENLOG_INFO "Flask: Starting in 
enforcing mode.\n"); else diff --git a/xen/xsm/flask/ss/avtab.c b/xen/xsm/flask/ss/avtab.c index bfc91c8b0c..bc5ef9ecf2 100644 --- a/xen/xsm/flask/ss/avtab.c +++ b/xen/xsm/flask/ss/avtab.c @@ -4,7 +4,8 @@ * Author : Stephen Smalley, */ -/* Updated: Frank Mayer and Karl MacMillan +/* Updated: Frank Mayer and Karl MacMillan + * * * Added conditional policy language extensions * @@ -31,12 +32,15 @@ static inline int avtab_hash(struct avtab_key *keyp, u16 mask) { return ((keyp->target_class + (keyp->target_type << 2) + - (keyp->source_type << 9)) & mask); + (keyp->source_type << 9)) & + mask); } -static struct avtab_node* avtab_insert_node(struct avtab *h, int hvalue, - struct avtab_node * prev, struct avtab_node * cur, struct avtab_key *key, - struct avtab_datum *datum) +static struct avtab_node *avtab_insert_node(struct avtab *h, int hvalue, + struct avtab_node *prev, + struct avtab_node *cur, + struct avtab_key *key, + struct avtab_datum *datum) { struct avtab_node *newnode = xzalloc(struct avtab_node); @@ -59,38 +63,38 @@ static struct avtab_node* avtab_insert_node(struct avtab *h, int hvalue, return newnode; } -static int avtab_insert(struct avtab *h, struct avtab_key *key, - struct avtab_datum *datum) +static int avtab_insert(struct avtab *h, struct avtab_key *key, + struct avtab_datum *datum) { int hvalue; struct avtab_node *prev, *cur, *newnode; - u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); + u16 specified = key->specified & ~(AVTAB_ENABLED | AVTAB_ENABLED_OLD); if ( !h || !h->htable ) return -EINVAL; hvalue = avtab_hash(key, h->mask); for ( prev = NULL, cur = h->htable[hvalue]; cur; - prev = cur, cur = cur->next) + prev = cur, cur = cur->next ) { if ( key->source_type == cur->key.source_type && - key->target_type == cur->key.target_type && - key->target_class == cur->key.target_class && - (specified & cur->key.specified) ) + key->target_type == cur->key.target_type && + key->target_class == cur->key.target_class && + (specified & cur->key.specified) ) return -EEXIST; if ( key->source_type < cur->key.source_type ) break; if ( key->source_type == cur->key.source_type && - key->target_type < cur->key.target_type ) + key->target_type < cur->key.target_type ) break; if ( key->source_type == cur->key.source_type && - key->target_type == cur->key.target_type && - key->target_class < cur->key.target_class ) + key->target_type == cur->key.target_type && + key->target_class < cur->key.target_class ) break; } newnode = avtab_insert_node(h, hvalue, prev, cur, key, datum); - if( !newnode ) + if ( !newnode ) return -ENOMEM; return 0; @@ -100,32 +104,33 @@ static int avtab_insert(struct avtab *h, struct avtab_key *key, * key/specified mask into the table, as needed by the conditional avtab. * It also returns a pointer to the node inserted. 
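avtab_hash() above folds the (source type, target type, target class) triple into a bucket index with shifts and a mask. A standalone sketch of that computation; the mask is assumed to be one less than a power-of-two slot count, as avtab_alloc() arranges:

    #include <stdint.h>
    #include <stdio.h>

    struct avtab_key { uint16_t source_type, target_type, target_class; };

    static unsigned int avtab_hash(const struct avtab_key *k, uint16_t mask)
    {
        return (k->target_class + (k->target_type << 2) +
                (k->source_type << 9)) & mask;
    }

    int main(void)
    {
        struct avtab_key k = { .source_type = 42, .target_type = 7,
                               .target_class = 3 };

        /* 1024 slots -> mask 0x3ff, purely an example value. */
        printf("bucket = %u\n", avtab_hash(&k, 0x3ff));
        return 0;
    }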
*/ -struct avtab_node * avtab_insert_nonunique(struct avtab * h, - struct avtab_key * key, struct avtab_datum * datum) +struct avtab_node *avtab_insert_nonunique(struct avtab *h, + struct avtab_key *key, + struct avtab_datum *datum) { int hvalue; struct avtab_node *prev, *cur, *newnode; - u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); + u16 specified = key->specified & ~(AVTAB_ENABLED | AVTAB_ENABLED_OLD); if ( !h || !h->htable ) return NULL; hvalue = avtab_hash(key, h->mask); - for ( prev = NULL, cur = h->htable[hvalue]; cur; - prev = cur, cur = cur->next ) + for ( prev = NULL, cur = h->htable[hvalue]; cur; + prev = cur, cur = cur->next ) { if ( key->source_type == cur->key.source_type && - key->target_type == cur->key.target_type && - key->target_class == cur->key.target_class && - (specified & cur->key.specified) ) + key->target_type == cur->key.target_type && + key->target_class == cur->key.target_class && + (specified & cur->key.specified) ) break; if ( key->source_type < cur->key.source_type ) break; if ( key->source_type == cur->key.source_type && - key->target_type < cur->key.target_type ) + key->target_type < cur->key.target_type ) break; if ( key->source_type == cur->key.source_type && - key->target_type == cur->key.target_type && - key->target_class < cur->key.target_class ) + key->target_type == cur->key.target_type && + key->target_class < cur->key.target_class ) break; } newnode = avtab_insert_node(h, hvalue, prev, cur, key, datum); @@ -137,7 +142,7 @@ struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key) { int hvalue; struct avtab_node *cur; - u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); + u16 specified = key->specified & ~(AVTAB_ENABLED | AVTAB_ENABLED_OLD); if ( !h || !h->htable ) return NULL; @@ -146,19 +151,19 @@ struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key) for ( cur = h->htable[hvalue]; cur; cur = cur->next ) { if ( key->source_type == cur->key.source_type && - key->target_type == cur->key.target_type && - key->target_class == cur->key.target_class && - (specified & cur->key.specified) ) + key->target_type == cur->key.target_type && + key->target_class == cur->key.target_class && + (specified & cur->key.specified) ) return &cur->datum; if ( key->source_type < cur->key.source_type ) break; if ( key->source_type == cur->key.source_type && - key->target_type < cur->key.target_type ) + key->target_type < cur->key.target_type ) break; if ( key->source_type == cur->key.source_type && - key->target_type == cur->key.target_type && - key->target_class < cur->key.target_class ) + key->target_type == cur->key.target_type && + key->target_class < cur->key.target_class ) break; } @@ -168,11 +173,11 @@ struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key) /* This search function returns a node pointer, and can be used in * conjunction with avtab_search_next_node() */ -struct avtab_node* avtab_search_node(struct avtab *h, struct avtab_key *key) +struct avtab_node *avtab_search_node(struct avtab *h, struct avtab_key *key) { int hvalue; struct avtab_node *cur; - u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); + u16 specified = key->specified & ~(AVTAB_ENABLED | AVTAB_ENABLED_OLD); if ( !h || !h->htable ) return NULL; @@ -181,49 +186,49 @@ struct avtab_node* avtab_search_node(struct avtab *h, struct avtab_key *key) for ( cur = h->htable[hvalue]; cur; cur = cur->next ) { if ( key->source_type == cur->key.source_type && - key->target_type == 
cur->key.target_type && - key->target_class == cur->key.target_class && - (specified & cur->key.specified) ) + key->target_type == cur->key.target_type && + key->target_class == cur->key.target_class && + (specified & cur->key.specified) ) return cur; if ( key->source_type < cur->key.source_type ) break; if ( key->source_type == cur->key.source_type && - key->target_type < cur->key.target_type ) + key->target_type < cur->key.target_type ) break; if ( key->source_type == cur->key.source_type && - key->target_type == cur->key.target_type && - key->target_class < cur->key.target_class ) + key->target_type == cur->key.target_type && + key->target_class < cur->key.target_class ) break; } return NULL; } -struct avtab_node* avtab_search_node_next(struct avtab_node *node, - int specified) +struct avtab_node *avtab_search_node_next(struct avtab_node *node, + int specified) { struct avtab_node *cur; if ( !node ) return NULL; - specified &= ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); + specified &= ~(AVTAB_ENABLED | AVTAB_ENABLED_OLD); for ( cur = node->next; cur; cur = cur->next ) { if ( node->key.source_type == cur->key.source_type && - node->key.target_type == cur->key.target_type && - node->key.target_class == cur->key.target_class && - (specified & cur->key.specified) ) + node->key.target_type == cur->key.target_type && + node->key.target_class == cur->key.target_class && + (specified & cur->key.specified) ) return cur; if ( node->key.source_type < cur->key.source_type ) break; if ( node->key.source_type == cur->key.source_type && - node->key.target_type < cur->key.target_type ) + node->key.target_type < cur->key.target_type ) break; if ( node->key.source_type == cur->key.source_type && - node->key.target_type == cur->key.target_type && - node->key.target_class < cur->key.target_class ) + node->key.target_type == cur->key.target_type && + node->key.target_class < cur->key.target_class ) break; } return NULL; @@ -274,10 +279,10 @@ int avtab_alloc(struct avtab *h, u32 nrules) while ( work ) { - work = work >> 1; + work = work >> 1; shift++; - } - if ( shift > 2 ) + } + if ( shift > 2 ) shift = shift - 2; nslot = 1 << shift; if ( nslot > MAX_AVTAB_SIZE ) @@ -294,8 +299,8 @@ avtab_alloc_out: h->nel = 0; h->nslot = nslot; h->mask = mask; - printk(KERN_DEBUG "Flask: %d avtab hash slots, %d rules.\n", - h->nslot, nrules); + printk(KERN_DEBUG "Flask: %d avtab hash slots, %d rules.\n", h->nslot, + nrules); return 0; } @@ -325,22 +330,18 @@ void avtab_hash_eval(struct avtab *h, char *tag) } printk(KERN_INFO "%s: %d entries and %d/%d buckets used, longest " - "chain length %d\n", tag, h->nel, slots_used, h->nslot, - max_chain_len); + "chain length %d\n", + tag, h->nel, slots_used, h->nslot, max_chain_len); } -static uint16_t spec_order[] = { - AVTAB_ALLOWED, - AVTAB_AUDITDENY, - AVTAB_AUDITALLOW, - AVTAB_TRANSITION, - AVTAB_CHANGE, - AVTAB_MEMBER -}; +static uint16_t spec_order[] = {AVTAB_ALLOWED, AVTAB_AUDITDENY, + AVTAB_AUDITALLOW, AVTAB_TRANSITION, + AVTAB_CHANGE, AVTAB_MEMBER}; int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol, - int (*insertf)(struct avtab *a, struct avtab_key *k, - struct avtab_datum *d, void *p), void *p) + int (*insertf)(struct avtab *a, struct avtab_key *k, + struct avtab_datum *d, void *p), + void *p) { __le16 buf16[4]; u16 enabled; @@ -367,9 +368,8 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol, { printk(KERN_ERR "Flask: avtab: entry overflow\n"); return -1; - } - rc = next_entry(buf32, fp, sizeof(u32)*items2); + rc = next_entry(buf32, fp, 
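avtab_alloc() above sizes the hash table from the rule count: it finds the highest set bit of the count, backs off by two so buckets hold a handful of entries on average, and uses the resulting power of two as the slot count and mask. A small sketch of that sizing step; the MAX_AVTAB_SIZE cap is a stand-in value, and starting 'work' from the rule count is an assumption since that initialisation is outside the hunk:

    #include <stdio.h>

    #define MAX_AVTAB_SIZE (1 << 13)   /* illustrative cap only */

    static void size_table(unsigned int nrules, unsigned int *nslot,
                           unsigned int *mask)
    {
        unsigned int work = nrules, shift = 0;

        while ( work )
        {
            work >>= 1;
            shift++;
        }
        if ( shift > 2 )
            shift -= 2;                 /* ~4 rules per bucket on average */

        *nslot = 1u << shift;
        if ( *nslot > MAX_AVTAB_SIZE )
            *nslot = MAX_AVTAB_SIZE;
        *mask = *nslot - 1;
    }

    int main(void)
    {
        unsigned int nslot, mask;

        size_table(5000, &nslot, &mask);
        printf("slots=%u mask=%#x\n", nslot, mask);  /* slots=2048 mask=0x7ff */
        return 0;
    }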
sizeof(u32) * items2); if ( rc < 0 ) { printk(KERN_ERR "Flask: avtab: truncated entry\n"); @@ -413,7 +413,7 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol, return -1; } - for ( i = 0; i < sizeof(spec_order)/sizeof(u16); i++ ) + for ( i = 0; i < sizeof(spec_order) / sizeof(u16); i++ ) { if ( val & spec_order[i] ) { @@ -425,15 +425,16 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol, } } - if ( items != items2 ) { + if ( items != items2 ) + { printk("Flask: avtab: entry only had %d items, expected %d\n", - items2, items); + items2, items); return -1; } return 0; } - rc = next_entry(buf16, fp, sizeof(u16)*4); + rc = next_entry(buf16, fp, sizeof(u16) * 4); if ( rc < 0 ) { printk("Flask: avtab: truncated entry\n"); @@ -483,7 +484,7 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol, } static int avtab_insertf(struct avtab *a, struct avtab_key *k, - struct avtab_datum *d, void *p) + struct avtab_datum *d, void *p) { return avtab_insert(a, k, d); } @@ -533,4 +534,3 @@ bad: avtab_destroy(a); goto out; } - diff --git a/xen/xsm/flask/ss/conditional.c b/xen/xsm/flask/ss/conditional.c index 3e58aea551..0096ead24a 100644 --- a/xen/xsm/flask/ss/conditional.c +++ b/xen/xsm/flask/ss/conditional.c @@ -34,51 +34,51 @@ static int cond_evaluate_expr(struct policydb *p, struct cond_expr *expr) for ( cur = expr; cur != NULL; cur = cur->next ) { - switch ( cur->expr_type ) + switch (cur->expr_type) { - case COND_BOOL: - if ( sp == (COND_EXPR_MAXDEPTH - 1) ) - return -1; - sp++; - s[sp] = p->bool_val_to_struct[cur->bool_val - 1]->state; + case COND_BOOL: + if ( sp == (COND_EXPR_MAXDEPTH - 1) ) + return -1; + sp++; + s[sp] = p->bool_val_to_struct[cur->bool_val - 1]->state; break; - case COND_NOT: - if ( sp < 0 ) - return -1; - s[sp] = !s[sp]; + case COND_NOT: + if ( sp < 0 ) + return -1; + s[sp] = !s[sp]; break; - case COND_OR: - if ( sp < 1 ) - return -1; - sp--; - s[sp] |= s[sp + 1]; + case COND_OR: + if ( sp < 1 ) + return -1; + sp--; + s[sp] |= s[sp + 1]; break; - case COND_AND: - if ( sp < 1 ) - return -1; - sp--; - s[sp] &= s[sp + 1]; + case COND_AND: + if ( sp < 1 ) + return -1; + sp--; + s[sp] &= s[sp + 1]; break; - case COND_XOR: - if ( sp < 1 ) - return -1; - sp--; - s[sp] ^= s[sp + 1]; - break; - case COND_EQ: - if ( sp < 1 ) - return -1; - sp--; - s[sp] = (s[sp] == s[sp + 1]); + case COND_XOR: + if ( sp < 1 ) + return -1; + sp--; + s[sp] ^= s[sp + 1]; break; - case COND_NEQ: - if ( sp < 1 ) - return -1; - sp--; - s[sp] = (s[sp] != s[sp + 1]); + case COND_EQ: + if ( sp < 1 ) + return -1; + sp--; + s[sp] = (s[sp] == s[sp + 1]); break; - default: + case COND_NEQ: + if ( sp < 1 ) return -1; + sp--; + s[sp] = (s[sp] != s[sp + 1]); + break; + default: + return -1; } } return s[0]; @@ -94,14 +94,15 @@ static int cond_evaluate_expr(struct policydb *p, struct cond_expr *expr) int evaluate_cond_node(struct policydb *p, struct cond_node *node) { int new_state; - struct cond_av_list* cur; + struct cond_av_list *cur; new_state = cond_evaluate_expr(p, node->expr); if ( new_state != node->cur_state ) { node->cur_state = new_state; if ( new_state == -1 ) - printk(KERN_ERR "Flask: expression result was undefined - disabling all rules.\n"); + printk(KERN_ERR "Flask: expression result was undefined - " + "disabling all rules.\n"); /* turn the rules on or off */ for ( cur = node->true_list; cur != NULL; cur = cur->next ) { @@ -182,8 +183,8 @@ void cond_policydb_destroy(struct policydb *p) int cond_init_bool_indexes(struct policydb *p) { 
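cond_evaluate_expr() above evaluates a conditional expression stored in postfix form: boolean operands push their current value, each operator pops its inputs and pushes the result, and -1 signals a malformed expression. A self-contained sketch of the same stack walk over a toy expression array:

    #include <stdio.h>

    #define MAXDEPTH 10

    enum { OP_BOOL, OP_NOT, OP_AND, OP_OR, OP_XOR, OP_EQ, OP_NEQ };

    struct expr { int type; int val; };   /* val: operand value for OP_BOOL */

    static int evaluate(const struct expr *e, int len)
    {
        int s[MAXDEPTH], sp = -1;

        for ( int i = 0; i < len; i++ )
        {
            switch ( e[i].type )
            {
            case OP_BOOL:
                if ( sp == MAXDEPTH - 1 )
                    return -1;            /* stack overflow */
                s[++sp] = e[i].val;
                break;
            case OP_NOT:
                if ( sp < 0 )
                    return -1;
                s[sp] = !s[sp];
                break;
            default:                      /* all binary operators */
                if ( sp < 1 )
                    return -1;
                sp--;
                switch ( e[i].type )
                {
                case OP_AND: s[sp] &= s[sp + 1]; break;
                case OP_OR:  s[sp] |= s[sp + 1]; break;
                case OP_XOR: s[sp] ^= s[sp + 1]; break;
                case OP_EQ:  s[sp] = (s[sp] == s[sp + 1]); break;
                case OP_NEQ: s[sp] = (s[sp] != s[sp + 1]); break;
                default: return -1;
                }
            }
        }
        return sp == 0 ? s[0] : -1;
    }

    int main(void)
    {
        /* (1 AND 0) OR 1  ==>  1 */
        struct expr e[] = { { OP_BOOL, 1 }, { OP_BOOL, 0 }, { OP_AND, 0 },
                            { OP_BOOL, 1 }, { OP_OR, 0 } };

        printf("%d\n", evaluate(e, 5));
        return 0;
    }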
xfree(p->bool_val_to_struct); - p->bool_val_to_struct = (struct cond_bool_datum**) - xmalloc_array(struct cond_bool_datum*, p->p_bools.nprim); + p->bool_val_to_struct = (struct cond_bool_datum **)xmalloc_array( + struct cond_bool_datum *, p->p_bools.nprim); if ( !p->bool_val_to_struct ) return -1; return 0; @@ -208,7 +209,7 @@ int cond_index_bool(void *key, void *datum, void *datap) return -EINVAL; p->p_bool_val_to_name[booldatum->value - 1] = key; - p->bool_val_to_struct[booldatum->value -1] = booldatum; + p->bool_val_to_struct[booldatum->value - 1] = booldatum; return 0; } @@ -268,8 +269,8 @@ struct cond_insertf_data struct cond_av_list *tail; }; -static int cond_insertf(struct avtab *a, struct avtab_key *k, - struct avtab_datum *d, void *ptr) +static int cond_insertf(struct avtab *a, struct avtab_key *k, + struct avtab_datum *d, void *ptr) { struct cond_insertf_data *data = ptr; struct policydb *p = data->p; @@ -287,7 +288,7 @@ static int cond_insertf(struct avtab *a, struct avtab_key *k, if ( avtab_search(&p->te_avtab, k) ) { printk("Flask: type rule already exists outside of a " - "conditional."); + "conditional."); goto err; } /* @@ -329,7 +330,7 @@ static int cond_insertf(struct avtab *a, struct avtab_key *k, if ( avtab_search(&p->te_cond_avtab, k) ) { printk("Flask: conflicting type rules when adding type rule " - "for true.\n"); + "for true.\n"); goto err; } } @@ -360,8 +361,9 @@ err: return -1; } -static int cond_read_av_list(struct policydb *p, void *fp, - struct cond_av_list **ret_list, struct cond_av_list *other) +static int cond_read_av_list(struct policydb *p, void *fp, + struct cond_av_list **ret_list, + struct cond_av_list *other) { int i, rc; __le32 buf[1]; @@ -452,7 +454,7 @@ static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp) goto err; } - if ( i == 0 ) + if ( i == 0 ) node->expr = expr; else last->next = expr; @@ -485,7 +487,7 @@ int cond_read_list(struct policydb *p, void *fp) rc = avtab_alloc(&(p->te_cond_avtab), p->te_avtab.nel); if ( rc ) - goto err; + goto err; for ( i = 0; i < len; i++ ) { @@ -513,30 +515,30 @@ err: /* Determine whether additional permissions are granted by the conditional * av table, and if so, add them to the result */ -void cond_compute_av(struct avtab *ctab, struct avtab_key *key, - struct av_decision *avd) +void cond_compute_av(struct avtab *ctab, struct avtab_key *key, + struct av_decision *avd) { struct avtab_node *node; - if( !ctab || !key || !avd ) + if ( !ctab || !key || !avd ) return; - for( node = avtab_search_node(ctab, key); node != NULL; - node = avtab_search_node_next(node, key->specified) ) + for ( node = avtab_search_node(ctab, key); node != NULL; + node = avtab_search_node_next(node, key->specified) ) { - if ( (u16) (AVTAB_ALLOWED|AVTAB_ENABLED) == - (node->key.specified & (AVTAB_ALLOWED|AVTAB_ENABLED)) ) + if ( (u16)(AVTAB_ALLOWED | AVTAB_ENABLED) == + (node->key.specified & (AVTAB_ALLOWED | AVTAB_ENABLED)) ) avd->allowed |= node->datum.data; - if ( (u16) (AVTAB_AUDITDENY|AVTAB_ENABLED) == - (node->key.specified & (AVTAB_AUDITDENY|AVTAB_ENABLED)) ) + if ( (u16)(AVTAB_AUDITDENY | AVTAB_ENABLED) == + (node->key.specified & (AVTAB_AUDITDENY | AVTAB_ENABLED)) ) /* Since a '0' in an auditdeny mask represents a * permission we do NOT want to audit (dontaudit), we use * the '&' operand to ensure that all '0's in the mask * are retained (much unlike the allow and auditallow cases). 
*/ avd->auditdeny &= node->datum.data; - if ( (u16) (AVTAB_AUDITALLOW|AVTAB_ENABLED) == - (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED)) ) + if ( (u16)(AVTAB_AUDITALLOW | AVTAB_ENABLED) == + (node->key.specified & (AVTAB_AUDITALLOW | AVTAB_ENABLED)) ) avd->auditallow |= node->datum.data; } return; diff --git a/xen/xsm/flask/ss/ebitmap.c b/xen/xsm/flask/ss/ebitmap.c index e1d0a586a7..4cbae2d6c9 100644 --- a/xen/xsm/flask/ss/ebitmap.c +++ b/xen/xsm/flask/ss/ebitmap.c @@ -29,7 +29,7 @@ int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2) n1 = e1->node; n2 = e2->node; while ( n1 && n2 && (n1->startbit == n2->startbit) && - !memcmp(n1->maps, n2->maps, EBITMAP_SIZE / 8)) + !memcmp(n1->maps, n2->maps, EBITMAP_SIZE / 8) ) { n1 = n1->next; n2 = n2->next; @@ -241,8 +241,8 @@ int ebitmap_read(struct ebitmap *e, void *fp) if ( mapunit != sizeof(u64) * 8 ) { printk(KERN_ERR "Flask: ebitmap: map size %u does not " - "match my size %zd (high bit was %d)\n", mapunit, - sizeof(u64) * 8, e->highbit); + "match my size %zd (high bit was %d)\n", + mapunit, sizeof(u64) * 8, e->highbit); goto bad; } @@ -268,14 +268,14 @@ int ebitmap_read(struct ebitmap *e, void *fp) if ( startbit & (mapunit - 1) ) { printk(KERN_ERR "Flask: ebitmap start bit (%d) is " - "not a multiple of the map unit size (%u)\n", + "not a multiple of the map unit size (%u)\n", startbit, mapunit); goto bad; } if ( startbit > e->highbit - mapunit ) { printk(KERN_ERR "Flask: ebitmap start bit (%d) is " - "beyond the end of the bitmap (%u)\n", + "beyond the end of the bitmap (%u)\n", startbit, (e->highbit - mapunit)); goto bad; } @@ -286,8 +286,7 @@ int ebitmap_read(struct ebitmap *e, void *fp) if ( !tmp ) { - printk(KERN_ERR - "Flask: ebitmap: out of memory\n"); + printk(KERN_ERR "Flask: ebitmap: out of memory\n"); rc = -ENOMEM; goto bad; } @@ -302,7 +301,7 @@ int ebitmap_read(struct ebitmap *e, void *fp) else if ( startbit <= n->startbit ) { printk(KERN_ERR "Flask: ebitmap: start bit %d" - " comes after start bit %d\n", + " comes after start bit %d\n", startbit, n->startbit); goto bad; } diff --git a/xen/xsm/flask/ss/hashtab.c b/xen/xsm/flask/ss/hashtab.c index f35c0dcea0..612ca47b2a 100644 --- a/xen/xsm/flask/ss/hashtab.c +++ b/xen/xsm/flask/ss/hashtab.c @@ -11,10 +11,10 @@ #include #include "hashtab.h" -struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, - const void *key), - int (*keycmp)(struct hashtab *h, const void *key1, - const void *key2), u32 size) +struct hashtab *hashtab_create( + u32 (*hash_value)(struct hashtab *h, const void *key), + int (*keycmp)(struct hashtab *h, const void *key1, const void *key2), + u32 size) { struct hashtab *p = xzalloc(struct hashtab); @@ -119,9 +119,8 @@ void hashtab_destroy(struct hashtab *h) xfree(h); } -int hashtab_map(struct hashtab *h, - int (*apply)(void *k, void *d, void *args), - void *args) +int hashtab_map(struct hashtab *h, int (*apply)(void *k, void *d, void *args), + void *args) { u32 i; int ret; @@ -144,7 +143,6 @@ int hashtab_map(struct hashtab *h, return 0; } - void hashtab_stat(struct hashtab *h, struct hashtab_info *info) { u32 i, chain_len, slots_used, max_chain_len; diff --git a/xen/xsm/flask/ss/mls.c b/xen/xsm/flask/ss/mls.c index f2fa560810..795534c699 100644 --- a/xen/xsm/flask/ss/mls.c +++ b/xen/xsm/flask/ss/mls.c @@ -27,7 +27,7 @@ * Return the length in bytes for the MLS fields of the * security context string representation of `context'. 
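cond_compute_av() above merges the enabled conditional rules into the decision: allow and auditallow bits accumulate with OR, while auditdeny accumulates with AND because a clear bit there means "do not audit" and must survive the merge. A tiny sketch of that merging rule with made-up vectors:

    #include <stdint.h>
    #include <stdio.h>

    struct av_decision { uint32_t allowed, auditallow, auditdeny; };

    static void merge_rule(struct av_decision *avd, uint32_t allowed,
                           uint32_t auditallow, uint32_t auditdeny)
    {
        avd->allowed    |= allowed;      /* grants accumulate             */
        avd->auditallow |= auditallow;   /* extra audit-on-success bits   */
        avd->auditdeny  &= auditdeny;    /* 0 bits (dontaudit) must stick */
    }

    int main(void)
    {
        struct av_decision avd = { 0, 0, ~0u };

        merge_rule(&avd, 0x1, 0x0, ~0u & ~0x4u);   /* dontaudit bit 2 */
        merge_rule(&avd, 0x2, 0x8, ~0u);

        printf("allowed=%#x auditdeny=%#x\n", avd.allowed, avd.auditdeny);
        return 0;
    }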
*/ -int mls_compute_context_len(struct context * context) +int mls_compute_context_len(struct context *context) { int i, l, len, head, prev; char *nm; @@ -47,11 +47,11 @@ int mls_compute_context_len(struct context * context) head = -2; prev = -2; e = &context->range.level[l].cat; - ebitmap_for_each_positive_bit(e, node, i) + ebitmap_for_each_positive_bit (e, node, i) { if ( i - prev > 1 ) { - /* one or more negative bits are skipped */ + /* one or more negative bits are skipped */ if ( head != prev ) { nm = policydb.p_cat_val_to_name[prev]; @@ -70,7 +70,7 @@ int mls_compute_context_len(struct context * context) } if ( l == 0 ) { - if ( mls_level_eq(&context->range.level[0], + if ( mls_level_eq(&context->range.level[0], &context->range.level[1]) ) break; else @@ -103,16 +103,19 @@ void mls_sid_to_context(struct context *context, char **scontext) for ( l = 0; l < 2; l++ ) { - memcpy(scontextp, - policydb.p_sens_val_to_name[context->range.level[l].sens - 1], - strlen(policydb.p_sens_val_to_name[context->range.level[l].sens - 1])+1); + memcpy( + scontextp, + policydb.p_sens_val_to_name[context->range.level[l].sens - 1], + strlen( + policydb.p_sens_val_to_name[context->range.level[l].sens - 1]) + + 1); scontextp += strlen(scontextp); /* categories */ head = -2; prev = -2; e = &context->range.level[l].cat; - ebitmap_for_each_positive_bit(e, node, i) + ebitmap_for_each_positive_bit (e, node, i) { if ( i - prev > 1 ) { @@ -124,7 +127,7 @@ void mls_sid_to_context(struct context *context, char **scontext) else *scontextp++ = ','; nm = policydb.p_cat_val_to_name[prev]; - memcpy(scontextp, nm, strlen(nm)+1); + memcpy(scontextp, nm, strlen(nm) + 1); scontextp += strlen(nm); } if ( prev < 0 ) @@ -132,7 +135,7 @@ void mls_sid_to_context(struct context *context, char **scontext) else *scontextp++ = ','; nm = policydb.p_cat_val_to_name[i]; - memcpy(scontextp, nm, strlen(nm)+1); + memcpy(scontextp, nm, strlen(nm) + 1); scontextp += strlen(nm); head = i; } @@ -146,14 +149,14 @@ void mls_sid_to_context(struct context *context, char **scontext) else *scontextp++ = ','; nm = policydb.p_cat_val_to_name[prev]; - memcpy(scontextp, nm, strlen(nm)+1); + memcpy(scontextp, nm, strlen(nm) + 1); scontextp += strlen(nm); } if ( l == 0 ) { if ( mls_level_eq(&context->range.level[0], - &context->range.level[1]) ) + &context->range.level[1]) ) break; else { @@ -175,12 +178,12 @@ int mls_level_isvalid(struct policydb *p, struct mls_level *l) if ( !l->sens || l->sens > p->p_levels.nprim ) return 0; - levdatum = hashtab_search(p->p_levels.table, - p->p_sens_val_to_name[l->sens - 1]); + levdatum = + hashtab_search(p->p_levels.table, p->p_sens_val_to_name[l->sens - 1]); if ( !levdatum ) return 0; - ebitmap_for_each_positive_bit(&l->cat, node, i) + ebitmap_for_each_positive_bit (&l->cat, node, i) { if ( i > p->p_cats.nprim ) return 0; @@ -199,9 +202,9 @@ int mls_level_isvalid(struct policydb *p, struct mls_level *l) int mls_range_isvalid(struct policydb *p, struct mls_range *r) { - return ( mls_level_isvalid(p, &r->level[0]) && - mls_level_isvalid(p, &r->level[1]) && - mls_level_dom(&r->level[1], &r->level[0])); + return (mls_level_isvalid(p, &r->level[0]) && + mls_level_isvalid(p, &r->level[1]) && + mls_level_dom(&r->level[1], &r->level[0])); } /* @@ -246,10 +249,9 @@ int mls_context_isvalid(struct policydb *p, struct context *c) * Policy read-lock must be held for sidtab lookup. 
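mls_sid_to_context() above compresses runs of consecutive categories into "cA.cB" ranges and separates disjoint runs with commas, producing strings such as s0:c0.c3,c5. A standalone sketch of that run compression over a plain array of category numbers, assuming the input is sorted:

    #include <stdio.h>

    /* Mirror of the category printing: consecutive values collapse into
     * "cA.cB" ("cA,cB" for a pair), ':' / ',' separate runs from what
     * came before. */
    static void print_cats(const int *cat, int n)
    {
        int head = -2, prev = -2;

        for ( int idx = 0; idx < n; idx++ )
        {
            int i = cat[idx];

            if ( i - prev > 1 )                  /* a gap: close the old run */
            {
                if ( head != prev )
                    printf("%cc%d", prev - head > 1 ? '.' : ',', prev);
                printf("%cc%d", prev < 0 ? ':' : ',', i);
                head = i;
            }
            prev = i;
        }
        if ( head != prev )
            printf("%cc%d", prev - head > 1 ? '.' : ',', prev);
        printf("\n");
    }

    int main(void)
    {
        int cats[] = { 0, 1, 2, 3, 5 };

        printf("s0");              /* sensitivity printed by the caller */
        print_cats(cats, 5);       /* -> s0:c0.c3,c5 */
        return 0;
    }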
* */ -int mls_context_to_sid(char oldc, char **scontext, - struct context *context, struct sidtab *s) +int mls_context_to_sid(char oldc, char **scontext, struct context *context, + struct sidtab *s) { - char delim; char *scontextp, *p, *rngptr; struct level_datum *levdatum; @@ -312,7 +314,7 @@ int mls_context_to_sid(char oldc, char **scontext, } rc = ebitmap_set_bit(&context->range.level[l].cat, - catdatum->value - 1, 1); + catdatum->value - 1, 1); if ( rc ) goto out; @@ -336,7 +338,8 @@ int mls_context_to_sid(char oldc, char **scontext, for ( i = catdatum->value; i < rngdatum->value; i++ ) { - rc = ebitmap_set_bit(&context->range.level[l].cat, i, 1); + rc = + ebitmap_set_bit(&context->range.level[l].cat, i, 1); if ( rc ) goto out; } @@ -365,7 +368,7 @@ int mls_context_to_sid(char oldc, char **scontext, { context->range.level[1].sens = context->range.level[0].sens; rc = ebitmap_cpy(&context->range.level[1].cat, - &context->range.level[0].cat); + &context->range.level[0].cat); if ( rc ) goto out; } @@ -379,7 +382,7 @@ out: * Copies the MLS range `range' into `context'. */ static inline int mls_range_set(struct context *context, - struct mls_range *range) + struct mls_range *range) { int l, rc = 0; @@ -387,8 +390,7 @@ static inline int mls_range_set(struct context *context, for ( l = 0; l < 2; l++ ) { context->range.level[l].sens = range->level[l].sens; - rc = ebitmap_cpy(&context->range.level[l].cat, - &range->level[l].cat); + rc = ebitmap_cpy(&context->range.level[l].cat, &range->level[l].cat); if ( rc ) break; } @@ -402,7 +404,7 @@ static inline int mls_range_set(struct context *context, * policy `oldp' to the values specified in the policy `newp'. */ int mls_convert_context(struct policydb *oldp, struct policydb *newp, - struct context *c) + struct context *c) { struct level_datum *levdatum; struct cat_datum *catdatum; @@ -415,20 +417,21 @@ int mls_convert_context(struct policydb *oldp, struct policydb *newp, for ( l = 0; l < 2; l++ ) { - levdatum = hashtab_search(newp->p_levels.table, - oldp->p_sens_val_to_name[c->range.level[l].sens - 1]); + levdatum = hashtab_search( + newp->p_levels.table, + oldp->p_sens_val_to_name[c->range.level[l].sens - 1]); if ( !levdatum ) return -EINVAL; c->range.level[l].sens = levdatum->level->sens; ebitmap_init(&bitmap); - ebitmap_for_each_positive_bit(&c->range.level[l].cat, node, i) + ebitmap_for_each_positive_bit (&c->range.level[l].cat, node, i) { int rc; - catdatum = hashtab_search(newp->p_cats.table, - oldp->p_cat_val_to_name[i]); + catdatum = + hashtab_search(newp->p_cats.table, oldp->p_cat_val_to_name[i]); if ( !catdatum ) return -EINVAL; rc = ebitmap_set_bit(&bitmap, catdatum->value - 1, 1); @@ -443,42 +446,40 @@ int mls_convert_context(struct policydb *oldp, struct policydb *newp, } int mls_compute_sid(struct context *scontext, struct context *tcontext, - u16 tclass, u32 specified, struct context *newcontext) + u16 tclass, u32 specified, struct context *newcontext) { struct range_trans *rtr; if ( !flask_mls_enabled ) return 0; - switch ( specified ) + switch (specified) { - case AVTAB_TRANSITION: - /* Look for a range transition rule. */ - for (rtr = policydb.range_tr; rtr; rtr = rtr->next) + case AVTAB_TRANSITION: + /* Look for a range transition rule. 
*/ + for ( rtr = policydb.range_tr; rtr; rtr = rtr->next ) + { + if ( rtr->source_type == scontext->type && + rtr->target_type == tcontext->type && + rtr->target_class == tclass ) { - if (rtr->source_type == scontext->type && - rtr->target_type == tcontext->type && - rtr->target_class == tclass) - { - /* Set the range from the rule */ - return mls_range_set(newcontext, - &rtr->target_range); - } + /* Set the range from the rule */ + return mls_range_set(newcontext, &rtr->target_range); } - /* Fallthrough */ - case AVTAB_CHANGE: - if ( tclass == SECCLASS_DOMAIN ) - /* Use the process MLS attributes. */ - return mls_context_cpy(newcontext, scontext); - else - /* Use the process effective MLS attributes. */ - return mls_context_cpy_low(newcontext, scontext); - case AVTAB_MEMBER: + } + /* Fallthrough */ + case AVTAB_CHANGE: + if ( tclass == SECCLASS_DOMAIN ) + /* Use the process MLS attributes. */ + return mls_context_cpy(newcontext, scontext); + else /* Use the process effective MLS attributes. */ return mls_context_cpy_low(newcontext, scontext); - default: - return -EINVAL; + case AVTAB_MEMBER: + /* Use the process effective MLS attributes. */ + return mls_context_cpy_low(newcontext, scontext); + default: + return -EINVAL; } return -EINVAL; } - diff --git a/xen/xsm/flask/ss/policydb.c b/xen/xsm/flask/ss/policydb.c index 9426164353..fdd52bbd57 100644 --- a/xen/xsm/flask/ss/policydb.c +++ b/xen/xsm/flask/ss/policydb.c @@ -9,7 +9,8 @@ * * Support for enhanced MLS infrastructure. * - * Updated: Frank Mayer and Karl MacMillan + * Updated: Frank Mayer and Karl MacMillan + * * * Added conditional policy language extensions * @@ -38,31 +39,19 @@ #ifdef DEBUG_HASHES static char *symtab_name[SYM_NUM] = { - "common prefixes", - "classes", - "roles", - "types", - "users", - "bools", - "levels", - "categories", + "common prefixes", "classes", "roles", "types", + "users", "bools", "levels", "categories", }; #endif int flask_mls_enabled = 0; static unsigned int symtab_sizes[SYM_NUM] = { - 2, - 32, - 16, - 512, - 128, - 16, - 16, - 16, + 2, 32, 16, 512, 128, 16, 16, 16, }; -struct policydb_compat_info { +struct policydb_compat_info +{ int version; int sym_num; int ocon_num; @@ -72,86 +61,86 @@ struct policydb_compat_info { /* These need to be updated if SYM_NUM or OCON_NUM changes */ static struct policydb_compat_info policydb_compat[] = { { - .version = POLICYDB_VERSION_BASE, - .sym_num = SYM_NUM - 3, - .ocon_num = 4, - .target_type = TARGET_XEN_OLD, + .version = POLICYDB_VERSION_BASE, + .sym_num = SYM_NUM - 3, + .ocon_num = 4, + .target_type = TARGET_XEN_OLD, }, { - .version = POLICYDB_VERSION_BOOL, - .sym_num = SYM_NUM - 2, - .ocon_num = 4, - .target_type = TARGET_XEN_OLD, + .version = POLICYDB_VERSION_BOOL, + .sym_num = SYM_NUM - 2, + .ocon_num = 4, + .target_type = TARGET_XEN_OLD, }, { - .version = POLICYDB_VERSION_IPV6, - .sym_num = SYM_NUM - 2, - .ocon_num = 5, - .target_type = TARGET_XEN_OLD, + .version = POLICYDB_VERSION_IPV6, + .sym_num = SYM_NUM - 2, + .ocon_num = 5, + .target_type = TARGET_XEN_OLD, }, { - .version = POLICYDB_VERSION_NLCLASS, - .sym_num = SYM_NUM - 2, - .ocon_num = 5, - .target_type = TARGET_XEN_OLD, + .version = POLICYDB_VERSION_NLCLASS, + .sym_num = SYM_NUM - 2, + .ocon_num = 5, + .target_type = TARGET_XEN_OLD, }, { - .version = POLICYDB_VERSION_MLS, - .sym_num = SYM_NUM, - .ocon_num = 5, - .target_type = TARGET_XEN_OLD, + .version = POLICYDB_VERSION_MLS, + .sym_num = SYM_NUM, + .ocon_num = 5, + .target_type = TARGET_XEN_OLD, }, { - .version = POLICYDB_VERSION_AVTAB, - .sym_num 
= SYM_NUM, - .ocon_num = 5, - .target_type = TARGET_XEN_OLD, + .version = POLICYDB_VERSION_AVTAB, + .sym_num = SYM_NUM, + .ocon_num = 5, + .target_type = TARGET_XEN_OLD, }, { - .version = POLICYDB_VERSION_RANGETRANS, - .sym_num = SYM_NUM, - .ocon_num = 5, - .target_type = TARGET_XEN_OLD, + .version = POLICYDB_VERSION_RANGETRANS, + .sym_num = SYM_NUM, + .ocon_num = 5, + .target_type = TARGET_XEN_OLD, }, { - .version = POLICYDB_VERSION_POLCAP, - .sym_num = SYM_NUM, - .ocon_num = 5, - .target_type = TARGET_XEN_OLD, + .version = POLICYDB_VERSION_POLCAP, + .sym_num = SYM_NUM, + .ocon_num = 5, + .target_type = TARGET_XEN_OLD, }, { - .version = POLICYDB_VERSION_PERMISSIVE, - .sym_num = SYM_NUM, - .ocon_num = 5, - .target_type = TARGET_XEN_OLD, + .version = POLICYDB_VERSION_PERMISSIVE, + .sym_num = SYM_NUM, + .ocon_num = 5, + .target_type = TARGET_XEN_OLD, }, { - .version = POLICYDB_VERSION_BOUNDARY, - .sym_num = SYM_NUM, - .ocon_num = OCON_NUM_OLD, - .target_type = TARGET_XEN_OLD, + .version = POLICYDB_VERSION_BOUNDARY, + .sym_num = SYM_NUM, + .ocon_num = OCON_NUM_OLD, + .target_type = TARGET_XEN_OLD, }, { - .version = POLICYDB_VERSION_BOUNDARY, - .sym_num = SYM_NUM, - .ocon_num = OCON_DEVICE + 1, - .target_type = TARGET_XEN, + .version = POLICYDB_VERSION_BOUNDARY, + .sym_num = SYM_NUM, + .ocon_num = OCON_DEVICE + 1, + .target_type = TARGET_XEN, }, { - .version = POLICYDB_VERSION_XEN_DEVICETREE, - .sym_num = SYM_NUM, - .ocon_num = OCON_DTREE + 1, - .target_type = TARGET_XEN, + .version = POLICYDB_VERSION_XEN_DEVICETREE, + .sym_num = SYM_NUM, + .ocon_num = OCON_DTREE + 1, + .target_type = TARGET_XEN, }, }; static struct policydb_compat_info *policydb_lookup_compat(int version, - int target) + int target) { int i; struct policydb_compat_info *info = NULL; - for ( i = 0; i < sizeof(policydb_compat)/sizeof(*info); i++ ) + for ( i = 0; i < sizeof(policydb_compat) / sizeof(*info); i++ ) { if ( policydb_compat[i].version == version && policydb_compat[i].target_type == target ) @@ -184,13 +173,13 @@ static int roles_init(struct policydb *p) rc = -EINVAL; goto out_free_role; } - key = xmalloc_array(char, strlen(OBJECT_R)+1); + key = xmalloc_array(char, strlen(OBJECT_R) + 1); if ( !key ) { rc = -ENOMEM; goto out_free_role; } - strlcpy(key, OBJECT_R, strlen(OBJECT_R)+1); + strlcpy(key, OBJECT_R, strlen(OBJECT_R) + 1); rc = hashtab_insert(p->p_roles.table, key, role); if ( rc ) goto out_free_key; @@ -283,9 +272,8 @@ static int role_index(void *key, void *datum, void *datap) role = datum; p = datap; - if ( !role->value - || role->value > p->p_roles.nprim - || role->bounds > p->p_roles.nprim ) + if ( !role->value || role->value > p->p_roles.nprim || + role->bounds > p->p_roles.nprim ) return -EINVAL; p->p_role_val_to_name[role->value - 1] = key; p->role_val_to_struct[role->value - 1] = role; @@ -302,9 +290,8 @@ static int type_index(void *key, void *datum, void *datap) if ( typdatum->primary ) { - if ( !typdatum->value - || typdatum->value > p->p_types.nprim - || typdatum->bounds > p->p_types.nprim ) + if ( !typdatum->value || typdatum->value > p->p_types.nprim || + typdatum->bounds > p->p_types.nprim ) return -EINVAL; p->p_type_val_to_name[typdatum->value - 1] = key; p->type_val_to_struct[typdatum->value - 1] = typdatum; @@ -320,9 +307,8 @@ static int user_index(void *key, void *datum, void *datap) usrdatum = datum; p = datap; - if ( !usrdatum->value - || usrdatum->value > p->p_users.nprim - || usrdatum->bounds > p->p_users.nprim ) + if ( !usrdatum->value || usrdatum->value > p->p_users.nprim || + 
usrdatum->bounds > p->p_users.nprim ) return -EINVAL; p->p_user_val_to_name[usrdatum->value - 1] = key; p->user_val_to_struct[usrdatum->value - 1] = usrdatum; @@ -339,8 +325,8 @@ static int sens_index(void *key, void *datum, void *datap) if ( !levdatum->isalias ) { - if ( !levdatum->level->sens || levdatum->level->sens > - p->p_levels.nprim ) + if ( !levdatum->level->sens || + levdatum->level->sens > p->p_levels.nprim ) return -EINVAL; p->p_sens_val_to_name[levdatum->level->sens - 1] = key; } @@ -366,16 +352,9 @@ static int cat_index(void *key, void *datum, void *datap) return 0; } -static int (*index_f[SYM_NUM]) (void *key, void *datum, void *datap) = -{ - common_index, - class_index, - role_index, - type_index, - user_index, - cond_index_bool, - sens_index, - cat_index, +static int (*index_f[SYM_NUM])(void *key, void *datum, void *datap) = { + common_index, class_index, role_index, type_index, + user_index, cond_index_bool, sens_index, cat_index, }; /* @@ -396,8 +375,7 @@ static int policydb_index_classes(struct policydb *p) goto out; } - p->p_class_val_to_name = - xmalloc_array(char *, p->p_classes.nprim); + p->p_class_val_to_name = xmalloc_array(char *, p->p_classes.nprim); if ( !p->p_class_val_to_name ) { rc = -ENOMEM; @@ -421,8 +399,9 @@ static void symtab_hash_eval(struct symtab *s) hashtab_stat(h, &info); printk(KERN_INFO "%s: %d entries and %d/%d buckets used, " - "longest chain length %d\n", symtab_name[i], h->nel, - info.slots_used, h->size, info.max_chain_len); + "longest chain length %d\n", + symtab_name[i], h->nel, info.slots_used, h->size, + info.max_chain_len); } } #endif @@ -438,14 +417,15 @@ static int policydb_index_others(struct policydb *p) int i, rc = 0; printk(KERN_INFO "Flask: %d users, %d roles, %d types, %d bools", - p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, p->p_bools.nprim); + p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, + p->p_bools.nprim); if ( flask_mls_enabled ) printk(", %d sens, %d cats", p->p_levels.nprim, p->p_cats.nprim); printk("\n"); - printk(KERN_INFO "Flask: %d classes, %d rules\n", - p->p_classes.nprim, p->te_avtab.nel); + printk(KERN_INFO "Flask: %d classes, %d rules\n", p->p_classes.nprim, + p->te_avtab.nel); #ifdef DEBUG_HASHES avtab_hash_eval(&p->te_avtab, "rules"); @@ -484,8 +464,7 @@ static int policydb_index_others(struct policydb *p) for ( i = SYM_ROLES; i < SYM_NUM; i++ ) { - p->sym_val_to_name[i] = - xmalloc_array(char *, p->symtab[i].nprim); + p->sym_val_to_name[i] = xmalloc_array(char *, p->symtab[i].nprim); if ( !p->sym_val_to_name[i] ) { rc = -ENOMEM; @@ -624,16 +603,9 @@ static int cat_destroy(void *key, void *datum, void *p) return 0; } -static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) = -{ - common_destroy, - class_destroy, - role_destroy, - type_destroy, - user_destroy, - cond_destroy_bool, - sens_destroy, - cat_destroy, +static int (*destroy_f[SYM_NUM])(void *key, void *datum, void *datap) = { + common_destroy, class_destroy, role_destroy, type_destroy, + user_destroy, cond_destroy_bool, sens_destroy, cat_destroy, }; static void ocontext_destroy(struct ocontext *c, int i) @@ -678,7 +650,7 @@ void policydb_destroy(struct policydb *p) { ctmp = c; c = c->next; - ocontext_destroy(ctmp,i); + ocontext_destroy(ctmp, i); } p->ocontexts[i] = NULL; } @@ -692,14 +664,14 @@ void policydb_destroy(struct policydb *p) } xfree(ltr); - for ( ra = p->role_allow; ra; ra = ra -> next ) + for ( ra = p->role_allow; ra; ra = ra->next ) { xfree(lra); lra = ra; } xfree(lra); - for ( rt = p->range_tr; rt; rt = rt 
-> next ) + for ( rt = p->range_tr; rt; rt = rt->next ) { if ( lrt ) { @@ -749,14 +721,16 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s) if ( !c->context.user ) { printk(KERN_ERR "Flask: SID %s was never " - "defined.\n", c->u.name); + "defined.\n", + c->u.name); rc = -EINVAL; goto out; } if ( sidtab_insert(s, c->sid, &c->context) ) { printk(KERN_ERR "Flask: unable to load initial " - "SID %s.\n", c->u.name); + "SID %s.\n", + c->u.name); rc = -EINVAL; goto out; } @@ -869,7 +843,7 @@ static int mls_read_range_helper(struct mls_range *r, void *fp) if ( rc ) { printk(KERN_ERR "Flask: mls: error reading low " - "categories\n"); + "categories\n"); goto out; } if ( items > 1 ) @@ -878,7 +852,7 @@ static int mls_read_range_helper(struct mls_range *r, void *fp) if ( rc ) { printk(KERN_ERR "Flask: mls: error reading high " - "categories\n"); + "categories\n"); goto bad_high; } } @@ -905,7 +879,7 @@ bad_high: * from a policydb binary representation file. */ static int context_read_and_validate(struct context *c, struct policydb *p, - void *fp) + void *fp) { __le32 buf[3]; int rc; @@ -924,7 +898,7 @@ static int context_read_and_validate(struct context *c, struct policydb *p, if ( mls_read_range_helper(&c->range, fp) ) { printk(KERN_ERR "Flask: error reading MLS range of " - "context\n"); + "context\n"); rc = -EINVAL; goto out; } @@ -1095,51 +1069,51 @@ static int read_cons_helper(struct policydb *p, struct constraint_node **nodep, e->attr = le32_to_cpu(buf[1]); e->op = le32_to_cpu(buf[2]); - switch ( e->expr_type ) + switch (e->expr_type) { - case CEXPR_NOT: - if ( depth < 0 ) - return -EINVAL; + case CEXPR_NOT: + if ( depth < 0 ) + return -EINVAL; break; - case CEXPR_AND: - case CEXPR_OR: - if ( depth < 1 ) - return -EINVAL; - depth--; + case CEXPR_AND: + case CEXPR_OR: + if ( depth < 1 ) + return -EINVAL; + depth--; break; - case CEXPR_ATTR: - if ( depth == (CEXPR_MAXDEPTH - 1) ) - return -EINVAL; - depth++; + case CEXPR_ATTR: + if ( depth == (CEXPR_MAXDEPTH - 1) ) + return -EINVAL; + depth++; break; - case CEXPR_NAMES: - if ( !allowxtarget && (e->attr & CEXPR_XTARGET) ) - return -EINVAL; - if ( depth == (CEXPR_MAXDEPTH - 1) ) + case CEXPR_NAMES: + if ( !allowxtarget && (e->attr & CEXPR_XTARGET) ) + return -EINVAL; + if ( depth == (CEXPR_MAXDEPTH - 1) ) + return -EINVAL; + depth++; + if ( ebitmap_read(&e->names, fp) ) + return -EINVAL; + if ( p->policyvers >= POLICYDB_VERSION_CONSTRAINT_NAMES ) + { + struct ebitmap dummy; + ebitmap_init(&dummy); + if ( ebitmap_read(&dummy, fp) ) return -EINVAL; - depth++; - if ( ebitmap_read(&e->names, fp) ) + ebitmap_destroy(&dummy); + + ebitmap_init(&dummy); + if ( ebitmap_read(&dummy, fp) ) return -EINVAL; - if ( p->policyvers >= POLICYDB_VERSION_CONSTRAINT_NAMES ) - { - struct ebitmap dummy; - ebitmap_init(&dummy); - if ( ebitmap_read(&dummy, fp) ) - return -EINVAL; - ebitmap_destroy(&dummy); - - ebitmap_init(&dummy); - if ( ebitmap_read(&dummy, fp) ) - return -EINVAL; - ebitmap_destroy(&dummy); - - rc = next_entry(buf, fp, sizeof(u32)); - if ( rc < 0 ) - return rc; - } + ebitmap_destroy(&dummy); + + rc = next_entry(buf, fp, sizeof(u32)); + if ( rc < 0 ) + return rc; + } break; - default: - return -EINVAL; + default: + return -EINVAL; } le = e; } @@ -1166,7 +1140,7 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp) goto out; } - rc = next_entry(buf, fp, sizeof(u32)*6); + rc = next_entry(buf, fp, sizeof(u32) * 6); if ( rc < 0 ) goto bad; @@ -1195,7 +1169,8 @@ static int class_read(struct policydb *p, struct hashtab *h, 
void *fp) if ( len2 ) { - printk(KERN_ERR "Flask: classes with common prefixes are not supported\n"); + printk(KERN_ERR + "Flask: classes with common prefixes are not supported\n"); rc = -EINVAL; goto bad; } @@ -1303,7 +1278,7 @@ static int role_read(struct policydb *p, struct hashtab *h, void *fp) if ( role->value != OBJECT_R_VAL ) { printk(KERN_ERR "Role %s has wrong value %d\n", OBJECT_R, - role->value); + role->value); rc = -EINVAL; goto bad; } @@ -1384,7 +1359,6 @@ bad: goto out; } - /* * Read a MLS level structure from a policydb binary * representation file. @@ -1579,16 +1553,10 @@ bad: goto out; } -static int (*read_f[SYM_NUM]) (struct policydb *p, struct hashtab *h, void *fp) = -{ - common_read, - class_read, - role_read, - type_read, - user_read, - cond_read_bool, - sens_read, - cat_read, +static int (*read_f[SYM_NUM])(struct policydb *p, struct hashtab *h, + void *fp) = { + common_read, class_read, role_read, type_read, + user_read, cond_read_bool, sens_read, cat_read, }; static int user_bounds_sanity_check(void *key, void *datum, void *datap) @@ -1598,7 +1566,7 @@ static int user_bounds_sanity_check(void *key, void *datum, void *datap) int depth = 0; upper = user = datum; - while (upper->bounds) + while ( upper->bounds ) { struct ebitmap_node *node; unsigned long bit; @@ -1606,20 +1574,19 @@ static int user_bounds_sanity_check(void *key, void *datum, void *datap) if ( ++depth == POLICYDB_BOUNDS_MAXDEPTH ) { printk(KERN_ERR "Flask: user %s: " - "too deep or looped boundary", - (char *) key); + "too deep or looped boundary", + (char *)key); return -EINVAL; } upper = p->user_val_to_struct[upper->bounds - 1]; - ebitmap_for_each_positive_bit(&user->roles, node, bit) + ebitmap_for_each_positive_bit (&user->roles, node, bit) { if ( ebitmap_get_bit(&upper->roles, bit) ) continue; - printk(KERN_ERR - "Flask: boundary violated policy: " - "user=%s role=%s bounds=%s\n", + printk(KERN_ERR "Flask: boundary violated policy: " + "user=%s role=%s bounds=%s\n", p->p_user_val_to_name[user->value - 1], p->p_role_val_to_name[bit], p->p_user_val_to_name[upper->value - 1]); @@ -1638,7 +1605,7 @@ static int role_bounds_sanity_check(void *key, void *datum, void *datap) int depth = 0; upper = role = datum; - while (upper->bounds) + while ( upper->bounds ) { struct ebitmap_node *node; unsigned long bit; @@ -1646,20 +1613,19 @@ static int role_bounds_sanity_check(void *key, void *datum, void *datap) if ( ++depth == POLICYDB_BOUNDS_MAXDEPTH ) { printk(KERN_ERR "Flask: role %s: " - "too deep or looped bounds\n", - (char *) key); + "too deep or looped bounds\n", + (char *)key); return -EINVAL; } upper = p->role_val_to_struct[upper->bounds - 1]; - ebitmap_for_each_positive_bit(&role->types, node, bit) + ebitmap_for_each_positive_bit (&role->types, node, bit) { if ( ebitmap_get_bit(&upper->types, bit) ) continue; - printk(KERN_ERR - "Flask: boundary violated policy: " - "role=%s type=%s bounds=%s\n", + printk(KERN_ERR "Flask: boundary violated policy: " + "role=%s type=%s bounds=%s\n", p->p_role_val_to_name[role->value - 1], p->p_type_val_to_name[bit], p->p_role_val_to_name[upper->value - 1]); @@ -1678,13 +1644,13 @@ static int type_bounds_sanity_check(void *key, void *datum, void *datap) int depth = 0; upper = type = datum; - while (upper->bounds) + while ( upper->bounds ) { if ( ++depth == POLICYDB_BOUNDS_MAXDEPTH ) { printk(KERN_ERR "Flask: type %s: " - "too deep or looped boundary\n", - (char *) key); + "too deep or looped boundary\n", + (char *)key); return -EINVAL; } @@ -1692,9 +1658,8 @@ static int 
type_bounds_sanity_check(void *key, void *datum, void *datap) if ( upper->attribute ) { printk(KERN_ERR "Flask: type %s: " - "bounded by attribute %s", - (char *) key, - p->p_type_val_to_name[upper->value - 1]); + "bounded by attribute %s", + (char *)key, p->p_type_val_to_name[upper->value - 1]); return -EINVAL; } } @@ -1709,18 +1674,15 @@ static int policydb_bounds_sanity_check(struct policydb *p) if ( p->policyvers < POLICYDB_VERSION_BOUNDARY ) return 0; - rc = hashtab_map(p->p_users.table, - user_bounds_sanity_check, p); + rc = hashtab_map(p->p_users.table, user_bounds_sanity_check, p); if ( rc ) return rc; - rc = hashtab_map(p->p_roles.table, - role_bounds_sanity_check, p); + rc = hashtab_map(p->p_roles.table, role_bounds_sanity_check, p); if ( rc ) return rc; - rc = hashtab_map(p->p_types.table, - type_bounds_sanity_check, p); + rc = hashtab_map(p->p_types.table, type_bounds_sanity_check, p); if ( rc ) return rc; @@ -1751,14 +1713,14 @@ int policydb_read(struct policydb *p, void *fp) goto out; /* Read the magic number and string length. */ - rc = next_entry(buf, fp, sizeof(u32)* 2); + rc = next_entry(buf, fp, sizeof(u32) * 2); if ( rc < 0 ) goto bad; if ( le32_to_cpu(buf[0]) != POLICYDB_MAGIC ) { printk(KERN_ERR "Flask: policydb magic number %#x does " - "not match expected magic number %#x\n", + "not match expected magic number %#x\n", le32_to_cpu(buf[0]), POLICYDB_MAGIC); goto bad; } @@ -1767,7 +1729,7 @@ int policydb_read(struct policydb *p, void *fp) if ( len != strlen(POLICYDB_STRING) ) { printk(KERN_ERR "Flask: policydb string length %d does not " - "match expected length %zu\n", + "match expected length %zu\n", len, strlen(POLICYDB_STRING)); goto bad; } @@ -1775,7 +1737,8 @@ int policydb_read(struct policydb *p, void *fp) if ( !policydb_str ) { printk(KERN_ERR "Flask: unable to allocate memory for policydb " - "string of length %d\n", len); + "string of length %d\n", + len); rc = -ENOMEM; goto bad; } @@ -1802,18 +1765,18 @@ int policydb_read(struct policydb *p, void *fp) policydb_str = NULL; /* Read the version, config, and table sizes. 
*/ - rc = next_entry(buf, fp, sizeof(u32)*4); + rc = next_entry(buf, fp, sizeof(u32) * 4); if ( rc < 0 ) goto bad; p->policyvers = le32_to_cpu(buf[0]); if ( p->policyvers < POLICYDB_VERSION_MIN || - p->policyvers > POLICYDB_VERSION_MAX ) + p->policyvers > POLICYDB_VERSION_MAX ) { - printk(KERN_ERR "Flask: policydb version %d does not match " - "my version range %d-%d\n", - le32_to_cpu(buf[0]), POLICYDB_VERSION_MIN, POLICYDB_VERSION_MAX); - goto bad; + printk(KERN_ERR "Flask: policydb version %d does not match " + "my version range %d-%d\n", + le32_to_cpu(buf[0]), POLICYDB_VERSION_MIN, POLICYDB_VERSION_MAX); + goto bad; } if ( (le32_to_cpu(buf[1]) & POLICYDB_CONFIG_MLS) ) @@ -1821,7 +1784,7 @@ int policydb_read(struct policydb *p, void *fp) if ( ss_initialized && !flask_mls_enabled ) { printk(KERN_ERR "Cannot switch between non-MLS and MLS " - "policies\n"); + "policies\n"); goto bad; } flask_mls_enabled = 1; @@ -1830,7 +1793,8 @@ int policydb_read(struct policydb *p, void *fp) if ( p->policyvers < POLICYDB_VERSION_MLS ) { printk(KERN_ERR "security policydb version %d (MLS) " - "not backwards compatible\n", p->policyvers); + "not backwards compatible\n", + p->policyvers); goto bad; } } @@ -1839,7 +1803,7 @@ int policydb_read(struct policydb *p, void *fp) if ( ss_initialized && flask_mls_enabled ) { printk(KERN_ERR "Cannot switch between MLS and non-MLS " - "policies\n"); + "policies\n"); goto bad; } } @@ -1857,7 +1821,8 @@ int policydb_read(struct policydb *p, void *fp) if ( !info ) { printk(KERN_ERR "Flask: unable to find policy compat info " - "for version %d target %d\n", p->policyvers, p->target_type); + "for version %d target %d\n", + p->policyvers, p->target_type); goto bad; } @@ -1865,15 +1830,15 @@ int policydb_read(struct policydb *p, void *fp) le32_to_cpu(buf[3]) != info->ocon_num ) { printk(KERN_ERR "Flask: policydb table sizes (%d,%d) do " - "not match mine (%d,%d)\n", le32_to_cpu(buf[2]), - le32_to_cpu(buf[3]), - info->sym_num, info->ocon_num); + "not match mine (%d,%d)\n", + le32_to_cpu(buf[2]), le32_to_cpu(buf[3]), info->sym_num, + info->ocon_num); goto bad; } for ( i = 0; i < info->sym_num; i++ ) { - rc = next_entry(buf, fp, sizeof(u32)*2); + rc = next_entry(buf, fp, sizeof(u32) * 2); if ( rc < 0 ) goto bad; nprim = le32_to_cpu(buf[0]); @@ -1917,9 +1882,9 @@ int policydb_read(struct policydb *p, void *fp) else p->role_tr = tr; if ( p->policyvers >= POLICYDB_VERSION_ROLETRANS ) - rc = next_entry(buf, fp, sizeof(u32)*4); + rc = next_entry(buf, fp, sizeof(u32) * 4); else - rc = next_entry(buf, fp, sizeof(u32)*3); + rc = next_entry(buf, fp, sizeof(u32) * 3); if ( rc < 0 ) goto bad; tr->role = le32_to_cpu(buf[0]); @@ -1952,7 +1917,7 @@ int policydb_read(struct policydb *p, void *fp) lra->next = ra; else p->role_allow = ra; - rc = next_entry(buf, fp, sizeof(u32)*2); + rc = next_entry(buf, fp, sizeof(u32) * 2); if ( rc < 0 ) goto bad; ra->role = le32_to_cpu(buf[0]); @@ -2005,7 +1970,7 @@ int policydb_read(struct policydb *p, void *fp) goto bad; } rc = -EINVAL; - switch ( i ) + switch (i) { case OCON_ISID: rc = next_entry(buf, fp, sizeof(u32)); @@ -2019,8 +1984,7 @@ int policydb_read(struct policydb *p, void *fp) case OCON_PIRQ: if ( p->target_type != TARGET_XEN ) { - printk(KERN_ERR - "Old xen policy does not support pirqcon"); + printk(KERN_ERR "Old xen policy does not support pirqcon"); goto bad; } rc = next_entry(buf, fp, sizeof(u32)); @@ -2035,10 +1999,10 @@ int policydb_read(struct policydb *p, void *fp) if ( p->target_type != TARGET_XEN ) { printk(KERN_ERR - "Old xen policy 
does not support ioportcon"); + "Old xen policy does not support ioportcon"); goto bad; } - rc = next_entry(buf, fp, sizeof(u32) *2); + rc = next_entry(buf, fp, sizeof(u32) * 2); if ( rc < 0 ) goto bad; c->u.ioport.low_ioport = le32_to_cpu(buf[0]); @@ -2047,11 +2011,14 @@ int policydb_read(struct policydb *p, void *fp) if ( rc ) goto bad; - if ( *pn || ( l && l->u.ioport.high_ioport >= c->u.ioport.low_ioport ) ) + if ( *pn || + (l && l->u.ioport.high_ioport >= c->u.ioport.low_ioport) ) { pn = &p->ocontexts[i]; l = *pn; - while ( l && l->u.ioport.high_ioport < c->u.ioport.low_ioport ) { + while ( l && + l->u.ioport.high_ioport < c->u.ioport.low_ioport ) + { pn = &l->next; l = *pn; } @@ -2062,14 +2029,13 @@ int policydb_read(struct policydb *p, void *fp) case OCON_IOMEM: if ( p->target_type != TARGET_XEN ) { - printk(KERN_ERR - "Old xen policy does not support iomemcon"); + printk(KERN_ERR "Old xen policy does not support iomemcon"); goto bad; } if ( p->policyvers >= POLICYDB_VERSION_XEN_DEVICETREE ) { u64 b64[2]; - rc = next_entry(b64, fp, sizeof(u64) *2); + rc = next_entry(b64, fp, sizeof(u64) * 2); if ( rc < 0 ) goto bad; c->u.iomem.low_iomem = le64_to_cpu(b64[0]); @@ -2077,7 +2043,7 @@ int policydb_read(struct policydb *p, void *fp) } else { - rc = next_entry(buf, fp, sizeof(u32) *2); + rc = next_entry(buf, fp, sizeof(u32) * 2); if ( rc < 0 ) goto bad; c->u.iomem.low_iomem = le32_to_cpu(buf[0]); @@ -2087,11 +2053,13 @@ int policydb_read(struct policydb *p, void *fp) if ( rc ) goto bad; - if ( *pn || ( l && l->u.iomem.high_iomem >= c->u.iomem.low_iomem ) ) + if ( *pn || + (l && l->u.iomem.high_iomem >= c->u.iomem.low_iomem) ) { pn = &p->ocontexts[i]; l = *pn; - while ( l && l->u.iomem.high_iomem < c->u.iomem.low_iomem ) { + while ( l && l->u.iomem.high_iomem < c->u.iomem.low_iomem ) + { pn = &l->next; l = *pn; } @@ -2103,7 +2071,7 @@ int policydb_read(struct policydb *p, void *fp) if ( p->target_type != TARGET_XEN ) { printk(KERN_ERR - "Old xen policy does not support pcidevicecon"); + "Old xen policy does not support pcidevicecon"); goto bad; } rc = next_entry(buf, fp, sizeof(u32)); @@ -2118,7 +2086,7 @@ int policydb_read(struct policydb *p, void *fp) if ( p->target_type != TARGET_XEN ) { printk(KERN_ERR - "Old xen policy does not support devicetreecon"); + "Old xen policy does not support devicetreecon"); goto bad; } rc = next_entry(buf, fp, sizeof(u32)); @@ -2127,7 +2095,7 @@ int policydb_read(struct policydb *p, void *fp) len = le32_to_cpu(buf[0]); rc = -ENOMEM; c->u.name = xmalloc_array(char, len + 1); - if (!c->u.name) + if ( !c->u.name ) goto bad; rc = next_entry(c->u.name, fp, len); if ( rc < 0 ) @@ -2191,7 +2159,8 @@ int policydb_read(struct policydb *p, void *fp) if ( rc < 0 ) goto bad; rt->target_class = le32_to_cpu(buf[0]); - } else + } + else rt->target_class = SECCLASS_DOMAIN; if ( !policydb_type_isvalid(p, rt->source_type) || !policydb_type_isvalid(p, rt->target_type) || @@ -2226,7 +2195,7 @@ int policydb_read(struct policydb *p, void *fp) } /* add the type itself as the degenerate case */ if ( ebitmap_set_bit(&p->type_attr_map[i], i, 1) ) - goto bad; + goto bad; } rc = policydb_bounds_sanity_check(p); diff --git a/xen/xsm/flask/ss/services.c b/xen/xsm/flask/ss/services.c index b59928ea8a..8e6abb118e 100644 --- a/xen/xsm/flask/ss/services.c +++ b/xen/xsm/flask/ss/services.c @@ -8,7 +8,8 @@ * * Support for enhanced MLS infrastructure. 
* - * Updated: Frank Mayer and Karl MacMillan + * Updated: Frank Mayer and Karl MacMillan + * * * Added conditional policy language extensions * @@ -79,13 +80,11 @@ static u32 latest_granting = 0; /* Forward declaration. */ static int context_struct_to_string(struct context *context, char **scontext, - u32 *scontext_len); + u32 *scontext_len); static int context_struct_compute_av(struct context *scontext, - struct context *tcontext, - u16 tclass, - u32 requested, - struct av_decision *avd); + struct context *tcontext, u16 tclass, + u32 requested, struct av_decision *avd); /* * Return the boolean value of a constraint expression @@ -99,8 +98,9 @@ static int context_struct_compute_av(struct context *scontext, * constraint_expr_eval should pass in NULL for xcontext. */ static int constraint_expr_eval(struct context *scontext, - struct context *tcontext, struct context *xcontext, - struct constraint_expr *cexpr) + struct context *tcontext, + struct context *xcontext, + struct constraint_expr *cexpr) { u32 val1, val2; struct context *c; @@ -112,166 +112,164 @@ static int constraint_expr_eval(struct context *scontext, for ( e = cexpr; e; e = e->next ) { - switch ( e->expr_type ) + switch (e->expr_type) { - case CEXPR_NOT: - BUG_ON(sp < 0); - s[sp] = !s[sp]; + case CEXPR_NOT: + BUG_ON(sp < 0); + s[sp] = !s[sp]; break; - case CEXPR_AND: - BUG_ON(sp < 1); - sp--; - s[sp] &= s[sp+1]; + case CEXPR_AND: + BUG_ON(sp < 1); + sp--; + s[sp] &= s[sp + 1]; break; - case CEXPR_OR: - BUG_ON(sp < 1); - sp--; - s[sp] |= s[sp+1]; + case CEXPR_OR: + BUG_ON(sp < 1); + sp--; + s[sp] |= s[sp + 1]; break; - case CEXPR_ATTR: - if ( sp == (CEXPR_MAXDEPTH-1) ) - return 0; - switch ( e->attr ) + case CEXPR_ATTR: + if ( sp == (CEXPR_MAXDEPTH - 1) ) + return 0; + switch (e->attr) { - case CEXPR_USER: - val1 = scontext->user; - val2 = tcontext->user; - break; - case CEXPR_TYPE: - val1 = scontext->type; - val2 = tcontext->type; - break; - case CEXPR_ROLE: - val1 = scontext->role; - val2 = tcontext->role; - r1 = policydb.role_val_to_struct[val1 - 1]; - r2 = policydb.role_val_to_struct[val2 - 1]; - switch ( e->op ) + case CEXPR_USER: + val1 = scontext->user; + val2 = tcontext->user; + break; + case CEXPR_TYPE: + val1 = scontext->type; + val2 = tcontext->type; + break; + case CEXPR_ROLE: + val1 = scontext->role; + val2 = tcontext->role; + r1 = policydb.role_val_to_struct[val1 - 1]; + r2 = policydb.role_val_to_struct[val2 - 1]; + switch (e->op) { - case CEXPR_DOM: - s[++sp] = ebitmap_get_bit(&r1->dominates, val2 - 1); + case CEXPR_DOM: + s[++sp] = ebitmap_get_bit(&r1->dominates, val2 - 1); continue; - case CEXPR_DOMBY: - s[++sp] = ebitmap_get_bit(&r2->dominates, val1 - 1); + case CEXPR_DOMBY: + s[++sp] = ebitmap_get_bit(&r2->dominates, val1 - 1); continue; - case CEXPR_INCOMP: - s[++sp] = ( !ebitmap_get_bit(&r1->dominates, - val2 - 1) && - !ebitmap_get_bit(&r2->dominates, - val1 - 1) ); + case CEXPR_INCOMP: + s[++sp] = (!ebitmap_get_bit(&r1->dominates, val2 - 1) && + !ebitmap_get_bit(&r2->dominates, val1 - 1)); continue; - default: + default: break; } break; - case CEXPR_L1L2: - l1 = &(scontext->range.level[0]); - l2 = &(tcontext->range.level[0]); - goto mls_ops; - case CEXPR_L1H2: - l1 = &(scontext->range.level[0]); - l2 = &(tcontext->range.level[1]); - goto mls_ops; - case CEXPR_H1L2: - l1 = &(scontext->range.level[1]); - l2 = &(tcontext->range.level[0]); - goto mls_ops; - case CEXPR_H1H2: - l1 = &(scontext->range.level[1]); - l2 = &(tcontext->range.level[1]); - goto mls_ops; - case CEXPR_L1H1: - l1 = 
&(scontext->range.level[0]); - l2 = &(scontext->range.level[1]); - goto mls_ops; - case CEXPR_L2H2: - l1 = &(tcontext->range.level[0]); - l2 = &(tcontext->range.level[1]); - goto mls_ops; -mls_ops: - switch ( e->op ) - { + case CEXPR_L1L2: + l1 = &(scontext->range.level[0]); + l2 = &(tcontext->range.level[0]); + goto mls_ops; + case CEXPR_L1H2: + l1 = &(scontext->range.level[0]); + l2 = &(tcontext->range.level[1]); + goto mls_ops; + case CEXPR_H1L2: + l1 = &(scontext->range.level[1]); + l2 = &(tcontext->range.level[0]); + goto mls_ops; + case CEXPR_H1H2: + l1 = &(scontext->range.level[1]); + l2 = &(tcontext->range.level[1]); + goto mls_ops; + case CEXPR_L1H1: + l1 = &(scontext->range.level[0]); + l2 = &(scontext->range.level[1]); + goto mls_ops; + case CEXPR_L2H2: + l1 = &(tcontext->range.level[0]); + l2 = &(tcontext->range.level[1]); + goto mls_ops; + mls_ops: + switch (e->op) + { case CEXPR_EQ: s[++sp] = mls_level_eq(l1, l2); - continue; + continue; case CEXPR_NEQ: s[++sp] = !mls_level_eq(l1, l2); - continue; + continue; case CEXPR_DOM: s[++sp] = mls_level_dom(l1, l2); - continue; + continue; case CEXPR_DOMBY: s[++sp] = mls_level_dom(l2, l1); - continue; + continue; case CEXPR_INCOMP: s[++sp] = mls_level_incomp(l2, l1); - continue; + continue; default: BUG(); return 0; - } - break; + } + break; default: BUG(); return 0; } - switch ( e->op ) + switch (e->op) { - case CEXPR_EQ: - s[++sp] = (val1 == val2); + case CEXPR_EQ: + s[++sp] = (val1 == val2); break; - case CEXPR_NEQ: - s[++sp] = (val1 != val2); + case CEXPR_NEQ: + s[++sp] = (val1 != val2); break; - default: - BUG(); - return 0; + default: + BUG(); + return 0; } break; - case CEXPR_NAMES: - if ( sp == (CEXPR_MAXDEPTH-1) ) - return 0; - c = scontext; - if ( e->attr & CEXPR_TARGET ) - c = tcontext; - else if ( e->attr & CEXPR_XTARGET ) - { - c = xcontext; - if ( !c ) - { - BUG(); - return 0; - } - } - if ( e->attr & CEXPR_USER ) - val1 = c->user; - else if ( e->attr & CEXPR_ROLE ) - val1 = c->role; - else if ( e->attr & CEXPR_TYPE ) - val1 = c->type; - else + case CEXPR_NAMES: + if ( sp == (CEXPR_MAXDEPTH - 1) ) + return 0; + c = scontext; + if ( e->attr & CEXPR_TARGET ) + c = tcontext; + else if ( e->attr & CEXPR_XTARGET ) + { + c = xcontext; + if ( !c ) { BUG(); return 0; } + } + if ( e->attr & CEXPR_USER ) + val1 = c->user; + else if ( e->attr & CEXPR_ROLE ) + val1 = c->role; + else if ( e->attr & CEXPR_TYPE ) + val1 = c->type; + else + { + BUG(); + return 0; + } - switch ( e->op ) + switch (e->op) { - case CEXPR_EQ: - s[++sp] = ebitmap_get_bit(&e->names, val1 - 1); + case CEXPR_EQ: + s[++sp] = ebitmap_get_bit(&e->names, val1 - 1); break; - case CEXPR_NEQ: - s[++sp] = !ebitmap_get_bit(&e->names, val1 - 1); + case CEXPR_NEQ: + s[++sp] = !ebitmap_get_bit(&e->names, val1 - 1); break; - default: - BUG(); - return 0; - } - break; default: BUG(); return 0; + } + break; + default: + BUG(); + return 0; } } @@ -296,10 +294,8 @@ static int dump_masked_av_helper(void *k, void *d, void *args) } static void security_dump_masked_av(struct context *scontext, - struct context *tcontext, - u16 tclass, - u32 permissions, - const char *reason) + struct context *tcontext, u16 tclass, + u32 permissions, const char *reason) { struct common_datum *common_dat; struct class_datum *tclass_dat; @@ -320,21 +316,19 @@ static void security_dump_masked_av(struct context *scontext, /* init permission_names */ if ( common_dat && - hashtab_map(common_dat->permissions.table, - dump_masked_av_helper, permission_names) < 0 ) + hashtab_map(common_dat->permissions.table, 
dump_masked_av_helper, + permission_names) < 0 ) goto out; - if ( hashtab_map(tclass_dat->permissions.table, - dump_masked_av_helper, permission_names) < 0 ) + if ( hashtab_map(tclass_dat->permissions.table, dump_masked_av_helper, + permission_names) < 0 ) goto out; - /* get scontext/tcontext in text form */ - if ( context_struct_to_string(scontext, - &scontext_name, &length) < 0 ) + /* get scontext/tcontext in text form */ + if ( context_struct_to_string(scontext, &scontext_name, &length) < 0 ) goto out; - if ( context_struct_to_string(tcontext, - &tcontext_name, &length) < 0 ) + if ( context_struct_to_string(tcontext, &tcontext_name, &length) < 0 ) goto out; printk("Flask: op=security_compute_av reason=%s " @@ -348,10 +342,8 @@ static void security_dump_masked_av(struct context *scontext, if ( (mask & permissions) == 0 ) continue; - printk("%s%s", - need_comma ? "," : "", - permission_names[index] - ? permission_names[index] : "????"); + printk("%s%s", need_comma ? "," : "", + permission_names[index] ? permission_names[index] : "????"); need_comma = 1; } printk("\n"); @@ -368,18 +360,14 @@ out: * on boundary constraint. */ static void type_attribute_bounds_av(struct context *scontext, - struct context *tcontext, - u16 tclass, - u32 requested, - struct av_decision *avd) + struct context *tcontext, u16 tclass, + u32 requested, struct av_decision *avd) { struct context lo_scontext; struct context lo_tcontext; struct av_decision lo_avd; - struct type_datum *source - = policydb.type_val_to_struct[scontext->type - 1]; - struct type_datum *target - = policydb.type_val_to_struct[tcontext->type - 1]; + struct type_datum *source = policydb.type_val_to_struct[scontext->type - 1]; + struct type_datum *target = policydb.type_val_to_struct[tcontext->type - 1]; u32 masked = 0; if ( source->bounds ) @@ -389,13 +377,10 @@ static void type_attribute_bounds_av(struct context *scontext, memcpy(&lo_scontext, scontext, sizeof(lo_scontext)); lo_scontext.type = source->bounds; - context_struct_compute_av(&lo_scontext, - tcontext, - tclass, - requested, + context_struct_compute_av(&lo_scontext, tcontext, tclass, requested, &lo_avd); if ( (lo_avd.allowed & avd->allowed) == avd->allowed ) - return; /* no masked permission */ + return; /* no masked permission */ masked = ~lo_avd.allowed & avd->allowed; } @@ -406,13 +391,10 @@ static void type_attribute_bounds_av(struct context *scontext, memcpy(&lo_tcontext, tcontext, sizeof(lo_tcontext)); lo_tcontext.type = target->bounds; - context_struct_compute_av(scontext, - &lo_tcontext, - tclass, - requested, + context_struct_compute_av(scontext, &lo_tcontext, tclass, requested, &lo_avd); if ( (lo_avd.allowed & avd->allowed) == avd->allowed ) - return; /* no masked permission */ + return; /* no masked permission */ masked = ~lo_avd.allowed & avd->allowed; } @@ -424,13 +406,10 @@ static void type_attribute_bounds_av(struct context *scontext, * set up. 
*/ - context_struct_compute_av(&lo_scontext, - &lo_tcontext, - tclass, - requested, + context_struct_compute_av(&lo_scontext, &lo_tcontext, tclass, requested, &lo_avd); if ( (lo_avd.allowed & avd->allowed) == avd->allowed ) - return; /* no masked permission */ + return; /* no masked permission */ masked = ~lo_avd.allowed & avd->allowed; } @@ -440,8 +419,7 @@ static void type_attribute_bounds_av(struct context *scontext, avd->allowed &= ~masked; /* audit masked permissions */ - security_dump_masked_av(scontext, tcontext, - tclass, masked, "bounds"); + security_dump_masked_av(scontext, tcontext, tclass, masked, "bounds"); } } @@ -450,10 +428,8 @@ static void type_attribute_bounds_av(struct context *scontext, * the permissions in a particular class. */ static int context_struct_compute_av(struct context *scontext, - struct context *tcontext, - u16 tclass, - u32 requested, - struct av_decision *avd) + struct context *tcontext, u16 tclass, + u32 requested, struct av_decision *avd) { struct constraint_node *constraint; struct role_allow *ra; @@ -489,15 +465,15 @@ static int context_struct_compute_av(struct context *scontext, avkey.specified = AVTAB_AV; sattr = &policydb.type_attr_map[scontext->type - 1]; tattr = &policydb.type_attr_map[tcontext->type - 1]; - ebitmap_for_each_positive_bit(sattr, snode, i) + ebitmap_for_each_positive_bit (sattr, snode, i) { - ebitmap_for_each_positive_bit(tattr, tnode, j) + ebitmap_for_each_positive_bit (tattr, tnode, j) { avkey.source_type = i + 1; avkey.target_type = j + 1; for ( node = avtab_search_node(&policydb.te_avtab, &avkey); - node != NULL; - node = avtab_search_node_next(node, avkey.specified) ) + node != NULL; + node = avtab_search_node_next(node, avkey.specified) ) { if ( node->key.specified == AVTAB_ALLOWED ) avd->allowed |= node->datum.data; @@ -509,7 +485,6 @@ static int context_struct_compute_av(struct context *scontext, /* Check conditional av table for additional permissions */ cond_compute_av(&policydb.te_cond_avtab, &avkey, avd); - } } @@ -520,10 +495,10 @@ static int context_struct_compute_av(struct context *scontext, constraint = tclass_datum->constraints; while ( constraint ) { - if ( (constraint->permissions & (avd->allowed) ) && - !constraint_expr_eval(scontext, tcontext, NULL, constraint->expr)) + if ( (constraint->permissions & (avd->allowed)) && + !constraint_expr_eval(scontext, tcontext, NULL, constraint->expr) ) { - avd->allowed &= ~(constraint->permissions); + avd->allowed &= ~(constraint->permissions); } constraint = constraint->next; } @@ -533,8 +508,7 @@ static int context_struct_compute_av(struct context *scontext, * role is changing, then check the (current_role, new_role) * pair. */ - if ( tclass == SECCLASS_DOMAIN && - (avd->allowed & DOMAIN__TRANSITION) && + if ( tclass == SECCLASS_DOMAIN && (avd->allowed & DOMAIN__TRANSITION) && scontext->role != tcontext->role ) { for ( ra = policydb.role_allow; ra; ra = ra->next ) @@ -542,7 +516,7 @@ static int context_struct_compute_av(struct context *scontext, if ( scontext->role == ra->role && tcontext->role == ra->new_role ) break; } - if (!ra) + if ( !ra ) avd->allowed &= ~DOMAIN__TRANSITION; } @@ -551,13 +525,13 @@ static int context_struct_compute_av(struct context *scontext, * constraint, lazy checks have to mask any violated * permission and notice it to userspace via audit. 
*/ - type_attribute_bounds_av(scontext, tcontext, - tclass, requested, avd); + type_attribute_bounds_av(scontext, tcontext, tclass, requested, avd); return 0; } static int security_validtrans_handle_fail(struct context *ocontext, - struct context *ncontext, struct context *tcontext, u16 tclass) + struct context *ncontext, + struct context *tcontext, u16 tclass) { char *o = NULL, *n = NULL, *t = NULL; u32 olen, nlen, tlen; @@ -569,8 +543,8 @@ static int security_validtrans_handle_fail(struct context *ocontext, if ( context_struct_to_string(tcontext, &t, &tlen) < 0 ) goto out; printk("security_validate_transition: denied for" - " oldcontext=%s newcontext=%s taskcontext=%s tclass=%s", - o, n, t, policydb.p_class_val_to_name[tclass-1]); + " oldcontext=%s newcontext=%s taskcontext=%s tclass=%s", + o, n, t, policydb.p_class_val_to_name[tclass - 1]); out: xfree(o); xfree(n); @@ -599,7 +573,8 @@ int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, if ( !tclass || tclass > policydb.p_classes.nprim ) { printk(KERN_ERR "security_validate_transition: " - "unrecognized class %d\n", tclass); + "unrecognized class %d\n", + tclass); rc = -EINVAL; goto out; } @@ -609,7 +584,8 @@ int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, if ( !ocontext ) { printk(KERN_ERR "security_validate_transition: " - " unrecognized SID %d\n", oldsid); + " unrecognized SID %d\n", + oldsid); rc = -EINVAL; goto out; } @@ -618,7 +594,8 @@ int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, if ( !ncontext ) { printk(KERN_ERR "security_validate_transition: " - " unrecognized SID %d\n", newsid); + " unrecognized SID %d\n", + newsid); rc = -EINVAL; goto out; } @@ -627,7 +604,8 @@ int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, if ( !tcontext ) { printk(KERN_ERR "security_validate_transition: " - " unrecognized SID %d\n", tasksid); + " unrecognized SID %d\n", + tasksid); rc = -EINVAL; goto out; } @@ -636,10 +614,10 @@ int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, while ( constraint ) { if ( !constraint_expr_eval(ocontext, ncontext, tcontext, - constraint->expr) ) + constraint->expr) ) { - rc = security_validtrans_handle_fail(ocontext, ncontext, - tcontext, tclass); + rc = security_validtrans_handle_fail(ocontext, ncontext, tcontext, + tclass); goto out; } constraint = constraint->next; @@ -712,7 +690,8 @@ out: * to point to this string and set `*scontext_len' to * the length of the string. */ -static int context_struct_to_string(struct context *context, char **scontext, u32 *scontext_len) +static int context_struct_to_string(struct context *context, char **scontext, + u32 *scontext_len) { char *scontextp; @@ -736,12 +715,12 @@ static int context_struct_to_string(struct context *context, char **scontext, u3 * Copy the user name, role name and type name into the context. 
*/ snprintf(scontextp, *scontext_len, "%s:%s:%s", - policydb.p_user_val_to_name[context->user - 1], - policydb.p_role_val_to_name[context->role - 1], - policydb.p_type_val_to_name[context->type - 1]); - scontextp += strlen(policydb.p_user_val_to_name[context->user - 1]) + - 1 + strlen(policydb.p_role_val_to_name[context->role - 1]) + - 1 + strlen(policydb.p_type_val_to_name[context->type - 1]); + policydb.p_user_val_to_name[context->user - 1], + policydb.p_role_val_to_name[context->role - 1], + policydb.p_type_val_to_name[context->type - 1]); + scontextp += strlen(policydb.p_user_val_to_name[context->user - 1]) + 1 + + strlen(policydb.p_role_val_to_name[context->role - 1]) + 1 + + strlen(policydb.p_type_val_to_name[context->type - 1]); mls_sid_to_context(context, &scontextp); @@ -780,7 +759,8 @@ int security_sid_to_context(u32 sid, char **scontext, u32 *scontext_len) goto out; } printk(KERN_ERR "security_sid_to_context: called before initial " - "load_policy on unknown SID %d\n", sid); + "load_policy on unknown SID %d\n", + sid); rc = -EINVAL; goto out; } @@ -789,7 +769,8 @@ int security_sid_to_context(u32 sid, char **scontext, u32 *scontext_len) if ( !context ) { printk(KERN_ERR "security_sid_to_context: unrecognized SID " - "%d\n", sid); + "%d\n", + sid); rc = -EINVAL; goto out_unlock; } @@ -798,7 +779,6 @@ out_unlock: POLICY_RDUNLOCK; out: return rc; - } /** @@ -844,7 +824,7 @@ int security_context_to_sid(char *scontext, u32 scontext_len, u32 *sid) null suffix to the copy to avoid problems with the existing attr package, which doesn't view the null terminator as part of the attribute value. */ - scontext2 = xmalloc_array(char, scontext_len+1); + scontext2 = xmalloc_array(char, scontext_len + 1); if ( !scontext2 ) { rc = -ENOMEM; @@ -861,14 +841,14 @@ int security_context_to_sid(char *scontext, u32 scontext_len, u32 *sid) /* Parse the security context. */ rc = -EINVAL; - scontextp = (char *) scontext2; + scontextp = (char *)scontext2; /* Extract the user. 
*/ p = scontextp; while ( *p && *p != ':' ) p++; - if (*p == 0) + if ( *p == 0 ) goto out_unlock; *p++ = 0; @@ -933,9 +913,10 @@ out: return rc; } -static int compute_sid_handle_invalid_context( - struct context *scontext, struct context *tcontext, u16 tclass, - struct context *newcontext) +static int compute_sid_handle_invalid_context(struct context *scontext, + struct context *tcontext, + u16 tclass, + struct context *newcontext) { char *s = NULL, *t = NULL, *n = NULL; u32 slen, tlen, nlen; @@ -947,10 +928,10 @@ static int compute_sid_handle_invalid_context( if ( context_struct_to_string(newcontext, &n, &nlen) < 0 ) goto out; printk("security_compute_sid: invalid context %s" - " for scontext=%s" - " tcontext=%s" - " tclass=%s", - n, s, t, policydb.p_class_val_to_name[tclass-1]); + " for scontext=%s" + " tcontext=%s" + " tclass=%s", + n, s, t, policydb.p_class_val_to_name[tclass - 1]); out: xfree(s); xfree(t); @@ -960,11 +941,8 @@ out: return -EACCES; } -static int security_compute_sid(u32 ssid, - u32 tsid, - u16 tclass, - u32 specified, - u32 *out_sid) +static int security_compute_sid(u32 ssid, u32 tsid, u16 tclass, u32 specified, + u32 *out_sid) { struct context *scontext = NULL, *tcontext = NULL, newcontext; struct role_trans *roletr = NULL; @@ -975,13 +953,13 @@ static int security_compute_sid(u32 ssid, if ( !ss_initialized ) { - switch ( tclass ) + switch (tclass) { - case SECCLASS_DOMAIN: - *out_sid = ssid; + case SECCLASS_DOMAIN: + *out_sid = ssid; break; - default: - *out_sid = tsid; + default: + *out_sid = tsid; break; } goto out; @@ -1007,32 +985,32 @@ static int security_compute_sid(u32 ssid, context_init(&newcontext); /* Set the user identity. */ - switch ( specified ) + switch (specified) { - case AVTAB_TRANSITION: - case AVTAB_CHANGE: - /* Use the process user identity. */ - newcontext.user = scontext->user; + case AVTAB_TRANSITION: + case AVTAB_CHANGE: + /* Use the process user identity. */ + newcontext.user = scontext->user; break; - case AVTAB_MEMBER: - /* Use the related object owner. */ - newcontext.user = tcontext->user; + case AVTAB_MEMBER: + /* Use the related object owner. */ + newcontext.user = tcontext->user; break; } /* Set the role and type to default values. */ - switch ( tclass ) + switch (tclass) { - case SECCLASS_DOMAIN: - /* Use the current role and type of process. */ - newcontext.role = scontext->role; - newcontext.type = scontext->type; + case SECCLASS_DOMAIN: + /* Use the current role and type of process. */ + newcontext.role = scontext->role; + newcontext.type = scontext->type; break; - default: - /* Use the well-defined object role. */ - newcontext.role = OBJECT_R_VAL; - /* Use the type of the related object. */ - newcontext.type = tcontext->type; + default: + /* Use the well-defined object role. */ + newcontext.role = OBJECT_R_VAL; + /* Use the type of the related object. */ + newcontext.type = tcontext->type; } /* Look for a type transition/member/change rule. */ @@ -1063,25 +1041,25 @@ static int security_compute_sid(u32 ssid, } /* Check for class-specific changes. */ - switch ( tclass ) + switch (tclass) { - case SECCLASS_DOMAIN: - if ( specified & AVTAB_TRANSITION ) + case SECCLASS_DOMAIN: + if ( specified & AVTAB_TRANSITION ) + { + /* Look for a role transition rule. */ + for ( roletr = policydb.role_tr; roletr; roletr = roletr->next ) { - /* Look for a role transition rule. 
*/ - for ( roletr = policydb.role_tr; roletr; roletr = roletr->next ) + if ( roletr->role == scontext->role && + roletr->type == tcontext->type ) { - if ( roletr->role == scontext->role && - roletr->type == tcontext->type ) - { - /* Use the role transition rule. */ - newcontext.role = roletr->new_role; - break; - } + /* Use the role transition rule. */ + newcontext.role = roletr->new_role; + break; } } + } break; - default: + default: break; } @@ -1095,7 +1073,7 @@ static int security_compute_sid(u32 ssid, if ( !policydb_context_isvalid(&policydb, &newcontext) ) { rc = compute_sid_handle_invalid_context(scontext, tcontext, tclass, - &newcontext); + &newcontext); if ( rc ) goto out_unlock; } @@ -1184,12 +1162,11 @@ static int validate_classes(struct policydb *p) continue; if ( i > p->p_classes.nprim ) { - printk(KERN_INFO - "Flask: class %s not defined in policy\n", + printk(KERN_INFO "Flask: class %s not defined in policy\n", def_class); return -EINVAL; } - pol_class = p->p_class_val_to_name[i-1]; + pol_class = p->p_class_val_to_name[i - 1]; if ( strcmp(pol_class, def_class) ) { printk(KERN_ERR @@ -1205,9 +1182,9 @@ static int validate_classes(struct policydb *p) def_perm = kdefs->av_perm_to_string[i].name; if ( class_val > p->p_classes.nprim ) continue; - pol_class = p->p_class_val_to_name[class_val-1]; + pol_class = p->p_class_val_to_name[class_val - 1]; cladatum = hashtab_search(p->p_classes.table, pol_class); - BUG_ON( !cladatum ); + BUG_ON(!cladatum); perms = &cladatum->permissions; nprim = 1 << (perms->nprim - 1); if ( perm_val > nprim ) @@ -1245,7 +1222,8 @@ static int clone_sid(u32 sid, struct context *context, void *arg) return sidtab_insert(s, sid, context); } -static inline int convert_context_handle_invalid_context(struct context *context) +static inline int +convert_context_handle_invalid_context(struct context *context) { int rc = 0; @@ -1263,7 +1241,8 @@ static inline int convert_context_handle_invalid_context(struct context *context return rc; } -struct convert_context_args { +struct convert_context_args +{ struct policydb *oldp; struct policydb *newp; }; @@ -1360,7 +1339,7 @@ int security_load_policy(const void *data, size_t len) struct convert_context_args args; u32 seqno; int rc = 0; - struct policy_file file = { data, len }, *fp = &file; + struct policy_file file = {data, len}, *fp = &file; LOAD_LOCK; @@ -1380,8 +1359,7 @@ int security_load_policy(const void *data, size_t len) if ( validate_classes(&policydb) ) { LOAD_UNLOCK; - printk(KERN_ERR - "Flask: the definition of a class is incorrect\n"); + printk(KERN_ERR "Flask: the definition of a class is incorrect\n"); sidtab_destroy(&sidtab); policydb_destroy(&policydb); return -EINVAL; @@ -1409,8 +1387,7 @@ int security_load_policy(const void *data, size_t len) /* Verify that the kernel defined classes are correct. 
*/ if ( validate_classes(&newpolicydb) ) { - printk(KERN_ERR - "Flask: the definition of a class is incorrect\n"); + printk(KERN_ERR "Flask: the definition of a class is incorrect\n"); rc = -EINVAL; goto err; } @@ -1462,7 +1439,6 @@ err: sidtab_destroy(&newsidtab); policydb_destroy(&newpolicydb); return rc; - } int security_get_allow_unknown(void) @@ -1483,7 +1459,7 @@ int security_irq_sid(int pirq, u32 *out_sid) POLICY_RDLOCK; c = policydb.ocontexts[OCON_PIRQ]; - + while ( c ) { if ( c->u.pirq == pirq ) @@ -1526,7 +1502,7 @@ int security_iomem_sid(unsigned long mfn, u32 *out_sid) c = policydb.ocontexts[OCON_IOMEM]; while ( c ) { - if ( c->u.iomem.low_iomem <= mfn && c->u.iomem.high_iomem >= mfn ) + if ( c->u.iomem.low_iomem <= mfn && c->u.iomem.high_iomem >= mfn ) break; c = c->next; } @@ -1560,31 +1536,34 @@ int security_iterate_iomem_sids(unsigned long start, unsigned long end, POLICY_RDLOCK; c = policydb.ocontexts[OCON_IOMEM]; - while (c && c->u.iomem.high_iomem < start) + while ( c && c->u.iomem.high_iomem < start ) c = c->next; - while (c && c->u.iomem.low_iomem <= end) { - if (!c->sid) + while ( c && c->u.iomem.low_iomem <= end ) + { + if ( !c->sid ) { rc = sidtab_context_to_sid(&sidtab, &c->context, &c->sid); if ( rc ) goto out; } - if (start < c->u.iomem.low_iomem) { + if ( start < c->u.iomem.low_iomem ) + { /* found a gap */ rc = fn(data, SECINITSID_IOMEM, start, c->u.iomem.low_iomem - 1); - if (rc) + if ( rc ) goto out; start = c->u.iomem.low_iomem; } - if (end <= c->u.iomem.high_iomem) { + if ( end <= c->u.iomem.high_iomem ) + { /* iteration ends in the middle of this range */ rc = fn(data, c->sid, start, end); goto out; } rc = fn(data, c->sid, start, c->u.iomem.high_iomem); - if (rc) + if ( rc ) goto out; start = c->u.iomem.high_iomem + 1; @@ -1639,8 +1618,8 @@ out: return rc; } -int security_iterate_ioport_sids(u32 start, u32 end, - security_iterate_fn fn, void *data) +int security_iterate_ioport_sids(u32 start, u32 end, security_iterate_fn fn, + void *data) { struct ocontext *c; int rc = 0; @@ -1648,31 +1627,34 @@ int security_iterate_ioport_sids(u32 start, u32 end, POLICY_RDLOCK; c = policydb.ocontexts[OCON_IOPORT]; - while (c && c->u.ioport.high_ioport < start) + while ( c && c->u.ioport.high_ioport < start ) c = c->next; - while (c && c->u.ioport.low_ioport <= end) { - if (!c->sid) + while ( c && c->u.ioport.low_ioport <= end ) + { + if ( !c->sid ) { rc = sidtab_context_to_sid(&sidtab, &c->context, &c->sid); if ( rc ) goto out; } - if (start < c->u.ioport.low_ioport) { + if ( start < c->u.ioport.low_ioport ) + { /* found a gap */ rc = fn(data, SECINITSID_IOPORT, start, c->u.ioport.low_ioport - 1); - if (rc) + if ( rc ) goto out; start = c->u.ioport.low_ioport; } - if (end <= c->u.ioport.high_ioport) { + if ( end <= c->u.ioport.high_ioport ) + { /* iteration ends in the middle of this range */ rc = fn(data, c->sid, start, end); goto out; } rc = fn(data, c->sid, start, c->u.ioport.high_ioport); - if (rc) + if ( rc ) goto out; start = c->u.ioport.high_ioport + 1; @@ -1767,7 +1749,7 @@ int security_find_bool(const char *name) POLICY_RDLOCK; for ( i = 0; i < policydb.p_bools.nprim; i++ ) { - if (!strcmp(name, policydb.p_bool_val_to_name[i])) + if ( !strcmp(name, policydb.p_bool_val_to_name[i]) ) { rv = i; break; @@ -1812,7 +1794,8 @@ int security_get_bools(int *len, char ***names, int **values, size_t *maxstr) size_t name_len = strlen(policydb.p_bool_val_to_name[i]); (*values)[i] = policydb.bool_val_to_struct[i]->state; - if ( names ) { + if ( names ) + { (*names)[i] = 
xmalloc_array(char, name_len + 1);
         if ( !(*names)[i] )
             goto err;
@@ -1836,7 +1819,6 @@ err:
     goto out;
 }
 
-
 int security_set_bools(int len, int *values)
 {
     int i, rc = 0;
@@ -1965,8 +1947,8 @@ out:
     return rc;
 }
 
-int security_ocontext_add( u32 ocon, unsigned long low, unsigned long high
-                          ,u32 sid )
+int security_ocontext_add(u32 ocon, unsigned long low, unsigned long high,
+                          u32 sid)
 {
     int ret = 0;
     struct ocontext *c;
@@ -1978,7 +1960,7 @@ int security_ocontext_add( u32 ocon, unsigned long low, unsigned long high
     add->sid = sid;
 
     POLICY_WRLOCK;
-    switch( ocon )
+    switch (ocon)
     {
     case OCON_PIRQ:
         add->u.pirq = (u16)low;
@@ -2016,15 +1998,16 @@ int security_ocontext_add( u32 ocon, unsigned long low, unsigned long high
 
         prev = NULL;
         c = policydb.ocontexts[OCON_IOPORT];
-        while ( c && c->u.ioport.high_ioport < low ) {
+        while ( c && c->u.ioport.high_ioport < low )
+        {
             prev = c;
             c = c->next;
         }
 
-        if (c && c->u.ioport.low_ioport <= high)
+        if ( c && c->u.ioport.low_ioport <= high )
         {
-            if (c->u.ioport.low_ioport == low &&
-                c->u.ioport.high_ioport == high && c->sid == sid)
+            if ( c->u.ioport.low_ioport == low &&
+                 c->u.ioport.high_ioport == high && c->sid == sid )
                 break;
 
             printk("flask: IO Port overlap with entry %#x - %#x\n",
@@ -2033,10 +2016,13 @@ int security_ocontext_add( u32 ocon, unsigned long low, unsigned long high
             break;
         }
 
-        if (prev) {
+        if ( prev )
+        {
             add->next = prev->next;
             prev->next = add;
-        } else {
+        }
+        else
+        {
             add->next = policydb.ocontexts[OCON_IOPORT];
             policydb.ocontexts[OCON_IOPORT] = add;
         }
@@ -2049,33 +2035,38 @@ int security_ocontext_add( u32 ocon, unsigned long low, unsigned long high
 
         prev = NULL;
         c = policydb.ocontexts[OCON_IOMEM];
-        while ( c && c->u.iomem.high_iomem < low ) {
+        while ( c && c->u.iomem.high_iomem < low )
+        {
             prev = c;
             c = c->next;
         }
 
-        if (c && c->u.iomem.low_iomem <= high)
+        if ( c && c->u.iomem.low_iomem <= high )
        {
-            if (c->u.iomem.low_iomem == low &&
-                c->u.iomem.high_iomem == high && c->sid == sid)
+            if ( c->u.iomem.low_iomem == low && c->u.iomem.high_iomem == high &&
+                 c->sid == sid )
                 break;
 
-            printk("flask: IO Memory overlap with entry %#"PRIx64" - %#"PRIx64"\n",
+            printk("flask: IO Memory overlap with entry %#" PRIx64
+                   " - %#" PRIx64 "\n",
                    c->u.iomem.low_iomem, c->u.iomem.high_iomem);
             ret = -EEXIST;
             break;
         }
 
-        if (prev) {
+        if ( prev )
+        {
             add->next = prev->next;
             prev->next = add;
-        } else {
+        }
+        else
+        {
             add->next = policydb.ocontexts[OCON_IOMEM];
             policydb.ocontexts[OCON_IOMEM] = add;
         }
         break;
 
-     case OCON_DEVICE:
+    case OCON_DEVICE:
         add->u.device = low;
         if ( high != low )
         {
@@ -2105,8 +2096,8 @@ int security_ocontext_add( u32 ocon, unsigned long low, unsigned long high
         }
         break;
 
-     default:
-         ret = -EINVAL;
+    default:
+        ret = -EINVAL;
     }
 
     POLICY_WRUNLOCK;
@@ -2115,17 +2106,17 @@ int security_ocontext_add( u32 ocon, unsigned long low, unsigned long high
     return ret;
 }
 
-int security_ocontext_del( u32 ocon, unsigned long low, unsigned long high )
+int security_ocontext_del(u32 ocon, unsigned long low, unsigned long high)
 {
     int ret = 0;
     struct ocontext *c, *before_c;
 
     POLICY_WRLOCK;
-    switch( ocon )
+    switch (ocon)
     {
     case OCON_PIRQ:
-        for ( before_c = NULL, c = policydb.ocontexts[OCON_PIRQ];
-              c; before_c = c, c = c->next )
+        for ( before_c = NULL, c = policydb.ocontexts[OCON_PIRQ]; c;
+              before_c = c, c = c->next )
         {
             if ( c->u.pirq == low )
             {
@@ -2149,8 +2140,8 @@ int security_ocontext_del( u32 ocon, unsigned long low, unsigned long high )
         break;
 
     case OCON_IOPORT:
-        for ( before_c = NULL, c = policydb.ocontexts[OCON_IOPORT];
-              c; before_c = c, c = c->next )
+        for ( before_c = NULL, c = policydb.ocontexts[OCON_IOPORT]; c;
+              before_c = c, c = c->next )
         {
             if ( c->u.ioport.low_ioport == low &&
                  c->u.ioport.high_ioport == high )
@@ -2175,11 +2166,10 @@ int security_ocontext_del( u32 ocon, unsigned long low, unsigned long high )
         break;
 
     case OCON_IOMEM:
-        for ( before_c = NULL, c = policydb.ocontexts[OCON_IOMEM];
-              c; before_c = c, c = c->next )
+        for ( before_c = NULL, c = policydb.ocontexts[OCON_IOMEM]; c;
+              before_c = c, c = c->next )
         {
-            if ( c->u.iomem.low_iomem == low &&
-                 c->u.iomem.high_iomem == high )
+            if ( c->u.iomem.low_iomem == low && c->u.iomem.high_iomem == high )
             {
                 if ( before_c == NULL )
                 {
@@ -2201,8 +2191,8 @@ int security_ocontext_del( u32 ocon, unsigned long low, unsigned long high )
         break;
 
     case OCON_DEVICE:
-        for ( before_c = NULL, c = policydb.ocontexts[OCON_DEVICE];
-              c; before_c = c, c = c->next )
+        for ( before_c = NULL, c = policydb.ocontexts[OCON_DEVICE]; c;
+              before_c = c, c = c->next )
         {
             if ( c->u.device == low )
             {
@@ -2229,7 +2219,7 @@ int security_ocontext_del( u32 ocon, unsigned long low, unsigned long high )
         ret = -EINVAL;
     }
 
- out:
+out:
     POLICY_WRUNLOCK;
     return ret;
 }
diff --git a/xen/xsm/flask/ss/sidtab.c b/xen/xsm/flask/ss/sidtab.c
index cd1360cb4a..95e1c78387 100644
--- a/xen/xsm/flask/ss/sidtab.c
+++ b/xen/xsm/flask/ss/sidtab.c
@@ -3,9 +3,9 @@
  *
  * Author : Stephen Smalley,
  */
- 
+
 /* Ported to Xen 3.0, George Coker, */
- 
+
 #include 
 #include 
 #include 
@@ -124,8 +124,9 @@ struct context *sidtab_search(struct sidtab *s, u32 sid)
     return &cur->context;
 }
 
-int sidtab_map(struct sidtab *s, 
-    int (*apply) (u32 sid, struct context *context, void *args), void *args)
+int sidtab_map(struct sidtab *s,
+               int (*apply)(u32 sid, struct context *context, void *args),
+               void *args)
 {
     int i, rc = 0;
     struct sidtab_node *cur;
@@ -149,7 +150,9 @@ out:
 }
 
 void sidtab_map_remove_on_error(struct sidtab *s,
-    int (*apply) (u32 sid, struct context *context, void *args), void *args)
+                                int (*apply)(u32 sid, struct context *context,
+                                             void *args),
+                                void *args)
 {
     int i, ret;
     struct sidtab_node *last, *cur, *temp;
@@ -192,8 +195,8 @@ void sidtab_map_remove_on_error(struct sidtab *s,
     return;
 }
 
-static inline u32 sidtab_search_context(struct sidtab *s, 
-                                          struct context *context)
+static inline u32 sidtab_search_context(struct sidtab *s,
+                                        struct context *context)
 {
     int i;
     struct sidtab_node *cur;
@@ -212,7 +215,7 @@ static inline u32 sidtab_search_context(struct sidtab *s,
 }
 
 int sidtab_context_to_sid(struct sidtab *s, struct context *context,
-    u32 *out_sid)
+                          u32 *out_sid)
 {
     u32 sid;
     int ret = 0;
@@ -237,7 +240,7 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context,
         ret = sidtab_insert(s, sid, context);
         if ( ret )
             s->next_sid--;
-unlock_out:
+    unlock_out:
         SIDTAB_UNLOCK(s);
     }
 
@@ -274,8 +277,8 @@
     }
 
     printk(KERN_INFO "%s: %d entries and %d/%d buckets used, longest "
-           "chain length %d\n", tag, h->nel, slots_used, SIDTAB_SIZE,
-           max_chain_len);
+                     "chain length %d\n",
+           tag, h->nel, slots_used, SIDTAB_SIZE, max_chain_len);
 }
 
 void sidtab_destroy(struct sidtab *s)
diff --git a/xen/xsm/flask/ss/symtab.c b/xen/xsm/flask/ss/symtab.c
index d98c116d5b..85b9e68863 100644
--- a/xen/xsm/flask/ss/symtab.c
+++ b/xen/xsm/flask/ss/symtab.c
@@ -22,7 +22,7 @@ static unsigned int symhash(struct hashtab *h, const void *key)
     keyp = key;
     size = strlen(keyp);
     for ( p = keyp; (p - keyp) < size; p++ )
-        val = (val << 4 | (val >> (8*sizeof(unsigned int)-4))) ^ (*p);
+        val = (val << 4 | (val >> (8 * sizeof(unsigned int) - 4))) ^ (*p);
     return val & (h->size - 1);
 }
 
@@ -35,7 +35,6 @@ static int symcmp(struct hashtab *h, const void *key1, const void *key2)
     return strcmp(keyp1, keyp2);
 }
 
-
 int symtab_init(struct symtab *s, unsigned int size)
 {
     s->table = hashtab_create(symhash, symcmp, size);
@@ -44,4 +43,3 @@ int symtab_init(struct symtab *s, unsigned int size)
     s->nprim = 0;
     return 0;
 }
-
diff --git a/xen/xsm/xsm_core.c b/xen/xsm/xsm_core.c
index 201c354390..f547632cf1 100644
--- a/xen/xsm/xsm_core.c
+++ b/xen/xsm/xsm_core.c
@@ -27,11 +27,12 @@
 #include 
 #endif
 
-#define XSM_FRAMEWORK_VERSION    "1.0.0"
+#define XSM_FRAMEWORK_VERSION "1.0.0"
 
 struct xsm_operations *xsm_ops;
 
-enum xsm_bootparam {
+enum xsm_bootparam
+{
     XSM_BOOTPARAM_DUMMY,
     XSM_BOOTPARAM_FLASK,
     XSM_BOOTPARAM_SILO,
@@ -94,7 +95,7 @@ static int __init xsm_core_init(const void *policy_buffer, size_t policy_size)
 
     xsm_ops = &dummy_xsm_ops;
 
-    switch ( xsm_bootparam )
+    switch (xsm_bootparam)
     {
     case XSM_BOOTPARAM_DUMMY:
         break;
@@ -127,8 +128,8 @@ int __init xsm_multiboot_init(unsigned long *module_map,
 
     if ( XSM_MAGIC )
     {
-        ret = xsm_multiboot_policy_init(module_map, mbi,
-                                        &policy_buffer, &policy_size);
+        ret = xsm_multiboot_policy_init(module_map, mbi, &policy_buffer,
+                                        &policy_size);
         if ( ret )
         {
             bootstrap_map(NULL);
@@ -186,8 +187,8 @@ bool __init has_xsm_magic(paddr_t start)
 
     if ( XSM_MAGIC )
     {
-        copy_from_paddr(&magic, start, sizeof(magic) );
-        return ( magic == XSM_MAGIC );
+        copy_from_paddr(&magic, start, sizeof(magic));
+        return (magic == XSM_MAGIC);
     }
 
     return false;
@@ -212,13 +213,13 @@ int __init register_xsm(struct xsm_operations *ops)
 
 #endif
 
-long do_xsm_op (XEN_GUEST_HANDLE_PARAM(xsm_op_t) op)
+long do_xsm_op(XEN_GUEST_HANDLE_PARAM(xsm_op_t) op)
 {
     return xsm_do_xsm_op(op);
 }
 
 #ifdef CONFIG_COMPAT
-int compat_xsm_op (XEN_GUEST_HANDLE_PARAM(xsm_op_t) op)
+int compat_xsm_op(XEN_GUEST_HANDLE_PARAM(xsm_op_t) op)
 {
     return xsm_do_compat_op(op);
 }
diff --git a/xen/xsm/xsm_policy.c b/xen/xsm/xsm_policy.c
index 33ab37717f..4db5c83868 100644
--- a/xen/xsm/xsm_policy.c
+++ b/xen/xsm/xsm_policy.c
@@ -8,7 +8,7 @@
  *  Contributors:
  *  Michael LeMay,
  *  George Coker,
- * 
+ *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License version 2,
  *  as published by the Free Software Foundation.
@@ -25,15 +25,14 @@
 #endif
 #include 
 #ifdef CONFIG_HAS_DEVICE_TREE
-# include 
-# include 
+#include 
+#include 
 #endif
 
 #ifdef CONFIG_MULTIBOOT
 int __init xsm_multiboot_policy_init(unsigned long *module_map,
                                      const multiboot_info_t *mbi,
-                                     void **policy_buffer,
-                                     size_t *policy_size)
+                                     void **policy_buffer, size_t *policy_size)
 {
     int i;
     module_t *mod = (module_t *)__va(mbi->mods_addr);
@@ -45,25 +44,24 @@ int __init xsm_multiboot_policy_init(unsigned long *module_map,
      * Try all modules and see whichever could be the binary policy.
      * Adjust module_map for the module that is the binary policy.
      */
-    for ( i = mbi->mods_count-1; i >= 1; i-- )
+    for ( i = mbi->mods_count - 1; i >= 1; i-- )
     {
         if ( !test_bit(i, module_map) )
             continue;
 
         _policy_start = bootstrap_map(mod + i);
-        _policy_len   = mod[i].mod_end;
+        _policy_len = mod[i].mod_end;
 
         if ( (xsm_magic_t)(*_policy_start) == XSM_MAGIC )
         {
             *policy_buffer = _policy_start;
             *policy_size = _policy_len;
 
-            printk("Policy len %#lx, start at %p.\n",
-                   _policy_len,_policy_start);
+            printk("Policy len %#lx, start at %p.\n", _policy_len,
+                   _policy_start);
 
             __clear_bit(i, module_map);
             break;
-
         }
 
         bootstrap_map(NULL);
@@ -91,8 +89,8 @@ int __init xsm_dt_policy_init(void **policy_buffer, size_t *policy_size)
         return -EINVAL;
     }
 
-    printk("xsm: Policy len = 0x%"PRIpaddr" start at 0x%"PRIpaddr"\n",
-           len, paddr);
+    printk("xsm: Policy len = 0x%" PRIpaddr " start at 0x%" PRIpaddr "\n", len,
+           paddr);
 
     *policy_buffer = xmalloc_bytes(len);
     if ( !*policy_buffer )
-- 
2.17.1
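
Note for readers skimming the hunks above: the reformatted (+) side of the diff follows the conventions this conversion is aiming for -- braces on their own line, four-space indentation, wrapped arguments aligned under the opening parenthesis, and spaces kept inside the parentheses of if/for/while conditions. One visible gap is that switch statements come out as "switch (ocon)" rather than the "switch ( ocon )" form used for the other control statements. As a purely illustrative sketch (hypothetical code, not taken from this patch and not produced by any particular .clang-format configuration), a new function written in the style the + side follows would look like:

/* Hypothetical example only -- not part of the patch above. */
static int example_find(const unsigned int *table, unsigned int len,
                        unsigned int key)
{
    unsigned int i;

    for ( i = 0; i < len; i++ )
    {
        if ( table[i] == key )
            return i;
    }

    return -1;
}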