From 3d10ef0c4ded45001cf8ab1ff249a5952a934238 Mon Sep 17 00:00:00 2001 From: Anna Lyons <Anna.Lyons@data61.csiro.au> Date: Tue, 19 Mar 2019 13:44:00 +1100 Subject: [PATCH] style: correct parenthesis padding Use astyle's unpad-paren to unpad all parentheses that are not included by pad-header, pad-oper, and pad-comma. --- .../arch/arm/arch/32/mode/fastpath/fastpath.h | 6 +- include/arch/arm/arch/32/mode/machine.h | 6 +- include/arch/arm/arch/32/mode/machine/fpu.h | 16 +- include/arch/arm/arch/32/mode/machine_pl2.h | 8 +- include/arch/arm/arch/32/mode/smp/smp.h | 2 +- .../arch/arm/arch/64/mode/fastpath/fastpath.h | 6 +- include/arch/arm/arch/64/mode/machine.h | 32 ++-- include/arch/arm/arch/64/mode/machine/fpu.h | 8 +- include/arch/arm/arch/64/mode/smp/smp.h | 4 +- include/arch/arm/arch/model/smp.h | 4 +- .../armv/armv6/armv/benchmark_irqHandler.h | 4 +- .../armv/armv7-a/armv/benchmark_irqHandler.h | 2 +- include/arch/arm/armv/armv7-a/armv/vcpu.h | 80 ++++----- include/arch/riscv/arch/fastpath/fastpath.h | 10 +- include/arch/riscv/arch/machine.h | 4 +- include/arch/riscv/arch/sbi.h | 10 +- .../arch/x86/arch/32/mode/fastpath/fastpath.h | 14 +- include/arch/x86/arch/32/mode/machine.h | 14 +- include/arch/x86/arch/32/mode/machine/debug.h | 28 +-- include/arch/x86/arch/32/mode/model/smp.h | 2 +- .../arch/x86/arch/64/mode/fastpath/fastpath.h | 22 +-- include/arch/x86/arch/64/mode/machine.h | 26 +-- .../x86/arch/64/mode/machine/cpu_registers.h | 2 +- include/arch/x86/arch/64/mode/machine/debug.h | 28 +-- include/arch/x86/arch/64/mode/model/smp.h | 6 +- include/arch/x86/arch/benchmark.h | 4 +- include/arch/x86/arch/machine.h | 44 ++--- include/arch/x86/arch/machine/fpu.h | 2 +- include/arch/x86/arch/machine/hardware.h | 2 +- include/arch/x86/arch/object/vcpu.h | 4 +- include/plat/bcm2837/plat/machine.h | 20 +-- include/util.h | 6 +- libsel4/arch_include/arm/sel4/arch/syscalls.h | 2 +- .../arch_include/riscv/sel4/arch/functions.h | 2 +- .../arch_include/riscv/sel4/arch/syscalls.h | 40 ++--- libsel4/include/sel4/shared_types.h | 2 +- libsel4/include/sel4/syscalls.h | 2 +- .../aarch32/sel4/sel4_arch/functions.h | 2 +- .../aarch32/sel4/sel4_arch/syscalls.h | 30 ++-- .../aarch64/sel4/sel4_arch/functions.h | 2 +- .../aarch64/sel4/sel4_arch/syscalls.h | 30 ++-- .../ia32/sel4/sel4_arch/syscalls.h | 170 +++++++++--------- .../x86_64/sel4/sel4_arch/syscalls.h | 2 +- .../x86_64/sel4/sel4_arch/syscalls_syscall.h | 88 ++++----- .../x86_64/sel4/sel4_arch/syscalls_sysenter.h | 88 ++++----- src/api/syscall.c | 2 +- src/arch/arm/32/c_traps.c | 4 +- src/arch/arm/32/kernel/vspace.c | 16 +- src/arch/arm/32/machine/capdl.c | 2 +- src/arch/arm/32/machine/fpu.c | 18 +- src/arch/arm/32/object/objecttype.c | 4 +- src/arch/arm/64/c_traps.c | 4 +- src/arch/arm/64/kernel/vspace.c | 8 +- src/arch/arm/armv/armv6/benchmark.c | 8 +- src/arch/arm/armv/armv7-a/benchmark.c | 16 +- src/arch/arm/kernel/boot.c | 2 +- src/arch/arm/machine/l2c_310.c | 24 +-- src/arch/arm/object/iospace.c | 2 +- src/arch/arm/object/vcpu.c | 2 +- src/arch/riscv/c_traps.c | 4 +- src/arch/riscv/kernel/boot.c | 6 +- src/arch/riscv/kernel/vspace.c | 6 +- src/arch/riscv/object/objecttype.c | 4 +- src/arch/x86/64/c_traps.c | 8 +- src/arch/x86/64/kernel/thread.c | 6 +- src/arch/x86/64/kernel/vspace.c | 12 +- src/arch/x86/kernel/boot_sys.c | 16 +- src/arch/x86/kernel/vspace.c | 14 +- src/arch/x86/machine/capdl.c | 2 +- src/arch/x86/machine/hardware.c | 2 +- src/arch/x86/object/ioport.c | 2 +- src/arch/x86/object/iospace.c | 4 +- src/arch/x86/object/objecttype.c | 
4 +- src/arch/x86/object/vcpu.c | 8 +- src/drivers/serial/bcm2835-aux-uart.c | 4 +- src/drivers/serial/exynos4210-uart.c | 4 +- src/drivers/serial/msm-uartdm.c | 4 +- src/drivers/timer/exynos4412-mct.c | 2 +- src/fastpath/fastpath.c | 6 +- src/kernel/boot.c | 6 +- src/machine/io.c | 2 +- src/object/cnode.c | 2 +- src/object/endpoint.c | 2 +- src/object/untyped.c | 2 +- src/plat/allwinnerA20/machine/l2cache.c | 4 +- src/plat/am335x/machine/l2cache.c | 4 +- src/plat/omap3/machine/l2cache.c | 4 +- src/plat/pc99/machine/hardware.c | 2 +- src/plat/pc99/machine/intel-vtd.c | 2 +- src/plat/spike/machine/hardware.c | 12 +- src/plat/tk1/machine/smmu.c | 20 +-- src/util.c | 8 +- 92 files changed, 593 insertions(+), 593 deletions(-) mode change 100755 => 100644 src/plat/allwinnerA20/machine/l2cache.c diff --git a/include/arch/arm/arch/32/mode/fastpath/fastpath.h b/include/arch/arm/arch/32/mode/fastpath/fastpath.h index 41d6a941d..a7092f530 100644 --- a/include/arch/arm/arch/32/mode/fastpath/fastpath.h +++ b/include/arch/arm/arch/32/mode/fastpath/fastpath.h @@ -38,7 +38,7 @@ clearExMonitor_fp(void) { word_t temp1 = 0; word_t temp2; - asm volatile ( + asm volatile( "strex %[output], %[mem], [%[mem]]" : [output]"+r"(temp1) : [mem]"r"(&temp2) @@ -94,7 +94,7 @@ isValidVTableRoot_fp(cap_t pd_cap) which appears above it is zero. We are assuming that n_msgRegisters == 4 for this check to be useful. By masking out the bottom 3 bits, we are really checking that n + 3 <= MASK(3), i.e. n + 3 <= 7 or n <= 4. */ -compile_assert (n_msgRegisters_eq_4, n_msgRegisters == 4) +compile_assert(n_msgRegisters_eq_4, n_msgRegisters == 4) static inline int fastpath_mi_check(word_t msgInfo) { @@ -167,7 +167,7 @@ fastpath_restore(word_t badge, word_t msgInfo, tcb_t *cur_thread) /* Return to user */ "eret" : - : [badge] "r" (badge_reg), + : [badge] "r"(badge_reg), [msginfo]"r"(msgInfo_reg), [cur_thread]"r"(cur_thread_reg) : "memory" diff --git a/include/arch/arm/arch/32/mode/machine.h b/include/arch/arm/arch/32/mode/machine.h index 00bbb334b..f692234f3 100644 --- a/include/arch/arm/arch/32/mode/machine.h +++ b/include/arch/arm/arch/32/mode/machine.h @@ -241,7 +241,7 @@ static inline word_t readTPIDRURO(void) static inline word_t readMPIDR(void) { word_t reg; - asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r"(reg)); + asm volatile("mrc p15, 0, %0, c0, c0, 5" : "=r"(reg)); return reg; } @@ -561,13 +561,13 @@ static inline void setCIDR(word_t cidr) static inline word_t getACTLR(void) { word_t ACTLR; - asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r"(ACTLR)); + asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r"(ACTLR)); return ACTLR; } static inline void setACTLR(word_t actlr) { - asm volatile ("mcr p15, 0, %0, c1, c0, 1" :: "r"(actlr)); + asm volatile("mcr p15, 0, %0, c1, c0, 1" :: "r"(actlr)); } void arch_clean_invalidate_caches(void); diff --git a/include/arch/arm/arch/32/mode/machine/fpu.h b/include/arch/arm/arch/32/mode/machine/fpu.h index 313c49f7d..b9bed5cdd 100644 --- a/include/arch/arm/arch/32/mode/machine/fpu.h +++ b/include/arch/arm/arch/32/mode/machine/fpu.h @@ -123,24 +123,24 @@ static inline void saveFpuState(user_fpu_state_t *dest) } /* We don't support asynchronous exceptions */ - assert ((dest->fpexc & BIT(FPEXC_EX_BIT)) == 0); + assert((dest->fpexc & BIT(FPEXC_EX_BIT)) == 0); if (isFPUD32SupportedCached) { register word_t regs_d16_d31 asm("ip") = (word_t) &dest->fpregs[16]; asm volatile( ".word 0xeccc0b20 \n" /* vstmia ip, {d16-d31} */ : - : "r" (regs_d16_d31) + : "r"(regs_d16_d31) : "memory" ); } - register word_t 
regs_d0_d15 asm("r2") = (word_t) &dest->fpregs[0]; + register word_t regs_d0_d15 asm("r2") = (word_t) &dest->fpregs[0]; asm volatile( /* Store d0 - d15 to memory */ ".word 0xec820b20 \n" /* vstmia r2, {d0-d15}" */ : - : "r" (regs_d0_d15) + : "r"(regs_d0_d15) ); /* Store FPSCR. */ @@ -191,19 +191,19 @@ static inline void loadFpuState(user_fpu_state_t *src) /* now we need to enable the EN bit in FPEXC */ setEnFPEXC(); } - register word_t regs_d16_d31 asm("r2") = (word_t) &src->fpregs[16]; + register word_t regs_d16_d31 asm("r2") = (word_t) &src->fpregs[16]; if (isFPUD32SupportedCached) { asm volatile( ".word 0xecd20b20 \n" /* vldmia r2, {d16-d31} */ - :: "r" (regs_d16_d31) + :: "r"(regs_d16_d31) ); } - register word_t regs_d0_d15 asm("r0") = (word_t) &src->fpregs[0]; + register word_t regs_d0_d15 asm("r0") = (word_t) &src->fpregs[0]; asm volatile( /* Restore d0 - d15 from memory */ ".word 0xec900b20 \n" /* vldmia r0, {d0-d15} */ - :: "r" (regs_d0_d15) + :: "r"(regs_d0_d15) ); /* Load FPSCR. */ diff --git a/include/arch/arm/arch/32/mode/machine_pl2.h b/include/arch/arm/arch/32/mode/machine_pl2.h index a5ed514b1..e33eab7eb 100644 --- a/include/arch/arm/arch/32/mode/machine_pl2.h +++ b/include/arch/arm/arch/32/mode/machine_pl2.h @@ -126,9 +126,9 @@ static inline void invalidateHypTLB(void) static inline paddr_t PURE addressTranslateS1CPR(vptr_t vaddr) { uint32_t ipa0, ipa1; - asm volatile ("mcr p15, 0, %0, c7, c8, 0" :: "r"(vaddr)); + asm volatile("mcr p15, 0, %0, c7, c8, 0" :: "r"(vaddr)); isb(); - asm volatile ("mrrc p15, 0, %0, %1, c7" : "=r"(ipa0), "=r"(ipa1)); + asm volatile("mrrc p15, 0, %0, %1, c7" : "=r"(ipa0), "=r"(ipa1)); return ipa0; } @@ -164,13 +164,13 @@ static inline word_t PURE getHPFAR(void) static inline word_t getSCTLR(void) { word_t SCTLR; - asm volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r"(SCTLR)); + asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r"(SCTLR)); return SCTLR; } static inline void setSCTLR(word_t sctlr) { - asm volatile ("mcr p15, 0, %0, c1, c0, 0" :: "r"(sctlr)); + asm volatile("mcr p15, 0, %0, c1, c0, 0" :: "r"(sctlr)); } static inline void setHTPIDR(word_t htpidr) diff --git a/include/arch/arm/arch/32/mode/smp/smp.h b/include/arch/arm/arch/32/mode/smp/smp.h index 23eeb9cce..c39dfa0cf 100644 --- a/include/arch/arm/arch/32/mode/smp/smp.h +++ b/include/arch/arm/arch/32/mode/smp/smp.h @@ -28,7 +28,7 @@ static inline word_t getCurSP(void) { word_t stack_address; - asm ("mov %[stack_address], %[currStackAddress]" : [stack_address] "=r"(stack_address) : [currStackAddress] "r" (&stack_address):); + asm("mov %[stack_address], %[currStackAddress]" : [stack_address] "=r"(stack_address) : [currStackAddress] "r"(&stack_address):); return stack_address; } diff --git a/include/arch/arm/arch/64/mode/fastpath/fastpath.h b/include/arch/arm/arch/64/mode/fastpath/fastpath.h index 95b7a97df..df5fe13a1 100644 --- a/include/arch/arm/arch/64/mode/fastpath/fastpath.h +++ b/include/arch/arm/arch/64/mode/fastpath/fastpath.h @@ -77,7 +77,7 @@ isValidVTableRoot_fp(cap_t vspace_root_cap) which appears above it is zero. We are assuming that n_msgRegisters == 4 for this check to be useful. By masking out the bottom 3 bits, we are really checking that n + 3 <= MASK(3), i.e. n + 3 <= 7 or n <= 4. 
*/ -compile_assert (n_msgRegisters_eq_4, n_msgRegisters == 4) +compile_assert(n_msgRegisters_eq_4, n_msgRegisters == 4) static inline int fastpath_mi_check(word_t msgInfo) { @@ -155,8 +155,8 @@ fastpath_restore(word_t badge, word_t msgInfo, tcb_t *cur_thread) "ldr x30, [sp, %[LR]] \n" "eret " : - : "r" (badge_reg), "r" (msgInfo_reg), "r" (cur_thread_reg), - [SP_EL0] "i" (PT_SP_EL0), [SPSR_EL1] "i" (PT_SPSR_EL1), [LR] "i" (PT_LR) + : "r"(badge_reg), "r"(msgInfo_reg), "r"(cur_thread_reg), + [SP_EL0] "i"(PT_SP_EL0), [SPSR_EL1] "i"(PT_SPSR_EL1), [LR] "i"(PT_LR) : "memory" ); diff --git a/include/arch/arm/arch/64/mode/machine.h b/include/arch/arm/arch/64/mode/machine.h index b4ac55fb0..49b7fdb81 100644 --- a/include/arch/arm/arch/64/mode/machine.h +++ b/include/arch/arm/arch/64/mode/machine.h @@ -143,7 +143,7 @@ static inline void setCurrentKernelVSpaceRoot(ttbr_t ttbr) MSR("ttbr0_el2", ttbr.words[0]); dsb(); isb(); - asm volatile ("ic ialluis"); + asm volatile("ic ialluis"); dsb(); } else { MSR("ttbr1_el1", ttbr.words[0]); @@ -187,12 +187,12 @@ static inline void setVtable(pptr_t addr) static inline void invalidateLocalTLB_EL2(void) { - asm volatile ("tlbi alle2"); + asm volatile("tlbi alle2"); } static inline void invalidateLocalTLB_EL1(void) { - asm volatile ("tlbi alle1"); + asm volatile("tlbi alle1"); } static inline void invalidateLocalTLB(void) @@ -214,7 +214,7 @@ static inline void invalidateLocalTLB_ASID(asid_t asid) assert(asid < BIT(16)); dsb(); - asm volatile("tlbi aside1, %0" : : "r" (asid << 48)); + asm volatile("tlbi aside1, %0" : : "r"(asid << 48)); dsb(); isb(); } @@ -222,7 +222,7 @@ static inline void invalidateLocalTLB_ASID(asid_t asid) static inline void invalidateLocalTLB_VAASID(word_t mva_plus_asid) { dsb(); - asm volatile("tlbi vae1, %0" : : "r" (mva_plus_asid)); + asm volatile("tlbi vae1, %0" : : "r"(mva_plus_asid)); dsb(); isb(); } @@ -231,7 +231,7 @@ static inline void invalidateLocalTLB_VAASID(word_t mva_plus_asid) * EL1 with the current VMID which is specified by vttbr_el2 */ static inline void invalidateLocalTLB_VMALLS12E1(void) { - asm volatile ("tlbi vmalls12e1"); + asm volatile("tlbi vmalls12e1"); dsb(); isb(); } @@ -239,9 +239,9 @@ static inline void invalidateLocalTLB_VMALLS12E1(void) /* Invalidate IPA with the current VMID */ static inline void invalidateLocalTLB_IPA(word_t ipa) { - asm volatile ("tlbi ipas2e1, %0" :: "r"(ipa)); + asm volatile("tlbi ipas2e1, %0" :: "r"(ipa)); dsb(); - asm volatile ("tlbi vmalle1"); + asm volatile("tlbi vmalle1"); dsb(); isb(); } @@ -250,25 +250,25 @@ void lockTLBEntry(vptr_t vaddr); static inline void cleanByVA(vptr_t vaddr, paddr_t paddr) { - asm volatile("dc cvac, %0" : : "r" (vaddr)); + asm volatile("dc cvac, %0" : : "r"(vaddr)); dmb(); } static inline void cleanByVA_PoU(vptr_t vaddr, paddr_t paddr) { - asm volatile("dc cvau, %0" : : "r" (vaddr)); + asm volatile("dc cvau, %0" : : "r"(vaddr)); dmb(); } static inline void invalidateByVA(vptr_t vaddr, paddr_t paddr) { - asm volatile("dc ivac, %0" : : "r" (vaddr)); + asm volatile("dc ivac, %0" : : "r"(vaddr)); dmb(); } static inline void invalidateByVA_I(vptr_t vaddr, paddr_t paddr) { - asm volatile("ic ivau, %0" : : "r" (vaddr)); + asm volatile("ic ivau, %0" : : "r"(vaddr)); isb(); } @@ -280,7 +280,7 @@ static inline void invalidate_I_PoU(void) static inline void cleanInvalByVA(vptr_t vaddr, paddr_t paddr) { - asm volatile("dc civac, %0" : : "r" (vaddr)); + asm volatile("dc civac, %0" : : "r"(vaddr)); dsb(); } @@ -316,7 +316,7 @@ static inline word_t PURE getFAR(void) static 
inline word_t ats1e2r(word_t va) { word_t par; - asm volatile ("at s1e2r, %0" :: "r"(va)); + asm volatile("at s1e2r, %0" :: "r"(va)); MRS("par_el1", par); return par; } @@ -324,7 +324,7 @@ static inline word_t ats1e2r(word_t va) static inline word_t ats1e1r(word_t va) { word_t par; - asm volatile ("at s1e1r, %0" :: "r"(va)); + asm volatile("at s1e1r, %0" :: "r"(va)); MRS("par_el1", par); return par; } @@ -333,7 +333,7 @@ static inline word_t ats1e1r(word_t va) static inline word_t ats2e0r(word_t va) { word_t par; - asm volatile ("at s12e0r, %0" :: "r"(va)); + asm volatile("at s12e0r, %0" :: "r"(va)); MRS("par_el1", par); return par; } diff --git a/include/arch/arm/arch/64/mode/machine/fpu.h b/include/arch/arm/arch/64/mode/machine/fpu.h index c7624d6ad..8e02cc2d8 100644 --- a/include/arch/arm/arch/64/mode/machine/fpu.h +++ b/include/arch/arm/arch/64/mode/machine/fpu.h @@ -47,8 +47,8 @@ static inline void saveFpuState(user_fpu_state_t *dest) "str %w0, [%1, #16 * 32] \n" "mrs %0, fpcr \n" "str %w0, [%1, #16 * 32 + 4] \n" - : "=&r" (temp) - : "r" (dest) + : "=&r"(temp) + : "r"(dest) : "memory" ); } @@ -82,8 +82,8 @@ static inline void loadFpuState(user_fpu_state_t *src) "msr fpsr, %0 \n" "ldr %w0, [%1, #16 * 32 + 4] \n" "msr fpcr, %0 \n" - : "=&r" (temp) - : "r" (src) + : "=&r"(temp) + : "r"(src) : "memory" ); } diff --git a/include/arch/arm/arch/64/mode/smp/smp.h b/include/arch/arm/arch/64/mode/smp/smp.h index 91d8a56d7..2e145a971 100644 --- a/include/arch/arm/arch/64/mode/smp/smp.h +++ b/include/arch/arm/arch/64/mode/smp/smp.h @@ -33,9 +33,9 @@ getCurrentCPUIndex(void) { cpu_id_t id; if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) { - asm volatile ("mrs %0, tpidr_el2" : "=r"(id)); + asm volatile("mrs %0, tpidr_el2" : "=r"(id)); } else { - asm volatile ("mrs %0, tpidr_el1" : "=r"(id)); + asm volatile("mrs %0, tpidr_el1" : "=r"(id)); } return (id & CPUID_MASK); } diff --git a/include/arch/arm/arch/model/smp.h b/include/arch/arm/arch/model/smp.h index e43257614..2f84893b0 100644 --- a/include/arch/arm/arch/model/smp.h +++ b/include/arch/arm/arch/model/smp.h @@ -29,11 +29,11 @@ try_arch_atomic_exchange(void* ptr, void *new_val, void **prev, int success_memo uint32_t atomic_status; void *temp; - asm volatile ( + asm volatile( LD_EX "%[prev_output], [%[ptr_val]] \n\t" /* ret = *ptr */ ST_EX "%" OP_WIDTH "[atomic_var], %[new_val] , [%[ptr_val]] \n\t" /* *ptr = new */ : [atomic_var] "=&r"(atomic_status), [prev_output]"=&r"(temp) /* output */ - : [ptr_val] "r"(ptr), [new_val] "r" (new_val) /* input */ + : [ptr_val] "r"(ptr), [new_val] "r"(new_val) /* input */ : ); diff --git a/include/arch/arm/armv/armv6/armv/benchmark_irqHandler.h b/include/arch/arm/armv/armv6/armv/benchmark_irqHandler.h index aa2b3d2c8..e609cdc10 100644 --- a/include/arch/arm/armv/armv6/armv/benchmark_irqHandler.h +++ b/include/arch/arm/armv/armv6/armv/benchmark_irqHandler.h @@ -17,9 +17,9 @@ static inline void armv_handleOverflowIRQ(void) { uint32_t pmcr; /* Clear the overflow flag */ - asm volatile ("mrc p15, 0, %0, c15, c12, 0;" : "=r" (pmcr):); + asm volatile("mrc p15, 0, %0, c15, c12, 0;" : "=r"(pmcr):); pmcr |= BIT(10); - asm volatile ("mcr p15, 0, %0, c15, c12, 0;" : : "r" (pmcr)); + asm volatile("mcr p15, 0, %0, c15, c12, 0;" : : "r"(pmcr)); } #endif /* CONFIG_ARM_ENABLE_PMU_OVERFLOW_INTERRUPT */ #endif /* ARMV_BENCHMARK_IRQ_H */ diff --git a/include/arch/arm/armv/armv7-a/armv/benchmark_irqHandler.h b/include/arch/arm/armv/armv7-a/armv/benchmark_irqHandler.h index 66856f4ed..5fa3c6ee5 100644 --- 
a/include/arch/arm/armv/armv7-a/armv/benchmark_irqHandler.h +++ b/include/arch/arm/armv/armv7-a/armv/benchmark_irqHandler.h @@ -18,7 +18,7 @@ static inline void armv_handleOverflowIRQ(void) uint32_t val; /* Clear the overflow flag */ val = BIT(31); - asm volatile ("mcr p15, 0, %0, c9, c12, 3;" : : "r" (val)); + asm volatile("mcr p15, 0, %0, c9, c12, 3;" : : "r"(val)); } #endif /* CONFIG_ARM_ENABLE_PMU_OVERFLOW_INTERRUPT */ #endif /* ARMV_BENCHMARK_IRQ_H */ diff --git a/include/arch/arm/armv/armv7-a/armv/vcpu.h b/include/arch/arm/armv/armv7-a/armv/vcpu.h index 439faa5f3..f76c333e7 100644 --- a/include/arch/arm/armv/armv7-a/armv/vcpu.h +++ b/include/arch/arm/armv/armv7-a/armv/vcpu.h @@ -37,280 +37,280 @@ static inline word_t get_lr_svc(void) { word_t ret; - asm ("mrs %[ret], lr_svc" : [ret]"=r"(ret)); + asm("mrs %[ret], lr_svc" : [ret]"=r"(ret)); return ret; } static inline void set_lr_svc(word_t val) { - asm ("msr lr_svc, %[val]" :: [val]"r"(val)); + asm("msr lr_svc, %[val]" :: [val]"r"(val)); } static inline word_t get_sp_svc(void) { word_t ret; - asm ("mrs %[ret], sp_svc" : [ret]"=r"(ret)); + asm("mrs %[ret], sp_svc" : [ret]"=r"(ret)); return ret; } static inline void set_sp_svc(word_t val) { - asm ("msr sp_svc, %[val]" :: [val]"r"(val)); + asm("msr sp_svc, %[val]" :: [val]"r"(val)); } static inline word_t get_spsr_svc(void) { word_t ret; - asm ("mrs %[ret], spsr_svc" : [ret]"=r"(ret)); + asm("mrs %[ret], spsr_svc" : [ret]"=r"(ret)); return ret; } static inline void set_spsr_svc(word_t val) { - asm ("msr spsr_svc, %[val]" :: [val]"r"(val)); + asm("msr spsr_svc, %[val]" :: [val]"r"(val)); } static inline word_t get_lr_abt(void) { word_t ret; - asm ("mrs %[ret], lr_abt" : [ret]"=r"(ret)); + asm("mrs %[ret], lr_abt" : [ret]"=r"(ret)); return ret; } static inline void set_lr_abt(word_t val) { - asm ("msr lr_abt, %[val]" :: [val]"r"(val)); + asm("msr lr_abt, %[val]" :: [val]"r"(val)); } static inline word_t get_sp_abt(void) { word_t ret; - asm ("mrs %[ret], sp_abt" : [ret]"=r"(ret)); + asm("mrs %[ret], sp_abt" : [ret]"=r"(ret)); return ret; } static inline void set_sp_abt(word_t val) { - asm ("msr sp_abt, %[val]" :: [val]"r"(val)); + asm("msr sp_abt, %[val]" :: [val]"r"(val)); } static inline word_t get_spsr_abt(void) { word_t ret; - asm ("mrs %[ret], spsr_abt" : [ret]"=r"(ret)); + asm("mrs %[ret], spsr_abt" : [ret]"=r"(ret)); return ret; } static inline void set_spsr_abt(word_t val) { - asm ("msr spsr_abt, %[val]" :: [val]"r"(val)); + asm("msr spsr_abt, %[val]" :: [val]"r"(val)); } static inline word_t get_lr_und(void) { word_t ret; - asm ("mrs %[ret], lr_und" : [ret]"=r"(ret)); + asm("mrs %[ret], lr_und" : [ret]"=r"(ret)); return ret; } static inline void set_lr_und(word_t val) { - asm ("msr lr_und, %[val]" :: [val]"r"(val)); + asm("msr lr_und, %[val]" :: [val]"r"(val)); } static inline word_t get_sp_und(void) { word_t ret; - asm ("mrs %[ret], sp_und" : [ret]"=r"(ret)); + asm("mrs %[ret], sp_und" : [ret]"=r"(ret)); return ret; } static inline void set_sp_und(word_t val) { - asm ("msr sp_und, %[val]" :: [val]"r"(val)); + asm("msr sp_und, %[val]" :: [val]"r"(val)); } static inline word_t get_spsr_und(void) { word_t ret; - asm ("mrs %[ret], spsr_und" : [ret]"=r"(ret)); + asm("mrs %[ret], spsr_und" : [ret]"=r"(ret)); return ret; } static inline void set_spsr_und(word_t val) { - asm ("msr spsr_und, %[val]" :: [val]"r"(val)); + asm("msr spsr_und, %[val]" :: [val]"r"(val)); } static inline word_t get_lr_irq(void) { word_t ret; - asm ("mrs %[ret], lr_irq" : [ret]"=r"(ret)); + asm("mrs %[ret], 
lr_irq" : [ret]"=r"(ret)); return ret; } static inline void set_lr_irq(word_t val) { - asm ("msr lr_irq, %[val]" :: [val]"r"(val)); + asm("msr lr_irq, %[val]" :: [val]"r"(val)); } static inline word_t get_sp_irq(void) { word_t ret; - asm ("mrs %[ret], sp_irq" : [ret]"=r"(ret)); + asm("mrs %[ret], sp_irq" : [ret]"=r"(ret)); return ret; } static inline void set_sp_irq(word_t val) { - asm ("msr sp_irq, %[val]" :: [val]"r"(val)); + asm("msr sp_irq, %[val]" :: [val]"r"(val)); } static inline word_t get_spsr_irq(void) { word_t ret; - asm ("mrs %[ret], spsr_irq" : [ret]"=r"(ret)); + asm("mrs %[ret], spsr_irq" : [ret]"=r"(ret)); return ret; } static inline void set_spsr_irq(word_t val) { - asm ("msr spsr_irq, %[val]" :: [val]"r"(val)); + asm("msr spsr_irq, %[val]" :: [val]"r"(val)); } static inline word_t get_lr_fiq(void) { word_t ret; - asm ("mrs %[ret], lr_fiq" : [ret]"=r"(ret)); + asm("mrs %[ret], lr_fiq" : [ret]"=r"(ret)); return ret; } static inline void set_lr_fiq(word_t val) { - asm ("msr lr_fiq, %[val]" :: [val]"r"(val)); + asm("msr lr_fiq, %[val]" :: [val]"r"(val)); } static inline word_t get_sp_fiq(void) { word_t ret; - asm ("mrs %[ret], sp_fiq" : [ret]"=r"(ret)); + asm("mrs %[ret], sp_fiq" : [ret]"=r"(ret)); return ret; } static inline void set_sp_fiq(word_t val) { - asm ("msr sp_fiq, %[val]" :: [val]"r"(val)); + asm("msr sp_fiq, %[val]" :: [val]"r"(val)); } static inline word_t get_spsr_fiq(void) { word_t ret; - asm ("mrs %[ret], spsr_fiq" : [ret]"=r"(ret)); + asm("mrs %[ret], spsr_fiq" : [ret]"=r"(ret)); return ret; } static inline void set_spsr_fiq(word_t val) { - asm ("msr spsr_fiq, %[val]" :: [val]"r"(val)); + asm("msr spsr_fiq, %[val]" :: [val]"r"(val)); } static inline word_t get_r8_fiq(void) { word_t ret; - asm ("mrs %[ret], r8_fiq" : [ret]"=r"(ret)); + asm("mrs %[ret], r8_fiq" : [ret]"=r"(ret)); return ret; } static inline void set_r8_fiq(word_t val) { - asm ("msr r8_fiq, %[val]" :: [val]"r"(val)); + asm("msr r8_fiq, %[val]" :: [val]"r"(val)); } static inline word_t get_r9_fiq(void) { word_t ret; - asm ("mrs %[ret], r9_fiq" : [ret]"=r"(ret)); + asm("mrs %[ret], r9_fiq" : [ret]"=r"(ret)); return ret; } static inline void set_r9_fiq(word_t val) { - asm ("msr r9_fiq, %[val]" :: [val]"r"(val)); + asm("msr r9_fiq, %[val]" :: [val]"r"(val)); } static inline word_t get_r10_fiq(void) { word_t ret; - asm ("mrs %[ret], r10_fiq" : [ret]"=r"(ret)); + asm("mrs %[ret], r10_fiq" : [ret]"=r"(ret)); return ret; } static inline void set_r10_fiq(word_t val) { - asm ("msr r10_fiq, %[val]" :: [val]"r"(val)); + asm("msr r10_fiq, %[val]" :: [val]"r"(val)); } static inline word_t get_r11_fiq(void) { word_t ret; - asm ("mrs %[ret], r11_fiq" : [ret]"=r"(ret)); + asm("mrs %[ret], r11_fiq" : [ret]"=r"(ret)); return ret; } static inline void set_r11_fiq(word_t val) { - asm ("msr r11_fiq, %[val]" :: [val]"r"(val)); + asm("msr r11_fiq, %[val]" :: [val]"r"(val)); } static inline word_t get_r12_fiq(void) { word_t ret; - asm ("mrs %[ret], r12_fiq" : [ret]"=r"(ret)); + asm("mrs %[ret], r12_fiq" : [ret]"=r"(ret)); return ret; } static inline void set_r12_fiq(word_t val) { - asm ("msr r12_fiq, %[val]" :: [val]"r"(val)); + asm("msr r12_fiq, %[val]" :: [val]"r"(val)); } static inline word_t get_cntv_tval(void) diff --git a/include/arch/riscv/arch/fastpath/fastpath.h b/include/arch/riscv/arch/fastpath/fastpath.h index e8c3b03c7..090efb383 100644 --- a/include/arch/riscv/arch/fastpath/fastpath.h +++ b/include/arch/riscv/arch/fastpath/fastpath.h @@ -79,7 +79,7 @@ isValidVTableRoot_fp(cap_t vspace_root_cap) which 
appears above it is zero. We are assuming that n_msgRegisters == 4 for this check to be useful. By masking out the bottom 3 bits, we are really checking that n + 3 <= MASK(3), i.e. n + 3 <= 7 or n <= 4. */ -compile_assert (n_msgRegisters_eq_4, n_msgRegisters == 4) +compile_assert(n_msgRegisters_eq_4, n_msgRegisters == 4) static inline int fastpath_mi_check(word_t msgInfo) { @@ -165,10 +165,10 @@ fastpath_restore(word_t badge, word_t msgInfo, tcb_t *cur_thread) LOAD_S " t0, (4*%[REGSIZE])(t0) \n" "sret" : /* no output */ - : "r" (cur_thread_reg), - [REGSIZE] "i" (sizeof(word_t)), - "r" (badge_reg), - "r" (msgInfo_reg) + : "r"(cur_thread_reg), + [REGSIZE] "i"(sizeof(word_t)), + "r"(badge_reg), + "r"(msgInfo_reg) : "memory" ); diff --git a/include/arch/riscv/arch/machine.h b/include/arch/riscv/arch/machine.h index 8577aba87..8b49e42af 100644 --- a/include/arch/riscv/arch/machine.h +++ b/include/arch/riscv/arch/machine.h @@ -29,12 +29,12 @@ static inline void sfence(void) { - asm volatile ("sfence.vma" ::: "memory"); + asm volatile("sfence.vma" ::: "memory"); } static inline void hwASIDFlush(asid_t asid) { - asm volatile ("sfence.vma x0, %0" :: "r" (asid): "memory"); + asm volatile("sfence.vma x0, %0" :: "r"(asid): "memory"); } word_t PURE getRestartPC(tcb_t *thread); diff --git a/include/arch/riscv/arch/sbi.h b/include/arch/riscv/arch/sbi.h index dd828f1f5..feeea7cc6 100644 --- a/include/arch/riscv/arch/sbi.h +++ b/include/arch/riscv/arch/sbi.h @@ -48,11 +48,11 @@ static inline register_t sbi_call(register_t cmd, register_t arg_1, register_t arg_2) { - register register_t a0 asm ("a0") = arg_0; - register register_t a1 asm ("a1") = arg_1; - register register_t a2 asm ("a2") = arg_2; - register register_t a7 asm ("a7") = cmd; - register register_t result asm ("a0"); + register register_t a0 asm("a0") = arg_0; + register register_t a1 asm("a1") = arg_1; + register register_t a2 asm("a2") = arg_2; + register register_t a7 asm("a7") = cmd; + register register_t result asm("a0"); asm volatile("ecall" : "=r"(result) : "r"(a0), "r"(a1), "r"(a2), "r"(a7) diff --git a/include/arch/x86/arch/32/mode/fastpath/fastpath.h b/include/arch/x86/arch/32/mode/fastpath/fastpath.h index 17c48ae4c..09f601b36 100644 --- a/include/arch/x86/arch/32/mode/fastpath/fastpath.h +++ b/include/arch/x86/arch/32/mode/fastpath/fastpath.h @@ -92,7 +92,7 @@ fastpath_copy_mrs(word_t length, tcb_t *src, tcb_t *dest) in the bottom of the msgInfo word, is <= 2 and that msgExtraCaps which appears above it is zero. 
We are assuming that n_msgRegisters == 2 for this check to be useful.*/ -compile_assert (n_msgRegisters_eq_2, n_msgRegisters == 2) +compile_assert(n_msgRegisters_eq_2, n_msgRegisters == 2) static inline int fastpath_mi_check(word_t msgInfo) { @@ -155,9 +155,9 @@ fastpath_restore(word_t badge, word_t msgInfo, tcb_t *cur_thread) "sysexit \n" : : "c"(&cur_thread->tcbArch.tcbContext.registers[EDI]), - "a" (cur_thread->tcbArch.tcbContext.registers[EAX]), - "b" (badge), - "S" (msgInfo), + "a"(cur_thread->tcbArch.tcbContext.registers[EAX]), + "b"(badge), + "S"(msgInfo), [IFMASK]"i"(FLAGS_IF) : "memory" ); @@ -186,9 +186,9 @@ fastpath_restore(word_t badge, word_t msgInfo, tcb_t *cur_thread) "sysexit \n" : : "c"(&cur_thread->tcbArch.tcbContext.registers[EDI]), - "a" (cur_thread->tcbArch.tcbContext.registers[EAX]), - "b" (badge), - "S" (msgInfo), + "a"(cur_thread->tcbArch.tcbContext.registers[EAX]), + "b"(badge), + "S"(msgInfo), [IFMASK]"i"(FLAGS_IF) : "memory" ); diff --git a/include/arch/x86/arch/32/mode/machine.h b/include/arch/x86/arch/32/mode/machine.h index 1019dd5ee..fd1a5cfa2 100644 --- a/include/arch/x86/arch/32/mode/machine.h +++ b/include/arch/x86/arch/32/mode/machine.h @@ -94,11 +94,11 @@ static inline rdmsr_safe_result_t x86_rdmsr_safe(const uint32_t reg) 1: \n\ movl (%[returnto_addr]), %[returnto] \n\ movl $0, (%[returnto_addr])" - : [returnto] "=&r" (returnto), - [high] "=&d" (high), - [low] "=&a" (low) - : [returnto_addr] "r" (&ARCH_NODE_STATE(x86KSGPExceptReturnTo)), - [reg] "c" (reg) + : [returnto] "=&r"(returnto), + [high] "=&d"(high), + [low] "=&a"(low) + : [returnto_addr] "r"(&ARCH_NODE_STATE(x86KSGPExceptReturnTo)), + [reg] "c"(reg) : "memory" ); result.success = returnto != 0; @@ -123,14 +123,14 @@ void ia32_install_tss(uint32_t tss_sel); static inline void FORCE_INLINE x86_write_fs_base_impl(word_t base) { gdt_entry_gdt_data_ptr_set_base_low(x86KSGlobalState[CURRENT_CPU_INDEX()].x86KSgdt + GDT_IPCBUF, base); - gdt_entry_gdt_data_ptr_set_base_mid(x86KSGlobalState[CURRENT_CPU_INDEX()].x86KSgdt + GDT_IPCBUF, (base >> 16) & 0xFF); + gdt_entry_gdt_data_ptr_set_base_mid(x86KSGlobalState[CURRENT_CPU_INDEX()].x86KSgdt + GDT_IPCBUF, (base >> 16) & 0xFF); gdt_entry_gdt_data_ptr_set_base_high(x86KSGlobalState[CURRENT_CPU_INDEX()].x86KSgdt + GDT_IPCBUF, (base >> 24) & 0xFF); } static inline void FORCE_INLINE x86_write_gs_base_impl(word_t base) { gdt_entry_gdt_data_ptr_set_base_low(x86KSGlobalState[CURRENT_CPU_INDEX()].x86KSgdt + GDT_TLS, base); - gdt_entry_gdt_data_ptr_set_base_mid(x86KSGlobalState[CURRENT_CPU_INDEX()].x86KSgdt + GDT_TLS, (base >> 16) & 0xFF); + gdt_entry_gdt_data_ptr_set_base_mid(x86KSGlobalState[CURRENT_CPU_INDEX()].x86KSgdt + GDT_TLS, (base >> 16) & 0xFF); gdt_entry_gdt_data_ptr_set_base_high(x86KSGlobalState[CURRENT_CPU_INDEX()].x86KSgdt + GDT_TLS, (base >> 24) & 0xFF); } diff --git a/include/arch/x86/arch/32/mode/machine/debug.h b/include/arch/x86/arch/32/mode/machine/debug.h index 4fe653e5b..96fdd5b5a 100644 --- a/include/arch/x86/arch/32/mode/machine/debug.h +++ b/include/arch/x86/arch/32/mode/machine/debug.h @@ -26,7 +26,7 @@ readDr6Reg(void) asm volatile( "movl %%dr6, %0 \n\t" - : "=r" (ret)); + : "=r"(ret)); return ret; } @@ -36,7 +36,7 @@ writeDr6Reg(word_t val) asm volatile( "movl %0, %%dr6 \n\t" : - : "r" (val)); + : "r"(val)); } static inline word_t @@ -46,7 +46,7 @@ readDr7Reg(void) asm volatile( "movl %%dr7, %0 \n\t" - : "=r" (ret)); + : "=r"(ret)); return ret; } @@ -56,7 +56,7 @@ writeDr7Reg(word_t val) asm volatile( "movl %0, %%dr7 \n\t" : - : "r" 
(val)); + : "r"(val)); } static inline word_t @@ -67,16 +67,16 @@ readDrReg(uint8_t reg) assert(reg < X86_DEBUG_BP_N_REGS); switch (reg) { case 0: - asm volatile("movl %%dr0, %0 \n\t" : "=r" (ret)); + asm volatile("movl %%dr0, %0 \n\t" : "=r"(ret)); break; case 1: - asm volatile("movl %%dr1, %0 \n\t" : "=r" (ret)); + asm volatile("movl %%dr1, %0 \n\t" : "=r"(ret)); break; case 2: - asm volatile("movl %%dr2, %0 \n\t" : "=r" (ret)); + asm volatile("movl %%dr2, %0 \n\t" : "=r"(ret)); break; default: - asm volatile("movl %%dr3, %0 \n\t" : "=r" (ret)); + asm volatile("movl %%dr3, %0 \n\t" : "=r"(ret)); break; } return ret; @@ -88,16 +88,16 @@ writeDrReg(uint8_t reg, word_t val) assert(reg < X86_DEBUG_BP_N_REGS); switch (reg) { case 0: - asm volatile("movl %0, %%dr0 \n\t" :: "r" (val)); + asm volatile("movl %0, %%dr0 \n\t" :: "r"(val)); break; case 1: - asm volatile("movl %0, %%dr1 \n\t" :: "r" (val)); + asm volatile("movl %0, %%dr1 \n\t" :: "r"(val)); break; case 2: - asm volatile("movl %0, %%dr2 \n\t" :: "r" (val)); + asm volatile("movl %0, %%dr2 \n\t" :: "r"(val)); break; default: - asm volatile("movl %0, %%dr3 \n\t" :: "r" (val)); + asm volatile("movl %0, %%dr3 \n\t" :: "r"(val)); break; } } @@ -112,7 +112,7 @@ loadBreakpointState(tcb_t *source) * breakpoint control register (DR7) last since it is what "activates" the * effects of the configuration described by the other registers. */ - asm volatile ( + asm volatile( "movl %0, %%edx \n\t" "movl (%%edx), %%ecx \n\t" "movl %%ecx, %%dr0 \n\t" @@ -132,7 +132,7 @@ loadBreakpointState(tcb_t *source) "movl (%%edx), %%ecx \n\t" "movl %%ecx, %%dr7 \n\t" : - : "r" (source->tcbArch.tcbContext.breakpointState.dr) + : "r"(source->tcbArch.tcbContext.breakpointState.dr) : "edx", "ecx"); } diff --git a/include/arch/x86/arch/32/mode/model/smp.h b/include/arch/x86/arch/32/mode/model/smp.h index b0171690f..e4a82f12c 100644 --- a/include/arch/x86/arch/32/mode/model/smp.h +++ b/include/arch/x86/arch/32/mode/model/smp.h @@ -25,7 +25,7 @@ getCurESP(void) { word_t stack; void *result; - asm ("movl %[stack_address], %[result]" : [result] "=r"(result) : [stack_address] "r"(&stack)); + asm("movl %[stack_address], %[result]" : [result] "=r"(result) : [stack_address] "r"(&stack)); return result; } diff --git a/include/arch/x86/arch/64/mode/fastpath/fastpath.h b/include/arch/x86/arch/64/mode/fastpath/fastpath.h index e691cb7a4..83d4c4bac 100644 --- a/include/arch/x86/arch/64/mode/fastpath/fastpath.h +++ b/include/arch/x86/arch/64/mode/fastpath/fastpath.h @@ -59,8 +59,8 @@ switchToThread_fp(tcb_t *thread, vspace_root_t *vroot, pde_t stored_hw_asid) #ifdef ENABLE_SMP_SUPPORT asm volatile("movq %[value], %%gs:%c[offset]" : - : [value] "r" (&thread->tcbArch.tcbContext.registers[Error + 1]), - [offset] "i" (OFFSETOF(nodeInfo_t, currentThreadUserContext))); + : [value] "r"(&thread->tcbArch.tcbContext.registers[Error + 1]), + [offset] "i"(OFFSETOF(nodeInfo_t, currentThreadUserContext))); #endif /* ENABLE_SMP_SUPPORT */ if (config_set(CONFIG_KERNEL_X86_IBPB_ON_CONTEXT_SWITCH)) { @@ -125,7 +125,7 @@ fastpath_copy_mrs(word_t length, tcb_t *src, tcb_t *dest) which appears above it is zero. We are assuming that n_msgRegisters == 4 for this check to be useful. By masking out the bottom 3 bits, we are really checking that n + 3 <= MASK(3), i.e. n + 3 <= 7 or n <= 4. 
*/ -compile_assert (n_msgRegisters_eq_4, n_msgRegisters == 4) +compile_assert(n_msgRegisters_eq_4, n_msgRegisters == 4) static inline int fastpath_mi_check(word_t msgInfo) { @@ -189,7 +189,7 @@ fastpath_restore(word_t badge, word_t msgInfo, tcb_t *cur_thread) #if defined(ENABLE_SMP_SUPPORT) && defined(CONFIG_KERNEL_SKIM_WINDOW) register word_t next_cr3_r11 asm("r11") = next_cr3; #endif - asm volatile ( + asm volatile( "movq %%rcx, %%rsp\n" "popq %%rax\n" "popq %%rbx\n" @@ -229,13 +229,13 @@ fastpath_restore(word_t badge, word_t msgInfo, tcb_t *cur_thread) "sti\n" "sysexitq\n" : - : "c" (&cur_thread->tcbArch.tcbContext.registers[RAX]), - "D" (badge), - "S" (msgInfo), + : "c"(&cur_thread->tcbArch.tcbContext.registers[RAX]), + "D"(badge), + "S"(msgInfo), #if defined(ENABLE_SMP_SUPPORT) && defined(CONFIG_KERNEL_SKIM_WINDOW) "r"(next_cr3_r11), #endif - [IF] "i" (FLAGS_IF) + [IF] "i"(FLAGS_IF) : "memory" ); } else { @@ -274,10 +274,10 @@ fastpath_restore(word_t badge, word_t msgInfo, tcb_t *cur_thread) "sysretq\n" : : "r"(&cur_thread->tcbArch.tcbContext.registers[RAX]), - "D" (badge), - "S" (msgInfo) + "D"(badge), + "S"(msgInfo) #if defined(ENABLE_SMP_SUPPORT) && defined(CONFIG_KERNEL_SKIM_WINDOW) - , "c" (next_cr3) + , "c"(next_cr3) #endif : "memory" ); diff --git a/include/arch/x86/arch/64/mode/machine.h b/include/arch/x86/arch/64/mode/machine.h index c9380e518..0086818fa 100644 --- a/include/arch/x86/arch/64/mode/machine.h +++ b/include/arch/x86/arch/64/mode/machine.h @@ -153,7 +153,7 @@ static inline void invalidateLocalPCID(word_t type, void *vaddr, asid_t asid) invpcid_desc_t desc; desc.asid = asid & 0xfff; desc.addr = (uint64_t)vaddr; - asm volatile ("invpcid %1, %0" :: "r"(type), "m"(desc)); + asm volatile("invpcid %1, %0" :: "r"(type), "m"(desc)); } else { switch (type) { case INVPCID_TYPE_ADDR: @@ -207,8 +207,8 @@ static inline void invalidateLocalPageStructureCacheASID(paddr_t root, asid_t as "mov %[new_cr3], %%cr3\n" "mov %[old_cr3], %%cr3\n" :: - [new_cr3] "r" (makeCR3(root, asid).words[0]), - [old_cr3] "r" (cr3.words[0] | BIT(63)) + [new_cr3] "r"(makeCR3(root, asid).words[0]), + [old_cr3] "r"(cr3.words[0] | BIT(63)) ); } else { /* just invalidate the page structure cache as per normal, by @@ -236,12 +236,12 @@ static inline rdmsr_safe_result_t x86_rdmsr_safe(const uint32_t reg) 1: \n\ movq (%[returnto_addr]), %[returnto] \n\ movq $0, (%[returnto_addr])" - : [returnto] "=&r" (returnto), - [temp] "=&r" (temp), - [high] "=&d" (high), - [low] "=&a" (low) - : [returnto_addr] "r" (&ARCH_NODE_STATE(x86KSGPExceptReturnTo)), - [reg] "c" (reg) + : [returnto] "=&r"(returnto), + [temp] "=&r"(temp), + [high] "=&d"(high), + [low] "=&a"(low) + : [returnto_addr] "r"(&ARCH_NODE_STATE(x86KSGPExceptReturnTo)), + [reg] "c"(reg) : "memory" ); result.success = returnto != 0; @@ -253,25 +253,25 @@ static inline rdmsr_safe_result_t x86_rdmsr_safe(const uint32_t reg) static inline void x86_write_fs_base_impl(word_t base) { - asm volatile ("wrfsbase %0"::"r"(base)); + asm volatile("wrfsbase %0"::"r"(base)); } static inline void x86_write_gs_base_impl(word_t base) { - asm volatile ("wrgsbase %0"::"r"(base)); + asm volatile("wrgsbase %0"::"r"(base)); } static inline word_t x86_read_fs_base_impl(void) { word_t base = 0; - asm volatile ("rdfsbase %0":"=r"(base)); + asm volatile("rdfsbase %0":"=r"(base)); return base; } static inline word_t x86_read_gs_base_impl(void) { word_t base = 0; - asm volatile ("rdgsbase %0":"=r"(base)); + asm volatile("rdgsbase %0":"=r"(base)); return base; } diff --git 
a/include/arch/x86/arch/64/mode/machine/cpu_registers.h b/include/arch/x86/arch/64/mode/machine/cpu_registers.h index e97492f6d..bb550736c 100644 --- a/include/arch/x86/arch/64/mode/machine/cpu_registers.h +++ b/include/arch/x86/arch/64/mode/machine/cpu_registers.h @@ -16,7 +16,7 @@ static inline unsigned long read_cr3(void) { word_t cr3; - asm volatile ("movq %%cr3, %0" : "=r"(cr3), "=m"(control_reg_order)); + asm volatile("movq %%cr3, %0" : "=r"(cr3), "=m"(control_reg_order)); return cr3; } diff --git a/include/arch/x86/arch/64/mode/machine/debug.h b/include/arch/x86/arch/64/mode/machine/debug.h index b6ffc322a..c3956e85b 100644 --- a/include/arch/x86/arch/64/mode/machine/debug.h +++ b/include/arch/x86/arch/64/mode/machine/debug.h @@ -26,7 +26,7 @@ readDr6Reg(void) asm volatile( "movq %%dr6, %0 \n\t" - : "=r" (ret)); + : "=r"(ret)); return ret; } @@ -36,7 +36,7 @@ writeDr6Reg(word_t val) asm volatile( "movq %0, %%dr6 \n\t" : - : "r" (val)); + : "r"(val)); } static inline word_t @@ -46,7 +46,7 @@ readDr7Reg(void) asm volatile( "movq %%dr7, %0 \n\t" - : "=r" (ret)); + : "=r"(ret)); return ret; } @@ -56,7 +56,7 @@ writeDr7Reg(word_t val) asm volatile( "movq %0, %%dr7 \n\t" : - : "r" (val)); + : "r"(val)); } static inline word_t @@ -67,16 +67,16 @@ readDrReg(uint8_t reg) assert(reg < X86_DEBUG_BP_N_REGS); switch (reg) { case 0: - asm volatile("movq %%dr0, %0 \n\t" : "=r" (ret)); + asm volatile("movq %%dr0, %0 \n\t" : "=r"(ret)); break; case 1: - asm volatile("movq %%dr1, %0 \n\t" : "=r" (ret)); + asm volatile("movq %%dr1, %0 \n\t" : "=r"(ret)); break; case 2: - asm volatile("movq %%dr2, %0 \n\t" : "=r" (ret)); + asm volatile("movq %%dr2, %0 \n\t" : "=r"(ret)); break; default: - asm volatile("movq %%dr3, %0 \n\t" : "=r" (ret)); + asm volatile("movq %%dr3, %0 \n\t" : "=r"(ret)); break; } return ret; @@ -88,16 +88,16 @@ writeDrReg(uint8_t reg, word_t val) assert(reg < X86_DEBUG_BP_N_REGS); switch (reg) { case 0: - asm volatile("movq %0, %%dr0 \n\t" :: "r" (val)); + asm volatile("movq %0, %%dr0 \n\t" :: "r"(val)); break; case 1: - asm volatile("movq %0, %%dr1 \n\t" :: "r" (val)); + asm volatile("movq %0, %%dr1 \n\t" :: "r"(val)); break; case 2: - asm volatile("movq %0, %%dr2 \n\t" :: "r" (val)); + asm volatile("movq %0, %%dr2 \n\t" :: "r"(val)); break; default: - asm volatile("movq %0, %%dr3 \n\t" :: "r" (val)); + asm volatile("movq %0, %%dr3 \n\t" :: "r"(val)); break; } } @@ -112,7 +112,7 @@ loadBreakpointState(tcb_t *source) * breakpoint control register (DR7) last since it is what "activates" the * effects of the configuration described by the other registers. 
*/ - asm volatile ( + asm volatile( "movq %0, %%rdx \n\t" "movq (%%rdx), %%rcx \n\t" "movq %%rcx, %%dr0 \n\t" @@ -132,7 +132,7 @@ loadBreakpointState(tcb_t *source) "movq (%%rdx), %%rcx \n\t" "movq %%rcx, %%dr7 \n\t" : - : "r" (source->tcbArch.tcbContext.breakpointState.dr) + : "r"(source->tcbArch.tcbContext.breakpointState.dr) : "rdx", "rcx"); } diff --git a/include/arch/x86/arch/64/mode/model/smp.h b/include/arch/x86/arch/64/mode/model/smp.h index 3a5b235b8..871ba5065 100644 --- a/include/arch/x86/arch/64/mode/model/smp.h +++ b/include/arch/x86/arch/64/mode/model/smp.h @@ -53,9 +53,9 @@ extern char nodeSkimScratchOffset[]; static inline CONST cpu_id_t getCurrentCPUIndex(void) { cpu_id_t index; - asm ("movq %%gs:%c[offset], %[result]" - : [result] "=r" (index) - : [offset] "i" (OFFSETOF(nodeInfo_t, index))); + asm("movq %%gs:%c[offset], %[result]" + : [result] "=r"(index) + : [offset] "i"(OFFSETOF(nodeInfo_t, index))); return index; } diff --git a/include/arch/x86/arch/benchmark.h b/include/arch/x86/arch/benchmark.h index 49ec792c6..5f75e0ca3 100644 --- a/include/arch/x86/arch/benchmark.h +++ b/include/arch/x86/arch/benchmark.h @@ -19,7 +19,7 @@ timestamp(void) { uint32_t low, high; - asm volatile ( + asm volatile( "movl $0, %%eax \n" "movl $0, %%ecx \n" "cpuid \n" @@ -29,7 +29,7 @@ timestamp(void) "movl $0, %%eax \n" "movl $0, %%ecx \n" "cpuid \n" - : "=r" (high), "=r" (low) + : "=r"(high), "=r"(low) : /* no inputs */ : "eax", "ebx", "ecx", "edx" ); diff --git a/include/arch/x86/arch/machine.h b/include/arch/x86/arch/machine.h index ad987375b..8f26ec1fe 100644 --- a/include/arch/x86/arch/machine.h +++ b/include/arch/x86/arch/machine.h @@ -139,11 +139,11 @@ static inline uint32_t x86_cpuid_edx(uint32_t eax, uint32_t ecx) { uint32_t edx, ebx; asm volatile("cpuid" - : "=a" (eax), - "=b" (ebx), - "=c" (ecx), - "=d" (edx) - : "a" (eax), "c" (ecx) + : "=a"(eax), + "=b"(ebx), + "=c"(ecx), + "=d"(edx) + : "a"(eax), "c"(ecx) : "memory"); return edx; } @@ -152,11 +152,11 @@ static inline uint32_t x86_cpuid_eax(uint32_t eax, uint32_t ecx) { uint32_t edx, ebx; asm volatile("cpuid" - : "=a" (eax), - "=b" (ebx), - "=c" (ecx), - "=d" (edx) - : "a" (eax), "c" (ecx) + : "=a"(eax), + "=b"(ebx), + "=c"(ecx), + "=d"(edx) + : "a"(eax), "c"(ecx) : "memory"); return eax; } @@ -165,11 +165,11 @@ static inline uint32_t x86_cpuid_ecx(uint32_t eax, uint32_t ecx) { uint32_t edx, ebx; asm volatile("cpuid" - : "=a" (eax), - "=b" (ebx), - "=c" (ecx), - "=d" (edx) - : "a" (eax), "c" (ecx) + : "=a"(eax), + "=b"(ebx), + "=c"(ecx), + "=d"(edx) + : "a"(eax), "c"(ecx) : "memory"); return ecx; } @@ -178,11 +178,11 @@ static inline uint32_t x86_cpuid_ebx(uint32_t eax, uint32_t ecx) { uint32_t edx, ebx; asm volatile("cpuid" - : "=a" (eax), - "=b" (ebx), - "=c" (ecx), - "=d" (edx) - : "a" (eax), "c" (ecx) + : "=a"(eax), + "=b"(ebx), + "=c"(ecx), + "=d"(edx) + : "a"(eax), "c"(ecx) : "memory"); return ebx; } @@ -191,8 +191,8 @@ static inline uint64_t x86_rdtsc(void) { uint32_t hi, lo; asm volatile("rdtsc" - : "=a" (lo), - "=d" (hi) + : "=a"(lo), + "=d"(hi) ); return ((uint64_t) hi) << 32llu | (uint64_t) lo; } diff --git a/include/arch/x86/arch/machine/fpu.h b/include/arch/x86/arch/machine/fpu.h index d07c6a785..4657e885b 100644 --- a/include/arch/x86/arch/machine/fpu.h +++ b/include/arch/x86/arch/machine/fpu.h @@ -105,7 +105,7 @@ static inline void finit(void) */ static inline void enableFpu(void) { - asm volatile("clts" :: "m" (control_reg_order)); + asm volatile("clts" :: "m"(control_reg_order)); } /* diff --git 
a/include/arch/x86/arch/machine/hardware.h b/include/arch/x86/arch/machine/hardware.h index a718eb3b1..a177c8693 100644 --- a/include/arch/x86/arch/machine/hardware.h +++ b/include/arch/x86/arch/machine/hardware.h @@ -104,7 +104,7 @@ uint32_t CONST getCacheLineSizeBits(void); /* Flushes a specific memory range from the CPU cache */ static inline void flushCacheLine(volatile void* vaddr) { - asm volatile ("clflush %[vaddr]" : [vaddr] "+m"(*((volatile char *)vaddr))); + asm volatile("clflush %[vaddr]" : [vaddr] "+m"(*((volatile char *)vaddr))); } void flushCacheRange(void* vaddr, uint32_t size_bits); diff --git a/include/arch/x86/arch/object/vcpu.h b/include/arch/x86/arch/object/vcpu.h index 06233916a..6298749a0 100644 --- a/include/arch/x86/arch/object/vcpu.h +++ b/include/arch/x86/arch/object/vcpu.h @@ -362,7 +362,7 @@ static inline word_t vmread(word_t field) { word_t value; - asm volatile ( + asm volatile( "vmread %1, %0" : "=r"(value) : "r"(field) @@ -376,7 +376,7 @@ vmread(word_t field) static inline void vmwrite(word_t field, word_t value) { - asm volatile ( + asm volatile( "vmwrite %0, %1" : : "r"(value), "r"(field) diff --git a/include/plat/bcm2837/plat/machine.h b/include/plat/bcm2837/plat/machine.h index 912699775..b979a0a55 100644 --- a/include/plat/bcm2837/plat/machine.h +++ b/include/plat/bcm2837/plat/machine.h @@ -34,16 +34,16 @@ enum IRQConstants { //17:12 Peripheral 1..15 interrupt (Currently not used) //31:28 <Reserved> - INTERRUPT_BASIC_IRQ_ARM_TIMER = (BASIC_IRQ_OFFSET + 0), - INTERRUPT_BASIC_IRQ_ARM_MAILBOX = (BASIC_IRQ_OFFSET + 1), - INTERRUPT_BASIC_IRQ_ARM_DOORBELL0 = (BASIC_IRQ_OFFSET + 2), - INTERRUPT_BASIC_IRQ_ARM_DOORBELL1 = (BASIC_IRQ_OFFSET + 3), - INTERRUPT_BASIC_IRQ_GPU0_HALTED = (BASIC_IRQ_OFFSET + 4), - INTERRUPT_BASIC_IRQ_GPU1_HALTED = (BASIC_IRQ_OFFSET + 5), - INTERRUPT_BASIC_IRQ_ILLEGAL_ACCESS_TYPE1 = (BASIC_IRQ_OFFSET + 6), - INTERRUPT_BASIC_IRQ_ILLEGAL_ACCESS_TYPE0 = (BASIC_IRQ_OFFSET + 7), - INTERRUPT_BASIC_IRQ_PENDING_REGISTER1 = (BASIC_IRQ_OFFSET + 8), - INTERRUPT_BASIC_IRQ_PENDING_REGISTER2 = (BASIC_IRQ_OFFSET + 9), + INTERRUPT_BASIC_IRQ_ARM_TIMER = (BASIC_IRQ_OFFSET + 0), + INTERRUPT_BASIC_IRQ_ARM_MAILBOX = (BASIC_IRQ_OFFSET + 1), + INTERRUPT_BASIC_IRQ_ARM_DOORBELL0 = (BASIC_IRQ_OFFSET + 2), + INTERRUPT_BASIC_IRQ_ARM_DOORBELL1 = (BASIC_IRQ_OFFSET + 3), + INTERRUPT_BASIC_IRQ_GPU0_HALTED = (BASIC_IRQ_OFFSET + 4), + INTERRUPT_BASIC_IRQ_GPU1_HALTED = (BASIC_IRQ_OFFSET + 5), + INTERRUPT_BASIC_IRQ_ILLEGAL_ACCESS_TYPE1 = (BASIC_IRQ_OFFSET + 6), + INTERRUPT_BASIC_IRQ_ILLEGAL_ACCESS_TYPE0 = (BASIC_IRQ_OFFSET + 7), + INTERRUPT_BASIC_IRQ_PENDING_REGISTER1 = (BASIC_IRQ_OFFSET + 8), + INTERRUPT_BASIC_IRQ_PENDING_REGISTER2 = (BASIC_IRQ_OFFSET + 9), INTERRUPT_BASIC_IRQ_GPU_IRQ_7 = (BASIC_IRQ_OFFSET + 10), INTERRUPT_BASIC_IRQ_GPU_IRQ_9 = (BASIC_IRQ_OFFSET + 11), INTERRUPT_BASIC_IRQ_GPU_IRQ_10 = (BASIC_IRQ_OFFSET + 12), diff --git a/include/util.h b/include/util.h index d10d21df2..4433db029 100644 --- a/include/util.h +++ b/include/util.h @@ -100,8 +100,8 @@ long CONST char_to_long(char c); long PURE str_to_long(const char* str); -int __builtin_clzl (unsigned long x); -int __builtin_ctzl (unsigned long x); +int __builtin_clzl(unsigned long x); +int __builtin_ctzl(unsigned long x); #ifdef CONFIG_ARCH_RISCV uint32_t __clzsi2(uint32_t x); @@ -139,7 +139,7 @@ CONST ctzl(unsigned long x) #define CTZL(x) __builtin_ctzl(x) -int __builtin_popcountl (unsigned long x); +int __builtin_popcountl(unsigned long x); /** DONT_TRANSLATE */ static inline long diff --git 
a/libsel4/arch_include/arm/sel4/arch/syscalls.h b/libsel4/arch_include/arm/sel4/arch/syscalls.h index 46850f1e5..17ce57728 100644 --- a/libsel4/arch_include/arm/sel4/arch/syscalls.h +++ b/libsel4/arch_include/arm/sel4/arch/syscalls.h @@ -384,7 +384,7 @@ seL4_DebugNameThread(seL4_CPtr tcb, const char *name) #ifdef CONFIG_DANGEROUS_CODE_INJECTION LIBSEL4_INLINE_FUNC void -seL4_DebugRun(void (* userfn) (void *), void* userarg) +seL4_DebugRun(void (* userfn)(void *), void* userarg) { arm_sys_send_null(seL4_SysDebugRun, (seL4_Word)userfn, (seL4_Word)userarg); asm volatile("" ::: "memory"); diff --git a/libsel4/arch_include/riscv/sel4/arch/functions.h b/libsel4/arch_include/riscv/sel4/arch/functions.h index 8a8cd0a6a..a70ab625d 100644 --- a/libsel4/arch_include/riscv/sel4/arch/functions.h +++ b/libsel4/arch_include/riscv/sel4/arch/functions.h @@ -25,7 +25,7 @@ LIBSEL4_INLINE_FUNC seL4_IPCBuffer* seL4_GetIPCBuffer(void) { seL4_Word reg; - asm ("mv %0, tp" : "=r"(reg)); + asm("mv %0, tp" : "=r"(reg)); return (seL4_IPCBuffer*)reg; } diff --git a/libsel4/arch_include/riscv/sel4/arch/syscalls.h b/libsel4/arch_include/riscv/sel4/arch/syscalls.h index a8f0cd6c4..2c49d674b 100644 --- a/libsel4/arch_include/riscv/sel4/arch/syscalls.h +++ b/libsel4/arch_include/riscv/sel4/arch/syscalls.h @@ -39,10 +39,10 @@ riscv_sys_send(seL4_Word sys, seL4_Word dest, seL4_Word info_arg, seL4_Word mr0, /* Perform the system call. */ register seL4_Word scno asm("a7") = sys; - asm volatile ( + asm volatile( "ecall" - : "+r" (destptr), "+r" (msg0), "+r" (msg1), "+r" (msg2), - "+r" (msg3), "+r" (info) + : "+r"(destptr), "+r"(msg0), "+r"(msg1), "+r"(msg2), + "+r"(msg3), "+r"(info) : "r"(scno) ); } @@ -61,10 +61,10 @@ riscv_sys_reply(seL4_Word sys, seL4_Word info_arg, seL4_Word mr0, seL4_Word mr1, /* Perform the system call. */ register seL4_Word scno asm("a7") = sys; - asm volatile ( + asm volatile( "ecall" - : "+r" (msg0), "+r" (msg1), "+r" (msg2), "+r" (msg3), - "+r" (info) + : "+r"(msg0), "+r"(msg1), "+r"(msg2), "+r"(msg3), + "+r"(info) : "r"(scno) ); } @@ -77,9 +77,9 @@ riscv_sys_send_null(seL4_Word sys, seL4_Word src, seL4_Word info_arg) /* Perform the system call. */ register seL4_Word scno asm("a7") = sys; - asm volatile ( + asm volatile( "ecall" - : "+r" (destptr), "+r" (info) + : "+r"(destptr), "+r"(info) : "r"(scno) ); } @@ -99,10 +99,10 @@ riscv_sys_recv(seL4_Word sys, seL4_Word src, seL4_Word *out_badge, seL4_Word *ou /* Perform the system call. */ register seL4_Word scno asm("a7") = sys; - asm volatile ( + asm volatile( "ecall" - : "=r" (msg0), "=r" (msg1), "=r" (msg2), "=r" (msg3), - "=r" (info), "+r" (src_and_badge) + : "=r"(msg0), "=r"(msg1), "=r"(msg2), "=r"(msg3), + "=r"(info), "+r"(src_and_badge) : "r"(scno) : "memory" ); @@ -118,7 +118,7 @@ static inline void riscv_sys_null(seL4_Word sys) { register seL4_Word scno asm("a7") = sys; - asm volatile ( + asm volatile( "ecall" : /* no outputs */ : "r"(scno) @@ -143,10 +143,10 @@ riscv_sys_send_recv(seL4_Word sys, seL4_Word dest, seL4_Word *out_badge, seL4_Wo /* Perform the system call. 
*/ register seL4_Word scno asm("a7") = sys; - asm volatile ( + asm volatile( "ecall" - : "+r" (msg0), "+r" (msg1), "+r" (msg2), "+r" (msg3), - "+r" (info), "+r" (destptr) + : "+r"(msg0), "+r"(msg1), "+r"(msg2), "+r"(msg3), + "+r"(info), "+r"(destptr) : "r"(scno) : "memory" ); @@ -471,7 +471,7 @@ LIBSEL4_INLINE_FUNC void seL4_Yield(void) { register seL4_Word scno asm("a7") = seL4_SysYield; - asm volatile ("ecall" :: "r"(scno)); + asm volatile("ecall" :: "r"(scno)); } #ifdef CONFIG_PRINTING @@ -509,14 +509,14 @@ LIBSEL4_INLINE_FUNC void seL4_DebugHalt(void) { register seL4_Word scno asm("a7") = seL4_SysDebugHalt; - asm volatile ("ecall" :: "r"(scno) : "memory"); + asm volatile("ecall" :: "r"(scno) : "memory"); } LIBSEL4_INLINE_FUNC void seL4_DebugSnapshot(void) { register seL4_Word scno asm("a7") = seL4_SysDebugSnapshot; - asm volatile ("ecall" ::"r"(scno) : "memory"); + asm volatile("ecall" ::"r"(scno) : "memory"); } LIBSEL4_INLINE_FUNC seL4_Uint32 @@ -553,12 +553,12 @@ seL4_DebugNameThread(seL4_CPtr tcb, const char *name) #ifdef SEL4_DANGEROUS_CODE_INJECTION_KERNEL LIBSEL4_INLINE_FUNC void -seL4_DebugRun(void (* userfn) (void *), void* userarg) +seL4_DebugRun(void (* userfn)(void *), void* userarg) { register seL4_Word arg1 asm("a0") = (seL4_Word)userfn; register seL4_Word arg2 asm("a1") = (seL4_Word)userarg; register seL4_Word scno asm("a7") = seL4_SysDebugRun; - asm volatile ("ecall" : "+r"(arg1) : "r"(arg2), "r"(scno)); + asm volatile("ecall" : "+r"(arg1) : "r"(arg2), "r"(scno)); } #endif diff --git a/libsel4/include/sel4/shared_types.h b/libsel4/include/sel4/shared_types.h index d0860a05e..6008f2141 100644 --- a/libsel4/include/sel4/shared_types.h +++ b/libsel4/include/sel4/shared_types.h @@ -23,7 +23,7 @@ typedef struct seL4_IPCBuffer_ { seL4_CPtr receiveCNode; seL4_CPtr receiveIndex; seL4_Word receiveDepth; -} seL4_IPCBuffer __attribute__ ((__aligned__ (sizeof(struct seL4_IPCBuffer_)))); +} seL4_IPCBuffer __attribute__((__aligned__(sizeof(struct seL4_IPCBuffer_)))); enum { seL4_CapFault_IP, diff --git a/libsel4/include/sel4/syscalls.h b/libsel4/include/sel4/syscalls.h index 06435b52c..ba0b665b4 100644 --- a/libsel4/include/sel4/syscalls.h +++ b/libsel4/include/sel4/syscalls.h @@ -355,7 +355,7 @@ seL4_DebugNameThread(seL4_CPtr tcb, const char *name); * */ LIBSEL4_INLINE_FUNC void -seL4_DebugRun(void (* userfn) (void *), void* userarg); +seL4_DebugRun(void (* userfn)(void *), void* userarg); #endif /** @} */ diff --git a/libsel4/sel4_arch_include/aarch32/sel4/sel4_arch/functions.h b/libsel4/sel4_arch_include/aarch32/sel4/sel4_arch/functions.h index 0faa499d8..da3565cd9 100644 --- a/libsel4/sel4_arch_include/aarch32/sel4/sel4_arch/functions.h +++ b/libsel4/sel4_arch_include/aarch32/sel4/sel4_arch/functions.h @@ -24,7 +24,7 @@ seL4_GetIPCBuffer(void) return *(seL4_IPCBuffer**)seL4_GlobalsFrame; #elif defined(CONFIG_IPC_BUF_TPIDRURW) seL4_Word reg; - asm ("mrc p15, 0, %0, c13, c0, 2" : "=r"(reg)); + asm("mrc p15, 0, %0, c13, c0, 2" : "=r"(reg)); return (seL4_IPCBuffer*)reg; #else #error "Unknown IPC buffer strategy" diff --git a/libsel4/sel4_arch_include/aarch32/sel4/sel4_arch/syscalls.h b/libsel4/sel4_arch_include/aarch32/sel4/sel4_arch/syscalls.h index 8a249d39b..7cbd9ae43 100644 --- a/libsel4/sel4_arch_include/aarch32/sel4/sel4_arch/syscalls.h +++ b/libsel4/sel4_arch_include/aarch32/sel4/sel4_arch/syscalls.h @@ -63,10 +63,10 @@ arm_sys_send(seL4_Word sys, seL4_Word dest, seL4_Word info_arg, seL4_Word mr0, s /* Perform the system call. 
*/ register seL4_Word scno asm("r7") = sys; - asm volatile ( + asm volatile( "swi $0" - : "+r" (destptr), "+r" (msg0), "+r" (msg1), "+r" (msg2), - "+r" (msg3), "+r" (info) + : "+r"(destptr), "+r"(msg0), "+r"(msg1), "+r"(msg2), + "+r"(msg3), "+r"(info) : "r"(scno) ); } @@ -84,10 +84,10 @@ arm_sys_reply(seL4_Word sys, seL4_Word info_arg, seL4_Word mr0, seL4_Word mr1, s /* Perform the system call. */ register seL4_Word scno asm("r7") = sys; - asm volatile ( + asm volatile( "swi $0" - : "+r" (msg0), "+r" (msg1), "+r" (msg2), "+r" (msg3), - "+r" (info) + : "+r"(msg0), "+r"(msg1), "+r"(msg2), "+r"(msg3), + "+r"(info) : "r"(scno) ); } @@ -100,9 +100,9 @@ arm_sys_send_null(seL4_Word sys, seL4_Word src, seL4_Word info_arg) /* Perform the system call. */ register seL4_Word scno asm("r7") = sys; - asm volatile ( + asm volatile( "swi $0" - : "+r" (destptr), "+r" (info) + : "+r"(destptr), "+r"(info) : "r"(scno) ); } @@ -121,10 +121,10 @@ arm_sys_recv(seL4_Word sys, seL4_Word src, seL4_Word *out_badge, seL4_Word *out_ /* Perform the system call. */ register seL4_Word scno asm("r7") = sys; - asm volatile ( + asm volatile( "swi $0" - : "=r" (msg0), "=r" (msg1), "=r" (msg2), "=r" (msg3), - "=r" (info), "+r" (src_and_badge) + : "=r"(msg0), "=r"(msg1), "=r"(msg2), "=r"(msg3), + "=r"(info), "+r"(src_and_badge) : "r"(scno) : "memory" ); @@ -150,10 +150,10 @@ arm_sys_send_recv(seL4_Word sys, seL4_Word dest, seL4_Word *out_badge, seL4_Word /* Perform the system call. */ register seL4_Word scno asm("r7") = sys; - asm volatile ( + asm volatile( "swi $0" - : "+r" (msg0), "+r" (msg1), "+r" (msg2), "+r" (msg3), - "+r" (info), "+r" (destptr) + : "+r"(msg0), "+r"(msg1), "+r"(msg2), "+r"(msg3), + "+r"(info), "+r"(destptr) : "r"(scno) : "memory" ); @@ -169,7 +169,7 @@ static inline void arm_sys_null(seL4_Word sys) { register seL4_Word scno asm("r7") = sys; - asm volatile ( + asm volatile( "swi $0" : /* no outputs */ : "r"(scno) diff --git a/libsel4/sel4_arch_include/aarch64/sel4/sel4_arch/functions.h b/libsel4/sel4_arch_include/aarch64/sel4/sel4_arch/functions.h index b1f6ba911..abf111155 100644 --- a/libsel4/sel4_arch_include/aarch64/sel4/sel4_arch/functions.h +++ b/libsel4/sel4_arch_include/aarch64/sel4/sel4_arch/functions.h @@ -20,7 +20,7 @@ LIBSEL4_INLINE_FUNC seL4_IPCBuffer* seL4_GetIPCBuffer(void) { seL4_Word reg; - asm ("mrs %0, tpidrro_el0" : "=r" (reg)); + asm("mrs %0, tpidrro_el0" : "=r"(reg)); return (seL4_IPCBuffer*)reg; } diff --git a/libsel4/sel4_arch_include/aarch64/sel4/sel4_arch/syscalls.h b/libsel4/sel4_arch_include/aarch64/sel4/sel4_arch/syscalls.h index bbc0819e8..900255091 100644 --- a/libsel4/sel4_arch_include/aarch64/sel4/sel4_arch/syscalls.h +++ b/libsel4/sel4_arch_include/aarch64/sel4/sel4_arch/syscalls.h @@ -63,10 +63,10 @@ arm_sys_send(seL4_Word sys, seL4_Word dest, seL4_Word info_arg, seL4_Word mr0, s /* Perform the system call. */ register seL4_Word scno asm("x7") = sys; - asm volatile ( + asm volatile( "svc #0" - : "+r" (destptr), "+r" (msg0), "+r" (msg1), "+r" (msg2), - "+r" (msg3), "+r" (info) + : "+r"(destptr), "+r"(msg0), "+r"(msg1), "+r"(msg2), + "+r"(msg3), "+r"(info) : "r"(scno) ); } @@ -84,10 +84,10 @@ arm_sys_reply(seL4_Word sys, seL4_Word info_arg, seL4_Word mr0, seL4_Word mr1, s /* Perform the system call. 
*/ register seL4_Word scno asm("x7") = sys; - asm volatile ( + asm volatile( "svc #0" - : "+r" (msg0), "+r" (msg1), "+r" (msg2), "+r" (msg3), - "+r" (info) + : "+r"(msg0), "+r"(msg1), "+r"(msg2), "+r"(msg3), + "+r"(info) : "r"(scno) ); } @@ -100,9 +100,9 @@ arm_sys_send_null(seL4_Word sys, seL4_Word src, seL4_Word info_arg) /* Perform the system call. */ register seL4_Word scno asm("x7") = sys; - asm volatile ( + asm volatile( "svc #0" - : "+r" (destptr), "+r" (info) + : "+r"(destptr), "+r"(info) : "r"(scno) ); } @@ -121,10 +121,10 @@ arm_sys_recv(seL4_Word sys, seL4_Word src, seL4_Word *out_badge, seL4_Word *out_ /* Perform the system call. */ register seL4_Word scno asm("x7") = sys; - asm volatile ( + asm volatile( "svc #0" - : "=r" (msg0), "=r" (msg1), "=r" (msg2), "=r" (msg3), - "=r" (info), "+r" (src_and_badge) + : "=r"(msg0), "=r"(msg1), "=r"(msg2), "=r"(msg3), + "=r"(info), "+r"(src_and_badge) : "r"(scno) : "memory" ); @@ -150,10 +150,10 @@ arm_sys_send_recv(seL4_Word sys, seL4_Word dest, seL4_Word *out_badge, seL4_Word /* Perform the system call. */ register seL4_Word scno asm("x7") = sys; - asm volatile ( + asm volatile( "svc #0" - : "+r" (msg0), "+r" (msg1), "+r" (msg2), "+r" (msg3), - "+r" (info), "+r" (destptr) + : "+r"(msg0), "+r"(msg1), "+r"(msg2), "+r"(msg3), + "+r"(info), "+r"(destptr) : "r"(scno) : "memory" ); @@ -169,7 +169,7 @@ static inline void arm_sys_null(seL4_Word sys) { register seL4_Word scno asm("x7") = sys; - asm volatile ( + asm volatile( "svc #0" : /* no outputs */ : "r"(scno) diff --git a/libsel4/sel4_arch_include/ia32/sel4/sel4_arch/syscalls.h b/libsel4/sel4_arch_include/ia32/sel4/sel4_arch/syscalls.h index 275794ebf..c671b4174 100644 --- a/libsel4/sel4_arch_include/ia32/sel4/sel4_arch/syscalls.h +++ b/libsel4/sel4_arch_include/ia32/sel4/sel4_arch/syscalls.h @@ -32,7 +32,7 @@ static inline void x86_sys_send(seL4_Word sys, seL4_Word dest, seL4_Word info, seL4_Word mr1, seL4_Word mr2) { - asm volatile ( + asm volatile( "pushl %%ebp \n" "pushl %%ebx \n" "movl %%ecx, %%ebp \n" @@ -43,18 +43,18 @@ x86_sys_send(seL4_Word sys, seL4_Word dest, seL4_Word info, seL4_Word mr1, seL4_ "sysenter \n" "popl %%ebx \n" "popl %%ebp \n" - : "+d" (dest) - : "a" (sys), - "S" (info), - "D" (mr1), - "c" (mr2) + : "+d"(dest) + : "a"(sys), + "S"(info), + "D"(mr1), + "c"(mr2) ); } static inline void x86_sys_reply(seL4_Word sys, seL4_Word info, seL4_Word mr1, seL4_Word mr2) { - asm volatile ( + asm volatile( "pushl %%ebp \n" "pushl %%ebx \n" "movl %%ecx, %%ebp \n" @@ -65,10 +65,10 @@ x86_sys_reply(seL4_Word sys, seL4_Word info, seL4_Word mr1, seL4_Word mr2) "popl %%ebx \n" "popl %%ebp \n" : - : "a" (sys), - "S" (info), - "D" (mr1), - "c" (mr2) + : "a"(sys), + "S"(info), + "D"(mr1), + "c"(mr2) : "%edx" ); } @@ -76,7 +76,7 @@ x86_sys_reply(seL4_Word sys, seL4_Word info, seL4_Word mr1, seL4_Word mr2) static inline void x86_sys_send_null(seL4_Word sys, seL4_Word src, seL4_Word info) { - asm volatile ( + asm volatile( "pushl %%ebp \n" "pushl %%ebx \n" "movl %%esp, %%ecx \n" @@ -86,9 +86,9 @@ x86_sys_send_null(seL4_Word sys, seL4_Word src, seL4_Word info) "sysenter \n" "popl %%ebx \n" "popl %%ebp \n" - : "+d" (src) - : "a" (sys), - "S" (info) + : "+d"(src) + : "a"(sys), + "S"(info) : "%ecx" ); } @@ -96,7 +96,7 @@ x86_sys_send_null(seL4_Word sys, seL4_Word src, seL4_Word info) static inline void x86_sys_recv(seL4_Word sys, seL4_Word src, seL4_Word *out_badge, seL4_Word *out_info, seL4_Word *out_mr1, seL4_Word *out_mr2) { - asm volatile ( + asm volatile( "pushl %%ebp \n" "pushl %%ebx \n" "movl 
%%esp, %%ecx \n" @@ -109,12 +109,12 @@ x86_sys_recv(seL4_Word sys, seL4_Word src, seL4_Word *out_badge, seL4_Word *out_ "movl %%ebp, %%ecx \n" "popl %%ebp \n" : - "=d" (*out_badge), - "=S" (*out_info), - "=D" (*out_mr1), - "=c" (*out_mr2) - : "a" (sys), - "d" (src) + "=d"(*out_badge), + "=S"(*out_info), + "=D"(*out_mr1), + "=c"(*out_mr2) + : "a"(sys), + "d"(src) : "memory" ); } @@ -136,15 +136,15 @@ x86_sys_send_recv(seL4_Word sys, seL4_Word dest, seL4_Word *out_badge, seL4_Word "movl %%ebp, %%ecx \n" "popl %%ebp \n" : - "=S" (*out_info), - "=D" (*in_out_mr1), - "=c" (*in_out_mr2), - "=d" (*out_badge) - : "a" (sys), - "S" (info), - "D" (*in_out_mr1), - "c" (*in_out_mr2), - "d" (dest) + "=S"(*out_info), + "=D"(*in_out_mr1), + "=c"(*in_out_mr2), + "=d"(*out_badge) + : "a"(sys), + "S"(info), + "D"(*in_out_mr1), + "c"(*in_out_mr2), + "d"(dest) : "memory" ); } @@ -152,7 +152,7 @@ x86_sys_send_recv(seL4_Word sys, seL4_Word dest, seL4_Word *out_badge, seL4_Word static inline void x86_sys_null(seL4_Word sys) { - asm volatile ( + asm volatile( "pushl %%ebp \n" "pushl %%ebx \n" "movl %%esp, %%ecx \n" @@ -162,7 +162,7 @@ x86_sys_null(seL4_Word sys) "popl %%ebx \n" "popl %%ebp \n" : - : "a" (sys) + : "a"(sys) : "%ecx", "%edx" ); } @@ -172,7 +172,7 @@ x86_sys_null(seL4_Word sys) static inline void x86_sys_send(seL4_Word sys, seL4_Word dest, seL4_Word info, seL4_Word mr1, seL4_Word mr2) { - asm volatile ( + asm volatile( "pushl %%ebp \n" "movl %%ecx, %%ebp \n" "movl %%esp, %%ecx \n" @@ -181,11 +181,11 @@ x86_sys_send(seL4_Word sys, seL4_Word dest, seL4_Word info, seL4_Word mr1, seL4_ "sysenter \n" "popl %%ebp \n" : - : "a" (sys), - "b" (dest), - "S" (info), - "D" (mr1), - "c" (mr2) + : "a"(sys), + "b"(dest), + "S"(info), + "D"(mr1), + "c"(mr2) : "%edx" ); } @@ -202,10 +202,10 @@ x86_sys_reply(seL4_Word sys, seL4_Word info, seL4_Word mr1, seL4_Word mr2) "sysenter \n" "popl %%ebp \n" : - : "a" (sys), - "S" (info), - "D" (mr1), - "c" (mr2) + : "a"(sys), + "S"(info), + "D"(mr1), + "c"(mr2) : "%ebx", "%edx" ); } @@ -213,46 +213,46 @@ x86_sys_reply(seL4_Word sys, seL4_Word info, seL4_Word mr1, seL4_Word mr2) static inline void x86_sys_send_null(seL4_Word sys, seL4_Word dest, seL4_Word info) { - asm volatile ( \ - "pushl %%ebp \n" - "movl %%esp, %%ecx \n" - "leal 1f, %%edx \n" - "1: \n" - "sysenter \n" - "popl %%ebp \n" - : - : "a" (sys), - "b" (dest), - "S" (info) - : "%ecx", "edx" - ); + asm volatile(\ + "pushl %%ebp \n" + "movl %%esp, %%ecx \n" + "leal 1f, %%edx \n" + "1: \n" + "sysenter \n" + "popl %%ebp \n" + : + : "a"(sys), + "b"(dest), + "S"(info) + : "%ecx", "edx" + ); } static inline void x86_sys_recv(seL4_Word sys, seL4_Word src, seL4_Word *out_badge, seL4_Word *out_info, seL4_Word *out_mr1, seL4_Word *out_mr2) { - asm volatile ( \ - "pushl %%ebp \n" - "movl %%esp, %%ecx \n" - "leal 1f, %%edx \n" - "1: \n" - "sysenter \n" - "movl %%ebp, %%ecx \n" - "popl %%ebp \n" - : "=b" (*out_badge), - "=S" (*out_info), - "=D" (*out_mr1), - "=c" (*out_mr2) - : "a" (sys), - "b" (src) - : "%edx", "memory" - ); + asm volatile(\ + "pushl %%ebp \n" + "movl %%esp, %%ecx \n" + "leal 1f, %%edx \n" + "1: \n" + "sysenter \n" + "movl %%ebp, %%ecx \n" + "popl %%ebp \n" + : "=b"(*out_badge), + "=S"(*out_info), + "=D"(*out_mr1), + "=c"(*out_mr2) + : "a"(sys), + "b"(src) + : "%edx", "memory" + ); } static inline void x86_sys_send_recv(seL4_Word sys, seL4_Word dest, seL4_Word *out_badge, seL4_Word info, seL4_Word *out_info, seL4_Word *in_out_mr1, seL4_Word *in_out_mr2) { - asm volatile ( + asm volatile( "pushl %%ebp \n" "movl 
%%ecx, %%ebp \n" "movl %%esp, %%ecx \n" @@ -261,15 +261,15 @@ x86_sys_send_recv(seL4_Word sys, seL4_Word dest, seL4_Word *out_badge, seL4_Word "sysenter \n" "movl %%ebp, %%ecx \n" "popl %%ebp \n" - : "=S" (*out_info), - "=D" (*in_out_mr1), - "=c" (*in_out_mr2), - "=b" (*out_badge) - : "a" (sys), - "S" (info), - "D" (*in_out_mr1), - "c" (*in_out_mr2), - "b" (dest) + : "=S"(*out_info), + "=D"(*in_out_mr1), + "=c"(*in_out_mr2), + "=b"(*out_badge) + : "a"(sys), + "S"(info), + "D"(*in_out_mr1), + "c"(*in_out_mr2), + "b"(dest) : "%edx", "memory" ); } @@ -277,7 +277,7 @@ x86_sys_send_recv(seL4_Word sys, seL4_Word dest, seL4_Word *out_badge, seL4_Word static inline void x86_sys_null(seL4_Word sys) { - asm volatile ( + asm volatile( "pushl %%ebp \n" "movl %%esp, %%ecx \n" "leal 1f, %%edx \n" @@ -285,7 +285,7 @@ x86_sys_null(seL4_Word sys) "sysenter \n" "popl %%ebp \n" : - : "a" (sys) + : "a"(sys) : "%ebx", "%ecx", "%edx" ); } @@ -594,7 +594,7 @@ seL4_DebugNameThread(seL4_CPtr tcb, const char *name) #if defined(CONFIG_DANGEROUS_CODE_INJECTION) LIBSEL4_INLINE_FUNC void -seL4_DebugRun(void (*userfn) (void *), void* userarg) +seL4_DebugRun(void (*userfn)(void *), void* userarg) { x86_sys_send_null(seL4_SysDebugRun, (seL4_Word)userfn, (seL4_Word)userarg); asm volatile("" ::: "%edi", "memory"); diff --git a/libsel4/sel4_arch_include/x86_64/sel4/sel4_arch/syscalls.h b/libsel4/sel4_arch_include/x86_64/sel4/sel4_arch/syscalls.h index c15f22c2d..7a2904016 100644 --- a/libsel4/sel4_arch_include/x86_64/sel4/sel4_arch/syscalls.h +++ b/libsel4/sel4_arch_include/x86_64/sel4/sel4_arch/syscalls.h @@ -428,7 +428,7 @@ seL4_DebugNameThread(seL4_CPtr tcb, const char *name) #if defined(CONFIG_DANGEROUS_CODE_INJECTION) LIBSEL4_INLINE_FUNC void -seL4_DebugRun(void (*userfn) (void *), void* userarg) +seL4_DebugRun(void (*userfn)(void *), void* userarg) { x64_sys_send_null(seL4_SysDebugRun, (seL4_Word)userfn, (seL4_Word)userarg); asm volatile("" ::: "memory"); diff --git a/libsel4/sel4_arch_include/x86_64/sel4/sel4_arch/syscalls_syscall.h b/libsel4/sel4_arch_include/x86_64/sel4/sel4_arch/syscalls_syscall.h index b77a0cb93..b6e8d4d20 100644 --- a/libsel4/sel4_arch_include/x86_64/sel4/sel4_arch/syscalls_syscall.h +++ b/libsel4/sel4_arch_include/x86_64/sel4/sel4_arch/syscalls_syscall.h @@ -25,18 +25,18 @@ x64_sys_send(seL4_Word sys, seL4_Word dest, seL4_Word info, seL4_Word msg0, seL4 register seL4_Word mr2 asm("r9") = msg2; register seL4_Word mr3 asm("r15") = msg3; - asm volatile ( + asm volatile( "movq %%rsp, %%rbx \n" "syscall \n" "movq %%rbx, %%rsp \n" : - : "d" (sys), - "D" (dest), - "S" (info), - "r" (mr0), - "r" (mr1), - "r" (mr2), - "r" (mr3) + : "d"(sys), + "D"(dest), + "S"(info), + "r"(mr0), + "r"(mr1), + "r"(mr2), + "r"(mr3) : "%rcx", "%rbx", "r11" ); } @@ -49,17 +49,17 @@ x64_sys_reply(seL4_Word sys, seL4_Word info, seL4_Word msg0, seL4_Word msg1, seL register seL4_Word mr2 asm("r9") = msg2; register seL4_Word mr3 asm("r15") = msg3; - asm volatile ( + asm volatile( "movq %%rsp, %%rbx \n" "syscall \n" "movq %%rbx, %%rsp \n" : - : "d" (sys), - "S" (info), - "r" (mr0), - "r" (mr1), - "r" (mr2), - "r" (mr3) + : "d"(sys), + "S"(info), + "r"(mr0), + "r"(mr1), + "r"(mr2), + "r"(mr3) : "%rbx", "%rcx", "%r11" ); } @@ -67,14 +67,14 @@ x64_sys_reply(seL4_Word sys, seL4_Word info, seL4_Word msg0, seL4_Word msg1, seL static inline void x64_sys_send_null(seL4_Word sys, seL4_Word dest, seL4_Word info) { - asm volatile ( + asm volatile( "movq %%rsp, %%rbx \n" "syscall \n" "movq %%rbx, %%rsp \n" : - : "d" (sys), - "D" (dest), - "S" 
(info) + : "d"(sys), + "D"(dest), + "S"(info) : "%rcx", "%rbx", "%r11" ); } @@ -87,18 +87,18 @@ x64_sys_recv(seL4_Word sys, seL4_Word src, seL4_Word *out_badge, seL4_Word *out_ register seL4_Word mr2 asm("r9"); register seL4_Word mr3 asm("r15"); - asm volatile ( + asm volatile( "movq %%rsp, %%rbx \n" "syscall \n" "movq %%rbx, %%rsp \n" - : "=D" (*out_badge), - "=S" (*out_info), - "=r" (mr0), - "=r" (mr1), - "=r" (mr2), - "=r" (mr3) - : "d" (sys), - "D" (src) + : "=D"(*out_badge), + "=S"(*out_info), + "=r"(mr0), + "=r"(mr1), + "=r"(mr2), + "=r"(mr3) + : "d"(sys), + "D"(src) : "%rcx", "%rbx", "r11", "memory" ); *out_mr0 = mr0; @@ -115,23 +115,23 @@ x64_sys_send_recv(seL4_Word sys, seL4_Word dest, seL4_Word *out_dest, seL4_Word register seL4_Word mr2 asm("r9") = *in_out_mr2; register seL4_Word mr3 asm("r15") = *in_out_mr3; - asm volatile ( + asm volatile( "movq %%rsp, %%rbx \n" "syscall \n" "movq %%rbx, %%rsp \n" - : "=S" (*out_info), - "=r" (mr0), - "=r" (mr1), - "=r" (mr2), - "=r" (mr3), - "=D" (*out_dest) - : "d" (sys), - "D" (dest), - "S" (info), - "r" (mr0), - "r" (mr1), - "r" (mr2), - "r" (mr3) + : "=S"(*out_info), + "=r"(mr0), + "=r"(mr1), + "=r"(mr2), + "=r"(mr3), + "=D"(*out_dest) + : "d"(sys), + "D"(dest), + "S"(info), + "r"(mr0), + "r"(mr1), + "r"(mr2), + "r"(mr3) : "%rcx", "%rbx", "r11", "memory" ); *in_out_mr0 = mr0; @@ -143,12 +143,12 @@ x64_sys_send_recv(seL4_Word sys, seL4_Word dest, seL4_Word *out_dest, seL4_Word static inline void x64_sys_null(seL4_Word sys) { - asm volatile ( + asm volatile( "movq %%rsp, %%rbx \n" "syscall \n" "movq %%rbx, %%rsp \n" : - : "d" (sys) + : "d"(sys) : "%rbx", "%rcx", "%rsi", "%rdi", "%r11" ); } diff --git a/libsel4/sel4_arch_include/x86_64/sel4/sel4_arch/syscalls_sysenter.h b/libsel4/sel4_arch_include/x86_64/sel4/sel4_arch/syscalls_sysenter.h index 46e71ae50..7aa3bce10 100644 --- a/libsel4/sel4_arch_include/x86_64/sel4/sel4_arch/syscalls_sysenter.h +++ b/libsel4/sel4_arch_include/x86_64/sel4/sel4_arch/syscalls_sysenter.h @@ -25,19 +25,19 @@ x64_sys_send(seL4_Word sys, seL4_Word dest, seL4_Word info, seL4_Word msg0, seL4 register seL4_Word mr2 asm("r9") = msg2; register seL4_Word mr3 asm("r15") = msg3; - asm volatile ( + asm volatile( "movq %%rsp, %%rcx \n" "leaq 1f, %%rdx \n" "1: \n" "sysenter \n" : - : "a" (sys), - "D" (dest), - "S" (info), - "r" (mr0), - "r" (mr1), - "r" (mr2), - "r" (mr3) + : "a"(sys), + "D"(dest), + "S"(info), + "r"(mr0), + "r"(mr1), + "r"(mr2), + "r"(mr3) : "%rcx", "%rdx" ); } @@ -50,18 +50,18 @@ x64_sys_reply(seL4_Word sys, seL4_Word info, seL4_Word msg0, seL4_Word msg1, seL register seL4_Word mr2 asm("r9") = msg2; register seL4_Word mr3 asm("r15") = msg3; - asm volatile ( + asm volatile( "movq %%rsp, %%rcx \n" "leaq 1f, %%rdx \n" "1: \n" "sysenter \n" : - : "a" (sys), - "S" (info), - "r" (mr0), - "r" (mr1), - "r" (mr2), - "r" (mr3) + : "a"(sys), + "S"(info), + "r"(mr0), + "r"(mr1), + "r"(mr2), + "r"(mr3) : "%rdx", "%rcx" ); } @@ -69,15 +69,15 @@ x64_sys_reply(seL4_Word sys, seL4_Word info, seL4_Word msg0, seL4_Word msg1, seL static inline void x64_sys_send_null(seL4_Word sys, seL4_Word dest, seL4_Word info) { - asm volatile ( + asm volatile( "movq %%rsp, %%rcx \n" "leaq 1f, %%rdx \n" "1: \n" "sysenter \n" : - : "a" (sys), - "D" (dest), - "S" (info) + : "a"(sys), + "D"(dest), + "S"(info) : "%rcx", "%rdx" ); } @@ -90,19 +90,19 @@ x64_sys_recv(seL4_Word sys, seL4_Word src, seL4_Word *out_badge, seL4_Word *out_ register seL4_Word mr2 asm("r9"); register seL4_Word mr3 asm("r15"); - asm volatile ( + asm volatile( "movq %%rsp, 
%%rcx \n" "leaq 1f, %%rdx \n" "1: \n" "sysenter \n" - : "=D" (*out_badge), - "=S" (*out_info), - "=r" (mr0), - "=r" (mr1), - "=r" (mr2), - "=r" (mr3) - : "a" (sys), - "D" (src) + : "=D"(*out_badge), + "=S"(*out_info), + "=r"(mr0), + "=r"(mr1), + "=r"(mr2), + "=r"(mr3) + : "a"(sys), + "D"(src) : "%rcx", "%rdx", "memory" ); @@ -120,24 +120,24 @@ x64_sys_send_recv(seL4_Word sys, seL4_Word dest, seL4_Word *out_dest, seL4_Word register seL4_Word mr2 asm("r9") = *in_out_mr2; register seL4_Word mr3 asm("r15") = *in_out_mr3; - asm volatile ( + asm volatile( "movq %%rsp, %%rcx \n" "leaq 1f, %%rdx \n" "1: \n" "sysenter \n" - : "=S" (*out_info), - "=r" (mr0), - "=r" (mr1), - "=r" (mr2), - "=r" (mr3), - "=D" (*out_dest) - : "a" (sys), - "D" (dest), - "S" (info), - "r" (mr0), - "r" (mr1), - "r" (mr2), - "r" (mr3) + : "=S"(*out_info), + "=r"(mr0), + "=r"(mr1), + "=r"(mr2), + "=r"(mr3), + "=D"(*out_dest) + : "a"(sys), + "D"(dest), + "S"(info), + "r"(mr0), + "r"(mr1), + "r"(mr2), + "r"(mr3) : "%rcx", "%rdx", "memory" ); @@ -150,13 +150,13 @@ x64_sys_send_recv(seL4_Word sys, seL4_Word dest, seL4_Word *out_dest, seL4_Word static inline void x64_sys_null(seL4_Word sys) { - asm volatile ( + asm volatile( "movq %%rsp, %%rcx \n" "leaq 1f, %%rdx \n" "1: \n" "sysenter \n" : - : "a" (sys) + : "a"(sys) : "%rbx", "%rcx", "%rdx", "%rsi", "%rdi", "memory" ); } diff --git a/src/api/syscall.c b/src/api/syscall.c index 7c9539599..99e0d368f 100644 --- a/src/api/syscall.c +++ b/src/api/syscall.c @@ -125,7 +125,7 @@ handleUnknownSyscall(word_t w) #ifdef CONFIG_DANGEROUS_CODE_INJECTION if (w == SysDebugRun) { - ((void (*) (void *))getRegister(NODE_STATE(ksCurThread), capRegister))((void*)getRegister(NODE_STATE(ksCurThread), msgInfoRegister)); + ((void (*)(void *))getRegister(NODE_STATE(ksCurThread), capRegister))((void*)getRegister(NODE_STATE(ksCurThread), msgInfoRegister)); return EXCEPTION_NONE; } #endif diff --git a/src/arch/arm/32/c_traps.c b/src/arch/arm/32/c_traps.c index 902424f55..8fc8ad2bd 100644 --- a/src/arch/arm/32/c_traps.c +++ b/src/arch/arm/32/c_traps.c @@ -63,7 +63,7 @@ void VISIBLE NORETURN restore_user_context(void) /* Return to user */ "eret" : /* no output */ - : [cur_thread_reg] "r" (cur_thread_reg) + : [cur_thread_reg] "r"(cur_thread_reg) : "memory" ); } else { @@ -71,7 +71,7 @@ void VISIBLE NORETURN restore_user_context(void) ldmdb sp, {r0-lr}^ \n\ rfeia sp" : /* no output */ - : [cur_thread] "r" (cur_thread_reg + LR_svc * sizeof(word_t)) + : [cur_thread] "r"(cur_thread_reg + LR_svc * sizeof(word_t)) ); } UNREACHABLE(); diff --git a/src/arch/arm/32/kernel/vspace.c b/src/arch/arm/32/kernel/vspace.c index fce95a275..e683c287b 100644 --- a/src/arch/arm/32/kernel/vspace.c +++ b/src/arch/arm/32/kernel/vspace.c @@ -719,7 +719,7 @@ lookupIPCBuffer(bool_t isReceiver, tcb_t *thread) cap_get_capType(bufferCap) != cap_frame_cap)) { return NULL; } - if (unlikely (generic_frame_cap_get_capFIsDevice(bufferCap))) { + if (unlikely(generic_frame_cap_get_capFIsDevice(bufferCap))) { return NULL; } @@ -1158,7 +1158,7 @@ pageTableMapped(asid_t asid, vptr_t vaddr, pte_t* pt) pde = find_ret.pd[pdIndex]; if (likely(pde_get_pdeType(pde) == pde_pde_coarse - && ptrFromPAddr (pde_pde_coarse_get_address(pde)) == pt)) { + && ptrFromPAddr(pde_pde_coarse_get_address(pde)) == pt)) { return find_ret.pd; } else { return NULL; @@ -1281,7 +1281,7 @@ unmapPageTable(asid_t asid, vptr_t vaddr, pte_t* pt) pde_t *pd, *pdSlot; unsigned int pdIndex; - pd = pageTableMapped (asid, vaddr, pt); + pd = pageTableMapped(asid, vaddr, pt); if (likely(pd 
!= NULL)) { pdIndex = vaddr >> (PT_INDEX_BITS + PAGE_BITS); @@ -2324,7 +2324,7 @@ decodeARMPageTableInvocation(word_t invLabel, word_t length, return EXCEPTION_SYSCALL_ERROR; } setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart); - return performPageTableInvocationUnmap (cap, cte); + return performPageTableInvocationUnmap(cap, cte); } if (unlikely(invLabel != ARMPageTableMap)) { @@ -2825,13 +2825,13 @@ decodeARMMMUInvocation(word_t invLabel, word_t length, cptr_t cptr, cap, excaps, buffer); case cap_page_table_cap: - return decodeARMPageTableInvocation (invLabel, length, cte, - cap, excaps, buffer); + return decodeARMPageTableInvocation(invLabel, length, cte, + cap, excaps, buffer); case cap_small_frame_cap: case cap_frame_cap: - return decodeARMFrameInvocation (invLabel, length, cte, - cap, excaps, buffer); + return decodeARMFrameInvocation(invLabel, length, cte, + cap, excaps, buffer); case cap_asid_control_cap: { word_t i; diff --git a/src/arch/arm/32/machine/capdl.c b/src/arch/arm/32/machine/capdl.c index 8d44e4bf9..555377726 100644 --- a/src/arch/arm/32/machine/capdl.c +++ b/src/arch/arm/32/machine/capdl.c @@ -103,7 +103,7 @@ static int getArg32(unsigned int *res) if (getDecodedChar(&b4)) { return 1; } - *res = (b1 << 24 ) | (b2 << 16) | (b3 << 8) | b4; + *res = (b1 << 24) | (b2 << 16) | (b3 << 8) | b4; return 0; } diff --git a/src/arch/arm/32/machine/fpu.c b/src/arch/arm/32/machine/fpu.c index 190c31982..59955dc59 100644 --- a/src/arch/arm/32/machine/fpu.c +++ b/src/arch/arm/32/machine/fpu.c @@ -54,11 +54,11 @@ bool_t isFPUD32SupportedCached; BOOT_CODE static inline bool_t isFPUD32Supported(void) { word_t mvfr0; - asm volatile (".word 0xeef73a10 \n" /* vmrs r3, mvfr0 */ - "mov %0, r3 \n" - : "=r" (mvfr0) - : - : "r3"); + asm volatile(".word 0xeef73a10 \n" /* vmrs r3, mvfr0 */ + "mov %0, r3 \n" + : "=r"(mvfr0) + : + : "r3"); return ((mvfr0 & 0xf) == 2); } @@ -107,10 +107,10 @@ fpsimd_HWCapTest(void) } /* Check of this platform supports HW FP instructions */ - asm volatile (".word 0xeef00a10 \n" /* vmrs r0, fpsid */ - "mov %0, r0 \n" - : "=r" (fpsid) : - : "r0"); + asm volatile(".word 0xeef00a10 \n" /* vmrs r0, fpsid */ + "mov %0, r0 \n" + : "=r"(fpsid) : + : "r0"); if (fpsid & BIT(FPSID_SW_BIT)) { return false; } diff --git a/src/arch/arm/32/object/objecttype.c b/src/arch/arm/32/object/objecttype.c index f3f3c40c7..818d7b769 100644 --- a/src/arch/arm/32/object/objecttype.c +++ b/src/arch/arm/32/object/objecttype.c @@ -268,8 +268,8 @@ Arch_sameRegionAs(cap_t cap_a, cap_t cap_b) word_t botA, botB, topA, topB; botA = generic_frame_cap_get_capFBasePtr(cap_a); botB = generic_frame_cap_get_capFBasePtr(cap_b); - topA = botA + MASK (pageBitsForSize(generic_frame_cap_get_capFSize(cap_a))); - topB = botB + MASK (pageBitsForSize(generic_frame_cap_get_capFSize(cap_b))) ; + topA = botA + MASK(pageBitsForSize(generic_frame_cap_get_capFSize(cap_a))); + topB = botB + MASK(pageBitsForSize(generic_frame_cap_get_capFSize(cap_b))) ; return ((botA <= botB) && (topA >= topB) && (botB <= topB)); } break; diff --git a/src/arch/arm/64/c_traps.c b/src/arch/arm/64/c_traps.c index 265d000ff..682f5fd33 100644 --- a/src/arch/arm/64/c_traps.c +++ b/src/arch/arm/64/c_traps.c @@ -67,8 +67,8 @@ void VISIBLE NORETURN restore_user_context(void) "ldr x30, [sp, %[LR]] \n" "eret" : - : "r" (NODE_STATE(ksCurThread)->tcbArch.tcbContext.registers), - [SP_EL0] "i" (PT_SP_EL0), [SPSR_EL1] "i" (PT_SPSR_EL1), [LR] "i" (PT_LR) + : "r"(NODE_STATE(ksCurThread)->tcbArch.tcbContext.registers), + [SP_EL0] "i"(PT_SP_EL0), 
[SPSR_EL1] "i"(PT_SPSR_EL1), [LR] "i"(PT_LR) : "memory" ); UNREACHABLE(); diff --git a/src/arch/arm/64/kernel/vspace.c b/src/arch/arm/64/kernel/vspace.c index 0170ba9ac..f08461036 100644 --- a/src/arch/arm/64/kernel/vspace.c +++ b/src/arch/arm/64/kernel/vspace.c @@ -2380,12 +2380,12 @@ decodeARMMMUInvocation(word_t invLabel, word_t length, cptr_t cptr, cap, extraCaps, buffer); case cap_page_table_cap: - return decodeARMPageTableInvocation (invLabel, length, cte, - cap, extraCaps, buffer); + return decodeARMPageTableInvocation(invLabel, length, cte, + cap, extraCaps, buffer); case cap_frame_cap: - return decodeARMFrameInvocation (invLabel, length, cte, - cap, extraCaps, buffer); + return decodeARMFrameInvocation(invLabel, length, cte, + cap, extraCaps, buffer); case cap_asid_control_cap: { unsigned int i; diff --git a/src/arch/arm/armv/armv6/benchmark.c b/src/arch/arm/armv/armv6/benchmark.c index 6c7b62870..ccb15933c 100644 --- a/src/arch/arm/armv/armv6/benchmark.c +++ b/src/arch/arm/armv/armv6/benchmark.c @@ -26,19 +26,19 @@ armv_init_ccnt(void) #ifdef CONFIG_ARM_ENABLE_PMU_OVERFLOW_INTERRUPT /* Enable generating interrupts on overflows */ pmcr = BIT(6); - asm volatile ( + asm volatile( "mcr p15, 0, %0, c15, c12, 0\n" : - : "r" (pmcr) + : "r"(pmcr) ); #endif /* CONFIG_ARM_ENABLE_PMU_OVERFLOW_INTERRUPT */ /* enable them */ pmcr |= BIT(2) | BIT(0); - asm volatile ( + asm volatile( "mcr p15, 0, %0, c15, c12, 0\n" : /* no outputs */ - : "r" (pmcr) + : "r"(pmcr) ); } diff --git a/src/arch/arm/armv/armv7-a/benchmark.c b/src/arch/arm/armv/armv7-a/benchmark.c index d31db30cb..cba7c4329 100644 --- a/src/arch/arm/armv/armv7-a/benchmark.c +++ b/src/arch/arm/armv/armv7-a/benchmark.c @@ -26,35 +26,35 @@ armv_init_ccnt(void) #ifdef CONFIG_ARM_ENABLE_PMU_OVERFLOW_INTERRUPT /* Enable generating interrupts on overflows */ val = BIT(31); - asm volatile ( + asm volatile( "mcr p15, 0, %0, c9, c14, 1\n" : - : "r" (val) + : "r"(val) ); #endif /* CONFIG_ARM_ENABLE_PMU_OVERFLOW_INTERRUPT */ /* enable them */ val = 1; - asm volatile ( + asm volatile( "mcr p15, 0, %0, c9, c14, 0\n" : - : "r" (val) + : "r"(val) ); /* reset to 0 and make available at user level */ pmcr = (1 << 2) | 1; - asm volatile ( + asm volatile( "mcr p15, 0, %0, c9, c12, 0\n" : /* no outputs */ - : "r" (pmcr) + : "r"(pmcr) ); /* turn the cycle counter on */ val = BIT(31); - asm volatile ( + asm volatile( "mcr p15, 0, %0, c9, c12, 1\n" : /* no outputs */ - : "r" (val) + : "r"(val) ); } diff --git a/src/arch/arm/kernel/boot.c b/src/arch/arm/kernel/boot.c index 2c5bef5e0..abb1a8867 100644 --- a/src/arch/arm/kernel/boot.c +++ b/src/arch/arm/kernel/boot.c @@ -591,7 +591,7 @@ init_kernel( #endif /* ENABLE_SMP_SUPPORT */ if (!result) { - fail ("Kernel init failed for some reason :("); + fail("Kernel init failed for some reason :("); } schedule(); diff --git a/src/arch/arm/machine/l2c_310.c b/src/arch/arm/machine/l2c_310.c index bd06ec971..19a55f562 100644 --- a/src/arch/arm/machine/l2c_310.c +++ b/src/arch/arm/machine/l2c_310.c @@ -234,14 +234,14 @@ volatile struct l2cc_map * const l2cc BOOT_CODE static void mshield_smc(uint32_t callid, uint32_t arg1, uint32_t arg2) { - register uint32_t _arg1 asm ("r0") = arg1; - register uint32_t _arg2 asm ("r1") = arg2; - register uint32_t _callid asm ("r12") = callid; - asm volatile ("push {r2-r12, lr}\n" - "dsb\n" - "smc #0\n" - "pop {r2-r12, lr}" - :: "r"(_callid), "r"(_arg1), "r"(_arg2)); + register uint32_t _arg1 asm("r0") = arg1; + register uint32_t _arg2 asm("r1") = arg2; + register uint32_t _callid 
asm("r12") = callid; + asm volatile("push {r2-r12, lr}\n" + "dsb\n" + "smc #0\n" + "pop {r2-r12, lr}" + :: "r"(_callid), "r"(_arg1), "r"(_arg2)); } #endif /* TI_MSHIELD */ @@ -303,10 +303,10 @@ initL2Cache(void) /* 2: Invalidate by way. */ l2cc->maintenance.inv_way = 0xffff; - while ( l2cc->maintenance.inv_way & 0xffff ); + while (l2cc->maintenance.inv_way & 0xffff); /* 3: write to lockdown D & I reg9 if required */ - if ( (l2cc->id.cache_type & PL310_LOCKDOWN_BY_MASK) == PL310_LOCKDOWN_BY_MASTER) { + if ((l2cc->id.cache_type & PL310_LOCKDOWN_BY_MASK) == PL310_LOCKDOWN_BY_MASTER) { /* disable lockdown */ l2cc->lockdown.d_lockdown0 = 0; l2cc->lockdown.i_lockdown0 = 0; @@ -325,7 +325,7 @@ initL2Cache(void) l2cc->lockdown.d_lockdown7 = 0; l2cc->lockdown.i_lockdown7 = 0; } - if ( (l2cc->id.cache_type & PL310_LOCKDOWN_BY_MASK) == PL310_LOCKDOWN_BY_LINE) { + if ((l2cc->id.cache_type & PL310_LOCKDOWN_BY_MASK) == PL310_LOCKDOWN_BY_LINE) { /* disable lockdown */ l2cc->lockdown.lock_line_eng = 0; } @@ -377,7 +377,7 @@ void plat_cleanCache(void) #ifndef CONFIG_DEBUG_DISABLE_L2_CACHE /* Clean by way. */ l2cc->maintenance.clean_way = 0xffff; - while ( l2cc->maintenance.clean_way & 0xffff ); + while (l2cc->maintenance.clean_way & 0xffff); L2_cacheSync(); #endif /* !CONFIG_DEBUG_DISABLE_L2_CACHE */ } diff --git a/src/arch/arm/object/iospace.c b/src/arch/arm/object/iospace.c index 325fcfaf2..697089458 100644 --- a/src/arch/arm/object/iospace.c +++ b/src/arch/arm/object/iospace.c @@ -181,7 +181,7 @@ decodeARMIOPTInvocation( return EXCEPTION_SYSCALL_ERROR; } - if (invLabel != ARMIOPageTableMap ) { + if (invLabel != ARMIOPageTableMap) { userError("IOPTInvocation: Invalid operation."); current_syscall_error.type = seL4_IllegalOperation; return EXCEPTION_SYSCALL_ERROR; diff --git a/src/arch/arm/object/vcpu.c b/src/arch/arm/object/vcpu.c index fb9ee5644..f502e24b7 100644 --- a/src/arch/arm/object/vcpu.c +++ b/src/arch/arm/object/vcpu.c @@ -733,7 +733,7 @@ exception_t decodeVCPUSetTCB(cap_t cap, extra_caps_t extraCaps) { cap_t tcbCap; - if ( extraCaps.excaprefs[0] == NULL) { + if (extraCaps.excaprefs[0] == NULL) { userError("VCPU SetTCB: Truncated message."); current_syscall_error.type = seL4_TruncatedMessage; return EXCEPTION_SYSCALL_ERROR; diff --git a/src/arch/riscv/c_traps.c b/src/arch/riscv/c_traps.c index 01ef63a2a..19fb869ca 100644 --- a/src/arch/riscv/c_traps.c +++ b/src/arch/riscv/c_traps.c @@ -87,8 +87,8 @@ void VISIBLE NORETURN restore_user_context(void) LOAD_S " t0, (4*%[REGSIZE])(t0) \n" "sret" : /* no output */ - : [REGSIZE] "i" (sizeof(word_t)), - [cur_thread] "r" (cur_thread_reg) + : [REGSIZE] "i"(sizeof(word_t)), + [cur_thread] "r"(cur_thread_reg) : "memory" ); diff --git a/src/arch/riscv/kernel/boot.c b/src/arch/riscv/kernel/boot.c index fcf4c6d7d..db847f466 100644 --- a/src/arch/riscv/kernel/boot.c +++ b/src/arch/riscv/kernel/boot.c @@ -234,8 +234,8 @@ try_init_kernel( /* convert from physical addresses to userland vptrs */ v_region_t ui_v_reg; v_region_t it_v_reg; - ui_v_reg.start = (uint32_t) (ui_p_reg_start - pv_offset); - ui_v_reg.end = (uint32_t) (ui_p_reg_end - pv_offset); + ui_v_reg.start = (uint32_t)(ui_p_reg_start - pv_offset); + ui_v_reg.end = (uint32_t)(ui_p_reg_end - pv_offset); ipcbuf_vptr = ui_v_reg.end; bi_frame_vptr = ipcbuf_vptr + BIT(PAGE_BITS); @@ -375,7 +375,7 @@ init_kernel( v_entry); if (!result) { - fail ("Kernel init failed for some reason :("); + fail("Kernel init failed for some reason :("); } schedule(); diff --git a/src/arch/riscv/kernel/vspace.c 
b/src/arch/riscv/kernel/vspace.c index 19dfe6ea9..da92a3676 100644 --- a/src/arch/riscv/kernel/vspace.c +++ b/src/arch/riscv/kernel/vspace.c @@ -696,7 +696,7 @@ decodeRISCVPageTableInvocation(word_t label, unsigned int length, return EXCEPTION_SYSCALL_ERROR; } setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart); - return performPageTableInvocationUnmap (cap, cte); + return performPageTableInvocationUnmap(cap, cte); } if (unlikely((label != RISCVPageTableMap))) { @@ -1237,8 +1237,8 @@ Arch_userStackTrace(tcb_t *tptr) word_t address = sp + (i * sizeof(word_t)); lookupPTSlot_ret_t ret = lookupPTSlot(vspace_root, address); if (pte_ptr_get_valid(ret.ptSlot) && !isPTEPageTable(ret.ptSlot)) { - pptr_t pptr = (pptr_t) (getPPtrFromHWPTE(ret.ptSlot)); - word_t *value = (word_t*) ((word_t)pptr + (address & MASK(ret.ptBitsLeft))); + pptr_t pptr = (pptr_t)(getPPtrFromHWPTE(ret.ptSlot)); + word_t *value = (word_t*)((word_t)pptr + (address & MASK(ret.ptBitsLeft))); printf("0x%lx: 0x%lx\n", (long) address, (long) *value); } else { printf("0x%lx: INVALID\n", (long) address); diff --git a/src/arch/riscv/object/objecttype.c b/src/arch/riscv/object/objecttype.c index e7cb5db6c..27d9ef94a 100644 --- a/src/arch/riscv/object/objecttype.c +++ b/src/arch/riscv/object/objecttype.c @@ -136,8 +136,8 @@ Arch_sameRegionAs(cap_t cap_a, cap_t cap_b) word_t botA, botB, topA, topB; botA = cap_frame_cap_get_capFBasePtr(cap_a); botB = cap_frame_cap_get_capFBasePtr(cap_b); - topA = botA + MASK (pageBitsForSize(cap_frame_cap_get_capFSize(cap_a))); - topB = botB + MASK (pageBitsForSize(cap_frame_cap_get_capFSize(cap_b))) ; + topA = botA + MASK(pageBitsForSize(cap_frame_cap_get_capFSize(cap_a))); + topB = botB + MASK(pageBitsForSize(cap_frame_cap_get_capFSize(cap_b))) ; return ((botA <= botB) && (topA >= topB) && (botB <= topB)); } break; diff --git a/src/arch/x86/64/c_traps.c b/src/arch/x86/64/c_traps.c index 90c585964..fac39306f 100644 --- a/src/arch/x86/64/c_traps.c +++ b/src/arch/x86/64/c_traps.c @@ -270,7 +270,7 @@ void VISIBLE NORETURN restore_user_context(void) #if defined(ENABLE_SMP_SUPPORT) && defined(CONFIG_KERNEL_SKIM_WINDOW) "r"(user_cr3_r11), #endif - [IF] "i" (FLAGS_IF) + [IF] "i"(FLAGS_IF) // Clobber memory so the compiler is forced to complete all stores // before running this assembler : "memory" @@ -314,7 +314,7 @@ void VISIBLE NORETURN restore_user_context(void) : : "r"(&cur_thread->tcbArch.tcbContext.registers[RDI]) #if defined(ENABLE_SMP_SUPPORT) && defined(CONFIG_KERNEL_SKIM_WINDOW) - , "c" (user_cr3) + , "c"(user_cr3) #endif // Clobber memory so the compiler is forced to complete all stores // before running this assembler @@ -390,8 +390,8 @@ void VISIBLE NORETURN restore_user_context(void) : : "r"(&cur_thread->tcbArch.tcbContext.registers[RDI]) #if defined(ENABLE_SMP_SUPPORT) && defined(CONFIG_KERNEL_SKIM_WINDOW) - , "c" (user_cr3) - , [scratch_offset] "i" (nodeSkimScratchOffset) + , "c"(user_cr3) + , [scratch_offset] "i"(nodeSkimScratchOffset) #endif // Clobber memory so the compiler is forced to complete all stores // before running this assembler diff --git a/src/arch/x86/64/kernel/thread.c b/src/arch/x86/64/kernel/thread.c index ed3d36327..9bc9c3d55 100644 --- a/src/arch/x86/64/kernel/thread.c +++ b/src/arch/x86/64/kernel/thread.c @@ -25,8 +25,8 @@ Arch_switchToThread(tcb_t* tcb) #ifdef ENABLE_SMP_SUPPORT asm volatile("movq %[value], %%gs:%c[offset]" : - : [value] "r" (&tcb->tcbArch.tcbContext.registers[Error + 1]), - [offset] "i" (OFFSETOF(nodeInfo_t, currentThreadUserContext))); + : [value] 
"r"(&tcb->tcbArch.tcbContext.registers[Error + 1]), + [offset] "i"(OFFSETOF(nodeInfo_t, currentThreadUserContext))); #endif if (config_set(CONFIG_KERNEL_X86_IBPB_ON_CONTEXT_SWITCH)) { x86_ibpb(); @@ -62,7 +62,7 @@ Arch_switchToIdleThread(void) asm volatile("movq %[value], %%gs:%c[offset]" : : [value] "r"(&tcb->tcbArch.tcbContext.registers[Error + 1]), - [offset] "i" (OFFSETOF(nodeInfo_t, currentThreadUserContext))); + [offset] "i"(OFFSETOF(nodeInfo_t, currentThreadUserContext))); #endif } diff --git a/src/arch/x86/64/kernel/vspace.c b/src/arch/x86/64/kernel/vspace.c index e4ce5b6b2..1015a51b2 100644 --- a/src/arch/x86/64/kernel/vspace.c +++ b/src/arch/x86/64/kernel/vspace.c @@ -523,7 +523,7 @@ init_dtrs(void) x64_install_gdt(&gdt_idt_ptr); swapgs(); - gdt_idt_ptr.limit = (sizeof(idt_entry_t) * (int_max + 1 )) - 1; + gdt_idt_ptr.limit = (sizeof(idt_entry_t) * (int_max + 1)) - 1; gdt_idt_ptr.base = (uint64_t)x86KSGlobalState[CURRENT_CPU_INDEX()].x86KSidt; x64_install_idt(&gdt_idt_ptr); @@ -1138,7 +1138,7 @@ unmapPageDirectory(asid_t asid, vptr_t vaddr, pde_t *pd) } /* check if the PDPT has the PD */ - if (! (pdpte_ptr_get_page_size(lu_ret.pdptSlot) == pdpte_pdpte_pd && + if (!(pdpte_ptr_get_page_size(lu_ret.pdptSlot) == pdpte_pdpte_pd && pdpte_pdpte_pd_ptr_get_present(lu_ret.pdptSlot) && (pdpte_pdpte_pd_ptr_get_pd_base_address(lu_ret.pdptSlot) == pptr_to_paddr(pd)))) { return; @@ -1313,7 +1313,7 @@ static void unmapPDPT(asid_t asid, vptr_t vaddr, pdpte_t *pdpt) pml4Slot = lookupPML4Slot(find_ret.vspace_root, vaddr); /* check if the PML4 has the PDPT */ - if (! (pml4e_ptr_get_present(pml4Slot) && + if (!(pml4e_ptr_get_present(pml4Slot) && pml4e_ptr_get_pdpt_base_address(pml4Slot) == pptr_to_paddr(pdpt))) { return; } @@ -1497,10 +1497,10 @@ bool_t modeUnmapPage(vm_page_size_t page_size, vspace_root_t *vroot, vptr_t vadd pdpte = pdpt_ret.pdptSlot; - if (! 
(pdpte_ptr_get_page_size(pdpte) == pdpte_pdpte_1g + if (!(pdpte_ptr_get_page_size(pdpte) == pdpte_pdpte_1g && pdpte_pdpte_1g_ptr_get_present(pdpte) - && (pdpte_pdpte_1g_ptr_get_page_base_address(pdpte) - == pptr_to_paddr(pptr)))) { + && (pdpte_pdpte_1g_ptr_get_page_base_address(pdpte) + == pptr_to_paddr(pptr)))) { return false; } diff --git a/src/arch/x86/kernel/boot_sys.c b/src/arch/x86/kernel/boot_sys.c index 167bc849c..d95a9d38a 100644 --- a/src/arch/x86/kernel/boot_sys.c +++ b/src/arch/x86/kernel/boot_sys.c @@ -286,19 +286,19 @@ is_compiled_for_microarchitecture(void) word_t microarch_generation = 0; x86_cpu_identity_t *model_info = x86_cpuid_get_model_info(); - if (config_set(CONFIG_ARCH_X86_SKYLAKE) ) { + if (config_set(CONFIG_ARCH_X86_SKYLAKE)) { microarch_generation = 7; - } else if (config_set(CONFIG_ARCH_X86_BROADWELL) ) { + } else if (config_set(CONFIG_ARCH_X86_BROADWELL)) { microarch_generation = 6; - } else if (config_set(CONFIG_ARCH_X86_HASWELL) ) { + } else if (config_set(CONFIG_ARCH_X86_HASWELL)) { microarch_generation = 5; - } else if (config_set(CONFIG_ARCH_X86_IVY) ) { + } else if (config_set(CONFIG_ARCH_X86_IVY)) { microarch_generation = 4; - } else if (config_set(CONFIG_ARCH_X86_SANDY) ) { + } else if (config_set(CONFIG_ARCH_X86_SANDY)) { microarch_generation = 3; - } else if (config_set(CONFIG_ARCH_X86_WESTMERE) ) { + } else if (config_set(CONFIG_ARCH_X86_WESTMERE)) { microarch_generation = 2; - } else if (config_set(CONFIG_ARCH_X86_NEHALEM) ) { + } else if (config_set(CONFIG_ARCH_X86_NEHALEM)) { microarch_generation = 1; } @@ -576,7 +576,7 @@ try_boot_sys_mbi1( modules[i].start, modules[i].end, modules[i].end - modules[i].start, - (char *) (long)modules[i].name + (char *)(long)modules[i].name ); if ((sword_t)(modules[i].end - modules[i].start) <= 0) { printf("Invalid boot module size! Possible cause: boot module file not found by QEMU\n"); diff --git a/src/arch/x86/kernel/vspace.c b/src/arch/x86/kernel/vspace.c index 28298f802..10a98edec 100644 --- a/src/arch/x86/kernel/vspace.c +++ b/src/arch/x86/kernel/vspace.c @@ -158,7 +158,7 @@ BOOT_CODE bool_t map_kernel_window_devices(pte_t *pt, uint32_t num_ioapic, paddr return false; } pte = x86_make_device_pte(phys); - assert(idx == ( (PPTR_IOAPIC_START + i * BIT(PAGE_BITS)) & MASK(LARGE_PAGE_BITS)) >> PAGE_BITS); + assert(idx == ((PPTR_IOAPIC_START + i * BIT(PAGE_BITS)) & MASK(LARGE_PAGE_BITS)) >> PAGE_BITS); pt[idx] = pte; idx++; if (idx == BIT(PT_INDEX_BITS)) { @@ -168,7 +168,7 @@ BOOT_CODE bool_t map_kernel_window_devices(pte_t *pt, uint32_t num_ioapic, paddr /* put in null mappings for any extra IOAPICs */ for (; i < CONFIG_MAX_NUM_IOAPIC; i++) { pte = x86_make_empty_pte(); - assert(idx == ( (PPTR_IOAPIC_START + i * BIT(PAGE_BITS)) & MASK(LARGE_PAGE_BITS)) >> PAGE_BITS); + assert(idx == ((PPTR_IOAPIC_START + i * BIT(PAGE_BITS)) & MASK(LARGE_PAGE_BITS)) >> PAGE_BITS); pt[idx] = pte; idx++; } @@ -507,7 +507,7 @@ init_pat_msr(void) x86_pat_msr_t pat_msr; /* First verify PAT is supported by the machine. * See section 11.12.1 of Volume 3 of the Intel manual */ - if ( (x86_cpuid_edx(0x1, 0x0) & BIT(16)) == 0) { + if ((x86_cpuid_edx(0x1, 0x0) & BIT(16)) == 0) { printf("PAT support not found\n"); return false; } @@ -726,7 +726,7 @@ void unmapPage(vm_page_size_t page_size, asid_t asid, vptr_t vptr, void *pptr) if (lu_ret.status != EXCEPTION_NONE) { return; } - if (! 
(pte_ptr_get_present(lu_ret.ptSlot) + if (!(pte_ptr_get_present(lu_ret.ptSlot) && (pte_ptr_get_page_base_address(lu_ret.ptSlot) == pptr_to_paddr(pptr)))) { return; @@ -740,7 +740,7 @@ void unmapPage(vm_page_size_t page_size, asid_t asid, vptr_t vptr, void *pptr) return; } pde = pd_ret.pdSlot; - if (! (pde_ptr_get_page_size(pde) == pde_pde_large + if (!(pde_ptr_get_page_size(pde) == pde_pde_large && pde_pde_large_ptr_get_present(pde) && (pde_pde_large_ptr_get_page_base_address(pde) == pptr_to_paddr(pptr)))) { @@ -780,7 +780,7 @@ void unmapPageTable(asid_t asid, vptr_t vaddr, pte_t* pt) } /* check if the PD actually refers to the PT */ - if (! (pde_ptr_get_page_size(lu_ret.pdSlot) == pde_pde_pt && + if (!(pde_ptr_get_page_size(lu_ret.pdSlot) == pde_pde_pt && pde_pde_pt_ptr_get_present(lu_ret.pdSlot) && (pde_pde_pt_ptr_get_pt_base_address(lu_ret.pdSlot) == pptr_to_paddr(pt)))) { return; @@ -1292,7 +1292,7 @@ decodeX86PageTableInvocation( return performX86PageTableInvocationUnmap(cap, cte); } - if (invLabel != X86PageTableMap ) { + if (invLabel != X86PageTableMap) { userError("X86PageTable: Illegal operation."); current_syscall_error.type = seL4_IllegalOperation; return EXCEPTION_SYSCALL_ERROR; diff --git a/src/arch/x86/machine/capdl.c b/src/arch/x86/machine/capdl.c index f0c333d17..2832cf3eb 100644 --- a/src/arch/x86/machine/capdl.c +++ b/src/arch/x86/machine/capdl.c @@ -104,7 +104,7 @@ static void sendWord(unsigned long word) { unsigned long i; for (i = 0; i < sizeof(unsigned long); i++) { - putEncodedChar( (word >> (i * 8)) & 0xff); + putEncodedChar((word >> (i * 8)) & 0xff); } } diff --git a/src/arch/x86/machine/hardware.c b/src/arch/x86/machine/hardware.c index 8e705b62e..25155b651 100644 --- a/src/arch/x86/machine/hardware.c +++ b/src/arch/x86/machine/hardware.c @@ -21,7 +21,7 @@ BOOT_CODE void init_sysenter_msrs(void) { - x86_wrmsr(IA32_SYSENTER_CS_MSR, (uint64_t)(word_t)SEL_CS_0); + x86_wrmsr(IA32_SYSENTER_CS_MSR, (uint64_t)(word_t)SEL_CS_0); x86_wrmsr(IA32_SYSENTER_EIP_MSR, (uint64_t)(word_t)&handle_syscall); if (config_set(CONFIG_ARCH_IA32) && !config_set(CONFIG_HARDWARE_DEBUG_API)) { /* manually add 4 bytes to x86KStss so that it is valid for both diff --git a/src/arch/x86/object/ioport.c b/src/arch/x86/object/ioport.c index 6663ce80d..7c6a4d807 100644 --- a/src/arch/x86/object/ioport.c +++ b/src/arch/x86/object/ioport.c @@ -162,7 +162,7 @@ decodeX86PortControlInvocation( } lu_ret = lookupTargetSlot(cnodeCap, index, depth); - if (lu_ret.status != EXCEPTION_NONE ) { + if (lu_ret.status != EXCEPTION_NONE) { userError("Target slot for new IO Port cap invalid: cap %lu.", getExtraCPtr(buffer, 0)); return lu_ret.status; } diff --git a/src/arch/x86/object/iospace.c b/src/arch/x86/object/iospace.c index 827402ca5..ced2ea5ef 100644 --- a/src/arch/x86/object/iospace.c +++ b/src/arch/x86/object/iospace.c @@ -200,7 +200,7 @@ decodeX86IOPTInvocation( return performX86IOPTInvocationUnmap(cap, slot); } - if (invLabel != X86IOPageTableMap ) { + if (invLabel != X86IOPageTableMap) { userError("X86IOPageTable: Illegal operation."); current_syscall_error.type = seL4_IllegalOperation; return EXCEPTION_SYSCALL_ERROR; @@ -444,7 +444,7 @@ void deleteIOPageTable(cap_t io_pt_cap) flushCacheRange(vtd_context_slot, VTD_CTE_SIZE_BITS); } else { io_address = cap_io_page_table_cap_get_capIOPTMappedAddress(io_pt_cap); - lu_ret = lookupIOPTSlot_resolve_levels(vtd_pte, io_address >> PAGE_BITS, level - 1, level - 1 ); + lu_ret = lookupIOPTSlot_resolve_levels(vtd_pte, io_address >> PAGE_BITS, level - 1, level - 1); /* 
if we have been overmapped or something */ if (lu_ret.status != EXCEPTION_NONE || lu_ret.level != 0) { diff --git a/src/arch/x86/object/objecttype.c b/src/arch/x86/object/objecttype.c index e08fe4871..67c53a5fb 100644 --- a/src/arch/x86/object/objecttype.c +++ b/src/arch/x86/object/objecttype.c @@ -301,8 +301,8 @@ bool_t CONST Arch_sameRegionAs(cap_t cap_a, cap_t cap_b) word_t botA, botB, topA, topB; botA = cap_frame_cap_get_capFBasePtr(cap_a); botB = cap_frame_cap_get_capFBasePtr(cap_b); - topA = botA + MASK (pageBitsForSize(cap_frame_cap_get_capFSize(cap_a))); - topB = botB + MASK (pageBitsForSize(cap_frame_cap_get_capFSize(cap_b))); + topA = botA + MASK(pageBitsForSize(cap_frame_cap_get_capFSize(cap_a))); + topB = botB + MASK(pageBitsForSize(cap_frame_cap_get_capFSize(cap_b))); return ((botA <= botB) && (topA >= topB) && (botB <= topB)); } break; diff --git a/src/arch/x86/object/vcpu.c b/src/arch/x86/object/vcpu.c index 5d59f291e..d524dd5f1 100644 --- a/src/arch/x86/object/vcpu.c +++ b/src/arch/x86/object/vcpu.c @@ -110,7 +110,7 @@ vmclear(void *vmcs_ptr) { uint64_t physical_address; physical_address = pptr_to_paddr((void*)vmcs_ptr); - asm volatile ( + asm volatile( "vmclear %0" : : "m"(physical_address) @@ -135,7 +135,7 @@ vmptrld(void *vmcs_ptr) uint64_t physical_address; uint8_t error; physical_address = pptr_to_paddr(vmcs_ptr); - asm volatile ( + asm volatile( "vmptrld %1; setna %0" : "=q"(error) : "m"(physical_address) @@ -911,7 +911,7 @@ static exception_t decodeSetTCB(cap_t cap, word_t length, word_t* buffer, extra_caps_t excaps) { cap_t tcbCap; - if ( excaps.excaprefs[0] == NULL) { + if (excaps.excaprefs[0] == NULL) { userError("VCPU SetTCB: Truncated message."); current_syscall_error.type = seL4_TruncatedMessage; return EXCEPTION_SYSCALL_ERROR; @@ -1558,7 +1558,7 @@ invept(ept_pml4e_t *ept_pml4) address.parts[0] = pptr_to_paddr((void*)ept_pml4); address.parts[1] = 0; - asm volatile ( + asm volatile( "invept %0, %1" : : "m"(address), "r"(type) diff --git a/src/drivers/serial/bcm2835-aux-uart.c b/src/drivers/serial/bcm2835-aux-uart.c index 91a9c8b83..e15cbccba 100644 --- a/src/drivers/serial/bcm2835-aux-uart.c +++ b/src/drivers/serial/bcm2835-aux-uart.c @@ -45,7 +45,7 @@ #if defined(CONFIG_DEBUG_BUILD) || defined(CONFIG_PRINTING) void putDebugChar(unsigned char c) { - while ( !(*UART_REG(MU_LSR) & MU_LSR_TXIDLE) ); + while (!(*UART_REG(MU_LSR) & MU_LSR_TXIDLE)); *UART_REG(MU_IO) = (c & 0xff); } #endif @@ -53,7 +53,7 @@ void putDebugChar(unsigned char c) #ifdef CONFIG_DEBUG_BUILD unsigned char getDebugChar(void) { - while ( !(*UART_REG(MU_LSR) & MU_LSR_DATAREADY) ); + while (!(*UART_REG(MU_LSR) & MU_LSR_DATAREADY)); return *UART_REG(MU_IO); } #endif //CONFIG_DEBUG_BUILD diff --git a/src/drivers/serial/exynos4210-uart.c b/src/drivers/serial/exynos4210-uart.c index d7be5aba2..7eb47612e 100644 --- a/src/drivers/serial/exynos4210-uart.c +++ b/src/drivers/serial/exynos4210-uart.c @@ -40,7 +40,7 @@ void putDebugChar(unsigned char c) { - while ( (*UART_REG(UTRSTAT) & TXBUF_EMPTY) == 0 ); + while ((*UART_REG(UTRSTAT) & TXBUF_EMPTY) == 0); *UART_REG(UTXH) = (c & 0xff); } #endif @@ -49,7 +49,7 @@ putDebugChar(unsigned char c) unsigned char getDebugChar(void) { - if ( (*UART_REG(UTRSTAT) & RXBUF_READY)) { + if ((*UART_REG(UTRSTAT) & RXBUF_READY)) { return (unsigned char) * UART_REG(URXH); } else { return -1; diff --git a/src/drivers/serial/msm-uartdm.c b/src/drivers/serial/msm-uartdm.c index ad5b671f0..022bf4965 100644 --- a/src/drivers/serial/msm-uartdm.c +++ 
b/src/drivers/serial/msm-uartdm.c @@ -28,7 +28,7 @@ void putDebugChar(unsigned char c) { - while ( (*UART_REG(USR) & USR_TXEMP) == 0 ); + while ((*UART_REG(USR) & USR_TXEMP) == 0); /* Tell the peripheral how many characters to send */ *UART_REG(UNTX) = 1; /* Write the character into the FIFO */ @@ -40,7 +40,7 @@ putDebugChar(unsigned char c) unsigned char getDebugChar(void) { - while ( (*UART_REG(USR) & USR_RXRDY) == 0 ); + while ((*UART_REG(USR) & USR_RXRDY) == 0); return *UART_REG(UTF) & 0xff; } diff --git a/src/drivers/timer/exynos4412-mct.c b/src/drivers/timer/exynos4412-mct.c index a2398a39e..7249fe12f 100644 --- a/src/drivers/timer/exynos4412-mct.c +++ b/src/drivers/timer/exynos4412-mct.c @@ -28,7 +28,7 @@ BOOT_CODE void initTimer(void) uint64_t comparator_value = ((((uint64_t) mct->global.cnth) << 32llu) | mct->global.cntl) + TIMER_RELOAD; - mct->global.comp0h = (uint32_t) (comparator_value >> 32u); + mct->global.comp0h = (uint32_t)(comparator_value >> 32u); mct->global.comp0l = (uint32_t) comparator_value; /* Enable interrupts */ mct->global.int_en = GINT_COMP0_IRQ; diff --git a/src/fastpath/fastpath.c b/src/fastpath/fastpath.c index 88c3a03d4..72990ba40 100644 --- a/src/fastpath/fastpath.c +++ b/src/fastpath/fastpath.c @@ -177,7 +177,7 @@ fastpath_call(word_t cptr, word_t msgInfo) mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged( &replySlot->cteMDBNode, CTE_REF(callerSlot), 1, 1); - fastpath_copy_mrs (length, NODE_STATE(ksCurThread), dest); + fastpath_copy_mrs(length, NODE_STATE(ksCurThread), dest); /* Dest thread is set Running, but not queued. */ thread_state_ptr_set_tsType_np(&dest->tcbState, @@ -275,7 +275,7 @@ fastpath_reply_recv(word_t cptr, word_t msgInfo) cap_pd = cap_vtable_cap_get_vspace_root_fp(newVTable); /* Ensure that the destination has a valid MMU. */ - if (unlikely(! isValidVTableRoot_fp (newVTable))) { + if (unlikely(! isValidVTableRoot_fp(newVTable))) { slowpath(SysReplyRecv); } @@ -370,7 +370,7 @@ fastpath_reply_recv(word_t cptr, word_t msgInfo) /* Replies don't have a badge. */ badge = 0; - fastpath_copy_mrs (length, NODE_STATE(ksCurThread), caller); + fastpath_copy_mrs(length, NODE_STATE(ksCurThread), caller); /* Dest thread is set Running, but not queued. 
*/ thread_state_ptr_set_tsType_np(&caller->tcbState, diff --git a/src/kernel/boot.c b/src/kernel/boot.c index c07948f47..bf371d9cb 100644 --- a/src/kernel/boot.c +++ b/src/kernel/boot.c @@ -92,9 +92,9 @@ alloc_region(word_t size_bits) new_rem_small.start = new_reg.end; new_rem_small.end = ndks_boot.freemem[i].end; } - if ( is_reg_empty(reg) || + if (is_reg_empty(reg) || (reg_size(new_rem_small) < reg_size(rem_small)) || - (reg_size(new_rem_small) == reg_size(rem_small) && reg_size(new_rem_large) < reg_size(rem_large)) ) { + (reg_size(new_rem_small) == reg_size(rem_small) && reg_size(new_rem_large) < reg_size(rem_large))) { reg = new_reg; rem_small = new_rem_small; rem_large = new_rem_large; @@ -123,7 +123,7 @@ write_slot(slot_ptr_t slot_ptr, cap_t cap) slot_ptr->cap = cap; slot_ptr->cteMDBNode = nullMDBNode; - mdb_node_ptr_set_mdbRevocable (&slot_ptr->cteMDBNode, true); + mdb_node_ptr_set_mdbRevocable(&slot_ptr->cteMDBNode, true); mdb_node_ptr_set_mdbFirstBadged(&slot_ptr->cteMDBNode, true); } diff --git a/src/machine/io.c b/src/machine/io.c index 7e1b36fe5..67bdc8791 100644 --- a/src/machine/io.c +++ b/src/machine/io.c @@ -127,7 +127,7 @@ print_unsigned_long_long(unsigned long long x, unsigned int ui_base) } /* we can't do 64 bit division so break it up into two hex numbers */ - upper = (unsigned int) (x >> 32llu); + upper = (unsigned int)(x >> 32llu); lower = (unsigned int) x & 0xffffffff; /* print first 32 bits if they exist */ diff --git a/src/object/cnode.c b/src/object/cnode.c index 21df320c8..0a6950125 100644 --- a/src/object/cnode.c +++ b/src/object/cnode.c @@ -173,7 +173,7 @@ decodeCNodeInvocation(word_t invLabel, word_t length, cap_t cap, break; default: - assert (0); + assert(0); return EXCEPTION_NONE; } diff --git a/src/object/endpoint.c b/src/object/endpoint.c index b529b59a0..b4d94f650 100644 --- a/src/object/endpoint.c +++ b/src/object/endpoint.c @@ -300,7 +300,7 @@ cancelAllIPC(endpoint_t *epptr) /* Set all blocked threads to restart */ for (; thread; thread = thread->tcbEPNext) { - setThreadState (thread, ThreadState_Restart); + setThreadState(thread, ThreadState_Restart); SCHED_ENQUEUE(thread); } diff --git a/src/object/untyped.c b/src/object/untyped.c index 5bceda652..7526b6155 100644 --- a/src/object/untyped.c +++ b/src/object/untyped.c @@ -259,7 +259,7 @@ resetUntypedCap(cte_t *srcSlot) srcSlot->cap = cap_untyped_cap_set_capFreeIndex(prev_cap, 0); } else { for (offset = ROUND_DOWN(offset - 1, chunk); - offset != - BIT (chunk); offset -= BIT (chunk)) { + offset != - BIT(chunk); offset -= BIT(chunk)) { clearMemory(GET_OFFSET_FREE_PTR(regionBase, offset), chunk); srcSlot->cap = cap_untyped_cap_set_capFreeIndex(prev_cap, OFFSET_TO_FREE_INDEX(offset)); status = preemptionPoint(); diff --git a/src/plat/allwinnerA20/machine/l2cache.c b/src/plat/allwinnerA20/machine/l2cache.c old mode 100755 new mode 100644 index fe18b4613..06a5128b1 --- a/src/plat/allwinnerA20/machine/l2cache.c +++ b/src/plat/allwinnerA20/machine/l2cache.c @@ -14,14 +14,14 @@ static inline word_t readACR(void) { word_t ACR; - asm volatile ("mrc p15,0,%0,c1,c0,1" : "=r"(ACR)); + asm volatile("mrc p15,0,%0,c1,c0,1" : "=r"(ACR)); return ACR; } static inline void writeACR(word_t ACR) { - asm volatile ("mcr p15,0,%0,c1,c0,1" : : "r"(ACR)); + asm volatile("mcr p15,0,%0,c1,c0,1" : : "r"(ACR)); } void diff --git a/src/plat/am335x/machine/l2cache.c b/src/plat/am335x/machine/l2cache.c index 407b6d8ca..c49d47818 100644 --- a/src/plat/am335x/machine/l2cache.c +++ b/src/plat/am335x/machine/l2cache.c @@ -14,14 +14,14 @@ 
static inline word_t readACR(void) { word_t ACR; - asm volatile ("mrc p15,0,%0,c1,c0,1" : "=r"(ACR)); + asm volatile("mrc p15,0,%0,c1,c0,1" : "=r"(ACR)); return ACR; } static inline void writeACR(word_t ACR) { - asm volatile ("mcr p15,0,%0,c1,c0,1" : : "r"(ACR)); + asm volatile("mcr p15,0,%0,c1,c0,1" : : "r"(ACR)); } void diff --git a/src/plat/omap3/machine/l2cache.c b/src/plat/omap3/machine/l2cache.c index 407b6d8ca..c49d47818 100644 --- a/src/plat/omap3/machine/l2cache.c +++ b/src/plat/omap3/machine/l2cache.c @@ -14,14 +14,14 @@ static inline word_t readACR(void) { word_t ACR; - asm volatile ("mrc p15,0,%0,c1,c0,1" : "=r"(ACR)); + asm volatile("mrc p15,0,%0,c1,c0,1" : "=r"(ACR)); return ACR; } static inline void writeACR(word_t ACR) { - asm volatile ("mcr p15,0,%0,c1,c0,1" : : "r"(ACR)); + asm volatile("mcr p15,0,%0,c1,c0,1" : : "r"(ACR)); } void diff --git a/src/plat/pc99/machine/hardware.c b/src/plat/pc99/machine/hardware.c index 5b3dac1d8..8173b802b 100644 --- a/src/plat/pc99/machine/hardware.c +++ b/src/plat/pc99/machine/hardware.c @@ -25,7 +25,7 @@ BOOT_CODE bool_t platAddDevices(void) * the user to generate arbitrary MSI interrupts. Only need to consider * this if it would actually be in the user device region */ if (PADDR_USER_DEVICE_TOP > 0xFFFFFFF8) { - if (!add_allocated_p_region( (p_region_t) { + if (!add_allocated_p_region((p_region_t) { (word_t)0xFFFFFFF8, (word_t)0xFFFFFFF8 + 8 })) { return false; diff --git a/src/plat/pc99/machine/intel-vtd.c b/src/plat/pc99/machine/intel-vtd.c index 92c15bf4f..082c69f3c 100644 --- a/src/plat/pc99/machine/intel-vtd.c +++ b/src/plat/pc99/machine/intel-vtd.c @@ -313,7 +313,7 @@ vtd_map_reserved_page(vtd_cte_t *vtd_context_table, int context_index, paddr_t a if (VTD_PT_INDEX_BITS * i >= 32) { iopt_index = 0; } else { - iopt_index = ( (addr >> seL4_PageBits) >> (VTD_PT_INDEX_BITS * i)) & MASK(VTD_PT_INDEX_BITS); + iopt_index = ((addr >> seL4_PageBits) >> (VTD_PT_INDEX_BITS * i)) & MASK(VTD_PT_INDEX_BITS); } vtd_pte_slot = iopt + iopt_index; if (i == 0) { diff --git a/src/plat/spike/machine/hardware.c b/src/plat/spike/machine/hardware.c index ecdf9dbd2..c870e44c8 100644 --- a/src/plat/spike/machine/hardware.c +++ b/src/plat/spike/machine/hardware.c @@ -64,7 +64,7 @@ getActiveIRQ(void) { uint64_t temp = 0; - asm volatile ("csrr %0, scause":"=r" (temp)); + asm volatile("csrr %0, scause":"=r"(temp)); if (!(temp & BIT(CONFIG_WORD_SIZE - 1))) { return irqInvalid; @@ -127,18 +127,18 @@ static inline uint64_t get_cycles(void) #if __riscv_xlen == 32 { uint32_t nH, nL; - asm volatile ( + asm volatile( "rdtimeh %0\n" "rdtime %1\n" - : "=r" (nH), "=r" (nL)); - return ((uint64_t) ((uint64_t) nH << 32)) | (nL); + : "=r"(nH), "=r"(nL)); + return ((uint64_t)((uint64_t) nH << 32)) | (nL); } #else { uint64_t n; - asm volatile ( + asm volatile( "rdtime %0" - : "=r" (n)); + : "=r"(n)); return n; } #endif diff --git a/src/plat/tk1/machine/smmu.c b/src/plat/tk1/machine/smmu.c index 153a15221..56a432160 100644 --- a/src/plat/tk1/machine/smmu.c +++ b/src/plat/tk1/machine/smmu.c @@ -93,11 +93,11 @@ smmu_disable(void) /* in hyp mode, we need call the hook in monitor mode */ /* we need physical address here */ paddr_t addr = addrFromPPtr(&do_smmu_disable); - asm (".arch_extension sec\n"); - asm volatile ("mov r0, %0\n\t" - "dsb\nisb\n" - "smc #0\n" - ::"r"(addr):"r0", "r1", "r2", "r3", "ip"); + asm(".arch_extension sec\n"); + asm volatile("mov r0, %0\n\t" + "dsb\nisb\n" + "smc #0\n" + ::"r"(addr):"r0", "r1", "r2", "r3", "ip"); } else { /* in secure mode, can enable it 
directly */
         smmu_regs->smmu_config = 0;
@@ -111,11 +111,11 @@ smmu_enable(void)
 {
     if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
         paddr_t addr = addrFromPPtr(&do_smmu_enable);
-        asm (".arch_extension sec\n");
-        asm volatile ("mov r0, %0\n\t"
-                      "dsb\nisb\n"
-                      "smc #0\n"
-                      ::"r"(addr):"r0", "r1", "r2", "r3", "ip");
+        asm(".arch_extension sec\n");
+        asm volatile("mov r0, %0\n\t"
+                     "dsb\nisb\n"
+                     "smc #0\n"
+                     ::"r"(addr):"r0", "r1", "r2", "r3", "ip");
     } else {
         smmu_regs->smmu_config = 1;
     }
diff --git a/src/util.c b/src/util.c
index d9af3e3fe..d7d7668ea 100644
--- a/src/util.c
+++ b/src/util.c
@@ -146,7 +146,7 @@ str_to_long(const char* str)
 uint32_t __clzsi2(uint32_t x)
 {
     uint32_t count = 0;
-    while ( !(x & 0x80000000U) && count < 34) {
+    while (!(x & 0x80000000U) && count < 34) {
         x <<= 1;
         count++;
     }
@@ -156,7 +156,7 @@ uint32_t __ctzsi2(uint32_t x)
 {
     uint32_t count = 0;
-    while ( !(x & 0x000000001) && count <= 32) {
+    while (!(x & 0x000000001) && count <= 32) {
         x >>= 1;
         count++;
     }
@@ -166,7 +166,7 @@ uint32_t __clzdi2(uint64_t x)
 {
     uint32_t count = 0;
-    while ( !(x & 0x8000000000000000U) && count < 65) {
+    while (!(x & 0x8000000000000000U) && count < 65) {
         x <<= 1;
         count++;
     }
@@ -176,7 +176,7 @@ uint32_t __ctzdi2(uint64_t x)
 {
     uint32_t count = 0;
-    while ( !(x & 0x00000000000000001) && count <= 64) {
+    while (!(x & 0x00000000000000001) && count <= 64) {
         x >>= 1;
         count++;
     }
-- 
GitLab