Merge tag 's390-6.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull more s390 updates from Alexander Gordeev:

 - Compile the decompressor with the -Wno-pointer-sign flag to avoid a clang warning

 - Fix an incomplete conversion to the flag output macros in __xsch(), which made it always return zero instead of the expected condition code (see the sketch after the __xsch() hunk at the end of the diff)

 - Remove superfluous trailing newlines from inline assemblies to improve compiler inlining decisions (illustrated by the sketch right after this message)

 - Expose the firmware-provided UID checking state in sysfs regardless of device presence or state

 - CIO does not unregister subchannels when the attached device is invalid or unavailable; update the purge function to remove I/O subchannels whose device number is on the cio_ignore list

 - Consolidate the PAI crypto allocation and cleanup paths

 - The uv_get_secret_metadata() function was removed a few months ago; also remove the comment that still mentions it

* tag 's390-6.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/uv: Fix comment of uv_find_secret() function
  s390/pai_crypto: Consolidate PAI crypto allocation and cleanup paths
  s390/cio: Update purge function to unregister the unused subchannels
  s390/pci: Expose firmware provided UID Checking state in sysfs
  s390: Remove superfluous newlines from inline assemblies
  s390/cio/ioasm: Fix __xsch() condition code handling
  s390: Add -Wno-pointer-sign to KBUILD_CFLAGS_DECOMPRESSOR
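The "superfluous newlines" change is easiest to see on a small example. A minimal sketch (my_load_old()/my_load_new() are invented names mirroring the __atomic64_read() hunk further down, not code from the patch): GCC and clang estimate the size of an asm statement by counting the newlines in its template, so a trailing "\n" after the last instruction makes a helper look one instruction larger than it is and can tip the inliner against inlining it.

static inline long my_load_old(const long *ptr)
{
	long val;

	/* Trailing "\n" after the last instruction: the compiler's size
	 * estimate counts an extra, empty asm line. */
	asm volatile("	lg	%[val],%[ptr]\n"
		     : [val] "=d" (val) : [ptr] "RT" (*ptr));
	return val;
}

static inline long my_load_new(const long *ptr)
{
	long val;

	/* No trailing newline: the estimate matches the single real
	 * instruction, which helps the inlining heuristics. */
	asm volatile("	lg	%[val],%[ptr]"
		     : [val] "=d" (val) : [ptr] "RT" (*ptr));
	return val;
}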
@@ -25,6 +25,7 @@ endif
KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack -std=gnu11
KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
KBUILD_CFLAGS_DECOMPRESSOR += -D__DECOMPRESSOR
KBUILD_CFLAGS_DECOMPRESSOR += -Wno-pointer-sign
KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding
@@ -27,7 +27,7 @@ static inline unsigned long __hypfs_sprp_diag304(void *data, unsigned long cmd)
{
union register_pair r1 = { .even = virt_to_phys(data), };

asm volatile("diag %[r1],%[r3],0x304\n"
asm volatile("diag %[r1],%[r3],0x304"
: [r1] "+&d" (r1.pair)
: [r3] "d" (cmd)
: "memory");
@@ -143,7 +143,7 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid,
" lghi 2,0\n" /* 0 into gr2 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(TAPQ) */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
" lgr %[reg2],2\n" /* gr2 into reg2 */
" lgr %[reg2],2" /* gr2 into reg2 */
: [reg1] "=&d" (reg1.value), [reg2] "=&d" (reg2)
: [qid] "d" (qid)
: "cc", "0", "1", "2");

@@ -186,7 +186,7 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid, int fbit)
asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(RAPQ) */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
" lgr %[reg1],1" /* gr1 (status) into reg1 */
: [reg1] "=&d" (reg1.value)
: [reg0] "d" (reg0)
: "cc", "0", "1");

@@ -211,7 +211,7 @@ static inline struct ap_queue_status ap_zapq(ap_qid_t qid, int fbit)
asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(ZAPQ) */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
" lgr %[reg1],1" /* gr1 (status) into reg1 */
: [reg1] "=&d" (reg1.value)
: [reg0] "d" (reg0)
: "cc", "0", "1");

@@ -315,7 +315,7 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
" lgr 1,%[reg1]\n" /* irq ctrl into gr1 */
" lgr 2,%[reg2]\n" /* ni addr into gr2 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(AQIC) */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
" lgr %[reg1],1" /* gr1 (status) into reg1 */
: [reg1] "+&d" (reg1.value)
: [reg0] "d" (reg0), [reg2] "d" (reg2)
: "cc", "memory", "0", "1", "2");

@@ -363,7 +363,7 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
" lgr 1,%[reg1]\n" /* qact in info into gr1 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(QACT) */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
" lgr %[reg2],2\n" /* qact out info into reg2 */
" lgr %[reg2],2" /* qact out info into reg2 */
: [reg1] "+&d" (reg1.value), [reg2] "=&d" (reg2)
: [reg0] "d" (reg0)
: "cc", "0", "1", "2");

@@ -388,7 +388,7 @@ static inline struct ap_queue_status ap_bapq(ap_qid_t qid)
asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(BAPQ) */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
" lgr %[reg1],1" /* gr1 (status) into reg1 */
: [reg1] "=&d" (reg1.value)
: [reg0] "d" (reg0)
: "cc", "0", "1");

@@ -416,7 +416,7 @@ static inline struct ap_queue_status ap_aapq(ap_qid_t qid, unsigned int sec_idx)
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" lgr 2,%[reg2]\n" /* secret index into gr2 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(AAPQ) */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
" lgr %[reg1],1" /* gr1 (status) into reg1 */
: [reg1] "=&d" (reg1.value)
: [reg0] "d" (reg0), [reg2] "d" (reg2)
: "cc", "0", "1", "2");

@@ -453,7 +453,7 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
" lgr 0,%[reg0]\n" /* qid param in gr0 */
"0: .insn rre,0xb2ad0000,%[nqap_r1],%[nqap_r2]\n"
" brc 2,0b\n" /* handle partial completion */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
" lgr %[reg1],1" /* gr1 (status) into reg1 */
: [reg0] "+&d" (reg0), [reg1] "=&d" (reg1.value),
[nqap_r2] "+&d" (nqap_r2.pair)
: [nqap_r1] "d" (nqap_r1.pair)

@@ -518,7 +518,7 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
" brc 6,0b\n" /* handle partial complete */
"2: lgr %[reg0],0\n" /* gr0 (qid + info) into reg0 */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
" lgr %[reg2],2\n" /* gr2 (res length) into reg2 */
" lgr %[reg2],2" /* gr2 (res length) into reg2 */
: [reg0] "+&d" (reg0), [reg1] "=&d" (reg1.value),
[reg2] "=&d" (reg2), [rp1] "+&d" (rp1.pair),
[rp2] "+&d" (rp2.pair)
@@ -17,7 +17,7 @@ static __always_inline int __atomic_read(const int *ptr)
int val;

asm volatile(
" l %[val],%[ptr]\n"
" l %[val],%[ptr]"
: [val] "=d" (val) : [ptr] "R" (*ptr));
return val;
}

@@ -26,11 +26,11 @@ static __always_inline void __atomic_set(int *ptr, int val)
{
if (__builtin_constant_p(val) && val >= S16_MIN && val <= S16_MAX) {
asm volatile(
" mvhi %[ptr],%[val]\n"
" mvhi %[ptr],%[val]"
: [ptr] "=Q" (*ptr) : [val] "K" (val));
} else {
asm volatile(
" st %[val],%[ptr]\n"
" st %[val],%[ptr]"
: [ptr] "=R" (*ptr) : [val] "d" (val));
}
}

@@ -40,7 +40,7 @@ static __always_inline long __atomic64_read(const long *ptr)
long val;

asm volatile(
" lg %[val],%[ptr]\n"
" lg %[val],%[ptr]"
: [val] "=d" (val) : [ptr] "RT" (*ptr));
return val;
}

@@ -49,11 +49,11 @@ static __always_inline void __atomic64_set(long *ptr, long val)
{
if (__builtin_constant_p(val) && val >= S16_MIN && val <= S16_MAX) {
asm volatile(
" mvghi %[ptr],%[val]\n"
" mvghi %[ptr],%[val]"
: [ptr] "=Q" (*ptr) : [val] "K" (val));
} else {
asm volatile(
" stg %[val],%[ptr]\n"
" stg %[val],%[ptr]"
: [ptr] "=RT" (*ptr) : [val] "d" (val));
}
}

@@ -66,7 +66,7 @@ static __always_inline op_type op_name(op_type val, op_type *ptr) \
op_type old; \
\
asm volatile( \
op_string " %[old],%[val],%[ptr]\n" \
op_string " %[old],%[val],%[ptr]" \
op_barrier \
: [old] "=d" (old), [ptr] "+QS" (*ptr) \
: [val] "d" (val) : "cc", "memory"); \

@@ -75,7 +75,7 @@ static __always_inline op_type op_name(op_type val, op_type *ptr) \

#define __ATOMIC_OPS(op_name, op_type, op_string) \
__ATOMIC_OP(op_name, op_type, op_string, "") \
__ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
__ATOMIC_OP(op_name##_barrier, op_type, op_string, "\nbcr 14,0")

__ATOMIC_OPS(__atomic_add, int, "laa")
__ATOMIC_OPS(__atomic_and, int, "lan")

@@ -94,14 +94,14 @@ __ATOMIC_OPS(__atomic64_xor, long, "laxg")
static __always_inline void op_name(op_type val, op_type *ptr) \
{ \
asm volatile( \
op_string " %[ptr],%[val]\n" \
op_string " %[ptr],%[val]" \
op_barrier \
: [ptr] "+QS" (*ptr) : [val] "i" (val) : "cc", "memory");\
}

#define __ATOMIC_CONST_OPS(op_name, op_type, op_string) \
__ATOMIC_CONST_OP(op_name, op_type, op_string, "") \
__ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
__ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "\nbcr 14,0")

__ATOMIC_CONST_OPS(__atomic_add_const, int, "asi")
__ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")

@@ -179,7 +179,7 @@ static __always_inline bool op_name(op_type val, op_type *ptr) \
int cc; \
\
asm volatile( \
op_string " %[tmp],%[val],%[ptr]\n" \
op_string " %[tmp],%[val],%[ptr]" \
op_barrier \
: "=@cc" (cc), [tmp] "=d" (tmp), [ptr] "+QS" (*ptr) \
: [val] "d" (val) \

@@ -189,7 +189,7 @@ static __always_inline bool op_name(op_type val, op_type *ptr) \

#define __ATOMIC_TEST_OPS(op_name, op_type, op_string) \
__ATOMIC_TEST_OP(op_name, op_type, op_string, "") \
__ATOMIC_TEST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
__ATOMIC_TEST_OP(op_name##_barrier, op_type, op_string, "\nbcr 14,0")

__ATOMIC_TEST_OPS(__atomic_add_and_test, int, "laal")
__ATOMIC_TEST_OPS(__atomic64_add_and_test, long, "laalg")

@@ -203,7 +203,7 @@ static __always_inline bool op_name(op_type val, op_type *ptr) \
int cc; \
\
asm volatile( \
op_string " %[ptr],%[val]\n" \
op_string " %[ptr],%[val]" \
op_barrier \
: "=@cc" (cc), [ptr] "+QS" (*ptr) \
: [val] "i" (val) \

@@ -213,7 +213,7 @@ static __always_inline bool op_name(op_type val, op_type *ptr) \

#define __ATOMIC_CONST_TEST_OPS(op_name, op_type, op_string) \
__ATOMIC_CONST_TEST_OP(op_name, op_type, op_string, "") \
__ATOMIC_CONST_TEST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
__ATOMIC_CONST_TEST_OP(op_name##_barrier, op_type, op_string, "\nbcr 14,0")

__ATOMIC_CONST_TEST_OPS(__atomic_add_const_and_test, int, "alsi")
__ATOMIC_CONST_TEST_OPS(__atomic64_add_const_and_test, long, "algsi")
@@ -18,9 +18,9 @@

#ifdef MARCH_HAS_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BCR_SERIALIZE "bcr 14,0\n"
#define __ASM_BCR_SERIALIZE "bcr 14,0"
#else
#define __ASM_BCR_SERIALIZE "bcr 15,0\n"
#define __ASM_BCR_SERIALIZE "bcr 15,0"
#endif

static __always_inline void bcr_serialize(void)

@@ -69,12 +69,12 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,

if (__builtin_constant_p(size) && size > 0) {
asm(" clgr %2,%1\n"
" slbgr %0,%0\n"
" slbgr %0,%0"
:"=d" (mask) : "d" (size-1), "d" (index) :"cc");
return mask;
}
asm(" clgr %1,%2\n"
" slbgr %0,%0\n"
" slbgr %0,%0"
:"=d" (mask) : "d" (size), "d" (index) :"cc");
return ~mask;
}
@@ -62,7 +62,7 @@ static __always_inline bool arch_test_bit(unsigned long nr, const volatile unsig
addr += (nr ^ (BITS_PER_LONG - BITS_PER_BYTE)) / BITS_PER_BYTE;
mask = 1UL << (nr & (BITS_PER_BYTE - 1));
asm volatile(
" tm %[addr],%[mask]\n"
" tm %[addr],%[mask]"
: "=@cc" (cc)
: [addr] "Q" (*addr), [mask] "I" (mask)
);

@@ -27,7 +27,7 @@ static inline __wsum cksm(const void *buff, int len, __wsum sum)
kmsan_check_memory(buff, len);
asm volatile(
"0: cksm %[sum],%[rp]\n"
" jo 0b\n"
" jo 0b"
: [sum] "+&d" (sum), [rp] "+&d" (rp.pair) : : "cc", "memory");
return sum;
}

@@ -18,7 +18,7 @@ void __cmpxchg_called_with_bad_pointer(void);
static __always_inline u32 __cs_asm(u64 ptr, u32 old, u32 new)
{
asm volatile(
" cs %[old],%[new],%[ptr]\n"
" cs %[old],%[new],%[ptr]"
: [old] "+d" (old), [ptr] "+Q" (*(u32 *)ptr)
: [new] "d" (new)
: "memory", "cc");

@@ -28,7 +28,7 @@ static __always_inline u32 __cs_asm(u64 ptr, u32 old, u32 new)
static __always_inline u64 __csg_asm(u64 ptr, u64 old, u64 new)
{
asm volatile(
" csg %[old],%[new],%[ptr]\n"
" csg %[old],%[new],%[ptr]"
: [old] "+d" (old), [ptr] "+QS" (*(u64 *)ptr)
: [new] "d" (new)
: "memory", "cc");

@@ -126,7 +126,7 @@ static __always_inline u64 __arch_cmpxchg(u64 ptr, u64 old, u64 new, int size)
} \
case 4: { \
asm volatile( \
" cs %[__old],%[__new],%[__ptr]\n" \
" cs %[__old],%[__new],%[__ptr]" \
: [__old] "+d" (*__oldp), \
[__ptr] "+Q" (*(ptr)), \
"=@cc" (__cc) \

@@ -136,7 +136,7 @@ static __always_inline u64 __arch_cmpxchg(u64 ptr, u64 old, u64 new, int size)
} \
case 8: { \
asm volatile( \
" csg %[__old],%[__new],%[__ptr]\n" \
" csg %[__old],%[__new],%[__ptr]" \
: [__old] "+d" (*__oldp), \
[__ptr] "+QS" (*(ptr)), \
"=@cc" (__cc) \

@@ -241,7 +241,7 @@ static __always_inline u64 __arch_xchg(u64 ptr, u64 x, int size)
static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new)
{
asm volatile(
" cdsg %[old],%[new],%[ptr]\n"
" cdsg %[old],%[new],%[ptr]"
: [old] "+d" (old), [ptr] "+QS" (*ptr)
: [new] "d" (new)
: "memory", "cc");

@@ -258,7 +258,7 @@ static __always_inline bool arch_try_cmpxchg128(volatile u128 *ptr, u128 *oldp,
int cc;

asm volatile(
" cdsg %[old],%[new],%[ptr]\n"
" cdsg %[old],%[new],%[ptr]"
: [old] "+d" (*oldp), [ptr] "+QS" (*ptr), "=@cc" (cc)
: [new] "d" (new)
: "memory");
@@ -229,7 +229,7 @@ static __always_inline void __cpacf_query_rre(u32 opc, u8 r1, u8 r2,
asm volatile(
" la %%r1,%[pb]\n"
" lghi %%r0,%[fc]\n"
" .insn rre,%[opc] << 16,%[r1],%[r2]\n"
" .insn rre,%[opc] << 16,%[r1],%[r2]"
: [pb] "=R" (*pb)
: [opc] "i" (opc), [fc] "i" (fc),
[r1] "i" (r1), [r2] "i" (r2)

@@ -242,7 +242,7 @@ static __always_inline void __cpacf_query_rrf(u32 opc, u8 r1, u8 r2, u8 r3,
asm volatile(
" la %%r1,%[pb]\n"
" lghi %%r0,%[fc]\n"
" .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n"
" .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]"
: [pb] "=R" (*pb)
: [opc] "i" (opc), [fc] "i" (fc), [r1] "i" (r1),
[r2] "i" (r2), [r3] "i" (r3), [m4] "i" (m4)

@@ -416,7 +416,7 @@ static inline int cpacf_km(unsigned long func, void *param,
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,%[dst],%[src]\n"
" brc 1,0b\n" /* handle partial completion */
" brc 1,0b" /* handle partial completion */
: [src] "+&d" (s.pair), [dst] "+&d" (d.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_KM)

@@ -448,7 +448,7 @@ static inline int cpacf_kmc(unsigned long func, void *param,
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,%[dst],%[src]\n"
" brc 1,0b\n" /* handle partial completion */
" brc 1,0b" /* handle partial completion */
: [src] "+&d" (s.pair), [dst] "+&d" (d.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_KMC)

@@ -476,7 +476,7 @@ static inline void cpacf_kimd(unsigned long func, void *param,
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rrf,%[opc] << 16,0,%[src],8,0\n"
" brc 1,0b\n" /* handle partial completion */
" brc 1,0b" /* handle partial completion */
: [src] "+&d" (s.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)(param)),
[opc] "i" (CPACF_KIMD)

@@ -501,7 +501,7 @@ static inline void cpacf_klmd(unsigned long func, void *param,
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rrf,%[opc] << 16,0,%[src],8,0\n"
" brc 1,0b\n" /* handle partial completion */
" brc 1,0b" /* handle partial completion */
: [src] "+&d" (s.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_KLMD)

@@ -530,7 +530,7 @@ static inline int _cpacf_kmac(unsigned long *gr0, void *param,
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,0,%[src]\n"
" brc 1,0b\n" /* handle partial completion */
" lgr %[r0],0\n"
" lgr %[r0],0"
: [r0] "+d" (*gr0), [src] "+&d" (s.pair)
: [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_KMAC)

@@ -580,7 +580,7 @@ static inline int cpacf_kmctr(unsigned long func, void *param, u8 *dest,
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rrf,%[opc] << 16,%[dst],%[src],%[ctr],0\n"
" brc 1,0b\n" /* handle partial completion */
" brc 1,0b" /* handle partial completion */
: [src] "+&d" (s.pair), [dst] "+&d" (d.pair),
[ctr] "+&d" (c.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),

@@ -614,7 +614,7 @@ static inline void cpacf_prno(unsigned long func, void *param,
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,%[dst],%[seed]\n"
" brc 1,0b\n" /* handle partial completion */
" brc 1,0b" /* handle partial completion */
: [dst] "+&d" (d.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
[seed] "d" (s.pair), [opc] "i" (CPACF_PRNO)

@@ -640,7 +640,7 @@ static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len,
asm volatile (
" lghi 0,%[fc]\n"
"0: .insn rre,%[opc] << 16,%[ucbuf],%[cbuf]\n"
" brc 1,0b\n" /* handle partial completion */
" brc 1,0b" /* handle partial completion */
: [ucbuf] "+&d" (u.pair), [cbuf] "+&d" (c.pair)
: [fc] "K" (CPACF_PRNO_TRNG), [opc] "i" (CPACF_PRNO)
: "cc", "memory", "0");

@@ -692,7 +692,7 @@ static inline void cpacf_pckmo(long func, void *param)
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
" .insn rre,%[opc] << 16,0,0\n" /* PCKMO opcode */
" .insn rre,%[opc] << 16,0,0" /* PCKMO opcode */
:
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_PCKMO)

@@ -725,7 +725,7 @@ static inline void cpacf_kma(unsigned long func, void *param, u8 *dest,
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rrf,%[opc] << 16,%[dst],%[src],%[aad],0\n"
" brc 1,0b\n" /* handle partial completion */
" brc 1,0b" /* handle partial completion */
: [dst] "+&d" (d.pair), [src] "+&d" (s.pair),
[aad] "+&d" (a.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
@@ -100,7 +100,7 @@ struct ctlreg {
BUILD_BUG_ON(sizeof(struct addrtype) != _esize); \
typecheck(struct ctlreg, array[0]); \
asm volatile( \
" lctlg %[_low],%[_high],%[_arr]\n" \
" lctlg %[_low],%[_high],%[_arr]" \
: \
: [_arr] "Q" (*(struct addrtype *)(&array)), \
[_low] "i" (low), [_high] "i" (high) \

@@ -119,7 +119,7 @@ struct ctlreg {
BUILD_BUG_ON(sizeof(struct addrtype) != _esize); \
typecheck(struct ctlreg, array[0]); \
asm volatile( \
" stctg %[_low],%[_high],%[_arr]\n" \
" stctg %[_low],%[_high],%[_arr]" \
: [_arr] "=Q" (*(struct addrtype *)(&array)) \
: [_low] "i" (low), [_high] "i" (high)); \
} while (0)

@@ -127,7 +127,7 @@ struct ctlreg {
static __always_inline void local_ctl_load(unsigned int cr, struct ctlreg *reg)
{
asm volatile(
" lctlg %[cr],%[cr],%[reg]\n"
" lctlg %[cr],%[cr],%[reg]"
:
: [reg] "Q" (*reg), [cr] "i" (cr)
: "memory");

@@ -136,7 +136,7 @@ static __always_inline void local_ctl_load(unsigned int cr, struct ctlreg *reg)
static __always_inline void local_ctl_store(unsigned int cr, struct ctlreg *reg)
{
asm volatile(
" stctg %[cr],%[cr],%[reg]\n"
" stctg %[cr],%[cr],%[reg]"
: [reg] "=Q" (*reg)
: [cr] "i" (cr));
}
@@ -38,7 +38,7 @@ asm(".include \"asm/fpu-insn-asm.h\"\n");

static __always_inline void fpu_cefbr(u8 f1, s32 val)
{
asm volatile("cefbr %[f1],%[val]\n"
asm volatile("cefbr %[f1],%[val]"
:
: [f1] "I" (f1), [val] "d" (val)
: "memory");

@@ -48,7 +48,7 @@ static __always_inline unsigned long fpu_cgebr(u8 f2, u8 mode)
{
unsigned long val;

asm volatile("cgebr %[val],%[mode],%[f2]\n"
asm volatile("cgebr %[val],%[mode],%[f2]"
: [val] "=d" (val)
: [f2] "I" (f2), [mode] "I" (mode)
: "memory");

@@ -57,7 +57,7 @@ static __always_inline unsigned long fpu_cgebr(u8 f2, u8 mode)

static __always_inline void fpu_debr(u8 f1, u8 f2)
{
asm volatile("debr %[f1],%[f2]\n"
asm volatile("debr %[f1],%[f2]"
:
: [f1] "I" (f1), [f2] "I" (f2)
: "memory");

@@ -66,7 +66,7 @@ static __always_inline void fpu_debr(u8 f1, u8 f2)
static __always_inline void fpu_ld(unsigned short fpr, freg_t *reg)
{
instrument_read(reg, sizeof(*reg));
asm volatile("ld %[fpr],%[reg]\n"
asm volatile("ld %[fpr],%[reg]"
:
: [fpr] "I" (fpr), [reg] "Q" (reg->ui)
: "memory");

@@ -74,7 +74,7 @@ static __always_inline void fpu_ld(unsigned short fpr, freg_t *reg)

static __always_inline void fpu_ldgr(u8 f1, u32 val)
{
asm volatile("ldgr %[f1],%[val]\n"
asm volatile("ldgr %[f1],%[val]"
:
: [f1] "I" (f1), [val] "d" (val)
: "memory");

@@ -113,7 +113,7 @@ static inline void fpu_lfpc_safe(unsigned int *fpc)
static __always_inline void fpu_std(unsigned short fpr, freg_t *reg)
{
instrument_write(reg, sizeof(*reg));
asm volatile("std %[fpr],%[reg]\n"
asm volatile("std %[fpr],%[reg]"
: [reg] "=Q" (reg->ui)
: [fpr] "I" (fpr)
: "memory");

@@ -181,7 +181,7 @@ static __always_inline void fpu_vgfmg(u8 v1, u8 v2, u8 v3)
static __always_inline void fpu_vl(u8 v1, const void *vxr)
{
instrument_read(vxr, sizeof(__vector128));
asm volatile("VL %[v1],%O[vxr],,%R[vxr]\n"
asm volatile("VL %[v1],%O[vxr],,%R[vxr]"
:
: [vxr] "Q" (*(__vector128 *)vxr),
[v1] "I" (v1)

@@ -195,7 +195,7 @@ static __always_inline void fpu_vl(u8 v1, const void *vxr)
instrument_read(vxr, sizeof(__vector128));
asm volatile(
" la 1,%[vxr]\n"
" VL %[v1],0,,1\n"
" VL %[v1],0,,1"
:
: [vxr] "R" (*(__vector128 *)vxr),
[v1] "I" (v1)

@@ -239,7 +239,7 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)

size = min(index + 1, sizeof(__vector128));
instrument_read(vxr, size);
asm volatile("VLL %[v1],%[index],%O[vxr],%R[vxr]\n"
asm volatile("VLL %[v1],%[index],%O[vxr],%R[vxr]"
:
: [vxr] "Q" (*(u8 *)vxr),
[index] "d" (index),

@@ -257,7 +257,7 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
instrument_read(vxr, size);
asm volatile(
" la 1,%[vxr]\n"
" VLL %[v1],%[index],0,1\n"
" VLL %[v1],%[index],0,1"
:
: [vxr] "R" (*(u8 *)vxr),
[index] "d" (index),

@@ -277,7 +277,7 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
} *_v = (void *)(_vxrs); \
\
instrument_read(_v, size); \
asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]" \
: \
: [vxrs] "Q" (*_v), \
[v1] "I" (_v1), [v3] "I" (_v3) \

@@ -297,7 +297,7 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
instrument_read(_v, size); \
asm volatile( \
" la 1,%[vxrs]\n" \
" VLM %[v1],%[v3],0,1\n" \
" VLM %[v1],%[v3],0,1" \
: \
: [vxrs] "R" (*_v), \
[v1] "I" (_v1), [v3] "I" (_v3) \

@@ -360,7 +360,7 @@ static __always_inline void fpu_vsrlb(u8 v1, u8 v2, u8 v3)
static __always_inline void fpu_vst(u8 v1, const void *vxr)
{
instrument_write(vxr, sizeof(__vector128));
asm volatile("VST %[v1],%O[vxr],,%R[vxr]\n"
asm volatile("VST %[v1],%O[vxr],,%R[vxr]"
: [vxr] "=Q" (*(__vector128 *)vxr)
: [v1] "I" (v1)
: "memory");

@@ -373,7 +373,7 @@ static __always_inline void fpu_vst(u8 v1, const void *vxr)
instrument_write(vxr, sizeof(__vector128));
asm volatile(
" la 1,%[vxr]\n"
" VST %[v1],0,,1\n"
" VST %[v1],0,,1"
: [vxr] "=R" (*(__vector128 *)vxr)
: [v1] "I" (v1)
: "memory", "1");

@@ -389,7 +389,7 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)

size = min(index + 1, sizeof(__vector128));
instrument_write(vxr, size);
asm volatile("VSTL %[v1],%[index],%O[vxr],%R[vxr]\n"
asm volatile("VSTL %[v1],%[index],%O[vxr],%R[vxr]"
: [vxr] "=Q" (*(u8 *)vxr)
: [index] "d" (index), [v1] "I" (v1)
: "memory");

@@ -405,7 +405,7 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
instrument_write(vxr, size);
asm volatile(
" la 1,%[vxr]\n"
" VSTL %[v1],%[index],0,1\n"
" VSTL %[v1],%[index],0,1"
: [vxr] "=R" (*(u8 *)vxr)
: [index] "d" (index), [v1] "I" (v1)
: "memory", "1");

@@ -423,7 +423,7 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
} *_v = (void *)(_vxrs); \
\
instrument_write(_v, size); \
asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]" \
: [vxrs] "=Q" (*_v) \
: [v1] "I" (_v1), [v3] "I" (_v3) \
: "memory"); \

@@ -442,7 +442,7 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
instrument_write(_v, size); \
asm volatile( \
" la 1,%[vxrs]\n" \
" VSTM %[v1],%[v3],0,1\n" \
" VSTM %[v1],%[v3],0,1" \
: [vxrs] "=R" (*_v) \
: [v1] "I" (_v1), [v3] "I" (_v3) \
: "memory", "1"); \
@@ -76,7 +76,7 @@ long __kvm_hypercall##args(unsigned long nr HYPERCALL_PARM_##args) \
HYPERCALL_REGS_##args; \
\
asm volatile ( \
" diag 2,4,0x500\n" \
" diag 2,4,0x500" \
: "=d" (__rc) \
: "d" (__nr) HYPERCALL_FMT_##args \
: "memory", "cc"); \

@@ -246,6 +246,16 @@ int clp_refresh_fh(u32 fid, u32 *fh);
/* UID */
void update_uid_checking(bool new);

/* Firmware Sysfs */
int __init __zpci_fw_sysfs_init(void);

static inline int __init zpci_fw_sysfs_init(void)
{
if (IS_ENABLED(CONFIG_SYSFS))
return __zpci_fw_sysfs_init();
return 0;
}

/* IOMMU Interface */
int zpci_init_iommu(struct zpci_dev *zdev);
void zpci_destroy_iommu(struct zpci_dev *zdev);
@@ -73,13 +73,13 @@
if (__builtin_constant_p(val__) && \
((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
asm volatile( \
op2 " %[ptr__],%[val__]\n" \
op2 " %[ptr__],%[val__]" \
: [ptr__] "+Q" (*ptr__) \
: [val__] "i" ((szcast)val__) \
: "cc"); \
} else { \
asm volatile( \
op1 " %[old__],%[val__],%[ptr__]\n" \
op1 " %[old__],%[val__],%[ptr__]" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \

@@ -98,7 +98,7 @@
preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
op " %[old__],%[val__],%[ptr__]\n" \
op " %[old__],%[val__],%[ptr__]" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \

@@ -117,7 +117,7 @@
preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
op " %[old__],%[val__],%[ptr__]\n" \
op " %[old__],%[val__],%[ptr__]" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
@@ -163,7 +163,7 @@ static __always_inline void __stackleak_poison(unsigned long erase_low,
"2: stg %[poison],0(%[addr])\n"
" j 4f\n"
"3: mvc 8(1,%[addr]),0(%[addr])\n"
"4:\n"
"4:"
: [addr] "+&a" (erase_low), [count] "+&d" (count), [tmp] "=&a" (tmp)
: [poison] "d" (poison)
: "memory", "cc"

@@ -19,7 +19,7 @@
\
BUILD_BUG_ON(sizeof(x) != 16); \
asm volatile( \
" lpq %[val],%[_x]\n" \
" lpq %[val],%[_x]" \
: [val] "=d" (__u.val) \
: [_x] "QS" (x) \
: "memory"); \
@@ -98,7 +98,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
kcsan_release();
asm_inline volatile(
ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", ALT_FACILITY(49)) /* NIAI 7 */
" mvhhi %[lock],0\n"
" mvhhi %[lock],0"
: [lock] "=Q" (((unsigned short *)&lp->lock)[1])
:
: "memory");
@@ -199,7 +199,7 @@ static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
" lg 15,%[_stack]\n" \
" stg %[_frame],%[_bc](15)\n" \
" brasl 14,%[_fn]\n" \
" lgr 15,%[_prev]\n" \
" lgr 15,%[_prev]" \
: [_prev] "=&d" (prev), CALL_FMT_##nr \
: [_stack] "R" (__stack), \
[_bc] "i" (offsetof(struct stack_frame, back_chain)), \

@@ -250,7 +250,7 @@ static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
" lra 14,0(1)\n" \
" lpswe %[psw_enter]\n" \
"0: lpswe 0(7)\n" \
"1:\n" \
"1:" \
: CALL_FMT_##nr, [psw_leave] "=Q" (psw_leave) \
: [psw_enter] "Q" (psw_enter) \
: "7", CALL_CLOBBER_##nr); \
@@ -125,7 +125,7 @@ static inline void *memscan(void *s, int c, size_t n)
asm volatile(
" lgr 0,%[c]\n"
"0: srst %[ret],%[s]\n"
" jo 0b\n"
" jo 0b"
: [ret] "+&a" (ret), [s] "+&a" (s)
: [c] "d" (c)
: "cc", "memory", "0");

@@ -155,7 +155,7 @@ long syscall##nr(unsigned long syscall SYSCALL_PARM_##nr) \
SYSCALL_REGS_##nr; \
\
asm volatile ( \
" svc 0\n" \
" svc 0" \
: "=d" (rc) \
: "d" (r1) SYSCALL_FMT_##nr \
: "memory"); \
@@ -81,7 +81,7 @@ static inline void set_tod_programmable_field(u16 val)
{
asm volatile(
" lgr 0,%[val]\n"
" sckpf\n"
" sckpf"
:
: [val] "d" ((unsigned long)val)
: "0");

@@ -66,7 +66,7 @@ static inline unsigned long diag310(unsigned long subcode, unsigned long size, v
union register_pair rp = { .even = (unsigned long)addr, .odd = size };

diag_stat_inc(DIAG_STAT_X310);
asm volatile("diag %[rp],%[subcode],0x310\n"
asm volatile("diag %[rp],%[subcode],0x310"
: [rp] "+d" (rp.pair)
: [subcode] "d" (subcode)
: "memory");

@@ -101,7 +101,7 @@ static unsigned long diag324(unsigned long subcode, void *addr)
union register_pair rp = { .even = (unsigned long)addr };

diag_stat_inc(DIAG_STAT_X324);
asm volatile("diag %[rp],%[subcode],0x324\n"
asm volatile("diag %[rp],%[subcode],0x324"
: [rp] "+d" (rp.pair)
: [subcode] "d" (subcode)
: "memory");
@@ -80,6 +80,15 @@ static int paicrypt_root_alloc(void)
/* Release the PMU if event is the last perf event */
static DEFINE_MUTEX(pai_reserve_mutex);

/* Free all memory allocated for event counting/sampling setup */
static void paicrypt_free(struct paicrypt_mapptr *mp)
{
free_page((unsigned long)mp->mapptr->page);
kvfree(mp->mapptr->save);
kfree(mp->mapptr);
mp->mapptr = NULL;
}

/* Adjust usage counters and remove allocated memory when all users are
* gone.
*/

@@ -93,15 +102,8 @@ static void paicrypt_event_destroy_cpu(struct perf_event *event, int cpu)
"refcnt %u\n", __func__, event->attr.config,
event->cpu, cpump->active_events,
refcount_read(&cpump->refcnt));
if (refcount_dec_and_test(&cpump->refcnt)) {
debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
__func__, (unsigned long)cpump->page,
cpump->save);
free_page((unsigned long)cpump->page);
kvfree(cpump->save);
kfree(cpump);
mp->mapptr = NULL;
}
if (refcount_dec_and_test(&cpump->refcnt))
paicrypt_free(mp);
paicrypt_root_free();
mutex_unlock(&pai_reserve_mutex);
}

@@ -175,14 +177,13 @@ static u64 paicrypt_getall(struct perf_event *event)
*
* Allocate the memory for the event.
*/
static struct paicrypt_map *paicrypt_busy(struct perf_event *event, int cpu)
static int paicrypt_alloc_cpu(struct perf_event *event, int cpu)
{
struct paicrypt_map *cpump = NULL;
struct paicrypt_mapptr *mp;
int rc;

mutex_lock(&pai_reserve_mutex);

/* Allocate root node */
rc = paicrypt_root_alloc();
if (rc)

@@ -192,58 +193,44 @@ static struct paicrypt_map *paicrypt_busy(struct perf_event *event, int cpu)
mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
cpump = mp->mapptr;
if (!cpump) { /* Paicrypt_map allocated? */
rc = -ENOMEM;
cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
if (!cpump) {
rc = -ENOMEM;
goto free_root;
if (!cpump)
goto undo;
/* Allocate memory for counter page and counter extraction.
* Only the first counting event has to allocate a page.
*/
mp->mapptr = cpump;
cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
cpump->save = kvmalloc_array(paicrypt_cnt + 1,
sizeof(struct pai_userdata),
GFP_KERNEL);
if (!cpump->page || !cpump->save) {
paicrypt_free(mp);
goto undo;
}
INIT_LIST_HEAD(&cpump->syswide_list);
}

/* Allocate memory for counter page and counter extraction.
* Only the first counting event has to allocate a page.
*/
if (cpump->page) {
refcount_set(&cpump->refcnt, 1);
rc = 0;
} else {
refcount_inc(&cpump->refcnt);
goto unlock;
}

rc = -ENOMEM;
cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (!cpump->page)
goto free_paicrypt_map;
cpump->save = kvmalloc_array(paicrypt_cnt + 1,
sizeof(struct pai_userdata), GFP_KERNEL);
if (!cpump->save) {
free_page((unsigned long)cpump->page);
cpump->page = NULL;
goto free_paicrypt_map;
undo:
if (rc) {
/* Error in allocation of event, decrement anchor. Since
* the event in not created, its destroy() function is never
* invoked. Adjust the reference counter for the anchor.
*/
paicrypt_root_free();
}

/* Set mode and reference count */
rc = 0;
refcount_set(&cpump->refcnt, 1);
mp->mapptr = cpump;
debug_sprintf_event(cfm_dbg, 5, "%s users %d refcnt %u page %#lx "
"save %p rc %d\n", __func__, cpump->active_events,
refcount_read(&cpump->refcnt),
(unsigned long)cpump->page, cpump->save, rc);
goto unlock;

free_paicrypt_map:
/* Undo memory allocation */
kfree(cpump);
mp->mapptr = NULL;
free_root:
paicrypt_root_free();
unlock:
mutex_unlock(&pai_reserve_mutex);
return rc ? ERR_PTR(rc) : cpump;
return rc;
}

static int paicrypt_event_init_all(struct perf_event *event)
static int paicrypt_alloc(struct perf_event *event)
{
struct paicrypt_map *cpump;
struct cpumask *maskptr;
int cpu, rc = -ENOMEM;

@@ -252,12 +239,11 @@ static int paicrypt_event_init_all(struct perf_event *event)
goto out;

for_each_online_cpu(cpu) {
cpump = paicrypt_busy(event, cpu);
if (IS_ERR(cpump)) {
rc = paicrypt_alloc_cpu(event, cpu);
if (rc) {
for_each_cpu(cpu, maskptr)
paicrypt_event_destroy_cpu(event, cpu);
kfree(maskptr);
rc = PTR_ERR(cpump);
goto out;
}
cpumask_set_cpu(cpu, maskptr);

@@ -279,7 +265,6 @@ out:
static int paicrypt_event_init(struct perf_event *event)
{
struct perf_event_attr *a = &event->attr;
struct paicrypt_map *cpump;
int rc = 0;

/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */

@@ -301,13 +286,10 @@ static int paicrypt_event_init(struct perf_event *event)
}
}

if (event->cpu >= 0) {
cpump = paicrypt_busy(event, event->cpu);
if (IS_ERR(cpump))
rc = PTR_ERR(cpump);
} else {
rc = paicrypt_event_init_all(event);
}
if (event->cpu >= 0)
rc = paicrypt_alloc_cpu(event, event->cpu);
else
rc = paicrypt_alloc(event);
if (rc) {
free_page(PAI_SAVE_AREA(event));
goto out;
@@ -839,7 +839,7 @@ static void __init setup_control_program_code(void)
return;

diag_stat_inc(DIAG_STAT_X318);
asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
asm volatile("diag %0,0,0x318" : : "d" (diag318_info.val));
}

/*

@@ -11,7 +11,7 @@ static inline unsigned long load_real_address(unsigned long address)
unsigned long real;

asm volatile(
" lra %[real],0(%[address])\n"
" lra %[real],0(%[address])"
: [real] "=d" (real)
: [address] "a" (address)
: "cc");

@@ -340,7 +340,7 @@ static void pcpu_delegate(struct pcpu *pcpu, int cpu,
"0: sigp 0,%0,%2 # sigp restart to target cpu\n"
" brc 2,0b # busy, try again\n"
"1: sigp 0,%1,%3 # sigp stop to current cpu\n"
" brc 2,1b # busy, try again\n"
" brc 2,1b # busy, try again"
: : "d" (pcpu->address), "d" (source_cpu),
"K" (SIGP_RESTART), "K" (SIGP_STOP)
: "0", "1", "cc");
@@ -866,8 +866,8 @@ static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN],
return -ENOENT;
}

/*
* Do the actual search for `uv_get_secret_metadata`.
/**
* uv_find_secret() - search secret metadata for a given secret id.
* @secret_id: search pattern.
* @list: ephemeral buffer space
* @secret: output data, containing the secret's metadata.
@@ -356,7 +356,7 @@ static __always_inline void pfcr_query(u8 (*query)[16])
{
asm volatile(
" lghi 0,0\n"
" .insn rsy,0xeb0000000016,0,0,%[query]\n"
" .insn rsy,0xeb0000000016,0,0,%[query]"
: [query] "=QS" (*query)
:
: "cc", "0");

@@ -368,7 +368,7 @@ static __always_inline void __sortl_query(u8 (*query)[32])
" lghi 0,0\n"
" la 1,%[query]\n"
/* Parameter registers are ignored */
" .insn rre,0xb9380000,2,4\n"
" .insn rre,0xb9380000,2,4"
: [query] "=R" (*query)
:
: "cc", "0", "1");

@@ -380,7 +380,7 @@ static __always_inline void __dfltcc_query(u8 (*query)[32])
" lghi 0,0\n"
" la 1,%[query]\n"
/* Parameter registers are ignored */
" .insn rrf,0xb9390000,2,4,6,0\n"
" .insn rrf,0xb9390000,2,4,6,0"
: [query] "=R" (*query)
:
: "cc", "0", "1");
@@ -96,7 +96,7 @@ static inline int arch_load_niai4(int *lock)

asm_inline volatile(
ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", ALT_FACILITY(49)) /* NIAI 4 */
" l %[owner],%[lock]\n"
" l %[owner],%[lock]"
: [owner] "=d" (owner) : [lock] "R" (*lock) : "memory");
return owner;
}

@@ -109,7 +109,7 @@ static inline int arch_try_cmpxchg_niai8(int *lock, int old, int new)

asm_inline volatile(
ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */
" cs %[old],%[new],%[lock]\n"
" cs %[old],%[new],%[lock]"
: [old] "+d" (old), [lock] "+Q" (*lock), "=@cc" (cc)
: [new] "d" (new)
: "memory");

@@ -124,7 +124,7 @@ static inline int arch_try_cmpxchg_niai8(int *lock, int old, int new)

asm_inline volatile(
ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */
" cs %[old],%[new],%[lock]\n"
" cs %[old],%[new],%[lock]"
: [old] "+d" (old), [lock] "+Q" (*lock)
: [new] "d" (new)
: "cc", "memory");
@@ -27,7 +27,7 @@ static inline char *__strend(const char *s)
asm volatile(
" lghi 0,0\n"
"0: srst %[e],%[s]\n"
" jo 0b\n"
" jo 0b"
: [e] "+&a" (e), [s] "+&a" (s)
:
: "cc", "memory", "0");

@@ -41,7 +41,7 @@ static inline char *__strnend(const char *s, size_t n)
asm volatile(
" lghi 0,0\n"
"0: srst %[p],%[s]\n"
" jo 0b\n"
" jo 0b"
: [p] "+&d" (p), [s] "+&a" (s)
:
: "cc", "memory", "0");

@@ -95,7 +95,7 @@ char *strcat(char *dest, const char *src)
"0: srst %[dummy],%[dest]\n"
" jo 0b\n"
"1: mvst %[dummy],%[src]\n"
" jo 1b\n"
" jo 1b"
: [dummy] "+&a" (dummy), [dest] "+&a" (dest), [src] "+&a" (src)
:
: "cc", "memory", "0");

@@ -291,7 +291,7 @@ void *memscan(void *s, int c, size_t n)
asm volatile(
" lgr 0,%[c]\n"
"0: srst %[ret],%[s]\n"
" jo 0b\n"
" jo 0b"
: [ret] "+&a" (ret), [s] "+&a" (s)
: [c] "d" (c)
: "cc", "memory", "0");
@@ -150,7 +150,7 @@ static __always_inline struct pt_regs fake_pt_regs(void)
regs.gprs[15] = current_stack_pointer;

asm volatile(
"basr %[psw_addr],0\n"
"basr %[psw_addr],0"
: [psw_addr] "=d" (regs.psw.addr));
return regs;
}

@@ -232,7 +232,7 @@ static noinline void test_unwind_kprobed_func(void)
asm volatile(
" nopr %%r7\n"
"test_unwind_kprobed_insn:\n"
" nopr %%r7\n"
" nopr %%r7"
:);
}
@@ -27,7 +27,7 @@ static void xor_xc_2(unsigned long bytes, unsigned long * __restrict p1,
"1: exrl %0,2f\n"
" j 3f\n"
"2: xc 0(1,%1),0(%2)\n"
"3:\n"
"3:"
: : "d" (bytes), "a" (p1), "a" (p2)
: "0", "cc", "memory");
}

@@ -53,7 +53,7 @@ static void xor_xc_3(unsigned long bytes, unsigned long * __restrict p1,
" j 4f\n"
"2: xc 0(1,%1),0(%2)\n"
"3: xc 0(1,%1),0(%3)\n"
"4:\n"
"4:"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3)
: : "0", "cc", "memory");
}

@@ -84,7 +84,7 @@ static void xor_xc_4(unsigned long bytes, unsigned long * __restrict p1,
"2: xc 0(1,%1),0(%2)\n"
"3: xc 0(1,%1),0(%3)\n"
"4: xc 0(1,%1),0(%4)\n"
"5:\n"
"5:"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4)
: : "0", "cc", "memory");
}

@@ -121,7 +121,7 @@ static void xor_xc_5(unsigned long bytes, unsigned long * __restrict p1,
"3: xc 0(1,%1),0(%3)\n"
"4: xc 0(1,%1),0(%4)\n"
"5: xc 0(1,%1),0(%5)\n"
"6:\n"
"6:"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4),
"+a" (p5)
: : "0", "cc", "memory");
@@ -41,7 +41,7 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz
" ex %1,0(1)\n"
" lg %1,0(%3)\n"
" lra %0,0(%0)\n"
" sturg %1,%0\n"
" sturg %1,%0"
: "+&a" (aligned), "+&a" (count), "=m" (tmp)
: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
: "cc", "memory", "1");

@@ -245,7 +245,7 @@ static inline unsigned long base_lra(unsigned long address)
unsigned long real;

asm volatile(
" lra %0,0(%1)\n"
" lra %0,0(%1)"
: "=d" (real) : "a" (address) : "cc");
return real;
}
@@ -1188,6 +1188,10 @@ static int __init pci_base_init(void)
if (rc)
goto out_find;

rc = zpci_fw_sysfs_init();
if (rc)
goto out_find;

s390_pci_initialized = 1;
return 0;
@@ -145,7 +145,7 @@ int zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
return -EIO;

asm volatile(
".insn rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]\n"
".insn rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]"
: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [iib] "Q" (*iib));

return 0;

@@ -442,7 +442,7 @@ EXPORT_SYMBOL_GPL(zpci_write_block);

static inline void __pciwb_mio(void)
{
asm volatile (".insn rre,0xb9d50000,0,0\n");
asm volatile (".insn rre,0xb9d50000,0,0");
}

void zpci_barrier(void)
@@ -41,6 +41,9 @@ zpci_attr(segment1, "0x%02x\n", pfip[1]);
zpci_attr(segment2, "0x%02x\n", pfip[2]);
zpci_attr(segment3, "0x%02x\n", pfip[3]);

#define ZPCI_FW_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

static ssize_t mio_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{

@@ -164,6 +167,13 @@ static ssize_t uid_is_unique_show(struct device *dev,
}
static DEVICE_ATTR_RO(uid_is_unique);

static ssize_t uid_checking_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", zpci_unique_uid ? 1 : 0);
}
ZPCI_FW_ATTR_RO(uid_checking);

/* analogous to smbios index */
static ssize_t index_show(struct device *dev,
struct device_attribute *attr, char *buf)

@@ -233,3 +243,18 @@ const struct attribute_group pfip_attr_group = {
.name = "pfip",
.attrs = pfip_attrs,
};

static struct attribute *clp_fw_attrs[] = {
&uid_checking_attr.attr,
NULL,
};

static struct attribute_group clp_fw_attr_group = {
.name = "clp",
.attrs = clp_fw_attrs,
};

int __init __zpci_fw_sysfs_init(void)
{
return sysfs_create_group(firmware_kobj, &clp_fw_attr_group);
}
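The hunk above registers a "clp" attribute group on the firmware kobject, so the new state should appear as /sys/firmware/clp/uid_checking (path derived from the code above, not spelled out in this excerpt). A hedged userspace sketch for reading it:

#include <stdio.h>

int main(void)
{
	char buf[8];
	FILE *f = fopen("/sys/firmware/clp/uid_checking", "r");	/* assumed path */

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("UID checking: %s", buf);	/* "1" when UIDs are guaranteed unique */
	fclose(f);
	return 0;
}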
@@ -51,7 +51,7 @@ void sclp_early_wait_irq(void)
" stg %[addr],%[psw_wait_addr]\n"
" stg %[addr],%[psw_ext_addr]\n"
" lpswe %[psw_wait]\n"
"0:\n"
"0:"
: [addr] "=&d" (addr),
[psw_wait_addr] "=Q" (psw_wait.addr),
[psw_ext_addr] "=Q" (get_lowcore()->external_new_psw.addr)

@@ -167,7 +167,7 @@ static inline void cmf_activate(void *area, unsigned int onoff)
asm volatile(
" lgr 1,%[r1]\n"
" lgr 2,%[mbo]\n"
" schm\n"
" schm"
:
: [r1] "d" ((unsigned long)onoff),
[mbo] "d" (virt_to_phys(area))
@@ -1316,23 +1316,34 @@ void ccw_device_schedule_recovery(void)
spin_unlock_irqrestore(&recovery_lock, flags);
}

static int purge_fn(struct device *dev, void *data)
static int purge_fn(struct subchannel *sch, void *data)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_dev_id *id = &cdev->private->dev_id;
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_device *cdev;

spin_lock_irq(cdev->ccwlock);
if (is_blacklisted(id->ssid, id->devno) &&
(cdev->private->state == DEV_STATE_OFFLINE) &&
(atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
id->devno);
spin_lock_irq(&sch->lock);
if (sch->st != SUBCHANNEL_TYPE_IO || !sch->schib.pmcw.dnv)
goto unlock;

if (!is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev))
goto unlock;

cdev = sch_get_cdev(sch);
if (cdev) {
if (cdev->private->state != DEV_STATE_OFFLINE)
goto unlock;

if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
goto unlock;
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
css_sched_sch_todo(sch, SCH_TODO_UNREG);
atomic_set(&cdev->private->onoff, 0);
}
spin_unlock_irq(cdev->ccwlock);

css_sched_sch_todo(sch, SCH_TODO_UNREG);
CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x%s\n", sch->schid.ssid,
sch->schib.pmcw.dev, cdev ? "" : " (no cdev)");

unlock:
spin_unlock_irq(&sch->lock);
/* Abort loop in case of pending signal. */
if (signal_pending(current))
return -EINTR;

@@ -1348,7 +1359,7 @@ static int purge_fn(struct device *dev, void *data)
int ccw_purge_blacklisted(void)
{
CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
for_each_subchannel_staged(purge_fn, NULL, NULL);
return 0;
}
@@ -253,11 +253,10 @@ static inline int __xsch(struct subchannel_id schid)
asm volatile(
" lgr 1,%[r1]\n"
" xsch\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (ccode)
CC_IPM(cc)
: CC_OUT(cc, ccode)
: [r1] "d" (r1)
: "cc", "1");
: CC_CLOBBER_LIST("1"));
return CC_TRANSFORM(ccode);
}
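For context on the __xsch() hunk above: the CC_IPM()/CC_OUT()/CC_CLOBBER_LIST()/CC_TRANSFORM() macros let the kernel use the compiler's condition-code flag output when available and fall back to the classic ipm/srl sequence otherwise; the bug was that __xsch() had only been half converted, so the returned condition code was always zero. A minimal sketch of the two variants (not the kernel's exact macro expansion; the flag-output version assumes a compiler that defines __GCC_ASM_FLAG_OUTPUTS__ for s390):

static inline int cc_via_ipm(unsigned long r1)
{
	int cc;

	asm volatile(
		"	lgr	1,%[r1]\n"
		"	xsch\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28"
		: [cc] "=&d" (cc)
		: [r1] "d" (r1)
		: "cc", "1");
	return cc;		/* condition code 0..3 extracted by hand */
}

#ifdef __GCC_ASM_FLAG_OUTPUTS__
static inline int cc_via_flag_output(unsigned long r1)
{
	int cc;

	asm volatile(
		"	lgr	1,%[r1]\n"
		"	xsch"
		: "=@cc" (cc)	/* the compiler reads the condition code itself */
		: [r1] "d" (r1)
		: "1");
	return cc;
}
#endif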