-rw-r--r--   arch/powerpc/include/asm/ppc-opcode.h      |  3
-rw-r--r--   arch/powerpc/include/asm/prom.h            |  3
-rw-r--r--   arch/powerpc/include/asm/word-at-a-time.h  | 57
-rw-r--r--   arch/powerpc/kernel/nvram_64.c             | 10
-rw-r--r--   arch/powerpc/kernel/process.c              |  7
-rw-r--r--   arch/powerpc/kernel/prom.c                 | 31
-rw-r--r--   arch/powerpc/kernel/swsusp_asm64.S         |  4
-rw-r--r--   arch/powerpc/net/bpf_jit.h                 | 11
-rw-r--r--   arch/powerpc/net/bpf_jit_64.S              |  9
-rw-r--r--   arch/powerpc/net/bpf_jit_comp.c            | 37
-rw-r--r--   arch/powerpc/platforms/chrp/nvram.c        |  4
-rw-r--r--   arch/powerpc/platforms/pseries/nvram.c     | 10
-rw-r--r--   drivers/macintosh/Kconfig                  |  1
-rw-r--r--   drivers/of/base.c                          | 31
-rw-r--r--   include/linux/of.h                         |  2
15 files changed, 130 insertions(+), 90 deletions(-)
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 442edee4b6aa..3132bb9365f3 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -203,6 +203,7 @@
 /* Misc instructions for BPF compiler */
 #define PPC_INST_LD                     0xe8000000
 #define PPC_INST_LHZ                    0xa0000000
+#define PPC_INST_LHBRX                  0x7c00062c
 #define PPC_INST_LWZ                    0x80000000
 #define PPC_INST_STD                    0xf8000000
 #define PPC_INST_STDU                   0xf8000001
@@ -221,7 +222,7 @@
 #define PPC_INST_MULLW                  0x7c0001d6
 #define PPC_INST_MULHWU                 0x7c000016
 #define PPC_INST_MULLI                  0x1c000000
-#define PPC_INST_DIVWU                  0x7c0003d6
+#define PPC_INST_DIVWU                  0x7c000396
 #define PPC_INST_RLWINM                 0x54000000
 #define PPC_INST_RLDICR                 0x78000004
 #define PPC_INST_SLW                    0x7c000030
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 7d0c7f3a7171..bf09e5a065b8 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -44,9 +44,6 @@ void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,
 
 extern void kdump_move_device_tree(void);
 
-/* cache lookup */
-struct device_node *of_find_next_cache_node(struct device_node *np);
-
 #ifdef CONFIG_NUMA
 extern int of_node_to_nid(struct device_node *device);
 #else
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index 213a5f2b0717..9a5c928bb3c6 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -42,13 +42,6 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
 
 #else
 
-/*
- * This is largely generic for little-endian machines, but the
- * optimal byte mask counting is probably going to be something
- * that is architecture-specific. If you have a reliably fast
- * bit count instruction, that might be better than the multiply
- * and shift, for example.
- */
 struct word_at_a_time {
         const unsigned long one_bits, high_bits;
 };
@@ -57,19 +50,32 @@ struct word_at_a_time {
 
 #ifdef CONFIG_64BIT
 
-/*
- * Jan Achrenius on G+: microoptimized version of
- * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
- * that works for the bytemasks without having to
- * mask them first.
- */
-static inline long count_masked_bytes(unsigned long mask)
+/* Alan Modra's little-endian strlen tail for 64-bit */
+#define create_zero_mask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
 {
-        return mask*0x0001020304050608ul >> 56;
+        unsigned long leading_zero_bits;
+        long trailing_zero_bit_mask;
+
+        asm ("addi %1,%2,-1\n\t"
+             "andc %1,%1,%2\n\t"
+             "popcntd %0,%1"
+             : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
+             : "r" (mask));
+        return leading_zero_bits >> 3;
 }
 
 #else  /* 32-bit case */
 
+/*
+ * This is largely generic for little-endian machines, but the
+ * optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+
 /* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
 static inline long count_masked_bytes(long mask)
 {
@@ -79,6 +85,17 @@ static inline long count_masked_bytes(long mask)
         return a & mask;
 }
 
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+        bits = (bits - 1) & ~bits;
+        return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+        return count_masked_bytes(mask);
+}
+
 #endif
 
 /* Return nonzero if it has a zero */
@@ -94,19 +111,9 @@ static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits,
         return bits;
 }
 
-static inline unsigned long create_zero_mask(unsigned long bits)
-{
-        bits = (bits - 1) & ~bits;
-        return bits >> 7;
-}
-
 /* The mask we created is directly usable as a bytemask */
 #define zero_bytemask(mask) (mask)
 
-static inline unsigned long find_zero(unsigned long mask)
-{
-        return count_masked_bytes(mask);
-}
 #endif
 
 #endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 8213ee1eb05a..fd82c289ab1c 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -223,9 +223,13 @@ static int __init nvram_write_header(struct nvram_partition * part)
 {
         loff_t tmp_index;
         int rc;
-
+        struct nvram_header phead;
+
+        memcpy(&phead, &part->header, NVRAM_HEADER_LEN);
+        phead.length = cpu_to_be16(phead.length);
+
         tmp_index = part->index;
-        rc = ppc_md.nvram_write((char *)&part->header, NVRAM_HEADER_LEN, &tmp_index);
+        rc = ppc_md.nvram_write((char *)&phead, NVRAM_HEADER_LEN, &tmp_index);
 
         return rc;
 }
@@ -505,6 +509,8 @@ int __init nvram_scan_partitions(void)
 
                 memcpy(&phead, header, NVRAM_HEADER_LEN);
 
+                phead.length = be16_to_cpu(phead.length);
+
                 err = 0;
                 c_sum = nvram_checksum(&phead);
                 if (c_sum != phead.checksum) {
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 4d42c4de8b9b..75c2d1009985 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -597,12 +597,13 @@ struct task_struct *__switch_to(struct task_struct *prev,
         struct task_struct *new)
 {
         struct thread_struct *new_thread, *old_thread;
-        unsigned long flags;
         struct task_struct *last;
 #ifdef CONFIG_PPC_BOOK3S_64
         struct ppc64_tlb_batch *batch;
 #endif
 
+        WARN_ON(!irqs_disabled());
+
         /* Back up the TAR across context switches.
          * Note that the TAR is not available for use in the kernel.  (To
          * provide this, the TAR should be backed up/restored on exception
@@ -722,8 +723,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
         }
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
-        local_irq_save(flags);
-
         /*
          * We can't take a PMU exception inside _switch() since there is a
          * window where the kernel stack SLB and the kernel stack are out
@@ -743,8 +742,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
         }
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
-        local_irq_restore(flags);
-
         return last;
 }
 
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index b7634ce41dbc..4432fd86a6d2 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -761,37 +761,6 @@ void __init early_init_devtree(void *params)
  *******/
 
 /**
- * of_find_next_cache_node - Find a node's subsidiary cache
- * @np: node of type "cpu" or "cache"
- *
- * Returns a node pointer with refcount incremented, use
- * of_node_put() on it when done.  Caller should hold a reference
- * to np.
- */
-struct device_node *of_find_next_cache_node(struct device_node *np)
-{
-        struct device_node *child;
-        const phandle *handle;
-
-        handle = of_get_property(np, "l2-cache", NULL);
-        if (!handle)
-                handle = of_get_property(np, "next-level-cache", NULL);
-
-        if (handle)
-                return of_find_node_by_phandle(*handle);
-
-        /* OF on pmac has nodes instead of properties named "l2-cache"
-         * beneath CPU nodes.
-         */
-        if (!strcmp(np->type, "cpu"))
-                for_each_child_of_node(np, child)
-                        if (!strcmp(child->type, "cache"))
-                                return child;
-
-        return NULL;
-}
-
-/**
  * of_get_ibm_chip_id - Returns the IBM "chip-id" of a device
  * @np: device node of the device
  *
diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
index 22045984835f..988f38dced0f 100644
--- a/arch/powerpc/kernel/swsusp_asm64.S
+++ b/arch/powerpc/kernel/swsusp_asm64.S
@@ -114,7 +114,9 @@ _GLOBAL(swsusp_arch_suspend)
         SAVE_SPECIAL(MSR)
         SAVE_SPECIAL(XER)
 #ifdef CONFIG_PPC_BOOK3S_64
+BEGIN_FW_FTR_SECTION
         SAVE_SPECIAL(SDR1)
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
 #else
         SAVE_SPR(TCR)
 
@@ -231,7 +233,9 @@ nothing_to_copy:
         /* can't use RESTORE_SPECIAL(MSR) */
         ld      r0, SL_MSR(r11)
         mtmsrd  r0, 0
+BEGIN_FW_FTR_SECTION
         RESTORE_SPECIAL(SDR1)
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
 #else
         /* Restore SPRG1, be used to save paca */
         ld      r0, SL_SPRG1(r11)
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 8a5dfaf5c6b7..9aee27c582dc 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -39,6 +39,7 @@
 #define r_X             5
 #define r_addr          6
 #define r_scratch1      7
+#define r_scratch2      8
 #define r_D             14
 #define r_HL            15
 #define r_M             16
@@ -92,6 +93,8 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
                                      ___PPC_RA(base) | IMM_L(i))
 #define PPC_LHZ(r, base, i)     EMIT(PPC_INST_LHZ | ___PPC_RT(r) |           \
                                      ___PPC_RA(base) | IMM_L(i))
+#define PPC_LHBRX(r, base, b)   EMIT(PPC_INST_LHBRX | ___PPC_RT(r) |         \
+                                     ___PPC_RA(base) | ___PPC_RB(b))
 /* Convenience helpers for the above with 'far' offsets: */
 #define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i);     \
                 else {  PPC_ADDIS(r, base, IMM_HA(i));                        \
@@ -186,6 +189,14 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
                                 PPC_ORI(d, d, (uintptr_t)(i) & 0xffff);       \
                 } } while (0);
 
+#define PPC_LHBRX_OFFS(r, base, i) \
+                do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0)
+#ifdef __LITTLE_ENDIAN__
+#define PPC_NTOHS_OFFS(r, base, i)      PPC_LHBRX_OFFS(r, base, i)
+#else
+#define PPC_NTOHS_OFFS(r, base, i)      PPC_LHZ_OFFS(r, base, i)
+#endif
+
 static inline bool is_nearbranch(int offset)
 {
         return (offset < 32768) && (offset >= -32768);
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index 7d3a3b5619a2..e76eba74d9da 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -43,8 +43,11 @@ sk_load_word_positive_offset:
         cmpd    r_scratch1, r_addr
         blt     bpf_slow_path_word
         /* Nope, just hitting the header.  cr0 here is eq or gt! */
+#ifdef __LITTLE_ENDIAN__
+        lwbrx   r_A, r_D, r_addr
+#else
         lwzx    r_A, r_D, r_addr
-        /* When big endian we don't need to byteswap. */
+#endif
         blr     /* Return success, cr0 != LT */
 
         .globl  sk_load_half
@@ -56,7 +59,11 @@ sk_load_half_positive_offset:
         subi    r_scratch1, r_HL, 2
         cmpd    r_scratch1, r_addr
         blt     bpf_slow_path_half
+#ifdef __LITTLE_ENDIAN__
+        lhbrx   r_A, r_D, r_addr
+#else
         lhzx    r_A, r_D, r_addr
+#endif
         blr
 
         .globl  sk_load_byte
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index bf56e33f8257..475d4f26fc6e 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -17,14 +17,8 @@
 
 #include "bpf_jit.h"
 
-#ifndef __BIG_ENDIAN
-/* There are endianness assumptions herein. */
-#error "Little-endian PPC not supported in BPF compiler"
-#endif
-
 int bpf_jit_enable __read_mostly;
 
-
 static inline void bpf_flush_icache(void *start, void *end)
 {
         smp_wmb();
@@ -193,6 +187,26 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
                                 PPC_MUL(r_A, r_A, r_scratch1);
                         }
                         break;
+                case BPF_S_ALU_MOD_X: /* A %= X; */
+                        ctx->seen |= SEEN_XREG;
+                        PPC_CMPWI(r_X, 0);
+                        if (ctx->pc_ret0 != -1) {
+                                PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
+                        } else {
+                                PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
+                                PPC_LI(r_ret, 0);
+                                PPC_JMP(exit_addr);
+                        }
+                        PPC_DIVWU(r_scratch1, r_A, r_X);
+                        PPC_MUL(r_scratch1, r_X, r_scratch1);
+                        PPC_SUB(r_A, r_A, r_scratch1);
+                        break;
+                case BPF_S_ALU_MOD_K: /* A %= K; */
+                        PPC_LI32(r_scratch2, K);
+                        PPC_DIVWU(r_scratch1, r_A, r_scratch2);
+                        PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
+                        PPC_SUB(r_A, r_A, r_scratch1);
+                        break;
                 case BPF_S_ALU_DIV_X: /* A /= X; */
                         ctx->seen |= SEEN_XREG;
                         PPC_CMPWI(r_X, 0);
@@ -346,18 +360,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
                         break;
 
                 /*** Ancillary info loads ***/
-
-                /* None of the BPF_S_ANC* codes appear to be passed by
-                 * sk_chk_filter().  The interpreter and the x86 BPF
-                 * compiler implement them so we do too -- they may be
-                 * planted in future.
-                 */
                 case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
                         BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                   protocol) != 2);
-                        PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
-                                                          protocol));
-                        /* ntohs is a NOP with BE loads. */
+                        PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
+                                                            protocol));
                         break;
                 case BPF_S_ANC_IFINDEX:
                         PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c
index d3ceff04ffc7..9ef8cc3378d0 100644
--- a/arch/powerpc/platforms/chrp/nvram.c
+++ b/arch/powerpc/platforms/chrp/nvram.c
@@ -66,7 +66,7 @@ static void chrp_nvram_write(int addr, unsigned char val)
 void __init chrp_nvram_init(void)
 {
         struct device_node *nvram;
-        const unsigned int *nbytes_p;
+        const __be32 *nbytes_p;
         unsigned int proplen;
 
         nvram = of_find_node_by_type(NULL, "nvram");
@@ -79,7 +79,7 @@ void __init chrp_nvram_init(void)
                 return;
         }
 
-        nvram_size = *nbytes_p;
+        nvram_size = be32_to_cpup(nbytes_p);
 
         printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size);
         of_node_put(nvram);
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index d276cd3edd8f..057fc894be51 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -429,9 +429,6 @@ static int __init pseries_nvram_init_os_partition(struct nvram_os_partition
         loff_t p;
         int size;
 
-        /* Scan nvram for partitions */
-        nvram_scan_partitions();
-
         /* Look for ours */
         p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size);
 
@@ -795,6 +792,9 @@ static int __init pseries_nvram_init_log_partitions(void)
 {
         int rc;
 
+        /* Scan nvram for partitions */
+        nvram_scan_partitions();
+
         rc = pseries_nvram_init_os_partition(&rtas_log_partition);
         nvram_init_oops_partition(rc == 0);
         return 0;
@@ -804,7 +804,7 @@ machine_arch_initcall(pseries, pseries_nvram_init_log_partitions);
 int __init pSeries_nvram_init(void)
 {
         struct device_node *nvram;
-        const unsigned int *nbytes_p;
+        const __be32 *nbytes_p;
         unsigned int proplen;
 
         nvram = of_find_node_by_type(NULL, "nvram");
@@ -817,7 +817,7 @@ int __init pSeries_nvram_init(void)
                 return -EIO;
         }
 
-        nvram_size = *nbytes_p;
+        nvram_size = be32_to_cpup(nbytes_p);
 
         nvram_fetch = rtas_token("nvram-fetch");
         nvram_store = rtas_token("nvram-store");
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 696238b9f0f7..d26a312f117a 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -103,6 +103,7 @@ config ADB_PMU_LED_IDE
         bool "Use front LED as IDE LED by default"
         depends on ADB_PMU_LED
         depends on LEDS_CLASS
+        depends on IDE_GD_ATA
         select LEDS_TRIGGERS
         select LEDS_TRIGGER_IDE_DISK
         help
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 865d3f66c86b..b2cee3db5ceb 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1884,3 +1884,34 @@ int of_device_is_stdout_path(struct device_node *dn)
         return of_stdout == dn;
 }
 EXPORT_SYMBOL_GPL(of_device_is_stdout_path);
+
+/**
+ * of_find_next_cache_node - Find a node's subsidiary cache
+ * @np: node of type "cpu" or "cache"
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done.  Caller should hold a reference
+ * to np.
+ */
+struct device_node *of_find_next_cache_node(const struct device_node *np)
+{
+        struct device_node *child;
+        const phandle *handle;
+
+        handle = of_get_property(np, "l2-cache", NULL);
+        if (!handle)
+                handle = of_get_property(np, "next-level-cache", NULL);
+
+        if (handle)
+                return of_find_node_by_phandle(be32_to_cpup(handle));
+
+        /* OF on pmac has nodes instead of properties named "l2-cache"
+         * beneath CPU nodes.
+         */
+        if (!strcmp(np->type, "cpu"))
+                for_each_child_of_node(np, child)
+                        if (!strcmp(child->type, "cache"))
+                                return child;
+
+        return NULL;
+}
diff --git a/include/linux/of.h b/include/linux/of.h
index f95aee391e30..c08c07e249b3 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -226,6 +226,8 @@ static inline int of_get_child_count(const struct device_node *np)
         return num;
 }
 
+/* cache lookup */
+extern struct device_node *of_find_next_cache_node(const struct device_node *);
 extern struct device_node *of_find_node_with_property(
         struct device_node *from, const char *prop_name);
 #define for_each_node_with_property(dn, prop_name) \
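Note on the word-at-a-time change above: the new 64-bit little-endian find_zero() locates the first zero byte of a word by isolating the bits below the lowest set bit of the zero-byte mask (addi/andc) and counting them with popcntd. The sketch below is a portable userspace model of that arithmetic, not the kernel code: __builtin_popcountll stands in for popcntd, the helper names and constants are simplified stand-ins, and it assumes a little-endian host so that byte 0 of the array lands in the low bits of the word.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ONE_BITS  0x0101010101010101ULL
#define HIGH_BITS 0x8080808080808080ULL

/* 0x80 ends up set in (at least) the first zero byte of val */
static uint64_t zero_bytemask(uint64_t val)
{
        return (val - ONE_BITS) & ~val & HIGH_BITS;
}

/* index of the first (lowest-addressed) zero byte */
static unsigned long find_zero(uint64_t mask)
{
        uint64_t below_lowest_set = (mask - 1) & ~mask;      /* addi + andc */
        return __builtin_popcountll(below_lowest_set) >> 3;  /* popcntd, /8 */
}

int main(void)
{
        const char s[8] = { 'l', 'i', 'n', 'u', 'x', 0, 'x', 0 };
        uint64_t word;

        memcpy(&word, s, 8);                    /* little-endian load */
        printf("first zero byte at offset %lu\n",
               find_zero(zero_bytemask(word))); /* prints 5 */
        return 0;
}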
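On the bpf_jit_comp.c hunk above: powerpc has no modulo instruction, so the new BPF_S_ALU_MOD_X/MOD_K cases synthesize A %= X as divide, multiply, subtract, with a guard that makes the filter return 0 when X is zero. A minimal C model of the emitted sequence follows; the function name bpf_mod_x is invented for illustration and is not part of the JIT.

#include <stdio.h>
#include <stdint.h>

static uint32_t bpf_mod_x(uint32_t a, uint32_t x, int *ret0)
{
        if (x == 0) {           /* PPC_CMPWI(r_X, 0) + branch to "return 0" */
                *ret0 = 1;
                return 0;
        }
        uint32_t q = a / x;     /* PPC_DIVWU(r_scratch1, r_A, r_X)          */
        uint32_t p = x * q;     /* PPC_MUL(r_scratch1, r_X, r_scratch1)     */
        return a - p;           /* PPC_SUB(r_A, r_A, r_scratch1)            */
}

int main(void)
{
        int ret0 = 0;
        printf("%u\n", bpf_mod_x(17, 5, &ret0));   /* 2 */
        printf("%u\n", bpf_mod_x(17, 0, &ret0));   /* 0, ret0 set */
        return 0;
}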
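On PPC_NTOHS_OFFS in bpf_jit.h above: it expands to a plain halfword load on big-endian and to a byte-reversed load (lhbrx) on little-endian, so A holds ntohs(skb->protocol) either way. A rough C equivalent, assuming GCC/Clang builtins with __builtin_bswap16 standing in for lhbrx:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint16_t load_ntohs(const uint8_t *p)
{
        uint16_t v;
        memcpy(&v, p, 2);
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        return __builtin_bswap16(v);    /* lhbrx effect */
#else
        return v;                       /* lhzx: already network order */
#endif
}

int main(void)
{
        /* 0x0800 (ETH_P_IP) stored in network byte order */
        const uint8_t proto[2] = { 0x08, 0x00 };
        printf("0x%04x\n", (unsigned)load_ntohs(proto));  /* 0x0800 either way */
        return 0;
}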