author	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-12 00:34:19 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-12 00:34:19 -0500
commit	66a173b926891023e34e78cb32f4681d19777e01 (patch)
tree	e6018f50fbceea7c07e6e27368ee817f9adb34f2 /arch/powerpc/net
parent	11db81a59d0b2e563e30512cd76f23d0db384780 (diff)
parent	0c4888ef1d8a8b82c29075ce7e257ff795af15c7 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Benjamin Herrenschmidt:
 "The bulk of this is LE updates. One should now be able to build an LE
  kernel and even run some things in it.

  I'm still sitting on a handful of patches to enable the new ABI that
  I *might* still send this merge window around, but due to the
  uncertainty (they are pretty fresh) I want to keep them separate.

  Other notable changes are some infrastructure bits to better handle
  PCI pass-through under KVM, some bits and pieces added to the new
  PowerNV platform support such as access to the CPU SCOM bus via
  sysfs, and support for EEH error handling on PHB3 (Power8 PCIe).

  We also grew arch_get_random_long() for both pseries and powernv when
  running on P7+ and P8, exploiting the HW rng.

  And finally various embedded updates from Freescale"

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (154 commits)
  powerpc: Fix fatal SLB miss when restoring PPR
  powerpc/powernv: Reserve the correct PE number
  powerpc/powernv: Add PE to its own PELTV
  powerpc/powernv: Add support for indirect XSCOM via debugfs
  powerpc/scom: Improve debugfs interface
  powerpc/scom: Enable 64-bit addresses
  powerpc/boot: Properly handle the base "of" boot wrapper
  powerpc/bpf: Support MOD operation
  powerpc/bpf: Fix DIVWU instruction opcode
  of: Move definition of of_find_next_cache_node into common code.
  powerpc: Remove big endianness assumption in of_find_next_cache_node
  powerpc/tm: Remove interrupt disable in __switch_to()
  powerpc: word-at-a-time optimization for 64-bit Little Endian
  powerpc/bpf: BPF JIT compiler for 64-bit Little Endian
  powerpc: Only save/restore SDR1 if in hypervisor mode
  powerpc/pmu: Fix ADB_PMU_LED_IDE dependencies
  powerpc/nvram: Fix endian issue when using the partition length
  powerpc/nvram: Fix endian issue when reading the NVRAM size
  powerpc/nvram: Scan partitions only once
  powerpc/mpc512x: remove unnecessary #if
  ...
Diffstat (limited to 'arch/powerpc/net')
-rw-r--r--	arch/powerpc/net/bpf_jit.h	11
-rw-r--r--	arch/powerpc/net/bpf_jit_64.S	9
-rw-r--r--	arch/powerpc/net/bpf_jit_comp.c	37
3 files changed, 41 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 8a5dfaf5c6b7..9aee27c582dc 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -39,6 +39,7 @@
 #define r_X		5
 #define r_addr		6
 #define r_scratch1	7
+#define r_scratch2	8
 #define r_D		14
 #define r_HL		15
 #define r_M		16
@@ -92,6 +93,8 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
 				     ___PPC_RA(base) | IMM_L(i))
 #define PPC_LHZ(r, base, i)	EMIT(PPC_INST_LHZ | ___PPC_RT(r) |	      \
 				     ___PPC_RA(base) | IMM_L(i))
+#define PPC_LHBRX(r, base, b)	EMIT(PPC_INST_LHBRX | ___PPC_RT(r) |	      \
+				     ___PPC_RA(base) | ___PPC_RB(b))
 /* Convenience helpers for the above with 'far' offsets: */
 #define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i);     \
 		else {	PPC_ADDIS(r, base, IMM_HA(i));			      \
@@ -186,6 +189,14 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
 			PPC_ORI(d, d, (uintptr_t)(i) & 0xffff);		      \
 		} } while (0);
 
+#define PPC_LHBRX_OFFS(r, base, i) \
+		do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0)
+#ifdef __LITTLE_ENDIAN__
+#define PPC_NTOHS_OFFS(r, base, i)	PPC_LHBRX_OFFS(r, base, i)
+#else
+#define PPC_NTOHS_OFFS(r, base, i)	PPC_LHZ_OFFS(r, base, i)
+#endif
+
 static inline bool is_nearbranch(int offset)
 {
 	return (offset < 32768) && (offset >= -32768);
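
What the new PPC_NTOHS_OFFS macro buys is easiest to see in C. An illustrative
sketch (not kernel code) of the equivalent operation: a 16-bit network-order
field is returned in host order, which costs nothing extra on big-endian PPC
(the old PPC_LHZ_OFFS path) and a single byte-reversing lhbrx load on
little-endian PPC.

    #include <stdint.h>

    /* Illustration only: endian-neutral equivalent of PPC_NTOHS_OFFS.
     * Network data stores the high byte first, so a byte-wise read
     * yields the host-order value on either endianness.  The JIT gets
     * the same result in one load: lhz on BE (no swap needed),
     * lhbrx on LE (swap folded into the load). */
    static inline uint16_t ntohs_offs(const void *base, long off)
    {
            const uint8_t *p = (const uint8_t *)base + off;

            return (uint16_t)((p[0] << 8) | p[1]);
    }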
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index 7d3a3b5619a2..e76eba74d9da 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -43,8 +43,11 @@ sk_load_word_positive_offset:
 	cmpd	r_scratch1, r_addr
 	blt	bpf_slow_path_word
 	/* Nope, just hitting the header.  cr0 here is eq or gt! */
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	r_A, r_D, r_addr
+#else
 	lwzx	r_A, r_D, r_addr
-	/* When big endian we don't need to byteswap. */
+#endif
 	blr	/* Return success, cr0 != LT */
 
 	.globl	sk_load_half
@@ -56,7 +59,11 @@ sk_load_half_positive_offset:
 	subi	r_scratch1, r_HL, 2
 	cmpd	r_scratch1, r_addr
 	blt	bpf_slow_path_half
+#ifdef __LITTLE_ENDIAN__
+	lhbrx	r_A, r_D, r_addr
+#else
 	lhzx	r_A, r_D, r_addr
+#endif
 	blr
 
 	.globl	sk_load_byte
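
The guard in front of each load above is the JIT's fast path: the value is
read straight out of the linear skb header only when it fits entirely;
otherwise control branches to the slow path, which in this file falls back to
skb_copy_bits(). A rough C rendering of the half-word case, with hypothetical
names:

    #include <stdint.h>

    /* Sketch of sk_load_half's fast path (names are hypothetical).
     * r_HL holds the header length and r_addr the requested offset;
     * the "subi; cmpd; blt" sequence rejects any load that would run
     * past the header, leaving those to the slow path. */
    static int load_half_fast(const uint8_t *hdr, int64_t header_len,
                              int64_t offset, uint16_t *out)
    {
            if (header_len - 2 < offset)    /* subi; cmpd; blt */
                    return -1;              /* take bpf_slow_path_half */

            /* lhzx on BE / lhbrx on LE: a big-endian 16-bit read either way */
            *out = (uint16_t)((hdr[offset] << 8) | hdr[offset + 1]);
            return 0;
    }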
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 2345bdb4d917..ac3c2a10dafd 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -17,14 +17,8 @@
 
 #include "bpf_jit.h"
 
-#ifndef __BIG_ENDIAN
-/* There are endianness assumptions herein. */
-#error "Little-endian PPC not supported in BPF compiler"
-#endif
-
 int bpf_jit_enable __read_mostly;
 
-
 static inline void bpf_flush_icache(void *start, void *end)
 {
 	smp_wmb();
@@ -193,6 +187,26 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 				PPC_MUL(r_A, r_A, r_scratch1);
 			}
 			break;
+		case BPF_S_ALU_MOD_X: /* A %= X; */
+			ctx->seen |= SEEN_XREG;
+			PPC_CMPWI(r_X, 0);
+			if (ctx->pc_ret0 != -1) {
+				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
+			} else {
+				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
+				PPC_LI(r_ret, 0);
+				PPC_JMP(exit_addr);
+			}
+			PPC_DIVWU(r_scratch1, r_A, r_X);
+			PPC_MUL(r_scratch1, r_X, r_scratch1);
+			PPC_SUB(r_A, r_A, r_scratch1);
+			break;
+		case BPF_S_ALU_MOD_K: /* A %= K; */
+			PPC_LI32(r_scratch2, K);
+			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
+			PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
+			PPC_SUB(r_A, r_A, r_scratch1);
+			break;
 		case BPF_S_ALU_DIV_X: /* A /= X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_CMPWI(r_X, 0);
@@ -346,18 +360,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			break;
 
 		/*** Ancillary info loads ***/
-
-		/* None of the BPF_S_ANC* codes appear to be passed by
-		 * sk_chk_filter().  The interpreter and the x86 BPF
-		 * compiler implement them so we do too -- they may be
-		 * planted in future.
-		 */
 		case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 						  protocol) != 2);
-			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
-							  protocol));
-			/* ntohs is a NOP with BE loads. */
+			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
+							    protocol));
 			break;
 		case BPF_S_ANC_IFINDEX:
 			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
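
A note on the new MOD cases: PowerPC of this vintage has no integer remainder
instruction, so the JIT composes A %= X from divide, multiply and subtract. A
zero divisor makes the generated filter return 0, matching the interpreter's
behaviour; the PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12) branch hops over the
two-instruction return-0 stub when X is non-zero (PowerPC instructions are 4
bytes each). In C, roughly (assumed names, not the kernel's):

    #include <stdint.h>

    /* Sketch of the sequence emitted for BPF_S_ALU_MOD_X (assumed names).
     * divwu/mullw/subf compute the remainder; the compare-and-branch in
     * front reproduces BPF's divide-by-zero rule of returning 0. */
    static uint32_t alu_mod_x(uint32_t a, uint32_t x, int *filter_ret0)
    {
            if (x == 0) {                   /* PPC_CMPWI(r_X, 0) + branch */
                    *filter_ret0 = 1;       /* PPC_LI(r_ret, 0); PPC_JMP(exit) */
                    return 0;
            }

            uint32_t q = a / x;             /* PPC_DIVWU(r_scratch1, r_A, r_X) */
            return a - q * x;               /* PPC_MUL + PPC_SUB back into r_A */
    }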