241 files changed, 5291 insertions, 5555 deletions
diff --git a/Documentation/riscv/pmu.txt b/Documentation/riscv/pmu.txt
new file mode 100644
index 000000000000..b29f03a6d82f
--- /dev/null
+++ b/Documentation/riscv/pmu.txt
@@ -0,0 +1,249 @@
Supporting PMUs on RISC-V platforms
===================================
Alan Kao <alankao@andestech.com>, Mar 2018

Introduction
------------

As of this writing, the perf_event-related features mentioned in the RISC-V
Privileged ISA specification, version 1.10, are as follows
(please check the manual for more details):

* [m|s]counteren
* mcycle[h], cycle[h]
* minstret[h], instret[h]
* mhpmeventx, mhpmcounterx[h]

With only this set of features, porting perf requires a lot of work, due to
the lack of the following general architectural performance monitoring
features:

* Enabling/Disabling counters
  Counters are just free-running all the time in our case.
* Interrupt caused by counter overflow
  There is no such feature in the spec.
* Interrupt indicator
  It is not possible to have many interrupt ports for all counters, so an
  interrupt indicator is required for software to tell which counter has
  just overflowed.
* Writing to counters
  There will be an SBI call to support this, since the kernel cannot modify
  the counters [1]. Alternatively, some vendors are considering hardware
  extensions that would let M-S-U model machines write counters directly.

This document aims to provide developers with a quick guide on supporting
their PMUs in the kernel. The following sections briefly explain perf's
mechanisms and the work that remains to be done.

You may check previous discussions here [1][2]. Also, it might be helpful
to check the appendix for related kernel structures.


1. Initialization
-----------------

*riscv_pmu* is a global pointer of type *struct riscv_pmu*, which contains
various methods according to perf's internal convention and PMU-specific
parameters. One should declare such an instance to represent the PMU. By
default, *riscv_pmu* points to a constant structure *riscv_base_pmu*, which
has very basic support for a baseline QEMU model.

Developers can then either assign their instance's pointer to *riscv_pmu*,
so that the minimal and already-implemented logic can be leveraged, or
provide their own *riscv_init_platform_pmu* implementation.

In other words, the existing sources of *riscv_base_pmu* merely provide a
reference implementation. Developers can flexibly decide how many parts they
want to reuse, and in the most extreme case, they can customize every function
according to their needs.
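
For example, a platform port might override the default along the lines of
the following sketch. This is only an illustration: the member names used
below (*pmu*, *num_counters*) and the object *vendor_perf_pmu* are
assumptions, so check *struct riscv_pmu* in
arch/riscv/include/asm/perf_event.h for the real layout.

    /* Hypothetical vendor PMU instance; the field names are illustrative. */
    static const struct riscv_pmu vendor_pmu = {
            .pmu = &vendor_perf_pmu,    /* struct pmu with event_init etc. */
            .num_counters = 2,          /* e.g. only cycle and instret */
    };

    int __init riscv_init_platform_pmu(void)
    {
            /* Replace the default assignment to riscv_base_pmu. */
            riscv_pmu = &vendor_pmu;
            return 0;
    }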


2. Event Initialization
-----------------------

When a user launches a perf command to monitor some events, it is first
interpreted by the userspace perf tool into multiple *perf_event_open*
system calls, and then each of them calls into the body of the *event_init*
member function that was assigned in the previous step. In *riscv_base_pmu*'s
case, it is *riscv_event_init*.

The main purpose of this function is to translate the event provided by the
user into a bitmap, so that HW-related control registers or counters can
directly be manipulated. The translation is based on the mappings and methods
provided in *riscv_pmu*.

Note that some features can be set up at this stage as well:

(1) interrupt setting, which is stated in the next section;
(2) privilege level setting (user space only, kernel space only, both);
(3) destructor setting. Normally it is sufficient to apply *riscv_destroy_event*;
(4) tweaks for non-sampling events, which will be utilized by functions such
    as *perf_adjust_period*, usually something like the following:

    if (!is_sampling_event(event)) {
            hwc->sample_period = x86_pmu.max_period;
            hwc->last_period = hwc->sample_period;
            local64_set(&hwc->period_left, hwc->sample_period);
    }

In the case of *riscv_base_pmu*, only (3) is provided for now.
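
A skeletal *event_init* might therefore look like the sketch below. The
mapping helpers (*vendor_map_hw_event*, *vendor_map_cache_event*) and the
destructor are hypothetical names standing in for the mappings mentioned
above.

    static int vendor_event_init(struct perf_event *event)
    {
            struct hw_perf_event *hwc = &event->hw;
            int code;

            switch (event->attr.type) {
            case PERF_TYPE_HARDWARE:
                    code = vendor_map_hw_event(event->attr.config);
                    break;
            case PERF_TYPE_HW_CACHE:
                    code = vendor_map_cache_event(event->attr.config);
                    break;
            default:
                    return -ENOENT; /* let other PMUs try this event */
            }
            if (code < 0)
                    return code;

            hwc->config = code;     /* bits used to program the hardware */
            event->destroy = vendor_destroy_event;  /* cf. riscv_destroy_event */
            return 0;
    }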


3. Interrupt
------------

3.1. Interrupt Initialization

This often occurs at the beginning of the *event_init* method. In common
practice, this is a code segment like the following (taken from x86):
| 98 | |||
| 99 | int x86_reserve_hardware(void) | ||
| 100 | { | ||
| 101 | int err = 0; | ||
| 102 | |||
| 103 | if (!atomic_inc_not_zero(&pmc_refcount)) { | ||
| 104 | mutex_lock(&pmc_reserve_mutex); | ||
| 105 | if (atomic_read(&pmc_refcount) == 0) { | ||
| 106 | if (!reserve_pmc_hardware()) | ||
| 107 | err = -EBUSY; | ||
| 108 | else | ||
| 109 | reserve_ds_buffers(); | ||
| 110 | } | ||
| 111 | if (!err) | ||
| 112 | atomic_inc(&pmc_refcount); | ||
| 113 | mutex_unlock(&pmc_reserve_mutex); | ||
| 114 | } | ||
| 115 | |||
| 116 | return err; | ||
| 117 | } | ||

The magic is in *reserve_pmc_hardware*, which usually performs atomic
operations to make the implemented IRQ handler accessible through some global
function pointer. *release_pmc_hardware* serves the opposite purpose, and is
used in the event destructors mentioned in the previous section.

(Note: in the implementations across all architectures, the *reserve/release*
pair always deals with IRQ settings, so the name *pmc_hardware* is somewhat
misleading. It does NOT deal with the binding between an event and a physical
counter, which will be introduced in the next section.)
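
A minimal sketch of such a reserve/release pair is shown below, assuming the
overflow handler is published through a single global pointer. The names
(*pmu_irq_handler*, *vendor_overflow_handler*) are hypothetical.

    static irqreturn_t (*pmu_irq_handler)(int irq, void *dev);

    static int reserve_pmu_hardware(void)
    {
            /* Atomically claim the PMU IRQ by publishing our handler. */
            if (cmpxchg(&pmu_irq_handler, NULL, vendor_overflow_handler))
                    return -EBUSY;  /* someone else already owns it */
            return 0;
    }

    static void release_pmu_hardware(void)
    {
            WRITE_ONCE(pmu_irq_handler, NULL);
    }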

3.2. IRQ Structure

Basically, an IRQ handler runs the following pseudo code:

    for each hardware counter that triggered this overflow

        get the event of this counter

        // the following two steps are defined as *read()*;
        // check the section Reading/Writing Counters for details.
        count the delta value since the previous interrupt
        update the event->count (number of events that occurred) by adding
        delta, and event->hw.period_left by subtracting delta

        if the event overflows
            sample data
            set the counter appropriately for the next overflow

            if the event overflows again too frequently
                throttle this event
            fi
        fi

    end for

However, as of this writing, none of the RISC-V implementations have designed
an interrupt for perf, so the details remain to be completed in the future.
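
To make the flow above concrete, an overflow handler could be structured as
in the sketch below. This is purely illustrative, since no such interrupt
exists yet; the overflow CSR, the per-cpu variable *vendor_hw_events*, the
constant RISCV_MAX_COUNTERS, and every *vendor_* helper are assumptions.

    static irqreturn_t vendor_pmu_handle_irq(int irq, void *dev)
    {
            struct riscv_hw_events *cpuc = this_cpu_ptr(&vendor_hw_events);
            struct pt_regs *regs = get_irq_regs();
            unsigned long overflow = vendor_read_overflow_csr();
            int idx;

            for_each_set_bit(idx, &overflow, RISCV_MAX_COUNTERS) {
                    struct perf_event *event = cpuc->events[idx];
                    struct perf_sample_data data;

                    vendor_pmu_read(event); /* the *read()* steps above */
                    perf_sample_data_init(&data, 0, event->hw.last_period);
                    if (!vendor_event_set_period(event)) /* re-arm counter */
                            continue;
                    if (perf_event_overflow(event, &data, regs))
                            vendor_pmu_stop(event, 0); /* throttled by core */
            }
            return IRQ_HANDLED;
    }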

4. Reading/Writing Counters
---------------------------

They seem symmetric but perf treats them quite differently. For reading, there
is a *read* interface in *struct pmu*, but it serves more than just reading.
Depending on the context, the *read* function not only reads the content of
the counter (event->count), but also updates the period left until the next
interrupt (event->hw.period_left).

The core of perf, however, never writes counters directly. Writing counters
is hidden behind two abstractions: 1) *pmu->start*, which literally starts
counting, so the implementation has to set the counter to a suitable value
for the next interrupt; and 2) the IRQ handler, which should set the counter
to the same reasonable value.

Reading is not a problem in RISC-V, but writing would need some effort, since
counters cannot be written from S-mode.
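
The usual shape of such a *read* is sketched below; the counter-read helper
is a hypothetical stand-in for whatever CSR access the platform provides.

    static void vendor_pmu_read(struct perf_event *event)
    {
            struct hw_perf_event *hwc = &event->hw;
            u64 prev, now;
            s64 delta;

            /* Retry if an interrupt updates prev_count under our feet. */
            do {
                    prev = local64_read(&hwc->prev_count);
                    now = vendor_read_counter(hwc->idx);
            } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

            delta = now - prev;
            local64_add(delta, &event->count);      /* # of occurrences */
            local64_sub(delta, &hwc->period_left);  /* toward next IRQ */
    }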


5. add()/del()/start()/stop()
-----------------------------

Basic idea: add()/del() adds/deletes events to/from a PMU, and start()/stop()
starts/stops the counter of some event in the PMU. All of them take the same
arguments: *struct perf_event *event* and *int flags*.

If you consider perf as a state machine, then these functions serve as the
transitions between states.
Three states (event->hw.state) are defined:

* PERF_HES_STOPPED:  the counter is stopped
* PERF_HES_UPTODATE: the event->count is up-to-date
* PERF_HES_ARCH:     arch-dependent usage ... we don't need this for now

A normal flow of these state transitions is as follows:

* A user launches a perf event, resulting in a call to *event_init*.
* When being context-switched in, *add* is called by the perf core, with the
  flag PERF_EF_START, which means that the event should be started after it
  is added. At this stage, a general event is bound to a physical counter, if
  any. The state changes to PERF_HES_STOPPED and PERF_HES_UPTODATE, because
  it is now stopped, and the (software) event count does not need updating.
** *start* is then called, and the counter is enabled.
   With the flag PERF_EF_RELOAD, it writes an appropriate value to the
   counter (check the previous section for details).
   Nothing is written if the flag does not contain PERF_EF_RELOAD.
   The state is now reset to none, because the event is neither stopped nor
   up-to-date (the counting has already started).
* When being context-switched out, *del* is called. It then checks out all
  the events in the PMU and calls *stop* to update their counts.
** *stop* is called by *del* and by the perf core with the flag
   PERF_EF_UPDATE, and it often shares the same subroutine as *read*, with
   the same logic.
   The state changes to PERF_HES_STOPPED and PERF_HES_UPTODATE, again.

** Life cycle of these two pairs: *add* and *del* are called repeatedly as
   tasks switch in and out; *start* and *stop* are also called when the perf
   core needs a quick stop-and-start, for instance, when the interrupt period
   is being adjusted.
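
To make the flag handling concrete, a *start*/*stop* pair might look like the
sketch below; the enable/disable and period helpers are hypothetical.

    static void vendor_pmu_start(struct perf_event *event, int flags)
    {
            struct hw_perf_event *hwc = &event->hw;

            if (flags & PERF_EF_RELOAD) {
                    WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
                    vendor_event_set_period(event); /* re-arm the counter */
            }
            hwc->state = 0; /* counting: neither STOPPED nor UPTODATE */
            vendor_enable_counter(hwc->idx);
            perf_event_update_userpage(event);
    }

    static void vendor_pmu_stop(struct perf_event *event, int flags)
    {
            struct hw_perf_event *hwc = &event->hw;

            if (!(hwc->state & PERF_HES_STOPPED)) {
                    vendor_disable_counter(hwc->idx);
                    hwc->state |= PERF_HES_STOPPED;
            }
            if ((flags & PERF_EF_UPDATE) &&
                !(hwc->state & PERF_HES_UPTODATE)) {
                    vendor_pmu_read(event); /* shares the logic of *read* */
                    hwc->state |= PERF_HES_UPTODATE;
            }
    }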

The current implementation is sufficient for now and can easily be extended
to support more features in the future.

A. Related Structures
---------------------

* struct pmu: include/linux/perf_event.h
* struct riscv_pmu: arch/riscv/include/asm/perf_event.h

Both structures are designed to be read-only.

*struct pmu* defines some function pointer interfaces, and most of them take
*struct perf_event* as the main argument, dealing with perf events according
to perf's internal state machine (check kernel/events/core.c for details).

*struct riscv_pmu* defines PMU-specific parameters. The naming follows the
convention of all other architectures.

* struct perf_event: include/linux/perf_event.h
* struct hw_perf_event

The generic structure that represents a perf event, and the structure that
holds its hardware-related details.

* struct riscv_hw_events: arch/riscv/include/asm/perf_event.h

The structure that holds the status of events; it has two fixed members:
the number of events and the array of the events.
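
Based on that description, such a structure could look roughly like the
following; the member names and the bound are illustrative assumptions, so
check the header above for the actual definition.

    struct riscv_hw_events {
            int n_events;                                   /* # of events */
            struct perf_event *events[RISCV_MAX_COUNTERS];  /* the events */
    };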

References
----------

[1] https://github.com/riscv/riscv-linux/pull/124
[2] https://groups.google.com/a/groups.riscv.org/forum/#!topic/sw-dev/f19TmCNP6yA
diff --git a/MAINTAINERS b/MAINTAINERS
index fd3fc63f2759..9d5eeff51b5f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
| @@ -10273,18 +10273,16 @@ F: arch/arm/boot/dts/*am5* | |||
| 10273 | F: arch/arm/boot/dts/*dra7* | 10273 | F: arch/arm/boot/dts/*dra7* |
| 10274 | 10274 | ||
| 10275 | OMAP DISPLAY SUBSYSTEM and FRAMEBUFFER SUPPORT (DSS2) | 10275 | OMAP DISPLAY SUBSYSTEM and FRAMEBUFFER SUPPORT (DSS2) |
| 10276 | M: Tomi Valkeinen <tomi.valkeinen@ti.com> | ||
| 10277 | L: linux-omap@vger.kernel.org | 10276 | L: linux-omap@vger.kernel.org |
| 10278 | L: linux-fbdev@vger.kernel.org | 10277 | L: linux-fbdev@vger.kernel.org |
| 10279 | S: Maintained | 10278 | S: Orphan |
| 10280 | F: drivers/video/fbdev/omap2/ | 10279 | F: drivers/video/fbdev/omap2/ |
| 10281 | F: Documentation/arm/OMAP/DSS | 10280 | F: Documentation/arm/OMAP/DSS |
| 10282 | 10281 | ||
| 10283 | OMAP FRAMEBUFFER SUPPORT | 10282 | OMAP FRAMEBUFFER SUPPORT |
| 10284 | M: Tomi Valkeinen <tomi.valkeinen@ti.com> | ||
| 10285 | L: linux-fbdev@vger.kernel.org | 10283 | L: linux-fbdev@vger.kernel.org |
| 10286 | L: linux-omap@vger.kernel.org | 10284 | L: linux-omap@vger.kernel.org |
| 10287 | S: Maintained | 10285 | S: Orphan |
| 10288 | F: drivers/video/fbdev/omap/ | 10286 | F: drivers/video/fbdev/omap/ |
| 10289 | 10287 | ||
| 10290 | OMAP GENERAL PURPOSE MEMORY CONTROLLER SUPPORT | 10288 | OMAP GENERAL PURPOSE MEMORY CONTROLLER SUPPORT |
| @@ -12179,7 +12177,7 @@ F: drivers/mtd/nand/raw/r852.h | |||
| 12179 | 12177 | ||
| 12180 | RISC-V ARCHITECTURE | 12178 | RISC-V ARCHITECTURE |
| 12181 | M: Palmer Dabbelt <palmer@sifive.com> | 12179 | M: Palmer Dabbelt <palmer@sifive.com> |
| 12182 | M: Albert Ou <albert@sifive.com> | 12180 | M: Albert Ou <aou@eecs.berkeley.edu> |
| 12183 | L: linux-riscv@lists.infradead.org | 12181 | L: linux-riscv@lists.infradead.org |
| 12184 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git | 12182 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git |
| 12185 | S: Supported | 12183 | S: Supported |
| @@ -12939,6 +12937,14 @@ F: drivers/media/usb/siano/ | |||
| 12939 | F: drivers/media/usb/siano/ | 12937 | F: drivers/media/usb/siano/ |
| 12940 | F: drivers/media/mmc/siano/ | 12938 | F: drivers/media/mmc/siano/ |
| 12941 | 12939 | ||
| 12940 | SIFIVE DRIVERS | ||
| 12941 | M: Palmer Dabbelt <palmer@sifive.com> | ||
| 12942 | L: linux-riscv@lists.infradead.org | ||
| 12943 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git | ||
| 12944 | S: Supported | ||
| 12945 | K: sifive | ||
| 12946 | N: sifive | ||
| 12947 | |||
| 12942 | SILEAD TOUCHSCREEN DRIVER | 12948 | SILEAD TOUCHSCREEN DRIVER |
| 12943 | M: Hans de Goede <hdegoede@redhat.com> | 12949 | M: Hans de Goede <hdegoede@redhat.com> |
| 12944 | L: linux-input@vger.kernel.org | 12950 | L: linux-input@vger.kernel.org |
| @@ -15007,8 +15013,7 @@ F: drivers/media/usb/zr364xx/ | |||
| 15007 | USER-MODE LINUX (UML) | 15013 | USER-MODE LINUX (UML) |
| 15008 | M: Jeff Dike <jdike@addtoit.com> | 15014 | M: Jeff Dike <jdike@addtoit.com> |
| 15009 | M: Richard Weinberger <richard@nod.at> | 15015 | M: Richard Weinberger <richard@nod.at> |
| 15010 | L: user-mode-linux-devel@lists.sourceforge.net | 15016 | L: linux-um@lists.infradead.org |
| 15011 | L: user-mode-linux-user@lists.sourceforge.net | ||
| 15012 | W: http://user-mode-linux.sourceforge.net | 15017 | W: http://user-mode-linux.sourceforge.net |
| 15013 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml.git | 15018 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml.git |
| 15014 | S: Maintained | 15019 | S: Maintained |
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index aa9e785c59c2..7841b8a60657 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
| @@ -134,7 +134,13 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip); | |||
| 134 | void pnv_power9_force_smt4_catch(void); | 134 | void pnv_power9_force_smt4_catch(void); |
| 135 | void pnv_power9_force_smt4_release(void); | 135 | void pnv_power9_force_smt4_release(void); |
| 136 | 136 | ||
| 137 | /* Transaction memory related */ | ||
| 137 | void tm_enable(void); | 138 | void tm_enable(void); |
| 138 | void tm_disable(void); | 139 | void tm_disable(void); |
| 139 | void tm_abort(uint8_t cause); | 140 | void tm_abort(uint8_t cause); |
| 141 | |||
| 142 | struct kvm_vcpu; | ||
| 143 | void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr); | ||
| 144 | void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr); | ||
| 145 | |||
| 140 | #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ | 146 | #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ |
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index e7377b73cfec..1f345a0b6ba2 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
| @@ -104,6 +104,7 @@ struct kvmppc_vcore { | |||
| 104 | ulong vtb; /* virtual timebase */ | 104 | ulong vtb; /* virtual timebase */ |
| 105 | ulong conferring_threads; | 105 | ulong conferring_threads; |
| 106 | unsigned int halt_poll_ns; | 106 | unsigned int halt_poll_ns; |
| 107 | atomic_t online_count; | ||
| 107 | }; | 108 | }; |
| 108 | 109 | ||
| 109 | struct kvmppc_vcpu_book3s { | 110 | struct kvmppc_vcpu_book3s { |
| @@ -209,6 +210,7 @@ extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) | |||
| 209 | extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu, | 210 | extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu, |
| 210 | unsigned int vec); | 211 | unsigned int vec); |
| 211 | extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags); | 212 | extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags); |
| 213 | extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac); | ||
| 212 | extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, | 214 | extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, |
| 213 | bool upper, u32 val); | 215 | bool upper, u32 val); |
| 214 | extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); | 216 | extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); |
| @@ -256,6 +258,21 @@ extern int kvmppc_hcall_impl_pr(unsigned long cmd); | |||
| 256 | extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd); | 258 | extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd); |
| 257 | extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu); | 259 | extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu); |
| 258 | extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu); | 260 | extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu); |
| 261 | |||
| 262 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 263 | void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu); | ||
| 264 | void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu); | ||
| 265 | void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu); | ||
| 266 | void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu); | ||
| 267 | #else | ||
| 268 | static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {} | ||
| 269 | static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {} | ||
| 270 | static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {} | ||
| 271 | static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {} | ||
| 272 | #endif | ||
| 273 | |||
| 274 | void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); | ||
| 275 | |||
| 259 | extern int kvm_irq_bypass; | 276 | extern int kvm_irq_bypass; |
| 260 | 277 | ||
| 261 | static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) | 278 | static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) |
| @@ -274,12 +291,12 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) | |||
| 274 | 291 | ||
| 275 | static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) | 292 | static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) |
| 276 | { | 293 | { |
| 277 | vcpu->arch.gpr[num] = val; | 294 | vcpu->arch.regs.gpr[num] = val; |
| 278 | } | 295 | } |
| 279 | 296 | ||
| 280 | static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) | 297 | static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) |
| 281 | { | 298 | { |
| 282 | return vcpu->arch.gpr[num]; | 299 | return vcpu->arch.regs.gpr[num]; |
| 283 | } | 300 | } |
| 284 | 301 | ||
| 285 | static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) | 302 | static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) |
| @@ -294,42 +311,42 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) | |||
| 294 | 311 | ||
| 295 | static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val) | 312 | static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val) |
| 296 | { | 313 | { |
| 297 | vcpu->arch.xer = val; | 314 | vcpu->arch.regs.xer = val; |
| 298 | } | 315 | } |
| 299 | 316 | ||
| 300 | static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu) | 317 | static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu) |
| 301 | { | 318 | { |
| 302 | return vcpu->arch.xer; | 319 | return vcpu->arch.regs.xer; |
| 303 | } | 320 | } |
| 304 | 321 | ||
| 305 | static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) | 322 | static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) |
| 306 | { | 323 | { |
| 307 | vcpu->arch.ctr = val; | 324 | vcpu->arch.regs.ctr = val; |
| 308 | } | 325 | } |
| 309 | 326 | ||
| 310 | static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) | 327 | static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) |
| 311 | { | 328 | { |
| 312 | return vcpu->arch.ctr; | 329 | return vcpu->arch.regs.ctr; |
| 313 | } | 330 | } |
| 314 | 331 | ||
| 315 | static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) | 332 | static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) |
| 316 | { | 333 | { |
| 317 | vcpu->arch.lr = val; | 334 | vcpu->arch.regs.link = val; |
| 318 | } | 335 | } |
| 319 | 336 | ||
| 320 | static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) | 337 | static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) |
| 321 | { | 338 | { |
| 322 | return vcpu->arch.lr; | 339 | return vcpu->arch.regs.link; |
| 323 | } | 340 | } |
| 324 | 341 | ||
| 325 | static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) | 342 | static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) |
| 326 | { | 343 | { |
| 327 | vcpu->arch.pc = val; | 344 | vcpu->arch.regs.nip = val; |
| 328 | } | 345 | } |
| 329 | 346 | ||
| 330 | static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) | 347 | static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) |
| 331 | { | 348 | { |
| 332 | return vcpu->arch.pc; | 349 | return vcpu->arch.regs.nip; |
| 333 | } | 350 | } |
| 334 | 351 | ||
| 335 | static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu); | 352 | static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu); |
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index c424e44f4c00..dc435a5af7d6 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
| @@ -483,15 +483,15 @@ static inline u64 sanitize_msr(u64 msr) | |||
| 483 | static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu) | 483 | static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu) |
| 484 | { | 484 | { |
| 485 | vcpu->arch.cr = vcpu->arch.cr_tm; | 485 | vcpu->arch.cr = vcpu->arch.cr_tm; |
| 486 | vcpu->arch.xer = vcpu->arch.xer_tm; | 486 | vcpu->arch.regs.xer = vcpu->arch.xer_tm; |
| 487 | vcpu->arch.lr = vcpu->arch.lr_tm; | 487 | vcpu->arch.regs.link = vcpu->arch.lr_tm; |
| 488 | vcpu->arch.ctr = vcpu->arch.ctr_tm; | 488 | vcpu->arch.regs.ctr = vcpu->arch.ctr_tm; |
| 489 | vcpu->arch.amr = vcpu->arch.amr_tm; | 489 | vcpu->arch.amr = vcpu->arch.amr_tm; |
| 490 | vcpu->arch.ppr = vcpu->arch.ppr_tm; | 490 | vcpu->arch.ppr = vcpu->arch.ppr_tm; |
| 491 | vcpu->arch.dscr = vcpu->arch.dscr_tm; | 491 | vcpu->arch.dscr = vcpu->arch.dscr_tm; |
| 492 | vcpu->arch.tar = vcpu->arch.tar_tm; | 492 | vcpu->arch.tar = vcpu->arch.tar_tm; |
| 493 | memcpy(vcpu->arch.gpr, vcpu->arch.gpr_tm, | 493 | memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm, |
| 494 | sizeof(vcpu->arch.gpr)); | 494 | sizeof(vcpu->arch.regs.gpr)); |
| 495 | vcpu->arch.fp = vcpu->arch.fp_tm; | 495 | vcpu->arch.fp = vcpu->arch.fp_tm; |
| 496 | vcpu->arch.vr = vcpu->arch.vr_tm; | 496 | vcpu->arch.vr = vcpu->arch.vr_tm; |
| 497 | vcpu->arch.vrsave = vcpu->arch.vrsave_tm; | 497 | vcpu->arch.vrsave = vcpu->arch.vrsave_tm; |
| @@ -500,15 +500,15 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu) | |||
| 500 | static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu) | 500 | static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu) |
| 501 | { | 501 | { |
| 502 | vcpu->arch.cr_tm = vcpu->arch.cr; | 502 | vcpu->arch.cr_tm = vcpu->arch.cr; |
| 503 | vcpu->arch.xer_tm = vcpu->arch.xer; | 503 | vcpu->arch.xer_tm = vcpu->arch.regs.xer; |
| 504 | vcpu->arch.lr_tm = vcpu->arch.lr; | 504 | vcpu->arch.lr_tm = vcpu->arch.regs.link; |
| 505 | vcpu->arch.ctr_tm = vcpu->arch.ctr; | 505 | vcpu->arch.ctr_tm = vcpu->arch.regs.ctr; |
| 506 | vcpu->arch.amr_tm = vcpu->arch.amr; | 506 | vcpu->arch.amr_tm = vcpu->arch.amr; |
| 507 | vcpu->arch.ppr_tm = vcpu->arch.ppr; | 507 | vcpu->arch.ppr_tm = vcpu->arch.ppr; |
| 508 | vcpu->arch.dscr_tm = vcpu->arch.dscr; | 508 | vcpu->arch.dscr_tm = vcpu->arch.dscr; |
| 509 | vcpu->arch.tar_tm = vcpu->arch.tar; | 509 | vcpu->arch.tar_tm = vcpu->arch.tar; |
| 510 | memcpy(vcpu->arch.gpr_tm, vcpu->arch.gpr, | 510 | memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr, |
| 511 | sizeof(vcpu->arch.gpr)); | 511 | sizeof(vcpu->arch.regs.gpr)); |
| 512 | vcpu->arch.fp_tm = vcpu->arch.fp; | 512 | vcpu->arch.fp_tm = vcpu->arch.fp; |
| 513 | vcpu->arch.vr_tm = vcpu->arch.vr; | 513 | vcpu->arch.vr_tm = vcpu->arch.vr; |
| 514 | vcpu->arch.vrsave_tm = vcpu->arch.vrsave; | 514 | vcpu->arch.vrsave_tm = vcpu->arch.vrsave; |
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index bc6e29e4dfd4..d513e3ed1c65 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
| @@ -36,12 +36,12 @@ | |||
| 36 | 36 | ||
| 37 | static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) | 37 | static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) |
| 38 | { | 38 | { |
| 39 | vcpu->arch.gpr[num] = val; | 39 | vcpu->arch.regs.gpr[num] = val; |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) | 42 | static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) |
| 43 | { | 43 | { |
| 44 | return vcpu->arch.gpr[num]; | 44 | return vcpu->arch.regs.gpr[num]; |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) | 47 | static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) |
| @@ -56,12 +56,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) | |||
| 56 | 56 | ||
| 57 | static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val) | 57 | static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val) |
| 58 | { | 58 | { |
| 59 | vcpu->arch.xer = val; | 59 | vcpu->arch.regs.xer = val; |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu) | 62 | static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu) |
| 63 | { | 63 | { |
| 64 | return vcpu->arch.xer; | 64 | return vcpu->arch.regs.xer; |
| 65 | } | 65 | } |
| 66 | 66 | ||
| 67 | static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) | 67 | static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) |
| @@ -72,32 +72,32 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) | |||
| 72 | 72 | ||
| 73 | static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) | 73 | static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) |
| 74 | { | 74 | { |
| 75 | vcpu->arch.ctr = val; | 75 | vcpu->arch.regs.ctr = val; |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) | 78 | static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) |
| 79 | { | 79 | { |
| 80 | return vcpu->arch.ctr; | 80 | return vcpu->arch.regs.ctr; |
| 81 | } | 81 | } |
| 82 | 82 | ||
| 83 | static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) | 83 | static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) |
| 84 | { | 84 | { |
| 85 | vcpu->arch.lr = val; | 85 | vcpu->arch.regs.link = val; |
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) | 88 | static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) |
| 89 | { | 89 | { |
| 90 | return vcpu->arch.lr; | 90 | return vcpu->arch.regs.link; |
| 91 | } | 91 | } |
| 92 | 92 | ||
| 93 | static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) | 93 | static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) |
| 94 | { | 94 | { |
| 95 | vcpu->arch.pc = val; | 95 | vcpu->arch.regs.nip = val; |
| 96 | } | 96 | } |
| 97 | 97 | ||
| 98 | static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) | 98 | static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) |
| 99 | { | 99 | { |
| 100 | return vcpu->arch.pc; | 100 | return vcpu->arch.regs.nip; |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) | 103 | static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) |
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 17498e9a26e4..fa4efa7e88f7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
| @@ -269,7 +269,6 @@ struct kvm_arch { | |||
| 269 | unsigned long host_lpcr; | 269 | unsigned long host_lpcr; |
| 270 | unsigned long sdr1; | 270 | unsigned long sdr1; |
| 271 | unsigned long host_sdr1; | 271 | unsigned long host_sdr1; |
| 272 | int tlbie_lock; | ||
| 273 | unsigned long lpcr; | 272 | unsigned long lpcr; |
| 274 | unsigned long vrma_slb_v; | 273 | unsigned long vrma_slb_v; |
| 275 | int mmu_ready; | 274 | int mmu_ready; |
| @@ -454,6 +453,12 @@ struct mmio_hpte_cache { | |||
| 454 | #define KVMPPC_VSX_COPY_WORD 1 | 453 | #define KVMPPC_VSX_COPY_WORD 1 |
| 455 | #define KVMPPC_VSX_COPY_DWORD 2 | 454 | #define KVMPPC_VSX_COPY_DWORD 2 |
| 456 | #define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP 3 | 455 | #define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP 3 |
| 456 | #define KVMPPC_VSX_COPY_WORD_LOAD_DUMP 4 | ||
| 457 | |||
| 458 | #define KVMPPC_VMX_COPY_BYTE 8 | ||
| 459 | #define KVMPPC_VMX_COPY_HWORD 9 | ||
| 460 | #define KVMPPC_VMX_COPY_WORD 10 | ||
| 461 | #define KVMPPC_VMX_COPY_DWORD 11 | ||
| 457 | 462 | ||
| 458 | struct openpic; | 463 | struct openpic; |
| 459 | 464 | ||
| @@ -486,7 +491,7 @@ struct kvm_vcpu_arch { | |||
| 486 | struct kvmppc_book3s_shadow_vcpu *shadow_vcpu; | 491 | struct kvmppc_book3s_shadow_vcpu *shadow_vcpu; |
| 487 | #endif | 492 | #endif |
| 488 | 493 | ||
| 489 | ulong gpr[32]; | 494 | struct pt_regs regs; |
| 490 | 495 | ||
| 491 | struct thread_fp_state fp; | 496 | struct thread_fp_state fp; |
| 492 | 497 | ||
| @@ -521,14 +526,10 @@ struct kvm_vcpu_arch { | |||
| 521 | u32 qpr[32]; | 526 | u32 qpr[32]; |
| 522 | #endif | 527 | #endif |
| 523 | 528 | ||
| 524 | ulong pc; | ||
| 525 | ulong ctr; | ||
| 526 | ulong lr; | ||
| 527 | #ifdef CONFIG_PPC_BOOK3S | 529 | #ifdef CONFIG_PPC_BOOK3S |
| 528 | ulong tar; | 530 | ulong tar; |
| 529 | #endif | 531 | #endif |
| 530 | 532 | ||
| 531 | ulong xer; | ||
| 532 | u32 cr; | 533 | u32 cr; |
| 533 | 534 | ||
| 534 | #ifdef CONFIG_PPC_BOOK3S | 535 | #ifdef CONFIG_PPC_BOOK3S |
| @@ -626,7 +627,6 @@ struct kvm_vcpu_arch { | |||
| 626 | 627 | ||
| 627 | struct thread_vr_state vr_tm; | 628 | struct thread_vr_state vr_tm; |
| 628 | u32 vrsave_tm; /* also USPRG0 */ | 629 | u32 vrsave_tm; /* also USPRG0 */ |
| 629 | |||
| 630 | #endif | 630 | #endif |
| 631 | 631 | ||
| 632 | #ifdef CONFIG_KVM_EXIT_TIMING | 632 | #ifdef CONFIG_KVM_EXIT_TIMING |
| @@ -681,16 +681,17 @@ struct kvm_vcpu_arch { | |||
| 681 | * Number of simulations for vsx. | 681 | * Number of simulations for vsx. |
| 682 | * If we use 2*8bytes to simulate 1*16bytes, | 682 | * If we use 2*8bytes to simulate 1*16bytes, |
| 683 | * then the number should be 2 and | 683 | * then the number should be 2 and |
| 684 | * mmio_vsx_copy_type=KVMPPC_VSX_COPY_DWORD. | 684 | * mmio_copy_type=KVMPPC_VSX_COPY_DWORD. |
| 685 | * If we use 4*4bytes to simulate 1*16bytes, | 685 | * If we use 4*4bytes to simulate 1*16bytes, |
| 686 | * the number should be 4 and | 686 | * the number should be 4 and |
| 687 | * mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD. | 687 | * mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD. |
| 688 | */ | 688 | */ |
| 689 | u8 mmio_vsx_copy_nums; | 689 | u8 mmio_vsx_copy_nums; |
| 690 | u8 mmio_vsx_offset; | 690 | u8 mmio_vsx_offset; |
| 691 | u8 mmio_vsx_copy_type; | ||
| 692 | u8 mmio_vsx_tx_sx_enabled; | 691 | u8 mmio_vsx_tx_sx_enabled; |
| 693 | u8 mmio_vmx_copy_nums; | 692 | u8 mmio_vmx_copy_nums; |
| 693 | u8 mmio_vmx_offset; | ||
| 694 | u8 mmio_copy_type; | ||
| 694 | u8 osi_needed; | 695 | u8 osi_needed; |
| 695 | u8 osi_enabled; | 696 | u8 osi_enabled; |
| 696 | u8 papr_enabled; | 697 | u8 papr_enabled; |
| @@ -772,6 +773,8 @@ struct kvm_vcpu_arch { | |||
| 772 | u64 busy_preempt; | 773 | u64 busy_preempt; |
| 773 | 774 | ||
| 774 | u32 emul_inst; | 775 | u32 emul_inst; |
| 776 | |||
| 777 | u32 online; | ||
| 775 | #endif | 778 | #endif |
| 776 | 779 | ||
| 777 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING | 780 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index abe7032cdb54..e991821dd7fa 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
| @@ -52,7 +52,7 @@ enum emulation_result { | |||
| 52 | EMULATE_EXIT_USER, /* emulation requires exit to user-space */ | 52 | EMULATE_EXIT_USER, /* emulation requires exit to user-space */ |
| 53 | }; | 53 | }; |
| 54 | 54 | ||
| 55 | enum instruction_type { | 55 | enum instruction_fetch_type { |
| 56 | INST_GENERIC, | 56 | INST_GENERIC, |
| 57 | INST_SC, /* system call */ | 57 | INST_SC, /* system call */ |
| 58 | }; | 58 | }; |
| @@ -81,10 +81,10 @@ extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 81 | extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | 81 | extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 82 | unsigned int rt, unsigned int bytes, | 82 | unsigned int rt, unsigned int bytes, |
| 83 | int is_default_endian, int mmio_sign_extend); | 83 | int is_default_endian, int mmio_sign_extend); |
| 84 | extern int kvmppc_handle_load128_by2x64(struct kvm_run *run, | 84 | extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 85 | struct kvm_vcpu *vcpu, unsigned int rt, int is_default_endian); | 85 | unsigned int rt, unsigned int bytes, int is_default_endian); |
| 86 | extern int kvmppc_handle_store128_by2x64(struct kvm_run *run, | 86 | extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 87 | struct kvm_vcpu *vcpu, unsigned int rs, int is_default_endian); | 87 | unsigned int rs, unsigned int bytes, int is_default_endian); |
| 88 | extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | 88 | extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 89 | u64 val, unsigned int bytes, | 89 | u64 val, unsigned int bytes, |
| 90 | int is_default_endian); | 90 | int is_default_endian); |
| @@ -93,7 +93,7 @@ extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 93 | int is_default_endian); | 93 | int is_default_endian); |
| 94 | 94 | ||
| 95 | extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, | 95 | extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, |
| 96 | enum instruction_type type, u32 *inst); | 96 | enum instruction_fetch_type type, u32 *inst); |
| 97 | 97 | ||
| 98 | extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, | 98 | extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, |
| 99 | bool data); | 99 | bool data); |
| @@ -265,6 +265,8 @@ union kvmppc_one_reg { | |||
| 265 | vector128 vval; | 265 | vector128 vval; |
| 266 | u64 vsxval[2]; | 266 | u64 vsxval[2]; |
| 267 | u32 vsx32val[4]; | 267 | u32 vsx32val[4]; |
| 268 | u16 vsx16val[8]; | ||
| 269 | u8 vsx8val[16]; | ||
| 268 | struct { | 270 | struct { |
| 269 | u64 addr; | 271 | u64 addr; |
| 270 | u64 length; | 272 | u64 length; |
| @@ -324,13 +326,14 @@ struct kvmppc_ops { | |||
| 324 | int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info); | 326 | int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info); |
| 325 | int (*set_smt_mode)(struct kvm *kvm, unsigned long mode, | 327 | int (*set_smt_mode)(struct kvm *kvm, unsigned long mode, |
| 326 | unsigned long flags); | 328 | unsigned long flags); |
| 329 | void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr); | ||
| 327 | }; | 330 | }; |
| 328 | 331 | ||
| 329 | extern struct kvmppc_ops *kvmppc_hv_ops; | 332 | extern struct kvmppc_ops *kvmppc_hv_ops; |
| 330 | extern struct kvmppc_ops *kvmppc_pr_ops; | 333 | extern struct kvmppc_ops *kvmppc_pr_ops; |
| 331 | 334 | ||
| 332 | static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu, | 335 | static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu, |
| 333 | enum instruction_type type, u32 *inst) | 336 | enum instruction_fetch_type type, u32 *inst) |
| 334 | { | 337 | { |
| 335 | int ret = EMULATE_DONE; | 338 | int ret = EMULATE_DONE; |
| 336 | u32 fetched_inst; | 339 | u32 fetched_inst; |
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 75c5b2cd9d66..562568414cf4 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
| @@ -385,6 +385,7 @@ | |||
| 385 | #define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */ | 385 | #define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */ |
| 386 | #define SPRN_PSSCR_PR 0x337 /* PSSCR ISA 3.0, privileged mode access */ | 386 | #define SPRN_PSSCR_PR 0x337 /* PSSCR ISA 3.0, privileged mode access */ |
| 387 | #define SPRN_PMCR 0x374 /* Power Management Control Register */ | 387 | #define SPRN_PMCR 0x374 /* Power Management Control Register */ |
| 388 | #define SPRN_RWMR 0x375 /* Region-Weighting Mode Register */ | ||
| 388 | 389 | ||
| 389 | /* HFSCR and FSCR bit numbers are the same */ | 390 | /* HFSCR and FSCR bit numbers are the same */ |
| 390 | #define FSCR_SCV_LG 12 /* Enable System Call Vectored */ | 391 | #define FSCR_SCV_LG 12 /* Enable System Call Vectored */ |
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 833ed9a16adf..1b32b56a03d3 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
| @@ -633,6 +633,7 @@ struct kvm_ppc_cpu_char { | |||
| 633 | #define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd) | 633 | #define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd) |
| 634 | 634 | ||
| 635 | #define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe) | 635 | #define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe) |
| 636 | #define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf) | ||
| 636 | 637 | ||
| 637 | /* Transactional Memory checkpointed state: | 638 | /* Transactional Memory checkpointed state: |
| 638 | * This is all GPRs, all VSX regs and a subset of SPRs | 639 | * This is all GPRs, all VSX regs and a subset of SPRs |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9fc9e0977009..0a0544335950 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
| @@ -426,20 +426,20 @@ int main(void) | |||
| 426 | OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack); | 426 | OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack); |
| 427 | OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid); | 427 | OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid); |
| 428 | OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid); | 428 | OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid); |
| 429 | OFFSET(VCPU_GPRS, kvm_vcpu, arch.gpr); | 429 | OFFSET(VCPU_GPRS, kvm_vcpu, arch.regs.gpr); |
| 430 | OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave); | 430 | OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave); |
| 431 | OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr); | 431 | OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr); |
| 432 | #ifdef CONFIG_ALTIVEC | 432 | #ifdef CONFIG_ALTIVEC |
| 433 | OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr); | 433 | OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr); |
| 434 | #endif | 434 | #endif |
| 435 | OFFSET(VCPU_XER, kvm_vcpu, arch.xer); | 435 | OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer); |
| 436 | OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr); | 436 | OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr); |
| 437 | OFFSET(VCPU_LR, kvm_vcpu, arch.lr); | 437 | OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link); |
| 438 | #ifdef CONFIG_PPC_BOOK3S | 438 | #ifdef CONFIG_PPC_BOOK3S |
| 439 | OFFSET(VCPU_TAR, kvm_vcpu, arch.tar); | 439 | OFFSET(VCPU_TAR, kvm_vcpu, arch.tar); |
| 440 | #endif | 440 | #endif |
| 441 | OFFSET(VCPU_CR, kvm_vcpu, arch.cr); | 441 | OFFSET(VCPU_CR, kvm_vcpu, arch.cr); |
| 442 | OFFSET(VCPU_PC, kvm_vcpu, arch.pc); | 442 | OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip); |
| 443 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE | 443 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
| 444 | OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr); | 444 | OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr); |
| 445 | OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0); | 445 | OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0); |
| @@ -696,10 +696,10 @@ int main(void) | |||
| 696 | 696 | ||
| 697 | #else /* CONFIG_PPC_BOOK3S */ | 697 | #else /* CONFIG_PPC_BOOK3S */ |
| 698 | OFFSET(VCPU_CR, kvm_vcpu, arch.cr); | 698 | OFFSET(VCPU_CR, kvm_vcpu, arch.cr); |
| 699 | OFFSET(VCPU_XER, kvm_vcpu, arch.xer); | 699 | OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer); |
| 700 | OFFSET(VCPU_LR, kvm_vcpu, arch.lr); | 700 | OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link); |
| 701 | OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr); | 701 | OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr); |
| 702 | OFFSET(VCPU_PC, kvm_vcpu, arch.pc); | 702 | OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip); |
| 703 | OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9); | 703 | OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9); |
| 704 | OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst); | 704 | OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst); |
| 705 | OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear); | 705 | OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear); |
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 4b19da8c87ae..f872c04bb5b1 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
| @@ -63,6 +63,9 @@ kvm-pr-y := \ | |||
| 63 | book3s_64_mmu.o \ | 63 | book3s_64_mmu.o \ |
| 64 | book3s_32_mmu.o | 64 | book3s_32_mmu.o |
| 65 | 65 | ||
| 66 | kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \ | ||
| 67 | tm.o | ||
| 68 | |||
| 66 | ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE | 69 | ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE |
| 67 | kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \ | 70 | kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \ |
| 68 | book3s_rmhandlers.o | 71 | book3s_rmhandlers.o |
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 97d4a112648f..edaf4720d156 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
| @@ -134,7 +134,7 @@ void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) | |||
| 134 | { | 134 | { |
| 135 | kvmppc_unfixup_split_real(vcpu); | 135 | kvmppc_unfixup_split_real(vcpu); |
| 136 | kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu)); | 136 | kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu)); |
| 137 | kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags); | 137 | kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & ~0x783f0000ul) | flags); |
| 138 | kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec); | 138 | kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec); |
| 139 | vcpu->arch.mmu.reset_msr(vcpu); | 139 | vcpu->arch.mmu.reset_msr(vcpu); |
| 140 | } | 140 | } |
| @@ -256,18 +256,15 @@ void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar, | |||
| 256 | { | 256 | { |
| 257 | kvmppc_set_dar(vcpu, dar); | 257 | kvmppc_set_dar(vcpu, dar); |
| 258 | kvmppc_set_dsisr(vcpu, flags); | 258 | kvmppc_set_dsisr(vcpu, flags); |
| 259 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); | 259 | kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0); |
| 260 | } | 260 | } |
| 261 | EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage); /* used by kvm_hv */ | 261 | EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage); |
| 262 | 262 | ||
| 263 | void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags) | 263 | void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags) |
| 264 | { | 264 | { |
| 265 | u64 msr = kvmppc_get_msr(vcpu); | 265 | kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags); |
| 266 | msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT); | ||
| 267 | msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT); | ||
| 268 | kvmppc_set_msr_fast(vcpu, msr); | ||
| 269 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE); | ||
| 270 | } | 266 | } |
| 267 | EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage); | ||
| 271 | 268 | ||
| 272 | static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, | 269 | static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, |
| 273 | unsigned int priority) | 270 | unsigned int priority) |
| @@ -450,8 +447,8 @@ int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid, | |||
| 450 | return r; | 447 | return r; |
| 451 | } | 448 | } |
| 452 | 449 | ||
| 453 | int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, | 450 | int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, |
| 454 | u32 *inst) | 451 | enum instruction_fetch_type type, u32 *inst) |
| 455 | { | 452 | { |
| 456 | ulong pc = kvmppc_get_pc(vcpu); | 453 | ulong pc = kvmppc_get_pc(vcpu); |
| 457 | int r; | 454 | int r; |
| @@ -509,8 +506,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
| 509 | { | 506 | { |
| 510 | int i; | 507 | int i; |
| 511 | 508 | ||
| 512 | vcpu_load(vcpu); | ||
| 513 | |||
| 514 | regs->pc = kvmppc_get_pc(vcpu); | 509 | regs->pc = kvmppc_get_pc(vcpu); |
| 515 | regs->cr = kvmppc_get_cr(vcpu); | 510 | regs->cr = kvmppc_get_cr(vcpu); |
| 516 | regs->ctr = kvmppc_get_ctr(vcpu); | 511 | regs->ctr = kvmppc_get_ctr(vcpu); |
| @@ -532,7 +527,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
| 532 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 527 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
| 533 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); | 528 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); |
| 534 | 529 | ||
| 535 | vcpu_put(vcpu); | ||
| 536 | return 0; | 530 | return 0; |
| 537 | } | 531 | } |
| 538 | 532 | ||
| @@ -540,8 +534,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
| 540 | { | 534 | { |
| 541 | int i; | 535 | int i; |
| 542 | 536 | ||
| 543 | vcpu_load(vcpu); | ||
| 544 | |||
| 545 | kvmppc_set_pc(vcpu, regs->pc); | 537 | kvmppc_set_pc(vcpu, regs->pc); |
| 546 | kvmppc_set_cr(vcpu, regs->cr); | 538 | kvmppc_set_cr(vcpu, regs->cr); |
| 547 | kvmppc_set_ctr(vcpu, regs->ctr); | 539 | kvmppc_set_ctr(vcpu, regs->ctr); |
| @@ -562,7 +554,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
| 562 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 554 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
| 563 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); | 555 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); |
| 564 | 556 | ||
| 565 | vcpu_put(vcpu); | ||
| 566 | return 0; | 557 | return 0; |
| 567 | } | 558 | } |
| 568 | 559 | ||
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
index 4ad5e287b8bc..14ef03501d21 100644
--- a/arch/powerpc/kvm/book3s.h
+++ b/arch/powerpc/kvm/book3s.h
| @@ -31,4 +31,10 @@ extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, | |||
| 31 | extern int kvmppc_book3s_init_pr(void); | 31 | extern int kvmppc_book3s_init_pr(void); |
| 32 | extern void kvmppc_book3s_exit_pr(void); | 32 | extern void kvmppc_book3s_exit_pr(void); |
| 33 | 33 | ||
| 34 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 35 | extern void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val); | ||
| 36 | #else | ||
| 37 | static inline void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) {} | ||
| 38 | #endif | ||
| 39 | |||
| 34 | #endif | 40 | #endif |
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 1992676c7a94..45c8ea4a0487 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
| @@ -52,7 +52,7 @@ | |||
| 52 | static inline bool check_debug_ip(struct kvm_vcpu *vcpu) | 52 | static inline bool check_debug_ip(struct kvm_vcpu *vcpu) |
| 53 | { | 53 | { |
| 54 | #ifdef DEBUG_MMU_PTE_IP | 54 | #ifdef DEBUG_MMU_PTE_IP |
| 55 | return vcpu->arch.pc == DEBUG_MMU_PTE_IP; | 55 | return vcpu->arch.regs.nip == DEBUG_MMU_PTE_IP; |
| 56 | #else | 56 | #else |
| 57 | return true; | 57 | return true; |
| 58 | #endif | 58 | #endif |
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index a93d719edc90..cf9d686e8162 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
| @@ -38,7 +38,16 @@ | |||
| 38 | 38 | ||
| 39 | static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu) | 39 | static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu) |
| 40 | { | 40 | { |
| 41 | kvmppc_set_msr(vcpu, vcpu->arch.intr_msr); | 41 | unsigned long msr = vcpu->arch.intr_msr; |
| 42 | unsigned long cur_msr = kvmppc_get_msr(vcpu); | ||
| 43 | |||
| 44 | /* If transactional, change to suspend mode on IRQ delivery */ | ||
| 45 | if (MSR_TM_TRANSACTIONAL(cur_msr)) | ||
| 46 | msr |= MSR_TS_S; | ||
| 47 | else | ||
| 48 | msr |= cur_msr & MSR_TS_MASK; | ||
| 49 | |||
| 50 | kvmppc_set_msr(vcpu, msr); | ||
| 42 | } | 51 | } |
| 43 | 52 | ||
| 44 | static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( | 53 | static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 1b3fcafc685e..7f3a8cf5d66f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
| @@ -272,6 +272,9 @@ int kvmppc_mmu_hv_init(void) | |||
| 272 | if (!cpu_has_feature(CPU_FTR_HVMODE)) | 272 | if (!cpu_has_feature(CPU_FTR_HVMODE)) |
| 273 | return -EINVAL; | 273 | return -EINVAL; |
| 274 | 274 | ||
| 275 | if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE)) | ||
| 276 | return -EINVAL; | ||
| 277 | |||
| 275 | /* POWER7 has 10-bit LPIDs (12-bit in POWER8) */ | 278 | /* POWER7 has 10-bit LPIDs (12-bit in POWER8) */ |
| 276 | host_lpid = mfspr(SPRN_LPID); | 279 | host_lpid = mfspr(SPRN_LPID); |
| 277 | rsvd_lpid = LPID_RSVD; | 280 | rsvd_lpid = LPID_RSVD; |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 481da8f93fa4..176f911ee983 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
| @@ -139,44 +139,24 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
| 139 | return 0; | 139 | return 0; |
| 140 | } | 140 | } |
| 141 | 141 | ||
| 142 | #ifdef CONFIG_PPC_64K_PAGES | ||
| 143 | #define MMU_BASE_PSIZE MMU_PAGE_64K | ||
| 144 | #else | ||
| 145 | #define MMU_BASE_PSIZE MMU_PAGE_4K | ||
| 146 | #endif | ||
| 147 | |||
| 148 | static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, | 142 | static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, |
| 149 | unsigned int pshift) | 143 | unsigned int pshift) |
| 150 | { | 144 | { |
| 151 | int psize = MMU_BASE_PSIZE; | 145 | unsigned long psize = PAGE_SIZE; |
| 152 | 146 | ||
| 153 | if (pshift >= PUD_SHIFT) | 147 | if (pshift) |
| 154 | psize = MMU_PAGE_1G; | 148 | psize = 1UL << pshift; |
| 155 | else if (pshift >= PMD_SHIFT) | 149 | |
| 156 | psize = MMU_PAGE_2M; | 150 | addr &= ~(psize - 1); |
| 157 | addr &= ~0xfffUL; | 151 | radix__flush_tlb_lpid_page(kvm->arch.lpid, addr, psize); |
| 158 | addr |= mmu_psize_defs[psize].ap << 5; | ||
| 159 | asm volatile("ptesync": : :"memory"); | ||
| 160 | asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1) | ||
| 161 | : : "r" (addr), "r" (kvm->arch.lpid) : "memory"); | ||
| 162 | if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) | ||
| 163 | asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1) | ||
| 164 | : : "r" (addr), "r" (kvm->arch.lpid) : "memory"); | ||
| 165 | asm volatile("eieio ; tlbsync ; ptesync": : :"memory"); | ||
| 166 | } | 152 | } |
| 167 | 153 | ||
| 168 | static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr) | 154 | static void kvmppc_radix_flush_pwc(struct kvm *kvm) |
| 169 | { | 155 | { |
| 170 | unsigned long rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */ | 156 | radix__flush_pwc_lpid(kvm->arch.lpid); |
| 171 | |||
| 172 | asm volatile("ptesync": : :"memory"); | ||
| 173 | /* RIC=1 PRS=0 R=1 IS=2 */ | ||
| 174 | asm volatile(PPC_TLBIE_5(%0, %1, 1, 0, 1) | ||
| 175 | : : "r" (rb), "r" (kvm->arch.lpid) : "memory"); | ||
| 176 | asm volatile("eieio ; tlbsync ; ptesync": : :"memory"); | ||
| 177 | } | 157 | } |
| 178 | 158 | ||
| 179 | unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, | 159 | static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, |
| 180 | unsigned long clr, unsigned long set, | 160 | unsigned long clr, unsigned long set, |
| 181 | unsigned long addr, unsigned int shift) | 161 | unsigned long addr, unsigned int shift) |
| 182 | { | 162 | { |
| @@ -228,6 +208,167 @@ static void kvmppc_pmd_free(pmd_t *pmdp) | |||
| 228 | kmem_cache_free(kvm_pmd_cache, pmdp); | 208 | kmem_cache_free(kvm_pmd_cache, pmdp); |
| 229 | } | 209 | } |
| 230 | 210 | ||
| 211 | static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, | ||
| 212 | unsigned long gpa, unsigned int shift) | ||
| 213 | |||
| 214 | { | ||
| 215 | unsigned long page_size = 1ul << shift; | ||
| 216 | unsigned long old; | ||
| 217 | |||
| 218 | old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift); | ||
| 219 | kvmppc_radix_tlbie_page(kvm, gpa, shift); | ||
| 220 | if (old & _PAGE_DIRTY) { | ||
| 221 | unsigned long gfn = gpa >> PAGE_SHIFT; | ||
| 222 | struct kvm_memory_slot *memslot; | ||
| 223 | |||
| 224 | memslot = gfn_to_memslot(kvm, gfn); | ||
| 225 | if (memslot && memslot->dirty_bitmap) | ||
| 226 | kvmppc_update_dirty_map(memslot, gfn, page_size); | ||
| 227 | } | ||
| 228 | } | ||
| 229 | |||
| 230 | /* | ||
| 231 | * kvmppc_free_p?d are used to free existing page tables, and recursively | ||
| 232 | * descend and clear and free children. | ||
| 233 | * Callers are responsible for flushing the PWC. | ||
| 234 | * | ||
| 235 | * When page tables are being unmapped/freed as part of page fault path | ||
| 236 | * (full == false), ptes are not expected. There is code to unmap them | ||
| 237 | * and emit a warning if encountered, but there may already be data | ||
| 238 | * corruption due to the unexpected mappings. | ||
| 239 | */ | ||
| 240 | static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full) | ||
| 241 | { | ||
| 242 | if (full) { | ||
| 243 | memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE); | ||
| 244 | } else { | ||
| 245 | pte_t *p = pte; | ||
| 246 | unsigned long it; | ||
| 247 | |||
| 248 | for (it = 0; it < PTRS_PER_PTE; ++it, ++p) { | ||
| 249 | if (pte_val(*p) == 0) | ||
| 250 | continue; | ||
| 251 | WARN_ON_ONCE(1); | ||
| 252 | kvmppc_unmap_pte(kvm, p, | ||
| 253 | pte_pfn(*p) << PAGE_SHIFT, | ||
| 254 | PAGE_SHIFT); | ||
| 255 | } | ||
| 256 | } | ||
| 257 | |||
| 258 | kvmppc_pte_free(pte); | ||
| 259 | } | ||
| 260 | |||
| 261 | static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full) | ||
| 262 | { | ||
| 263 | unsigned long im; | ||
| 264 | pmd_t *p = pmd; | ||
| 265 | |||
| 266 | for (im = 0; im < PTRS_PER_PMD; ++im, ++p) { | ||
| 267 | if (!pmd_present(*p)) | ||
| 268 | continue; | ||
| 269 | if (pmd_is_leaf(*p)) { | ||
| 270 | if (full) { | ||
| 271 | pmd_clear(p); | ||
| 272 | } else { | ||
| 273 | WARN_ON_ONCE(1); | ||
| 274 | kvmppc_unmap_pte(kvm, (pte_t *)p, | ||
| 275 | pte_pfn(*(pte_t *)p) << PAGE_SHIFT, | ||
| 276 | PMD_SHIFT); | ||
| 277 | } | ||
| 278 | } else { | ||
| 279 | pte_t *pte; | ||
| 280 | |||
| 281 | pte = pte_offset_map(p, 0); | ||
| 282 | kvmppc_unmap_free_pte(kvm, pte, full); | ||
| 283 | pmd_clear(p); | ||
| 284 | } | ||
| 285 | } | ||
| 286 | kvmppc_pmd_free(pmd); | ||
| 287 | } | ||
| 288 | |||
| 289 | static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud) | ||
| 290 | { | ||
| 291 | unsigned long iu; | ||
| 292 | pud_t *p = pud; | ||
| 293 | |||
| 294 | for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) { | ||
| 295 | if (!pud_present(*p)) | ||
| 296 | continue; | ||
| 297 | if (pud_huge(*p)) { | ||
| 298 | pud_clear(p); | ||
| 299 | } else { | ||
| 300 | pmd_t *pmd; | ||
| 301 | |||
| 302 | pmd = pmd_offset(p, 0); | ||
| 303 | kvmppc_unmap_free_pmd(kvm, pmd, true); | ||
| 304 | pud_clear(p); | ||
| 305 | } | ||
| 306 | } | ||
| 307 | pud_free(kvm->mm, pud); | ||
| 308 | } | ||
| 309 | |||
| 310 | void kvmppc_free_radix(struct kvm *kvm) | ||
| 311 | { | ||
| 312 | unsigned long ig; | ||
| 313 | pgd_t *pgd; | ||
| 314 | |||
| 315 | if (!kvm->arch.pgtable) | ||
| 316 | return; | ||
| 317 | pgd = kvm->arch.pgtable; | ||
| 318 | for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) { | ||
| 319 | pud_t *pud; | ||
| 320 | |||
| 321 | if (!pgd_present(*pgd)) | ||
| 322 | continue; | ||
| 323 | pud = pud_offset(pgd, 0); | ||
| 324 | kvmppc_unmap_free_pud(kvm, pud); | ||
| 325 | pgd_clear(pgd); | ||
| 326 | } | ||
| 327 | pgd_free(kvm->mm, kvm->arch.pgtable); | ||
| 328 | kvm->arch.pgtable = NULL; | ||
| 329 | } | ||
| 330 | |||
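The monolithic triple loop formerly in kvmppc_free_radix (removed further down in this hunk) is replaced by the per-level helpers above, which recurse from pud to pmd to pte and free each table after its children. A toy model of the same shape, with nothing kernel-specific about it:

	#include <stdlib.h>

	#define ENTRIES 8

	struct table { struct table *child[ENTRIES]; };

	/* Free a table and everything beneath it, children first --
	 * the same structure as kvmppc_unmap_free_pud -> _pmd -> _pte. */
	static void free_level(struct table *t, int level)
	{
		int i;

		for (i = 0; i < ENTRIES; i++)
			if (level > 0 && t->child[i])
				free_level(t->child[i], level - 1);
		free(t);
	}

	int main(void)
	{
		struct table *root = calloc(1, sizeof(*root));

		root->child[3] = calloc(1, sizeof(*root));
		free_level(root, 1);	/* child table freed, then root */
		return 0;
	}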
| 331 | static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd, | ||
| 332 | unsigned long gpa) | ||
| 333 | { | ||
| 334 | pte_t *pte = pte_offset_kernel(pmd, 0); | ||
| 335 | |||
| 336 | /* | ||
| 337 | * Clearing the pmd entry and then flushing the PWC ensures that the pte | ||
| 338 | * page can no longer be cached by the MMU, so it can be freed without | ||
| 339 | * flushing the PWC again. | ||
| 340 | */ | ||
| 341 | pmd_clear(pmd); | ||
| 342 | kvmppc_radix_flush_pwc(kvm); | ||
| 343 | |||
| 344 | kvmppc_unmap_free_pte(kvm, pte, false); | ||
| 345 | } | ||
| 346 | |||
| 347 | static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, | ||
| 348 | unsigned long gpa) | ||
| 349 | { | ||
| 350 | pmd_t *pmd = pmd_offset(pud, 0); | ||
| 351 | |||
| 352 | /* | ||
| 353 | * Clearing the pud entry and then flushing the PWC ensures that the pmd | ||
| 354 | * page and any child pte pages will no longer be cached by the MMU, | ||
| 355 | * so they can be freed without flushing the PWC again. | ||
| 356 | */ | ||
| 357 | pud_clear(pud); | ||
| 358 | kvmppc_radix_flush_pwc(kvm); | ||
| 359 | |||
| 360 | kvmppc_unmap_free_pmd(kvm, pmd, false); | ||
| 361 | } | ||
| 362 | |||
| 363 | /* | ||
| 364 | * A number of bits may differ between different faults to the same | ||
| 365 | * partition scope entry: the RC bits change in the course of cleaning | ||
| 366 | * and aging, and the write bit can change, either because the access | ||
| 367 | * was upgraded or because a read fault happened concurrently with a | ||
| 368 | * write fault that set those bits first. | ||
| 369 | */ | ||
| 370 | #define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)) | ||
| 371 | |||
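PTE_BITS_MUST_MATCH is used below with an XOR: two faults racing on the same entry may legitimately disagree only in the write/dirty/accessed bits, so (old ^ new) & PTE_BITS_MUST_MATCH must come out zero. A sketch with made-up bit positions (the real _PAGE_* values are arch-specific and differ):

	#include <stdio.h>

	/* Illustrative bit positions only; not the real powerpc values. */
	#define _PAGE_WRITE	0x002UL
	#define _PAGE_DIRTY	0x080UL
	#define _PAGE_ACCESSED	0x100UL
	#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))

	int main(void)
	{
		unsigned long old = 0x1000UL | _PAGE_ACCESSED;	          /* read fault */
		unsigned long new = 0x1000UL | _PAGE_WRITE | _PAGE_DIRTY; /* write fault */

		/* prints 0: the two faults agree on everything but W/D/A */
		printf("%lx\n", (old ^ new) & PTE_BITS_MUST_MATCH);
		return 0;
	}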
| 231 | static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, | 372 | static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, |
| 232 | unsigned int level, unsigned long mmu_seq) | 373 | unsigned int level, unsigned long mmu_seq) |
| 233 | { | 374 | { |
| @@ -235,7 +376,6 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, | |||
| 235 | pud_t *pud, *new_pud = NULL; | 376 | pud_t *pud, *new_pud = NULL; |
| 236 | pmd_t *pmd, *new_pmd = NULL; | 377 | pmd_t *pmd, *new_pmd = NULL; |
| 237 | pte_t *ptep, *new_ptep = NULL; | 378 | pte_t *ptep, *new_ptep = NULL; |
| 238 | unsigned long old; | ||
| 239 | int ret; | 379 | int ret; |
| 240 | 380 | ||
| 241 | /* Traverse the guest's 2nd-level tree, allocate new levels needed */ | 381 | /* Traverse the guest's 2nd-level tree, allocate new levels needed */ |
| @@ -273,42 +413,39 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, | |||
| 273 | if (pud_huge(*pud)) { | 413 | if (pud_huge(*pud)) { |
| 274 | unsigned long hgpa = gpa & PUD_MASK; | 414 | unsigned long hgpa = gpa & PUD_MASK; |
| 275 | 415 | ||
| 416 | /* Check if we raced and someone else has set the same thing */ | ||
| 417 | if (level == 2) { | ||
| 418 | if (pud_raw(*pud) == pte_raw(pte)) { | ||
| 419 | ret = 0; | ||
| 420 | goto out_unlock; | ||
| 421 | } | ||
| 422 | /* Valid 1GB page here already, add our extra bits */ | ||
| 423 | WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) & | ||
| 424 | PTE_BITS_MUST_MATCH); | ||
| 425 | kvmppc_radix_update_pte(kvm, (pte_t *)pud, | ||
| 426 | 0, pte_val(pte), hgpa, PUD_SHIFT); | ||
| 427 | ret = 0; | ||
| 428 | goto out_unlock; | ||
| 429 | } | ||
| 276 | /* | 430 | /* |
| 277 | * If we raced with another CPU which has just put | 431 | * If we raced with another CPU which has just put |
| 278 | * a 1GB pte in after we saw a pmd page, try again. | 432 | * a 1GB pte in after we saw a pmd page, try again. |
| 279 | */ | 433 | */ |
| 280 | if (level <= 1 && !new_pmd) { | 434 | if (!new_pmd) { |
| 281 | ret = -EAGAIN; | 435 | ret = -EAGAIN; |
| 282 | goto out_unlock; | 436 | goto out_unlock; |
| 283 | } | 437 | } |
| 284 | /* Check if we raced and someone else has set the same thing */ | ||
| 285 | if (level == 2 && pud_raw(*pud) == pte_raw(pte)) { | ||
| 286 | ret = 0; | ||
| 287 | goto out_unlock; | ||
| 288 | } | ||
| 289 | /* Valid 1GB page here already, remove it */ | 438 | /* Valid 1GB page here already, remove it */ |
| 290 | old = kvmppc_radix_update_pte(kvm, (pte_t *)pud, | 439 | kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT); |
| 291 | ~0UL, 0, hgpa, PUD_SHIFT); | ||
| 292 | kvmppc_radix_tlbie_page(kvm, hgpa, PUD_SHIFT); | ||
| 293 | if (old & _PAGE_DIRTY) { | ||
| 294 | unsigned long gfn = hgpa >> PAGE_SHIFT; | ||
| 295 | struct kvm_memory_slot *memslot; | ||
| 296 | memslot = gfn_to_memslot(kvm, gfn); | ||
| 297 | if (memslot && memslot->dirty_bitmap) | ||
| 298 | kvmppc_update_dirty_map(memslot, | ||
| 299 | gfn, PUD_SIZE); | ||
| 300 | } | ||
| 301 | } | 440 | } |
| 302 | if (level == 2) { | 441 | if (level == 2) { |
| 303 | if (!pud_none(*pud)) { | 442 | if (!pud_none(*pud)) { |
| 304 | /* | 443 | /* |
| 305 | * There's a page table page here, but we wanted to | 444 | * There's a page table page here, but we wanted to |
| 306 | * install a large page, so remove and free the page | 445 | * install a large page, so remove and free the page |
| 307 | * table page. new_pmd will be NULL since level == 2. | 446 | * table page. |
| 308 | */ | 447 | */ |
| 309 | new_pmd = pmd_offset(pud, 0); | 448 | kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa); |
| 310 | pud_clear(pud); | ||
| 311 | kvmppc_radix_flush_pwc(kvm, gpa); | ||
| 312 | } | 449 | } |
| 313 | kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte); | 450 | kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte); |
| 314 | ret = 0; | 451 | ret = 0; |
| @@ -324,42 +461,40 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, | |||
| 324 | if (pmd_is_leaf(*pmd)) { | 461 | if (pmd_is_leaf(*pmd)) { |
| 325 | unsigned long lgpa = gpa & PMD_MASK; | 462 | unsigned long lgpa = gpa & PMD_MASK; |
| 326 | 463 | ||
| 464 | /* Check if we raced and someone else has set the same thing */ | ||
| 465 | if (level == 1) { | ||
| 466 | if (pmd_raw(*pmd) == pte_raw(pte)) { | ||
| 467 | ret = 0; | ||
| 468 | goto out_unlock; | ||
| 469 | } | ||
| 470 | /* Valid 2MB page here already, add our extra bits */ | ||
| 471 | WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) & | ||
| 472 | PTE_BITS_MUST_MATCH); | ||
| 473 | kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd), | ||
| 474 | 0, pte_val(pte), lgpa, PMD_SHIFT); | ||
| 475 | ret = 0; | ||
| 476 | goto out_unlock; | ||
| 477 | } | ||
| 478 | |||
| 327 | /* | 479 | /* |
| 328 | * If we raced with another CPU which has just put | 480 | * If we raced with another CPU which has just put |
| 329 | * a 2MB pte in after we saw a pte page, try again. | 481 | * a 2MB pte in after we saw a pte page, try again. |
| 330 | */ | 482 | */ |
| 331 | if (level == 0 && !new_ptep) { | 483 | if (!new_ptep) { |
| 332 | ret = -EAGAIN; | 484 | ret = -EAGAIN; |
| 333 | goto out_unlock; | 485 | goto out_unlock; |
| 334 | } | 486 | } |
| 335 | /* Check if we raced and someone else has set the same thing */ | ||
| 336 | if (level == 1 && pmd_raw(*pmd) == pte_raw(pte)) { | ||
| 337 | ret = 0; | ||
| 338 | goto out_unlock; | ||
| 339 | } | ||
| 340 | /* Valid 2MB page here already, remove it */ | 487 | /* Valid 2MB page here already, remove it */ |
| 341 | old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd), | 488 | kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT); |
| 342 | ~0UL, 0, lgpa, PMD_SHIFT); | ||
| 343 | kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT); | ||
| 344 | if (old & _PAGE_DIRTY) { | ||
| 345 | unsigned long gfn = lgpa >> PAGE_SHIFT; | ||
| 346 | struct kvm_memory_slot *memslot; | ||
| 347 | memslot = gfn_to_memslot(kvm, gfn); | ||
| 348 | if (memslot && memslot->dirty_bitmap) | ||
| 349 | kvmppc_update_dirty_map(memslot, | ||
| 350 | gfn, PMD_SIZE); | ||
| 351 | } | ||
| 352 | } | 489 | } |
| 353 | if (level == 1) { | 490 | if (level == 1) { |
| 354 | if (!pmd_none(*pmd)) { | 491 | if (!pmd_none(*pmd)) { |
| 355 | /* | 492 | /* |
| 356 | * There's a page table page here, but we wanted to | 493 | * There's a page table page here, but we wanted to |
| 357 | * install a large page, so remove and free the page | 494 | * install a large page, so remove and free the page |
| 358 | * table page. new_ptep will be NULL since level == 1. | 495 | * table page. |
| 359 | */ | 496 | */ |
| 360 | new_ptep = pte_offset_kernel(pmd, 0); | 497 | kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa); |
| 361 | pmd_clear(pmd); | ||
| 362 | kvmppc_radix_flush_pwc(kvm, gpa); | ||
| 363 | } | 498 | } |
| 364 | kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte); | 499 | kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte); |
| 365 | ret = 0; | 500 | ret = 0; |
| @@ -378,12 +513,12 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, | |||
| 378 | ret = 0; | 513 | ret = 0; |
| 379 | goto out_unlock; | 514 | goto out_unlock; |
| 380 | } | 515 | } |
| 381 | /* PTE was previously valid, so invalidate it */ | 516 | /* Valid page here already, add our extra bits */ |
| 382 | old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, | 517 | WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) & |
| 383 | 0, gpa, 0); | 518 | PTE_BITS_MUST_MATCH); |
| 384 | kvmppc_radix_tlbie_page(kvm, gpa, 0); | 519 | kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0); |
| 385 | if (old & _PAGE_DIRTY) | 520 | ret = 0; |
| 386 | mark_page_dirty(kvm, gpa >> PAGE_SHIFT); | 521 | goto out_unlock; |
| 387 | } | 522 | } |
| 388 | kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte); | 523 | kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte); |
| 389 | ret = 0; | 524 | ret = 0; |
| @@ -565,9 +700,13 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 565 | unsigned long mask = (1ul << shift) - PAGE_SIZE; | 700 | unsigned long mask = (1ul << shift) - PAGE_SIZE; |
| 566 | pte = __pte(pte_val(pte) | (hva & mask)); | 701 | pte = __pte(pte_val(pte) | (hva & mask)); |
| 567 | } | 702 | } |
| 568 | if (!(writing || upgrade_write)) | 703 | pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED); |
| 569 | pte = __pte(pte_val(pte) & ~ _PAGE_WRITE); | 704 | if (writing || upgrade_write) { |
| 570 | pte = __pte(pte_val(pte) | _PAGE_EXEC); | 705 | if (pte_val(pte) & _PAGE_WRITE) |
| 706 | pte = __pte(pte_val(pte) | _PAGE_DIRTY); | ||
| 707 | } else { | ||
| 708 | pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY)); | ||
| 709 | } | ||
| 571 | } | 710 | } |
| 572 | 711 | ||
| 573 | /* Allocate space in the tree and write the PTE */ | 712 | /* Allocate space in the tree and write the PTE */ |
| @@ -734,51 +873,6 @@ int kvmppc_init_vm_radix(struct kvm *kvm) | |||
| 734 | return 0; | 873 | return 0; |
| 735 | } | 874 | } |
| 736 | 875 | ||
| 737 | void kvmppc_free_radix(struct kvm *kvm) | ||
| 738 | { | ||
| 739 | unsigned long ig, iu, im; | ||
| 740 | pte_t *pte; | ||
| 741 | pmd_t *pmd; | ||
| 742 | pud_t *pud; | ||
| 743 | pgd_t *pgd; | ||
| 744 | |||
| 745 | if (!kvm->arch.pgtable) | ||
| 746 | return; | ||
| 747 | pgd = kvm->arch.pgtable; | ||
| 748 | for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) { | ||
| 749 | if (!pgd_present(*pgd)) | ||
| 750 | continue; | ||
| 751 | pud = pud_offset(pgd, 0); | ||
| 752 | for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++pud) { | ||
| 753 | if (!pud_present(*pud)) | ||
| 754 | continue; | ||
| 755 | if (pud_huge(*pud)) { | ||
| 756 | pud_clear(pud); | ||
| 757 | continue; | ||
| 758 | } | ||
| 759 | pmd = pmd_offset(pud, 0); | ||
| 760 | for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) { | ||
| 761 | if (pmd_is_leaf(*pmd)) { | ||
| 762 | pmd_clear(pmd); | ||
| 763 | continue; | ||
| 764 | } | ||
| 765 | if (!pmd_present(*pmd)) | ||
| 766 | continue; | ||
| 767 | pte = pte_offset_map(pmd, 0); | ||
| 768 | memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE); | ||
| 769 | kvmppc_pte_free(pte); | ||
| 770 | pmd_clear(pmd); | ||
| 771 | } | ||
| 772 | kvmppc_pmd_free(pmd_offset(pud, 0)); | ||
| 773 | pud_clear(pud); | ||
| 774 | } | ||
| 775 | pud_free(kvm->mm, pud_offset(pgd, 0)); | ||
| 776 | pgd_clear(pgd); | ||
| 777 | } | ||
| 778 | pgd_free(kvm->mm, kvm->arch.pgtable); | ||
| 779 | kvm->arch.pgtable = NULL; | ||
| 780 | } | ||
| 781 | |||
| 782 | static void pte_ctor(void *addr) | 876 | static void pte_ctor(void *addr) |
| 783 | { | 877 | { |
| 784 | memset(addr, 0, RADIX_PTE_TABLE_SIZE); | 878 | memset(addr, 0, RADIX_PTE_TABLE_SIZE); |
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 4dffa611376d..d066e37551ec 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c | |||
| @@ -176,14 +176,12 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, | |||
| 176 | 176 | ||
| 177 | if (!tbltmp) | 177 | if (!tbltmp) |
| 178 | continue; | 178 | continue; |
| 179 | /* | 179 | /* Make sure hardware table parameters are compatible */ |
| 180 | * Make sure hardware table parameters are exactly the same; | 180 | if ((tbltmp->it_page_shift <= stt->page_shift) && |
| 181 | * this is used in the TCE handlers where boundary checks | 181 | (tbltmp->it_offset << tbltmp->it_page_shift == |
| 182 | * use only the first attached table. | 182 | stt->offset << stt->page_shift) && |
| 183 | */ | 183 | (tbltmp->it_size << tbltmp->it_page_shift == |
| 184 | if ((tbltmp->it_page_shift == stt->page_shift) && | 184 | stt->size << stt->page_shift)) { |
| 185 | (tbltmp->it_offset == stt->offset) && | ||
| 186 | (tbltmp->it_size == stt->size)) { | ||
| 187 | /* | 185 | /* |
| 188 | * Reference the table to avoid races with | 186 | * Reference the table to avoid races with |
| 189 | * add/remove DMA windows. | 187 | * add/remove DMA windows. |
| @@ -237,7 +235,7 @@ static void release_spapr_tce_table(struct rcu_head *head) | |||
| 237 | kfree(stt); | 235 | kfree(stt); |
| 238 | } | 236 | } |
| 239 | 237 | ||
| 240 | static int kvm_spapr_tce_fault(struct vm_fault *vmf) | 238 | static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf) |
| 241 | { | 239 | { |
| 242 | struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data; | 240 | struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data; |
| 243 | struct page *page; | 241 | struct page *page; |
| @@ -302,7 +300,8 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, | |||
| 302 | int ret = -ENOMEM; | 300 | int ret = -ENOMEM; |
| 303 | int i; | 301 | int i; |
| 304 | 302 | ||
| 305 | if (!args->size) | 303 | if (!args->size || args->page_shift < 12 || args->page_shift > 34 || |
| 304 | (args->offset + args->size > (ULLONG_MAX >> args->page_shift))) | ||
| 306 | return -EINVAL; | 305 | return -EINVAL; |
| 307 | 306 | ||
| 308 | size = _ALIGN_UP(args->size, PAGE_SIZE >> 3); | 307 | size = _ALIGN_UP(args->size, PAGE_SIZE >> 3); |
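The added check bounds page_shift to the 4K..16G range (12..34) and rejects windows whose end, scaled back to a byte address, would not fit in 64 bits. A hedged re-statement of that test in plain C (field names mirror the ioctl args, but this is not kernel code):

	#include <stdio.h>

	static int window_ok(unsigned long long size, unsigned int page_shift,
			     unsigned long long offset)
	{
		if (!size || page_shift < 12 || page_shift > 34)
			return 0;
		/* offset and size are in pages; their sum times the page
		 * size must still be representable as a 64-bit address. */
		if (offset + size > (~0ULL >> page_shift))
			return 0;
		return 1;
	}

	int main(void)
	{
		printf("%d\n", window_ok(1ULL << 20, 16, 0));          /* 1 */
		printf("%d\n", window_ok(1ULL << 52, 16, 1ULL << 47)); /* 0: wraps */
		return 0;
	}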
| @@ -396,7 +395,7 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm, | |||
| 396 | return H_SUCCESS; | 395 | return H_SUCCESS; |
| 397 | } | 396 | } |
| 398 | 397 | ||
| 399 | static long kvmppc_tce_iommu_unmap(struct kvm *kvm, | 398 | static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm, |
| 400 | struct iommu_table *tbl, unsigned long entry) | 399 | struct iommu_table *tbl, unsigned long entry) |
| 401 | { | 400 | { |
| 402 | enum dma_data_direction dir = DMA_NONE; | 401 | enum dma_data_direction dir = DMA_NONE; |
| @@ -416,7 +415,24 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm, | |||
| 416 | return ret; | 415 | return ret; |
| 417 | } | 416 | } |
| 418 | 417 | ||
| 419 | long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, | 418 | static long kvmppc_tce_iommu_unmap(struct kvm *kvm, |
| 419 | struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, | ||
| 420 | unsigned long entry) | ||
| 421 | { | ||
| 422 | unsigned long i, ret = H_SUCCESS; | ||
| 423 | unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); | ||
| 424 | unsigned long io_entry = entry * subpages; | ||
| 425 | |||
| 426 | for (i = 0; i < subpages; ++i) { | ||
| 427 | ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i); | ||
| 428 | if (ret != H_SUCCESS) | ||
| 429 | break; | ||
| 430 | } | ||
| 431 | |||
| 432 | return ret; | ||
| 433 | } | ||
| 434 | |||
| 435 | long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, | ||
| 420 | unsigned long entry, unsigned long ua, | 436 | unsigned long entry, unsigned long ua, |
| 421 | enum dma_data_direction dir) | 437 | enum dma_data_direction dir) |
| 422 | { | 438 | { |
| @@ -453,6 +469,27 @@ long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, | |||
| 453 | return 0; | 469 | return 0; |
| 454 | } | 470 | } |
| 455 | 471 | ||
| 472 | static long kvmppc_tce_iommu_map(struct kvm *kvm, | ||
| 473 | struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, | ||
| 474 | unsigned long entry, unsigned long ua, | ||
| 475 | enum dma_data_direction dir) | ||
| 476 | { | ||
| 477 | unsigned long i, pgoff, ret = H_SUCCESS; | ||
| 478 | unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); | ||
| 479 | unsigned long io_entry = entry * subpages; | ||
| 480 | |||
| 481 | for (i = 0, pgoff = 0; i < subpages; | ||
| 482 | ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) { | ||
| 483 | |||
| 484 | ret = kvmppc_tce_iommu_do_map(kvm, tbl, | ||
| 485 | io_entry + i, ua + pgoff, dir); | ||
| 486 | if (ret != H_SUCCESS) | ||
| 487 | break; | ||
| 488 | } | ||
| 489 | |||
| 490 | return ret; | ||
| 491 | } | ||
| 492 | |||
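With the compatibility check relaxed earlier in this file (it_page_shift <= stt->page_shift), one guest-visible TCE entry now covers 2^(stt->page_shift - tbl->it_page_shift) hardware IOMMU entries, and the new wrappers fan each map/unmap out over that range. The index arithmetic, extracted into a standalone sketch (a hypothetical 64K-over-4K window):

	#include <stdio.h>

	int main(void)
	{
		unsigned int stt_page_shift = 16, it_page_shift = 12;
		unsigned long entry = 5;	/* guest TCE index */
		unsigned long subpages = 1UL << (stt_page_shift - it_page_shift);
		unsigned long io_entry = entry * subpages;
		unsigned long i, pgoff;

		/* one 64K guest entry -> 16 consecutive 4K hardware entries */
		for (i = 0, pgoff = 0; i < subpages;
		     ++i, pgoff += 1UL << it_page_shift)
			printf("hw entry %lu, ua offset 0x%lx\n",
			       io_entry + i, pgoff);
		return 0;
	}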
| 456 | long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | 493 | long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, |
| 457 | unsigned long ioba, unsigned long tce) | 494 | unsigned long ioba, unsigned long tce) |
| 458 | { | 495 | { |
| @@ -491,10 +528,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | |||
| 491 | 528 | ||
| 492 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { | 529 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { |
| 493 | if (dir == DMA_NONE) | 530 | if (dir == DMA_NONE) |
| 494 | ret = kvmppc_tce_iommu_unmap(vcpu->kvm, | 531 | ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt, |
| 495 | stit->tbl, entry); | 532 | stit->tbl, entry); |
| 496 | else | 533 | else |
| 497 | ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl, | 534 | ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl, |
| 498 | entry, ua, dir); | 535 | entry, ua, dir); |
| 499 | 536 | ||
| 500 | if (ret == H_SUCCESS) | 537 | if (ret == H_SUCCESS) |
| @@ -570,7 +607,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, | |||
| 570 | return H_PARAMETER; | 607 | return H_PARAMETER; |
| 571 | 608 | ||
| 572 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { | 609 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { |
| 573 | ret = kvmppc_tce_iommu_map(vcpu->kvm, | 610 | ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, |
| 574 | stit->tbl, entry + i, ua, | 611 | stit->tbl, entry + i, ua, |
| 575 | iommu_tce_direction(tce)); | 612 | iommu_tce_direction(tce)); |
| 576 | 613 | ||
| @@ -615,10 +652,10 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu, | |||
| 615 | return H_PARAMETER; | 652 | return H_PARAMETER; |
| 616 | 653 | ||
| 617 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { | 654 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { |
| 618 | unsigned long entry = ioba >> stit->tbl->it_page_shift; | 655 | unsigned long entry = ioba >> stt->page_shift; |
| 619 | 656 | ||
| 620 | for (i = 0; i < npages; ++i) { | 657 | for (i = 0; i < npages; ++i) { |
| 621 | ret = kvmppc_tce_iommu_unmap(vcpu->kvm, | 658 | ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt, |
| 622 | stit->tbl, entry + i); | 659 | stit->tbl, entry + i); |
| 623 | 660 | ||
| 624 | if (ret == H_SUCCESS) | 661 | if (ret == H_SUCCESS) |
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 6651f736a0b1..925fc316a104 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c | |||
| @@ -221,7 +221,7 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm, | |||
| 221 | return H_SUCCESS; | 221 | return H_SUCCESS; |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm, | 224 | static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm, |
| 225 | struct iommu_table *tbl, unsigned long entry) | 225 | struct iommu_table *tbl, unsigned long entry) |
| 226 | { | 226 | { |
| 227 | enum dma_data_direction dir = DMA_NONE; | 227 | enum dma_data_direction dir = DMA_NONE; |
| @@ -245,7 +245,24 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm, | |||
| 245 | return ret; | 245 | return ret; |
| 246 | } | 246 | } |
| 247 | 247 | ||
| 248 | static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, | 248 | static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm, |
| 249 | struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, | ||
| 250 | unsigned long entry) | ||
| 251 | { | ||
| 252 | unsigned long i, ret = H_SUCCESS; | ||
| 253 | unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); | ||
| 254 | unsigned long io_entry = entry * subpages; | ||
| 255 | |||
| 256 | for (i = 0; i < subpages; ++i) { | ||
| 257 | ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i); | ||
| 258 | if (ret != H_SUCCESS) | ||
| 259 | break; | ||
| 260 | } | ||
| 261 | |||
| 262 | return ret; | ||
| 263 | } | ||
| 264 | |||
| 265 | static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, | ||
| 249 | unsigned long entry, unsigned long ua, | 266 | unsigned long entry, unsigned long ua, |
| 250 | enum dma_data_direction dir) | 267 | enum dma_data_direction dir) |
| 251 | { | 268 | { |
| @@ -290,6 +307,27 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, | |||
| 290 | return 0; | 307 | return 0; |
| 291 | } | 308 | } |
| 292 | 309 | ||
| 310 | static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, | ||
| 311 | struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, | ||
| 312 | unsigned long entry, unsigned long ua, | ||
| 313 | enum dma_data_direction dir) | ||
| 314 | { | ||
| 315 | unsigned long i, pgoff, ret = H_SUCCESS; | ||
| 316 | unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); | ||
| 317 | unsigned long io_entry = entry * subpages; | ||
| 318 | |||
| 319 | for (i = 0, pgoff = 0; i < subpages; | ||
| 320 | ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) { | ||
| 321 | |||
| 322 | ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl, | ||
| 323 | io_entry + i, ua + pgoff, dir); | ||
| 324 | if (ret != H_SUCCESS) | ||
| 325 | break; | ||
| 326 | } | ||
| 327 | |||
| 328 | return ret; | ||
| 329 | } | ||
| 330 | |||
| 293 | long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | 331 | long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, |
| 294 | unsigned long ioba, unsigned long tce) | 332 | unsigned long ioba, unsigned long tce) |
| 295 | { | 333 | { |
| @@ -327,10 +365,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | |||
| 327 | 365 | ||
| 328 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { | 366 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { |
| 329 | if (dir == DMA_NONE) | 367 | if (dir == DMA_NONE) |
| 330 | ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, | 368 | ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt, |
| 331 | stit->tbl, entry); | 369 | stit->tbl, entry); |
| 332 | else | 370 | else |
| 333 | ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, | 371 | ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, |
| 334 | stit->tbl, entry, ua, dir); | 372 | stit->tbl, entry, ua, dir); |
| 335 | 373 | ||
| 336 | if (ret == H_SUCCESS) | 374 | if (ret == H_SUCCESS) |
| @@ -477,7 +515,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, | |||
| 477 | return H_PARAMETER; | 515 | return H_PARAMETER; |
| 478 | 516 | ||
| 479 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { | 517 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { |
| 480 | ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, | 518 | ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, |
| 481 | stit->tbl, entry + i, ua, | 519 | stit->tbl, entry + i, ua, |
| 482 | iommu_tce_direction(tce)); | 520 | iommu_tce_direction(tce)); |
| 483 | 521 | ||
| @@ -526,10 +564,10 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, | |||
| 526 | return H_PARAMETER; | 564 | return H_PARAMETER; |
| 527 | 565 | ||
| 528 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { | 566 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { |
| 529 | unsigned long entry = ioba >> stit->tbl->it_page_shift; | 567 | unsigned long entry = ioba >> stt->page_shift; |
| 530 | 568 | ||
| 531 | for (i = 0; i < npages; ++i) { | 569 | for (i = 0; i < npages; ++i) { |
| 532 | ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, | 570 | ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt, |
| 533 | stit->tbl, entry + i); | 571 | stit->tbl, entry + i); |
| 534 | 572 | ||
| 535 | if (ret == H_SUCCESS) | 573 | if (ret == H_SUCCESS) |
| @@ -571,7 +609,7 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | |||
| 571 | page = stt->pages[idx / TCES_PER_PAGE]; | 609 | page = stt->pages[idx / TCES_PER_PAGE]; |
| 572 | tbl = (u64 *)page_address(page); | 610 | tbl = (u64 *)page_address(page); |
| 573 | 611 | ||
| 574 | vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE]; | 612 | vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE]; |
| 575 | 613 | ||
| 576 | return H_SUCCESS; | 614 | return H_SUCCESS; |
| 577 | } | 615 | } |
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 68d68983948e..36b11c5a0dbb 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c | |||
| @@ -23,7 +23,9 @@ | |||
| 23 | #include <asm/reg.h> | 23 | #include <asm/reg.h> |
| 24 | #include <asm/switch_to.h> | 24 | #include <asm/switch_to.h> |
| 25 | #include <asm/time.h> | 25 | #include <asm/time.h> |
| 26 | #include <asm/tm.h> | ||
| 26 | #include "book3s.h" | 27 | #include "book3s.h" |
| 28 | #include <asm/asm-prototypes.h> | ||
| 27 | 29 | ||
| 28 | #define OP_19_XOP_RFID 18 | 30 | #define OP_19_XOP_RFID 18 |
| 29 | #define OP_19_XOP_RFI 50 | 31 | #define OP_19_XOP_RFI 50 |
| @@ -47,6 +49,12 @@ | |||
| 47 | #define OP_31_XOP_EIOIO 854 | 49 | #define OP_31_XOP_EIOIO 854 |
| 48 | #define OP_31_XOP_SLBMFEE 915 | 50 | #define OP_31_XOP_SLBMFEE 915 |
| 49 | 51 | ||
| 52 | #define OP_31_XOP_TBEGIN 654 | ||
| 53 | #define OP_31_XOP_TABORT 910 | ||
| 54 | |||
| 55 | #define OP_31_XOP_TRECLAIM 942 | ||
| 56 | #define OP_31_XOP_TRCHKPT 1006 | ||
| 57 | |||
| 50 | /* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */ | 58 | /* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */ |
| 51 | #define OP_31_XOP_DCBZ 1010 | 59 | #define OP_31_XOP_DCBZ 1010 |
| 52 | 60 | ||
| @@ -87,6 +95,157 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level) | |||
| 87 | return true; | 95 | return true; |
| 88 | } | 96 | } |
| 89 | 97 | ||
| 98 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 99 | static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu) | ||
| 100 | { | ||
| 101 | memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0], | ||
| 102 | sizeof(vcpu->arch.gpr_tm)); | ||
| 103 | memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp, | ||
| 104 | sizeof(struct thread_fp_state)); | ||
| 105 | memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr, | ||
| 106 | sizeof(struct thread_vr_state)); | ||
| 107 | vcpu->arch.ppr_tm = vcpu->arch.ppr; | ||
| 108 | vcpu->arch.dscr_tm = vcpu->arch.dscr; | ||
| 109 | vcpu->arch.amr_tm = vcpu->arch.amr; | ||
| 110 | vcpu->arch.ctr_tm = vcpu->arch.regs.ctr; | ||
| 111 | vcpu->arch.tar_tm = vcpu->arch.tar; | ||
| 112 | vcpu->arch.lr_tm = vcpu->arch.regs.link; | ||
| 113 | vcpu->arch.cr_tm = vcpu->arch.cr; | ||
| 114 | vcpu->arch.xer_tm = vcpu->arch.regs.xer; | ||
| 115 | vcpu->arch.vrsave_tm = vcpu->arch.vrsave; | ||
| 116 | } | ||
| 117 | |||
| 118 | static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu) | ||
| 119 | { | ||
| 120 | memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0], | ||
| 121 | sizeof(vcpu->arch.regs.gpr)); | ||
| 122 | memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm, | ||
| 123 | sizeof(struct thread_fp_state)); | ||
| 124 | memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm, | ||
| 125 | sizeof(struct thread_vr_state)); | ||
| 126 | vcpu->arch.ppr = vcpu->arch.ppr_tm; | ||
| 127 | vcpu->arch.dscr = vcpu->arch.dscr_tm; | ||
| 128 | vcpu->arch.amr = vcpu->arch.amr_tm; | ||
| 129 | vcpu->arch.regs.ctr = vcpu->arch.ctr_tm; | ||
| 130 | vcpu->arch.tar = vcpu->arch.tar_tm; | ||
| 131 | vcpu->arch.regs.link = vcpu->arch.lr_tm; | ||
| 132 | vcpu->arch.cr = vcpu->arch.cr_tm; | ||
| 133 | vcpu->arch.regs.xer = vcpu->arch.xer_tm; | ||
| 134 | vcpu->arch.vrsave = vcpu->arch.vrsave_tm; | ||
| 135 | } | ||
| 136 | |||
| 137 | static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val) | ||
| 138 | { | ||
| 139 | unsigned long guest_msr = kvmppc_get_msr(vcpu); | ||
| 140 | int fc_val = ra_val ? ra_val : 1; | ||
| 141 | uint64_t texasr; | ||
| 142 | |||
| 143 | /* CR0 = 0 | MSR[TS] | 0 */ | ||
| 144 | vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) | | ||
| 145 | (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) | ||
| 146 | << CR0_SHIFT); | ||
| 147 | |||
| 148 | preempt_disable(); | ||
| 149 | tm_enable(); | ||
| 150 | texasr = mfspr(SPRN_TEXASR); | ||
| 151 | kvmppc_save_tm_pr(vcpu); | ||
| 152 | kvmppc_copyfrom_vcpu_tm(vcpu); | ||
| 153 | |||
| 154 | /* failure recording depends on Failure Summary bit */ | ||
| 155 | if (!(texasr & TEXASR_FS)) { | ||
| 156 | texasr &= ~TEXASR_FC; | ||
| 157 | texasr |= ((u64)fc_val << TEXASR_FC_LG) | TEXASR_FS; | ||
| 158 | |||
| 159 | texasr &= ~(TEXASR_PR | TEXASR_HV); | ||
| 160 | if (kvmppc_get_msr(vcpu) & MSR_PR) | ||
| 161 | texasr |= TEXASR_PR; | ||
| 162 | |||
| 163 | if (kvmppc_get_msr(vcpu) & MSR_HV) | ||
| 164 | texasr |= TEXASR_HV; | ||
| 165 | |||
| 166 | vcpu->arch.texasr = texasr; | ||
| 167 | vcpu->arch.tfiar = kvmppc_get_pc(vcpu); | ||
| 168 | mtspr(SPRN_TEXASR, texasr); | ||
| 169 | mtspr(SPRN_TFIAR, vcpu->arch.tfiar); | ||
| 170 | } | ||
| 171 | tm_disable(); | ||
| 172 | /* | ||
| 173 | * treclaim needs to quit to non-transactional state. | ||
| 174 | */ | ||
| 175 | guest_msr &= ~(MSR_TS_MASK); | ||
| 176 | kvmppc_set_msr(vcpu, guest_msr); | ||
| 177 | preempt_enable(); | ||
| 178 | |||
| 179 | if (vcpu->arch.shadow_fscr & FSCR_TAR) | ||
| 180 | mtspr(SPRN_TAR, vcpu->arch.tar); | ||
| 181 | } | ||
| 182 | |||
| 183 | static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu) | ||
| 184 | { | ||
| 185 | unsigned long guest_msr = kvmppc_get_msr(vcpu); | ||
| 186 | |||
| 187 | preempt_disable(); | ||
| 188 | /* | ||
| 189 | * We need to flush FP/VEC/VSX to the vcpu save area before | ||
| 190 | * copying. | ||
| 191 | */ | ||
| 192 | kvmppc_giveup_ext(vcpu, MSR_VSX); | ||
| 193 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | ||
| 194 | kvmppc_copyto_vcpu_tm(vcpu); | ||
| 195 | kvmppc_save_tm_sprs(vcpu); | ||
| 196 | |||
| 197 | /* | ||
| 198 | * As a result of trecheckpoint, set TS to suspended. | ||
| 199 | */ | ||
| 200 | guest_msr &= ~(MSR_TS_MASK); | ||
| 201 | guest_msr |= MSR_TS_S; | ||
| 202 | kvmppc_set_msr(vcpu, guest_msr); | ||
| 203 | kvmppc_restore_tm_pr(vcpu); | ||
| 204 | preempt_enable(); | ||
| 205 | } | ||
| 206 | |||
| 207 | /* emulate tabort at guest privilege state */ | ||
| 208 | void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) | ||
| 209 | { | ||
| 210 | /* Currently we only emulate tabort; there is no emulation of other | ||
| 211 | * tabort variants since there is no kernel usage of them at | ||
| 212 | * present. | ||
| 213 | */ | ||
| 214 | unsigned long guest_msr = kvmppc_get_msr(vcpu); | ||
| 215 | uint64_t org_texasr; | ||
| 216 | |||
| 217 | preempt_disable(); | ||
| 218 | tm_enable(); | ||
| 219 | org_texasr = mfspr(SPRN_TEXASR); | ||
| 220 | tm_abort(ra_val); | ||
| 221 | |||
| 222 | /* CR0 = 0 | MSR[TS] | 0 */ | ||
| 223 | vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) | | ||
| 224 | (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) | ||
| 225 | << CR0_SHIFT); | ||
| 226 | |||
| 227 | vcpu->arch.texasr = mfspr(SPRN_TEXASR); | ||
| 228 | /* failure recording depends on Failure Summary bit, | ||
| 229 | * and tabort will be treated as a nop in non-transactional | ||
| 230 | * state. | ||
| 231 | */ | ||
| 232 | if (!(org_texasr & TEXASR_FS) && | ||
| 233 | MSR_TM_ACTIVE(guest_msr)) { | ||
| 234 | vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV); | ||
| 235 | if (guest_msr & MSR_PR) | ||
| 236 | vcpu->arch.texasr |= TEXASR_PR; | ||
| 237 | |||
| 238 | if (guest_msr & MSR_HV) | ||
| 239 | vcpu->arch.texasr |= TEXASR_HV; | ||
| 240 | |||
| 241 | vcpu->arch.tfiar = kvmppc_get_pc(vcpu); | ||
| 242 | } | ||
| 243 | tm_disable(); | ||
| 244 | preempt_enable(); | ||
| 245 | } | ||
| 246 | |||
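Both the treclaim and tabort emulation compute CR0 as 0 | MSR[TS] | 0, i.e. the two transaction-state bits from the MSR land in the middle of the CR0 nibble. A worked example of that bit movement, assuming the usual reg.h values (MSR_TS_S_LG = 33, CR0_SHIFT = 28, CR0_MASK = 0xf); the constants here are assumptions, not quoted from this patch:

	#include <stdio.h>

	/* Assumed constants, matching the usual powerpc reg.h definitions. */
	#define MSR_TS_S_LG	33
	#define MSR_TS_S	(1ULL << MSR_TS_S_LG)
	#define MSR_TS_MASK	(3ULL << MSR_TS_S_LG)
	#define CR0_SHIFT	28
	#define CR0_MASK	0xFUL

	int main(void)
	{
		unsigned long long msr = MSR_TS_S;	/* suspended transaction */
		unsigned long cr = 0x12345678UL;

		/* TS (bits 33:34 of the MSR) moves into bits 1:2 of CR0 */
		cr = (cr & ~(CR0_MASK << CR0_SHIFT)) |
		     (((msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) << CR0_SHIFT);
		printf("cr = %lx\n", cr);	/* 0x22345678: CR0 = 0b0010 */
		return 0;
	}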
| 247 | #endif | ||
| 248 | |||
| 90 | int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | 249 | int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 91 | unsigned int inst, int *advance) | 250 | unsigned int inst, int *advance) |
| 92 | { | 251 | { |
| @@ -117,11 +276,28 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 117 | case 19: | 276 | case 19: |
| 118 | switch (get_xop(inst)) { | 277 | switch (get_xop(inst)) { |
| 119 | case OP_19_XOP_RFID: | 278 | case OP_19_XOP_RFID: |
| 120 | case OP_19_XOP_RFI: | 279 | case OP_19_XOP_RFI: { |
| 280 | unsigned long srr1 = kvmppc_get_srr1(vcpu); | ||
| 281 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 282 | unsigned long cur_msr = kvmppc_get_msr(vcpu); | ||
| 283 | |||
| 284 | /* | ||
| 285 | * Add rules to fit the ISA specification regarding TM | ||
| 286 | * state transitions when in the TM disabled/suspended state | ||
| 287 | * and the target TM state is the TM inactive (00) state (the | ||
| 288 | * change should be suppressed). | ||
| 289 | */ | ||
| 290 | if (((cur_msr & MSR_TM) == 0) && | ||
| 291 | ((srr1 & MSR_TM) == 0) && | ||
| 292 | MSR_TM_SUSPENDED(cur_msr) && | ||
| 293 | !MSR_TM_ACTIVE(srr1)) | ||
| 294 | srr1 |= MSR_TS_S; | ||
| 295 | #endif | ||
| 121 | kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu)); | 296 | kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu)); |
| 122 | kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu)); | 297 | kvmppc_set_msr(vcpu, srr1); |
| 123 | *advance = 0; | 298 | *advance = 0; |
| 124 | break; | 299 | break; |
| 300 | } | ||
| 125 | 301 | ||
| 126 | default: | 302 | default: |
| 127 | emulated = EMULATE_FAIL; | 303 | emulated = EMULATE_FAIL; |
| @@ -304,6 +480,140 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 304 | 480 | ||
| 305 | break; | 481 | break; |
| 306 | } | 482 | } |
| 483 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 484 | case OP_31_XOP_TBEGIN: | ||
| 485 | { | ||
| 486 | if (!cpu_has_feature(CPU_FTR_TM)) | ||
| 487 | break; | ||
| 488 | |||
| 489 | if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { | ||
| 490 | kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); | ||
| 491 | emulated = EMULATE_AGAIN; | ||
| 492 | break; | ||
| 493 | } | ||
| 494 | |||
| 495 | if (!(kvmppc_get_msr(vcpu) & MSR_PR)) { | ||
| 496 | preempt_disable(); | ||
| 497 | vcpu->arch.cr = (CR0_TBEGIN_FAILURE | | ||
| 498 | (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT))); | ||
| 499 | |||
| 500 | vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT | | ||
| 501 | (((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) | ||
| 502 | << TEXASR_FC_LG)); | ||
| 503 | |||
| 504 | if ((inst >> 21) & 0x1) | ||
| 505 | vcpu->arch.texasr |= TEXASR_ROT; | ||
| 506 | |||
| 507 | if (kvmppc_get_msr(vcpu) & MSR_HV) | ||
| 508 | vcpu->arch.texasr |= TEXASR_HV; | ||
| 509 | |||
| 510 | vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4; | ||
| 511 | vcpu->arch.tfiar = kvmppc_get_pc(vcpu); | ||
| 512 | |||
| 513 | kvmppc_restore_tm_sprs(vcpu); | ||
| 514 | preempt_enable(); | ||
| 515 | } else | ||
| 516 | emulated = EMULATE_FAIL; | ||
| 517 | break; | ||
| 518 | } | ||
| 519 | case OP_31_XOP_TABORT: | ||
| 520 | { | ||
| 521 | ulong guest_msr = kvmppc_get_msr(vcpu); | ||
| 522 | unsigned long ra_val = 0; | ||
| 523 | |||
| 524 | if (!cpu_has_feature(CPU_FTR_TM)) | ||
| 525 | break; | ||
| 526 | |||
| 527 | if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { | ||
| 528 | kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); | ||
| 529 | emulated = EMULATE_AGAIN; | ||
| 530 | break; | ||
| 531 | } | ||
| 532 | |||
| 533 | /* only emulate for a privileged guest, since a problem-state | ||
| 534 | * guest can run with TM enabled and we don't expect to | ||
| 535 | * trap here in that case. | ||
| 536 | */ | ||
| 537 | WARN_ON(guest_msr & MSR_PR); | ||
| 538 | |||
| 539 | if (ra) | ||
| 540 | ra_val = kvmppc_get_gpr(vcpu, ra); | ||
| 541 | |||
| 542 | kvmppc_emulate_tabort(vcpu, ra_val); | ||
| 543 | break; | ||
| 544 | } | ||
| 545 | case OP_31_XOP_TRECLAIM: | ||
| 546 | { | ||
| 547 | ulong guest_msr = kvmppc_get_msr(vcpu); | ||
| 548 | unsigned long ra_val = 0; | ||
| 549 | |||
| 550 | if (!cpu_has_feature(CPU_FTR_TM)) | ||
| 551 | break; | ||
| 552 | |||
| 553 | if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { | ||
| 554 | kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); | ||
| 555 | emulated = EMULATE_AGAIN; | ||
| 556 | break; | ||
| 557 | } | ||
| 558 | |||
| 559 | /* generate interrupts based on priorities */ | ||
| 560 | if (guest_msr & MSR_PR) { | ||
| 561 | /* Privileged Instruction type Program Interrupt */ | ||
| 562 | kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); | ||
| 563 | emulated = EMULATE_AGAIN; | ||
| 564 | break; | ||
| 565 | } | ||
| 566 | |||
| 567 | if (!MSR_TM_ACTIVE(guest_msr)) { | ||
| 568 | /* TM bad thing interrupt */ | ||
| 569 | kvmppc_core_queue_program(vcpu, SRR1_PROGTM); | ||
| 570 | emulated = EMULATE_AGAIN; | ||
| 571 | break; | ||
| 572 | } | ||
| 573 | |||
| 574 | if (ra) | ||
| 575 | ra_val = kvmppc_get_gpr(vcpu, ra); | ||
| 576 | kvmppc_emulate_treclaim(vcpu, ra_val); | ||
| 577 | break; | ||
| 578 | } | ||
| 579 | case OP_31_XOP_TRCHKPT: | ||
| 580 | { | ||
| 581 | ulong guest_msr = kvmppc_get_msr(vcpu); | ||
| 582 | unsigned long texasr; | ||
| 583 | |||
| 584 | if (!cpu_has_feature(CPU_FTR_TM)) | ||
| 585 | break; | ||
| 586 | |||
| 587 | if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { | ||
| 588 | kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); | ||
| 589 | emulated = EMULATE_AGAIN; | ||
| 590 | break; | ||
| 591 | } | ||
| 592 | |||
| 593 | /* generate interrupt based on priorities */ | ||
| 594 | if (guest_msr & MSR_PR) { | ||
| 595 | /* Privileged Instruction type Program Intr */ | ||
| 596 | kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); | ||
| 597 | emulated = EMULATE_AGAIN; | ||
| 598 | break; | ||
| 599 | } | ||
| 600 | |||
| 601 | tm_enable(); | ||
| 602 | texasr = mfspr(SPRN_TEXASR); | ||
| 603 | tm_disable(); | ||
| 604 | |||
| 605 | if (MSR_TM_ACTIVE(guest_msr) || | ||
| 606 | !(texasr & (TEXASR_FS))) { | ||
| 607 | /* TM bad thing interrupt */ | ||
| 608 | kvmppc_core_queue_program(vcpu, SRR1_PROGTM); | ||
| 609 | emulated = EMULATE_AGAIN; | ||
| 610 | break; | ||
| 611 | } | ||
| 612 | |||
| 613 | kvmppc_emulate_trchkpt(vcpu); | ||
| 614 | break; | ||
| 615 | } | ||
| 616 | #endif | ||
| 307 | default: | 617 | default: |
| 308 | emulated = EMULATE_FAIL; | 618 | emulated = EMULATE_FAIL; |
| 309 | } | 619 | } |
| @@ -465,13 +775,38 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) | |||
| 465 | break; | 775 | break; |
| 466 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 776 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
| 467 | case SPRN_TFHAR: | 777 | case SPRN_TFHAR: |
| 468 | vcpu->arch.tfhar = spr_val; | ||
| 469 | break; | ||
| 470 | case SPRN_TEXASR: | 778 | case SPRN_TEXASR: |
| 471 | vcpu->arch.texasr = spr_val; | ||
| 472 | break; | ||
| 473 | case SPRN_TFIAR: | 779 | case SPRN_TFIAR: |
| 474 | vcpu->arch.tfiar = spr_val; | 780 | if (!cpu_has_feature(CPU_FTR_TM)) |
| 781 | break; | ||
| 782 | |||
| 783 | if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { | ||
| 784 | kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); | ||
| 785 | emulated = EMULATE_AGAIN; | ||
| 786 | break; | ||
| 787 | } | ||
| 788 | |||
| 789 | if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) && | ||
| 790 | !((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) && | ||
| 791 | (sprn == SPRN_TFHAR))) { | ||
| 792 | /* it is illegal to mtspr() TM regs in any | ||
| 793 | * state other than non-transactional, with | ||
| 794 | * the exception of TFHAR in suspend state. | ||
| 795 | */ | ||
| 796 | kvmppc_core_queue_program(vcpu, SRR1_PROGTM); | ||
| 797 | emulated = EMULATE_AGAIN; | ||
| 798 | break; | ||
| 799 | } | ||
| 800 | |||
| 801 | tm_enable(); | ||
| 802 | if (sprn == SPRN_TFHAR) | ||
| 803 | mtspr(SPRN_TFHAR, spr_val); | ||
| 804 | else if (sprn == SPRN_TEXASR) | ||
| 805 | mtspr(SPRN_TEXASR, spr_val); | ||
| 806 | else | ||
| 807 | mtspr(SPRN_TFIAR, spr_val); | ||
| 808 | tm_disable(); | ||
| 809 | |||
| 475 | break; | 810 | break; |
| 476 | #endif | 811 | #endif |
| 477 | #endif | 812 | #endif |
| @@ -618,13 +953,25 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val | |||
| 618 | break; | 953 | break; |
| 619 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 954 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
| 620 | case SPRN_TFHAR: | 955 | case SPRN_TFHAR: |
| 621 | *spr_val = vcpu->arch.tfhar; | ||
| 622 | break; | ||
| 623 | case SPRN_TEXASR: | 956 | case SPRN_TEXASR: |
| 624 | *spr_val = vcpu->arch.texasr; | ||
| 625 | break; | ||
| 626 | case SPRN_TFIAR: | 957 | case SPRN_TFIAR: |
| 627 | *spr_val = vcpu->arch.tfiar; | 958 | if (!cpu_has_feature(CPU_FTR_TM)) |
| 959 | break; | ||
| 960 | |||
| 961 | if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { | ||
| 962 | kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); | ||
| 963 | emulated = EMULATE_AGAIN; | ||
| 964 | break; | ||
| 965 | } | ||
| 966 | |||
| 967 | tm_enable(); | ||
| 968 | if (sprn == SPRN_TFHAR) | ||
| 969 | *spr_val = mfspr(SPRN_TFHAR); | ||
| 970 | else if (sprn == SPRN_TEXASR) | ||
| 971 | *spr_val = mfspr(SPRN_TEXASR); | ||
| 972 | else if (sprn == SPRN_TFIAR) | ||
| 973 | *spr_val = mfspr(SPRN_TFIAR); | ||
| 974 | tm_disable(); | ||
| 628 | break; | 975 | break; |
| 629 | #endif | 976 | #endif |
| 630 | #endif | 977 | #endif |
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 8858ab8b6ca4..de686b340f4a 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
| @@ -123,6 +123,32 @@ static bool no_mixing_hpt_and_radix; | |||
| 123 | static void kvmppc_end_cede(struct kvm_vcpu *vcpu); | 123 | static void kvmppc_end_cede(struct kvm_vcpu *vcpu); |
| 124 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); | 124 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); |
| 125 | 125 | ||
| 126 | /* | ||
| 127 | * RWMR values for POWER8. These control the rate at which PURR | ||
| 128 | * and SPURR count and should be set according to the number of | ||
| 129 | * online threads in the vcore being run. | ||
| 130 | */ | ||
| 131 | #define RWMR_RPA_P8_1THREAD 0x164520C62609AECA | ||
| 132 | #define RWMR_RPA_P8_2THREAD 0x7FFF2908450D8DA9 | ||
| 133 | #define RWMR_RPA_P8_3THREAD 0x164520C62609AECA | ||
| 134 | #define RWMR_RPA_P8_4THREAD 0x199A421245058DA9 | ||
| 135 | #define RWMR_RPA_P8_5THREAD 0x164520C62609AECA | ||
| 136 | #define RWMR_RPA_P8_6THREAD 0x164520C62609AECA | ||
| 137 | #define RWMR_RPA_P8_7THREAD 0x164520C62609AECA | ||
| 138 | #define RWMR_RPA_P8_8THREAD 0x164520C62609AECA | ||
| 139 | |||
| 140 | static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = { | ||
| 141 | RWMR_RPA_P8_1THREAD, | ||
| 142 | RWMR_RPA_P8_1THREAD, | ||
| 143 | RWMR_RPA_P8_2THREAD, | ||
| 144 | RWMR_RPA_P8_3THREAD, | ||
| 145 | RWMR_RPA_P8_4THREAD, | ||
| 146 | RWMR_RPA_P8_5THREAD, | ||
| 147 | RWMR_RPA_P8_6THREAD, | ||
| 148 | RWMR_RPA_P8_7THREAD, | ||
| 149 | RWMR_RPA_P8_8THREAD, | ||
| 150 | }; | ||
| 151 | |||
| 126 | static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc, | 152 | static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc, |
| 127 | int *ip) | 153 | int *ip) |
| 128 | { | 154 | { |
| @@ -371,13 +397,13 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) | |||
| 371 | 397 | ||
| 372 | pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id); | 398 | pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id); |
| 373 | pr_err("pc = %.16lx msr = %.16llx trap = %x\n", | 399 | pr_err("pc = %.16lx msr = %.16llx trap = %x\n", |
| 374 | vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap); | 400 | vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap); |
| 375 | for (r = 0; r < 16; ++r) | 401 | for (r = 0; r < 16; ++r) |
| 376 | pr_err("r%2d = %.16lx r%d = %.16lx\n", | 402 | pr_err("r%2d = %.16lx r%d = %.16lx\n", |
| 377 | r, kvmppc_get_gpr(vcpu, r), | 403 | r, kvmppc_get_gpr(vcpu, r), |
| 378 | r+16, kvmppc_get_gpr(vcpu, r+16)); | 404 | r+16, kvmppc_get_gpr(vcpu, r+16)); |
| 379 | pr_err("ctr = %.16lx lr = %.16lx\n", | 405 | pr_err("ctr = %.16lx lr = %.16lx\n", |
| 380 | vcpu->arch.ctr, vcpu->arch.lr); | 406 | vcpu->arch.regs.ctr, vcpu->arch.regs.link); |
| 381 | pr_err("srr0 = %.16llx srr1 = %.16llx\n", | 407 | pr_err("srr0 = %.16llx srr1 = %.16llx\n", |
| 382 | vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); | 408 | vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); |
| 383 | pr_err("sprg0 = %.16llx sprg1 = %.16llx\n", | 409 | pr_err("sprg0 = %.16llx sprg1 = %.16llx\n", |
| @@ -385,7 +411,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) | |||
| 385 | pr_err("sprg2 = %.16llx sprg3 = %.16llx\n", | 411 | pr_err("sprg2 = %.16llx sprg3 = %.16llx\n", |
| 386 | vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); | 412 | vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); |
| 387 | pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n", | 413 | pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n", |
| 388 | vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr); | 414 | vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); |
| 389 | pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); | 415 | pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); |
| 390 | pr_err("fault dar = %.16lx dsisr = %.8x\n", | 416 | pr_err("fault dar = %.16lx dsisr = %.8x\n", |
| 391 | vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); | 417 | vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); |
| @@ -1526,6 +1552,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | |||
| 1526 | *val = get_reg_val(id, vcpu->arch.dec_expires + | 1552 | *val = get_reg_val(id, vcpu->arch.dec_expires + |
| 1527 | vcpu->arch.vcore->tb_offset); | 1553 | vcpu->arch.vcore->tb_offset); |
| 1528 | break; | 1554 | break; |
| 1555 | case KVM_REG_PPC_ONLINE: | ||
| 1556 | *val = get_reg_val(id, vcpu->arch.online); | ||
| 1557 | break; | ||
| 1529 | default: | 1558 | default: |
| 1530 | r = -EINVAL; | 1559 | r = -EINVAL; |
| 1531 | break; | 1560 | break; |
| @@ -1757,6 +1786,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | |||
| 1757 | vcpu->arch.dec_expires = set_reg_val(id, *val) - | 1786 | vcpu->arch.dec_expires = set_reg_val(id, *val) - |
| 1758 | vcpu->arch.vcore->tb_offset; | 1787 | vcpu->arch.vcore->tb_offset; |
| 1759 | break; | 1788 | break; |
| 1789 | case KVM_REG_PPC_ONLINE: | ||
| 1790 | i = set_reg_val(id, *val); | ||
| 1791 | if (i && !vcpu->arch.online) | ||
| 1792 | atomic_inc(&vcpu->arch.vcore->online_count); | ||
| 1793 | else if (!i && vcpu->arch.online) | ||
| 1794 | atomic_dec(&vcpu->arch.vcore->online_count); | ||
| 1795 | vcpu->arch.online = i; | ||
| 1796 | break; | ||
| 1760 | default: | 1797 | default: |
| 1761 | r = -EINVAL; | 1798 | r = -EINVAL; |
| 1762 | break; | 1799 | break; |
| @@ -2850,6 +2887,25 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) | |||
| 2850 | } | 2887 | } |
| 2851 | } | 2888 | } |
| 2852 | 2889 | ||
| 2890 | /* | ||
| 2891 | * On POWER8, set RWMR register. | ||
| 2892 | * Since it only affects PURR and SPURR, it doesn't affect | ||
| 2893 | * the host, so we don't save/restore the host value. | ||
| 2894 | */ | ||
| 2895 | if (is_power8) { | ||
| 2896 | unsigned long rwmr_val = RWMR_RPA_P8_8THREAD; | ||
| 2897 | int n_online = atomic_read(&vc->online_count); | ||
| 2898 | |||
| 2899 | /* | ||
| 2900 | * Use the 8-thread value if we're doing split-core | ||
| 2901 | * or if the vcore's online count looks bogus. | ||
| 2902 | */ | ||
| 2903 | if (split == 1 && threads_per_subcore == MAX_SMT_THREADS && | ||
| 2904 | n_online >= 1 && n_online <= MAX_SMT_THREADS) | ||
| 2905 | rwmr_val = p8_rwmr_values[n_online]; | ||
| 2906 | mtspr(SPRN_RWMR, rwmr_val); | ||
| 2907 | } | ||
| 2908 | |||
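The lookup is guarded so that any online count outside 1..8, or a split-core configuration, falls back to the 8-thread value; index 0 of the table also duplicates the 1-thread value, so a bogus count of zero is harmless. A sketch of the selection logic (constants copied from the patch; the helper name is invented for illustration):

	#include <stdio.h>

	#define MAX_SMT_THREADS 8

	/* RWMR constants from the patch; index 0 duplicates the 1-thread value. */
	static unsigned long long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
		0x164520C62609AECAULL, 0x164520C62609AECAULL, 0x7FFF2908450D8DA9ULL,
		0x164520C62609AECAULL, 0x199A421245058DA9ULL, 0x164520C62609AECAULL,
		0x164520C62609AECAULL, 0x164520C62609AECAULL, 0x164520C62609AECAULL,
	};

	static unsigned long long pick_rwmr(int split, int threads_per_subcore,
					    int n_online)
	{
		unsigned long long rwmr_val = p8_rwmr_values[MAX_SMT_THREADS];

		if (split == 1 && threads_per_subcore == MAX_SMT_THREADS &&
		    n_online >= 1 && n_online <= MAX_SMT_THREADS)
			rwmr_val = p8_rwmr_values[n_online];
		return rwmr_val;
	}

	int main(void)
	{
		printf("%llx\n", pick_rwmr(1, 8, 2));	/* 2-thread value */
		printf("%llx\n", pick_rwmr(2, 4, 2));	/* split: 8-thread default */
		return 0;
	}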
| 2853 | /* Start all the threads */ | 2909 | /* Start all the threads */ |
| 2854 | active = 0; | 2910 | active = 0; |
| 2855 | for (sub = 0; sub < core_info.n_subcores; ++sub) { | 2911 | for (sub = 0; sub < core_info.n_subcores; ++sub) { |
| @@ -2902,6 +2958,32 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) | |||
| 2902 | for (sub = 0; sub < core_info.n_subcores; ++sub) | 2958 | for (sub = 0; sub < core_info.n_subcores; ++sub) |
| 2903 | spin_unlock(&core_info.vc[sub]->lock); | 2959 | spin_unlock(&core_info.vc[sub]->lock); |
| 2904 | 2960 | ||
| 2961 | if (kvm_is_radix(vc->kvm)) { | ||
| 2962 | int tmp = pcpu; | ||
| 2963 | |||
| 2964 | /* | ||
| 2965 | * Do we need to flush the process scoped TLB for the LPAR? | ||
| 2966 | * | ||
| 2967 | * On POWER9, individual threads can come in here, but the | ||
| 2968 | * TLB is shared between the 4 threads in a core, hence | ||
| 2969 | * invalidating on one thread invalidates for all. | ||
| 2970 | * Thus we make all 4 threads use the same bit here. | ||
| 2971 | * | ||
| 2972 | * Hash must be flushed in realmode in order to use tlbiel. | ||
| 2973 | */ | ||
| 2974 | mtspr(SPRN_LPID, vc->kvm->arch.lpid); | ||
| 2975 | isync(); | ||
| 2976 | |||
| 2977 | if (cpu_has_feature(CPU_FTR_ARCH_300)) | ||
| 2978 | tmp &= ~0x3UL; | ||
| 2979 | |||
| 2980 | if (cpumask_test_cpu(tmp, &vc->kvm->arch.need_tlb_flush)) { | ||
| 2981 | radix__local_flush_tlb_lpid_guest(vc->kvm->arch.lpid); | ||
| 2982 | /* Clear the bit after the TLB flush */ | ||
| 2983 | cpumask_clear_cpu(tmp, &vc->kvm->arch.need_tlb_flush); | ||
| 2984 | } | ||
| 2985 | } | ||
| 2986 | |||
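Because the four threads of a POWER9 core share one TLB, the flush bookkeeping masks off the low two bits of the CPU number so that all siblings test and clear a single bit in need_tlb_flush. A sketch of that mapping (the ~0x3 mask is from the patch; the 4-threads-per-core cpu-id layout is an assumption of this example):

	#include <stdio.h>

	/* On ARCH_300 (POWER9), all 4 threads of a core share one flush bit. */
	static int flush_bit(int pcpu, int is_power9)
	{
		return is_power9 ? (pcpu & ~0x3) : pcpu;
	}

	int main(void)
	{
		int cpu;

		for (cpu = 4; cpu < 8; ++cpu)	/* one core's worth of threads */
			printf("cpu %d -> bit %d\n", cpu, flush_bit(cpu, 1));
		return 0;
	}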
| 2905 | /* | 2987 | /* |
| 2906 | * Interrupts will be enabled once we get into the guest, | 2988 | * Interrupts will be enabled once we get into the guest, |
| 2907 | * so tell lockdep that we're about to enable interrupts. | 2989 | * so tell lockdep that we're about to enable interrupts. |
| @@ -3356,6 +3438,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
| 3356 | } | 3438 | } |
| 3357 | #endif | 3439 | #endif |
| 3358 | 3440 | ||
| 3441 | /* | ||
| 3442 | * Force online to 1 for the sake of old userspace which doesn't | ||
| 3443 | * set it. | ||
| 3444 | */ | ||
| 3445 | if (!vcpu->arch.online) { | ||
| 3446 | atomic_inc(&vcpu->arch.vcore->online_count); | ||
| 3447 | vcpu->arch.online = 1; | ||
| 3448 | } | ||
| 3449 | |||
| 3359 | kvmppc_core_prepare_to_enter(vcpu); | 3450 | kvmppc_core_prepare_to_enter(vcpu); |
| 3360 | 3451 | ||
| 3361 | /* No need to go into the guest when all we'll do is come back out */ | 3452 | /* No need to go into the guest when all we'll do is come back out */ |
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index de18299f92b7..d4a3f4da409b 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/cma.h> | 18 | #include <linux/cma.h> |
| 19 | #include <linux/bitops.h> | 19 | #include <linux/bitops.h> |
| 20 | 20 | ||
| 21 | #include <asm/asm-prototypes.h> | ||
| 21 | #include <asm/cputable.h> | 22 | #include <asm/cputable.h> |
| 22 | #include <asm/kvm_ppc.h> | 23 | #include <asm/kvm_ppc.h> |
| 23 | #include <asm/kvm_book3s.h> | 24 | #include <asm/kvm_book3s.h> |
| @@ -211,9 +212,9 @@ long kvmppc_h_random(struct kvm_vcpu *vcpu) | |||
| 211 | 212 | ||
| 212 | /* Only need to do the expensive mfmsr() on radix */ | 213 | /* Only need to do the expensive mfmsr() on radix */ |
| 213 | if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR)) | 214 | if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR)) |
| 214 | r = powernv_get_random_long(&vcpu->arch.gpr[4]); | 215 | r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]); |
| 215 | else | 216 | else |
| 216 | r = powernv_get_random_real_mode(&vcpu->arch.gpr[4]); | 217 | r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]); |
| 217 | if (r) | 218 | if (r) |
| 218 | return H_SUCCESS; | 219 | return H_SUCCESS; |
| 219 | 220 | ||
| @@ -562,7 +563,7 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu) | |||
| 562 | { | 563 | { |
| 563 | if (!kvmppc_xics_enabled(vcpu)) | 564 | if (!kvmppc_xics_enabled(vcpu)) |
| 564 | return H_TOO_HARD; | 565 | return H_TOO_HARD; |
| 565 | vcpu->arch.gpr[5] = get_tb(); | 566 | vcpu->arch.regs.gpr[5] = get_tb(); |
| 566 | if (xive_enabled()) { | 567 | if (xive_enabled()) { |
| 567 | if (is_rm()) | 568 | if (is_rm()) |
| 568 | return xive_rm_h_xirr(vcpu); | 569 | return xive_rm_h_xirr(vcpu); |
| @@ -633,7 +634,19 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) | |||
| 633 | 634 | ||
| 634 | void kvmppc_bad_interrupt(struct pt_regs *regs) | 635 | void kvmppc_bad_interrupt(struct pt_regs *regs) |
| 635 | { | 636 | { |
| 636 | die("Bad interrupt in KVM entry/exit code", regs, SIGABRT); | 637 | /* |
| 638 | * A 0x100 could happen at any time; a 0x200 can happen due to an invalid | ||
| 639 | * real address access, for example (or at any time due to a hardware problem). | ||
| 640 | */ | ||
| 641 | if (TRAP(regs) == 0x100) { | ||
| 642 | get_paca()->in_nmi++; | ||
| 643 | system_reset_exception(regs); | ||
| 644 | get_paca()->in_nmi--; | ||
| 645 | } else if (TRAP(regs) == 0x200) { | ||
| 646 | machine_check_exception(regs); | ||
| 647 | } else { | ||
| 648 | die("Bad interrupt in KVM entry/exit code", regs, SIGABRT); | ||
| 649 | } | ||
| 637 | panic("Bad KVM trap"); | 650 | panic("Bad KVM trap"); |
| 638 | } | 651 | } |
| 639 | 652 | ||
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index 0e8493033288..82f2ff9410b6 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S | |||
| @@ -137,7 +137,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |||
| 137 | /* | 137 | /* |
| 138 | * We return here in virtual mode after the guest exits | 138 | * We return here in virtual mode after the guest exits |
| 139 | * with something that we can't handle in real mode. | 139 | * with something that we can't handle in real mode. |
| 140 | * Interrupts are enabled again at this point. | 140 | * Interrupts are still hard-disabled. |
| 141 | */ | 141 | */ |
| 142 | 142 | ||
| 143 | /* | 143 | /* |
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 78e6a392330f..1f22d9e977d4 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c | |||
| @@ -418,7 +418,8 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, | |||
| 418 | long pte_index, unsigned long pteh, unsigned long ptel) | 418 | long pte_index, unsigned long pteh, unsigned long ptel) |
| 419 | { | 419 | { |
| 420 | return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel, | 420 | return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel, |
| 421 | vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]); | 421 | vcpu->arch.pgdir, true, |
| 422 | &vcpu->arch.regs.gpr[4]); | ||
| 422 | } | 423 | } |
| 423 | 424 | ||
| 424 | #ifdef __BIG_ENDIAN__ | 425 | #ifdef __BIG_ENDIAN__ |
| @@ -434,24 +435,6 @@ static inline int is_mmio_hpte(unsigned long v, unsigned long r) | |||
| 434 | (HPTE_R_KEY_HI | HPTE_R_KEY_LO)); | 435 | (HPTE_R_KEY_HI | HPTE_R_KEY_LO)); |
| 435 | } | 436 | } |
| 436 | 437 | ||
| 437 | static inline int try_lock_tlbie(unsigned int *lock) | ||
| 438 | { | ||
| 439 | unsigned int tmp, old; | ||
| 440 | unsigned int token = LOCK_TOKEN; | ||
| 441 | |||
| 442 | asm volatile("1:lwarx %1,0,%2\n" | ||
| 443 | " cmpwi cr0,%1,0\n" | ||
| 444 | " bne 2f\n" | ||
| 445 | " stwcx. %3,0,%2\n" | ||
| 446 | " bne- 1b\n" | ||
| 447 | " isync\n" | ||
| 448 | "2:" | ||
| 449 | : "=&r" (tmp), "=&r" (old) | ||
| 450 | : "r" (lock), "r" (token) | ||
| 451 | : "cc", "memory"); | ||
| 452 | return old == 0; | ||
| 453 | } | ||
| 454 | |||
| 455 | static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, | 438 | static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, |
| 456 | long npages, int global, bool need_sync) | 439 | long npages, int global, bool need_sync) |
| 457 | { | 440 | { |
| @@ -463,8 +446,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, | |||
| 463 | * the RS field, this is backwards-compatible with P7 and P8. | 446 | * the RS field, this is backwards-compatible with P7 and P8. |
| 464 | */ | 447 | */ |
| 465 | if (global) { | 448 | if (global) { |
| 466 | while (!try_lock_tlbie(&kvm->arch.tlbie_lock)) | ||
| 467 | cpu_relax(); | ||
| 468 | if (need_sync) | 449 | if (need_sync) |
| 469 | asm volatile("ptesync" : : : "memory"); | 450 | asm volatile("ptesync" : : : "memory"); |
| 470 | for (i = 0; i < npages; ++i) { | 451 | for (i = 0; i < npages; ++i) { |
| @@ -483,7 +464,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, | |||
| 483 | } | 464 | } |
| 484 | 465 | ||
| 485 | asm volatile("eieio; tlbsync; ptesync" : : : "memory"); | 466 | asm volatile("eieio; tlbsync; ptesync" : : : "memory"); |
| 486 | kvm->arch.tlbie_lock = 0; | ||
| 487 | } else { | 467 | } else { |
| 488 | if (need_sync) | 468 | if (need_sync) |
| 489 | asm volatile("ptesync" : : : "memory"); | 469 | asm volatile("ptesync" : : : "memory"); |
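For context, the deleted try_lock_tlbie() was an open-coded lwarx/stwcx. trylock guarding the global tlbie sequence; this hunk drops both the lock acquisition and the unlock, presumably because serializing tlbie globally is no longer required on the processors this path now supports. A rough C equivalent of what the removed primitive did, assuming a Linux-style cmpxchg():

    /* Rough C equivalent of the removed lwarx/stwcx. trylock (sketch). */
    static inline int try_lock_tlbie_c(unsigned int *lock)
    {
            /* cmpxchg returns the old value; success means it was 0 (unlocked). */
            return cmpxchg(lock, 0, LOCK_TOKEN) == 0;
    }

    /* The removed caller spun until the lock was acquired:
     *
     *      while (!try_lock_tlbie_c(&kvm->arch.tlbie_lock))
     *              cpu_relax();
     */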
| @@ -561,13 +541,13 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags, | |||
| 561 | unsigned long pte_index, unsigned long avpn) | 541 | unsigned long pte_index, unsigned long avpn) |
| 562 | { | 542 | { |
| 563 | return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn, | 543 | return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn, |
| 564 | &vcpu->arch.gpr[4]); | 544 | &vcpu->arch.regs.gpr[4]); |
| 565 | } | 545 | } |
| 566 | 546 | ||
| 567 | long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) | 547 | long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) |
| 568 | { | 548 | { |
| 569 | struct kvm *kvm = vcpu->kvm; | 549 | struct kvm *kvm = vcpu->kvm; |
| 570 | unsigned long *args = &vcpu->arch.gpr[4]; | 550 | unsigned long *args = &vcpu->arch.regs.gpr[4]; |
| 571 | __be64 *hp, *hptes[4]; | 551 | __be64 *hp, *hptes[4]; |
| 572 | unsigned long tlbrb[4]; | 552 | unsigned long tlbrb[4]; |
| 573 | long int i, j, k, n, found, indexes[4]; | 553 | long int i, j, k, n, found, indexes[4]; |
| @@ -787,8 +767,8 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags, | |||
| 787 | r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C)); | 767 | r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C)); |
| 788 | r &= ~HPTE_GR_RESERVED; | 768 | r &= ~HPTE_GR_RESERVED; |
| 789 | } | 769 | } |
| 790 | vcpu->arch.gpr[4 + i * 2] = v; | 770 | vcpu->arch.regs.gpr[4 + i * 2] = v; |
| 791 | vcpu->arch.gpr[5 + i * 2] = r; | 771 | vcpu->arch.regs.gpr[5 + i * 2] = r; |
| 792 | } | 772 | } |
| 793 | return H_SUCCESS; | 773 | return H_SUCCESS; |
| 794 | } | 774 | } |
| @@ -834,7 +814,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags, | |||
| 834 | } | 814 | } |
| 835 | } | 815 | } |
| 836 | } | 816 | } |
| 837 | vcpu->arch.gpr[4] = gr; | 817 | vcpu->arch.regs.gpr[4] = gr; |
| 838 | ret = H_SUCCESS; | 818 | ret = H_SUCCESS; |
| 839 | out: | 819 | out: |
| 840 | unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); | 820 | unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); |
| @@ -881,7 +861,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags, | |||
| 881 | kvmppc_set_dirty_from_hpte(kvm, v, gr); | 861 | kvmppc_set_dirty_from_hpte(kvm, v, gr); |
| 882 | } | 862 | } |
| 883 | } | 863 | } |
| 884 | vcpu->arch.gpr[4] = gr; | 864 | vcpu->arch.regs.gpr[4] = gr; |
| 885 | ret = H_SUCCESS; | 865 | ret = H_SUCCESS; |
| 886 | out: | 866 | out: |
| 887 | unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); | 867 | unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); |
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index 2a862618f072..758d1d23215e 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c | |||
| @@ -517,7 +517,7 @@ unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu) | |||
| 517 | } while (!icp_rm_try_update(icp, old_state, new_state)); | 517 | } while (!icp_rm_try_update(icp, old_state, new_state)); |
| 518 | 518 | ||
| 519 | /* Return the result in GPR4 */ | 519 | /* Return the result in GPR4 */ |
| 520 | vcpu->arch.gpr[4] = xirr; | 520 | vcpu->arch.regs.gpr[4] = xirr; |
| 521 | 521 | ||
| 522 | return check_too_hard(xics, icp); | 522 | return check_too_hard(xics, icp); |
| 523 | } | 523 | } |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index b97d261d3b89..153988d878e8 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
| @@ -39,8 +39,6 @@ BEGIN_FTR_SECTION; \ | |||
| 39 | extsw reg, reg; \ | 39 | extsw reg, reg; \ |
| 40 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) | 40 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) |
| 41 | 41 | ||
| 42 | #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) | ||
| 43 | |||
| 44 | /* Values in HSTATE_NAPPING(r13) */ | 42 | /* Values in HSTATE_NAPPING(r13) */ |
| 45 | #define NAPPING_CEDE 1 | 43 | #define NAPPING_CEDE 1 |
| 46 | #define NAPPING_NOVCPU 2 | 44 | #define NAPPING_NOVCPU 2 |
| @@ -639,6 +637,10 @@ kvmppc_hv_entry: | |||
| 639 | /* Primary thread switches to guest partition. */ | 637 | /* Primary thread switches to guest partition. */ |
| 640 | cmpwi r6,0 | 638 | cmpwi r6,0 |
| 641 | bne 10f | 639 | bne 10f |
| 640 | |||
| 641 | /* Radix has already switched LPID and flushed core TLB */ | ||
| 642 | bne cr7, 22f | ||
| 643 | |||
| 642 | lwz r7,KVM_LPID(r9) | 644 | lwz r7,KVM_LPID(r9) |
| 643 | BEGIN_FTR_SECTION | 645 | BEGIN_FTR_SECTION |
| 644 | ld r6,KVM_SDR1(r9) | 646 | ld r6,KVM_SDR1(r9) |
| @@ -650,7 +652,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) | |||
| 650 | mtspr SPRN_LPID,r7 | 652 | mtspr SPRN_LPID,r7 |
| 651 | isync | 653 | isync |
| 652 | 654 | ||
| 653 | /* See if we need to flush the TLB */ | 655 | /* See if we need to flush the TLB. Hash has to be done in real mode */ |
| 654 | lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */ | 656 | lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */ |
| 655 | BEGIN_FTR_SECTION | 657 | BEGIN_FTR_SECTION |
| 656 | /* | 658 | /* |
| @@ -677,15 +679,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |||
| 677 | li r7,0x800 /* IS field = 0b10 */ | 679 | li r7,0x800 /* IS field = 0b10 */ |
| 678 | ptesync | 680 | ptesync |
| 679 | li r0,0 /* RS for P9 version of tlbiel */ | 681 | li r0,0 /* RS for P9 version of tlbiel */ |
| 680 | bne cr7, 29f | ||
| 681 | 28: tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */ | 682 | 28: tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */ |
| 682 | addi r7,r7,0x1000 | 683 | addi r7,r7,0x1000 |
| 683 | bdnz 28b | 684 | bdnz 28b |
| 684 | b 30f | 685 | ptesync |
| 685 | 29: PPC_TLBIEL(7,0,2,1,1) /* for radix, RIC=2, PRS=1, R=1 */ | ||
| 686 | addi r7,r7,0x1000 | ||
| 687 | bdnz 29b | ||
| 688 | 30: ptesync | ||
| 689 | 23: ldarx r7,0,r6 /* clear the bit after TLB flushed */ | 686 | 23: ldarx r7,0,r6 /* clear the bit after TLB flushed */ |
| 690 | andc r7,r7,r8 | 687 | andc r7,r7,r8 |
| 691 | stdcx. r7,0,r6 | 688 | stdcx. r7,0,r6 |
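The surrounding code implements a test-and-clear on the partition's need_tlb_flush CPU mask by hand, because it may run in real mode: the flush happens first, then the ldarx/andc/stdcx. loop at label 23 clears this CPU's bit only once the flush is complete. The same sequence in virtual-mode C, with flush_guest_tlb() as a hypothetical stand-in for the tlbiel loop:

    /* Sketch of the flush-then-clear ordering at label 23 above. */
    if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) {
            flush_guest_tlb();      /* hypothetical stand-in for the tlbiel loop */
            /* Clear our bit only after the flush, so a racing requester
             * can never see the bit clear while stale translations remain. */
            cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush);
    }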
| @@ -799,7 +796,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) | |||
| 799 | /* | 796 | /* |
| 800 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR | 797 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR |
| 801 | */ | 798 | */ |
| 802 | bl kvmppc_restore_tm | 799 | mr r3, r4 |
| 800 | ld r4, VCPU_MSR(r3) | ||
| 801 | bl kvmppc_restore_tm_hv | ||
| 802 | ld r4, HSTATE_KVM_VCPU(r13) | ||
| 803 | 91: | 803 | 91: |
| 804 | #endif | 804 | #endif |
| 805 | 805 | ||
| @@ -1783,7 +1783,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) | |||
| 1783 | /* | 1783 | /* |
| 1784 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR | 1784 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR |
| 1785 | */ | 1785 | */ |
| 1786 | bl kvmppc_save_tm | 1786 | mr r3, r9 |
| 1787 | ld r4, VCPU_MSR(r3) | ||
| 1788 | bl kvmppc_save_tm_hv | ||
| 1789 | ld r9, HSTATE_KVM_VCPU(r13) | ||
| 1787 | 91: | 1790 | 91: |
| 1788 | #endif | 1791 | #endif |
| 1789 | 1792 | ||
| @@ -2686,8 +2689,9 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) | |||
| 2686 | /* | 2689 | /* |
| 2687 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR | 2690 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR |
| 2688 | */ | 2691 | */ |
| 2689 | ld r9, HSTATE_KVM_VCPU(r13) | 2692 | ld r3, HSTATE_KVM_VCPU(r13) |
| 2690 | bl kvmppc_save_tm | 2693 | ld r4, VCPU_MSR(r3) |
| 2694 | bl kvmppc_save_tm_hv | ||
| 2691 | 91: | 2695 | 91: |
| 2692 | #endif | 2696 | #endif |
| 2693 | 2697 | ||
| @@ -2805,7 +2809,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) | |||
| 2805 | /* | 2809 | /* |
| 2806 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR | 2810 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR |
| 2807 | */ | 2811 | */ |
| 2808 | bl kvmppc_restore_tm | 2812 | mr r3, r4 |
| 2813 | ld r4, VCPU_MSR(r3) | ||
| 2814 | bl kvmppc_restore_tm_hv | ||
| 2815 | ld r4, HSTATE_KVM_VCPU(r13) | ||
| 2809 | 91: | 2816 | 91: |
| 2810 | #endif | 2817 | #endif |
| 2811 | 2818 | ||
| @@ -3126,11 +3133,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
| 3126 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 3133 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
| 3127 | /* | 3134 | /* |
| 3128 | * Save transactional state and TM-related registers. | 3135 | * Save transactional state and TM-related registers. |
| 3129 | * Called with r9 pointing to the vcpu struct. | 3136 | * Called with r3 pointing to the vcpu struct and r4 containing |
| 3137 | * the guest MSR value. | ||
| 3130 | * This can modify all checkpointed registers, but | 3138 | * This can modify all checkpointed registers, but |
| 3131 | * restores r1, r2 and r9 (vcpu pointer) before exit. | 3139 | * restores r1 and r2 before exit. |
| 3132 | */ | 3140 | */ |
| 3133 | kvmppc_save_tm: | 3141 | kvmppc_save_tm_hv: |
| 3142 | /* See if we need to handle fake suspend mode */ | ||
| 3143 | BEGIN_FTR_SECTION | ||
| 3144 | b __kvmppc_save_tm | ||
| 3145 | END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) | ||
| 3146 | |||
| 3147 | lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */ | ||
| 3148 | cmpwi r0, 0 | ||
| 3149 | beq __kvmppc_save_tm | ||
| 3150 | |||
| 3151 | /* The following code handles the fake_suspend = 1 case */ | ||
| 3134 | mflr r0 | 3152 | mflr r0 |
| 3135 | std r0, PPC_LR_STKOFF(r1) | 3153 | std r0, PPC_LR_STKOFF(r1) |
| 3136 | stdu r1, -PPC_MIN_STKFRM(r1) | 3154 | stdu r1, -PPC_MIN_STKFRM(r1) |
| @@ -3141,59 +3159,37 @@ kvmppc_save_tm: | |||
| 3141 | rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG | 3159 | rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG |
| 3142 | mtmsrd r8 | 3160 | mtmsrd r8 |
| 3143 | 3161 | ||
| 3144 | ld r5, VCPU_MSR(r9) | ||
| 3145 | rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 | ||
| 3146 | beq 1f /* TM not active in guest. */ | ||
| 3147 | |||
| 3148 | std r1, HSTATE_HOST_R1(r13) | ||
| 3149 | li r3, TM_CAUSE_KVM_RESCHED | ||
| 3150 | |||
| 3151 | BEGIN_FTR_SECTION | ||
| 3152 | lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */ | ||
| 3153 | cmpwi r0, 0 | ||
| 3154 | beq 3f | ||
| 3155 | rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */ | 3162 | rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */ |
| 3156 | beq 4f | 3163 | beq 4f |
| 3157 | BEGIN_FTR_SECTION_NESTED(96) | 3164 | BEGIN_FTR_SECTION |
| 3158 | bl pnv_power9_force_smt4_catch | 3165 | bl pnv_power9_force_smt4_catch |
| 3159 | END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96) | 3166 | END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) |
| 3160 | nop | 3167 | nop |
| 3161 | b 6f | ||
| 3162 | 3: | ||
| 3163 | /* Emulation of the treclaim instruction needs TEXASR before treclaim */ | ||
| 3164 | mfspr r6, SPRN_TEXASR | ||
| 3165 | std r6, VCPU_ORIG_TEXASR(r9) | ||
| 3166 | 6: | ||
| 3167 | END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) | ||
| 3168 | 3168 | ||
| 3169 | /* Clear the MSR RI since r1, r13 are all going to be foobar. */ | 3169 | std r1, HSTATE_HOST_R1(r13) |
| 3170 | |||
| 3171 | /* Clear the MSR RI since r1, r13 may be foobar. */ | ||
| 3170 | li r5, 0 | 3172 | li r5, 0 |
| 3171 | mtmsrd r5, 1 | 3173 | mtmsrd r5, 1 |
| 3172 | 3174 | ||
| 3173 | /* All GPRs are volatile at this point. */ | 3175 | /* We have to treclaim here because that's the only way to do S->N */ |
| 3176 | li r3, TM_CAUSE_KVM_RESCHED | ||
| 3174 | TRECLAIM(R3) | 3177 | TRECLAIM(R3) |
| 3175 | 3178 | ||
| 3176 | /* Temporarily store r13 and r9 so we have some regs to play with */ | ||
| 3177 | SET_SCRATCH0(r13) | ||
| 3178 | GET_PACA(r13) | ||
| 3179 | std r9, PACATMSCRATCH(r13) | ||
| 3180 | |||
| 3181 | /* If doing TM emulation on POWER9 DD2.2, check for fake suspend mode */ | ||
| 3182 | BEGIN_FTR_SECTION | ||
| 3183 | lbz r9, HSTATE_FAKE_SUSPEND(r13) | ||
| 3184 | cmpwi r9, 0 | ||
| 3185 | beq 2f | ||
| 3186 | /* | 3179 | /* |
| 3187 | * We were in fake suspend, so we are not going to save the | 3180 | * We were in fake suspend, so we are not going to save the |
| 3188 | * register state as the guest checkpointed state (since | 3181 | * register state as the guest checkpointed state (since |
| 3189 | * we already have it), therefore we can now use any volatile GPR. | 3182 | * we already have it), therefore we can now use any volatile GPR. |
| 3190 | */ | 3183 | */ |
| 3191 | /* Reload stack pointer and TOC. */ | 3184 | /* Reload PACA pointer, stack pointer and TOC. */ |
| 3185 | GET_PACA(r13) | ||
| 3192 | ld r1, HSTATE_HOST_R1(r13) | 3186 | ld r1, HSTATE_HOST_R1(r13) |
| 3193 | ld r2, PACATOC(r13) | 3187 | ld r2, PACATOC(r13) |
| 3188 | |||
| 3194 | /* Set MSR RI now we have r1 and r13 back. */ | 3189 | /* Set MSR RI now we have r1 and r13 back. */ |
| 3195 | li r5, MSR_RI | 3190 | li r5, MSR_RI |
| 3196 | mtmsrd r5, 1 | 3191 | mtmsrd r5, 1 |
| 3192 | |||
| 3197 | HMT_MEDIUM | 3193 | HMT_MEDIUM |
| 3198 | ld r6, HSTATE_DSCR(r13) | 3194 | ld r6, HSTATE_DSCR(r13) |
| 3199 | mtspr SPRN_DSCR, r6 | 3195 | mtspr SPRN_DSCR, r6 |
| @@ -3208,85 +3204,9 @@ END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96) | |||
| 3208 | li r0, PSSCR_FAKE_SUSPEND | 3204 | li r0, PSSCR_FAKE_SUSPEND |
| 3209 | andc r3, r3, r0 | 3205 | andc r3, r3, r0 |
| 3210 | mtspr SPRN_PSSCR, r3 | 3206 | mtspr SPRN_PSSCR, r3 |
| 3211 | ld r9, HSTATE_KVM_VCPU(r13) | ||
| 3212 | /* Don't save TEXASR, use value from last exit in real suspend state */ | ||
| 3213 | b 11f | ||
| 3214 | 2: | ||
| 3215 | END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) | ||
| 3216 | 3207 | ||
| 3208 | /* Don't save TEXASR, use value from last exit in real suspend state */ | ||
| 3217 | ld r9, HSTATE_KVM_VCPU(r13) | 3209 | ld r9, HSTATE_KVM_VCPU(r13) |
| 3218 | |||
| 3219 | /* Get a few more GPRs free. */ | ||
| 3220 | std r29, VCPU_GPRS_TM(29)(r9) | ||
| 3221 | std r30, VCPU_GPRS_TM(30)(r9) | ||
| 3222 | std r31, VCPU_GPRS_TM(31)(r9) | ||
| 3223 | |||
| 3224 | /* Save away PPR and DSCR soon so don't run with user values. */ | ||
| 3225 | mfspr r31, SPRN_PPR | ||
| 3226 | HMT_MEDIUM | ||
| 3227 | mfspr r30, SPRN_DSCR | ||
| 3228 | ld r29, HSTATE_DSCR(r13) | ||
| 3229 | mtspr SPRN_DSCR, r29 | ||
| 3230 | |||
| 3231 | /* Save all but r9, r13 & r29-r31 */ | ||
| 3232 | reg = 0 | ||
| 3233 | .rept 29 | ||
| 3234 | .if (reg != 9) && (reg != 13) | ||
| 3235 | std reg, VCPU_GPRS_TM(reg)(r9) | ||
| 3236 | .endif | ||
| 3237 | reg = reg + 1 | ||
| 3238 | .endr | ||
| 3239 | /* ... now save r13 */ | ||
| 3240 | GET_SCRATCH0(r4) | ||
| 3241 | std r4, VCPU_GPRS_TM(13)(r9) | ||
| 3242 | /* ... and save r9 */ | ||
| 3243 | ld r4, PACATMSCRATCH(r13) | ||
| 3244 | std r4, VCPU_GPRS_TM(9)(r9) | ||
| 3245 | |||
| 3246 | /* Reload stack pointer and TOC. */ | ||
| 3247 | ld r1, HSTATE_HOST_R1(r13) | ||
| 3248 | ld r2, PACATOC(r13) | ||
| 3249 | |||
| 3250 | /* Set MSR RI now we have r1 and r13 back. */ | ||
| 3251 | li r5, MSR_RI | ||
| 3252 | mtmsrd r5, 1 | ||
| 3253 | |||
| 3254 | /* Save away checkpointed SPRs. */ | ||
| 3255 | std r31, VCPU_PPR_TM(r9) | ||
| 3256 | std r30, VCPU_DSCR_TM(r9) | ||
| 3257 | mflr r5 | ||
| 3258 | mfcr r6 | ||
| 3259 | mfctr r7 | ||
| 3260 | mfspr r8, SPRN_AMR | ||
| 3261 | mfspr r10, SPRN_TAR | ||
| 3262 | mfxer r11 | ||
| 3263 | std r5, VCPU_LR_TM(r9) | ||
| 3264 | stw r6, VCPU_CR_TM(r9) | ||
| 3265 | std r7, VCPU_CTR_TM(r9) | ||
| 3266 | std r8, VCPU_AMR_TM(r9) | ||
| 3267 | std r10, VCPU_TAR_TM(r9) | ||
| 3268 | std r11, VCPU_XER_TM(r9) | ||
| 3269 | |||
| 3270 | /* Restore r12 as trap number. */ | ||
| 3271 | lwz r12, VCPU_TRAP(r9) | ||
| 3272 | |||
| 3273 | /* Save FP/VSX. */ | ||
| 3274 | addi r3, r9, VCPU_FPRS_TM | ||
| 3275 | bl store_fp_state | ||
| 3276 | addi r3, r9, VCPU_VRS_TM | ||
| 3277 | bl store_vr_state | ||
| 3278 | mfspr r6, SPRN_VRSAVE | ||
| 3279 | stw r6, VCPU_VRSAVE_TM(r9) | ||
| 3280 | 1: | ||
| 3281 | /* | ||
| 3282 | * We need to save these SPRs after the treclaim so that the software | ||
| 3283 | * error code is recorded correctly in the TEXASR. Also the user may | ||
| 3284 | * change these outside of a transaction, so they must always be | ||
| 3285 | * context switched. | ||
| 3286 | */ | ||
| 3287 | mfspr r7, SPRN_TEXASR | ||
| 3288 | std r7, VCPU_TEXASR(r9) | ||
| 3289 | 11: | ||
| 3290 | mfspr r5, SPRN_TFHAR | 3210 | mfspr r5, SPRN_TFHAR |
| 3291 | mfspr r6, SPRN_TFIAR | 3211 | mfspr r6, SPRN_TFIAR |
| 3292 | std r5, VCPU_TFHAR(r9) | 3212 | std r5, VCPU_TFHAR(r9) |
| @@ -3299,149 +3219,63 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) | |||
| 3299 | 3219 | ||
| 3300 | /* | 3220 | /* |
| 3301 | * Restore transactional state and TM-related registers. | 3221 | * Restore transactional state and TM-related registers. |
| 3302 | * Called with r4 pointing to the vcpu struct. | 3222 | * Called with r3 pointing to the vcpu struct |
| 3223 | * and r4 containing the guest MSR value. | ||
| 3303 | * This potentially modifies all checkpointed registers. | 3224 | * This potentially modifies all checkpointed registers. |
| 3304 | * It restores r1, r2, r4 from the PACA. | 3225 | * It restores r1 and r2 from the PACA. |
| 3305 | */ | 3226 | */ |
| 3306 | kvmppc_restore_tm: | 3227 | kvmppc_restore_tm_hv: |
| 3228 | /* | ||
| 3229 | * If we are doing TM emulation for the guest on a POWER9 DD2, | ||
| 3230 | * then we don't actually do a trechkpt -- we either set up | ||
| 3231 | * fake-suspend mode, or emulate a TM rollback. | ||
| 3232 | */ | ||
| 3233 | BEGIN_FTR_SECTION | ||
| 3234 | b __kvmppc_restore_tm | ||
| 3235 | END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) | ||
| 3307 | mflr r0 | 3236 | mflr r0 |
| 3308 | std r0, PPC_LR_STKOFF(r1) | 3237 | std r0, PPC_LR_STKOFF(r1) |
| 3309 | 3238 | ||
| 3310 | /* Turn on TM/FP/VSX/VMX so we can restore them. */ | 3239 | li r0, 0 |
| 3240 | stb r0, HSTATE_FAKE_SUSPEND(r13) | ||
| 3241 | |||
| 3242 | /* Turn on TM so we can restore TM SPRs */ | ||
| 3311 | mfmsr r5 | 3243 | mfmsr r5 |
| 3312 | li r6, MSR_TM >> 32 | 3244 | li r0, 1 |
| 3313 | sldi r6, r6, 32 | 3245 | rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG |
| 3314 | or r5, r5, r6 | ||
| 3315 | ori r5, r5, MSR_FP | ||
| 3316 | oris r5, r5, (MSR_VEC | MSR_VSX)@h | ||
| 3317 | mtmsrd r5 | 3246 | mtmsrd r5 |
| 3318 | 3247 | ||
| 3319 | /* | 3248 | /* |
| 3320 | * The user may change these outside of a transaction, so they must | 3249 | * The user may change these outside of a transaction, so they must |
| 3321 | * always be context switched. | 3250 | * always be context switched. |
| 3322 | */ | 3251 | */ |
| 3323 | ld r5, VCPU_TFHAR(r4) | 3252 | ld r5, VCPU_TFHAR(r3) |
| 3324 | ld r6, VCPU_TFIAR(r4) | 3253 | ld r6, VCPU_TFIAR(r3) |
| 3325 | ld r7, VCPU_TEXASR(r4) | 3254 | ld r7, VCPU_TEXASR(r3) |
| 3326 | mtspr SPRN_TFHAR, r5 | 3255 | mtspr SPRN_TFHAR, r5 |
| 3327 | mtspr SPRN_TFIAR, r6 | 3256 | mtspr SPRN_TFIAR, r6 |
| 3328 | mtspr SPRN_TEXASR, r7 | 3257 | mtspr SPRN_TEXASR, r7 |
| 3329 | 3258 | ||
| 3330 | li r0, 0 | 3259 | rldicl. r5, r4, 64 - MSR_TS_S_LG, 62 |
| 3331 | stb r0, HSTATE_FAKE_SUSPEND(r13) | ||
| 3332 | ld r5, VCPU_MSR(r4) | ||
| 3333 | rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 | ||
| 3334 | beqlr /* TM not active in guest */ | 3260 | beqlr /* TM not active in guest */ |
| 3335 | std r1, HSTATE_HOST_R1(r13) | ||
| 3336 | 3261 | ||
| 3337 | /* Make sure the failure summary is set, otherwise we'll program check | 3262 | /* Make sure the failure summary is set */ |
| 3338 | * when we trechkpt. It's possible that this might have been not set | ||
| 3339 | * on a kvmppc_set_one_reg() call but we shouldn't let this crash the | ||
| 3340 | * host. | ||
| 3341 | */ | ||
| 3342 | oris r7, r7, (TEXASR_FS)@h | 3263 | oris r7, r7, (TEXASR_FS)@h |
| 3343 | mtspr SPRN_TEXASR, r7 | 3264 | mtspr SPRN_TEXASR, r7 |
| 3344 | 3265 | ||
| 3345 | /* | ||
| 3346 | * If we are doing TM emulation for the guest on a POWER9 DD2, | ||
| 3347 | * then we don't actually do a trechkpt -- we either set up | ||
| 3348 | * fake-suspend mode, or emulate a TM rollback. | ||
| 3349 | */ | ||
| 3350 | BEGIN_FTR_SECTION | ||
| 3351 | b .Ldo_tm_fake_load | ||
| 3352 | END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) | ||
| 3353 | |||
| 3354 | /* | ||
| 3355 | * We need to load up the checkpointed state for the guest. | ||
| 3356 | * We need to do this early as it will blow away any GPRs, VSRs and | ||
| 3357 | * some SPRs. | ||
| 3358 | */ | ||
| 3359 | |||
| 3360 | mr r31, r4 | ||
| 3361 | addi r3, r31, VCPU_FPRS_TM | ||
| 3362 | bl load_fp_state | ||
| 3363 | addi r3, r31, VCPU_VRS_TM | ||
| 3364 | bl load_vr_state | ||
| 3365 | mr r4, r31 | ||
| 3366 | lwz r7, VCPU_VRSAVE_TM(r4) | ||
| 3367 | mtspr SPRN_VRSAVE, r7 | ||
| 3368 | |||
| 3369 | ld r5, VCPU_LR_TM(r4) | ||
| 3370 | lwz r6, VCPU_CR_TM(r4) | ||
| 3371 | ld r7, VCPU_CTR_TM(r4) | ||
| 3372 | ld r8, VCPU_AMR_TM(r4) | ||
| 3373 | ld r9, VCPU_TAR_TM(r4) | ||
| 3374 | ld r10, VCPU_XER_TM(r4) | ||
| 3375 | mtlr r5 | ||
| 3376 | mtcr r6 | ||
| 3377 | mtctr r7 | ||
| 3378 | mtspr SPRN_AMR, r8 | ||
| 3379 | mtspr SPRN_TAR, r9 | ||
| 3380 | mtxer r10 | ||
| 3381 | |||
| 3382 | /* | ||
| 3383 | * Load up PPR and DSCR values but don't put them in the actual SPRs | ||
| 3384 | * till the last moment to avoid running with userspace PPR and DSCR for | ||
| 3385 | * too long. | ||
| 3386 | */ | ||
| 3387 | ld r29, VCPU_DSCR_TM(r4) | ||
| 3388 | ld r30, VCPU_PPR_TM(r4) | ||
| 3389 | |||
| 3390 | std r2, PACATMSCRATCH(r13) /* Save TOC */ | ||
| 3391 | |||
| 3392 | /* Clear the MSR RI since r1, r13 are all going to be foobar. */ | ||
| 3393 | li r5, 0 | ||
| 3394 | mtmsrd r5, 1 | ||
| 3395 | |||
| 3396 | /* Load GPRs r0-r28 */ | ||
| 3397 | reg = 0 | ||
| 3398 | .rept 29 | ||
| 3399 | ld reg, VCPU_GPRS_TM(reg)(r31) | ||
| 3400 | reg = reg + 1 | ||
| 3401 | .endr | ||
| 3402 | |||
| 3403 | mtspr SPRN_DSCR, r29 | ||
| 3404 | mtspr SPRN_PPR, r30 | ||
| 3405 | |||
| 3406 | /* Load final GPRs */ | ||
| 3407 | ld 29, VCPU_GPRS_TM(29)(r31) | ||
| 3408 | ld 30, VCPU_GPRS_TM(30)(r31) | ||
| 3409 | ld 31, VCPU_GPRS_TM(31)(r31) | ||
| 3410 | |||
| 3411 | /* TM checkpointed state is now setup. All GPRs are now volatile. */ | ||
| 3412 | TRECHKPT | ||
| 3413 | |||
| 3414 | /* Now let's get back the state we need. */ | ||
| 3415 | HMT_MEDIUM | ||
| 3416 | GET_PACA(r13) | ||
| 3417 | ld r29, HSTATE_DSCR(r13) | ||
| 3418 | mtspr SPRN_DSCR, r29 | ||
| 3419 | ld r4, HSTATE_KVM_VCPU(r13) | ||
| 3420 | ld r1, HSTATE_HOST_R1(r13) | ||
| 3421 | ld r2, PACATMSCRATCH(r13) | ||
| 3422 | |||
| 3423 | /* Set the MSR RI since we have our registers back. */ | ||
| 3424 | li r5, MSR_RI | ||
| 3425 | mtmsrd r5, 1 | ||
| 3426 | 9: | ||
| 3427 | ld r0, PPC_LR_STKOFF(r1) | ||
| 3428 | mtlr r0 | ||
| 3429 | blr | ||
| 3430 | |||
| 3431 | .Ldo_tm_fake_load: | ||
| 3432 | cmpwi r5, 1 /* check for suspended state */ | 3266 | cmpwi r5, 1 /* check for suspended state */ |
| 3433 | bgt 10f | 3267 | bgt 10f |
| 3434 | stb r5, HSTATE_FAKE_SUSPEND(r13) | 3268 | stb r5, HSTATE_FAKE_SUSPEND(r13) |
| 3435 | b 9b /* and return */ | 3269 | b 9f /* and return */ |
| 3436 | 10: stdu r1, -PPC_MIN_STKFRM(r1) | 3270 | 10: stdu r1, -PPC_MIN_STKFRM(r1) |
| 3437 | /* guest is in transactional state, so simulate rollback */ | 3271 | /* guest is in transactional state, so simulate rollback */ |
| 3438 | mr r3, r4 | ||
| 3439 | bl kvmhv_emulate_tm_rollback | 3272 | bl kvmhv_emulate_tm_rollback |
| 3440 | nop | 3273 | nop |
| 3441 | ld r4, HSTATE_KVM_VCPU(r13) /* our vcpu pointer has been trashed */ | ||
| 3442 | addi r1, r1, PPC_MIN_STKFRM | 3274 | addi r1, r1, PPC_MIN_STKFRM |
| 3443 | b 9b | 3275 | 9: ld r0, PPC_LR_STKOFF(r1) |
| 3444 | #endif | 3276 | mtlr r0 |
| 3277 | blr | ||
| 3278 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | ||
| 3445 | 3279 | ||
| 3446 | /* | 3280 | /* |
| 3447 | * We come here if we get any exception or interrupt while we are | 3281 | * We come here if we get any exception or interrupt while we are |
| @@ -3572,6 +3406,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) | |||
| 3572 | bcl 20, 31, .+4 | 3406 | bcl 20, 31, .+4 |
| 3573 | 5: mflr r3 | 3407 | 5: mflr r3 |
| 3574 | addi r3, r3, 9f - 5b | 3408 | addi r3, r3, 9f - 5b |
| 3409 | li r4, -1 | ||
| 3410 | rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */ | ||
| 3575 | ld r4, PACAKMSR(r13) | 3411 | ld r4, PACAKMSR(r13) |
| 3576 | mtspr SPRN_SRR0, r3 | 3412 | mtspr SPRN_SRR0, r3 |
| 3577 | mtspr SPRN_SRR1, r4 | 3413 | mtspr SPRN_SRR1, r4 |
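The two added instructions fix up the address computed by the bcl/mflr trick: mflr yields whatever address the code is currently executing at, which in real mode lacks the kernel linear-mapping prefix, so the rldimi forces the top two bits on before the address goes into SRR0. In C terms the fixup is a single OR; a sketch, where current_pc_of_label() is a hypothetical name for the bcl/mflr result:

    /* Sketch: make the rfid target a kernel linear-mapping address. */
    unsigned long addr = current_pc_of_label();   /* hypothetical: bcl/mflr result */
    addr |= 0xc000000000000000UL;                 /* what the rldimi above does */
    mtspr(SPRN_SRR0, addr);                       /* return through rfid */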
diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c index bf710ad3a6d7..008285058f9b 100644 --- a/arch/powerpc/kvm/book3s_hv_tm.c +++ b/arch/powerpc/kvm/book3s_hv_tm.c | |||
| @@ -19,7 +19,7 @@ static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause) | |||
| 19 | u64 texasr, tfiar; | 19 | u64 texasr, tfiar; |
| 20 | u64 msr = vcpu->arch.shregs.msr; | 20 | u64 msr = vcpu->arch.shregs.msr; |
| 21 | 21 | ||
| 22 | tfiar = vcpu->arch.pc & ~0x3ull; | 22 | tfiar = vcpu->arch.regs.nip & ~0x3ull; |
| 23 | texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT; | 23 | texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT; |
| 24 | if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) | 24 | if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) |
| 25 | texasr |= TEXASR_SUSP; | 25 | texasr |= TEXASR_SUSP; |
| @@ -57,8 +57,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) | |||
| 57 | (newmsr & MSR_TM))); | 57 | (newmsr & MSR_TM))); |
| 58 | newmsr = sanitize_msr(newmsr); | 58 | newmsr = sanitize_msr(newmsr); |
| 59 | vcpu->arch.shregs.msr = newmsr; | 59 | vcpu->arch.shregs.msr = newmsr; |
| 60 | vcpu->arch.cfar = vcpu->arch.pc - 4; | 60 | vcpu->arch.cfar = vcpu->arch.regs.nip - 4; |
| 61 | vcpu->arch.pc = vcpu->arch.shregs.srr0; | 61 | vcpu->arch.regs.nip = vcpu->arch.shregs.srr0; |
| 62 | return RESUME_GUEST; | 62 | return RESUME_GUEST; |
| 63 | 63 | ||
| 64 | case PPC_INST_RFEBB: | 64 | case PPC_INST_RFEBB: |
| @@ -90,8 +90,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) | |||
| 90 | vcpu->arch.bescr = bescr; | 90 | vcpu->arch.bescr = bescr; |
| 91 | msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; | 91 | msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; |
| 92 | vcpu->arch.shregs.msr = msr; | 92 | vcpu->arch.shregs.msr = msr; |
| 93 | vcpu->arch.cfar = vcpu->arch.pc - 4; | 93 | vcpu->arch.cfar = vcpu->arch.regs.nip - 4; |
| 94 | vcpu->arch.pc = vcpu->arch.ebbrr; | 94 | vcpu->arch.regs.nip = vcpu->arch.ebbrr; |
| 95 | return RESUME_GUEST; | 95 | return RESUME_GUEST; |
| 96 | 96 | ||
| 97 | case PPC_INST_MTMSRD: | 97 | case PPC_INST_MTMSRD: |
diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c index d98ccfd2b88c..b2c7c6fca4f9 100644 --- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c | |||
| @@ -35,8 +35,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) | |||
| 35 | return 0; | 35 | return 0; |
| 36 | newmsr = sanitize_msr(newmsr); | 36 | newmsr = sanitize_msr(newmsr); |
| 37 | vcpu->arch.shregs.msr = newmsr; | 37 | vcpu->arch.shregs.msr = newmsr; |
| 38 | vcpu->arch.cfar = vcpu->arch.pc - 4; | 38 | vcpu->arch.cfar = vcpu->arch.regs.nip - 4; |
| 39 | vcpu->arch.pc = vcpu->arch.shregs.srr0; | 39 | vcpu->arch.regs.nip = vcpu->arch.shregs.srr0; |
| 40 | return 1; | 40 | return 1; |
| 41 | 41 | ||
| 42 | case PPC_INST_RFEBB: | 42 | case PPC_INST_RFEBB: |
| @@ -58,8 +58,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) | |||
| 58 | mtspr(SPRN_BESCR, bescr); | 58 | mtspr(SPRN_BESCR, bescr); |
| 59 | msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; | 59 | msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; |
| 60 | vcpu->arch.shregs.msr = msr; | 60 | vcpu->arch.shregs.msr = msr; |
| 61 | vcpu->arch.cfar = vcpu->arch.pc - 4; | 61 | vcpu->arch.cfar = vcpu->arch.regs.nip - 4; |
| 62 | vcpu->arch.pc = mfspr(SPRN_EBBRR); | 62 | vcpu->arch.regs.nip = mfspr(SPRN_EBBRR); |
| 63 | return 1; | 63 | return 1; |
| 64 | 64 | ||
| 65 | case PPC_INST_MTMSRD: | 65 | case PPC_INST_MTMSRD: |
| @@ -103,7 +103,7 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) | |||
| 103 | void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu) | 103 | void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu) |
| 104 | { | 104 | { |
| 105 | vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */ | 105 | vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */ |
| 106 | vcpu->arch.pc = vcpu->arch.tfhar; | 106 | vcpu->arch.regs.nip = vcpu->arch.tfhar; |
| 107 | copy_from_checkpoint(vcpu); | 107 | copy_from_checkpoint(vcpu); |
| 108 | vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000; | 108 | vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000; |
| 109 | } | 109 | } |
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index d3f304d06adf..c3b8006f0eac 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
| @@ -42,6 +42,8 @@ | |||
| 42 | #include <linux/highmem.h> | 42 | #include <linux/highmem.h> |
| 43 | #include <linux/module.h> | 43 | #include <linux/module.h> |
| 44 | #include <linux/miscdevice.h> | 44 | #include <linux/miscdevice.h> |
| 45 | #include <asm/asm-prototypes.h> | ||
| 46 | #include <asm/tm.h> | ||
| 45 | 47 | ||
| 46 | #include "book3s.h" | 48 | #include "book3s.h" |
| 47 | 49 | ||
| @@ -53,7 +55,9 @@ | |||
| 53 | 55 | ||
| 54 | static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, | 56 | static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, |
| 55 | ulong msr); | 57 | ulong msr); |
| 56 | static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); | 58 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 59 | static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac); | ||
| 60 | #endif | ||
| 57 | 61 | ||
| 58 | /* Some compatibility defines */ | 62 | /* Some compatibility defines */ |
| 59 | #ifdef CONFIG_PPC_BOOK3S_32 | 63 | #ifdef CONFIG_PPC_BOOK3S_32 |
| @@ -114,6 +118,8 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) | |||
| 114 | 118 | ||
| 115 | if (kvmppc_is_split_real(vcpu)) | 119 | if (kvmppc_is_split_real(vcpu)) |
| 116 | kvmppc_fixup_split_real(vcpu); | 120 | kvmppc_fixup_split_real(vcpu); |
| 121 | |||
| 122 | kvmppc_restore_tm_pr(vcpu); | ||
| 117 | } | 123 | } |
| 118 | 124 | ||
| 119 | static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) | 125 | static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) |
| @@ -133,6 +139,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) | |||
| 133 | 139 | ||
| 134 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); | 140 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); |
| 135 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | 141 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); |
| 142 | kvmppc_save_tm_pr(vcpu); | ||
| 136 | 143 | ||
| 137 | /* Enable AIL if supported */ | 144 | /* Enable AIL if supported */ |
| 138 | if (cpu_has_feature(CPU_FTR_HVMODE) && | 145 | if (cpu_has_feature(CPU_FTR_HVMODE) && |
| @@ -147,25 +154,25 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu) | |||
| 147 | { | 154 | { |
| 148 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 155 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
| 149 | 156 | ||
| 150 | svcpu->gpr[0] = vcpu->arch.gpr[0]; | 157 | svcpu->gpr[0] = vcpu->arch.regs.gpr[0]; |
| 151 | svcpu->gpr[1] = vcpu->arch.gpr[1]; | 158 | svcpu->gpr[1] = vcpu->arch.regs.gpr[1]; |
| 152 | svcpu->gpr[2] = vcpu->arch.gpr[2]; | 159 | svcpu->gpr[2] = vcpu->arch.regs.gpr[2]; |
| 153 | svcpu->gpr[3] = vcpu->arch.gpr[3]; | 160 | svcpu->gpr[3] = vcpu->arch.regs.gpr[3]; |
| 154 | svcpu->gpr[4] = vcpu->arch.gpr[4]; | 161 | svcpu->gpr[4] = vcpu->arch.regs.gpr[4]; |
| 155 | svcpu->gpr[5] = vcpu->arch.gpr[5]; | 162 | svcpu->gpr[5] = vcpu->arch.regs.gpr[5]; |
| 156 | svcpu->gpr[6] = vcpu->arch.gpr[6]; | 163 | svcpu->gpr[6] = vcpu->arch.regs.gpr[6]; |
| 157 | svcpu->gpr[7] = vcpu->arch.gpr[7]; | 164 | svcpu->gpr[7] = vcpu->arch.regs.gpr[7]; |
| 158 | svcpu->gpr[8] = vcpu->arch.gpr[8]; | 165 | svcpu->gpr[8] = vcpu->arch.regs.gpr[8]; |
| 159 | svcpu->gpr[9] = vcpu->arch.gpr[9]; | 166 | svcpu->gpr[9] = vcpu->arch.regs.gpr[9]; |
| 160 | svcpu->gpr[10] = vcpu->arch.gpr[10]; | 167 | svcpu->gpr[10] = vcpu->arch.regs.gpr[10]; |
| 161 | svcpu->gpr[11] = vcpu->arch.gpr[11]; | 168 | svcpu->gpr[11] = vcpu->arch.regs.gpr[11]; |
| 162 | svcpu->gpr[12] = vcpu->arch.gpr[12]; | 169 | svcpu->gpr[12] = vcpu->arch.regs.gpr[12]; |
| 163 | svcpu->gpr[13] = vcpu->arch.gpr[13]; | 170 | svcpu->gpr[13] = vcpu->arch.regs.gpr[13]; |
| 164 | svcpu->cr = vcpu->arch.cr; | 171 | svcpu->cr = vcpu->arch.cr; |
| 165 | svcpu->xer = vcpu->arch.xer; | 172 | svcpu->xer = vcpu->arch.regs.xer; |
| 166 | svcpu->ctr = vcpu->arch.ctr; | 173 | svcpu->ctr = vcpu->arch.regs.ctr; |
| 167 | svcpu->lr = vcpu->arch.lr; | 174 | svcpu->lr = vcpu->arch.regs.link; |
| 168 | svcpu->pc = vcpu->arch.pc; | 175 | svcpu->pc = vcpu->arch.regs.nip; |
| 169 | #ifdef CONFIG_PPC_BOOK3S_64 | 176 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 170 | svcpu->shadow_fscr = vcpu->arch.shadow_fscr; | 177 | svcpu->shadow_fscr = vcpu->arch.shadow_fscr; |
| 171 | #endif | 178 | #endif |
| @@ -182,10 +189,45 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu) | |||
| 182 | svcpu_put(svcpu); | 189 | svcpu_put(svcpu); |
| 183 | } | 190 | } |
| 184 | 191 | ||
| 192 | static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) | ||
| 193 | { | ||
| 194 | ulong guest_msr = kvmppc_get_msr(vcpu); | ||
| 195 | ulong smsr = guest_msr; | ||
| 196 | |||
| 197 | /* Guest MSR values */ | ||
| 198 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 199 | smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE | | ||
| 200 | MSR_TM | MSR_TS_MASK; | ||
| 201 | #else | ||
| 202 | smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE; | ||
| 203 | #endif | ||
| 204 | /* Process MSR values */ | ||
| 205 | smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; | ||
| 206 | /* External providers the guest reserved */ | ||
| 207 | smsr |= (guest_msr & vcpu->arch.guest_owned_ext); | ||
| 208 | /* 64-bit Process MSR values */ | ||
| 209 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
| 210 | smsr |= MSR_ISF | MSR_HV; | ||
| 211 | #endif | ||
| 212 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 213 | /* | ||
| 214 | * In guest privileged state we want all TM transactions to fail, so | ||
| 215 | * clear the MSR TM bit so that every tbegin. traps into the | ||
| 216 | * host. | ||
| 217 | */ | ||
| 218 | if (!(guest_msr & MSR_PR)) | ||
| 219 | smsr &= ~MSR_TM; | ||
| 220 | #endif | ||
| 221 | vcpu->arch.shadow_msr = smsr; | ||
| 222 | } | ||
| 223 | |||
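kvmppc_recalc_shadow_msr() is moved ahead of kvmppc_copy_from_svcpu() so the new TM sync code can call it. It computes the MSR the host really runs the guest with: keep the bits the guest may control, force the bits the host requires, pass through the math facilities the guest currently owns, and hide MSR_TM from privileged guests so every tbegin. traps. A freestanding sketch of the same bit algebra (the 64-bit MSR_ISF|MSR_HV step is omitted):

    /* Sketch of the shadow-MSR computation (TM-enabled configuration). */
    static unsigned long recalc_shadow_msr(unsigned long guest_msr,
                                           unsigned long guest_owned_ext)
    {
            unsigned long smsr = guest_msr;

            /* Bits the guest is allowed to control directly. */
            smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE |
                    MSR_TM | MSR_TS_MASK;
            /* Bits the host needs set whenever the guest runs. */
            smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
            /* Math facilities (FP/VEC/VSX) the guest currently owns. */
            smsr |= guest_msr & guest_owned_ext;
            /* Privileged guest: hide TM so every tbegin. traps to the host. */
            if (!(guest_msr & MSR_PR))
                    smsr &= ~MSR_TM;
            return smsr;
    }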
| 185 | /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ | 224 | /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ |
| 186 | void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) | 225 | void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) |
| 187 | { | 226 | { |
| 188 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 227 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
| 228 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 229 | ulong old_msr; | ||
| 230 | #endif | ||
| 189 | 231 | ||
| 190 | /* | 232 | /* |
| 191 | * Maybe we were already preempted and synced the svcpu from | 233 | * Maybe we were already preempted and synced the svcpu from |
| @@ -194,25 +236,25 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) | |||
| 194 | if (!svcpu->in_use) | 236 | if (!svcpu->in_use) |
| 195 | goto out; | 237 | goto out; |
| 196 | 238 | ||
| 197 | vcpu->arch.gpr[0] = svcpu->gpr[0]; | 239 | vcpu->arch.regs.gpr[0] = svcpu->gpr[0]; |
| 198 | vcpu->arch.gpr[1] = svcpu->gpr[1]; | 240 | vcpu->arch.regs.gpr[1] = svcpu->gpr[1]; |
| 199 | vcpu->arch.gpr[2] = svcpu->gpr[2]; | 241 | vcpu->arch.regs.gpr[2] = svcpu->gpr[2]; |
| 200 | vcpu->arch.gpr[3] = svcpu->gpr[3]; | 242 | vcpu->arch.regs.gpr[3] = svcpu->gpr[3]; |
| 201 | vcpu->arch.gpr[4] = svcpu->gpr[4]; | 243 | vcpu->arch.regs.gpr[4] = svcpu->gpr[4]; |
| 202 | vcpu->arch.gpr[5] = svcpu->gpr[5]; | 244 | vcpu->arch.regs.gpr[5] = svcpu->gpr[5]; |
| 203 | vcpu->arch.gpr[6] = svcpu->gpr[6]; | 245 | vcpu->arch.regs.gpr[6] = svcpu->gpr[6]; |
| 204 | vcpu->arch.gpr[7] = svcpu->gpr[7]; | 246 | vcpu->arch.regs.gpr[7] = svcpu->gpr[7]; |
| 205 | vcpu->arch.gpr[8] = svcpu->gpr[8]; | 247 | vcpu->arch.regs.gpr[8] = svcpu->gpr[8]; |
| 206 | vcpu->arch.gpr[9] = svcpu->gpr[9]; | 248 | vcpu->arch.regs.gpr[9] = svcpu->gpr[9]; |
| 207 | vcpu->arch.gpr[10] = svcpu->gpr[10]; | 249 | vcpu->arch.regs.gpr[10] = svcpu->gpr[10]; |
| 208 | vcpu->arch.gpr[11] = svcpu->gpr[11]; | 250 | vcpu->arch.regs.gpr[11] = svcpu->gpr[11]; |
| 209 | vcpu->arch.gpr[12] = svcpu->gpr[12]; | 251 | vcpu->arch.regs.gpr[12] = svcpu->gpr[12]; |
| 210 | vcpu->arch.gpr[13] = svcpu->gpr[13]; | 252 | vcpu->arch.regs.gpr[13] = svcpu->gpr[13]; |
| 211 | vcpu->arch.cr = svcpu->cr; | 253 | vcpu->arch.cr = svcpu->cr; |
| 212 | vcpu->arch.xer = svcpu->xer; | 254 | vcpu->arch.regs.xer = svcpu->xer; |
| 213 | vcpu->arch.ctr = svcpu->ctr; | 255 | vcpu->arch.regs.ctr = svcpu->ctr; |
| 214 | vcpu->arch.lr = svcpu->lr; | 256 | vcpu->arch.regs.link = svcpu->lr; |
| 215 | vcpu->arch.pc = svcpu->pc; | 257 | vcpu->arch.regs.nip = svcpu->pc; |
| 216 | vcpu->arch.shadow_srr1 = svcpu->shadow_srr1; | 258 | vcpu->arch.shadow_srr1 = svcpu->shadow_srr1; |
| 217 | vcpu->arch.fault_dar = svcpu->fault_dar; | 259 | vcpu->arch.fault_dar = svcpu->fault_dar; |
| 218 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; | 260 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; |
| @@ -228,12 +270,116 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) | |||
| 228 | to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb; | 270 | to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb; |
| 229 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) | 271 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
| 230 | vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic; | 272 | vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic; |
| 273 | |||
| 274 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 275 | /* | ||
| 276 | * Unlike other MSR bits, the MSR[TS] bits can be changed by the guest | ||
| 277 | * without notifying the host: they are modified by unprivileged | ||
| 278 | * instructions such as "tbegin"/"tend"/"tresume"/"tsuspend" in a PR | ||
| 279 | * KVM guest. | ||
| 280 | * | ||
| 281 | * It is necessary to sync them here to calculate a correct shadow_msr. | ||
| 282 | * | ||
| 283 | * A privileged guest's tbegin always fails at present, so we only | ||
| 284 | * need to take care of problem-state guests. | ||
| 285 | */ | ||
| 286 | old_msr = kvmppc_get_msr(vcpu); | ||
| 287 | if (unlikely((old_msr & MSR_PR) && | ||
| 288 | (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) != | ||
| 289 | (old_msr & (MSR_TS_MASK)))) { | ||
| 290 | old_msr &= ~(MSR_TS_MASK); | ||
| 291 | old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)); | ||
| 292 | kvmppc_set_msr_fast(vcpu, old_msr); | ||
| 293 | kvmppc_recalc_shadow_msr(vcpu); | ||
| 294 | } | ||
| 295 | #endif | ||
| 296 | |||
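In code terms the sync above is: take the TS bits the hardware reported at exit (mirrored into shadow_srr1), and if a problem-state guest's tbegin./tend. changed them relative to the saved guest MSR, fold them back in and recompute the shadow MSR. Condensed sketch of the same logic:

    /* Sketch of the MSR[TS] resync performed above. */
    unsigned long msr  = kvmppc_get_msr(vcpu);
    unsigned long hwts = vcpu->arch.shadow_srr1 & MSR_TS_MASK;

    if ((msr & MSR_PR) && (msr & MSR_TS_MASK) != hwts) {
            msr = (msr & ~MSR_TS_MASK) | hwts;
            kvmppc_set_msr_fast(vcpu, msr);
            kvmppc_recalc_shadow_msr(vcpu);   /* shadow must track the new TS */
    }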
| 231 | svcpu->in_use = false; | 297 | svcpu->in_use = false; |
| 232 | 298 | ||
| 233 | out: | 299 | out: |
| 234 | svcpu_put(svcpu); | 300 | svcpu_put(svcpu); |
| 235 | } | 301 | } |
| 236 | 302 | ||
| 303 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 304 | void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) | ||
| 305 | { | ||
| 306 | tm_enable(); | ||
| 307 | vcpu->arch.tfhar = mfspr(SPRN_TFHAR); | ||
| 308 | vcpu->arch.texasr = mfspr(SPRN_TEXASR); | ||
| 309 | vcpu->arch.tfiar = mfspr(SPRN_TFIAR); | ||
| 310 | tm_disable(); | ||
| 311 | } | ||
| 312 | |||
| 313 | void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) | ||
| 314 | { | ||
| 315 | tm_enable(); | ||
| 316 | mtspr(SPRN_TFHAR, vcpu->arch.tfhar); | ||
| 317 | mtspr(SPRN_TEXASR, vcpu->arch.texasr); | ||
| 318 | mtspr(SPRN_TFIAR, vcpu->arch.tfiar); | ||
| 319 | tm_disable(); | ||
| 320 | } | ||
| 321 | |||
| 322 | /* Load up the math (FP/VEC/VSX) facilities that are enabled in the guest | ||
| 323 | * MSR but not yet enabled in hardware. | ||
| 324 | */ | ||
| 325 | static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu) | ||
| 326 | { | ||
| 327 | ulong exit_nr; | ||
| 328 | ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) & | ||
| 329 | (MSR_FP | MSR_VEC | MSR_VSX); | ||
| 330 | |||
| 331 | if (!ext_diff) | ||
| 332 | return; | ||
| 333 | |||
| 334 | if (ext_diff == MSR_FP) | ||
| 335 | exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL; | ||
| 336 | else if (ext_diff == MSR_VEC) | ||
| 337 | exit_nr = BOOK3S_INTERRUPT_ALTIVEC; | ||
| 338 | else | ||
| 339 | exit_nr = BOOK3S_INTERRUPT_VSX; | ||
| 340 | |||
| 341 | kvmppc_handle_ext(vcpu, exit_nr, ext_diff); | ||
| 342 | } | ||
| 343 | |||
| 344 | void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) | ||
| 345 | { | ||
| 346 | if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) { | ||
| 347 | kvmppc_save_tm_sprs(vcpu); | ||
| 348 | return; | ||
| 349 | } | ||
| 350 | |||
| 351 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | ||
| 352 | kvmppc_giveup_ext(vcpu, MSR_VSX); | ||
| 353 | |||
| 354 | preempt_disable(); | ||
| 355 | _kvmppc_save_tm_pr(vcpu, mfmsr()); | ||
| 356 | preempt_enable(); | ||
| 357 | } | ||
| 358 | |||
| 359 | void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) | ||
| 360 | { | ||
| 361 | if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) { | ||
| 362 | kvmppc_restore_tm_sprs(vcpu); | ||
| 363 | if (kvmppc_get_msr(vcpu) & MSR_TM) { | ||
| 364 | kvmppc_handle_lost_math_exts(vcpu); | ||
| 365 | if (vcpu->arch.fscr & FSCR_TAR) | ||
| 366 | kvmppc_handle_fac(vcpu, FSCR_TAR_LG); | ||
| 367 | } | ||
| 368 | return; | ||
| 369 | } | ||
| 370 | |||
| 371 | preempt_disable(); | ||
| 372 | _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu)); | ||
| 373 | preempt_enable(); | ||
| 374 | |||
| 375 | if (kvmppc_get_msr(vcpu) & MSR_TM) { | ||
| 376 | kvmppc_handle_lost_math_exts(vcpu); | ||
| 377 | if (vcpu->arch.fscr & FSCR_TAR) | ||
| 378 | kvmppc_handle_fac(vcpu, FSCR_TAR_LG); | ||
| 379 | } | ||
| 380 | } | ||
| 381 | #endif | ||
| 382 | |||
| 237 | static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) | 383 | static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) |
| 238 | { | 384 | { |
| 239 | int r = 1; /* Indicate we want to get back into the guest */ | 385 | int r = 1; /* Indicate we want to get back into the guest */ |
| @@ -306,32 +452,29 @@ static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte) | |||
| 306 | 452 | ||
| 307 | /*****************************************/ | 453 | /*****************************************/ |
| 308 | 454 | ||
| 309 | static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) | ||
| 310 | { | ||
| 311 | ulong guest_msr = kvmppc_get_msr(vcpu); | ||
| 312 | ulong smsr = guest_msr; | ||
| 313 | |||
| 314 | /* Guest MSR values */ | ||
| 315 | smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE; | ||
| 316 | /* Process MSR values */ | ||
| 317 | smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; | ||
| 318 | /* External providers the guest reserved */ | ||
| 319 | smsr |= (guest_msr & vcpu->arch.guest_owned_ext); | ||
| 320 | /* 64-bit Process MSR values */ | ||
| 321 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
| 322 | smsr |= MSR_ISF | MSR_HV; | ||
| 323 | #endif | ||
| 324 | vcpu->arch.shadow_msr = smsr; | ||
| 325 | } | ||
| 326 | |||
| 327 | static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) | 455 | static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) |
| 328 | { | 456 | { |
| 329 | ulong old_msr = kvmppc_get_msr(vcpu); | 457 | ulong old_msr; |
| 458 | |||
| 459 | /* For PAPR guest, make sure MSR reflects guest mode */ | ||
| 460 | if (vcpu->arch.papr_enabled) | ||
| 461 | msr = (msr & ~MSR_HV) | MSR_ME; | ||
| 330 | 462 | ||
| 331 | #ifdef EXIT_DEBUG | 463 | #ifdef EXIT_DEBUG |
| 332 | printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); | 464 | printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); |
| 333 | #endif | 465 | #endif |
| 334 | 466 | ||
| 467 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 468 | /* We should never set the guest MSR to TS=10 && PR=0, | ||
| 469 | * since we always fail transactions in guest privileged | ||
| 470 | * state. | ||
| 471 | */ | ||
| 472 | if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr)) | ||
| 473 | kvmppc_emulate_tabort(vcpu, | ||
| 474 | TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT); | ||
| 475 | #endif | ||
| 476 | |||
| 477 | old_msr = kvmppc_get_msr(vcpu); | ||
| 335 | msr &= to_book3s(vcpu)->msr_mask; | 478 | msr &= to_book3s(vcpu)->msr_mask; |
| 336 | kvmppc_set_msr_fast(vcpu, msr); | 479 | kvmppc_set_msr_fast(vcpu, msr); |
| 337 | kvmppc_recalc_shadow_msr(vcpu); | 480 | kvmppc_recalc_shadow_msr(vcpu); |
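The TM guard added in this hunk relies on the standard TS encoding in the MSR: 00 = non-transactional, 01 = suspended, 10 = transactional, and MSR_TM_TRANSACTIONAL() tests for the 10 case. Since privileged-state transactions always fail under PR KVM, a target MSR of TS=10 with PR=0 is converted into an emulated tabort. instead. A sketch of the predicate, with the shift value shown for illustration only:

    /* Sketch of the TS-field test behind MSR_TM_TRANSACTIONAL(). */
    #define TS_SHIFT       33                    /* illustrative bit position */
    #define TS_FIELD(msr)  (((msr) >> TS_SHIFT) & 0x3)

    static inline int msr_is_transactional(unsigned long msr)
    {
            return TS_FIELD(msr) == 0x2;         /* TS = 0b10: transactional */
    }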
| @@ -387,6 +530,11 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) | |||
| 387 | /* Preload FPU if it's enabled */ | 530 | /* Preload FPU if it's enabled */ |
| 388 | if (kvmppc_get_msr(vcpu) & MSR_FP) | 531 | if (kvmppc_get_msr(vcpu) & MSR_FP) |
| 389 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); | 532 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
| 533 | |||
| 534 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 535 | if (kvmppc_get_msr(vcpu) & MSR_TM) | ||
| 536 | kvmppc_handle_lost_math_exts(vcpu); | ||
| 537 | #endif | ||
| 390 | } | 538 | } |
| 391 | 539 | ||
| 392 | void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) | 540 | void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) |
| @@ -584,24 +732,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 584 | pte.may_execute = !data; | 732 | pte.may_execute = !data; |
| 585 | } | 733 | } |
| 586 | 734 | ||
| 587 | if (page_found == -ENOENT) { | 735 | if (page_found == -ENOENT || page_found == -EPERM) { |
| 588 | /* Page not found in guest PTE entries */ | 736 | /* Page not found in guest PTE entries, or protection fault */ |
| 589 | u64 ssrr1 = vcpu->arch.shadow_srr1; | 737 | u64 flags; |
| 590 | u64 msr = kvmppc_get_msr(vcpu); | 738 | |
| 591 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); | 739 | if (page_found == -EPERM) |
| 592 | kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr); | 740 | flags = DSISR_PROTFAULT; |
| 593 | kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL)); | 741 | else |
| 594 | kvmppc_book3s_queue_irqprio(vcpu, vec); | 742 | flags = DSISR_NOHPTE; |
| 595 | } else if (page_found == -EPERM) { | 743 | if (data) { |
| 596 | /* Storage protection */ | 744 | flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE; |
| 597 | u32 dsisr = vcpu->arch.fault_dsisr; | 745 | kvmppc_core_queue_data_storage(vcpu, eaddr, flags); |
| 598 | u64 ssrr1 = vcpu->arch.shadow_srr1; | 746 | } else { |
| 599 | u64 msr = kvmppc_get_msr(vcpu); | 747 | kvmppc_core_queue_inst_storage(vcpu, flags); |
| 600 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); | 748 | } |
| 601 | dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT; | ||
| 602 | kvmppc_set_dsisr(vcpu, dsisr); | ||
| 603 | kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL)); | ||
| 604 | kvmppc_book3s_queue_irqprio(vcpu, vec); | ||
| 605 | } else if (page_found == -EINVAL) { | 749 | } else if (page_found == -EINVAL) { |
| 606 | /* Page not found in guest SLB */ | 750 | /* Page not found in guest SLB */ |
| 607 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); | 751 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); |
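The rewritten fault arm above merges the old -ENOENT and -EPERM blocks: the only real difference between them is which DSISR flag is raised (missing HPTE versus protection fault), plus the store bit copied from the hardware DSISR on data accesses, after which the fault is queued as a data or instruction storage interrupt. The decision condensed (sketch):

    /* Sketch of the merged -ENOENT / -EPERM handling above. */
    u64 flags = (page_found == -EPERM) ? DSISR_PROTFAULT : DSISR_NOHPTE;

    if (data) {
            flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
            kvmppc_core_queue_data_storage(vcpu, eaddr, flags);   /* DSI */
    } else {
            kvmppc_core_queue_inst_storage(vcpu, flags);          /* ISI */
    }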
| @@ -683,7 +827,7 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) | |||
| 683 | } | 827 | } |
| 684 | 828 | ||
| 685 | /* Give up facility (TAR / EBB / DSCR) */ | 829 | /* Give up facility (TAR / EBB / DSCR) */ |
| 686 | static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac) | 830 | void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac) |
| 687 | { | 831 | { |
| 688 | #ifdef CONFIG_PPC_BOOK3S_64 | 832 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 689 | if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) { | 833 | if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) { |
| @@ -802,7 +946,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) | |||
| 802 | 946 | ||
| 803 | #ifdef CONFIG_PPC_BOOK3S_64 | 947 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 804 | 948 | ||
| 805 | static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac) | 949 | void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac) |
| 806 | { | 950 | { |
| 807 | /* Inject the Interrupt Cause field and trigger a guest interrupt */ | 951 | /* Inject the Interrupt Cause field and trigger a guest interrupt */ |
| 808 | vcpu->arch.fscr &= ~(0xffULL << 56); | 952 | vcpu->arch.fscr &= ~(0xffULL << 56); |
| @@ -864,6 +1008,18 @@ static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac) | |||
| 864 | break; | 1008 | break; |
| 865 | } | 1009 | } |
| 866 | 1010 | ||
| 1011 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 1012 | /* Since MSR_TM is disabled in privileged state, an mfspr of a TM SPR | ||
| 1013 | * can trigger a TM facility-unavailable interrupt. In that case the | ||
| 1014 | * emulation is handled by kvmppc_emulate_fac(), which finally invokes | ||
| 1015 | * kvmppc_emulate_mfspr(). But note that the mfspr's RT field can name | ||
| 1016 | * a non-volatile register, so those NV registers must be restored to | ||
| 1017 | * reflect the update. | ||
| 1018 | */ | ||
| 1019 | if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR)) | ||
| 1020 | return RESUME_GUEST_NV; | ||
| 1021 | #endif | ||
| 1022 | |||
| 867 | return RESUME_GUEST; | 1023 | return RESUME_GUEST; |
| 868 | } | 1024 | } |
| 869 | 1025 | ||
| @@ -872,7 +1028,12 @@ void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr) | |||
| 872 | if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) { | 1028 | if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) { |
| 873 | /* TAR got dropped, drop it in shadow too */ | 1029 | /* TAR got dropped, drop it in shadow too */ |
| 874 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | 1030 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); |
| 1031 | } else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) { | ||
| 1032 | vcpu->arch.fscr = fscr; | ||
| 1033 | kvmppc_handle_fac(vcpu, FSCR_TAR_LG); | ||
| 1034 | return; | ||
| 875 | } | 1035 | } |
| 1036 | |||
| 876 | vcpu->arch.fscr = fscr; | 1037 | vcpu->arch.fscr = fscr; |
| 877 | } | 1038 | } |
| 878 | #endif | 1039 | #endif |
| @@ -1017,10 +1178,8 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 1017 | kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); | 1178 | kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); |
| 1018 | r = RESUME_GUEST; | 1179 | r = RESUME_GUEST; |
| 1019 | } else { | 1180 | } else { |
| 1020 | u64 msr = kvmppc_get_msr(vcpu); | 1181 | kvmppc_core_queue_inst_storage(vcpu, |
| 1021 | msr |= shadow_srr1 & 0x58000000; | 1182 | shadow_srr1 & 0x58000000); |
| 1022 | kvmppc_set_msr_fast(vcpu, msr); | ||
| 1023 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | ||
| 1024 | r = RESUME_GUEST; | 1183 | r = RESUME_GUEST; |
| 1025 | } | 1184 | } |
| 1026 | break; | 1185 | break; |
| @@ -1059,9 +1218,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 1059 | r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); | 1218 | r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); |
| 1060 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | 1219 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
| 1061 | } else { | 1220 | } else { |
| 1062 | kvmppc_set_dar(vcpu, dar); | 1221 | kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr); |
| 1063 | kvmppc_set_dsisr(vcpu, fault_dsisr); | ||
| 1064 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | ||
| 1065 | r = RESUME_GUEST; | 1222 | r = RESUME_GUEST; |
| 1066 | } | 1223 | } |
| 1067 | break; | 1224 | break; |
| @@ -1092,10 +1249,13 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 1092 | case BOOK3S_INTERRUPT_EXTERNAL: | 1249 | case BOOK3S_INTERRUPT_EXTERNAL: |
| 1093 | case BOOK3S_INTERRUPT_EXTERNAL_LEVEL: | 1250 | case BOOK3S_INTERRUPT_EXTERNAL_LEVEL: |
| 1094 | case BOOK3S_INTERRUPT_EXTERNAL_HV: | 1251 | case BOOK3S_INTERRUPT_EXTERNAL_HV: |
| 1252 | case BOOK3S_INTERRUPT_H_VIRT: | ||
| 1095 | vcpu->stat.ext_intr_exits++; | 1253 | vcpu->stat.ext_intr_exits++; |
| 1096 | r = RESUME_GUEST; | 1254 | r = RESUME_GUEST; |
| 1097 | break; | 1255 | break; |
| 1256 | case BOOK3S_INTERRUPT_HMI: | ||
| 1098 | case BOOK3S_INTERRUPT_PERFMON: | 1257 | case BOOK3S_INTERRUPT_PERFMON: |
| 1258 | case BOOK3S_INTERRUPT_SYSTEM_RESET: | ||
| 1099 | r = RESUME_GUEST; | 1259 | r = RESUME_GUEST; |
| 1100 | break; | 1260 | break; |
| 1101 | case BOOK3S_INTERRUPT_PROGRAM: | 1261 | case BOOK3S_INTERRUPT_PROGRAM: |
| @@ -1225,8 +1385,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 1225 | } | 1385 | } |
| 1226 | #ifdef CONFIG_PPC_BOOK3S_64 | 1386 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 1227 | case BOOK3S_INTERRUPT_FAC_UNAVAIL: | 1387 | case BOOK3S_INTERRUPT_FAC_UNAVAIL: |
| 1228 | kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); | 1388 | r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); |
| 1229 | r = RESUME_GUEST; | ||
| 1230 | break; | 1389 | break; |
| 1231 | #endif | 1390 | #endif |
| 1232 | case BOOK3S_INTERRUPT_MACHINE_CHECK: | 1391 | case BOOK3S_INTERRUPT_MACHINE_CHECK: |
| @@ -1379,6 +1538,73 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, | |||
| 1379 | else | 1538 | else |
| 1380 | *val = get_reg_val(id, 0); | 1539 | *val = get_reg_val(id, 0); |
| 1381 | break; | 1540 | break; |
| 1541 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 1542 | case KVM_REG_PPC_TFHAR: | ||
| 1543 | *val = get_reg_val(id, vcpu->arch.tfhar); | ||
| 1544 | break; | ||
| 1545 | case KVM_REG_PPC_TFIAR: | ||
| 1546 | *val = get_reg_val(id, vcpu->arch.tfiar); | ||
| 1547 | break; | ||
| 1548 | case KVM_REG_PPC_TEXASR: | ||
| 1549 | *val = get_reg_val(id, vcpu->arch.texasr); | ||
| 1550 | break; | ||
| 1551 | case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: | ||
| 1552 | *val = get_reg_val(id, | ||
| 1553 | vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]); | ||
| 1554 | break; | ||
| 1555 | case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: | ||
| 1556 | { | ||
| 1557 | int i, j; | ||
| 1558 | |||
| 1559 | i = id - KVM_REG_PPC_TM_VSR0; | ||
| 1560 | if (i < 32) | ||
| 1561 | for (j = 0; j < TS_FPRWIDTH; j++) | ||
| 1562 | val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; | ||
| 1563 | else { | ||
| 1564 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | ||
| 1565 | val->vval = vcpu->arch.vr_tm.vr[i-32]; | ||
| 1566 | else | ||
| 1567 | r = -ENXIO; | ||
| 1568 | } | ||
| 1569 | break; | ||
| 1570 | } | ||
| 1571 | case KVM_REG_PPC_TM_CR: | ||
| 1572 | *val = get_reg_val(id, vcpu->arch.cr_tm); | ||
| 1573 | break; | ||
| 1574 | case KVM_REG_PPC_TM_XER: | ||
| 1575 | *val = get_reg_val(id, vcpu->arch.xer_tm); | ||
| 1576 | break; | ||
| 1577 | case KVM_REG_PPC_TM_LR: | ||
| 1578 | *val = get_reg_val(id, vcpu->arch.lr_tm); | ||
| 1579 | break; | ||
| 1580 | case KVM_REG_PPC_TM_CTR: | ||
| 1581 | *val = get_reg_val(id, vcpu->arch.ctr_tm); | ||
| 1582 | break; | ||
| 1583 | case KVM_REG_PPC_TM_FPSCR: | ||
| 1584 | *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); | ||
| 1585 | break; | ||
| 1586 | case KVM_REG_PPC_TM_AMR: | ||
| 1587 | *val = get_reg_val(id, vcpu->arch.amr_tm); | ||
| 1588 | break; | ||
| 1589 | case KVM_REG_PPC_TM_PPR: | ||
| 1590 | *val = get_reg_val(id, vcpu->arch.ppr_tm); | ||
| 1591 | break; | ||
| 1592 | case KVM_REG_PPC_TM_VRSAVE: | ||
| 1593 | *val = get_reg_val(id, vcpu->arch.vrsave_tm); | ||
| 1594 | break; | ||
| 1595 | case KVM_REG_PPC_TM_VSCR: | ||
| 1596 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | ||
| 1597 | *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); | ||
| 1598 | else | ||
| 1599 | r = -ENXIO; | ||
| 1600 | break; | ||
| 1601 | case KVM_REG_PPC_TM_DSCR: | ||
| 1602 | *val = get_reg_val(id, vcpu->arch.dscr_tm); | ||
| 1603 | break; | ||
| 1604 | case KVM_REG_PPC_TM_TAR: | ||
| 1605 | *val = get_reg_val(id, vcpu->arch.tar_tm); | ||
| 1606 | break; | ||
| 1607 | #endif | ||
| 1382 | default: | 1608 | default: |
| 1383 | r = -EINVAL; | 1609 | r = -EINVAL; |
| 1384 | break; | 1610 | break; |
| @@ -1412,6 +1638,72 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, | |||
| 1412 | case KVM_REG_PPC_LPCR_64: | 1638 | case KVM_REG_PPC_LPCR_64: |
| 1413 | kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); | 1639 | kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); |
| 1414 | break; | 1640 | break; |
| 1641 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 1642 | case KVM_REG_PPC_TFHAR: | ||
| 1643 | vcpu->arch.tfhar = set_reg_val(id, *val); | ||
| 1644 | break; | ||
| 1645 | case KVM_REG_PPC_TFIAR: | ||
| 1646 | vcpu->arch.tfiar = set_reg_val(id, *val); | ||
| 1647 | break; | ||
| 1648 | case KVM_REG_PPC_TEXASR: | ||
| 1649 | vcpu->arch.texasr = set_reg_val(id, *val); | ||
| 1650 | break; | ||
| 1651 | case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: | ||
| 1652 | vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] = | ||
| 1653 | set_reg_val(id, *val); | ||
| 1654 | break; | ||
| 1655 | case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: | ||
| 1656 | { | ||
| 1657 | int i, j; | ||
| 1658 | |||
| 1659 | i = id - KVM_REG_PPC_TM_VSR0; | ||
| 1660 | if (i < 32) | ||
| 1661 | for (j = 0; j < TS_FPRWIDTH; j++) | ||
| 1662 | vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; | ||
| 1663 | else | ||
| 1664 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | ||
| 1665 | vcpu->arch.vr_tm.vr[i-32] = val->vval; | ||
| 1666 | else | ||
| 1667 | r = -ENXIO; | ||
| 1668 | break; | ||
| 1669 | } | ||
| 1670 | case KVM_REG_PPC_TM_CR: | ||
| 1671 | vcpu->arch.cr_tm = set_reg_val(id, *val); | ||
| 1672 | break; | ||
| 1673 | case KVM_REG_PPC_TM_XER: | ||
| 1674 | vcpu->arch.xer_tm = set_reg_val(id, *val); | ||
| 1675 | break; | ||
| 1676 | case KVM_REG_PPC_TM_LR: | ||
| 1677 | vcpu->arch.lr_tm = set_reg_val(id, *val); | ||
| 1678 | break; | ||
| 1679 | case KVM_REG_PPC_TM_CTR: | ||
| 1680 | vcpu->arch.ctr_tm = set_reg_val(id, *val); | ||
| 1681 | break; | ||
| 1682 | case KVM_REG_PPC_TM_FPSCR: | ||
| 1683 | vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); | ||
| 1684 | break; | ||
| 1685 | case KVM_REG_PPC_TM_AMR: | ||
| 1686 | vcpu->arch.amr_tm = set_reg_val(id, *val); | ||
| 1687 | break; | ||
| 1688 | case KVM_REG_PPC_TM_PPR: | ||
| 1689 | vcpu->arch.ppr_tm = set_reg_val(id, *val); | ||
| 1690 | break; | ||
| 1691 | case KVM_REG_PPC_TM_VRSAVE: | ||
| 1692 | vcpu->arch.vrsave_tm = set_reg_val(id, *val); | ||
| 1693 | break; | ||
| 1694 | case KVM_REG_PPC_TM_VSCR: | ||
| 1695 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | ||
| 1696 | vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); | ||
| 1697 | else | ||
| 1698 | r = -ENXIO; | ||
| 1699 | break; | ||
| 1700 | case KVM_REG_PPC_TM_DSCR: | ||
| 1701 | vcpu->arch.dscr_tm = set_reg_val(id, *val); | ||
| 1702 | break; | ||
| 1703 | case KVM_REG_PPC_TM_TAR: | ||
| 1704 | vcpu->arch.tar_tm = set_reg_val(id, *val); | ||
| 1705 | break; | ||
| 1706 | #endif | ||
| 1415 | default: | 1707 | default: |
| 1416 | r = -EINVAL; | 1708 | r = -EINVAL; |
| 1417 | break; | 1709 | break; |
| @@ -1687,6 +1979,17 @@ static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, | |||
| 1687 | 1979 | ||
| 1688 | return 0; | 1980 | return 0; |
| 1689 | } | 1981 | } |
| 1982 | |||
| 1983 | static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) | ||
| 1984 | { | ||
| 1985 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) | ||
| 1986 | return -ENODEV; | ||
| 1987 | /* Require flags and process table base and size to all be zero. */ | ||
| 1988 | if (cfg->flags || cfg->process_table) | ||
| 1989 | return -EINVAL; | ||
| 1990 | return 0; | ||
| 1991 | } | ||
| 1992 | |||
| 1690 | #else | 1993 | #else |
| 1691 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, | 1994 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, |
| 1692 | struct kvm_ppc_smmu_info *info) | 1995 | struct kvm_ppc_smmu_info *info) |
| @@ -1735,9 +2038,12 @@ static void kvmppc_core_destroy_vm_pr(struct kvm *kvm) | |||
| 1735 | static int kvmppc_core_check_processor_compat_pr(void) | 2038 | static int kvmppc_core_check_processor_compat_pr(void) |
| 1736 | { | 2039 | { |
| 1737 | /* | 2040 | /* |
| 1738 | * Disable KVM for Power9 until the required bits are merged. | 2041 | * PR KVM can work on POWER9 inside a guest partition |
| 2042 | * running in HPT mode. It can't work if we are using | ||
| 2043 | * radix translation (because radix provides no way for | ||
| 2044 | * a process to have unique translations in quadrant 3). | ||
| 1739 | */ | 2045 | */ |
| 1740 | if (cpu_has_feature(CPU_FTR_ARCH_300)) | 2046 | if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled()) |
| 1741 | return -EIO; | 2047 | return -EIO; |
| 1742 | return 0; | 2048 | return 0; |
| 1743 | } | 2049 | } |
| @@ -1781,7 +2087,9 @@ static struct kvmppc_ops kvm_ops_pr = { | |||
| 1781 | .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, | 2087 | .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, |
| 1782 | #ifdef CONFIG_PPC_BOOK3S_64 | 2088 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 1783 | .hcall_implemented = kvmppc_hcall_impl_pr, | 2089 | .hcall_implemented = kvmppc_hcall_impl_pr, |
| 2090 | .configure_mmu = kvm_configure_mmu_pr, | ||
| 1784 | #endif | 2091 | #endif |
| 2092 | .giveup_ext = kvmppc_giveup_ext, | ||
| 1785 | }; | 2093 | }; |
| 1786 | 2094 | ||
| 1787 | 2095 | ||
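The TM one_reg cases added above plug straight into KVM's generic ONE_REG interface, so userspace (a migration tool, say) can checkpoint and restore transactional state one register id at a time. A hedged userspace sketch of the consumer side; error handling is elided and the ids come from the standard uapi headers:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the checkpointed TFHAR through the new get_one_reg case. */
static uint64_t read_tfhar(int vcpu_fd)
{
        uint64_t v = 0;
        struct kvm_one_reg reg = {
                .id   = KVM_REG_PPC_TFHAR,
                .addr = (uintptr_t)&v,
        };

        ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);  /* returns 0 on success */
        return v;
}

The same KVM_GET_ONE_REG/KVM_SET_ONE_REG pair covers the TM GPR, VSR, and SPR ids handled by the new switch cases.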
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 93a180ceefad..98ccc7ec5d48 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S | |||
| @@ -383,6 +383,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |||
| 383 | */ | 383 | */ |
| 384 | 384 | ||
| 385 | PPC_LL r6, HSTATE_HOST_MSR(r13) | 385 | PPC_LL r6, HSTATE_HOST_MSR(r13) |
| 386 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 387 | /* | ||
| 388 | * We don't want to change MSR[TS] bits via rfi here. | ||
| 389 | * The actual TM handling logic will be in host with | ||
| 390 | * recovered DR/IR bits after HSTATE_VMHANDLER. | ||
| 391 | * And MSR_TM can be enabled in HOST_MSR so rfid may | ||
| 392 | * not suppress this change and can lead to exception. | ||
| 393 | * Manually set MSR to prevent TS state change here. | ||
| 394 | */ | ||
| 395 | mfmsr r7 | ||
| 396 | rldicl r7, r7, 64 - MSR_TS_S_LG, 62 | ||
| 397 | rldimi r6, r7, MSR_TS_S_LG, 63 - MSR_TS_T_LG | ||
| 398 | #endif | ||
| 386 | PPC_LL r8, HSTATE_VMHANDLER(r13) | 399 | PPC_LL r8, HSTATE_VMHANDLER(r13) |
| 387 | 400 | ||
| 388 | #ifdef CONFIG_PPC64 | 401 | #ifdef CONFIG_PPC64 |
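For readers who do not speak rldicl/rldimi, a hedged C equivalent of the two instructions added above; MSR_TS_MASK is the two-bit transaction-state field spanning MSR_TS_S_LG/MSR_TS_T_LG:

/* Keep the saved host MSR but carry the current TS field across,
 * so the rfid that follows cannot switch transaction state.
 */
unsigned long cur_msr = mfmsr();

host_msr = (host_msr & ~MSR_TS_MASK) | (cur_msr & MSR_TS_MASK);

rldicl rotates the two TS bits down into the low bits of r7; rldimi then splices them back into r6 at the TS position.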
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c index 99c3620b40d9..6e41ba7ec8f4 100644 --- a/arch/powerpc/kvm/book3s_xive_template.c +++ b/arch/powerpc/kvm/book3s_xive_template.c | |||
| @@ -334,7 +334,7 @@ X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu) | |||
| 334 | */ | 334 | */ |
| 335 | 335 | ||
| 336 | /* Return interrupt and old CPPR in GPR4 */ | 336 | /* Return interrupt and old CPPR in GPR4 */ |
| 337 | vcpu->arch.gpr[4] = hirq | (old_cppr << 24); | 337 | vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24); |
| 338 | 338 | ||
| 339 | return H_SUCCESS; | 339 | return H_SUCCESS; |
| 340 | } | 340 | } |
| @@ -369,7 +369,7 @@ X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long | |||
| 369 | hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll); | 369 | hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll); |
| 370 | 370 | ||
| 371 | /* Return interrupt and old CPPR in GPR4 */ | 371 | /* Return interrupt and old CPPR in GPR4 */ |
| 372 | vcpu->arch.gpr[4] = hirq | (xc->cppr << 24); | 372 | vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24); |
| 373 | 373 | ||
| 374 | return H_SUCCESS; | 374 | return H_SUCCESS; |
| 375 | } | 375 | } |
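These gpr-to-regs.gpr conversions (and the pc/lr/ctr renames in the booke files below) follow from embedding a struct pt_regs in the vcpu, which lets generic code such as the instruction analyser used later in emulate_loadstore.c consume guest state directly. A hedged, heavily abridged sketch of the resulting layout:

/* Illustrative only; the real kvm_vcpu_arch carries far more state. */
struct kvm_vcpu_arch {
        struct pt_regs regs;    /* gpr[32], nip, link, ctr, xer, ccr, msr */
        /* ... remaining KVM-private fields ... */
};

static inline unsigned long kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.regs.nip;
}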
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 876d4f294fdd..a9ca016da670 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
| @@ -77,8 +77,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) | |||
| 77 | { | 77 | { |
| 78 | int i; | 78 | int i; |
| 79 | 79 | ||
| 80 | printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr); | 80 | printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip, |
| 81 | printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); | 81 | vcpu->arch.shared->msr); |
| 82 | printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link, | ||
| 83 | vcpu->arch.regs.ctr); | ||
| 82 | printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0, | 84 | printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0, |
| 83 | vcpu->arch.shared->srr1); | 85 | vcpu->arch.shared->srr1); |
| 84 | 86 | ||
| @@ -491,24 +493,25 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
| 491 | if (allowed) { | 493 | if (allowed) { |
| 492 | switch (int_class) { | 494 | switch (int_class) { |
| 493 | case INT_CLASS_NONCRIT: | 495 | case INT_CLASS_NONCRIT: |
| 494 | set_guest_srr(vcpu, vcpu->arch.pc, | 496 | set_guest_srr(vcpu, vcpu->arch.regs.nip, |
| 495 | vcpu->arch.shared->msr); | 497 | vcpu->arch.shared->msr); |
| 496 | break; | 498 | break; |
| 497 | case INT_CLASS_CRIT: | 499 | case INT_CLASS_CRIT: |
| 498 | set_guest_csrr(vcpu, vcpu->arch.pc, | 500 | set_guest_csrr(vcpu, vcpu->arch.regs.nip, |
| 499 | vcpu->arch.shared->msr); | 501 | vcpu->arch.shared->msr); |
| 500 | break; | 502 | break; |
| 501 | case INT_CLASS_DBG: | 503 | case INT_CLASS_DBG: |
| 502 | set_guest_dsrr(vcpu, vcpu->arch.pc, | 504 | set_guest_dsrr(vcpu, vcpu->arch.regs.nip, |
| 503 | vcpu->arch.shared->msr); | 505 | vcpu->arch.shared->msr); |
| 504 | break; | 506 | break; |
| 505 | case INT_CLASS_MC: | 507 | case INT_CLASS_MC: |
| 506 | set_guest_mcsrr(vcpu, vcpu->arch.pc, | 508 | set_guest_mcsrr(vcpu, vcpu->arch.regs.nip, |
| 507 | vcpu->arch.shared->msr); | 509 | vcpu->arch.shared->msr); |
| 508 | break; | 510 | break; |
| 509 | } | 511 | } |
| 510 | 512 | ||
| 511 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; | 513 | vcpu->arch.regs.nip = vcpu->arch.ivpr | |
| 514 | vcpu->arch.ivor[priority]; | ||
| 512 | if (update_esr == true) | 515 | if (update_esr == true) |
| 513 | kvmppc_set_esr(vcpu, vcpu->arch.queued_esr); | 516 | kvmppc_set_esr(vcpu, vcpu->arch.queued_esr); |
| 514 | if (update_dear == true) | 517 | if (update_dear == true) |
| @@ -826,7 +829,7 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
| 826 | 829 | ||
| 827 | case EMULATE_FAIL: | 830 | case EMULATE_FAIL: |
| 828 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", | 831 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", |
| 829 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); | 832 | __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst); |
| 830 | /* For debugging, encode the failing instruction and | 833 | /* For debugging, encode the failing instruction and |
| 831 | * report it to userspace. */ | 834 | * report it to userspace. */ |
| 832 | run->hw.hardware_exit_reason = ~0ULL << 32; | 835 | run->hw.hardware_exit_reason = ~0ULL << 32; |
| @@ -875,7 +878,7 @@ static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
| 875 | */ | 878 | */ |
| 876 | vcpu->arch.dbsr = 0; | 879 | vcpu->arch.dbsr = 0; |
| 877 | run->debug.arch.status = 0; | 880 | run->debug.arch.status = 0; |
| 878 | run->debug.arch.address = vcpu->arch.pc; | 881 | run->debug.arch.address = vcpu->arch.regs.nip; |
| 879 | 882 | ||
| 880 | if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) { | 883 | if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) { |
| 881 | run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT; | 884 | run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT; |
| @@ -971,7 +974,7 @@ static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 971 | 974 | ||
| 972 | case EMULATE_FAIL: | 975 | case EMULATE_FAIL: |
| 973 | pr_debug("%s: load instruction from guest address %lx failed\n", | 976 | pr_debug("%s: load instruction from guest address %lx failed\n", |
| 974 | __func__, vcpu->arch.pc); | 977 | __func__, vcpu->arch.regs.nip); |
| 975 | /* For debugging, encode the failing instruction and | 978 | /* For debugging, encode the failing instruction and |
| 976 | * report it to userspace. */ | 979 | * report it to userspace. */ |
| 977 | run->hw.hardware_exit_reason = ~0ULL << 32; | 980 | run->hw.hardware_exit_reason = ~0ULL << 32; |
| @@ -1169,7 +1172,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 1169 | case BOOKE_INTERRUPT_SPE_FP_DATA: | 1172 | case BOOKE_INTERRUPT_SPE_FP_DATA: |
| 1170 | case BOOKE_INTERRUPT_SPE_FP_ROUND: | 1173 | case BOOKE_INTERRUPT_SPE_FP_ROUND: |
| 1171 | printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n", | 1174 | printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n", |
| 1172 | __func__, exit_nr, vcpu->arch.pc); | 1175 | __func__, exit_nr, vcpu->arch.regs.nip); |
| 1173 | run->hw.hardware_exit_reason = exit_nr; | 1176 | run->hw.hardware_exit_reason = exit_nr; |
| 1174 | r = RESUME_HOST; | 1177 | r = RESUME_HOST; |
| 1175 | break; | 1178 | break; |
| @@ -1299,7 +1302,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 1299 | } | 1302 | } |
| 1300 | 1303 | ||
| 1301 | case BOOKE_INTERRUPT_ITLB_MISS: { | 1304 | case BOOKE_INTERRUPT_ITLB_MISS: { |
| 1302 | unsigned long eaddr = vcpu->arch.pc; | 1305 | unsigned long eaddr = vcpu->arch.regs.nip; |
| 1303 | gpa_t gpaddr; | 1306 | gpa_t gpaddr; |
| 1304 | gfn_t gfn; | 1307 | gfn_t gfn; |
| 1305 | int gtlb_index; | 1308 | int gtlb_index; |
| @@ -1391,7 +1394,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
| 1391 | int i; | 1394 | int i; |
| 1392 | int r; | 1395 | int r; |
| 1393 | 1396 | ||
| 1394 | vcpu->arch.pc = 0; | 1397 | vcpu->arch.regs.nip = 0; |
| 1395 | vcpu->arch.shared->pir = vcpu->vcpu_id; | 1398 | vcpu->arch.shared->pir = vcpu->vcpu_id; |
| 1396 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ | 1399 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ |
| 1397 | kvmppc_set_msr(vcpu, 0); | 1400 | kvmppc_set_msr(vcpu, 0); |
| @@ -1440,10 +1443,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
| 1440 | 1443 | ||
| 1441 | vcpu_load(vcpu); | 1444 | vcpu_load(vcpu); |
| 1442 | 1445 | ||
| 1443 | regs->pc = vcpu->arch.pc; | 1446 | regs->pc = vcpu->arch.regs.nip; |
| 1444 | regs->cr = kvmppc_get_cr(vcpu); | 1447 | regs->cr = kvmppc_get_cr(vcpu); |
| 1445 | regs->ctr = vcpu->arch.ctr; | 1448 | regs->ctr = vcpu->arch.regs.ctr; |
| 1446 | regs->lr = vcpu->arch.lr; | 1449 | regs->lr = vcpu->arch.regs.link; |
| 1447 | regs->xer = kvmppc_get_xer(vcpu); | 1450 | regs->xer = kvmppc_get_xer(vcpu); |
| 1448 | regs->msr = vcpu->arch.shared->msr; | 1451 | regs->msr = vcpu->arch.shared->msr; |
| 1449 | regs->srr0 = kvmppc_get_srr0(vcpu); | 1452 | regs->srr0 = kvmppc_get_srr0(vcpu); |
| @@ -1471,10 +1474,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
| 1471 | 1474 | ||
| 1472 | vcpu_load(vcpu); | 1475 | vcpu_load(vcpu); |
| 1473 | 1476 | ||
| 1474 | vcpu->arch.pc = regs->pc; | 1477 | vcpu->arch.regs.nip = regs->pc; |
| 1475 | kvmppc_set_cr(vcpu, regs->cr); | 1478 | kvmppc_set_cr(vcpu, regs->cr); |
| 1476 | vcpu->arch.ctr = regs->ctr; | 1479 | vcpu->arch.regs.ctr = regs->ctr; |
| 1477 | vcpu->arch.lr = regs->lr; | 1480 | vcpu->arch.regs.link = regs->lr; |
| 1478 | kvmppc_set_xer(vcpu, regs->xer); | 1481 | kvmppc_set_xer(vcpu, regs->xer); |
| 1479 | kvmppc_set_msr(vcpu, regs->msr); | 1482 | kvmppc_set_msr(vcpu, regs->msr); |
| 1480 | kvmppc_set_srr0(vcpu, regs->srr0); | 1483 | kvmppc_set_srr0(vcpu, regs->srr0); |
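The get_regs/set_regs conversions above shuttle between the uapi struct kvm_regs and the embedded pt_regs (pc <-> regs.nip, lr <-> regs.link, ctr <-> regs.ctr). A hedged userspace sketch of the other end of that ioctl, error handling elided:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Fetch the guest program counter via KVM_GET_REGS. */
static unsigned long read_guest_pc(int vcpu_fd)
{
        struct kvm_regs regs;

        ioctl(vcpu_fd, KVM_GET_REGS, &regs);    /* 0 on success */
        return regs.pc;
}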
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index a82f64502de1..d23e582f0fee 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c | |||
| @@ -34,19 +34,19 @@ | |||
| 34 | 34 | ||
| 35 | static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) | 35 | static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) |
| 36 | { | 36 | { |
| 37 | vcpu->arch.pc = vcpu->arch.shared->srr0; | 37 | vcpu->arch.regs.nip = vcpu->arch.shared->srr0; |
| 38 | kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); | 38 | kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu) | 41 | static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu) |
| 42 | { | 42 | { |
| 43 | vcpu->arch.pc = vcpu->arch.dsrr0; | 43 | vcpu->arch.regs.nip = vcpu->arch.dsrr0; |
| 44 | kvmppc_set_msr(vcpu, vcpu->arch.dsrr1); | 44 | kvmppc_set_msr(vcpu, vcpu->arch.dsrr1); |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu) | 47 | static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu) |
| 48 | { | 48 | { |
| 49 | vcpu->arch.pc = vcpu->arch.csrr0; | 49 | vcpu->arch.regs.nip = vcpu->arch.csrr0; |
| 50 | kvmppc_set_msr(vcpu, vcpu->arch.csrr1); | 50 | kvmppc_set_msr(vcpu, vcpu->arch.csrr1); |
| 51 | } | 51 | } |
| 52 | 52 | ||
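rfi, rfdi, and rfci differ only in which save/restore pair they consume. A hypothetical helper (not part of this patch, shown purely for illustration) would make the shared shape explicit:

/* Assumed refactoring, for illustration only. */
static void kvmppc_emul_return_from(struct kvm_vcpu *vcpu, ulong pc, ulong msr)
{
        vcpu->arch.regs.nip = pc;
        kvmppc_set_msr(vcpu, msr);
}

/* e.g. rfi:  kvmppc_emul_return_from(vcpu, vcpu->arch.shared->srr0,
 *                                    vcpu->arch.shared->srr1);
 */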
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 990db69a1d0b..3f8189eb56ed 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c | |||
| @@ -53,7 +53,7 @@ static int dbell2prio(ulong param) | |||
| 53 | 53 | ||
| 54 | static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb) | 54 | static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb) |
| 55 | { | 55 | { |
| 56 | ulong param = vcpu->arch.gpr[rb]; | 56 | ulong param = vcpu->arch.regs.gpr[rb]; |
| 57 | int prio = dbell2prio(param); | 57 | int prio = dbell2prio(param); |
| 58 | 58 | ||
| 59 | if (prio < 0) | 59 | if (prio < 0) |
| @@ -65,7 +65,7 @@ static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb) | |||
| 65 | 65 | ||
| 66 | static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) | 66 | static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) |
| 67 | { | 67 | { |
| 68 | ulong param = vcpu->arch.gpr[rb]; | 68 | ulong param = vcpu->arch.regs.gpr[rb]; |
| 69 | int prio = dbell2prio(rb); | 69 | int prio = dbell2prio(rb); |
| 70 | int pir = param & PPC_DBELL_PIR_MASK; | 70 | int pir = param & PPC_DBELL_PIR_MASK; |
| 71 | int i; | 71 | int i; |
| @@ -94,7 +94,7 @@ static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 94 | switch (get_oc(inst)) { | 94 | switch (get_oc(inst)) { |
| 95 | case EHPRIV_OC_DEBUG: | 95 | case EHPRIV_OC_DEBUG: |
| 96 | run->exit_reason = KVM_EXIT_DEBUG; | 96 | run->exit_reason = KVM_EXIT_DEBUG; |
| 97 | run->debug.arch.address = vcpu->arch.pc; | 97 | run->debug.arch.address = vcpu->arch.regs.nip; |
| 98 | run->debug.arch.status = 0; | 98 | run->debug.arch.status = 0; |
| 99 | kvmppc_account_exit(vcpu, DEBUG_EXITS); | 99 | kvmppc_account_exit(vcpu, DEBUG_EXITS); |
| 100 | emulated = EMULATE_EXIT_USER; | 100 | emulated = EMULATE_EXIT_USER; |
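For context, msgsnd emulation decodes the doorbell payload into a priority and a target PIR, then raises the corresponding exception on each matching vcpu. A hedged sketch of the delivery loop, close to but not quoted from the handler:

int i;
struct kvm_vcpu *cvcpu;

kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
        int cpir = cvcpu->arch.shared->pir;

        /* broadcast, or this vcpu's PIR matches the doorbell target */
        if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) {
                set_bit(prio, &cvcpu->arch.pending_exceptions);
                kvm_vcpu_kick(cvcpu);
        }
}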
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c index ddbf8f0284c0..24296f4cadc6 100644 --- a/arch/powerpc/kvm/e500_mmu.c +++ b/arch/powerpc/kvm/e500_mmu.c | |||
| @@ -513,7 +513,7 @@ void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu) | |||
| 513 | { | 513 | { |
| 514 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); | 514 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); |
| 515 | 515 | ||
| 516 | kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as); | 516 | kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.regs.nip, as); |
| 517 | } | 517 | } |
| 518 | 518 | ||
| 519 | void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) | 519 | void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) |
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index c878b4ffb86f..8f2985e46f6f 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c | |||
| @@ -625,8 +625,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, | |||
| 625 | } | 625 | } |
| 626 | 626 | ||
| 627 | #ifdef CONFIG_KVM_BOOKE_HV | 627 | #ifdef CONFIG_KVM_BOOKE_HV |
| 628 | int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, | 628 | int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, |
| 629 | u32 *instr) | 629 | enum instruction_fetch_type type, u32 *instr) |
| 630 | { | 630 | { |
| 631 | gva_t geaddr; | 631 | gva_t geaddr; |
| 632 | hpa_t addr; | 632 | hpa_t addr; |
| @@ -715,8 +715,8 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, | |||
| 715 | return EMULATE_DONE; | 715 | return EMULATE_DONE; |
| 716 | } | 716 | } |
| 717 | #else | 717 | #else |
| 718 | int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, | 718 | int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, |
| 719 | u32 *instr) | 719 | enum instruction_fetch_type type, u32 *instr) |
| 720 | { | 720 | { |
| 721 | return EMULATE_AGAIN; | 721 | return EMULATE_AGAIN; |
| 722 | } | 722 | } |
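Callers normally reach these through the kvmppc_get_last_inst() wrapper, which tries the cached last instruction first and only then calls kvmppc_load_last_inst(). A hedged caller-side sketch:

u32 inst;

/* INST_GENERIC is the ordinary-fetch variant of the renamed enum. */
if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
        return RESUME_GUEST;    /* EMULATE_AGAIN: re-enter and refetch */

The #else stub above returns EMULATE_AGAIN precisely so that non-HV booke configurations take this retry path.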
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c index a382e15135e6..afde788be141 100644 --- a/arch/powerpc/kvm/emulate_loadstore.c +++ b/arch/powerpc/kvm/emulate_loadstore.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | #include <asm/kvm_ppc.h> | 31 | #include <asm/kvm_ppc.h> |
| 32 | #include <asm/disassemble.h> | 32 | #include <asm/disassemble.h> |
| 33 | #include <asm/ppc-opcode.h> | 33 | #include <asm/ppc-opcode.h> |
| 34 | #include <asm/sstep.h> | ||
| 34 | #include "timing.h" | 35 | #include "timing.h" |
| 35 | #include "trace.h" | 36 | #include "trace.h" |
| 36 | 37 | ||
| @@ -84,8 +85,9 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) | |||
| 84 | struct kvm_run *run = vcpu->run; | 85 | struct kvm_run *run = vcpu->run; |
| 85 | u32 inst; | 86 | u32 inst; |
| 86 | int ra, rs, rt; | 87 | int ra, rs, rt; |
| 87 | enum emulation_result emulated; | 88 | enum emulation_result emulated = EMULATE_FAIL; |
| 88 | int advance = 1; | 89 | int advance = 1; |
| 90 | struct instruction_op op; | ||
| 89 | 91 | ||
| 90 | /* this default type might be overwritten by subcategories */ | 92 | /* this default type might be overwritten by subcategories */ |
| 91 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); | 93 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); |
| @@ -107,580 +109,276 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) | |||
| 107 | vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst); | 109 | vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst); |
| 108 | vcpu->arch.mmio_vsx_copy_nums = 0; | 110 | vcpu->arch.mmio_vsx_copy_nums = 0; |
| 109 | vcpu->arch.mmio_vsx_offset = 0; | 111 | vcpu->arch.mmio_vsx_offset = 0; |
| 110 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE; | 112 | vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE; |
| 111 | vcpu->arch.mmio_sp64_extend = 0; | 113 | vcpu->arch.mmio_sp64_extend = 0; |
| 112 | vcpu->arch.mmio_sign_extend = 0; | 114 | vcpu->arch.mmio_sign_extend = 0; |
| 113 | vcpu->arch.mmio_vmx_copy_nums = 0; | 115 | vcpu->arch.mmio_vmx_copy_nums = 0; |
| 116 | vcpu->arch.mmio_vmx_offset = 0; | ||
| 117 | vcpu->arch.mmio_host_swabbed = 0; | ||
| 114 | 118 | ||
| 115 | switch (get_op(inst)) { | 119 | emulated = EMULATE_FAIL; |
| 116 | case 31: | 120 | vcpu->arch.regs.msr = vcpu->arch.shared->msr; |
| 117 | switch (get_xop(inst)) { | 121 | vcpu->arch.regs.ccr = vcpu->arch.cr; |
| 118 | case OP_31_XOP_LWZX: | 122 | if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) { |
| 119 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 123 | int type = op.type & INSTR_TYPE_MASK; |
| 120 | break; | 124 | int size = GETSIZE(op.type); |
| 121 | |||
| 122 | case OP_31_XOP_LWZUX: | ||
| 123 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | ||
| 124 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 125 | break; | ||
| 126 | 125 | ||
| 127 | case OP_31_XOP_LBZX: | 126 | switch (type) { |
| 128 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 127 | case LOAD: { |
| 129 | break; | 128 | int instr_byte_swap = op.type & BYTEREV; |
| 130 | |||
| 131 | case OP_31_XOP_LBZUX: | ||
| 132 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | ||
| 133 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 134 | break; | ||
| 135 | 129 | ||
| 136 | case OP_31_XOP_STDX: | 130 | if (op.type & SIGNEXT) |
| 137 | emulated = kvmppc_handle_store(run, vcpu, | 131 | emulated = kvmppc_handle_loads(run, vcpu, |
| 138 | kvmppc_get_gpr(vcpu, rs), 8, 1); | 132 | op.reg, size, !instr_byte_swap); |
| 139 | break; | 133 | else |
| 134 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 135 | op.reg, size, !instr_byte_swap); | ||
| 140 | 136 | ||
| 141 | case OP_31_XOP_STDUX: | 137 | if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) |
| 142 | emulated = kvmppc_handle_store(run, vcpu, | 138 | kvmppc_set_gpr(vcpu, op.update_reg, op.ea); |
| 143 | kvmppc_get_gpr(vcpu, rs), 8, 1); | ||
| 144 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 145 | break; | ||
| 146 | 139 | ||
| 147 | case OP_31_XOP_STWX: | ||
| 148 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 149 | kvmppc_get_gpr(vcpu, rs), 4, 1); | ||
| 150 | break; | ||
| 151 | |||
| 152 | case OP_31_XOP_STWUX: | ||
| 153 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 154 | kvmppc_get_gpr(vcpu, rs), 4, 1); | ||
| 155 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 156 | break; | ||
| 157 | |||
| 158 | case OP_31_XOP_STBX: | ||
| 159 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 160 | kvmppc_get_gpr(vcpu, rs), 1, 1); | ||
| 161 | break; | ||
| 162 | |||
| 163 | case OP_31_XOP_STBUX: | ||
| 164 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 165 | kvmppc_get_gpr(vcpu, rs), 1, 1); | ||
| 166 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 167 | break; | ||
| 168 | |||
| 169 | case OP_31_XOP_LHAX: | ||
| 170 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | ||
| 171 | break; | ||
| 172 | |||
| 173 | case OP_31_XOP_LHAUX: | ||
| 174 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | ||
| 175 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 176 | break; | ||
| 177 | |||
| 178 | case OP_31_XOP_LHZX: | ||
| 179 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
| 180 | break; | ||
| 181 | |||
| 182 | case OP_31_XOP_LHZUX: | ||
| 183 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
| 184 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 185 | break; | ||
| 186 | |||
| 187 | case OP_31_XOP_STHX: | ||
| 188 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 189 | kvmppc_get_gpr(vcpu, rs), 2, 1); | ||
| 190 | break; | ||
| 191 | |||
| 192 | case OP_31_XOP_STHUX: | ||
| 193 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 194 | kvmppc_get_gpr(vcpu, rs), 2, 1); | ||
| 195 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 196 | break; | ||
| 197 | |||
| 198 | case OP_31_XOP_DCBST: | ||
| 199 | case OP_31_XOP_DCBF: | ||
| 200 | case OP_31_XOP_DCBI: | ||
| 201 | /* Do nothing. The guest is performing dcbi because | ||
| 202 | * hardware DMA is not snooped by the dcache, but | ||
| 203 | * emulated DMA either goes through the dcache as | ||
| 204 | * normal writes, or the host kernel has handled dcache | ||
| 205 | * coherence. */ | ||
| 206 | break; | ||
| 207 | |||
| 208 | case OP_31_XOP_LWBRX: | ||
| 209 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); | ||
| 210 | break; | ||
| 211 | |||
| 212 | case OP_31_XOP_STWBRX: | ||
| 213 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 214 | kvmppc_get_gpr(vcpu, rs), 4, 0); | ||
| 215 | break; | 140 | break; |
| 216 | 141 | } | |
| 217 | case OP_31_XOP_LHBRX: | ||
| 218 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); | ||
| 219 | break; | ||
| 220 | |||
| 221 | case OP_31_XOP_STHBRX: | ||
| 222 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 223 | kvmppc_get_gpr(vcpu, rs), 2, 0); | ||
| 224 | break; | ||
| 225 | |||
| 226 | case OP_31_XOP_LDBRX: | ||
| 227 | emulated = kvmppc_handle_load(run, vcpu, rt, 8, 0); | ||
| 228 | break; | ||
| 229 | |||
| 230 | case OP_31_XOP_STDBRX: | ||
| 231 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 232 | kvmppc_get_gpr(vcpu, rs), 8, 0); | ||
| 233 | break; | ||
| 234 | |||
| 235 | case OP_31_XOP_LDX: | ||
| 236 | emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); | ||
| 237 | break; | ||
| 238 | |||
| 239 | case OP_31_XOP_LDUX: | ||
| 240 | emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); | ||
| 241 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 242 | break; | ||
| 243 | |||
| 244 | case OP_31_XOP_LWAX: | ||
| 245 | emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1); | ||
| 246 | break; | ||
| 247 | |||
| 248 | case OP_31_XOP_LWAUX: | ||
| 249 | emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1); | ||
| 250 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 251 | break; | ||
| 252 | |||
| 253 | #ifdef CONFIG_PPC_FPU | 142 | #ifdef CONFIG_PPC_FPU |
| 254 | case OP_31_XOP_LFSX: | 143 | case LOAD_FP: |
| 255 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 256 | return EMULATE_DONE; | ||
| 257 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 258 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 259 | KVM_MMIO_REG_FPR|rt, 4, 1); | ||
| 260 | break; | ||
| 261 | |||
| 262 | case OP_31_XOP_LFSUX: | ||
| 263 | if (kvmppc_check_fp_disabled(vcpu)) | 144 | if (kvmppc_check_fp_disabled(vcpu)) |
| 264 | return EMULATE_DONE; | 145 | return EMULATE_DONE; |
| 265 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 266 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 267 | KVM_MMIO_REG_FPR|rt, 4, 1); | ||
| 268 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 269 | break; | ||
| 270 | 146 | ||
| 271 | case OP_31_XOP_LFDX: | 147 | if (op.type & FPCONV) |
| 272 | if (kvmppc_check_fp_disabled(vcpu)) | 148 | vcpu->arch.mmio_sp64_extend = 1; |
| 273 | return EMULATE_DONE; | ||
| 274 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 275 | KVM_MMIO_REG_FPR|rt, 8, 1); | ||
| 276 | break; | ||
| 277 | 149 | ||
| 278 | case OP_31_XOP_LFDUX: | 150 | if (op.type & SIGNEXT) |
| 279 | if (kvmppc_check_fp_disabled(vcpu)) | 151 | emulated = kvmppc_handle_loads(run, vcpu, |
| 280 | return EMULATE_DONE; | 152 | KVM_MMIO_REG_FPR|op.reg, size, 1); |
| 281 | emulated = kvmppc_handle_load(run, vcpu, | 153 | else |
| 282 | KVM_MMIO_REG_FPR|rt, 8, 1); | 154 | emulated = kvmppc_handle_load(run, vcpu, |
| 283 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | 155 | KVM_MMIO_REG_FPR|op.reg, size, 1); |
| 284 | break; | ||
| 285 | |||
| 286 | case OP_31_XOP_LFIWAX: | ||
| 287 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 288 | return EMULATE_DONE; | ||
| 289 | emulated = kvmppc_handle_loads(run, vcpu, | ||
| 290 | KVM_MMIO_REG_FPR|rt, 4, 1); | ||
| 291 | break; | ||
| 292 | 156 | ||
| 293 | case OP_31_XOP_LFIWZX: | 157 | if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) |
| 294 | if (kvmppc_check_fp_disabled(vcpu)) | 158 | kvmppc_set_gpr(vcpu, op.update_reg, op.ea); |
| 295 | return EMULATE_DONE; | ||
| 296 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 297 | KVM_MMIO_REG_FPR|rt, 4, 1); | ||
| 298 | break; | ||
| 299 | 159 | ||
| 300 | case OP_31_XOP_STFSX: | ||
| 301 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 302 | return EMULATE_DONE; | ||
| 303 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 304 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 305 | VCPU_FPR(vcpu, rs), 4, 1); | ||
| 306 | break; | 160 | break; |
| 307 | 161 | #endif | |
| 308 | case OP_31_XOP_STFSUX: | 162 | #ifdef CONFIG_ALTIVEC |
| 309 | if (kvmppc_check_fp_disabled(vcpu)) | 163 | case LOAD_VMX: |
| 310 | return EMULATE_DONE; | 164 | if (kvmppc_check_altivec_disabled(vcpu)) |
| 311 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 312 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 313 | VCPU_FPR(vcpu, rs), 4, 1); | ||
| 314 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 315 | break; | ||
| 316 | |||
| 317 | case OP_31_XOP_STFDX: | ||
| 318 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 319 | return EMULATE_DONE; | ||
| 320 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 321 | VCPU_FPR(vcpu, rs), 8, 1); | ||
| 322 | break; | ||
| 323 | |||
| 324 | case OP_31_XOP_STFDUX: | ||
| 325 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 326 | return EMULATE_DONE; | 165 | return EMULATE_DONE; |
| 327 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 328 | VCPU_FPR(vcpu, rs), 8, 1); | ||
| 329 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 330 | break; | ||
| 331 | 166 | ||
| 332 | case OP_31_XOP_STFIWX: | 167 | /* Hardware enforces alignment of VMX accesses */ |
| 333 | if (kvmppc_check_fp_disabled(vcpu)) | 168 | vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1); |
| 334 | return EMULATE_DONE; | 169 | vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1); |
| 335 | emulated = kvmppc_handle_store(run, vcpu, | 170 | |
| 336 | VCPU_FPR(vcpu, rs), 4, 1); | 171 | if (size == 16) { /* lvx */ |
| 172 | vcpu->arch.mmio_copy_type = | ||
| 173 | KVMPPC_VMX_COPY_DWORD; | ||
| 174 | } else if (size == 4) { /* lvewx */ | ||
| 175 | vcpu->arch.mmio_copy_type = | ||
| 176 | KVMPPC_VMX_COPY_WORD; | ||
| 177 | } else if (size == 2) { /* lvehx */ | ||
| 178 | vcpu->arch.mmio_copy_type = | ||
| 179 | KVMPPC_VMX_COPY_HWORD; | ||
| 180 | } else if (size == 1) { /* lvebx */ | ||
| 181 | vcpu->arch.mmio_copy_type = | ||
| 182 | KVMPPC_VMX_COPY_BYTE; | ||
| 183 | } else | ||
| 184 | break; | ||
| 185 | |||
| 186 | vcpu->arch.mmio_vmx_offset = | ||
| 187 | (vcpu->arch.vaddr_accessed & 0xf)/size; | ||
| 188 | |||
| 189 | if (size == 16) { | ||
| 190 | vcpu->arch.mmio_vmx_copy_nums = 2; | ||
| 191 | emulated = kvmppc_handle_vmx_load(run, | ||
| 192 | vcpu, KVM_MMIO_REG_VMX|op.reg, | ||
| 193 | 8, 1); | ||
| 194 | } else { | ||
| 195 | vcpu->arch.mmio_vmx_copy_nums = 1; | ||
| 196 | emulated = kvmppc_handle_vmx_load(run, vcpu, | ||
| 197 | KVM_MMIO_REG_VMX|op.reg, | ||
| 198 | size, 1); | ||
| 199 | } | ||
| 337 | break; | 200 | break; |
| 338 | #endif | 201 | #endif |
| 339 | |||
| 340 | #ifdef CONFIG_VSX | 202 | #ifdef CONFIG_VSX |
| 341 | case OP_31_XOP_LXSDX: | 203 | case LOAD_VSX: { |
| 342 | if (kvmppc_check_vsx_disabled(vcpu)) | 204 | int io_size_each; |
| 343 | return EMULATE_DONE; | 205 | |
| 344 | vcpu->arch.mmio_vsx_copy_nums = 1; | 206 | if (op.vsx_flags & VSX_CHECK_VEC) { |
| 345 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; | 207 | if (kvmppc_check_altivec_disabled(vcpu)) |
| 346 | emulated = kvmppc_handle_vsx_load(run, vcpu, | 208 | return EMULATE_DONE; |
| 347 | KVM_MMIO_REG_VSX|rt, 8, 1, 0); | 209 | } else { |
| 348 | break; | 210 | if (kvmppc_check_vsx_disabled(vcpu)) |
| 349 | 211 | return EMULATE_DONE; | |
| 350 | case OP_31_XOP_LXSSPX: | 212 | } |
| 351 | if (kvmppc_check_vsx_disabled(vcpu)) | 213 | |
| 352 | return EMULATE_DONE; | 214 | if (op.vsx_flags & VSX_FPCONV) |
| 353 | vcpu->arch.mmio_vsx_copy_nums = 1; | 215 | vcpu->arch.mmio_sp64_extend = 1; |
| 354 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; | 216 | |
| 355 | vcpu->arch.mmio_sp64_extend = 1; | 217 | if (op.element_size == 8) { |
| 356 | emulated = kvmppc_handle_vsx_load(run, vcpu, | 218 | if (op.vsx_flags & VSX_SPLAT) |
| 357 | KVM_MMIO_REG_VSX|rt, 4, 1, 0); | 219 | vcpu->arch.mmio_copy_type = |
| 358 | break; | 220 | KVMPPC_VSX_COPY_DWORD_LOAD_DUMP; |
| 221 | else | ||
| 222 | vcpu->arch.mmio_copy_type = | ||
| 223 | KVMPPC_VSX_COPY_DWORD; | ||
| 224 | } else if (op.element_size == 4) { | ||
| 225 | if (op.vsx_flags & VSX_SPLAT) | ||
| 226 | vcpu->arch.mmio_copy_type = | ||
| 227 | KVMPPC_VSX_COPY_WORD_LOAD_DUMP; | ||
| 228 | else | ||
| 229 | vcpu->arch.mmio_copy_type = | ||
| 230 | KVMPPC_VSX_COPY_WORD; | ||
| 231 | } else | ||
| 232 | break; | ||
| 233 | |||
| 234 | if (size < op.element_size) { | ||
| 235 | /* precision convert case: lxsspx, etc */ | ||
| 236 | vcpu->arch.mmio_vsx_copy_nums = 1; | ||
| 237 | io_size_each = size; | ||
| 238 | } else { /* lxvw4x, lxvd2x, etc */ | ||
| 239 | vcpu->arch.mmio_vsx_copy_nums = | ||
| 240 | size/op.element_size; | ||
| 241 | io_size_each = op.element_size; | ||
| 242 | } | ||
| 359 | 243 | ||
| 360 | case OP_31_XOP_LXSIWAX: | ||
| 361 | if (kvmppc_check_vsx_disabled(vcpu)) | ||
| 362 | return EMULATE_DONE; | ||
| 363 | vcpu->arch.mmio_vsx_copy_nums = 1; | ||
| 364 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; | ||
| 365 | emulated = kvmppc_handle_vsx_load(run, vcpu, | 244 | emulated = kvmppc_handle_vsx_load(run, vcpu, |
| 366 | KVM_MMIO_REG_VSX|rt, 4, 1, 1); | 245 | KVM_MMIO_REG_VSX | (op.reg & 0x1f), |
| 367 | break; | 246 | io_size_each, 1, op.type & SIGNEXT); |
| 368 | |||
| 369 | case OP_31_XOP_LXSIWZX: | ||
| 370 | if (kvmppc_check_vsx_disabled(vcpu)) | ||
| 371 | return EMULATE_DONE; | ||
| 372 | vcpu->arch.mmio_vsx_copy_nums = 1; | ||
| 373 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; | ||
| 374 | emulated = kvmppc_handle_vsx_load(run, vcpu, | ||
| 375 | KVM_MMIO_REG_VSX|rt, 4, 1, 0); | ||
| 376 | break; | 247 | break; |
| 248 | } | ||
| 249 | #endif | ||
| 250 | case STORE: | ||
| 251 | /* if need byte reverse, op.val has been reversed by | ||
| 252 | * analyse_instr(). | ||
| 253 | */ | ||
| 254 | emulated = kvmppc_handle_store(run, vcpu, op.val, | ||
| 255 | size, 1); | ||
| 377 | 256 | ||
| 378 | case OP_31_XOP_LXVD2X: | 257 | if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) |
| 379 | /* | 258 | kvmppc_set_gpr(vcpu, op.update_reg, op.ea); |
| 380 | * In this case, the official load/store process is like this: | ||
| 381 | * Step1, exit from vm by page fault isr, then kvm save vsr. | ||
| 382 | * Please see guest_exit_cont->store_fp_state->SAVE_32VSRS | ||
| 383 | * as reference. | ||
| 384 | * | ||
| 385 | * Step2, copy data between memory and VCPU | ||
| 386 | * Notice: for LXVD2X/STXVD2X/LXVW4X/STXVW4X, we use | ||
| 387 | * 2copies*8bytes or 4copies*4bytes | ||
| 388 | * to simulate one copy of 16bytes. | ||
| 389 | * Also there is an endian issue here, we should notice the | ||
| 390 | * layout of memory. | ||
| 391 | * Please see the macros LXVD2X_ROT/STXVD2X_ROT for more reference. | ||
| 392 | * If host is little-endian, kvm will call XXSWAPD for | ||
| 393 | * LXVD2X_ROT/STXVD2X_ROT. | ||
| 394 | * So, if host is little-endian, | ||
| 395 | * the position of memory should be swapped. | ||
| 396 | * | ||
| 397 | * Step3, return to guest, kvm reset register. | ||
| 398 | * Please see kvmppc_hv_entry->load_fp_state->REST_32VSRS | ||
| 399 | * as reference. | ||
| 400 | */ | ||
| 401 | if (kvmppc_check_vsx_disabled(vcpu)) | ||
| 402 | return EMULATE_DONE; | ||
| 403 | vcpu->arch.mmio_vsx_copy_nums = 2; | ||
| 404 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; | ||
| 405 | emulated = kvmppc_handle_vsx_load(run, vcpu, | ||
| 406 | KVM_MMIO_REG_VSX|rt, 8, 1, 0); | ||
| 407 | break; | ||
| 408 | 259 | ||
| 409 | case OP_31_XOP_LXVW4X: | ||
| 410 | if (kvmppc_check_vsx_disabled(vcpu)) | ||
| 411 | return EMULATE_DONE; | ||
| 412 | vcpu->arch.mmio_vsx_copy_nums = 4; | ||
| 413 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD; | ||
| 414 | emulated = kvmppc_handle_vsx_load(run, vcpu, | ||
| 415 | KVM_MMIO_REG_VSX|rt, 4, 1, 0); | ||
| 416 | break; | 260 | break; |
| 417 | 261 | #ifdef CONFIG_PPC_FPU | |
| 418 | case OP_31_XOP_LXVDSX: | 262 | case STORE_FP: |
| 419 | if (kvmppc_check_vsx_disabled(vcpu)) | 263 | if (kvmppc_check_fp_disabled(vcpu)) |
| 420 | return EMULATE_DONE; | 264 | return EMULATE_DONE; |
| 421 | vcpu->arch.mmio_vsx_copy_nums = 1; | ||
| 422 | vcpu->arch.mmio_vsx_copy_type = | ||
| 423 | KVMPPC_VSX_COPY_DWORD_LOAD_DUMP; | ||
| 424 | emulated = kvmppc_handle_vsx_load(run, vcpu, | ||
| 425 | KVM_MMIO_REG_VSX|rt, 8, 1, 0); | ||
| 426 | break; | ||
| 427 | 265 | ||
| 428 | case OP_31_XOP_STXSDX: | 266 | /* The FP registers need to be flushed so that |
| 429 | if (kvmppc_check_vsx_disabled(vcpu)) | 267 | * kvmppc_handle_store() can read actual FP vals |
| 430 | return EMULATE_DONE; | 268 | * from vcpu->arch. |
| 431 | vcpu->arch.mmio_vsx_copy_nums = 1; | 269 | */ |
| 432 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; | 270 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
| 433 | emulated = kvmppc_handle_vsx_store(run, vcpu, | 271 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, |
| 434 | rs, 8, 1); | 272 | MSR_FP); |
| 435 | break; | ||
| 436 | 273 | ||
| 437 | case OP_31_XOP_STXSSPX: | 274 | if (op.type & FPCONV) |
| 438 | if (kvmppc_check_vsx_disabled(vcpu)) | 275 | vcpu->arch.mmio_sp64_extend = 1; |
| 439 | return EMULATE_DONE; | ||
| 440 | vcpu->arch.mmio_vsx_copy_nums = 1; | ||
| 441 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; | ||
| 442 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 443 | emulated = kvmppc_handle_vsx_store(run, vcpu, | ||
| 444 | rs, 4, 1); | ||
| 445 | break; | ||
| 446 | 276 | ||
| 447 | case OP_31_XOP_STXSIWX: | 277 | emulated = kvmppc_handle_store(run, vcpu, |
| 448 | if (kvmppc_check_vsx_disabled(vcpu)) | 278 | VCPU_FPR(vcpu, op.reg), size, 1); |
| 449 | return EMULATE_DONE; | ||
| 450 | vcpu->arch.mmio_vsx_offset = 1; | ||
| 451 | vcpu->arch.mmio_vsx_copy_nums = 1; | ||
| 452 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD; | ||
| 453 | emulated = kvmppc_handle_vsx_store(run, vcpu, | ||
| 454 | rs, 4, 1); | ||
| 455 | break; | ||
| 456 | 279 | ||
| 457 | case OP_31_XOP_STXVD2X: | 280 | if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) |
| 458 | if (kvmppc_check_vsx_disabled(vcpu)) | 281 | kvmppc_set_gpr(vcpu, op.update_reg, op.ea); |
| 459 | return EMULATE_DONE; | ||
| 460 | vcpu->arch.mmio_vsx_copy_nums = 2; | ||
| 461 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; | ||
| 462 | emulated = kvmppc_handle_vsx_store(run, vcpu, | ||
| 463 | rs, 8, 1); | ||
| 464 | break; | ||
| 465 | 282 | ||
| 466 | case OP_31_XOP_STXVW4X: | ||
| 467 | if (kvmppc_check_vsx_disabled(vcpu)) | ||
| 468 | return EMULATE_DONE; | ||
| 469 | vcpu->arch.mmio_vsx_copy_nums = 4; | ||
| 470 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD; | ||
| 471 | emulated = kvmppc_handle_vsx_store(run, vcpu, | ||
| 472 | rs, 4, 1); | ||
| 473 | break; | 283 | break; |
| 474 | #endif /* CONFIG_VSX */ | 284 | #endif |
| 475 | |||
| 476 | #ifdef CONFIG_ALTIVEC | 285 | #ifdef CONFIG_ALTIVEC |
| 477 | case OP_31_XOP_LVX: | 286 | case STORE_VMX: |
| 478 | if (kvmppc_check_altivec_disabled(vcpu)) | 287 | if (kvmppc_check_altivec_disabled(vcpu)) |
| 479 | return EMULATE_DONE; | 288 | return EMULATE_DONE; |
| 480 | vcpu->arch.vaddr_accessed &= ~0xFULL; | ||
| 481 | vcpu->arch.paddr_accessed &= ~0xFULL; | ||
| 482 | vcpu->arch.mmio_vmx_copy_nums = 2; | ||
| 483 | emulated = kvmppc_handle_load128_by2x64(run, vcpu, | ||
| 484 | KVM_MMIO_REG_VMX|rt, 1); | ||
| 485 | break; | ||
| 486 | 289 | ||
| 487 | case OP_31_XOP_STVX: | 290 | /* Hardware enforces alignment of VMX accesses. */ |
| 488 | if (kvmppc_check_altivec_disabled(vcpu)) | 291 | vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1); |
| 489 | return EMULATE_DONE; | 292 | vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1); |
| 490 | vcpu->arch.vaddr_accessed &= ~0xFULL; | 293 | |
| 491 | vcpu->arch.paddr_accessed &= ~0xFULL; | 294 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
| 492 | vcpu->arch.mmio_vmx_copy_nums = 2; | 295 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, |
| 493 | emulated = kvmppc_handle_store128_by2x64(run, vcpu, | 296 | MSR_VEC); |
| 494 | rs, 1); | 297 | if (size == 16) { /* stvx */ |
| 495 | break; | 298 | vcpu->arch.mmio_copy_type = |
| 496 | #endif /* CONFIG_ALTIVEC */ | 299 | KVMPPC_VMX_COPY_DWORD; |
| 300 | } else if (size == 4) { /* stvewx */ | ||
| 301 | vcpu->arch.mmio_copy_type = | ||
| 302 | KVMPPC_VMX_COPY_WORD; | ||
| 303 | } else if (size == 2) { /* stvehx */ | ||
| 304 | vcpu->arch.mmio_copy_type = | ||
| 305 | KVMPPC_VMX_COPY_HWORD; | ||
| 306 | } else if (size == 1) { /* stvebx */ | ||
| 307 | vcpu->arch.mmio_copy_type = | ||
| 308 | KVMPPC_VMX_COPY_BYTE; | ||
| 309 | } else | ||
| 310 | break; | ||
| 311 | |||
| 312 | vcpu->arch.mmio_vmx_offset = | ||
| 313 | (vcpu->arch.vaddr_accessed & 0xf)/size; | ||
| 314 | |||
| 315 | if (size == 16) { | ||
| 316 | vcpu->arch.mmio_vmx_copy_nums = 2; | ||
| 317 | emulated = kvmppc_handle_vmx_store(run, | ||
| 318 | vcpu, op.reg, 8, 1); | ||
| 319 | } else { | ||
| 320 | vcpu->arch.mmio_vmx_copy_nums = 1; | ||
| 321 | emulated = kvmppc_handle_vmx_store(run, | ||
| 322 | vcpu, op.reg, size, 1); | ||
| 323 | } | ||
| 497 | 324 | ||
| 498 | default: | ||
| 499 | emulated = EMULATE_FAIL; | ||
| 500 | break; | 325 | break; |
| 501 | } | ||
| 502 | break; | ||
| 503 | |||
| 504 | case OP_LWZ: | ||
| 505 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | ||
| 506 | break; | ||
| 507 | |||
| 508 | #ifdef CONFIG_PPC_FPU | ||
| 509 | case OP_STFS: | ||
| 510 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 511 | return EMULATE_DONE; | ||
| 512 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 513 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 514 | VCPU_FPR(vcpu, rs), | ||
| 515 | 4, 1); | ||
| 516 | break; | ||
| 517 | |||
| 518 | case OP_STFSU: | ||
| 519 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 520 | return EMULATE_DONE; | ||
| 521 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 522 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 523 | VCPU_FPR(vcpu, rs), | ||
| 524 | 4, 1); | ||
| 525 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 526 | break; | ||
| 527 | |||
| 528 | case OP_STFD: | ||
| 529 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 530 | return EMULATE_DONE; | ||
| 531 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 532 | VCPU_FPR(vcpu, rs), | ||
| 533 | 8, 1); | ||
| 534 | break; | ||
| 535 | |||
| 536 | case OP_STFDU: | ||
| 537 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 538 | return EMULATE_DONE; | ||
| 539 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 540 | VCPU_FPR(vcpu, rs), | ||
| 541 | 8, 1); | ||
| 542 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 543 | break; | ||
| 544 | #endif | 326 | #endif |
| 327 | #ifdef CONFIG_VSX | ||
| 328 | case STORE_VSX: { | ||
| 329 | int io_size_each; | ||
| 330 | |||
| 331 | if (op.vsx_flags & VSX_CHECK_VEC) { | ||
| 332 | if (kvmppc_check_altivec_disabled(vcpu)) | ||
| 333 | return EMULATE_DONE; | ||
| 334 | } else { | ||
| 335 | if (kvmppc_check_vsx_disabled(vcpu)) | ||
| 336 | return EMULATE_DONE; | ||
| 337 | } | ||
| 338 | |||
| 339 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) | ||
| 340 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, | ||
| 341 | MSR_VSX); | ||
| 342 | |||
| 343 | if (op.vsx_flags & VSX_FPCONV) | ||
| 344 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 345 | |||
| 346 | if (op.element_size == 8) | ||
| 347 | vcpu->arch.mmio_copy_type = | ||
| 348 | KVMPPC_VSX_COPY_DWORD; | ||
| 349 | else if (op.element_size == 4) | ||
| 350 | vcpu->arch.mmio_copy_type = | ||
| 351 | KVMPPC_VSX_COPY_WORD; | ||
| 352 | else | ||
| 353 | break; | ||
| 354 | |||
| 355 | if (size < op.element_size) { | ||
| 356 | /* precise conversion case, like stxsspx */ | ||
| 357 | vcpu->arch.mmio_vsx_copy_nums = 1; | ||
| 358 | io_size_each = size; | ||
| 359 | } else { /* stxvw4x, stxvd2x, etc */ | ||
| 360 | vcpu->arch.mmio_vsx_copy_nums = | ||
| 361 | size/op.element_size; | ||
| 362 | io_size_each = op.element_size; | ||
| 363 | } | ||
| 545 | 364 | ||
| 546 | case OP_LD: | 365 | emulated = kvmppc_handle_vsx_store(run, vcpu, |
| 547 | rt = get_rt(inst); | 366 | op.reg & 0x1f, io_size_each, 1); |
| 548 | switch (inst & 3) { | ||
| 549 | case 0: /* ld */ | ||
| 550 | emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); | ||
| 551 | break; | ||
| 552 | case 1: /* ldu */ | ||
| 553 | emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); | ||
| 554 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 555 | break; | ||
| 556 | case 2: /* lwa */ | ||
| 557 | emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1); | ||
| 558 | break; | 367 | break; |
| 559 | default: | ||
| 560 | emulated = EMULATE_FAIL; | ||
| 561 | } | 368 | } |
| 562 | break; | 369 | #endif |
| 563 | 370 | case CACHEOP: | |
| 564 | case OP_LWZU: | 371 | /* Do nothing. The guest is performing dcbi because |
| 565 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 372 | * hardware DMA is not snooped by the dcache, but |
| 566 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | 373 | * emulated DMA either goes through the dcache as |
| 567 | break; | 374 | * normal writes, or the host kernel has handled dcache |
| 568 | 375 | * coherence. | |
| 569 | case OP_LBZ: | 376 | */ |
| 570 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 377 | emulated = EMULATE_DONE; |
| 571 | break; | ||
| 572 | |||
| 573 | case OP_LBZU: | ||
| 574 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | ||
| 575 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 576 | break; | ||
| 577 | |||
| 578 | case OP_STW: | ||
| 579 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 580 | kvmppc_get_gpr(vcpu, rs), | ||
| 581 | 4, 1); | ||
| 582 | break; | ||
| 583 | |||
| 584 | case OP_STD: | ||
| 585 | rs = get_rs(inst); | ||
| 586 | switch (inst & 3) { | ||
| 587 | case 0: /* std */ | ||
| 588 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 589 | kvmppc_get_gpr(vcpu, rs), 8, 1); | ||
| 590 | break; | ||
| 591 | case 1: /* stdu */ | ||
| 592 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 593 | kvmppc_get_gpr(vcpu, rs), 8, 1); | ||
| 594 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 595 | break; | 378 | break; |
| 596 | default: | 379 | default: |
| 597 | emulated = EMULATE_FAIL; | 380 | break; |
| 598 | } | 381 | } |
| 599 | break; | ||
| 600 | |||
| 601 | case OP_STWU: | ||
| 602 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 603 | kvmppc_get_gpr(vcpu, rs), 4, 1); | ||
| 604 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 605 | break; | ||
| 606 | |||
| 607 | case OP_STB: | ||
| 608 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 609 | kvmppc_get_gpr(vcpu, rs), 1, 1); | ||
| 610 | break; | ||
| 611 | |||
| 612 | case OP_STBU: | ||
| 613 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 614 | kvmppc_get_gpr(vcpu, rs), 1, 1); | ||
| 615 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 616 | break; | ||
| 617 | |||
| 618 | case OP_LHZ: | ||
| 619 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
| 620 | break; | ||
| 621 | |||
| 622 | case OP_LHZU: | ||
| 623 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
| 624 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 625 | break; | ||
| 626 | |||
| 627 | case OP_LHA: | ||
| 628 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | ||
| 629 | break; | ||
| 630 | |||
| 631 | case OP_LHAU: | ||
| 632 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | ||
| 633 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 634 | break; | ||
| 635 | |||
| 636 | case OP_STH: | ||
| 637 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 638 | kvmppc_get_gpr(vcpu, rs), 2, 1); | ||
| 639 | break; | ||
| 640 | |||
| 641 | case OP_STHU: | ||
| 642 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 643 | kvmppc_get_gpr(vcpu, rs), 2, 1); | ||
| 644 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 645 | break; | ||
| 646 | |||
| 647 | #ifdef CONFIG_PPC_FPU | ||
| 648 | case OP_LFS: | ||
| 649 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 650 | return EMULATE_DONE; | ||
| 651 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 652 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 653 | KVM_MMIO_REG_FPR|rt, 4, 1); | ||
| 654 | break; | ||
| 655 | |||
| 656 | case OP_LFSU: | ||
| 657 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 658 | return EMULATE_DONE; | ||
| 659 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 660 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 661 | KVM_MMIO_REG_FPR|rt, 4, 1); | ||
| 662 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 663 | break; | ||
| 664 | |||
| 665 | case OP_LFD: | ||
| 666 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 667 | return EMULATE_DONE; | ||
| 668 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 669 | KVM_MMIO_REG_FPR|rt, 8, 1); | ||
| 670 | break; | ||
| 671 | |||
| 672 | case OP_LFDU: | ||
| 673 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 674 | return EMULATE_DONE; | ||
| 675 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 676 | KVM_MMIO_REG_FPR|rt, 8, 1); | ||
| 677 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 678 | break; | ||
| 679 | #endif | ||
| 680 | |||
| 681 | default: | ||
| 682 | emulated = EMULATE_FAIL; | ||
| 683 | break; | ||
| 684 | } | 382 | } |
| 685 | 383 | ||
| 686 | if (emulated == EMULATE_FAIL) { | 384 | if (emulated == EMULATE_FAIL) { |
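The net effect of this rewrite: decode is delegated to analyse_instr() from the generic sstep code, and MMIO emulation keys off the classified operation instead of one switch arm per opcode. A condensed, hedged sketch of the new control flow (VMX/VSX branches elided; the flag names come from asm/sstep.h):

struct instruction_op op;

vcpu->arch.regs.msr = vcpu->arch.shared->msr;
if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
        int size = GETSIZE(op.type);

        switch (op.type & INSTR_TYPE_MASK) {
        case LOAD:
                /* SIGNEXT selects the sign-extending MMIO helper;
                 * BYTEREV marks lwbrx-style byte-reversed accesses.
                 */
                if (op.type & SIGNEXT)
                        emulated = kvmppc_handle_loads(run, vcpu, op.reg,
                                        size, !(op.type & BYTEREV));
                else
                        emulated = kvmppc_handle_load(run, vcpu, op.reg,
                                        size, !(op.type & BYTEREV));
                break;
        case STORE:
                /* analyse_instr() already byte-swapped op.val if needed */
                emulated = kvmppc_handle_store(run, vcpu, op.val, size, 1);
                break;
        }

        /* update-form instructions write the EA back into RA */
        if ((op.type & UPDATE) && emulated != EMULATE_FAIL)
                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
}

Supporting a further load/store form now mostly means teaching analyse_instr() to classify it, rather than growing this function.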
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 3764d000872e..0e8c20c5eaac 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
| @@ -648,9 +648,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
| 648 | #endif | 648 | #endif |
| 649 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 649 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
| 650 | case KVM_CAP_PPC_HTM: | 650 | case KVM_CAP_PPC_HTM: |
| 651 | r = hv_enabled && | 651 | r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) || |
| 652 | (!!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) || | 652 | (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)); |
| 653 | cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)); | ||
| 654 | break; | 653 | break; |
| 655 | #endif | 654 | #endif |
| 656 | default: | 655 | default: |
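A hedged restatement of the reworked KVM_CAP_PPC_HTM test, using illustrative names for the two checks (host_has_native_tm standing for PPC_FEATURE2_HTM in cpu_user_features2, p9_tm_hv_assist for CPU_FTR_P9_TM_HV_ASSIST):

/* Native TM is now reported for PR as well as HV guests; the
 * POWER9 assisted-emulation case remains HV-only.
 */
r = host_has_native_tm || (hv_enabled && p9_tm_hv_assist);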
| @@ -907,6 +906,26 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu, | |||
| 907 | } | 906 | } |
| 908 | } | 907 | } |
| 909 | 908 | ||
| 909 | static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu, | ||
| 910 | u32 gpr) | ||
| 911 | { | ||
| 912 | union kvmppc_one_reg val; | ||
| 913 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | ||
| 914 | |||
| 915 | if (vcpu->arch.mmio_vsx_tx_sx_enabled) { | ||
| 916 | val.vsx32val[0] = gpr; | ||
| 917 | val.vsx32val[1] = gpr; | ||
| 918 | val.vsx32val[2] = gpr; | ||
| 919 | val.vsx32val[3] = gpr; | ||
| 920 | VCPU_VSX_VR(vcpu, index) = val.vval; | ||
| 921 | } else { | ||
| 922 | val.vsx32val[0] = gpr; | ||
| 923 | val.vsx32val[1] = gpr; | ||
| 924 | VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0]; | ||
| 925 | VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0]; | ||
| 926 | } | ||
| 927 | } | ||
| 928 | |||
| 910 | static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, | 929 | static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, |
| 911 | u32 gpr32) | 930 | u32 gpr32) |
| 912 | { | 931 | { |
| @@ -933,30 +952,110 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, | |||
| 933 | #endif /* CONFIG_VSX */ | 952 | #endif /* CONFIG_VSX */ |
| 934 | 953 | ||
| 935 | #ifdef CONFIG_ALTIVEC | 954 | #ifdef CONFIG_ALTIVEC |
| 955 | static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu, | ||
| 956 | int index, int element_size) | ||
| 957 | { | ||
| 958 | int offset; | ||
| 959 | int elts = sizeof(vector128)/element_size; | ||
| 960 | |||
| 961 | if ((index < 0) || (index >= elts)) | ||
| 962 | return -1; | ||
| 963 | |||
| 964 | if (kvmppc_need_byteswap(vcpu)) | ||
| 965 | offset = elts - index - 1; | ||
| 966 | else | ||
| 967 | offset = index; | ||
| 968 | |||
| 969 | return offset; | ||
| 970 | } | ||
| 971 | |||
| 972 | static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu, | ||
| 973 | int index) | ||
| 974 | { | ||
| 975 | return kvmppc_get_vmx_offset_generic(vcpu, index, 8); | ||
| 976 | } | ||
| 977 | |||
| 978 | static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu, | ||
| 979 | int index) | ||
| 980 | { | ||
| 981 | return kvmppc_get_vmx_offset_generic(vcpu, index, 4); | ||
| 982 | } | ||
| 983 | |||
| 984 | static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu, | ||
| 985 | int index) | ||
| 986 | { | ||
| 987 | return kvmppc_get_vmx_offset_generic(vcpu, index, 2); | ||
| 988 | } | ||
| 989 | |||
| 990 | static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu, | ||
| 991 | int index) | ||
| 992 | { | ||
| 993 | return kvmppc_get_vmx_offset_generic(vcpu, index, 1); | ||
| 994 | } | ||
| 995 | |||
| 996 | |||
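For the 16-byte VMX register, element_size determines how many elements exist (2 doublewords, 4 words, 8 halfwords or 16 bytes), and the byteswap case simply mirrors the index within that range. A small standalone sketch of the same arithmetic:

    /* Mirrors kvmppc_get_vmx_offset_generic(): a 16-byte vector holds
     * 16/element_size elements; a byte-swapped guest sees them in
     * reversed order. Returns -1 for an out-of-range index. */
    static int vmx_offset(int index, int element_size, int byteswap)
    {
            int elts = 16 / element_size;

            if (index < 0 || index >= elts)
                    return -1;
            return byteswap ? elts - index - 1 : index;
    }

    /* e.g. vmx_offset(0, 8, 1) == 1: dword element 0 of a byte-swapped
     * guest lands in host lane 1; vmx_offset(3, 4, 0) == 3. */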
| 936 | static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, | 997 | static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, |
| 937 | u64 gpr) | 998 | u64 gpr) |
| 938 | { | 999 | { |
| 1000 | union kvmppc_one_reg val; | ||
| 1001 | int offset = kvmppc_get_vmx_dword_offset(vcpu, | ||
| 1002 | vcpu->arch.mmio_vmx_offset); | ||
| 939 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | 1003 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; |
| 940 | u32 hi, lo; | ||
| 941 | u32 di; | ||
| 942 | 1004 | ||
| 943 | #ifdef __BIG_ENDIAN | 1005 | if (offset == -1) |
| 944 | hi = gpr >> 32; | 1006 | return; |
| 945 | lo = gpr & 0xffffffff; | 1007 | |
| 946 | #else | 1008 | val.vval = VCPU_VSX_VR(vcpu, index); |
| 947 | lo = gpr >> 32; | 1009 | val.vsxval[offset] = gpr; |
| 948 | hi = gpr & 0xffffffff; | 1010 | VCPU_VSX_VR(vcpu, index) = val.vval; |
| 949 | #endif | 1011 | } |
| 1012 | |||
| 1013 | static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu, | ||
| 1014 | u32 gpr32) | ||
| 1015 | { | ||
| 1016 | union kvmppc_one_reg val; | ||
| 1017 | int offset = kvmppc_get_vmx_word_offset(vcpu, | ||
| 1018 | vcpu->arch.mmio_vmx_offset); | ||
| 1019 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | ||
| 950 | 1020 | ||
| 951 | di = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */ | 1021 | if (offset == -1) |
| 952 | if (di > 1) | ||
| 953 | return; | 1022 | return; |
| 954 | 1023 | ||
| 955 | if (vcpu->arch.mmio_host_swabbed) | 1024 | val.vval = VCPU_VSX_VR(vcpu, index); |
| 956 | di = 1 - di; | 1025 | val.vsx32val[offset] = gpr32; |
| 1026 | VCPU_VSX_VR(vcpu, index) = val.vval; | ||
| 1027 | } | ||
| 1028 | |||
| 1029 | static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu, | ||
| 1030 | u16 gpr16) | ||
| 1031 | { | ||
| 1032 | union kvmppc_one_reg val; | ||
| 1033 | int offset = kvmppc_get_vmx_hword_offset(vcpu, | ||
| 1034 | vcpu->arch.mmio_vmx_offset); | ||
| 1035 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | ||
| 1036 | |||
| 1037 | if (offset == -1) | ||
| 1038 | return; | ||
| 1039 | |||
| 1040 | val.vval = VCPU_VSX_VR(vcpu, index); | ||
| 1041 | val.vsx16val[offset] = gpr16; | ||
| 1042 | VCPU_VSX_VR(vcpu, index) = val.vval; | ||
| 1043 | } | ||
| 1044 | |||
| 1045 | static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu, | ||
| 1046 | u8 gpr8) | ||
| 1047 | { | ||
| 1048 | union kvmppc_one_reg val; | ||
| 1049 | int offset = kvmppc_get_vmx_byte_offset(vcpu, | ||
| 1050 | vcpu->arch.mmio_vmx_offset); | ||
| 1051 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | ||
| 1052 | |||
| 1053 | if (offset == -1) | ||
| 1054 | return; | ||
| 957 | 1055 | ||
| 958 | VCPU_VSX_VR(vcpu, index).u[di * 2] = hi; | 1056 | val.vval = VCPU_VSX_VR(vcpu, index); |
| 959 | VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo; | 1057 | val.vsx8val[offset] = gpr8; |
| 1058 | VCPU_VSX_VR(vcpu, index) = val.vval; | ||
| 960 | } | 1059 | } |
| 961 | #endif /* CONFIG_ALTIVEC */ | 1060 | #endif /* CONFIG_ALTIVEC */ |
| 962 | 1061 | ||
| @@ -1041,6 +1140,9 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, | |||
| 1041 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); | 1140 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); |
| 1042 | break; | 1141 | break; |
| 1043 | case KVM_MMIO_REG_FPR: | 1142 | case KVM_MMIO_REG_FPR: |
| 1143 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) | ||
| 1144 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); | ||
| 1145 | |||
| 1044 | VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; | 1146 | VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; |
| 1045 | break; | 1147 | break; |
| 1046 | #ifdef CONFIG_PPC_BOOK3S | 1148 | #ifdef CONFIG_PPC_BOOK3S |
| @@ -1054,18 +1156,36 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, | |||
| 1054 | #endif | 1156 | #endif |
| 1055 | #ifdef CONFIG_VSX | 1157 | #ifdef CONFIG_VSX |
| 1056 | case KVM_MMIO_REG_VSX: | 1158 | case KVM_MMIO_REG_VSX: |
| 1057 | if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD) | 1159 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
| 1160 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); | ||
| 1161 | |||
| 1162 | if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) | ||
| 1058 | kvmppc_set_vsr_dword(vcpu, gpr); | 1163 | kvmppc_set_vsr_dword(vcpu, gpr); |
| 1059 | else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD) | 1164 | else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) |
| 1060 | kvmppc_set_vsr_word(vcpu, gpr); | 1165 | kvmppc_set_vsr_word(vcpu, gpr); |
| 1061 | else if (vcpu->arch.mmio_vsx_copy_type == | 1166 | else if (vcpu->arch.mmio_copy_type == |
| 1062 | KVMPPC_VSX_COPY_DWORD_LOAD_DUMP) | 1167 | KVMPPC_VSX_COPY_DWORD_LOAD_DUMP) |
| 1063 | kvmppc_set_vsr_dword_dump(vcpu, gpr); | 1168 | kvmppc_set_vsr_dword_dump(vcpu, gpr); |
| 1169 | else if (vcpu->arch.mmio_copy_type == | ||
| 1170 | KVMPPC_VSX_COPY_WORD_LOAD_DUMP) | ||
| 1171 | kvmppc_set_vsr_word_dump(vcpu, gpr); | ||
| 1064 | break; | 1172 | break; |
| 1065 | #endif | 1173 | #endif |
| 1066 | #ifdef CONFIG_ALTIVEC | 1174 | #ifdef CONFIG_ALTIVEC |
| 1067 | case KVM_MMIO_REG_VMX: | 1175 | case KVM_MMIO_REG_VMX: |
| 1068 | kvmppc_set_vmx_dword(vcpu, gpr); | 1176 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
| 1177 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); | ||
| 1178 | |||
| 1179 | if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) | ||
| 1180 | kvmppc_set_vmx_dword(vcpu, gpr); | ||
| 1181 | else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) | ||
| 1182 | kvmppc_set_vmx_word(vcpu, gpr); | ||
| 1183 | else if (vcpu->arch.mmio_copy_type == | ||
| 1184 | KVMPPC_VMX_COPY_HWORD) | ||
| 1185 | kvmppc_set_vmx_hword(vcpu, gpr); | ||
| 1186 | else if (vcpu->arch.mmio_copy_type == | ||
| 1187 | KVMPPC_VMX_COPY_BYTE) | ||
| 1188 | kvmppc_set_vmx_byte(vcpu, gpr); | ||
| 1069 | break; | 1189 | break; |
| 1070 | #endif | 1190 | #endif |
| 1071 | default: | 1191 | default: |
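The giveup_ext hook exists because PR KVM may still hold the guest's FP/VEC/VSX state live in host registers; flushing it back before the MMIO result is written into the vcpu's in-memory register image keeps the two copies from diverging. HV KVM leaves the hook NULL, hence the check before every call. The idiom, as a sketch (flush_ext_before_mmio is an illustrative wrapper, not a function in this patch):

    /* Sketch of the optional-callback idiom used above: flush any live
     * FP/vector state back into vcpu->arch before the in-memory register
     * image is modified. Backends that never keep guest state live in
     * host registers simply leave the hook NULL. */
    static void flush_ext_before_mmio(struct kvm_vcpu *vcpu, ulong msr_bit)
    {
            if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                    vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, msr_bit);
    }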
| @@ -1228,7 +1348,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) | |||
| 1228 | u32 dword_offset, word_offset; | 1348 | u32 dword_offset, word_offset; |
| 1229 | union kvmppc_one_reg reg; | 1349 | union kvmppc_one_reg reg; |
| 1230 | int vsx_offset = 0; | 1350 | int vsx_offset = 0; |
| 1231 | int copy_type = vcpu->arch.mmio_vsx_copy_type; | 1351 | int copy_type = vcpu->arch.mmio_copy_type; |
| 1232 | int result = 0; | 1352 | int result = 0; |
| 1233 | 1353 | ||
| 1234 | switch (copy_type) { | 1354 | switch (copy_type) { |
| @@ -1344,14 +1464,16 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, | |||
| 1344 | #endif /* CONFIG_VSX */ | 1464 | #endif /* CONFIG_VSX */ |
| 1345 | 1465 | ||
| 1346 | #ifdef CONFIG_ALTIVEC | 1466 | #ifdef CONFIG_ALTIVEC |
| 1347 | /* handle quadword load access in two halves */ | 1467 | int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1348 | int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu, | 1468 | unsigned int rt, unsigned int bytes, int is_default_endian) |
| 1349 | unsigned int rt, int is_default_endian) | ||
| 1350 | { | 1469 | { |
| 1351 | enum emulation_result emulated = EMULATE_DONE; | 1470 | enum emulation_result emulated = EMULATE_DONE; |
| 1352 | 1471 | ||
| 1472 | if (vcpu->arch.mmio_vmx_copy_nums > 2) | ||
| 1473 | return EMULATE_FAIL; | ||
| 1474 | |||
| 1353 | while (vcpu->arch.mmio_vmx_copy_nums) { | 1475 | while (vcpu->arch.mmio_vmx_copy_nums) { |
| 1354 | emulated = __kvmppc_handle_load(run, vcpu, rt, 8, | 1476 | emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, |
| 1355 | is_default_endian, 0); | 1477 | is_default_endian, 0); |
| 1356 | 1478 | ||
| 1357 | if (emulated != EMULATE_DONE) | 1479 | if (emulated != EMULATE_DONE) |
| @@ -1359,55 +1481,127 @@ int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 1359 | 1481 | ||
| 1360 | vcpu->arch.paddr_accessed += run->mmio.len; | 1482 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1361 | vcpu->arch.mmio_vmx_copy_nums--; | 1483 | vcpu->arch.mmio_vmx_copy_nums--; |
| 1484 | vcpu->arch.mmio_vmx_offset++; | ||
| 1362 | } | 1485 | } |
| 1363 | 1486 | ||
| 1364 | return emulated; | 1487 | return emulated; |
| 1365 | } | 1488 | } |
| 1366 | 1489 | ||
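Compared with the old load128_by2x64 helper, the chunk size is now a parameter: mmio_vmx_copy_nums counts the element-sized MMIO transactions still outstanding, and mmio_vmx_offset names the register lane the next completion fills. A sketch of the state progression (illustrative types, not kernel code):

    /* A 16-byte lvx handled as two 8-byte chunks walks the state:
     *   nums=2 offset=0 -> MMIO #1 completes into dword lane 0
     *   nums=1 offset=1 -> MMIO #2 completes into dword lane 1
     *   nums=0          -> done
     * An element access such as lvehx runs the loop exactly once. */
    struct vmx_mmio_state { int nums, offset; unsigned long paddr; };

    static void advance_vmx_chunk(struct vmx_mmio_state *s, int bytes)
    {
            s->paddr  += bytes;     /* next guest-physical address */
            s->nums   -= 1;         /* one fewer chunk outstanding */
            s->offset += 1;         /* next register lane */
    }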
| 1367 | static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val) | 1490 | int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) |
| 1368 | { | 1491 | { |
| 1369 | vector128 vrs = VCPU_VSX_VR(vcpu, rs); | 1492 | union kvmppc_one_reg reg; |
| 1370 | u32 di; | 1493 | int vmx_offset = 0; |
| 1371 | u64 w0, w1; | 1494 | int result = 0; |
| 1372 | 1495 | ||
| 1373 | di = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */ | 1496 | vmx_offset = |
| 1374 | if (di > 1) | 1497 | kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); |
| 1498 | |||
| 1499 | if (vmx_offset == -1) | ||
| 1375 | return -1; | 1500 | return -1; |
| 1376 | 1501 | ||
| 1377 | if (vcpu->arch.mmio_host_swabbed) | 1502 | reg.vval = VCPU_VSX_VR(vcpu, index); |
| 1378 | di = 1 - di; | 1503 | *val = reg.vsxval[vmx_offset]; |
| 1379 | 1504 | ||
| 1380 | w0 = vrs.u[di * 2]; | 1505 | return result; |
| 1381 | w1 = vrs.u[di * 2 + 1]; | 1506 | } |
| 1382 | 1507 | ||
| 1383 | #ifdef __BIG_ENDIAN | 1508 | int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) |
| 1384 | *val = (w0 << 32) | w1; | 1509 | { |
| 1385 | #else | 1510 | union kvmppc_one_reg reg; |
| 1386 | *val = (w1 << 32) | w0; | 1511 | int vmx_offset = 0; |
| 1387 | #endif | 1512 | int result = 0; |
| 1388 | return 0; | 1513 | |
| 1514 | vmx_offset = | ||
| 1515 | kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); | ||
| 1516 | |||
| 1517 | if (vmx_offset == -1) | ||
| 1518 | return -1; | ||
| 1519 | |||
| 1520 | reg.vval = VCPU_VSX_VR(vcpu, index); | ||
| 1521 | *val = reg.vsx32val[vmx_offset]; | ||
| 1522 | |||
| 1523 | return result; | ||
| 1524 | } | ||
| 1525 | |||
| 1526 | int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) | ||
| 1527 | { | ||
| 1528 | union kvmppc_one_reg reg; | ||
| 1529 | int vmx_offset = 0; | ||
| 1530 | int result = 0; | ||
| 1531 | |||
| 1532 | vmx_offset = | ||
| 1533 | kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); | ||
| 1534 | |||
| 1535 | if (vmx_offset == -1) | ||
| 1536 | return -1; | ||
| 1537 | |||
| 1538 | reg.vval = VCPU_VSX_VR(vcpu, index); | ||
| 1539 | *val = reg.vsx16val[vmx_offset]; | ||
| 1540 | |||
| 1541 | return result; | ||
| 1389 | } | 1542 | } |
| 1390 | 1543 | ||
| 1391 | /* handle quadword store in two halves */ | 1544 | int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) |
| 1392 | int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu, | 1545 | { |
| 1393 | unsigned int rs, int is_default_endian) | 1546 | union kvmppc_one_reg reg; |
| 1547 | int vmx_offset = 0; | ||
| 1548 | int result = 0; | ||
| 1549 | |||
| 1550 | vmx_offset = | ||
| 1551 | kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); | ||
| 1552 | |||
| 1553 | if (vmx_offset == -1) | ||
| 1554 | return -1; | ||
| 1555 | |||
| 1556 | reg.vval = VCPU_VSX_VR(vcpu, index); | ||
| 1557 | *val = reg.vsx8val[vmx_offset]; | ||
| 1558 | |||
| 1559 | return result; | ||
| 1560 | } | ||
| 1561 | |||
| 1562 | int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
| 1563 | unsigned int rs, unsigned int bytes, int is_default_endian) | ||
| 1394 | { | 1564 | { |
| 1395 | u64 val = 0; | 1565 | u64 val = 0; |
| 1566 | unsigned int index = rs & KVM_MMIO_REG_MASK; | ||
| 1396 | enum emulation_result emulated = EMULATE_DONE; | 1567 | enum emulation_result emulated = EMULATE_DONE; |
| 1397 | 1568 | ||
| 1569 | if (vcpu->arch.mmio_vmx_copy_nums > 2) | ||
| 1570 | return EMULATE_FAIL; | ||
| 1571 | |||
| 1398 | vcpu->arch.io_gpr = rs; | 1572 | vcpu->arch.io_gpr = rs; |
| 1399 | 1573 | ||
| 1400 | while (vcpu->arch.mmio_vmx_copy_nums) { | 1574 | while (vcpu->arch.mmio_vmx_copy_nums) { |
| 1401 | if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1) | 1575 | switch (vcpu->arch.mmio_copy_type) { |
| 1576 | case KVMPPC_VMX_COPY_DWORD: | ||
| 1577 | if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) | ||
| 1578 | return EMULATE_FAIL; | ||
| 1579 | |||
| 1580 | break; | ||
| 1581 | case KVMPPC_VMX_COPY_WORD: | ||
| 1582 | if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) | ||
| 1583 | return EMULATE_FAIL; | ||
| 1584 | break; | ||
| 1585 | case KVMPPC_VMX_COPY_HWORD: | ||
| 1586 | if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) | ||
| 1587 | return EMULATE_FAIL; | ||
| 1588 | break; | ||
| 1589 | case KVMPPC_VMX_COPY_BYTE: | ||
| 1590 | if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) | ||
| 1591 | return EMULATE_FAIL; | ||
| 1592 | break; | ||
| 1593 | default: | ||
| 1402 | return EMULATE_FAIL; | 1594 | return EMULATE_FAIL; |
| 1595 | } | ||
| 1403 | 1596 | ||
| 1404 | emulated = kvmppc_handle_store(run, vcpu, val, 8, | 1597 | emulated = kvmppc_handle_store(run, vcpu, val, bytes, |
| 1405 | is_default_endian); | 1598 | is_default_endian); |
| 1406 | if (emulated != EMULATE_DONE) | 1599 | if (emulated != EMULATE_DONE) |
| 1407 | break; | 1600 | break; |
| 1408 | 1601 | ||
| 1409 | vcpu->arch.paddr_accessed += run->mmio.len; | 1602 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1410 | vcpu->arch.mmio_vmx_copy_nums--; | 1603 | vcpu->arch.mmio_vmx_copy_nums--; |
| 1604 | vcpu->arch.mmio_vmx_offset++; | ||
| 1411 | } | 1605 | } |
| 1412 | 1606 | ||
| 1413 | return emulated; | 1607 | return emulated; |
| @@ -1422,11 +1616,11 @@ static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu, | |||
| 1422 | vcpu->arch.paddr_accessed += run->mmio.len; | 1616 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1423 | 1617 | ||
| 1424 | if (!vcpu->mmio_is_write) { | 1618 | if (!vcpu->mmio_is_write) { |
| 1425 | emulated = kvmppc_handle_load128_by2x64(run, vcpu, | 1619 | emulated = kvmppc_handle_vmx_load(run, vcpu, |
| 1426 | vcpu->arch.io_gpr, 1); | 1620 | vcpu->arch.io_gpr, run->mmio.len, 1); |
| 1427 | } else { | 1621 | } else { |
| 1428 | emulated = kvmppc_handle_store128_by2x64(run, vcpu, | 1622 | emulated = kvmppc_handle_vmx_store(run, vcpu, |
| 1429 | vcpu->arch.io_gpr, 1); | 1623 | vcpu->arch.io_gpr, run->mmio.len, 1); |
| 1430 | } | 1624 | } |
| 1431 | 1625 | ||
| 1432 | switch (emulated) { | 1626 | switch (emulated) { |
| @@ -1570,8 +1764,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
| 1570 | } | 1764 | } |
| 1571 | #endif | 1765 | #endif |
| 1572 | #ifdef CONFIG_ALTIVEC | 1766 | #ifdef CONFIG_ALTIVEC |
| 1573 | if (vcpu->arch.mmio_vmx_copy_nums > 0) | 1767 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { |
| 1574 | vcpu->arch.mmio_vmx_copy_nums--; | 1768 | vcpu->arch.mmio_vmx_copy_nums--; |
| 1769 | vcpu->arch.mmio_vmx_offset++; | ||
| 1770 | } | ||
| 1575 | 1771 | ||
| 1576 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { | 1772 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { |
| 1577 | r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run); | 1773 | r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run); |
| @@ -1784,16 +1980,16 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
| 1784 | void __user *argp = (void __user *)arg; | 1980 | void __user *argp = (void __user *)arg; |
| 1785 | long r; | 1981 | long r; |
| 1786 | 1982 | ||
| 1787 | vcpu_load(vcpu); | ||
| 1788 | |||
| 1789 | switch (ioctl) { | 1983 | switch (ioctl) { |
| 1790 | case KVM_ENABLE_CAP: | 1984 | case KVM_ENABLE_CAP: |
| 1791 | { | 1985 | { |
| 1792 | struct kvm_enable_cap cap; | 1986 | struct kvm_enable_cap cap; |
| 1793 | r = -EFAULT; | 1987 | r = -EFAULT; |
| 1794 | if (copy_from_user(&cap, argp, sizeof(cap))) | 1988 | if (copy_from_user(&cap, argp, sizeof(cap))) |
| 1795 | goto out; | 1989 | goto out; |
| 1990 | vcpu_load(vcpu); | ||
| 1796 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); | 1991 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); |
| 1992 | vcpu_put(vcpu); | ||
| 1797 | break; | 1993 | break; |
| 1798 | } | 1994 | } |
| 1799 | 1995 | ||
| @@ -1815,9 +2011,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
| 1815 | case KVM_DIRTY_TLB: { | 2011 | case KVM_DIRTY_TLB: { |
| 1816 | struct kvm_dirty_tlb dirty; | 2012 | struct kvm_dirty_tlb dirty; |
| 1817 | r = -EFAULT; | 2013 | r = -EFAULT; |
| 1818 | if (copy_from_user(&dirty, argp, sizeof(dirty))) | 2014 | if (copy_from_user(&dirty, argp, sizeof(dirty))) |
| 1819 | goto out; | 2015 | goto out; |
| 2016 | vcpu_load(vcpu); | ||
| 1820 | r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); | 2017 | r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); |
| 2018 | vcpu_put(vcpu); | ||
| 1821 | break; | 2019 | break; |
| 1822 | } | 2020 | } |
| 1823 | #endif | 2021 | #endif |
| @@ -1826,7 +2024,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
| 1826 | } | 2024 | } |
| 1827 | 2025 | ||
| 1828 | out: | 2026 | out: |
| 1829 | vcpu_put(vcpu); | ||
| 1830 | return r; | 2027 | return r; |
| 1831 | } | 2028 | } |
| 1832 | 2029 | ||
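With this change the vcpu is no longer pinned for the whole ioctl; each case that touches vcpu state brackets just that work with vcpu_load()/vcpu_put(), and user memory is copied in before the vcpu is loaded, so the -EFAULT path exits holding nothing. The resulting shape, sketched with illustrative names (SOME_IOCTL, struct some_args and do_work() are not symbols from this patch):

    long arch_vcpu_ioctl_shape(struct kvm_vcpu *vcpu, unsigned int ioctl,
                               void __user *argp)
    {
            struct some_args a;
            long r = -EINVAL;

            switch (ioctl) {
            case SOME_IOCTL:
                    r = -EFAULT;
                    if (copy_from_user(&a, argp, sizeof(a)))
                            goto out;       /* nothing held here */
                    vcpu_load(vcpu);        /* pin only around real work */
                    r = do_work(vcpu, &a);
                    vcpu_put(vcpu);
                    break;
            default:
                    break;
            }
    out:
            return r;
    }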
diff --git a/arch/powerpc/kvm/tm.S b/arch/powerpc/kvm/tm.S new file mode 100644 index 000000000000..90e330f21356 --- /dev/null +++ b/arch/powerpc/kvm/tm.S | |||
| @@ -0,0 +1,384 @@ | |||
| 1 | /* | ||
| 2 | * This program is free software; you can redistribute it and/or modify | ||
| 3 | * it under the terms of the GNU General Public License, version 2, as | ||
| 4 | * published by the Free Software Foundation. | ||
| 5 | * | ||
| 6 | * This program is distributed in the hope that it will be useful, | ||
| 7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 9 | * GNU General Public License for more details. | ||
| 10 | * | ||
| 11 | * Derived from book3s_hv_rmhandlers.S, which is: | ||
| 12 | * | ||
| 13 | * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> | ||
| 14 | * | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <asm/reg.h> | ||
| 18 | #include <asm/ppc_asm.h> | ||
| 19 | #include <asm/asm-offsets.h> | ||
| 20 | #include <asm/export.h> | ||
| 21 | #include <asm/tm.h> | ||
| 22 | #include <asm/cputable.h> | ||
| 23 | |||
| 24 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 25 | #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) | ||
| 26 | |||
| 27 | /* | ||
| 28 | * Save transactional state and TM-related registers. | ||
| 29 | * Called with: | ||
| 30 | * - r3 pointing to the vcpu struct | ||
| 31 | * - r4 containing the MSR with the current TS bits: | ||
| 32 | * (for HV KVM it is VCPU_MSR; for PR KVM it is the host MSR). | ||
| 33 | * This can modify all checkpointed registers, but | ||
| 34 | * restores r1, r2 before exit. | ||
| 35 | */ | ||
| 36 | _GLOBAL(__kvmppc_save_tm) | ||
| 37 | mflr r0 | ||
| 38 | std r0, PPC_LR_STKOFF(r1) | ||
| 39 | |||
| 40 | /* Turn on TM. */ | ||
| 41 | mfmsr r8 | ||
| 42 | li r0, 1 | ||
| 43 | rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG | ||
| 44 | ori r8, r8, MSR_FP | ||
| 45 | oris r8, r8, (MSR_VEC | MSR_VSX)@h | ||
| 46 | mtmsrd r8 | ||
| 47 | |||
| 48 | rldicl. r4, r4, 64 - MSR_TS_S_LG, 62 | ||
| 49 | beq 1f /* TM not active in guest. */ | ||
| 50 | |||
| 51 | std r1, HSTATE_SCRATCH2(r13) | ||
| 52 | std r3, HSTATE_SCRATCH1(r13) | ||
| 53 | |||
| 54 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE | ||
| 55 | BEGIN_FTR_SECTION | ||
| 56 | /* Emulation of the treclaim instruction needs TEXASR before treclaim */ | ||
| 57 | mfspr r6, SPRN_TEXASR | ||
| 58 | std r6, VCPU_ORIG_TEXASR(r3) | ||
| 59 | END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) | ||
| 60 | #endif | ||
| 61 | |||
| 62 | /* Clear the MSR RI since r1, r13 are all going to be foobar. */ | ||
| 63 | li r5, 0 | ||
| 64 | mtmsrd r5, 1 | ||
| 65 | |||
| 66 | li r3, TM_CAUSE_KVM_RESCHED | ||
| 67 | |||
| 68 | /* All GPRs are volatile at this point. */ | ||
| 69 | TRECLAIM(R3) | ||
| 70 | |||
| 71 | /* Temporarily store r13 and r9 so we have some regs to play with */ | ||
| 72 | SET_SCRATCH0(r13) | ||
| 73 | GET_PACA(r13) | ||
| 74 | std r9, PACATMSCRATCH(r13) | ||
| 75 | ld r9, HSTATE_SCRATCH1(r13) | ||
| 76 | |||
| 77 | /* Get a few more GPRs free. */ | ||
| 78 | std r29, VCPU_GPRS_TM(29)(r9) | ||
| 79 | std r30, VCPU_GPRS_TM(30)(r9) | ||
| 80 | std r31, VCPU_GPRS_TM(31)(r9) | ||
| 81 | |||
| 82 | /* Save away PPR and DSCR soon so we don't run with user values. */ | ||
| 83 | mfspr r31, SPRN_PPR | ||
| 84 | HMT_MEDIUM | ||
| 85 | mfspr r30, SPRN_DSCR | ||
| 86 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE | ||
| 87 | ld r29, HSTATE_DSCR(r13) | ||
| 88 | mtspr SPRN_DSCR, r29 | ||
| 89 | #endif | ||
| 90 | |||
| 91 | /* Save all but r9, r13 & r29-r31 */ | ||
| 92 | reg = 0 | ||
| 93 | .rept 29 | ||
| 94 | .if (reg != 9) && (reg != 13) | ||
| 95 | std reg, VCPU_GPRS_TM(reg)(r9) | ||
| 96 | .endif | ||
| 97 | reg = reg + 1 | ||
| 98 | .endr | ||
| 99 | /* ... now save r13 */ | ||
| 100 | GET_SCRATCH0(r4) | ||
| 101 | std r4, VCPU_GPRS_TM(13)(r9) | ||
| 102 | /* ... and save r9 */ | ||
| 103 | ld r4, PACATMSCRATCH(r13) | ||
| 104 | std r4, VCPU_GPRS_TM(9)(r9) | ||
| 105 | |||
| 106 | /* Reload stack pointer and TOC. */ | ||
| 107 | ld r1, HSTATE_SCRATCH2(r13) | ||
| 108 | ld r2, PACATOC(r13) | ||
| 109 | |||
| 110 | /* Set MSR RI now we have r1 and r13 back. */ | ||
| 111 | li r5, MSR_RI | ||
| 112 | mtmsrd r5, 1 | ||
| 113 | |||
| 114 | /* Save away checkpointed SPRs. */ | ||
| 115 | std r31, VCPU_PPR_TM(r9) | ||
| 116 | std r30, VCPU_DSCR_TM(r9) | ||
| 117 | mflr r5 | ||
| 118 | mfcr r6 | ||
| 119 | mfctr r7 | ||
| 120 | mfspr r8, SPRN_AMR | ||
| 121 | mfspr r10, SPRN_TAR | ||
| 122 | mfxer r11 | ||
| 123 | std r5, VCPU_LR_TM(r9) | ||
| 124 | stw r6, VCPU_CR_TM(r9) | ||
| 125 | std r7, VCPU_CTR_TM(r9) | ||
| 126 | std r8, VCPU_AMR_TM(r9) | ||
| 127 | std r10, VCPU_TAR_TM(r9) | ||
| 128 | std r11, VCPU_XER_TM(r9) | ||
| 129 | |||
| 130 | /* Restore r12 as trap number. */ | ||
| 131 | lwz r12, VCPU_TRAP(r9) | ||
| 132 | |||
| 133 | /* Save FP/VSX. */ | ||
| 134 | addi r3, r9, VCPU_FPRS_TM | ||
| 135 | bl store_fp_state | ||
| 136 | addi r3, r9, VCPU_VRS_TM | ||
| 137 | bl store_vr_state | ||
| 138 | mfspr r6, SPRN_VRSAVE | ||
| 139 | stw r6, VCPU_VRSAVE_TM(r9) | ||
| 140 | 1: | ||
| 141 | /* | ||
| 142 | * We need to save these SPRs after the treclaim so that the software | ||
| 143 | * error code is recorded correctly in the TEXASR. Also the user may | ||
| 144 | * change these outside of a transaction, so they must always be | ||
| 145 | * context switched. | ||
| 146 | */ | ||
| 147 | mfspr r7, SPRN_TEXASR | ||
| 148 | std r7, VCPU_TEXASR(r9) | ||
| 149 | 11: | ||
| 150 | mfspr r5, SPRN_TFHAR | ||
| 151 | mfspr r6, SPRN_TFIAR | ||
| 152 | std r5, VCPU_TFHAR(r9) | ||
| 153 | std r6, VCPU_TFIAR(r9) | ||
| 154 | |||
| 155 | ld r0, PPC_LR_STKOFF(r1) | ||
| 156 | mtlr r0 | ||
| 157 | blr | ||
| 158 | |||
| 159 | /* | ||
| 160 | * _kvmppc_save_tm_pr() is a wrapper around __kvmppc_save_tm(), so that it can | ||
| 161 | * be invoked from C function by PR KVM only. | ||
| 162 | */ | ||
| 163 | _GLOBAL(_kvmppc_save_tm_pr) | ||
| 164 | mflr r5 | ||
| 165 | std r5, PPC_LR_STKOFF(r1) | ||
| 166 | stdu r1, -SWITCH_FRAME_SIZE(r1) | ||
| 167 | SAVE_NVGPRS(r1) | ||
| 168 | |||
| 169 | /* save MSR since TM/math bits might be impacted | ||
| 170 | * by __kvmppc_save_tm(). | ||
| 171 | */ | ||
| 172 | mfmsr r5 | ||
| 173 | SAVE_GPR(5, r1) | ||
| 174 | |||
| 175 | /* also save DSCR/CR/TAR so that they can be recovered later */ | ||
| 176 | mfspr r6, SPRN_DSCR | ||
| 177 | SAVE_GPR(6, r1) | ||
| 178 | |||
| 179 | mfcr r7 | ||
| 180 | stw r7, _CCR(r1) | ||
| 181 | |||
| 182 | mfspr r8, SPRN_TAR | ||
| 183 | SAVE_GPR(8, r1) | ||
| 184 | |||
| 185 | bl __kvmppc_save_tm | ||
| 186 | |||
| 187 | REST_GPR(8, r1) | ||
| 188 | mtspr SPRN_TAR, r8 | ||
| 189 | |||
| 190 | ld r7, _CCR(r1) | ||
| 191 | mtcr r7 | ||
| 192 | |||
| 193 | REST_GPR(6, r1) | ||
| 194 | mtspr SPRN_DSCR, r6 | ||
| 195 | |||
| 196 | /* need to preserve the current MSR's MSR_TS bits */ | ||
| 197 | REST_GPR(5, r1) | ||
| 198 | mfmsr r6 | ||
| 199 | rldicl r6, r6, 64 - MSR_TS_S_LG, 62 | ||
| 200 | rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG | ||
| 201 | mtmsrd r5 | ||
| 202 | |||
| 203 | REST_NVGPRS(r1) | ||
| 204 | addi r1, r1, SWITCH_FRAME_SIZE | ||
| 205 | ld r5, PPC_LR_STKOFF(r1) | ||
| 206 | mtlr r5 | ||
| 207 | blr | ||
| 208 | |||
| 209 | EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr); | ||
| 210 | |||
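The rldicl/rldimi pair near the end of _kvmppc_save_tm_pr() splices the live MSR's two TS (transaction state) bits into the saved MSR image before the mtmsrd, since treclaim has changed the transaction state after the MSR was saved. Assuming the usual MSR_TS_S_LG/MSR_TS_T_LG bit positions from asm/reg.h, the sequence is equivalent to this C sketch:

    /* C equivalent (a sketch) of:
     *   mfmsr  r6
     *   rldicl r6, r6, 64 - MSR_TS_S_LG, 62
     *   rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
     * i.e. keep everything from the saved MSR except the 2-bit TS
     * field, which is taken from the current MSR instead. */
    #define MSR_TS_S_LG     33      /* assumed, per asm/reg.h */
    #define MSR_TS_MASK     (3UL << MSR_TS_S_LG)

    static unsigned long merge_ts_bits(unsigned long saved_msr,
                                       unsigned long cur_msr)
    {
            return (saved_msr & ~MSR_TS_MASK) | (cur_msr & MSR_TS_MASK);
    }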
| 211 | /* | ||
| 212 | * Restore transactional state and TM-related registers. | ||
| 213 | * Called with: | ||
| 214 | * - r3 pointing to the vcpu struct. | ||
| 215 | * - r4 is the guest MSR with desired TS bits: | ||
| 216 | * For HV KVM, it is VCPU_MSR | ||
| 217 | * For PR KVM, it is provided by caller | ||
| 218 | * This potentially modifies all checkpointed registers. | ||
| 219 | * It restores r1, r2 from the PACA. | ||
| 220 | */ | ||
| 221 | _GLOBAL(__kvmppc_restore_tm) | ||
| 222 | mflr r0 | ||
| 223 | std r0, PPC_LR_STKOFF(r1) | ||
| 224 | |||
| 225 | /* Turn on TM/FP/VSX/VMX so we can restore them. */ | ||
| 226 | mfmsr r5 | ||
| 227 | li r6, MSR_TM >> 32 | ||
| 228 | sldi r6, r6, 32 | ||
| 229 | or r5, r5, r6 | ||
| 230 | ori r5, r5, MSR_FP | ||
| 231 | oris r5, r5, (MSR_VEC | MSR_VSX)@h | ||
| 232 | mtmsrd r5 | ||
| 233 | |||
| 234 | /* | ||
| 235 | * The user may change these outside of a transaction, so they must | ||
| 236 | * always be context switched. | ||
| 237 | */ | ||
| 238 | ld r5, VCPU_TFHAR(r3) | ||
| 239 | ld r6, VCPU_TFIAR(r3) | ||
| 240 | ld r7, VCPU_TEXASR(r3) | ||
| 241 | mtspr SPRN_TFHAR, r5 | ||
| 242 | mtspr SPRN_TFIAR, r6 | ||
| 243 | mtspr SPRN_TEXASR, r7 | ||
| 244 | |||
| 245 | mr r5, r4 | ||
| 246 | rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 | ||
| 247 | beqlr /* TM not active in guest */ | ||
| 248 | std r1, HSTATE_SCRATCH2(r13) | ||
| 249 | |||
| 250 | /* Make sure the failure summary is set, otherwise we'll program check | ||
| 251 | * when we trechkpt. It's possible that this might not have been set | ||
| 252 | * on a kvmppc_set_one_reg() call but we shouldn't let this crash the | ||
| 253 | * host. | ||
| 254 | */ | ||
| 255 | oris r7, r7, (TEXASR_FS)@h | ||
| 256 | mtspr SPRN_TEXASR, r7 | ||
| 257 | |||
| 258 | /* | ||
| 259 | * We need to load up the checkpointed state for the guest. | ||
| 260 | * We need to do this early as it will blow away any GPRs, VSRs and | ||
| 261 | * some SPRs. | ||
| 262 | */ | ||
| 263 | |||
| 264 | mr r31, r3 | ||
| 265 | addi r3, r31, VCPU_FPRS_TM | ||
| 266 | bl load_fp_state | ||
| 267 | addi r3, r31, VCPU_VRS_TM | ||
| 268 | bl load_vr_state | ||
| 269 | mr r3, r31 | ||
| 270 | lwz r7, VCPU_VRSAVE_TM(r3) | ||
| 271 | mtspr SPRN_VRSAVE, r7 | ||
| 272 | |||
| 273 | ld r5, VCPU_LR_TM(r3) | ||
| 274 | lwz r6, VCPU_CR_TM(r3) | ||
| 275 | ld r7, VCPU_CTR_TM(r3) | ||
| 276 | ld r8, VCPU_AMR_TM(r3) | ||
| 277 | ld r9, VCPU_TAR_TM(r3) | ||
| 278 | ld r10, VCPU_XER_TM(r3) | ||
| 279 | mtlr r5 | ||
| 280 | mtcr r6 | ||
| 281 | mtctr r7 | ||
| 282 | mtspr SPRN_AMR, r8 | ||
| 283 | mtspr SPRN_TAR, r9 | ||
| 284 | mtxer r10 | ||
| 285 | |||
| 286 | /* | ||
| 287 | * Load up PPR and DSCR values but don't put them in the actual SPRs | ||
| 288 | * till the last moment to avoid running with userspace PPR and DSCR for | ||
| 289 | * too long. | ||
| 290 | */ | ||
| 291 | ld r29, VCPU_DSCR_TM(r3) | ||
| 292 | ld r30, VCPU_PPR_TM(r3) | ||
| 293 | |||
| 294 | std r2, PACATMSCRATCH(r13) /* Save TOC */ | ||
| 295 | |||
| 296 | /* Clear the MSR RI since r1, r13 are all going to be foobar. */ | ||
| 297 | li r5, 0 | ||
| 298 | mtmsrd r5, 1 | ||
| 299 | |||
| 300 | /* Load GPRs r0-r28 */ | ||
| 301 | reg = 0 | ||
| 302 | .rept 29 | ||
| 303 | ld reg, VCPU_GPRS_TM(reg)(r31) | ||
| 304 | reg = reg + 1 | ||
| 305 | .endr | ||
| 306 | |||
| 307 | mtspr SPRN_DSCR, r29 | ||
| 308 | mtspr SPRN_PPR, r30 | ||
| 309 | |||
| 310 | /* Load final GPRs */ | ||
| 311 | ld 29, VCPU_GPRS_TM(29)(r31) | ||
| 312 | ld 30, VCPU_GPRS_TM(30)(r31) | ||
| 313 | ld 31, VCPU_GPRS_TM(31)(r31) | ||
| 314 | |||
| 315 | /* TM checkpointed state is now setup. All GPRs are now volatile. */ | ||
| 316 | TRECHKPT | ||
| 317 | |||
| 318 | /* Now let's get back the state we need. */ | ||
| 319 | HMT_MEDIUM | ||
| 320 | GET_PACA(r13) | ||
| 321 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE | ||
| 322 | ld r29, HSTATE_DSCR(r13) | ||
| 323 | mtspr SPRN_DSCR, r29 | ||
| 324 | #endif | ||
| 325 | ld r1, HSTATE_SCRATCH2(r13) | ||
| 326 | ld r2, PACATMSCRATCH(r13) | ||
| 327 | |||
| 328 | /* Set the MSR RI since we have our registers back. */ | ||
| 329 | li r5, MSR_RI | ||
| 330 | mtmsrd r5, 1 | ||
| 331 | ld r0, PPC_LR_STKOFF(r1) | ||
| 332 | mtlr r0 | ||
| 333 | blr | ||
| 334 | |||
| 335 | /* | ||
| 336 | * _kvmppc_restore_tm_pr() is a wrapper around __kvmppc_restore_tm(), so that it | ||
| 337 | * can be invoked from C function by PR KVM only. | ||
| 338 | */ | ||
| 339 | _GLOBAL(_kvmppc_restore_tm_pr) | ||
| 340 | mflr r5 | ||
| 341 | std r5, PPC_LR_STKOFF(r1) | ||
| 342 | stdu r1, -SWITCH_FRAME_SIZE(r1) | ||
| 343 | SAVE_NVGPRS(r1) | ||
| 344 | |||
| 345 | /* save MSR to avoid the TM/math bits being changed */ | ||
| 346 | mfmsr r5 | ||
| 347 | SAVE_GPR(5, r1) | ||
| 348 | |||
| 349 | /* also save DSCR/CR/TAR so that they can be recovered later */ | ||
| 350 | mfspr r6, SPRN_DSCR | ||
| 351 | SAVE_GPR(6, r1) | ||
| 352 | |||
| 353 | mfcr r7 | ||
| 354 | stw r7, _CCR(r1) | ||
| 355 | |||
| 356 | mfspr r8, SPRN_TAR | ||
| 357 | SAVE_GPR(8, r1) | ||
| 358 | |||
| 359 | bl __kvmppc_restore_tm | ||
| 360 | |||
| 361 | REST_GPR(8, r1) | ||
| 362 | mtspr SPRN_TAR, r8 | ||
| 363 | |||
| 364 | ld r7, _CCR(r1) | ||
| 365 | mtcr r7 | ||
| 366 | |||
| 367 | REST_GPR(6, r1) | ||
| 368 | mtspr SPRN_DSCR, r6 | ||
| 369 | |||
| 370 | /* need to preserve the current MSR's MSR_TS bits */ | ||
| 371 | REST_GPR(5, r1) | ||
| 372 | mfmsr r6 | ||
| 373 | rldicl r6, r6, 64 - MSR_TS_S_LG, 62 | ||
| 374 | rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG | ||
| 375 | mtmsrd r5 | ||
| 376 | |||
| 377 | REST_NVGPRS(r1) | ||
| 378 | addi r1, r1, SWITCH_FRAME_SIZE | ||
| 379 | ld r5, PPC_LR_STKOFF(r1) | ||
| 380 | mtlr r5 | ||
| 381 | blr | ||
| 382 | |||
| 383 | EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr); | ||
| 384 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | ||
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 42e581a268e1..f12680c9b947 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig | |||
| @@ -32,6 +32,7 @@ config RISCV | |||
| 32 | select HAVE_MEMBLOCK_NODE_MAP | 32 | select HAVE_MEMBLOCK_NODE_MAP |
| 33 | select HAVE_DMA_CONTIGUOUS | 33 | select HAVE_DMA_CONTIGUOUS |
| 34 | select HAVE_GENERIC_DMA_COHERENT | 34 | select HAVE_GENERIC_DMA_COHERENT |
| 35 | select HAVE_PERF_EVENTS | ||
| 35 | select IRQ_DOMAIN | 36 | select IRQ_DOMAIN |
| 36 | select NO_BOOTMEM | 37 | select NO_BOOTMEM |
| 37 | select RISCV_ISA_A if SMP | 38 | select RISCV_ISA_A if SMP |
| @@ -193,6 +194,19 @@ config RISCV_ISA_C | |||
| 193 | config RISCV_ISA_A | 194 | config RISCV_ISA_A |
| 194 | def_bool y | 195 | def_bool y |
| 195 | 196 | ||
| 197 | menu "supported PMU type" | ||
| 198 | depends on PERF_EVENTS | ||
| 199 | |||
| 200 | config RISCV_BASE_PMU | ||
| 201 | bool "Base Performance Monitoring Unit" | ||
| 202 | default y | ||
| 203 | help | ||
| 204 | A base PMU that serves as a reference implementation and has limited | ||
| 205 | feature of perf. It can run on any RISC-V machines so serves as the | ||
| 206 | fallback, but this option can also be disable to reduce kernel size. | ||
| 207 | |||
| 208 | endmenu | ||
| 209 | |||
| 196 | endmenu | 210 | endmenu |
| 197 | 211 | ||
| 198 | menu "Kernel type" | 212 | menu "Kernel type" |
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 76e958a5414a..6d4a5f6c3f4f 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile | |||
| @@ -71,6 +71,9 @@ KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) | |||
| 71 | # architectures. It's faster to have GCC emit only aligned accesses. | 71 | # architectures. It's faster to have GCC emit only aligned accesses. |
| 72 | KBUILD_CFLAGS += $(call cc-option,-mstrict-align) | 72 | KBUILD_CFLAGS += $(call cc-option,-mstrict-align) |
| 73 | 73 | ||
| 74 | # arch specific predefines for sparse | ||
| 75 | CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS) | ||
| 76 | |||
| 74 | head-y := arch/riscv/kernel/head.o | 77 | head-y := arch/riscv/kernel/head.o |
| 75 | 78 | ||
| 76 | core-y += arch/riscv/kernel/ arch/riscv/mm/ | 79 | core-y += arch/riscv/kernel/ arch/riscv/mm/ |
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index bca0eee733b0..07326466871b 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig | |||
| @@ -44,6 +44,7 @@ CONFIG_INPUT_MOUSEDEV=y | |||
| 44 | CONFIG_SERIAL_8250=y | 44 | CONFIG_SERIAL_8250=y |
| 45 | CONFIG_SERIAL_8250_CONSOLE=y | 45 | CONFIG_SERIAL_8250_CONSOLE=y |
| 46 | CONFIG_SERIAL_OF_PLATFORM=y | 46 | CONFIG_SERIAL_OF_PLATFORM=y |
| 47 | CONFIG_HVC_RISCV_SBI=y | ||
| 47 | # CONFIG_PTP_1588_CLOCK is not set | 48 | # CONFIG_PTP_1588_CLOCK is not set |
| 48 | CONFIG_DRM=y | 49 | CONFIG_DRM=y |
| 49 | CONFIG_DRM_RADEON=y | 50 | CONFIG_DRM_RADEON=y |
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index 4286a5f83876..576ffdca06ba 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild | |||
| @@ -25,6 +25,7 @@ generic-y += kdebug.h | |||
| 25 | generic-y += kmap_types.h | 25 | generic-y += kmap_types.h |
| 26 | generic-y += kvm_para.h | 26 | generic-y += kvm_para.h |
| 27 | generic-y += local.h | 27 | generic-y += local.h |
| 28 | generic-y += local64.h | ||
| 28 | generic-y += mm-arch-hooks.h | 29 | generic-y += mm-arch-hooks.h |
| 29 | generic-y += mman.h | 30 | generic-y += mman.h |
| 30 | generic-y += module.h | 31 | generic-y += module.h |
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h index efd89a88d2d0..8f13074413a7 100644 --- a/arch/riscv/include/asm/cacheflush.h +++ b/arch/riscv/include/asm/cacheflush.h | |||
| @@ -47,7 +47,7 @@ static inline void flush_dcache_page(struct page *page) | |||
| 47 | 47 | ||
| 48 | #else /* CONFIG_SMP */ | 48 | #else /* CONFIG_SMP */ |
| 49 | 49 | ||
| 50 | #define flush_icache_all() sbi_remote_fence_i(0) | 50 | #define flush_icache_all() sbi_remote_fence_i(NULL) |
| 51 | void flush_icache_mm(struct mm_struct *mm, bool local); | 51 | void flush_icache_mm(struct mm_struct *mm, bool local); |
| 52 | 52 | ||
| 53 | #endif /* CONFIG_SMP */ | 53 | #endif /* CONFIG_SMP */ |
diff --git a/arch/riscv/include/asm/perf_event.h b/arch/riscv/include/asm/perf_event.h new file mode 100644 index 000000000000..0e638a0c3feb --- /dev/null +++ b/arch/riscv/include/asm/perf_event.h | |||
| @@ -0,0 +1,84 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (C) 2018 SiFive | ||
| 4 | * Copyright (C) 2018 Andes Technology Corporation | ||
| 5 | * | ||
| 6 | */ | ||
| 7 | |||
| 8 | #ifndef _ASM_RISCV_PERF_EVENT_H | ||
| 9 | #define _ASM_RISCV_PERF_EVENT_H | ||
| 10 | |||
| 11 | #include <linux/perf_event.h> | ||
| 12 | #include <linux/ptrace.h> | ||
| 13 | |||
| 14 | #define RISCV_BASE_COUNTERS 2 | ||
| 15 | |||
| 16 | /* | ||
| 17 | * Each PMU configuration must define RISCV_MAX_COUNTERS. | ||
| 18 | */ | ||
| 19 | |||
| 20 | #ifdef CONFIG_RISCV_BASE_PMU | ||
| 21 | #define RISCV_MAX_COUNTERS 2 | ||
| 22 | #endif | ||
| 23 | |||
| 24 | #ifndef RISCV_MAX_COUNTERS | ||
| 25 | #error "Please provide a valid RISCV_MAX_COUNTERS for the PMU." | ||
| 26 | #endif | ||
| 27 | |||
| 28 | /* | ||
| 29 | * These are the indices of bits in the counteren register *minus* 1, | ||
| 30 | * except for cycle. It would be cleaner if they mapped directly to the | ||
| 31 | * counteren bit definitions, but there is a *time* register at | ||
| 32 | * counteren[1], and space in the per-cpu structure is scarce. | ||
| 33 | * | ||
| 34 | * According to the spec, an implementation can support counters up to | ||
| 35 | * mhpmcounter31, but since many high-end processors have at most 6 | ||
| 36 | * general PMCs, we only define up to MHPMCOUNTER8 here. | ||
| 37 | */ | ||
| 38 | #define RISCV_PMU_CYCLE 0 | ||
| 39 | #define RISCV_PMU_INSTRET 1 | ||
| 40 | #define RISCV_PMU_MHPMCOUNTER3 2 | ||
| 41 | #define RISCV_PMU_MHPMCOUNTER4 3 | ||
| 42 | #define RISCV_PMU_MHPMCOUNTER5 4 | ||
| 43 | #define RISCV_PMU_MHPMCOUNTER6 5 | ||
| 44 | #define RISCV_PMU_MHPMCOUNTER7 6 | ||
| 45 | #define RISCV_PMU_MHPMCOUNTER8 7 | ||
| 46 | |||
| 47 | #define RISCV_OP_UNSUPP (-EOPNOTSUPP) | ||
| 48 | |||
| 49 | struct cpu_hw_events { | ||
| 50 | /* number of currently enabled events */ | ||
| 51 | int n_events; | ||
| 52 | /* currently enabled events */ | ||
| 53 | struct perf_event *events[RISCV_MAX_COUNTERS]; | ||
| 54 | /* vendor-defined PMU data */ | ||
| 55 | void *platform; | ||
| 56 | }; | ||
| 57 | |||
| 58 | struct riscv_pmu { | ||
| 59 | struct pmu *pmu; | ||
| 60 | |||
| 61 | /* generic hw/cache events table */ | ||
| 62 | const int *hw_events; | ||
| 63 | const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] | ||
| 64 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
| 65 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
| 66 | /* method used to map hw/cache events */ | ||
| 67 | int (*map_hw_event)(u64 config); | ||
| 68 | int (*map_cache_event)(u64 config); | ||
| 69 | |||
| 70 | /* max generic hw events in map */ | ||
| 71 | int max_events; | ||
| 72 | /* total number of counters: 2 (base) + x (general) */ | ||
| 73 | int num_counters; | ||
| 74 | /* the width of the counters, in bits */ | ||
| 75 | int counter_width; | ||
| 76 | |||
| 77 | /* vendor-defined PMU features */ | ||
| 78 | void *platform; | ||
| 79 | |||
| 80 | irqreturn_t (*handle_irq)(int irq_num, void *dev); | ||
| 81 | int irq; | ||
| 82 | }; | ||
| 83 | |||
| 84 | #endif /* _ASM_RISCV_PERF_EVENT_H */ | ||
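A vendor port fills in one struct riscv_pmu and makes the global pointer refer to it (the flow is described in Documentation/riscv/pmu.txt). A hedged sketch of a minimal description; the my_soc_* names and the counter parameters are illustrative only:

    static const int my_soc_hw_event_map[] = {
            [PERF_COUNT_HW_CPU_CYCLES]      = RISCV_PMU_CYCLE,
            [PERF_COUNT_HW_INSTRUCTIONS]    = RISCV_PMU_INSTRET,
            /* everything else -> RISCV_OP_UNSUPP, or real event codes */
    };

    static int my_soc_map_hw_event(u64 config)
    {
            if (config >= ARRAY_SIZE(my_soc_hw_event_map))
                    return -EINVAL;
            return my_soc_hw_event_map[config];
    }

    static const struct riscv_pmu my_soc_pmu = {
            .hw_events      = my_soc_hw_event_map,
            .map_hw_event   = my_soc_map_hw_event,
            .max_events     = ARRAY_SIZE(my_soc_hw_event_map),
            .num_counters   = RISCV_BASE_COUNTERS,  /* + vendor PMCs */
            .counter_width  = 63,
            .irq            = -1,   /* no overflow interrupt */
    };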
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 7b209aec355d..85c2d8bae957 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h | |||
| @@ -49,7 +49,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, | |||
| 49 | 49 | ||
| 50 | #include <asm/sbi.h> | 50 | #include <asm/sbi.h> |
| 51 | 51 | ||
| 52 | #define flush_tlb_all() sbi_remote_sfence_vma(0, 0, -1) | 52 | #define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1) |
| 53 | #define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0) | 53 | #define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0) |
| 54 | #define flush_tlb_range(vma, start, end) \ | 54 | #define flush_tlb_range(vma, start, end) \ |
| 55 | sbi_remote_sfence_vma(mm_cpumask((vma)->vm_mm)->bits, \ | 55 | sbi_remote_sfence_vma(mm_cpumask((vma)->vm_mm)->bits, \ |
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 14b0b22fb578..473cfc84e412 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h | |||
| @@ -392,19 +392,21 @@ do { \ | |||
| 392 | }) | 392 | }) |
| 393 | 393 | ||
| 394 | 394 | ||
| 395 | extern unsigned long __must_check __copy_user(void __user *to, | 395 | extern unsigned long __must_check __asm_copy_to_user(void __user *to, |
| 396 | const void *from, unsigned long n); | ||
| 397 | extern unsigned long __must_check __asm_copy_from_user(void *to, | ||
| 396 | const void __user *from, unsigned long n); | 398 | const void __user *from, unsigned long n); |
| 397 | 399 | ||
| 398 | static inline unsigned long | 400 | static inline unsigned long |
| 399 | raw_copy_from_user(void *to, const void __user *from, unsigned long n) | 401 | raw_copy_from_user(void *to, const void __user *from, unsigned long n) |
| 400 | { | 402 | { |
| 401 | return __copy_user(to, from, n); | 403 | return __asm_copy_from_user(to, from, n); |
| 402 | } | 404 | } |
| 403 | 405 | ||
| 404 | static inline unsigned long | 406 | static inline unsigned long |
| 405 | raw_copy_to_user(void __user *to, const void *from, unsigned long n) | 407 | raw_copy_to_user(void __user *to, const void *from, unsigned long n) |
| 406 | { | 408 | { |
| 407 | return __copy_user(to, from, n); | 409 | return __asm_copy_to_user(to, from, n); |
| 408 | } | 410 | } |
| 409 | 411 | ||
| 410 | extern long strncpy_from_user(char *dest, const char __user *src, long count); | 412 | extern long strncpy_from_user(char *dest, const char __user *src, long count); |
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 8586dd96c2f0..e1274fc03af4 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile | |||
| @@ -39,4 +39,6 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o | |||
| 39 | obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o | 39 | obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o |
| 40 | obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o | 40 | obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o |
| 41 | 41 | ||
| 42 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o | ||
| 43 | |||
| 42 | clean: | 44 | clean: |
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S index ce9bdc57a2a1..5721624886a1 100644 --- a/arch/riscv/kernel/mcount.S +++ b/arch/riscv/kernel/mcount.S | |||
| @@ -126,5 +126,5 @@ do_trace: | |||
| 126 | RESTORE_ABI_STATE | 126 | RESTORE_ABI_STATE |
| 127 | ret | 127 | ret |
| 128 | ENDPROC(_mcount) | 128 | ENDPROC(_mcount) |
| 129 | EXPORT_SYMBOL(_mcount) | ||
| 130 | #endif | 129 | #endif |
| 130 | EXPORT_SYMBOL(_mcount) | ||
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 5dddba301d0a..1d5e9b934b8c 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c | |||
| @@ -17,6 +17,17 @@ | |||
| 17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
| 18 | #include <linux/moduleloader.h> | 18 | #include <linux/moduleloader.h> |
| 19 | 19 | ||
| 20 | static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v) | ||
| 21 | { | ||
| 22 | if (v != (u32)v) { | ||
| 23 | pr_err("%s: value %016llx out of range for 32-bit field\n", | ||
| 24 | me->name, v); | ||
| 25 | return -EINVAL; | ||
| 26 | } | ||
| 27 | *location = v; | ||
| 28 | return 0; | ||
| 29 | } | ||
| 30 | |||
| 20 | static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v) | 31 | static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v) |
| 21 | { | 32 | { |
| 22 | *(u64 *)location = v; | 33 | *(u64 *)location = v; |
| @@ -265,6 +276,7 @@ static int apply_r_riscv_sub32_rela(struct module *me, u32 *location, | |||
| 265 | 276 | ||
| 266 | static int (*reloc_handlers_rela[]) (struct module *me, u32 *location, | 277 | static int (*reloc_handlers_rela[]) (struct module *me, u32 *location, |
| 267 | Elf_Addr v) = { | 278 | Elf_Addr v) = { |
| 279 | [R_RISCV_32] = apply_r_riscv_32_rela, | ||
| 268 | [R_RISCV_64] = apply_r_riscv_64_rela, | 280 | [R_RISCV_64] = apply_r_riscv_64_rela, |
| 269 | [R_RISCV_BRANCH] = apply_r_riscv_branch_rela, | 281 | [R_RISCV_BRANCH] = apply_r_riscv_branch_rela, |
| 270 | [R_RISCV_JAL] = apply_r_riscv_jal_rela, | 282 | [R_RISCV_JAL] = apply_r_riscv_jal_rela, |
diff --git a/arch/riscv/kernel/perf_event.c b/arch/riscv/kernel/perf_event.c new file mode 100644 index 000000000000..b0e10c4e9f77 --- /dev/null +++ b/arch/riscv/kernel/perf_event.c | |||
| @@ -0,0 +1,485 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> | ||
| 4 | * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar | ||
| 5 | * Copyright (C) 2009 Jaswinder Singh Rajput | ||
| 6 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter | ||
| 7 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra | ||
| 8 | * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> | ||
| 9 | * Copyright (C) 2009 Google, Inc., Stephane Eranian | ||
| 10 | * Copyright 2014 Tilera Corporation. All Rights Reserved. | ||
| 11 | * Copyright (C) 2018 Andes Technology Corporation | ||
| 12 | * | ||
| 13 | * Perf_events support for RISC-V platforms. | ||
| 14 | * | ||
| 15 | * Since the spec (as of now, Priv-Spec 1.10) does not provide enough | ||
| 16 | * functionality for perf events to work fully, this file provides | ||
| 17 | * only the very basic framework. | ||
| 18 | * | ||
| 19 | * For platform ports, please check Documentation/riscv/pmu.txt. | ||
| 20 | * | ||
| 21 | * The copyright lines above include those of x86 and tile. | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <linux/kprobes.h> | ||
| 25 | #include <linux/kernel.h> | ||
| 26 | #include <linux/kdebug.h> | ||
| 27 | #include <linux/mutex.h> | ||
| 28 | #include <linux/bitmap.h> | ||
| 29 | #include <linux/irq.h> | ||
| 30 | #include <linux/interrupt.h> | ||
| 31 | #include <linux/perf_event.h> | ||
| 32 | #include <linux/atomic.h> | ||
| 33 | #include <linux/of.h> | ||
| 34 | #include <asm/perf_event.h> | ||
| 35 | |||
| 36 | static const struct riscv_pmu *riscv_pmu __read_mostly; | ||
| 37 | static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); | ||
| 38 | |||
| 39 | /* | ||
| 40 | * Hardware & cache maps and their methods | ||
| 41 | */ | ||
| 42 | |||
| 43 | static const int riscv_hw_event_map[] = { | ||
| 44 | [PERF_COUNT_HW_CPU_CYCLES] = RISCV_PMU_CYCLE, | ||
| 45 | [PERF_COUNT_HW_INSTRUCTIONS] = RISCV_PMU_INSTRET, | ||
| 46 | [PERF_COUNT_HW_CACHE_REFERENCES] = RISCV_OP_UNSUPP, | ||
| 47 | [PERF_COUNT_HW_CACHE_MISSES] = RISCV_OP_UNSUPP, | ||
| 48 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = RISCV_OP_UNSUPP, | ||
| 49 | [PERF_COUNT_HW_BRANCH_MISSES] = RISCV_OP_UNSUPP, | ||
| 50 | [PERF_COUNT_HW_BUS_CYCLES] = RISCV_OP_UNSUPP, | ||
| 51 | }; | ||
| 52 | |||
| 53 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
| 54 | static const int riscv_cache_event_map[PERF_COUNT_HW_CACHE_MAX] | ||
| 55 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
| 56 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
| 57 | [C(L1D)] = { | ||
| 58 | [C(OP_READ)] = { | ||
| 59 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 60 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 61 | }, | ||
| 62 | [C(OP_WRITE)] = { | ||
| 63 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 64 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 65 | }, | ||
| 66 | [C(OP_PREFETCH)] = { | ||
| 67 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 68 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 69 | }, | ||
| 70 | }, | ||
| 71 | [C(L1I)] = { | ||
| 72 | [C(OP_READ)] = { | ||
| 73 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 74 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 75 | }, | ||
| 76 | [C(OP_WRITE)] = { | ||
| 77 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 78 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 79 | }, | ||
| 80 | [C(OP_PREFETCH)] = { | ||
| 81 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 82 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 83 | }, | ||
| 84 | }, | ||
| 85 | [C(LL)] = { | ||
| 86 | [C(OP_READ)] = { | ||
| 87 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 88 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 89 | }, | ||
| 90 | [C(OP_WRITE)] = { | ||
| 91 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 92 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 93 | }, | ||
| 94 | [C(OP_PREFETCH)] = { | ||
| 95 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 96 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 97 | }, | ||
| 98 | }, | ||
| 99 | [C(DTLB)] = { | ||
| 100 | [C(OP_READ)] = { | ||
| 101 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 102 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 103 | }, | ||
| 104 | [C(OP_WRITE)] = { | ||
| 105 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 106 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 107 | }, | ||
| 108 | [C(OP_PREFETCH)] = { | ||
| 109 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 110 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 111 | }, | ||
| 112 | }, | ||
| 113 | [C(ITLB)] = { | ||
| 114 | [C(OP_READ)] = { | ||
| 115 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 116 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 117 | }, | ||
| 118 | [C(OP_WRITE)] = { | ||
| 119 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 120 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 121 | }, | ||
| 122 | [C(OP_PREFETCH)] = { | ||
| 123 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 124 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 125 | }, | ||
| 126 | }, | ||
| 127 | [C(BPU)] = { | ||
| 128 | [C(OP_READ)] = { | ||
| 129 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 130 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 131 | }, | ||
| 132 | [C(OP_WRITE)] = { | ||
| 133 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 134 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 135 | }, | ||
| 136 | [C(OP_PREFETCH)] = { | ||
| 137 | [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, | ||
| 138 | [C(RESULT_MISS)] = RISCV_OP_UNSUPP, | ||
| 139 | }, | ||
| 140 | }, | ||
| 141 | }; | ||
| 142 | |||
| 143 | static int riscv_map_hw_event(u64 config) | ||
| 144 | { | ||
| 145 | if (config >= riscv_pmu->max_events) | ||
| 146 | return -EINVAL; | ||
| 147 | |||
| 148 | return riscv_pmu->hw_events[config]; | ||
| 149 | } | ||
| 150 | |||
| 151 | int riscv_map_cache_decode(u64 config, unsigned int *type, | ||
| 152 | unsigned int *op, unsigned int *result) | ||
| 153 | { | ||
| 154 | return -ENOENT; | ||
| 155 | } | ||
| 156 | |||
| 157 | static int riscv_map_cache_event(u64 config) | ||
| 158 | { | ||
| 159 | unsigned int type, op, result; | ||
| 160 | int err = -ENOENT; | ||
| 161 | int code; | ||
| 162 | |||
| 163 | err = riscv_map_cache_decode(config, &type, &op, &result); | ||
| 164 | if (!riscv_pmu->cache_events || err) | ||
| 165 | return err; | ||
| 166 | |||
| 167 | if (type >= PERF_COUNT_HW_CACHE_MAX || | ||
| 168 | op >= PERF_COUNT_HW_CACHE_OP_MAX || | ||
| 169 | result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | ||
| 170 | return -EINVAL; | ||
| 171 | |||
| 172 | code = (*riscv_pmu->cache_events)[type][op][result]; | ||
| 173 | if (code == RISCV_OP_UNSUPP) | ||
| 174 | return -EINVAL; | ||
| 175 | |||
| 176 | return code; | ||
| 177 | } | ||
| 178 | |||
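riscv_map_cache_decode() below is deliberately a stub (always -ENOENT), so every cache event is rejected. For PERF_TYPE_HW_CACHE, the generic perf ABI packs the cache id, op and result into attr.config one byte each, so a port that supports cache events could decode along these lines (a sketch, following the packing documented in include/uapi/linux/perf_event.h):

    /* PERF_TYPE_HW_CACHE packs attr.config as:
     *   config = id | (op << 8) | (result << 16)
     * A port would return 0 here and let riscv_map_cache_event() look
     * the triple up in its cache_events table. */
    int riscv_map_cache_decode(u64 config, unsigned int *type,
                               unsigned int *op, unsigned int *result)
    {
            *type   = config & 0xff;
            *op     = (config >> 8) & 0xff;
            *result = (config >> 16) & 0xff;
            return 0;
    }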
| 179 | /* | ||
| 180 | * Low-level functions: reading/writing counters | ||
| 181 | */ | ||
| 182 | |||
| 183 | static inline u64 read_counter(int idx) | ||
| 184 | { | ||
| 185 | u64 val = 0; | ||
| 186 | |||
| 187 | switch (idx) { | ||
| 188 | case RISCV_PMU_CYCLE: | ||
| 189 | val = csr_read(cycle); | ||
| 190 | break; | ||
| 191 | case RISCV_PMU_INSTRET: | ||
| 192 | val = csr_read(instret); | ||
| 193 | break; | ||
| 194 | default: | ||
| 195 | WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS); | ||
| 196 | return -EINVAL; | ||
| 197 | } | ||
| 198 | |||
| 199 | return val; | ||
| 200 | } | ||
| 201 | |||
| 202 | static inline void write_counter(int idx, u64 value) | ||
| 203 | { | ||
| 204 | /* currently not supported */ | ||
| 205 | WARN_ON_ONCE(1); | ||
| 206 | } | ||
| 207 | |||
| 208 | /* | ||
| 209 | * pmu->read: read and update the counter | ||
| 210 | * | ||
| 211 | * Other architectures' implementations often have an xxx_perf_event_update | ||
| 212 | * routine, which can return counter values when called in the IRQ, but | ||
| 213 | * return void when being called by the pmu->read method. | ||
| 214 | */ | ||
| 215 | static void riscv_pmu_read(struct perf_event *event) | ||
| 216 | { | ||
| 217 | struct hw_perf_event *hwc = &event->hw; | ||
| 218 | u64 prev_raw_count, new_raw_count; | ||
| 219 | u64 oldval; | ||
| 220 | int idx = hwc->idx; | ||
| 221 | u64 delta; | ||
| 222 | |||
| 223 | do { | ||
| 224 | prev_raw_count = local64_read(&hwc->prev_count); | ||
| 225 | new_raw_count = read_counter(idx); | ||
| 226 | |||
| 227 | oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count, | ||
| 228 | new_raw_count); | ||
| 229 | } while (oldval != prev_raw_count); | ||
| 230 | |||
| 231 | /* | ||
| 232 | * delta is the amount by which to update the counter we maintain in the kernel. | ||
| 233 | */ | ||
| 234 | delta = (new_raw_count - prev_raw_count) & | ||
| 235 | ((1ULL << riscv_pmu->counter_width) - 1); | ||
| 236 | local64_add(delta, &event->count); | ||
| 237 | /* | ||
| 238 | * Something like local64_sub(delta, &hwc->period_left) here is | ||
| 239 | * needed if there is an interrupt for perf. | ||
| 240 | */ | ||
| 241 | } | ||
| 242 | |||
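Since the kernel cannot write the counter, each read computes a delta against the previous snapshot and masks it to counter_width bits, so a counter narrower than 64 bits still yields the right difference across a wrap. A worked example of the masking:

    /* With counter_width = 48:
     *   prev = 0xffffffffff00, cur = 0x100
     *   (cur - prev) & ((1ULL << 48) - 1) == 0x200
     * i.e. the 0x100 ticks up to the wrap plus the 0x100 after it. */
    static unsigned long long counter_delta(unsigned long long prev,
                                            unsigned long long cur,
                                            int width)
    {
            return (cur - prev) & ((1ULL << width) - 1);
    }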
| 243 | /* | ||
| 244 | * State transition functions: | ||
| 245 | * | ||
| 246 | * stop()/start() & add()/del() | ||
| 247 | */ | ||
| 248 | |||
| 249 | /* | ||
| 250 | * pmu->stop: stop the counter | ||
| 251 | */ | ||
| 252 | static void riscv_pmu_stop(struct perf_event *event, int flags) | ||
| 253 | { | ||
| 254 | struct hw_perf_event *hwc = &event->hw; | ||
| 255 | |||
| 256 | WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); | ||
| 257 | hwc->state |= PERF_HES_STOPPED; | ||
| 258 | |||
| 259 | if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { | ||
| 260 | riscv_pmu->pmu->read(event); | ||
| 261 | hwc->state |= PERF_HES_UPTODATE; | ||
| 262 | } | ||
| 263 | } | ||
| 264 | |||
| 265 | /* | ||
| 266 | * pmu->start: start the event. | ||
| 267 | */ | ||
| 268 | static void riscv_pmu_start(struct perf_event *event, int flags) | ||
| 269 | { | ||
| 270 | struct hw_perf_event *hwc = &event->hw; | ||
| 271 | |||
| 272 | if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) | ||
| 273 | return; | ||
| 274 | |||
| 275 | if (flags & PERF_EF_RELOAD) { | ||
| 276 | WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); | ||
| 277 | |||
| 278 | /* | ||
| 279 | * Set the counter to the period of the next interrupt here, | ||
| 280 | * if the PMU supports interrupts. | ||
| 281 | */ | ||
| 282 | } | ||
| 283 | |||
| 284 | hwc->state = 0; | ||
| 285 | perf_event_update_userpage(event); | ||
| 286 | |||
| 287 | /* | ||
| 288 | * Since we cannot write to counters, this serves as an initialization | ||
| 289 | * to the delta-mechanism in pmu->read(); otherwise, the delta would be | ||
| 290 | * wrong when pmu->read is called for the first time. | ||
| 291 | */ | ||
| 292 | local64_set(&hwc->prev_count, read_counter(hwc->idx)); | ||
| 293 | } | ||
| 294 | |||
| 295 | /* | ||
| 296 | * pmu->add: add the event to PMU. | ||
| 297 | */ | ||
| 298 | static int riscv_pmu_add(struct perf_event *event, int flags) | ||
| 299 | { | ||
| 300 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | ||
| 301 | struct hw_perf_event *hwc = &event->hw; | ||
| 302 | |||
| 303 | if (cpuc->n_events == riscv_pmu->num_counters) | ||
| 304 | return -ENOSPC; | ||
| 305 | |||
| 306 | /* | ||
| 307 | * We don't have general counters, so no binding-event-to-counter | ||
| 308 | * process here. | ||
| 309 | * | ||
| 310 | * Indexing by hwc->config generally does not work, since config may | ||
| 311 | * contain extra information, but here the only info we have in | ||
| 312 | * hwc->config is the event index. | ||
| 313 | */ | ||
| 314 | hwc->idx = hwc->config; | ||
| 315 | cpuc->events[hwc->idx] = event; | ||
| 316 | cpuc->n_events++; | ||
| 317 | |||
| 318 | hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; | ||
| 319 | |||
| 320 | if (flags & PERF_EF_START) | ||
| 321 | riscv_pmu->pmu->start(event, PERF_EF_RELOAD); | ||
| 322 | |||
| 323 | return 0; | ||
| 324 | } | ||
| 325 | |||
| 326 | /* | ||
| 327 | * pmu->del: delete the event from PMU. | ||
| 328 | */ | ||
| 329 | static void riscv_pmu_del(struct perf_event *event, int flags) | ||
| 330 | { | ||
| 331 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | ||
| 332 | struct hw_perf_event *hwc = &event->hw; | ||
| 333 | |||
| 334 | cpuc->events[hwc->idx] = NULL; | ||
| 335 | cpuc->n_events--; | ||
| 336 | riscv_pmu->pmu->stop(event, PERF_EF_UPDATE); | ||
| 337 | perf_event_update_userpage(event); | ||
| 338 | } | ||
| 339 | |||
| 340 | /* | ||
| 341 | * Interrupt: a skeleton for reference. | ||
| 342 | */ | ||
| 343 | |||
| 344 | static DEFINE_MUTEX(pmc_reserve_mutex); | ||
| 345 | |||
| 346 | irqreturn_t riscv_base_pmu_handle_irq(int irq_num, void *dev) | ||
| 347 | { | ||
| 348 | return IRQ_NONE; | ||
| 349 | } | ||
| 350 | |||
| 351 | static int reserve_pmc_hardware(void) | ||
| 352 | { | ||
| 353 | int err = 0; | ||
| 354 | |||
| 355 | mutex_lock(&pmc_reserve_mutex); | ||
| 356 | if (riscv_pmu->irq >= 0 && riscv_pmu->handle_irq) { | ||
| 357 | err = request_irq(riscv_pmu->irq, riscv_pmu->handle_irq, | ||
| 358 | IRQF_PERCPU, "riscv-base-perf", NULL); | ||
| 359 | } | ||
| 360 | mutex_unlock(&pmc_reserve_mutex); | ||
| 361 | |||
| 362 | return err; | ||
| 363 | } | ||
| 364 | |||
| 365 | static void release_pmc_hardware(void) | ||
| 366 | { | ||
| 367 | mutex_lock(&pmc_reserve_mutex); | ||
| 368 | if (riscv_pmu->irq >= 0) | ||
| 369 | free_irq(riscv_pmu->irq, NULL); | ||
| 370 | mutex_unlock(&pmc_reserve_mutex); | ||
| 371 | } | ||
| 372 | |||
| 373 | /* | ||
| 374 | * Event Initialization/Finalization | ||
| 375 | */ | ||
| 376 | |||
| 377 | static atomic_t riscv_active_events = ATOMIC_INIT(0); | ||
| 378 | |||
| 379 | static void riscv_event_destroy(struct perf_event *event) | ||
| 380 | { | ||
| 381 | if (atomic_dec_return(&riscv_active_events) == 0) | ||
| 382 | release_pmc_hardware(); | ||
| 383 | } | ||
| 384 | |||
| 385 | static int riscv_event_init(struct perf_event *event) | ||
| 386 | { | ||
| 387 | struct perf_event_attr *attr = &event->attr; | ||
| 388 | struct hw_perf_event *hwc = &event->hw; | ||
| 389 | int err; | ||
| 390 | int code; | ||
| 391 | |||
| 392 | if (atomic_inc_return(&riscv_active_events) == 1) { | ||
| 393 | err = reserve_pmc_hardware(); | ||
| 394 | |||
| 395 | if (err) { | ||
| 396 | pr_warn("PMC hardware not available\n"); | ||
| 397 | atomic_dec(&riscv_active_events); | ||
| 398 | return -EBUSY; | ||
| 399 | } | ||
| 400 | } | ||
| 401 | |||
| 402 | switch (event->attr.type) { | ||
| 403 | case PERF_TYPE_HARDWARE: | ||
| 404 | code = riscv_pmu->map_hw_event(attr->config); | ||
| 405 | break; | ||
| 406 | case PERF_TYPE_HW_CACHE: | ||
| 407 | code = riscv_pmu->map_cache_event(attr->config); | ||
| 408 | break; | ||
| 409 | case PERF_TYPE_RAW: | ||
| 410 | return -EOPNOTSUPP; | ||
| 411 | default: | ||
| 412 | return -ENOENT; | ||
| 413 | } | ||
| 414 | |||
| 415 | event->destroy = riscv_event_destroy; | ||
| 416 | if (code < 0) { | ||
| 417 | event->destroy(event); | ||
| 418 | return code; | ||
| 419 | } | ||
| 420 | |||
| 421 | /* | ||
| 422 | * idx is set to -1 because the index of a general event should not be | ||
| 423 | * decided until binding to some counter in pmu->add(). | ||
| 424 | * | ||
| 425 | * But since we don't have such support, later in pmu->add(), we just | ||
| 426 | * use hwc->config as the index instead. | ||
| 427 | */ | ||
| 428 | hwc->config = code; | ||
| 429 | hwc->idx = -1; | ||
| 430 | |||
| 431 | return 0; | ||
| 432 | } | ||
| 433 | |||
| 434 | /* | ||
| 435 | * Initialization | ||
| 436 | */ | ||
| 437 | |||
| 438 | static struct pmu min_pmu = { | ||
| 439 | .name = "riscv-base", | ||
| 440 | .event_init = riscv_event_init, | ||
| 441 | .add = riscv_pmu_add, | ||
| 442 | .del = riscv_pmu_del, | ||
| 443 | .start = riscv_pmu_start, | ||
| 444 | .stop = riscv_pmu_stop, | ||
| 445 | .read = riscv_pmu_read, | ||
| 446 | }; | ||
| 447 | |||
| 448 | static const struct riscv_pmu riscv_base_pmu = { | ||
| 449 | .pmu = &min_pmu, | ||
| 450 | .max_events = ARRAY_SIZE(riscv_hw_event_map), | ||
| 451 | .map_hw_event = riscv_map_hw_event, | ||
| 452 | .hw_events = riscv_hw_event_map, | ||
| 453 | .map_cache_event = riscv_map_cache_event, | ||
| 454 | .cache_events = &riscv_cache_event_map, | ||
| 455 | .counter_width = 63, | ||
| 456 | .num_counters = RISCV_BASE_COUNTERS + 0, | ||
| 457 | .handle_irq = &riscv_base_pmu_handle_irq, | ||
| 458 | |||
| 459 | /* This means this PMU has no IRQ. */ | ||
| 460 | .irq = -1, | ||
| 461 | }; | ||
| 462 | |||
| 463 | static const struct of_device_id riscv_pmu_of_ids[] = { | ||
| 464 | {.compatible = "riscv,base-pmu", .data = &riscv_base_pmu}, | ||
| 465 | { /* sentinel value */ } | ||
| 466 | }; | ||
| 467 | |||
| 468 | int __init init_hw_perf_events(void) | ||
| 469 | { | ||
| 470 | struct device_node *node = of_find_node_by_type(NULL, "pmu"); | ||
| 471 | const struct of_device_id *of_id; | ||
| 472 | |||
| 473 | riscv_pmu = &riscv_base_pmu; | ||
| 474 | |||
| 475 | if (node) { | ||
| 476 | of_id = of_match_node(riscv_pmu_of_ids, node); | ||
| 477 | |||
| 478 | if (of_id) | ||
| 479 | riscv_pmu = of_id->data; | ||
| 480 | } | ||
| 481 | |||
| 482 | perf_pmu_register(riscv_pmu->pmu, "cpu", PERF_TYPE_RAW); | ||
| 483 | return 0; | ||
| 484 | } | ||
| 485 | arch_initcall(init_hw_perf_events); | ||
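
The skeleton above is enough to see how a vendor hooks in. The following is a
minimal sketch, not kernel source: the "myvendor" name, compatible string, and
counter width are invented, and only the structures and helpers shown in this
appendix are assumed to exist. A vendor PMU can reuse the base ops wholesale
and override just the parameters that differ:

	static const struct riscv_pmu myvendor_pmu = {
		.pmu		= &min_pmu,	/* reuse the base ops */
		.max_events	= ARRAY_SIZE(riscv_hw_event_map),
		.map_hw_event	= riscv_map_hw_event,
		.hw_events	= riscv_hw_event_map,
		.map_cache_event = riscv_map_cache_event,
		.cache_events	= &riscv_cache_event_map,
		.counter_width	= 47,		/* hypothetical width */
		.num_counters	= RISCV_BASE_COUNTERS,
		.handle_irq	= &riscv_base_pmu_handle_irq,
		.irq		= -1,		/* still no overflow IRQ */
	};

The matching table then gains one entry ahead of the generic one, so that
init_hw_perf_events() picks the vendor PMU when the device tree's pmu node
matches, and falls back to riscv_base_pmu otherwise:

	static const struct of_device_id riscv_pmu_of_ids[] = {
		{ .compatible = "myvendor,cpu-pmu", .data = &myvendor_pmu },
		{ .compatible = "riscv,base-pmu",   .data = &riscv_base_pmu },
		{ /* sentinel value */ }
	};
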
diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c index 551734248748..f247d6d2137c 100644 --- a/arch/riscv/kernel/riscv_ksyms.c +++ b/arch/riscv/kernel/riscv_ksyms.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | * Assembly functions that may be used (directly or indirectly) by modules | 13 | * Assembly functions that may be used (directly or indirectly) by modules |
| 14 | */ | 14 | */ |
| 15 | EXPORT_SYMBOL(__clear_user); | 15 | EXPORT_SYMBOL(__clear_user); |
| 16 | EXPORT_SYMBOL(__copy_user); | 16 | EXPORT_SYMBOL(__asm_copy_to_user); |
| 17 | EXPORT_SYMBOL(__asm_copy_from_user); | ||
| 17 | EXPORT_SYMBOL(memset); | 18 | EXPORT_SYMBOL(memset); |
| 18 | EXPORT_SYMBOL(memcpy); | 19 | EXPORT_SYMBOL(memcpy); |
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index b99d9dd21fd0..81a1952015a6 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c | |||
| @@ -148,7 +148,7 @@ int is_valid_bugaddr(unsigned long pc) | |||
| 148 | 148 | ||
| 149 | if (pc < PAGE_OFFSET) | 149 | if (pc < PAGE_OFFSET) |
| 150 | return 0; | 150 | return 0; |
| 151 | if (probe_kernel_address((bug_insn_t __user *)pc, insn)) | 151 | if (probe_kernel_address((bug_insn_t *)pc, insn)) |
| 152 | return 0; | 152 | return 0; |
| 153 | return (insn == __BUG_INSN); | 153 | return (insn == __BUG_INSN); |
| 154 | } | 154 | } |
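
The cast removed above matters for sparse's address-space checking: pc is a
kernel text address, so tagging it __user misleads the checker rather than
adding safety. An illustrative pair (not tree code):

	bug_insn_t *kernel_pc = (bug_insn_t *)pc;         /* correct: kernel address */
	bug_insn_t __user *bad = (bug_insn_t __user *)pc; /* wrong: pc is not a user pointer */
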
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S index 58fb2877c865..399e6f0c2d98 100644 --- a/arch/riscv/lib/uaccess.S +++ b/arch/riscv/lib/uaccess.S | |||
| @@ -13,7 +13,8 @@ _epc: | |||
| 13 | .previous | 13 | .previous |
| 14 | .endm | 14 | .endm |
| 15 | 15 | ||
| 16 | ENTRY(__copy_user) | 16 | ENTRY(__asm_copy_to_user) |
| 17 | ENTRY(__asm_copy_from_user) | ||
| 17 | 18 | ||
| 18 | /* Enable access to user memory */ | 19 | /* Enable access to user memory */ |
| 19 | li t6, SR_SUM | 20 | li t6, SR_SUM |
| @@ -63,7 +64,8 @@ ENTRY(__copy_user) | |||
| 63 | addi a0, a0, 1 | 64 | addi a0, a0, 1 |
| 64 | bltu a1, a3, 5b | 65 | bltu a1, a3, 5b |
| 65 | j 3b | 66 | j 3b |
| 66 | ENDPROC(__copy_user) | 67 | ENDPROC(__asm_copy_to_user) |
| 68 | ENDPROC(__asm_copy_from_user) | ||
| 67 | 69 | ||
| 68 | 70 | ||
| 69 | ENTRY(__clear_user) | 71 | ENTRY(__clear_user) |
| @@ -84,7 +86,7 @@ ENTRY(__clear_user) | |||
| 84 | bgeu t0, t1, 2f | 86 | bgeu t0, t1, 2f |
| 85 | bltu a0, t0, 4f | 87 | bltu a0, t0, 4f |
| 86 | 1: | 88 | 1: |
| 87 | fixup REG_S, zero, (a0), 10f | 89 | fixup REG_S, zero, (a0), 11f |
| 88 | addi a0, a0, SZREG | 90 | addi a0, a0, SZREG |
| 89 | bltu a0, t1, 1b | 91 | bltu a0, t1, 1b |
| 90 | 2: | 92 | 2: |
| @@ -96,12 +98,12 @@ ENTRY(__clear_user) | |||
| 96 | li a0, 0 | 98 | li a0, 0 |
| 97 | ret | 99 | ret |
| 98 | 4: /* Edge case: unalignment */ | 100 | 4: /* Edge case: unalignment */ |
| 99 | fixup sb, zero, (a0), 10f | 101 | fixup sb, zero, (a0), 11f |
| 100 | addi a0, a0, 1 | 102 | addi a0, a0, 1 |
| 101 | bltu a0, t0, 4b | 103 | bltu a0, t0, 4b |
| 102 | j 1b | 104 | j 1b |
| 103 | 5: /* Edge case: remainder */ | 105 | 5: /* Edge case: remainder */ |
| 104 | fixup sb, zero, (a0), 10f | 106 | fixup sb, zero, (a0), 11f |
| 105 | addi a0, a0, 1 | 107 | addi a0, a0, 1 |
| 106 | bltu a0, a3, 5b | 108 | bltu a0, a3, 5b |
| 107 | j 3b | 109 | j 3b |
| @@ -109,9 +111,14 @@ ENDPROC(__clear_user) | |||
| 109 | 111 | ||
| 110 | .section .fixup,"ax" | 112 | .section .fixup,"ax" |
| 111 | .balign 4 | 113 | .balign 4 |
| 114 | /* Fixup code for __asm_copy_{to,from}_user(10) and __clear_user(11) */ | ||
| 112 | 10: | 115 | 10: |
| 113 | /* Disable access to user memory */ | 116 | /* Disable access to user memory */ |
| 114 | csrs sstatus, t6 | 117 | csrs sstatus, t6 |
| 115 | sub a0, a3, a0 | 118 | mv a0, a2 |
| 119 | ret | ||
| 120 | 11: | ||
| 121 | csrs sstatus, t6 | ||
| 122 | mv a0, a1 | ||
| 116 | ret | 123 | ret |
| 117 | .previous | 124 | .previous |
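
With __copy_user split into direction-specific entry points, the C wrappers
can route each direction straight through. The following is a sketch of how
asm/uaccess.h would typically wire these up -- the exact prototypes in the
tree may differ -- with both helpers returning the number of bytes left
uncopied, which the per-caller fixup stubs above now report correctly (a2
holds the total count for the copy routines, a1 for __clear_user):

	extern unsigned long __asm_copy_to_user(void __user *to,
		const void *from, unsigned long n);
	extern unsigned long __asm_copy_from_user(void *to,
		const void __user *from, unsigned long n);

	static inline unsigned long
	raw_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		return __asm_copy_from_user(to, from, n); /* bytes not copied */
	}

	static inline unsigned long
	raw_copy_to_user(void __user *to, const void *from, unsigned long n)
	{
		return __asm_copy_to_user(to, from, n);   /* bytes not copied */
	}
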
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c index 627075e6d875..50ee3bb5a63a 100644 --- a/arch/um/drivers/vector_kern.c +++ b/arch/um/drivers/vector_kern.c | |||
| @@ -188,7 +188,7 @@ static int get_transport_options(struct arglist *def) | |||
| 188 | if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0) | 188 | if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0) |
| 189 | return (vec_rx | VECTOR_BPF); | 189 | return (vec_rx | VECTOR_BPF); |
| 190 | if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0) | 190 | if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0) |
| 191 | return (vec_rx | vec_tx); | 191 | return (vec_rx | vec_tx | VECTOR_QDISC_BYPASS); |
| 192 | return (vec_rx | vec_tx); | 192 | return (vec_rx | vec_tx); |
| 193 | } | 193 | } |
| 194 | 194 | ||
| @@ -504,15 +504,19 @@ static struct vector_queue *create_queue( | |||
| 504 | 504 | ||
| 505 | result = kmalloc(sizeof(struct vector_queue), GFP_KERNEL); | 505 | result = kmalloc(sizeof(struct vector_queue), GFP_KERNEL); |
| 506 | if (result == NULL) | 506 | if (result == NULL) |
| 507 | goto out_fail; | 507 | return NULL; |
| 508 | result->max_depth = max_size; | 508 | result->max_depth = max_size; |
| 509 | result->dev = vp->dev; | 509 | result->dev = vp->dev; |
| 510 | result->mmsg_vector = kmalloc( | 510 | result->mmsg_vector = kmalloc( |
| 511 | (sizeof(struct mmsghdr) * max_size), GFP_KERNEL); | 511 | (sizeof(struct mmsghdr) * max_size), GFP_KERNEL); |
| 512 | if (result->mmsg_vector == NULL) | ||
| 513 | goto out_mmsg_fail; | ||
| 512 | result->skbuff_vector = kmalloc( | 514 | result->skbuff_vector = kmalloc( |
| 513 | (sizeof(void *) * max_size), GFP_KERNEL); | 515 | (sizeof(void *) * max_size), GFP_KERNEL); |
| 514 | if (result->mmsg_vector == NULL || result->skbuff_vector == NULL) | 516 | if (result->skbuff_vector == NULL) |
| 515 | goto out_fail; | 517 | goto out_skb_fail; |
| 518 | |||
| 519 | /* Further failures can be handled safely by destroy_queue() */ | ||
| 516 | 520 | ||
| 517 | mmsg_vector = result->mmsg_vector; | 521 | mmsg_vector = result->mmsg_vector; |
| 518 | for (i = 0; i < max_size; i++) { | 522 | for (i = 0; i < max_size; i++) { |
| @@ -563,6 +567,11 @@ static struct vector_queue *create_queue( | |||
| 563 | result->head = 0; | 567 | result->head = 0; |
| 564 | result->tail = 0; | 568 | result->tail = 0; |
| 565 | return result; | 569 | return result; |
| 570 | out_skb_fail: | ||
| 571 | kfree(result->mmsg_vector); | ||
| 572 | out_mmsg_fail: | ||
| 573 | kfree(result); | ||
| 574 | return NULL; | ||
| 566 | out_fail: | 575 | out_fail: |
| 567 | destroy_queue(result); | 576 | destroy_queue(result); |
| 568 | return NULL; | 577 | return NULL; |
| @@ -1232,9 +1241,8 @@ static int vector_net_open(struct net_device *dev) | |||
| 1232 | 1241 | ||
| 1233 | if ((vp->options & VECTOR_QDISC_BYPASS) != 0) { | 1242 | if ((vp->options & VECTOR_QDISC_BYPASS) != 0) { |
| 1234 | if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd)) | 1243 | if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd)) |
| 1235 | vp->options = vp->options | VECTOR_BPF; | 1244 | vp->options |= VECTOR_BPF; |
| 1236 | } | 1245 | } |
| 1237 | |||
| 1238 | if ((vp->options & VECTOR_BPF) != 0) | 1246 | if ((vp->options & VECTOR_BPF) != 0) |
| 1239 | vp->bpf = uml_vector_default_bpf(vp->fds->rx_fd, dev->dev_addr); | 1247 | vp->bpf = uml_vector_default_bpf(vp->fds->rx_fd, dev->dev_addr); |
| 1240 | 1248 | ||
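
The create_queue() rework above is the standard staged-unwind idiom: each
allocation gets its own failure label, and the labels free in reverse order,
so a half-built queue never reaches destroy_queue(). A generic, self-contained
sketch of the idiom (the names are illustrative, not the driver's):

	#include <linux/slab.h>

	struct q {
		void *a;
		void *b;
	};

	static struct q *alloc_q(size_t na, size_t nb)
	{
		struct q *q = kmalloc(sizeof(*q), GFP_KERNEL);

		if (!q)
			return NULL;
		q->a = kmalloc(na, GFP_KERNEL);
		if (!q->a)
			goto out_q;
		q->b = kmalloc(nb, GFP_KERNEL);
		if (!q->b)
			goto out_a;
		return q;

	out_a:
		kfree(q->a);	/* undo in reverse allocation order */
	out_q:
		kfree(q);
		return NULL;
	}
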
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S index b30d73ca29d0..7adb4e6b658a 100644 --- a/arch/um/include/asm/common.lds.S +++ b/arch/um/include/asm/common.lds.S | |||
| @@ -53,12 +53,6 @@ | |||
| 53 | CON_INITCALL | 53 | CON_INITCALL |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | .uml.initcall.init : { | ||
| 57 | __uml_initcall_start = .; | ||
| 58 | *(.uml.initcall.init) | ||
| 59 | __uml_initcall_end = .; | ||
| 60 | } | ||
| 61 | |||
| 62 | SECURITY_INIT | 56 | SECURITY_INIT |
| 63 | 57 | ||
| 64 | .exitcall : { | 58 | .exitcall : { |
diff --git a/arch/um/include/shared/init.h b/arch/um/include/shared/init.h index b3f5865a92c9..c66de434a983 100644 --- a/arch/um/include/shared/init.h +++ b/arch/um/include/shared/init.h | |||
| @@ -64,14 +64,10 @@ struct uml_param { | |||
| 64 | int (*setup_func)(char *, int *); | 64 | int (*setup_func)(char *, int *); |
| 65 | }; | 65 | }; |
| 66 | 66 | ||
| 67 | extern initcall_t __uml_initcall_start, __uml_initcall_end; | ||
| 68 | extern initcall_t __uml_postsetup_start, __uml_postsetup_end; | 67 | extern initcall_t __uml_postsetup_start, __uml_postsetup_end; |
| 69 | extern const char *__uml_help_start, *__uml_help_end; | 68 | extern const char *__uml_help_start, *__uml_help_end; |
| 70 | #endif | 69 | #endif |
| 71 | 70 | ||
| 72 | #define __uml_initcall(fn) \ | ||
| 73 | static initcall_t __uml_initcall_##fn __uml_init_call = fn | ||
| 74 | |||
| 75 | #define __uml_exitcall(fn) \ | 71 | #define __uml_exitcall(fn) \ |
| 76 | static exitcall_t __uml_exitcall_##fn __uml_exit_call = fn | 72 | static exitcall_t __uml_exitcall_##fn __uml_exit_call = fn |
| 77 | 73 | ||
| @@ -108,7 +104,6 @@ extern struct uml_param __uml_setup_start, __uml_setup_end; | |||
| 108 | */ | 104 | */ |
| 109 | #define __uml_init_setup __used __section(.uml.setup.init) | 105 | #define __uml_init_setup __used __section(.uml.setup.init) |
| 110 | #define __uml_setup_help __used __section(.uml.help.init) | 106 | #define __uml_setup_help __used __section(.uml.help.init) |
| 111 | #define __uml_init_call __used __section(.uml.initcall.init) | ||
| 112 | #define __uml_postsetup_call __used __section(.uml.postsetup.init) | 107 | #define __uml_postsetup_call __used __section(.uml.postsetup.init) |
| 113 | #define __uml_exit_call __used __section(.uml.exitcall.exit) | 108 | #define __uml_exit_call __used __section(.uml.exitcall.exit) |
| 114 | 109 | ||
diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c index 5f970ece5ac3..f1fee2b91239 100644 --- a/arch/um/os-Linux/main.c +++ b/arch/um/os-Linux/main.c | |||
| @@ -40,17 +40,6 @@ static void set_stklim(void) | |||
| 40 | } | 40 | } |
| 41 | } | 41 | } |
| 42 | 42 | ||
| 43 | static __init void do_uml_initcalls(void) | ||
| 44 | { | ||
| 45 | initcall_t *call; | ||
| 46 | |||
| 47 | call = &__uml_initcall_start; | ||
| 48 | while (call < &__uml_initcall_end) { | ||
| 49 | (*call)(); | ||
| 50 | call++; | ||
| 51 | } | ||
| 52 | } | ||
| 53 | |||
| 54 | static void last_ditch_exit(int sig) | 43 | static void last_ditch_exit(int sig) |
| 55 | { | 44 | { |
| 56 | uml_cleanup(); | 45 | uml_cleanup(); |
| @@ -151,7 +140,6 @@ int __init main(int argc, char **argv, char **envp) | |||
| 151 | scan_elf_aux(envp); | 140 | scan_elf_aux(envp); |
| 152 | #endif | 141 | #endif |
| 153 | 142 | ||
| 154 | do_uml_initcalls(); | ||
| 155 | change_sig(SIGPIPE, 0); | 143 | change_sig(SIGPIPE, 0); |
| 156 | ret = linux_main(argc, argv); | 144 | ret = linux_main(argc, argv); |
| 157 | 145 | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index d0dd35d582da..559a12b6184d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -4429,16 +4429,14 @@ static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) | |||
| 4429 | goto out_vmcs; | 4429 | goto out_vmcs; |
| 4430 | memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); | 4430 | memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); |
| 4431 | 4431 | ||
| 4432 | #if IS_ENABLED(CONFIG_HYPERV) | 4432 | if (IS_ENABLED(CONFIG_HYPERV) && |
| 4433 | if (static_branch_unlikely(&enable_evmcs) && | 4433 | static_branch_unlikely(&enable_evmcs) && |
| 4434 | (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { | 4434 | (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { |
| 4435 | struct hv_enlightened_vmcs *evmcs = | 4435 | struct hv_enlightened_vmcs *evmcs = |
| 4436 | (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs; | 4436 | (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs; |
| 4437 | 4437 | ||
| 4438 | evmcs->hv_enlightenments_control.msr_bitmap = 1; | 4438 | evmcs->hv_enlightenments_control.msr_bitmap = 1; |
| 4439 | } | 4439 | } |
| 4440 | #endif | ||
| 4441 | |||
| 4442 | } | 4440 | } |
| 4443 | return 0; | 4441 | return 0; |
| 4444 | 4442 | ||
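
The vmx.c hunk above swaps a preprocessor guard for if (IS_ENABLED(...)),
which keeps the branch visible to the compiler in every configuration -- it is
type-checked and then discarded as dead code when the option is off. This only
works when every identifier in the branch stays declared regardless of the
config, which holds for the Hyper-V structures used here. In generic form
(illustrative only):

	if (IS_ENABLED(CONFIG_FOO))
		do_foo();	/* always parsed; compiled out when CONFIG_FOO=n */
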
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6bcecc325e7e..0046aa70205a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -8567,7 +8567,7 @@ int kvm_arch_hardware_setup(void) | |||
| 8567 | /* | 8567 | /* |
| 8568 | * Make sure the user can only configure tsc_khz values that | 8568 | * Make sure the user can only configure tsc_khz values that |
| 8569 | * fit into a signed integer. | 8569 | * fit into a signed integer. |
| 8570 | * A min value is not calculated needed because it will always | 8570 | * A min value is not calculated because it will always |
| 8571 | * be 1 on all machines. | 8571 | * be 1 on all machines. |
| 8572 | */ | 8572 | */ |
| 8573 | u64 max = min(0x7fffffffULL, | 8573 | u64 max = min(0x7fffffffULL, |
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig index c987c826daa3..0426d66660d1 100644 --- a/drivers/gpu/drm/shmobile/Kconfig +++ b/drivers/gpu/drm/shmobile/Kconfig | |||
| @@ -2,7 +2,6 @@ config DRM_SHMOBILE | |||
| 2 | tristate "DRM Support for SH Mobile" | 2 | tristate "DRM Support for SH Mobile" |
| 3 | depends on DRM && ARM | 3 | depends on DRM && ARM |
| 4 | depends on ARCH_SHMOBILE || COMPILE_TEST | 4 | depends on ARCH_SHMOBILE || COMPILE_TEST |
| 5 | depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM | ||
| 6 | select BACKLIGHT_CLASS_DEVICE | 5 | select BACKLIGHT_CLASS_DEVICE |
| 7 | select BACKLIGHT_LCD_SUPPORT | 6 | select BACKLIGHT_LCD_SUPPORT |
| 8 | select DRM_KMS_HELPER | 7 | select DRM_KMS_HELPER |
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index e7738939a86d..40df8887fc17 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c | |||
| @@ -21,8 +21,6 @@ | |||
| 21 | #include <drm/drm_gem_cma_helper.h> | 21 | #include <drm/drm_gem_cma_helper.h> |
| 22 | #include <drm/drm_plane_helper.h> | 22 | #include <drm/drm_plane_helper.h> |
| 23 | 23 | ||
| 24 | #include <video/sh_mobile_meram.h> | ||
| 25 | |||
| 26 | #include "shmob_drm_backlight.h" | 24 | #include "shmob_drm_backlight.h" |
| 27 | #include "shmob_drm_crtc.h" | 25 | #include "shmob_drm_crtc.h" |
| 28 | #include "shmob_drm_drv.h" | 26 | #include "shmob_drm_drv.h" |
| @@ -47,20 +45,12 @@ static int shmob_drm_clk_on(struct shmob_drm_device *sdev) | |||
| 47 | if (ret < 0) | 45 | if (ret < 0) |
| 48 | return ret; | 46 | return ret; |
| 49 | } | 47 | } |
| 50 | #if 0 | ||
| 51 | if (sdev->meram_dev && sdev->meram_dev->pdev) | ||
| 52 | pm_runtime_get_sync(&sdev->meram_dev->pdev->dev); | ||
| 53 | #endif | ||
| 54 | 48 | ||
| 55 | return 0; | 49 | return 0; |
| 56 | } | 50 | } |
| 57 | 51 | ||
| 58 | static void shmob_drm_clk_off(struct shmob_drm_device *sdev) | 52 | static void shmob_drm_clk_off(struct shmob_drm_device *sdev) |
| 59 | { | 53 | { |
| 60 | #if 0 | ||
| 61 | if (sdev->meram_dev && sdev->meram_dev->pdev) | ||
| 62 | pm_runtime_put_sync(&sdev->meram_dev->pdev->dev); | ||
| 63 | #endif | ||
| 64 | if (sdev->clock) | 54 | if (sdev->clock) |
| 65 | clk_disable_unprepare(sdev->clock); | 55 | clk_disable_unprepare(sdev->clock); |
| 66 | } | 56 | } |
| @@ -269,12 +259,6 @@ static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc) | |||
| 269 | if (!scrtc->started) | 259 | if (!scrtc->started) |
| 270 | return; | 260 | return; |
| 271 | 261 | ||
| 272 | /* Disable the MERAM cache. */ | ||
| 273 | if (scrtc->cache) { | ||
| 274 | sh_mobile_meram_cache_free(sdev->meram, scrtc->cache); | ||
| 275 | scrtc->cache = NULL; | ||
| 276 | } | ||
| 277 | |||
| 278 | /* Stop the LCDC. */ | 262 | /* Stop the LCDC. */ |
| 279 | shmob_drm_crtc_start_stop(scrtc, false); | 263 | shmob_drm_crtc_start_stop(scrtc, false); |
| 280 | 264 | ||
| @@ -305,7 +289,6 @@ static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc, | |||
| 305 | { | 289 | { |
| 306 | struct drm_crtc *crtc = &scrtc->crtc; | 290 | struct drm_crtc *crtc = &scrtc->crtc; |
| 307 | struct drm_framebuffer *fb = crtc->primary->fb; | 291 | struct drm_framebuffer *fb = crtc->primary->fb; |
| 308 | struct shmob_drm_device *sdev = crtc->dev->dev_private; | ||
| 309 | struct drm_gem_cma_object *gem; | 292 | struct drm_gem_cma_object *gem; |
| 310 | unsigned int bpp; | 293 | unsigned int bpp; |
| 311 | 294 | ||
| @@ -321,11 +304,6 @@ static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc, | |||
| 321 | + y / (bpp == 4 ? 2 : 1) * fb->pitches[1] | 304 | + y / (bpp == 4 ? 2 : 1) * fb->pitches[1] |
| 322 | + x * (bpp == 16 ? 2 : 1); | 305 | + x * (bpp == 16 ? 2 : 1); |
| 323 | } | 306 | } |
| 324 | |||
| 325 | if (scrtc->cache) | ||
| 326 | sh_mobile_meram_cache_update(sdev->meram, scrtc->cache, | ||
| 327 | scrtc->dma[0], scrtc->dma[1], | ||
| 328 | &scrtc->dma[0], &scrtc->dma[1]); | ||
| 329 | } | 307 | } |
| 330 | 308 | ||
| 331 | static void shmob_drm_crtc_update_base(struct shmob_drm_crtc *scrtc) | 309 | static void shmob_drm_crtc_update_base(struct shmob_drm_crtc *scrtc) |
| @@ -372,9 +350,7 @@ static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc, | |||
| 372 | { | 350 | { |
| 373 | struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc); | 351 | struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc); |
| 374 | struct shmob_drm_device *sdev = crtc->dev->dev_private; | 352 | struct shmob_drm_device *sdev = crtc->dev->dev_private; |
| 375 | const struct sh_mobile_meram_cfg *mdata = sdev->pdata->meram; | ||
| 376 | const struct shmob_drm_format_info *format; | 353 | const struct shmob_drm_format_info *format; |
| 377 | void *cache; | ||
| 378 | 354 | ||
| 379 | format = shmob_drm_format_info(crtc->primary->fb->format->format); | 355 | format = shmob_drm_format_info(crtc->primary->fb->format->format); |
| 380 | if (format == NULL) { | 356 | if (format == NULL) { |
| @@ -386,24 +362,6 @@ static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc, | |||
| 386 | scrtc->format = format; | 362 | scrtc->format = format; |
| 387 | scrtc->line_size = crtc->primary->fb->pitches[0]; | 363 | scrtc->line_size = crtc->primary->fb->pitches[0]; |
| 388 | 364 | ||
| 389 | if (sdev->meram) { | ||
| 390 | /* Enable MERAM cache if configured. We need to de-init | ||
| 391 | * configured ICBs before we can re-initialize them. | ||
| 392 | */ | ||
| 393 | if (scrtc->cache) { | ||
| 394 | sh_mobile_meram_cache_free(sdev->meram, scrtc->cache); | ||
| 395 | scrtc->cache = NULL; | ||
| 396 | } | ||
| 397 | |||
| 398 | cache = sh_mobile_meram_cache_alloc(sdev->meram, mdata, | ||
| 399 | crtc->primary->fb->pitches[0], | ||
| 400 | adjusted_mode->vdisplay, | ||
| 401 | format->meram, | ||
| 402 | &scrtc->line_size); | ||
| 403 | if (!IS_ERR(cache)) | ||
| 404 | scrtc->cache = cache; | ||
| 405 | } | ||
| 406 | |||
| 407 | shmob_drm_crtc_compute_base(scrtc, x, y); | 365 | shmob_drm_crtc_compute_base(scrtc, x, y); |
| 408 | 366 | ||
| 409 | return 0; | 367 | return 0; |
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h index f152973df11c..c11f421737dc 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h | |||
| @@ -28,7 +28,6 @@ struct shmob_drm_crtc { | |||
| 28 | int dpms; | 28 | int dpms; |
| 29 | 29 | ||
| 30 | const struct shmob_drm_format_info *format; | 30 | const struct shmob_drm_format_info *format; |
| 31 | void *cache; | ||
| 32 | unsigned long dma[2]; | 31 | unsigned long dma[2]; |
| 33 | unsigned int line_size; | 32 | unsigned int line_size; |
| 34 | bool started; | 33 | bool started; |
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.h b/drivers/gpu/drm/shmobile/shmob_drm_drv.h index 02ea315ba69a..088a6e55fa29 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.h +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.h | |||
| @@ -23,7 +23,6 @@ | |||
| 23 | struct clk; | 23 | struct clk; |
| 24 | struct device; | 24 | struct device; |
| 25 | struct drm_device; | 25 | struct drm_device; |
| 26 | struct sh_mobile_meram_info; | ||
| 27 | 26 | ||
| 28 | struct shmob_drm_device { | 27 | struct shmob_drm_device { |
| 29 | struct device *dev; | 28 | struct device *dev; |
| @@ -31,7 +30,6 @@ struct shmob_drm_device { | |||
| 31 | 30 | ||
| 32 | void __iomem *mmio; | 31 | void __iomem *mmio; |
| 33 | struct clk *clock; | 32 | struct clk *clock; |
| 34 | struct sh_mobile_meram_info *meram; | ||
| 35 | u32 lddckr; | 33 | u32 lddckr; |
| 36 | u32 ldmt1r; | 34 | u32 ldmt1r; |
| 37 | 35 | ||
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c index d36919b14da7..447638581c08 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c | |||
| @@ -18,8 +18,6 @@ | |||
| 18 | #include <drm/drm_gem_cma_helper.h> | 18 | #include <drm/drm_gem_cma_helper.h> |
| 19 | #include <drm/drm_gem_framebuffer_helper.h> | 19 | #include <drm/drm_gem_framebuffer_helper.h> |
| 20 | 20 | ||
| 21 | #include <video/sh_mobile_meram.h> | ||
| 22 | |||
| 23 | #include "shmob_drm_crtc.h" | 21 | #include "shmob_drm_crtc.h" |
| 24 | #include "shmob_drm_drv.h" | 22 | #include "shmob_drm_drv.h" |
| 25 | #include "shmob_drm_kms.h" | 23 | #include "shmob_drm_kms.h" |
| @@ -35,55 +33,46 @@ static const struct shmob_drm_format_info shmob_drm_format_infos[] = { | |||
| 35 | .bpp = 16, | 33 | .bpp = 16, |
| 36 | .yuv = false, | 34 | .yuv = false, |
| 37 | .lddfr = LDDFR_PKF_RGB16, | 35 | .lddfr = LDDFR_PKF_RGB16, |
| 38 | .meram = SH_MOBILE_MERAM_PF_RGB, | ||
| 39 | }, { | 36 | }, { |
| 40 | .fourcc = DRM_FORMAT_RGB888, | 37 | .fourcc = DRM_FORMAT_RGB888, |
| 41 | .bpp = 24, | 38 | .bpp = 24, |
| 42 | .yuv = false, | 39 | .yuv = false, |
| 43 | .lddfr = LDDFR_PKF_RGB24, | 40 | .lddfr = LDDFR_PKF_RGB24, |
| 44 | .meram = SH_MOBILE_MERAM_PF_RGB, | ||
| 45 | }, { | 41 | }, { |
| 46 | .fourcc = DRM_FORMAT_ARGB8888, | 42 | .fourcc = DRM_FORMAT_ARGB8888, |
| 47 | .bpp = 32, | 43 | .bpp = 32, |
| 48 | .yuv = false, | 44 | .yuv = false, |
| 49 | .lddfr = LDDFR_PKF_ARGB32, | 45 | .lddfr = LDDFR_PKF_ARGB32, |
| 50 | .meram = SH_MOBILE_MERAM_PF_RGB, | ||
| 51 | }, { | 46 | }, { |
| 52 | .fourcc = DRM_FORMAT_NV12, | 47 | .fourcc = DRM_FORMAT_NV12, |
| 53 | .bpp = 12, | 48 | .bpp = 12, |
| 54 | .yuv = true, | 49 | .yuv = true, |
| 55 | .lddfr = LDDFR_CC | LDDFR_YF_420, | 50 | .lddfr = LDDFR_CC | LDDFR_YF_420, |
| 56 | .meram = SH_MOBILE_MERAM_PF_NV, | ||
| 57 | }, { | 51 | }, { |
| 58 | .fourcc = DRM_FORMAT_NV21, | 52 | .fourcc = DRM_FORMAT_NV21, |
| 59 | .bpp = 12, | 53 | .bpp = 12, |
| 60 | .yuv = true, | 54 | .yuv = true, |
| 61 | .lddfr = LDDFR_CC | LDDFR_YF_420, | 55 | .lddfr = LDDFR_CC | LDDFR_YF_420, |
| 62 | .meram = SH_MOBILE_MERAM_PF_NV, | ||
| 63 | }, { | 56 | }, { |
| 64 | .fourcc = DRM_FORMAT_NV16, | 57 | .fourcc = DRM_FORMAT_NV16, |
| 65 | .bpp = 16, | 58 | .bpp = 16, |
| 66 | .yuv = true, | 59 | .yuv = true, |
| 67 | .lddfr = LDDFR_CC | LDDFR_YF_422, | 60 | .lddfr = LDDFR_CC | LDDFR_YF_422, |
| 68 | .meram = SH_MOBILE_MERAM_PF_NV, | ||
| 69 | }, { | 61 | }, { |
| 70 | .fourcc = DRM_FORMAT_NV61, | 62 | .fourcc = DRM_FORMAT_NV61, |
| 71 | .bpp = 16, | 63 | .bpp = 16, |
| 72 | .yuv = true, | 64 | .yuv = true, |
| 73 | .lddfr = LDDFR_CC | LDDFR_YF_422, | 65 | .lddfr = LDDFR_CC | LDDFR_YF_422, |
| 74 | .meram = SH_MOBILE_MERAM_PF_NV, | ||
| 75 | }, { | 66 | }, { |
| 76 | .fourcc = DRM_FORMAT_NV24, | 67 | .fourcc = DRM_FORMAT_NV24, |
| 77 | .bpp = 24, | 68 | .bpp = 24, |
| 78 | .yuv = true, | 69 | .yuv = true, |
| 79 | .lddfr = LDDFR_CC | LDDFR_YF_444, | 70 | .lddfr = LDDFR_CC | LDDFR_YF_444, |
| 80 | .meram = SH_MOBILE_MERAM_PF_NV24, | ||
| 81 | }, { | 71 | }, { |
| 82 | .fourcc = DRM_FORMAT_NV42, | 72 | .fourcc = DRM_FORMAT_NV42, |
| 83 | .bpp = 24, | 73 | .bpp = 24, |
| 84 | .yuv = true, | 74 | .yuv = true, |
| 85 | .lddfr = LDDFR_CC | LDDFR_YF_444, | 75 | .lddfr = LDDFR_CC | LDDFR_YF_444, |
| 86 | .meram = SH_MOBILE_MERAM_PF_NV24, | ||
| 87 | }, | 76 | }, |
| 88 | }; | 77 | }; |
| 89 | 78 | ||
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.h b/drivers/gpu/drm/shmobile/shmob_drm_kms.h index 06d5b7caa026..753e2817dc2c 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_kms.h +++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.h | |||
| @@ -24,7 +24,6 @@ struct shmob_drm_format_info { | |||
| 24 | unsigned int bpp; | 24 | unsigned int bpp; |
| 25 | bool yuv; | 25 | bool yuv; |
| 26 | u32 lddfr; | 26 | u32 lddfr; |
| 27 | unsigned int meram; | ||
| 28 | }; | 27 | }; |
| 29 | 28 | ||
| 30 | const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc); | 29 | const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc); |
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c index 97f6e4a3eb0d..1d0359f713ca 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_plane.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c | |||
| @@ -17,8 +17,6 @@ | |||
| 17 | #include <drm/drm_fb_cma_helper.h> | 17 | #include <drm/drm_fb_cma_helper.h> |
| 18 | #include <drm/drm_gem_cma_helper.h> | 18 | #include <drm/drm_gem_cma_helper.h> |
| 19 | 19 | ||
| 20 | #include <video/sh_mobile_meram.h> | ||
| 21 | |||
| 22 | #include "shmob_drm_drv.h" | 20 | #include "shmob_drm_drv.h" |
| 23 | #include "shmob_drm_kms.h" | 21 | #include "shmob_drm_kms.h" |
| 24 | #include "shmob_drm_plane.h" | 22 | #include "shmob_drm_plane.h" |
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c index f01c3e813247..c8bb82fe0b9d 100644 --- a/drivers/media/platform/via-camera.c +++ b/drivers/media/platform/via-camera.c | |||
| @@ -27,7 +27,12 @@ | |||
| 27 | #include <linux/via-core.h> | 27 | #include <linux/via-core.h> |
| 28 | #include <linux/via-gpio.h> | 28 | #include <linux/via-gpio.h> |
| 29 | #include <linux/via_i2c.h> | 29 | #include <linux/via_i2c.h> |
| 30 | |||
| 31 | #ifdef CONFIG_X86 | ||
| 30 | #include <asm/olpc.h> | 32 | #include <asm/olpc.h> |
| 33 | #else | ||
| 34 | #define machine_is_olpc(x) 0 | ||
| 35 | #endif | ||
| 31 | 36 | ||
| 32 | #include "via-camera.h" | 37 | #include "via-camera.h" |
| 33 | 38 | ||
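
The via-camera hunk is the usual no-op-fallback shim: provide a stub macro on
configurations that lack the real header, so call sites stay free of #ifdefs
and the dead branch folds away. A generic sketch of the pattern (the names are
invented for illustration):

	#ifdef CONFIG_BAR
	#include <asm/bar.h>
	#else
	#define bar_present() 0		/* stub: branch folds away */
	#endif

	if (bar_present())
		apply_bar_quirks();	/* hypothetical caller, no #ifdef needed */
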
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 448d1fafc827..f4d81765221e 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h | |||
| @@ -325,6 +325,8 @@ struct nicvf { | |||
| 325 | struct tasklet_struct qs_err_task; | 325 | struct tasklet_struct qs_err_task; |
| 326 | struct work_struct reset_task; | 326 | struct work_struct reset_task; |
| 327 | struct nicvf_work rx_mode_work; | 327 | struct nicvf_work rx_mode_work; |
| 328 | /* spinlock to protect workqueue arguments from concurrent access */ | ||
| 329 | spinlock_t rx_mode_wq_lock; | ||
| 328 | 330 | ||
| 329 | /* PTP timestamp */ | 331 | /* PTP timestamp */ |
| 330 | struct cavium_ptp *ptp_clock; | 332 | struct cavium_ptp *ptp_clock; |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 7135db45927e..135766c4296b 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
| @@ -1923,17 +1923,12 @@ static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) | |||
| 1923 | } | 1923 | } |
| 1924 | } | 1924 | } |
| 1925 | 1925 | ||
| 1926 | static void nicvf_set_rx_mode_task(struct work_struct *work_arg) | 1926 | static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs, |
| 1927 | struct nicvf *nic) | ||
| 1927 | { | 1928 | { |
| 1928 | struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work, | ||
| 1929 | work.work); | ||
| 1930 | struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work); | ||
| 1931 | union nic_mbx mbx = {}; | 1929 | union nic_mbx mbx = {}; |
| 1932 | int idx; | 1930 | int idx; |
| 1933 | 1931 | ||
| 1934 | if (!vf_work) | ||
| 1935 | return; | ||
| 1936 | |||
| 1937 | /* From the inside of VM code flow we have only 128 bits memory | 1932 | /* From the inside of VM code flow we have only 128 bits memory |
| 1938 | * available to send message to host's PF, so send all mc addrs | 1933 | * available to send message to host's PF, so send all mc addrs |
| 1939 | * one by one, starting from flush command in case if kernel | 1934 | * one by one, starting from flush command in case if kernel |
| @@ -1944,7 +1939,7 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg) | |||
| 1944 | mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST; | 1939 | mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST; |
| 1945 | nicvf_send_msg_to_pf(nic, &mbx); | 1940 | nicvf_send_msg_to_pf(nic, &mbx); |
| 1946 | 1941 | ||
| 1947 | if (vf_work->mode & BGX_XCAST_MCAST_FILTER) { | 1942 | if (mode & BGX_XCAST_MCAST_FILTER) { |
| 1948 | /* once enabling filtering, we need to signal to PF to add | 1943 | /* once enabling filtering, we need to signal to PF to add |
| 1949 | * its' own LMAC to the filter to accept packets for it. | 1944 | * its' own LMAC to the filter to accept packets for it. |
| 1950 | */ | 1945 | */ |
| @@ -1954,23 +1949,46 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg) | |||
| 1954 | } | 1949 | } |
| 1955 | 1950 | ||
| 1956 | /* check if we have any specific MACs to be added to PF DMAC filter */ | 1951 | /* check if we have any specific MACs to be added to PF DMAC filter */ |
| 1957 | if (vf_work->mc) { | 1952 | if (mc_addrs) { |
| 1958 | /* now go through kernel list of MACs and add them one by one */ | 1953 | /* now go through kernel list of MACs and add them one by one */ |
| 1959 | for (idx = 0; idx < vf_work->mc->count; idx++) { | 1954 | for (idx = 0; idx < mc_addrs->count; idx++) { |
| 1960 | mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; | 1955 | mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; |
| 1961 | mbx.xcast.data.mac = vf_work->mc->mc[idx]; | 1956 | mbx.xcast.data.mac = mc_addrs->mc[idx]; |
| 1962 | nicvf_send_msg_to_pf(nic, &mbx); | 1957 | nicvf_send_msg_to_pf(nic, &mbx); |
| 1963 | } | 1958 | } |
| 1964 | kfree(vf_work->mc); | 1959 | kfree(mc_addrs); |
| 1965 | } | 1960 | } |
| 1966 | 1961 | ||
| 1967 | /* and finally set rx mode for PF accordingly */ | 1962 | /* and finally set rx mode for PF accordingly */ |
| 1968 | mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST; | 1963 | mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST; |
| 1969 | mbx.xcast.data.mode = vf_work->mode; | 1964 | mbx.xcast.data.mode = mode; |
| 1970 | 1965 | ||
| 1971 | nicvf_send_msg_to_pf(nic, &mbx); | 1966 | nicvf_send_msg_to_pf(nic, &mbx); |
| 1972 | } | 1967 | } |
| 1973 | 1968 | ||
| 1969 | static void nicvf_set_rx_mode_task(struct work_struct *work_arg) | ||
| 1970 | { | ||
| 1971 | struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work, | ||
| 1972 | work.work); | ||
| 1973 | struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work); | ||
| 1974 | u8 mode; | ||
| 1975 | struct xcast_addr_list *mc; | ||
| 1976 | |||
| 1977 | if (!vf_work) | ||
| 1978 | return; | ||
| 1979 | |||
| 1980 | /* Save message data locally to prevent it from | ||
| 1981 | * being overwritten by the next ndo_set_rx_mode() call. | ||
| 1982 | */ | ||
| 1983 | spin_lock(&nic->rx_mode_wq_lock); | ||
| 1984 | mode = vf_work->mode; | ||
| 1985 | mc = vf_work->mc; | ||
| 1986 | vf_work->mc = NULL; | ||
| 1987 | spin_unlock(&nic->rx_mode_wq_lock); | ||
| 1988 | |||
| 1989 | __nicvf_set_rx_mode_task(mode, mc, nic); | ||
| 1990 | } | ||
| 1991 | |||
| 1974 | static void nicvf_set_rx_mode(struct net_device *netdev) | 1992 | static void nicvf_set_rx_mode(struct net_device *netdev) |
| 1975 | { | 1993 | { |
| 1976 | struct nicvf *nic = netdev_priv(netdev); | 1994 | struct nicvf *nic = netdev_priv(netdev); |
| @@ -2004,9 +2022,12 @@ static void nicvf_set_rx_mode(struct net_device *netdev) | |||
| 2004 | } | 2022 | } |
| 2005 | } | 2023 | } |
| 2006 | } | 2024 | } |
| 2025 | spin_lock(&nic->rx_mode_wq_lock); | ||
| 2026 | kfree(nic->rx_mode_work.mc); | ||
| 2007 | nic->rx_mode_work.mc = mc_list; | 2027 | nic->rx_mode_work.mc = mc_list; |
| 2008 | nic->rx_mode_work.mode = mode; | 2028 | nic->rx_mode_work.mode = mode; |
| 2009 | queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 2 * HZ); | 2029 | queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0); |
| 2030 | spin_unlock(&nic->rx_mode_wq_lock); | ||
| 2010 | } | 2031 | } |
| 2011 | 2032 | ||
| 2012 | static const struct net_device_ops nicvf_netdev_ops = { | 2033 | static const struct net_device_ops nicvf_netdev_ops = { |
| @@ -2163,6 +2184,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 2163 | INIT_WORK(&nic->reset_task, nicvf_reset_task); | 2184 | INIT_WORK(&nic->reset_task, nicvf_reset_task); |
| 2164 | 2185 | ||
| 2165 | INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); | 2186 | INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); |
| 2187 | spin_lock_init(&nic->rx_mode_wq_lock); | ||
| 2166 | 2188 | ||
| 2167 | err = register_netdev(netdev); | 2189 | err = register_netdev(netdev); |
| 2168 | if (err) { | 2190 | if (err) { |
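
The nicvf fix is the snapshot-under-lock pattern: the work handler copies the
shared arguments and takes ownership of the pointer under the same spinlock
the producer holds, then works on the private copy unlocked. A generic sketch
(struct and function names are illustrative, not the driver's):

	struct my_ctx {
		spinlock_t lock;
		u8 mode;
		struct addr_list *list;	/* hypothetical payload type */
		struct work_struct work;
	};

	static void my_work_fn(struct work_struct *w)
	{
		struct my_ctx *ctx = container_of(w, struct my_ctx, work);
		struct addr_list *list;
		u8 mode;

		spin_lock(&ctx->lock);
		mode = ctx->mode;	/* copy the scalar */
		list = ctx->list;	/* steal the pointer */
		ctx->list = NULL;	/* producer frees any stale list itself */
		spin_unlock(&ctx->lock);

		process(mode, list);	/* hypothetical; safe without the lock */
		kfree(list);
	}
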
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 2edfdbdaae48..7b795edd9d3a 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | |||
| @@ -3362,10 +3362,17 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 3362 | 3362 | ||
| 3363 | err = sysfs_create_group(&adapter->port[0]->dev.kobj, | 3363 | err = sysfs_create_group(&adapter->port[0]->dev.kobj, |
| 3364 | &cxgb3_attr_group); | 3364 | &cxgb3_attr_group); |
| 3365 | if (err) { | ||
| 3366 | dev_err(&pdev->dev, "cannot create sysfs group\n"); | ||
| 3367 | goto out_close_led; | ||
| 3368 | } | ||
| 3365 | 3369 | ||
| 3366 | print_port_info(adapter, ai); | 3370 | print_port_info(adapter, ai); |
| 3367 | return 0; | 3371 | return 0; |
| 3368 | 3372 | ||
| 3373 | out_close_led: | ||
| 3374 | t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0); | ||
| 3375 | |||
| 3369 | out_free_dev: | 3376 | out_free_dev: |
| 3370 | iounmap(adapter->regs); | 3377 | iounmap(adapter->regs); |
| 3371 | for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i) | 3378 | for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i) |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index fc534e91c6b2..144d5fe6b944 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h | |||
| @@ -760,9 +760,9 @@ struct ixgbe_adapter { | |||
| 760 | #define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ | 760 | #define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ |
| 761 | u32 *rss_key; | 761 | u32 *rss_key; |
| 762 | 762 | ||
| 763 | #ifdef CONFIG_XFRM | 763 | #ifdef CONFIG_XFRM_OFFLOAD |
| 764 | struct ixgbe_ipsec *ipsec; | 764 | struct ixgbe_ipsec *ipsec; |
| 765 | #endif /* CONFIG_XFRM */ | 765 | #endif /* CONFIG_XFRM_OFFLOAD */ |
| 766 | }; | 766 | }; |
| 767 | 767 | ||
| 768 | static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) | 768 | static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 344a1f213a5f..c116f459945d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | |||
| @@ -158,7 +158,16 @@ static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter) | |||
| 158 | reg |= IXGBE_SECRXCTRL_RX_DIS; | 158 | reg |= IXGBE_SECRXCTRL_RX_DIS; |
| 159 | IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg); | 159 | IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg); |
| 160 | 160 | ||
| 161 | IXGBE_WRITE_FLUSH(hw); | 161 | /* If both Tx and Rx are ready there are no packets |
| 162 | * that we need to flush so the loopback configuration | ||
| 163 | * below is not necessary. | ||
| 164 | */ | ||
| 165 | t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) & | ||
| 166 | IXGBE_SECTXSTAT_SECTX_RDY; | ||
| 167 | r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) & | ||
| 168 | IXGBE_SECRXSTAT_SECRX_RDY; | ||
| 169 | if (t_rdy && r_rdy) | ||
| 170 | return; | ||
| 162 | 171 | ||
| 163 | /* If the tx fifo doesn't have link, but still has data, | 172 | /* If the tx fifo doesn't have link, but still has data, |
| 164 | * we can't clear the tx sec block. Set the MAC loopback | 173 | * we can't clear the tx sec block. Set the MAC loopback |
| @@ -185,7 +194,7 @@ static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter) | |||
| 185 | IXGBE_SECTXSTAT_SECTX_RDY; | 194 | IXGBE_SECTXSTAT_SECTX_RDY; |
| 186 | r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) & | 195 | r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) & |
| 187 | IXGBE_SECRXSTAT_SECRX_RDY; | 196 | IXGBE_SECRXSTAT_SECRX_RDY; |
| 188 | } while (!t_rdy && !r_rdy && limit--); | 197 | } while (!(t_rdy && r_rdy) && limit--); |
| 189 | 198 | ||
| 190 | /* undo loopback if we played with it earlier */ | 199 | /* undo loopback if we played with it earlier */ |
| 191 | if (!link) { | 200 | if (!link) { |
| @@ -966,10 +975,22 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, | |||
| 966 | **/ | 975 | **/ |
| 967 | void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) | 976 | void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) |
| 968 | { | 977 | { |
| 978 | struct ixgbe_hw *hw = &adapter->hw; | ||
| 969 | struct ixgbe_ipsec *ipsec; | 979 | struct ixgbe_ipsec *ipsec; |
| 980 | u32 t_dis, r_dis; | ||
| 970 | size_t size; | 981 | size_t size; |
| 971 | 982 | ||
| 972 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) | 983 | if (hw->mac.type == ixgbe_mac_82598EB) |
| 984 | return; | ||
| 985 | |||
| 986 | /* If there is no support for either Tx or Rx offload | ||
| 987 | * we should not be advertising support for IPsec. | ||
| 988 | */ | ||
| 989 | t_dis = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) & | ||
| 990 | IXGBE_SECTXSTAT_SECTX_OFF_DIS; | ||
| 991 | r_dis = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) & | ||
| 992 | IXGBE_SECRXSTAT_SECRX_OFF_DIS; | ||
| 993 | if (t_dis || r_dis) | ||
| 973 | return; | 994 | return; |
| 974 | 995 | ||
| 975 | ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL); | 996 | ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL); |
| @@ -1001,13 +1022,6 @@ void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) | |||
| 1001 | 1022 | ||
| 1002 | adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops; | 1023 | adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops; |
| 1003 | 1024 | ||
| 1004 | #define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \ | ||
| 1005 | NETIF_F_HW_ESP_TX_CSUM | \ | ||
| 1006 | NETIF_F_GSO_ESP) | ||
| 1007 | |||
| 1008 | adapter->netdev->features |= IXGBE_ESP_FEATURES; | ||
| 1009 | adapter->netdev->hw_enc_features |= IXGBE_ESP_FEATURES; | ||
| 1010 | |||
| 1011 | return; | 1025 | return; |
| 1012 | 1026 | ||
| 1013 | err2: | 1027 | err2: |
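
The while-condition change in ixgbe_ipsec_stop_data() is worth spelling out:
the intent is to poll until both blocks are ready, which is !(t_rdy && r_rdy)
as the continue-condition; the old !t_rdy && !r_rdy stopped polling as soon as
either one became ready. In generic form:

	while (!(a_ready && b_ready) && limit--)	/* wait for BOTH */
		poll_once();				/* hypothetical poll step */
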
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 893a9206e718..d361f570ca37 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | |||
| @@ -593,6 +593,14 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) | |||
| 593 | } | 593 | } |
| 594 | 594 | ||
| 595 | #endif | 595 | #endif |
| 596 | /* To support macvlan offload we have to use num_tc to | ||
| 597 | * restrict the queues that can be used by the device. | ||
| 598 | * By doing this we can avoid reporting a false number of | ||
| 599 | * queues. | ||
| 600 | */ | ||
| 601 | if (vmdq_i > 1) | ||
| 602 | netdev_set_num_tc(adapter->netdev, 1); | ||
| 603 | |||
| 596 | /* populate TC0 for use by pool 0 */ | 604 | /* populate TC0 for use by pool 0 */ |
| 597 | netdev_set_tc_queue(adapter->netdev, 0, | 605 | netdev_set_tc_queue(adapter->netdev, 0, |
| 598 | adapter->num_rx_queues_per_pool, 0); | 606 | adapter->num_rx_queues_per_pool, 0); |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0b1ba3ae159c..3e87dbbc9024 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
| @@ -6117,6 +6117,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, | |||
| 6117 | #ifdef CONFIG_IXGBE_DCB | 6117 | #ifdef CONFIG_IXGBE_DCB |
| 6118 | ixgbe_init_dcb(adapter); | 6118 | ixgbe_init_dcb(adapter); |
| 6119 | #endif | 6119 | #endif |
| 6120 | ixgbe_init_ipsec_offload(adapter); | ||
| 6120 | 6121 | ||
| 6121 | /* default flow control settings */ | 6122 | /* default flow control settings */ |
| 6122 | hw->fc.requested_mode = ixgbe_fc_full; | 6123 | hw->fc.requested_mode = ixgbe_fc_full; |
| @@ -8822,14 +8823,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) | |||
| 8822 | } else { | 8823 | } else { |
| 8823 | netdev_reset_tc(dev); | 8824 | netdev_reset_tc(dev); |
| 8824 | 8825 | ||
| 8825 | /* To support macvlan offload we have to use num_tc to | ||
| 8826 | * restrict the queues that can be used by the device. | ||
| 8827 | * By doing this we can avoid reporting a false number of | ||
| 8828 | * queues. | ||
| 8829 | */ | ||
| 8830 | if (!tc && adapter->num_rx_pools > 1) | ||
| 8831 | netdev_set_num_tc(dev, 1); | ||
| 8832 | |||
| 8833 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) | 8826 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) |
| 8834 | adapter->hw.fc.requested_mode = adapter->last_lfc_mode; | 8827 | adapter->hw.fc.requested_mode = adapter->last_lfc_mode; |
| 8835 | 8828 | ||
| @@ -9904,7 +9897,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, | |||
| 9904 | * the TSO, so it's the exception. | 9897 | * the TSO, so it's the exception. |
| 9905 | */ | 9898 | */ |
| 9906 | if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) { | 9899 | if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) { |
| 9907 | #ifdef CONFIG_XFRM | 9900 | #ifdef CONFIG_XFRM_OFFLOAD |
| 9908 | if (!skb->sp) | 9901 | if (!skb->sp) |
| 9909 | #endif | 9902 | #endif |
| 9910 | features &= ~NETIF_F_TSO; | 9903 | features &= ~NETIF_F_TSO; |
| @@ -10437,6 +10430,14 @@ skip_sriov: | |||
| 10437 | if (hw->mac.type >= ixgbe_mac_82599EB) | 10430 | if (hw->mac.type >= ixgbe_mac_82599EB) |
| 10438 | netdev->features |= NETIF_F_SCTP_CRC; | 10431 | netdev->features |= NETIF_F_SCTP_CRC; |
| 10439 | 10432 | ||
| 10433 | #ifdef CONFIG_XFRM_OFFLOAD | ||
| 10434 | #define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \ | ||
| 10435 | NETIF_F_HW_ESP_TX_CSUM | \ | ||
| 10436 | NETIF_F_GSO_ESP) | ||
| 10437 | |||
| 10438 | if (adapter->ipsec) | ||
| 10439 | netdev->features |= IXGBE_ESP_FEATURES; | ||
| 10440 | #endif | ||
| 10440 | /* copy netdev features into list of user selectable features */ | 10441 | /* copy netdev features into list of user selectable features */ |
| 10441 | netdev->hw_features |= netdev->features | | 10442 | netdev->hw_features |= netdev->features | |
| 10442 | NETIF_F_HW_VLAN_CTAG_FILTER | | 10443 | NETIF_F_HW_VLAN_CTAG_FILTER | |
| @@ -10499,8 +10500,6 @@ skip_sriov: | |||
| 10499 | NETIF_F_FCOE_MTU; | 10500 | NETIF_F_FCOE_MTU; |
| 10500 | } | 10501 | } |
| 10501 | #endif /* IXGBE_FCOE */ | 10502 | #endif /* IXGBE_FCOE */ |
| 10502 | ixgbe_init_ipsec_offload(adapter); | ||
| 10503 | |||
| 10504 | if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) | 10503 | if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) |
| 10505 | netdev->hw_features |= NETIF_F_LRO; | 10504 | netdev->hw_features |= NETIF_F_LRO; |
| 10506 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) | 10505 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index e8ed37749ab1..44cfb2021145 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | |||
| @@ -599,13 +599,15 @@ struct ixgbe_nvm_version { | |||
| 599 | #define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 | 599 | #define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 |
| 600 | 600 | ||
| 601 | #define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 | 601 | #define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 |
| 602 | #define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002 | 602 | #define IXGBE_SECTXSTAT_SECTX_OFF_DIS 0x00000002 |
| 603 | #define IXGBE_SECTXSTAT_ECC_TXERR 0x00000004 | ||
| 603 | 604 | ||
| 604 | #define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 | 605 | #define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 |
| 605 | #define IXGBE_SECRXCTRL_RX_DIS 0x00000002 | 606 | #define IXGBE_SECRXCTRL_RX_DIS 0x00000002 |
| 606 | 607 | ||
| 607 | #define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 | 608 | #define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 |
| 608 | #define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002 | 609 | #define IXGBE_SECRXSTAT_SECRX_OFF_DIS 0x00000002 |
| 610 | #define IXGBE_SECRXSTAT_ECC_RXERR 0x00000004 | ||
| 609 | 611 | ||
| 610 | /* LinkSec (MacSec) Registers */ | 612 | /* LinkSec (MacSec) Registers */ |
| 611 | #define IXGBE_LSECTXCAP 0x08A00 | 613 | #define IXGBE_LSECTXCAP 0x08A00 |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 77b2adb29341..6aaaf3d9ba31 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
| @@ -4756,12 +4756,6 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6) | |||
| 4756 | kfree(mlxsw_sp_rt6); | 4756 | kfree(mlxsw_sp_rt6); |
| 4757 | } | 4757 | } |
| 4758 | 4758 | ||
| 4759 | static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt) | ||
| 4760 | { | ||
| 4761 | /* RTF_CACHE routes are ignored */ | ||
| 4762 | return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY; | ||
| 4763 | } | ||
| 4764 | |||
| 4765 | static struct fib6_info * | 4759 | static struct fib6_info * |
| 4766 | mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) | 4760 | mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) |
| 4767 | { | 4761 | { |
| @@ -4771,11 +4765,11 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) | |||
| 4771 | 4765 | ||
| 4772 | static struct mlxsw_sp_fib6_entry * | 4766 | static struct mlxsw_sp_fib6_entry * |
| 4773 | mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, | 4767 | mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, |
| 4774 | const struct fib6_info *nrt, bool replace) | 4768 | const struct fib6_info *nrt, bool append) |
| 4775 | { | 4769 | { |
| 4776 | struct mlxsw_sp_fib6_entry *fib6_entry; | 4770 | struct mlxsw_sp_fib6_entry *fib6_entry; |
| 4777 | 4771 | ||
| 4778 | if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace) | 4772 | if (!append) |
| 4779 | return NULL; | 4773 | return NULL; |
| 4780 | 4774 | ||
| 4781 | list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { | 4775 | list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { |
| @@ -4790,8 +4784,7 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, | |||
| 4790 | break; | 4784 | break; |
| 4791 | if (rt->fib6_metric < nrt->fib6_metric) | 4785 | if (rt->fib6_metric < nrt->fib6_metric) |
| 4792 | continue; | 4786 | continue; |
| 4793 | if (rt->fib6_metric == nrt->fib6_metric && | 4787 | if (rt->fib6_metric == nrt->fib6_metric) |
| 4794 | mlxsw_sp_fib6_rt_can_mp(rt)) | ||
| 4795 | return fib6_entry; | 4788 | return fib6_entry; |
| 4796 | if (rt->fib6_metric > nrt->fib6_metric) | 4789 | if (rt->fib6_metric > nrt->fib6_metric) |
| 4797 | break; | 4790 | break; |
| @@ -5170,7 +5163,7 @@ static struct mlxsw_sp_fib6_entry * | |||
| 5170 | mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, | 5163 | mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, |
| 5171 | const struct fib6_info *nrt, bool replace) | 5164 | const struct fib6_info *nrt, bool replace) |
| 5172 | { | 5165 | { |
| 5173 | struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL; | 5166 | struct mlxsw_sp_fib6_entry *fib6_entry; |
| 5174 | 5167 | ||
| 5175 | list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { | 5168 | list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { |
| 5176 | struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); | 5169 | struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); |
| @@ -5179,18 +5172,13 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, | |||
| 5179 | continue; | 5172 | continue; |
| 5180 | if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id) | 5173 | if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id) |
| 5181 | break; | 5174 | break; |
| 5182 | if (replace && rt->fib6_metric == nrt->fib6_metric) { | 5175 | if (replace && rt->fib6_metric == nrt->fib6_metric) |
| 5183 | if (mlxsw_sp_fib6_rt_can_mp(rt) == | 5176 | return fib6_entry; |
| 5184 | mlxsw_sp_fib6_rt_can_mp(nrt)) | ||
| 5185 | return fib6_entry; | ||
| 5186 | if (mlxsw_sp_fib6_rt_can_mp(nrt)) | ||
| 5187 | fallback = fallback ?: fib6_entry; | ||
| 5188 | } | ||
| 5189 | if (rt->fib6_metric > nrt->fib6_metric) | 5177 | if (rt->fib6_metric > nrt->fib6_metric) |
| 5190 | return fallback ?: fib6_entry; | 5178 | return fib6_entry; |
| 5191 | } | 5179 | } |
| 5192 | 5180 | ||
| 5193 | return fallback; | 5181 | return NULL; |
| 5194 | } | 5182 | } |
| 5195 | 5183 | ||
| 5196 | static int | 5184 | static int |
| @@ -5316,7 +5304,8 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp, | |||
| 5316 | } | 5304 | } |
| 5317 | 5305 | ||
| 5318 | static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, | 5306 | static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, |
| 5319 | struct fib6_info *rt, bool replace) | 5307 | struct fib6_info *rt, bool replace, |
| 5308 | bool append) | ||
| 5320 | { | 5309 | { |
| 5321 | struct mlxsw_sp_fib6_entry *fib6_entry; | 5310 | struct mlxsw_sp_fib6_entry *fib6_entry; |
| 5322 | struct mlxsw_sp_fib_node *fib_node; | 5311 | struct mlxsw_sp_fib_node *fib_node; |
| @@ -5342,7 +5331,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, | |||
| 5342 | /* Before creating a new entry, try to append route to an existing | 5331 | /* Before creating a new entry, try to append route to an existing |
| 5343 | * multipath entry. | 5332 | * multipath entry. |
| 5344 | */ | 5333 | */ |
| 5345 | fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace); | 5334 | fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append); |
| 5346 | if (fib6_entry) { | 5335 | if (fib6_entry) { |
| 5347 | err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt); | 5336 | err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt); |
| 5348 | if (err) | 5337 | if (err) |
| @@ -5350,6 +5339,14 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, | |||
| 5350 | return 0; | 5339 | return 0; |
| 5351 | } | 5340 | } |
| 5352 | 5341 | ||
| 5342 | /* We received an append event, yet did not find any route to | ||
| 5343 | * append to. | ||
| 5344 | */ | ||
| 5345 | if (WARN_ON(append)) { | ||
| 5346 | err = -EINVAL; | ||
| 5347 | goto err_fib6_entry_append; | ||
| 5348 | } | ||
| 5349 | |||
| 5353 | fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt); | 5350 | fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt); |
| 5354 | if (IS_ERR(fib6_entry)) { | 5351 | if (IS_ERR(fib6_entry)) { |
| 5355 | err = PTR_ERR(fib6_entry); | 5352 | err = PTR_ERR(fib6_entry); |
| @@ -5367,6 +5364,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, | |||
| 5367 | err_fib6_node_entry_link: | 5364 | err_fib6_node_entry_link: |
| 5368 | mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); | 5365 | mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); |
| 5369 | err_fib6_entry_create: | 5366 | err_fib6_entry_create: |
| 5367 | err_fib6_entry_append: | ||
| 5370 | err_fib6_entry_nexthop_add: | 5368 | err_fib6_entry_nexthop_add: |
| 5371 | mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); | 5369 | mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); |
| 5372 | return err; | 5370 | return err; |
| @@ -5717,7 +5715,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) | |||
| 5717 | struct mlxsw_sp_fib_event_work *fib_work = | 5715 | struct mlxsw_sp_fib_event_work *fib_work = |
| 5718 | container_of(work, struct mlxsw_sp_fib_event_work, work); | 5716 | container_of(work, struct mlxsw_sp_fib_event_work, work); |
| 5719 | struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; | 5717 | struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; |
| 5720 | bool replace; | 5718 | bool replace, append; |
| 5721 | int err; | 5719 | int err; |
| 5722 | 5720 | ||
| 5723 | rtnl_lock(); | 5721 | rtnl_lock(); |
| @@ -5728,8 +5726,10 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) | |||
| 5728 | case FIB_EVENT_ENTRY_APPEND: /* fall through */ | 5726 | case FIB_EVENT_ENTRY_APPEND: /* fall through */ |
| 5729 | case FIB_EVENT_ENTRY_ADD: | 5727 | case FIB_EVENT_ENTRY_ADD: |
| 5730 | replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; | 5728 | replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; |
| 5729 | append = fib_work->event == FIB_EVENT_ENTRY_APPEND; | ||
| 5731 | err = mlxsw_sp_router_fib6_add(mlxsw_sp, | 5730 | err = mlxsw_sp_router_fib6_add(mlxsw_sp, |
| 5732 | fib_work->fen6_info.rt, replace); | 5731 | fib_work->fen6_info.rt, replace, |
| 5732 | append); | ||
| 5733 | if (err) | 5733 | if (err) |
| 5734 | mlxsw_sp_router_fib_abort(mlxsw_sp); | 5734 | mlxsw_sp_router_fib_abort(mlxsw_sp); |
| 5735 | mlxsw_sp_rt6_release(fib_work->fen6_info.rt); | 5735 | mlxsw_sp_rt6_release(fib_work->fen6_info.rt); |
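The mlxsw hunks above split the old replace-or-append heuristic into two explicit flags derived from the FIB event type, and treat an append event that finds no existing multipath entry as a kernel bug. A minimal sketch of that control flow, with a hypothetical function name standing in for the driver's code:

    /* Sketch only: 'append' is derived from FIB_EVENT_ENTRY_APPEND.
     * Reaching this point with 'append' set means the kernel FIB and
     * the driver disagree, so WARN_ON() flags it and the call fails.
     */
    static int fib6_add_sketch(void *existing_entry, bool append)
    {
            if (existing_entry)
                    return 0;               /* nexthop appended in place */

            if (WARN_ON(append))            /* append with nothing to append to */
                    return -EINVAL;

            /* ...otherwise fall through to creating a new entry... */
            return 0;
    }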
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index e97652c40d13..eea5666a86b2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
| @@ -1018,8 +1018,10 @@ mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, | |||
| 1018 | int err; | 1018 | int err; |
| 1019 | 1019 | ||
| 1020 | /* No need to continue if only VLAN flags were changed */ | 1020 | /* No need to continue if only VLAN flags were changed */ |
| 1021 | if (mlxsw_sp_port_vlan->bridge_port) | 1021 | if (mlxsw_sp_port_vlan->bridge_port) { |
| 1022 | mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); | ||
| 1022 | return 0; | 1023 | return 0; |
| 1024 | } | ||
| 1023 | 1025 | ||
| 1024 | err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port); | 1026 | err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port); |
| 1025 | if (err) | 1027 | if (err) |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 19cfa162ac65..1decf3a1cad3 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c | |||
| @@ -455,6 +455,7 @@ static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, | |||
| 455 | 455 | ||
| 456 | eth_hw_addr_random(nn->dp.netdev); | 456 | eth_hw_addr_random(nn->dp.netdev); |
| 457 | netif_keep_dst(nn->dp.netdev); | 457 | netif_keep_dst(nn->dp.netdev); |
| 458 | nn->vnic_no_name = true; | ||
| 458 | 459 | ||
| 459 | return 0; | 460 | return 0; |
| 460 | 461 | ||
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index ec524d97869d..78afe75129ab 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | |||
| @@ -381,6 +381,8 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, | |||
| 381 | err = PTR_ERR_OR_ZERO(rt); | 381 | err = PTR_ERR_OR_ZERO(rt); |
| 382 | if (err) | 382 | if (err) |
| 383 | return NOTIFY_DONE; | 383 | return NOTIFY_DONE; |
| 384 | |||
| 385 | ip_rt_put(rt); | ||
| 384 | #else | 386 | #else |
| 385 | return NOTIFY_DONE; | 387 | return NOTIFY_DONE; |
| 386 | #endif | 388 | #endif |
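The one-line nfp fix above closes a dst reference leak: ip_route_output_key() hands back a refcounted struct rtable even when the caller only wants to know whether a route exists. A hedged sketch of the rule being applied:

    static int check_route_sketch(struct net *net, struct flowi4 *flow)
    {
            struct rtable *rt;

            rt = ip_route_output_key(net, flow);    /* takes a dst reference */
            if (IS_ERR(rt))
                    return NOTIFY_DONE;             /* no route: nothing to do */

            ip_rt_put(rt);          /* reachability confirmed; drop the ref */
            return NOTIFY_OK;
    }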
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 57cb035dcc6d..2a71a9ffd095 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h | |||
| @@ -590,6 +590,8 @@ struct nfp_net_dp { | |||
| 590 | * @vnic_list: Entry on device vNIC list | 590 | * @vnic_list: Entry on device vNIC list |
| 591 | * @pdev: Backpointer to PCI device | 591 | * @pdev: Backpointer to PCI device |
| 592 | * @app: APP handle if available | 592 | * @app: APP handle if available |
| 593 | * @vnic_no_name: For non-port PF vNIC make ndo_get_phys_port_name return | ||
| 594 | * -EOPNOTSUPP to keep backwards compatibility (set by app) | ||
| 593 | * @port: Pointer to nfp_port structure if vNIC is a port | 595 | * @port: Pointer to nfp_port structure if vNIC is a port |
| 594 | * @app_priv: APP private data for this vNIC | 596 | * @app_priv: APP private data for this vNIC |
| 595 | */ | 597 | */ |
| @@ -663,6 +665,8 @@ struct nfp_net { | |||
| 663 | struct pci_dev *pdev; | 665 | struct pci_dev *pdev; |
| 664 | struct nfp_app *app; | 666 | struct nfp_app *app; |
| 665 | 667 | ||
| 668 | bool vnic_no_name; | ||
| 669 | |||
| 666 | struct nfp_port *port; | 670 | struct nfp_port *port; |
| 667 | 671 | ||
| 668 | void *app_priv; | 672 | void *app_priv; |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 75110c8d6a90..d4c27f849f9b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |||
| @@ -3121,7 +3121,7 @@ static void nfp_net_stat64(struct net_device *netdev, | |||
| 3121 | struct nfp_net *nn = netdev_priv(netdev); | 3121 | struct nfp_net *nn = netdev_priv(netdev); |
| 3122 | int r; | 3122 | int r; |
| 3123 | 3123 | ||
| 3124 | for (r = 0; r < nn->dp.num_r_vecs; r++) { | 3124 | for (r = 0; r < nn->max_r_vecs; r++) { |
| 3125 | struct nfp_net_r_vector *r_vec = &nn->r_vecs[r]; | 3125 | struct nfp_net_r_vector *r_vec = &nn->r_vecs[r]; |
| 3126 | u64 data[3]; | 3126 | u64 data[3]; |
| 3127 | unsigned int start; | 3127 | unsigned int start; |
| @@ -3286,7 +3286,7 @@ nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len) | |||
| 3286 | if (nn->port) | 3286 | if (nn->port) |
| 3287 | return nfp_port_get_phys_port_name(netdev, name, len); | 3287 | return nfp_port_get_phys_port_name(netdev, name, len); |
| 3288 | 3288 | ||
| 3289 | if (nn->dp.is_vf) | 3289 | if (nn->dp.is_vf || nn->vnic_no_name) |
| 3290 | return -EOPNOTSUPP; | 3290 | return -EOPNOTSUPP; |
| 3291 | 3291 | ||
| 3292 | n = snprintf(name, len, "n%d", nn->id); | 3292 | n = snprintf(name, len, "n%d", nn->id); |
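The stat64 change above widens the summation loop: once the ring count has been lowered at runtime, vectors beyond dp.num_r_vecs are inactive but still hold counts for traffic they handled earlier, so summing only the active vectors would make the reported totals drop. A sketch of the idea, with a hypothetical accessor standing in for the per-vector counters:

    u64 total = 0;
    int r;

    /* Sum over every vector that was ever active, not just the
     * currently configured ones, so counters never go backwards.
     */
    for (r = 0; r < nn->max_r_vecs; r++)
            total += per_vec_packets(&nn->r_vecs[r]);   /* hypothetical */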
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c index 2dd89dba9311..d32af598da90 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c | |||
| @@ -98,21 +98,18 @@ struct nfp_resource { | |||
| 98 | 98 | ||
| 99 | static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res) | 99 | static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res) |
| 100 | { | 100 | { |
| 101 | char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ] = {}; | ||
| 102 | struct nfp_resource_entry entry; | 101 | struct nfp_resource_entry entry; |
| 103 | u32 cpp_id, key; | 102 | u32 cpp_id, key; |
| 104 | int ret, i; | 103 | int ret, i; |
| 105 | 104 | ||
| 106 | cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0); /* Atomic read */ | 105 | cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0); /* Atomic read */ |
| 107 | 106 | ||
| 108 | strncpy(name_pad, res->name, sizeof(name_pad)); | ||
| 109 | |||
| 110 | /* Search for a matching entry */ | 107 | /* Search for a matching entry */ |
| 111 | if (!memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) { | 108 | if (!strcmp(res->name, NFP_RESOURCE_TBL_NAME)) { |
| 112 | nfp_err(cpp, "Grabbing device lock not supported\n"); | 109 | nfp_err(cpp, "Grabbing device lock not supported\n"); |
| 113 | return -EOPNOTSUPP; | 110 | return -EOPNOTSUPP; |
| 114 | } | 111 | } |
| 115 | key = crc32_posix(name_pad, sizeof(name_pad)); | 112 | key = crc32_posix(res->name, NFP_RESOURCE_ENTRY_NAME_SZ); |
| 116 | 113 | ||
| 117 | for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) { | 114 | for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) { |
| 118 | u64 addr = NFP_RESOURCE_TBL_BASE + | 115 | u64 addr = NFP_RESOURCE_TBL_BASE + |
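The nfp_resource change drops the temporary zero-padded copy: resource names already live in fixed-size, NUL-padded fields, so the CRC can run over res->name directly and the special-name check becomes a plain strcmp(). A small userspace sketch of why hashing the full fixed-width field yields a stable key regardless of name length (a toy hash stands in for crc32_posix):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NAME_SZ 8       /* stands in for NFP_RESOURCE_ENTRY_NAME_SZ */

    static uint32_t toy_hash(const char *buf, size_t len)
    {
            uint32_t h = 2166136261u;       /* FNV-1a, illustration only */
            while (len--)
                    h = (h ^ (uint8_t)*buf++) * 16777619u;
            return h;
    }

    int main(void)
    {
            char name[NAME_SZ] = "abc";     /* remaining bytes are NUL */

            /* Hashing all NAME_SZ bytes keys on the padded field; no
             * separate padded copy is needed when the name is stored
             * in such a field to begin with.
             */
            printf("key = %08x\n", toy_hash(name, sizeof(name)));
            return 0;
    }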
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index e78e5db39458..c694e3428dfc 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | |||
| @@ -384,6 +384,7 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) | |||
| 384 | } | 384 | } |
| 385 | 385 | ||
| 386 | sgmii_pdev = of_find_device_by_node(np); | 386 | sgmii_pdev = of_find_device_by_node(np); |
| 387 | of_node_put(np); | ||
| 387 | if (!sgmii_pdev) { | 388 | if (!sgmii_pdev) { |
| 388 | dev_err(&pdev->dev, "invalid internal-phy property\n"); | 389 | dev_err(&pdev->dev, "invalid internal-phy property\n"); |
| 389 | return -ENODEV; | 390 | return -ENODEV; |
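The emac-sgmii fix applies the usual OF refcount rule: a node obtained from a phandle lookup carries an elevated refcount that must be dropped with of_node_put() once the node itself is no longer needed, whether or not the subsequent lookup succeeds. Sketch of the corrected shape (the property name here is inferred from the error message):

    np = of_parse_phandle(pdev->dev.of_node, "internal-phy", 0);
    if (!np)
            return -ENODEV;

    sgmii_pdev = of_find_device_by_node(np);
    of_node_put(np);        /* done with the node on both paths */
    if (!sgmii_pdev) {
            dev_err(&pdev->dev, "invalid internal-phy property\n");
            return -ENODEV;
    }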
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 4ff231df7322..c5979569fd60 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | |||
| @@ -334,9 +334,10 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) | |||
| 334 | 334 | ||
| 335 | dwmac->data = (const struct meson8b_dwmac_data *) | 335 | dwmac->data = (const struct meson8b_dwmac_data *) |
| 336 | of_device_get_match_data(&pdev->dev); | 336 | of_device_get_match_data(&pdev->dev); |
| 337 | if (!dwmac->data) | 337 | if (!dwmac->data) { |
| 338 | return -EINVAL; | 338 | ret = -EINVAL; |
| 339 | 339 | goto err_remove_config_dt; | |
| 340 | } | ||
| 340 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 341 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
| 341 | dwmac->regs = devm_ioremap_resource(&pdev->dev, res); | 342 | dwmac->regs = devm_ioremap_resource(&pdev->dev, res); |
| 342 | if (IS_ERR(dwmac->regs)) { | 343 | if (IS_ERR(dwmac->regs)) { |
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 14770fc8865e..1f50e83cafb2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c | |||
| @@ -252,13 +252,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv) | |||
| 252 | return ret; | 252 | return ret; |
| 253 | } | 253 | } |
| 254 | 254 | ||
| 255 | /* Run quirks, if needed */ | 255 | /* Save quirks for later use, if needed */ |
| 256 | if (entry->quirks) { | 256 | priv->hwif_quirks = entry->quirks; |
| 257 | ret = entry->quirks(priv); | ||
| 258 | if (ret) | ||
| 259 | return ret; | ||
| 260 | } | ||
| 261 | |||
| 262 | return 0; | 257 | return 0; |
| 263 | } | 258 | } |
| 264 | 259 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 025efbf6145c..76649adf8fb0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
| @@ -129,6 +129,7 @@ struct stmmac_priv { | |||
| 129 | struct net_device *dev; | 129 | struct net_device *dev; |
| 130 | struct device *device; | 130 | struct device *device; |
| 131 | struct mac_device_info *hw; | 131 | struct mac_device_info *hw; |
| 132 | int (*hwif_quirks)(struct stmmac_priv *priv); | ||
| 132 | struct mutex lock; | 133 | struct mutex lock; |
| 133 | 134 | ||
| 134 | /* RX Queue */ | 135 | /* RX Queue */ |
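Taken together with the hwif.c hunk above and the stmmac_main.c hunk further below, this new member defers the quirk callback: interface discovery only records the function pointer, and stmmac_hw_init() invokes it later, once the hardware state a quirk may touch has been set up. A condensed view of the two halves:

    /* at interface discovery (hwif.c): record, don't run */
    priv->hwif_quirks = entry->quirks;

    /* at hardware init (stmmac_main.c): run if one was recorded */
    if (priv->hwif_quirks) {
            ret = priv->hwif_quirks(priv);
            if (ret)
                    return ret;
    }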
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 11fb7c777d89..e79b0d7b388a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -3182,17 +3182,22 @@ dma_map_err: | |||
| 3182 | 3182 | ||
| 3183 | static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) | 3183 | static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) |
| 3184 | { | 3184 | { |
| 3185 | struct ethhdr *ehdr; | 3185 | struct vlan_ethhdr *veth; |
| 3186 | __be16 vlan_proto; | ||
| 3186 | u16 vlanid; | 3187 | u16 vlanid; |
| 3187 | 3188 | ||
| 3188 | if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) == | 3189 | veth = (struct vlan_ethhdr *)skb->data; |
| 3189 | NETIF_F_HW_VLAN_CTAG_RX && | 3190 | vlan_proto = veth->h_vlan_proto; |
| 3190 | !__vlan_get_tag(skb, &vlanid)) { | 3191 | |
| 3192 | if ((vlan_proto == htons(ETH_P_8021Q) && | ||
| 3193 | dev->features & NETIF_F_HW_VLAN_CTAG_RX) || | ||
| 3194 | (vlan_proto == htons(ETH_P_8021AD) && | ||
| 3195 | dev->features & NETIF_F_HW_VLAN_STAG_RX)) { | ||
| 3191 | /* pop the vlan tag */ | 3196 | /* pop the vlan tag */ |
| 3192 | ehdr = (struct ethhdr *)skb->data; | 3197 | vlanid = ntohs(veth->h_vlan_TCI); |
| 3193 | memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2); | 3198 | memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); |
| 3194 | skb_pull(skb, VLAN_HLEN); | 3199 | skb_pull(skb, VLAN_HLEN); |
| 3195 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid); | 3200 | __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid); |
| 3196 | } | 3201 | } |
| 3197 | } | 3202 | } |
| 3198 | 3203 | ||
| @@ -4130,6 +4135,13 @@ static int stmmac_hw_init(struct stmmac_priv *priv) | |||
| 4130 | if (priv->dma_cap.tsoen) | 4135 | if (priv->dma_cap.tsoen) |
| 4131 | dev_info(priv->device, "TSO supported\n"); | 4136 | dev_info(priv->device, "TSO supported\n"); |
| 4132 | 4137 | ||
| 4138 | /* Run HW quirks, if any */ | ||
| 4139 | if (priv->hwif_quirks) { | ||
| 4140 | ret = priv->hwif_quirks(priv); | ||
| 4141 | if (ret) | ||
| 4142 | return ret; | ||
| 4143 | } | ||
| 4144 | |||
| 4133 | return 0; | 4145 | return 0; |
| 4134 | } | 4146 | } |
| 4135 | 4147 | ||
| @@ -4235,7 +4247,7 @@ int stmmac_dvr_probe(struct device *device, | |||
| 4235 | ndev->watchdog_timeo = msecs_to_jiffies(watchdog); | 4247 | ndev->watchdog_timeo = msecs_to_jiffies(watchdog); |
| 4236 | #ifdef STMMAC_VLAN_TAG_USED | 4248 | #ifdef STMMAC_VLAN_TAG_USED |
| 4237 | /* Both mac100 and gmac support receive VLAN tag detection */ | 4249 | /* Both mac100 and gmac support receive VLAN tag detection */ |
| 4238 | ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; | 4250 | ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; |
| 4239 | #endif | 4251 | #endif |
| 4240 | priv->msg_enable = netif_msg_init(debug, default_msg_level); | 4252 | priv->msg_enable = netif_msg_init(debug, default_msg_level); |
| 4241 | 4253 | ||
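The stmmac RX rework above reads the outer header as a vlan_ethhdr and accepts either an 802.1Q customer tag or an 802.1ad service tag, gated on the corresponding feature bits now advertised at probe time. A userspace sketch of the on-wire layout being parsed; the struct here is a stand-in for the kernel's vlan_ethhdr, not the kernel definition itself:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct vlan_ethhdr_sketch {
            uint8_t  h_dest[6];
            uint8_t  h_source[6];
            uint16_t h_vlan_proto;  /* 0x8100 (CTAG) or 0x88a8 (STAG) */
            uint16_t h_vlan_TCI;    /* PCP(3) | DEI(1) | VID(12) */
    };

    int main(void)
    {
            uint8_t frame[18] = {0};
            struct vlan_ethhdr_sketch veth;

            frame[12] = 0x88; frame[13] = 0xa8;     /* 802.1ad service tag */
            frame[14] = 0x20; frame[15] = 0x64;     /* PCP 1, VID 100 */
            memcpy(&veth, frame, sizeof(veth));

            printf("proto 0x%04x vid %u\n", ntohs(veth.h_vlan_proto),
                   ntohs(veth.h_vlan_TCI) & 0x0fff);
            return 0;
    }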
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 69e31ceccfae..2a0c06e0f730 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c | |||
| @@ -123,7 +123,6 @@ | |||
| 123 | * @phy_node: pointer to the PHY device node | 123 | * @phy_node: pointer to the PHY device node |
| 124 | * @mii_bus: pointer to the MII bus | 124 | * @mii_bus: pointer to the MII bus |
| 125 | * @last_link: last link status | 125 | * @last_link: last link status |
| 126 | * @has_mdio: indicates whether MDIO is included in the HW | ||
| 127 | */ | 126 | */ |
| 128 | struct net_local { | 127 | struct net_local { |
| 129 | 128 | ||
| @@ -144,7 +143,6 @@ struct net_local { | |||
| 144 | struct mii_bus *mii_bus; | 143 | struct mii_bus *mii_bus; |
| 145 | 144 | ||
| 146 | int last_link; | 145 | int last_link; |
| 147 | bool has_mdio; | ||
| 148 | }; | 146 | }; |
| 149 | 147 | ||
| 150 | 148 | ||
| @@ -863,14 +861,14 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) | |||
| 863 | bus->write = xemaclite_mdio_write; | 861 | bus->write = xemaclite_mdio_write; |
| 864 | bus->parent = dev; | 862 | bus->parent = dev; |
| 865 | 863 | ||
| 866 | lp->mii_bus = bus; | ||
| 867 | |||
| 868 | rc = of_mdiobus_register(bus, np); | 864 | rc = of_mdiobus_register(bus, np); |
| 869 | if (rc) { | 865 | if (rc) { |
| 870 | dev_err(dev, "Failed to register mdio bus.\n"); | 866 | dev_err(dev, "Failed to register mdio bus.\n"); |
| 871 | goto err_register; | 867 | goto err_register; |
| 872 | } | 868 | } |
| 873 | 869 | ||
| 870 | lp->mii_bus = bus; | ||
| 871 | |||
| 874 | return 0; | 872 | return 0; |
| 875 | 873 | ||
| 876 | err_register: | 874 | err_register: |
| @@ -1145,9 +1143,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) | |||
| 1145 | xemaclite_update_address(lp, ndev->dev_addr); | 1143 | xemaclite_update_address(lp, ndev->dev_addr); |
| 1146 | 1144 | ||
| 1147 | lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); | 1145 | lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); |
| 1148 | rc = xemaclite_mdio_setup(lp, &ofdev->dev); | 1146 | xemaclite_mdio_setup(lp, &ofdev->dev); |
| 1149 | if (rc) | ||
| 1150 | dev_warn(&ofdev->dev, "error registering MDIO bus\n"); | ||
| 1151 | 1147 | ||
| 1152 | dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr); | 1148 | dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr); |
| 1153 | 1149 | ||
| @@ -1191,7 +1187,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev) | |||
| 1191 | struct net_local *lp = netdev_priv(ndev); | 1187 | struct net_local *lp = netdev_priv(ndev); |
| 1192 | 1188 | ||
| 1193 | /* Un-register the mii_bus, if configured */ | 1189 | /* Un-register the mii_bus, if configured */ |
| 1194 | if (lp->has_mdio) { | 1190 | if (lp->mii_bus) { |
| 1195 | mdiobus_unregister(lp->mii_bus); | 1191 | mdiobus_unregister(lp->mii_bus); |
| 1196 | mdiobus_free(lp->mii_bus); | 1192 | mdiobus_free(lp->mii_bus); |
| 1197 | lp->mii_bus = NULL; | 1193 | lp->mii_bus = NULL; |
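The xemaclite reordering establishes a simple invariant: lp->mii_bus is non-NULL only once of_mdiobus_register() has succeeded, so the remove path can test the pointer itself and the separate has_mdio flag becomes redundant. Sketch of the resulting shape:

    rc = of_mdiobus_register(bus, np);
    if (rc) {
            dev_err(dev, "Failed to register mdio bus.\n");
            goto err_register;
    }
    lp->mii_bus = bus;              /* published only on success */

    /* ...and in remove(): */
    if (lp->mii_bus) {
            mdiobus_unregister(lp->mii_bus);
            mdiobus_free(lp->mii_bus);
            lp->mii_bus = NULL;
    }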
diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig index 23a2d145813a..0765d5f61714 100644 --- a/drivers/net/hyperv/Kconfig +++ b/drivers/net/hyperv/Kconfig | |||
| @@ -2,6 +2,5 @@ config HYPERV_NET | |||
| 2 | tristate "Microsoft Hyper-V virtual network driver" | 2 | tristate "Microsoft Hyper-V virtual network driver" |
| 3 | depends on HYPERV | 3 | depends on HYPERV |
| 4 | select UCS2_STRING | 4 | select UCS2_STRING |
| 5 | select FAILOVER | ||
| 6 | help | 5 | help |
| 7 | Select this option to enable the Hyper-V virtual network driver. | 6 | Select this option to enable the Hyper-V virtual network driver. |
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 23304aca25f9..1a924b867b07 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
| @@ -901,6 +901,8 @@ struct net_device_context { | |||
| 901 | struct hv_device *device_ctx; | 901 | struct hv_device *device_ctx; |
| 902 | /* netvsc_device */ | 902 | /* netvsc_device */ |
| 903 | struct netvsc_device __rcu *nvdev; | 903 | struct netvsc_device __rcu *nvdev; |
| 904 | /* list of netvsc net_devices */ | ||
| 905 | struct list_head list; | ||
| 904 | /* reconfigure work */ | 906 | /* reconfigure work */ |
| 905 | struct delayed_work dwork; | 907 | struct delayed_work dwork; |
| 906 | /* last reconfig time */ | 908 | /* last reconfig time */ |
| @@ -931,8 +933,6 @@ struct net_device_context { | |||
| 931 | u32 vf_alloc; | 933 | u32 vf_alloc; |
| 932 | /* Serial number of the VF to team with */ | 934 | /* Serial number of the VF to team with */ |
| 933 | u32 vf_serial; | 935 | u32 vf_serial; |
| 934 | |||
| 935 | struct failover *failover; | ||
| 936 | }; | 936 | }; |
| 937 | 937 | ||
| 938 | /* Per channel data */ | 938 | /* Per channel data */ |
| @@ -1277,17 +1277,17 @@ struct ndis_lsov2_offload { | |||
| 1277 | 1277 | ||
| 1278 | struct ndis_ipsecv2_offload { | 1278 | struct ndis_ipsecv2_offload { |
| 1279 | u32 encap; | 1279 | u32 encap; |
| 1280 | u16 ip6; | 1280 | u8 ip6; |
| 1281 | u16 ip4opt; | 1281 | u8 ip4opt; |
| 1282 | u16 ip6ext; | 1282 | u8 ip6ext; |
| 1283 | u16 ah; | 1283 | u8 ah; |
| 1284 | u16 esp; | 1284 | u8 esp; |
| 1285 | u16 ah_esp; | 1285 | u8 ah_esp; |
| 1286 | u16 xport; | 1286 | u8 xport; |
| 1287 | u16 tun; | 1287 | u8 tun; |
| 1288 | u16 xport_tun; | 1288 | u8 xport_tun; |
| 1289 | u16 lso; | 1289 | u8 lso; |
| 1290 | u16 extseq; | 1290 | u8 extseq; |
| 1291 | u32 udp_esp; | 1291 | u32 udp_esp; |
| 1292 | u32 auth; | 1292 | u32 auth; |
| 1293 | u32 crypto; | 1293 | u32 crypto; |
| @@ -1295,8 +1295,8 @@ struct ndis_ipsecv2_offload { | |||
| 1295 | }; | 1295 | }; |
| 1296 | 1296 | ||
| 1297 | struct ndis_rsc_offload { | 1297 | struct ndis_rsc_offload { |
| 1298 | u16 ip4; | 1298 | u8 ip4; |
| 1299 | u16 ip6; | 1299 | u8 ip6; |
| 1300 | }; | 1300 | }; |
| 1301 | 1301 | ||
| 1302 | struct ndis_encap_offload { | 1302 | struct ndis_encap_offload { |
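The u16-to-u8 conversions above matter because these structures mirror data the Hyper-V host fills in per the NDIS definitions; with oversized fields the struct grows and every later member lands at the wrong offset. A runnable userspace sketch of the size difference, using stand-in structs rather than the kernel ones:

    #include <stdint.h>
    #include <stdio.h>

    struct rsc_u16 { uint16_t ip4, ip6; };  /* old, wrong widths */
    struct rsc_u8  { uint8_t  ip4, ip6; };  /* new, matches spec */

    int main(void)
    {
            printf("u16 layout: %zu bytes, u8 layout: %zu bytes\n",
                   sizeof(struct rsc_u16), sizeof(struct rsc_u8));
            return 0;
    }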
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 7b18a8c267c2..fe2256bf1d13 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
| @@ -42,7 +42,6 @@ | |||
| 42 | #include <net/pkt_sched.h> | 42 | #include <net/pkt_sched.h> |
| 43 | #include <net/checksum.h> | 43 | #include <net/checksum.h> |
| 44 | #include <net/ip6_checksum.h> | 44 | #include <net/ip6_checksum.h> |
| 45 | #include <net/failover.h> | ||
| 46 | 45 | ||
| 47 | #include "hyperv_net.h" | 46 | #include "hyperv_net.h" |
| 48 | 47 | ||
| @@ -68,6 +67,8 @@ static int debug = -1; | |||
| 68 | module_param(debug, int, 0444); | 67 | module_param(debug, int, 0444); |
| 69 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | 68 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); |
| 70 | 69 | ||
| 70 | static LIST_HEAD(netvsc_dev_list); | ||
| 71 | |||
| 71 | static void netvsc_change_rx_flags(struct net_device *net, int change) | 72 | static void netvsc_change_rx_flags(struct net_device *net, int change) |
| 72 | { | 73 | { |
| 73 | struct net_device_context *ndev_ctx = netdev_priv(net); | 74 | struct net_device_context *ndev_ctx = netdev_priv(net); |
| @@ -1780,6 +1781,36 @@ out_unlock: | |||
| 1780 | rtnl_unlock(); | 1781 | rtnl_unlock(); |
| 1781 | } | 1782 | } |
| 1782 | 1783 | ||
| 1784 | static struct net_device *get_netvsc_bymac(const u8 *mac) | ||
| 1785 | { | ||
| 1786 | struct net_device_context *ndev_ctx; | ||
| 1787 | |||
| 1788 | list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { | ||
| 1789 | struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx); | ||
| 1790 | |||
| 1791 | if (ether_addr_equal(mac, dev->perm_addr)) | ||
| 1792 | return dev; | ||
| 1793 | } | ||
| 1794 | |||
| 1795 | return NULL; | ||
| 1796 | } | ||
| 1797 | |||
| 1798 | static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) | ||
| 1799 | { | ||
| 1800 | struct net_device_context *net_device_ctx; | ||
| 1801 | struct net_device *dev; | ||
| 1802 | |||
| 1803 | dev = netdev_master_upper_dev_get(vf_netdev); | ||
| 1804 | if (!dev || dev->netdev_ops != &device_ops) | ||
| 1805 | return NULL; /* not a netvsc device */ | ||
| 1806 | |||
| 1807 | net_device_ctx = netdev_priv(dev); | ||
| 1808 | if (!rtnl_dereference(net_device_ctx->nvdev)) | ||
| 1809 | return NULL; /* device is removed */ | ||
| 1810 | |||
| 1811 | return dev; | ||
| 1812 | } | ||
| 1813 | |||
| 1783 | /* Called when VF is injecting data into network stack. | 1814 | /* Called when VF is injecting data into network stack. |
| 1784 | * Change the associated network device from VF to netvsc. | 1815 | * Change the associated network device from VF to netvsc. |
| 1785 | * note: already called with rcu_read_lock | 1816 | * note: already called with rcu_read_lock |
| @@ -1802,6 +1833,46 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb) | |||
| 1802 | return RX_HANDLER_ANOTHER; | 1833 | return RX_HANDLER_ANOTHER; |
| 1803 | } | 1834 | } |
| 1804 | 1835 | ||
| 1836 | static int netvsc_vf_join(struct net_device *vf_netdev, | ||
| 1837 | struct net_device *ndev) | ||
| 1838 | { | ||
| 1839 | struct net_device_context *ndev_ctx = netdev_priv(ndev); | ||
| 1840 | int ret; | ||
| 1841 | |||
| 1842 | ret = netdev_rx_handler_register(vf_netdev, | ||
| 1843 | netvsc_vf_handle_frame, ndev); | ||
| 1844 | if (ret != 0) { | ||
| 1845 | netdev_err(vf_netdev, | ||
| 1846 | "can not register netvsc VF receive handler (err = %d)\n", | ||
| 1847 | ret); | ||
| 1848 | goto rx_handler_failed; | ||
| 1849 | } | ||
| 1850 | |||
| 1851 | ret = netdev_master_upper_dev_link(vf_netdev, ndev, | ||
| 1852 | NULL, NULL, NULL); | ||
| 1853 | if (ret != 0) { | ||
| 1854 | netdev_err(vf_netdev, | ||
| 1855 | "can not set master device %s (err = %d)\n", | ||
| 1856 | ndev->name, ret); | ||
| 1857 | goto upper_link_failed; | ||
| 1858 | } | ||
| 1859 | |||
| 1860 | /* set slave flag before open to prevent IPv6 addrconf */ | ||
| 1861 | vf_netdev->flags |= IFF_SLAVE; | ||
| 1862 | |||
| 1863 | schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); | ||
| 1864 | |||
| 1865 | call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); | ||
| 1866 | |||
| 1867 | netdev_info(vf_netdev, "joined to %s\n", ndev->name); | ||
| 1868 | return 0; | ||
| 1869 | |||
| 1870 | upper_link_failed: | ||
| 1871 | netdev_rx_handler_unregister(vf_netdev); | ||
| 1872 | rx_handler_failed: | ||
| 1873 | return ret; | ||
| 1874 | } | ||
| 1875 | |||
| 1805 | static void __netvsc_vf_setup(struct net_device *ndev, | 1876 | static void __netvsc_vf_setup(struct net_device *ndev, |
| 1806 | struct net_device *vf_netdev) | 1877 | struct net_device *vf_netdev) |
| 1807 | { | 1878 | { |
| @@ -1852,95 +1923,104 @@ static void netvsc_vf_setup(struct work_struct *w) | |||
| 1852 | rtnl_unlock(); | 1923 | rtnl_unlock(); |
| 1853 | } | 1924 | } |
| 1854 | 1925 | ||
| 1855 | static int netvsc_pre_register_vf(struct net_device *vf_netdev, | 1926 | static int netvsc_register_vf(struct net_device *vf_netdev) |
| 1856 | struct net_device *ndev) | ||
| 1857 | { | 1927 | { |
| 1928 | struct net_device *ndev; | ||
| 1858 | struct net_device_context *net_device_ctx; | 1929 | struct net_device_context *net_device_ctx; |
| 1859 | struct netvsc_device *netvsc_dev; | 1930 | struct netvsc_device *netvsc_dev; |
| 1931 | int ret; | ||
| 1932 | |||
| 1933 | if (vf_netdev->addr_len != ETH_ALEN) | ||
| 1934 | return NOTIFY_DONE; | ||
| 1935 | |||
| 1936 | /* | ||
| 1937 | * We will use the MAC address to locate the synthetic interface to | ||
| 1938 | * associate with the VF interface. If we don't find a matching | ||
| 1939 | * synthetic interface, move on. | ||
| 1940 | */ | ||
| 1941 | ndev = get_netvsc_bymac(vf_netdev->perm_addr); | ||
| 1942 | if (!ndev) | ||
| 1943 | return NOTIFY_DONE; | ||
| 1860 | 1944 | ||
| 1861 | net_device_ctx = netdev_priv(ndev); | 1945 | net_device_ctx = netdev_priv(ndev); |
| 1862 | netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); | 1946 | netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); |
| 1863 | if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) | 1947 | if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) |
| 1864 | return -ENODEV; | 1948 | return NOTIFY_DONE; |
| 1865 | |||
| 1866 | return 0; | ||
| 1867 | } | ||
| 1868 | 1949 | ||
| 1869 | static int netvsc_register_vf(struct net_device *vf_netdev, | 1950 | /* if synthetic interface is in a different namespace, |
| 1870 | struct net_device *ndev) | 1951 | * then move the VF to that namespace; join will be |
| 1871 | { | 1952 | * done again in that context. |
| 1872 | struct net_device_context *ndev_ctx = netdev_priv(ndev); | 1953 | */ |
| 1873 | 1954 | if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) { | |
| 1874 | /* set slave flag before open to prevent IPv6 addrconf */ | 1955 | ret = dev_change_net_namespace(vf_netdev, |
| 1875 | vf_netdev->flags |= IFF_SLAVE; | 1956 | dev_net(ndev), "eth%d"); |
| 1876 | 1957 | if (ret) | |
| 1877 | schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); | 1958 | netdev_err(vf_netdev, |
| 1959 | "could not move to same namespace as %s: %d\n", | ||
| 1960 | ndev->name, ret); | ||
| 1961 | else | ||
| 1962 | netdev_info(vf_netdev, | ||
| 1963 | "VF moved to namespace with: %s\n", | ||
| 1964 | ndev->name); | ||
| 1965 | return NOTIFY_DONE; | ||
| 1966 | } | ||
| 1878 | 1967 | ||
| 1879 | call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); | 1968 | netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); |
| 1880 | 1969 | ||
| 1881 | netdev_info(vf_netdev, "joined to %s\n", ndev->name); | 1970 | if (netvsc_vf_join(vf_netdev, ndev) != 0) |
| 1971 | return NOTIFY_DONE; | ||
| 1882 | 1972 | ||
| 1883 | dev_hold(vf_netdev); | 1973 | dev_hold(vf_netdev); |
| 1884 | rcu_assign_pointer(ndev_ctx->vf_netdev, vf_netdev); | 1974 | rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev); |
| 1885 | 1975 | return NOTIFY_OK; | |
| 1886 | return 0; | ||
| 1887 | } | 1976 | } |
| 1888 | 1977 | ||
| 1889 | /* VF up/down change detected, schedule to change data path */ | 1978 | /* VF up/down change detected, schedule to change data path */ |
| 1890 | static int netvsc_vf_changed(struct net_device *vf_netdev, | 1979 | static int netvsc_vf_changed(struct net_device *vf_netdev) |
| 1891 | struct net_device *ndev) | ||
| 1892 | { | 1980 | { |
| 1893 | struct net_device_context *net_device_ctx; | 1981 | struct net_device_context *net_device_ctx; |
| 1894 | struct netvsc_device *netvsc_dev; | 1982 | struct netvsc_device *netvsc_dev; |
| 1983 | struct net_device *ndev; | ||
| 1895 | bool vf_is_up = netif_running(vf_netdev); | 1984 | bool vf_is_up = netif_running(vf_netdev); |
| 1896 | 1985 | ||
| 1986 | ndev = get_netvsc_byref(vf_netdev); | ||
| 1987 | if (!ndev) | ||
| 1988 | return NOTIFY_DONE; | ||
| 1989 | |||
| 1897 | net_device_ctx = netdev_priv(ndev); | 1990 | net_device_ctx = netdev_priv(ndev); |
| 1898 | netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); | 1991 | netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); |
| 1899 | if (!netvsc_dev) | 1992 | if (!netvsc_dev) |
| 1900 | return -ENODEV; | 1993 | return NOTIFY_DONE; |
| 1901 | 1994 | ||
| 1902 | netvsc_switch_datapath(ndev, vf_is_up); | 1995 | netvsc_switch_datapath(ndev, vf_is_up); |
| 1903 | netdev_info(ndev, "Data path switched %s VF: %s\n", | 1996 | netdev_info(ndev, "Data path switched %s VF: %s\n", |
| 1904 | vf_is_up ? "to" : "from", vf_netdev->name); | 1997 | vf_is_up ? "to" : "from", vf_netdev->name); |
| 1905 | 1998 | ||
| 1906 | return 0; | 1999 | return NOTIFY_OK; |
| 1907 | } | 2000 | } |
| 1908 | 2001 | ||
| 1909 | static int netvsc_pre_unregister_vf(struct net_device *vf_netdev, | 2002 | static int netvsc_unregister_vf(struct net_device *vf_netdev) |
| 1910 | struct net_device *ndev) | ||
| 1911 | { | 2003 | { |
| 2004 | struct net_device *ndev; | ||
| 1912 | struct net_device_context *net_device_ctx; | 2005 | struct net_device_context *net_device_ctx; |
| 1913 | 2006 | ||
| 1914 | net_device_ctx = netdev_priv(ndev); | 2007 | ndev = get_netvsc_byref(vf_netdev); |
| 1915 | cancel_delayed_work_sync(&net_device_ctx->vf_takeover); | 2008 | if (!ndev) |
| 1916 | 2009 | return NOTIFY_DONE; | |
| 1917 | return 0; | ||
| 1918 | } | ||
| 1919 | |||
| 1920 | static int netvsc_unregister_vf(struct net_device *vf_netdev, | ||
| 1921 | struct net_device *ndev) | ||
| 1922 | { | ||
| 1923 | struct net_device_context *net_device_ctx; | ||
| 1924 | 2010 | ||
| 1925 | net_device_ctx = netdev_priv(ndev); | 2011 | net_device_ctx = netdev_priv(ndev); |
| 2012 | cancel_delayed_work_sync(&net_device_ctx->vf_takeover); | ||
| 1926 | 2013 | ||
| 1927 | netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); | 2014 | netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); |
| 1928 | 2015 | ||
| 2016 | netdev_rx_handler_unregister(vf_netdev); | ||
| 2017 | netdev_upper_dev_unlink(vf_netdev, ndev); | ||
| 1929 | RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL); | 2018 | RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL); |
| 1930 | dev_put(vf_netdev); | 2019 | dev_put(vf_netdev); |
| 1931 | 2020 | ||
| 1932 | return 0; | 2021 | return NOTIFY_OK; |
| 1933 | } | 2022 | } |
| 1934 | 2023 | ||
| 1935 | static struct failover_ops netvsc_failover_ops = { | ||
| 1936 | .slave_pre_register = netvsc_pre_register_vf, | ||
| 1937 | .slave_register = netvsc_register_vf, | ||
| 1938 | .slave_pre_unregister = netvsc_pre_unregister_vf, | ||
| 1939 | .slave_unregister = netvsc_unregister_vf, | ||
| 1940 | .slave_link_change = netvsc_vf_changed, | ||
| 1941 | .slave_handle_frame = netvsc_vf_handle_frame, | ||
| 1942 | }; | ||
| 1943 | |||
| 1944 | static int netvsc_probe(struct hv_device *dev, | 2024 | static int netvsc_probe(struct hv_device *dev, |
| 1945 | const struct hv_vmbus_device_id *dev_id) | 2025 | const struct hv_vmbus_device_id *dev_id) |
| 1946 | { | 2026 | { |
| @@ -2024,23 +2104,19 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 2024 | else | 2104 | else |
| 2025 | net->max_mtu = ETH_DATA_LEN; | 2105 | net->max_mtu = ETH_DATA_LEN; |
| 2026 | 2106 | ||
| 2027 | ret = register_netdev(net); | 2107 | rtnl_lock(); |
| 2108 | ret = register_netdevice(net); | ||
| 2028 | if (ret != 0) { | 2109 | if (ret != 0) { |
| 2029 | pr_err("Unable to register netdev.\n"); | 2110 | pr_err("Unable to register netdev.\n"); |
| 2030 | goto register_failed; | 2111 | goto register_failed; |
| 2031 | } | 2112 | } |
| 2032 | 2113 | ||
| 2033 | net_device_ctx->failover = failover_register(net, &netvsc_failover_ops); | 2114 | list_add(&net_device_ctx->list, &netvsc_dev_list); |
| 2034 | if (IS_ERR(net_device_ctx->failover)) { | 2115 | rtnl_unlock(); |
| 2035 | ret = PTR_ERR(net_device_ctx->failover); | 2116 | return 0; |
| 2036 | goto err_failover; | ||
| 2037 | } | ||
| 2038 | |||
| 2039 | return ret; | ||
| 2040 | 2117 | ||
| 2041 | err_failover: | ||
| 2042 | unregister_netdev(net); | ||
| 2043 | register_failed: | 2118 | register_failed: |
| 2119 | rtnl_unlock(); | ||
| 2044 | rndis_filter_device_remove(dev, nvdev); | 2120 | rndis_filter_device_remove(dev, nvdev); |
| 2045 | rndis_failed: | 2121 | rndis_failed: |
| 2046 | free_percpu(net_device_ctx->vf_stats); | 2122 | free_percpu(net_device_ctx->vf_stats); |
| @@ -2080,14 +2156,13 @@ static int netvsc_remove(struct hv_device *dev) | |||
| 2080 | rtnl_lock(); | 2156 | rtnl_lock(); |
| 2081 | vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); | 2157 | vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); |
| 2082 | if (vf_netdev) | 2158 | if (vf_netdev) |
| 2083 | failover_slave_unregister(vf_netdev); | 2159 | netvsc_unregister_vf(vf_netdev); |
| 2084 | 2160 | ||
| 2085 | if (nvdev) | 2161 | if (nvdev) |
| 2086 | rndis_filter_device_remove(dev, nvdev); | 2162 | rndis_filter_device_remove(dev, nvdev); |
| 2087 | 2163 | ||
| 2088 | unregister_netdevice(net); | 2164 | unregister_netdevice(net); |
| 2089 | 2165 | list_del(&ndev_ctx->list); | |
| 2090 | failover_unregister(ndev_ctx->failover); | ||
| 2091 | 2166 | ||
| 2092 | rtnl_unlock(); | 2167 | rtnl_unlock(); |
| 2093 | rcu_read_unlock(); | 2168 | rcu_read_unlock(); |
| @@ -2115,8 +2190,54 @@ static struct hv_driver netvsc_drv = { | |||
| 2115 | .remove = netvsc_remove, | 2190 | .remove = netvsc_remove, |
| 2116 | }; | 2191 | }; |
| 2117 | 2192 | ||
| 2193 | /* | ||
| 2194 | * On Hyper-V, every VF interface is matched with a corresponding | ||
| 2195 | * synthetic interface. The synthetic interface is presented first | ||
| 2196 | * to the guest. When the corresponding VF instance is registered, | ||
| 2197 | * we will take care of switching the data path. | ||
| 2198 | */ | ||
| 2199 | static int netvsc_netdev_event(struct notifier_block *this, | ||
| 2200 | unsigned long event, void *ptr) | ||
| 2201 | { | ||
| 2202 | struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); | ||
| 2203 | |||
| 2204 | /* Skip our own events */ | ||
| 2205 | if (event_dev->netdev_ops == &device_ops) | ||
| 2206 | return NOTIFY_DONE; | ||
| 2207 | |||
| 2208 | /* Avoid non-Ethernet type devices */ | ||
| 2209 | if (event_dev->type != ARPHRD_ETHER) | ||
| 2210 | return NOTIFY_DONE; | ||
| 2211 | |||
| 2212 | /* Avoid Vlan dev with same MAC registering as VF */ | ||
| 2213 | if (is_vlan_dev(event_dev)) | ||
| 2214 | return NOTIFY_DONE; | ||
| 2215 | |||
| 2216 | /* Avoid Bonding master dev with same MAC registering as VF */ | ||
| 2217 | if ((event_dev->priv_flags & IFF_BONDING) && | ||
| 2218 | (event_dev->flags & IFF_MASTER)) | ||
| 2219 | return NOTIFY_DONE; | ||
| 2220 | |||
| 2221 | switch (event) { | ||
| 2222 | case NETDEV_REGISTER: | ||
| 2223 | return netvsc_register_vf(event_dev); | ||
| 2224 | case NETDEV_UNREGISTER: | ||
| 2225 | return netvsc_unregister_vf(event_dev); | ||
| 2226 | case NETDEV_UP: | ||
| 2227 | case NETDEV_DOWN: | ||
| 2228 | return netvsc_vf_changed(event_dev); | ||
| 2229 | default: | ||
| 2230 | return NOTIFY_DONE; | ||
| 2231 | } | ||
| 2232 | } | ||
| 2233 | |||
| 2234 | static struct notifier_block netvsc_netdev_notifier = { | ||
| 2235 | .notifier_call = netvsc_netdev_event, | ||
| 2236 | }; | ||
| 2237 | |||
| 2118 | static void __exit netvsc_drv_exit(void) | 2238 | static void __exit netvsc_drv_exit(void) |
| 2119 | { | 2239 | { |
| 2240 | unregister_netdevice_notifier(&netvsc_netdev_notifier); | ||
| 2120 | vmbus_driver_unregister(&netvsc_drv); | 2241 | vmbus_driver_unregister(&netvsc_drv); |
| 2121 | } | 2242 | } |
| 2122 | 2243 | ||
| @@ -2135,6 +2256,7 @@ static int __init netvsc_drv_init(void) | |||
| 2135 | if (ret) | 2256 | if (ret) |
| 2136 | return ret; | 2257 | return ret; |
| 2137 | 2258 | ||
| 2259 | register_netdevice_notifier(&netvsc_netdev_notifier); | ||
| 2138 | return 0; | 2260 | return 0; |
| 2139 | } | 2261 | } |
| 2140 | 2262 | ||
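With the failover library removed, netvsc returns to the classic netdevice-notifier pattern: one module-wide notifier sees every netdev event in the system, filters out its own devices and non-candidates, and dispatches on the event code. A minimal sketch of that pattern, under the assumption that the real pairing logic lives in the called helpers:

    #include <linux/netdevice.h>
    #include <linux/if_arp.h>

    static int sketch_netdev_event(struct notifier_block *this,
                                   unsigned long event, void *ptr)
    {
            struct net_device *dev = netdev_notifier_info_to_dev(ptr);

            if (dev->type != ARPHRD_ETHER)
                    return NOTIFY_DONE;     /* not a candidate */

            switch (event) {
            case NETDEV_REGISTER:
                    /* e.g. pair a VF with its synthetic twin by MAC */
                    return NOTIFY_OK;
            default:
                    return NOTIFY_DONE;     /* not our event */
            }
    }

    static struct notifier_block sketch_nb = {
            .notifier_call = sketch_netdev_event,
    };

    /* register_netdevice_notifier(&sketch_nb) on module init,
     * unregister_netdevice_notifier(&sketch_nb) on module exit.
     */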
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 4e4c8daf44c3..33265747bf39 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c | |||
| @@ -26,10 +26,7 @@ | |||
| 26 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
| 27 | #include <linux/mdio-bitbang.h> | 27 | #include <linux/mdio-bitbang.h> |
| 28 | #include <linux/mdio-gpio.h> | 28 | #include <linux/mdio-gpio.h> |
| 29 | #include <linux/gpio.h> | ||
| 30 | #include <linux/gpio/consumer.h> | 29 | #include <linux/gpio/consumer.h> |
| 31 | |||
| 32 | #include <linux/of_gpio.h> | ||
| 33 | #include <linux/of_mdio.h> | 30 | #include <linux/of_mdio.h> |
| 34 | 31 | ||
| 35 | struct mdio_gpio_info { | 32 | struct mdio_gpio_info { |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 9825bfd42abc..18e819d964f1 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -3572,11 +3572,14 @@ static int __init init_mac80211_hwsim(void) | |||
| 3572 | hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0); | 3572 | hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0); |
| 3573 | if (!hwsim_wq) | 3573 | if (!hwsim_wq) |
| 3574 | return -ENOMEM; | 3574 | return -ENOMEM; |
| 3575 | rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params); | 3575 | |
| 3576 | err = rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params); | ||
| 3577 | if (err) | ||
| 3578 | goto out_free_wq; | ||
| 3576 | 3579 | ||
| 3577 | err = register_pernet_device(&hwsim_net_ops); | 3580 | err = register_pernet_device(&hwsim_net_ops); |
| 3578 | if (err) | 3581 | if (err) |
| 3579 | return err; | 3582 | goto out_free_rht; |
| 3580 | 3583 | ||
| 3581 | err = platform_driver_register(&mac80211_hwsim_driver); | 3584 | err = platform_driver_register(&mac80211_hwsim_driver); |
| 3582 | if (err) | 3585 | if (err) |
| @@ -3701,6 +3704,10 @@ out_unregister_driver: | |||
| 3701 | platform_driver_unregister(&mac80211_hwsim_driver); | 3704 | platform_driver_unregister(&mac80211_hwsim_driver); |
| 3702 | out_unregister_pernet: | 3705 | out_unregister_pernet: |
| 3703 | unregister_pernet_device(&hwsim_net_ops); | 3706 | unregister_pernet_device(&hwsim_net_ops); |
| 3707 | out_free_rht: | ||
| 3708 | rhashtable_destroy(&hwsim_radios_rht); | ||
| 3709 | out_free_wq: | ||
| 3710 | destroy_workqueue(hwsim_wq); | ||
| 3704 | return err; | 3711 | return err; |
| 3705 | } | 3712 | } |
| 3706 | module_init(init_mac80211_hwsim); | 3713 | module_init(init_mac80211_hwsim); |
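The hwsim fix completes the goto-based unwind: each acquired resource gets a cleanup label, and a failure at step N jumps to the label that releases steps N-1..1 in reverse order, so nothing allocated before the failure leaks. A runnable userspace sketch with malloc() standing in for the kernel allocators (success-path teardown omitted for brevity):

    #include <stdio.h>
    #include <stdlib.h>

    static int init_sketch(int fail_at)
    {
            void *wq, *rht;

            wq = malloc(16);        /* stands in for alloc_workqueue() */
            if (!wq)
                    return -1;
            if (fail_at == 1)
                    goto out_free_wq;

            rht = malloc(16);       /* stands in for rhashtable_init() */
            if (!rht)
                    goto out_free_wq;
            if (fail_at == 2)
                    goto out_free_rht;

            return 0;               /* fully initialized */

    out_free_rht:
            free(rht);              /* undo step 2 */
    out_free_wq:
            free(wq);               /* undo step 1 */
            return -1;
    }

    int main(void)
    {
            printf("fail at step 2 -> %d (earlier steps unwound)\n",
                   init_sketch(2));
            return 0;
    }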
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 679da1abd73c..922ce0abf5cf 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
| @@ -239,7 +239,7 @@ static void rx_refill_timeout(struct timer_list *t) | |||
| 239 | static int netfront_tx_slot_available(struct netfront_queue *queue) | 239 | static int netfront_tx_slot_available(struct netfront_queue *queue) |
| 240 | { | 240 | { |
| 241 | return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < | 241 | return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < |
| 242 | (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2); | 242 | (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1); |
| 243 | } | 243 | } |
| 244 | 244 | ||
| 245 | static void xennet_maybe_wake_tx(struct netfront_queue *queue) | 245 | static void xennet_maybe_wake_tx(struct netfront_queue *queue) |
| @@ -790,7 +790,7 @@ static int xennet_get_responses(struct netfront_queue *queue, | |||
| 790 | RING_IDX cons = queue->rx.rsp_cons; | 790 | RING_IDX cons = queue->rx.rsp_cons; |
| 791 | struct sk_buff *skb = xennet_get_rx_skb(queue, cons); | 791 | struct sk_buff *skb = xennet_get_rx_skb(queue, cons); |
| 792 | grant_ref_t ref = xennet_get_rx_ref(queue, cons); | 792 | grant_ref_t ref = xennet_get_rx_ref(queue, cons); |
| 793 | int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); | 793 | int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD); |
| 794 | int slots = 1; | 794 | int slots = 1; |
| 795 | int err = 0; | 795 | int err = 0; |
| 796 | unsigned long ret; | 796 | unsigned long ret; |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index ce8c95b6365b..a502f1af4a21 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
| @@ -2349,6 +2349,9 @@ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type) | |||
| 2349 | struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL); | 2349 | struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL); |
| 2350 | if (!node) | 2350 | if (!node) |
| 2351 | return NULL; | 2351 | return NULL; |
| 2352 | |||
| 2353 | /* Make sure all padding within the structure is initialized. */ | ||
| 2354 | memset(&node->msg, 0, sizeof node->msg); | ||
| 2352 | node->vq = vq; | 2355 | node->vq = vq; |
| 2353 | node->msg.type = type; | 2356 | node->msg.type = type; |
| 2354 | return node; | 2357 | return node; |
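The vhost memset closes a classic information leak: compiler-inserted padding in a heap-allocated struct is never written by member assignments, so copying the whole struct out (to userspace, in vhost's case) discloses stale heap bytes unless the struct is zeroed first. A runnable userspace demonstration on common ABIs, where 3 padding bytes follow the first field:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct msg_sketch {
            uint8_t  type;          /* padding bytes follow here */
            uint32_t payload;
    };

    int main(void)
    {
            struct msg_sketch m;
            unsigned char *raw = (unsigned char *)&m;

            memset(&m, 0xAA, sizeof(m));    /* simulate stale heap data */
            m.type = 1;
            m.payload = 42;
            printf("padding byte without zeroing: %02x\n", raw[1]); /* aa */

            memset(&m, 0, sizeof(m));       /* the fix: zero everything */
            m.type = 1;
            m.payload = 42;
            printf("padding byte after zeroing:   %02x\n", raw[1]); /* 00 */
            return 0;
    }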
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index d94254263ea5..591a13a59787 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig | |||
| @@ -1437,7 +1437,7 @@ config FB_SIS_315 | |||
| 1437 | 1437 | ||
| 1438 | config FB_VIA | 1438 | config FB_VIA |
| 1439 | tristate "VIA UniChrome (Pro) and Chrome9 display support" | 1439 | tristate "VIA UniChrome (Pro) and Chrome9 display support" |
| 1440 | depends on FB && PCI && X86 && GPIOLIB && I2C | 1440 | depends on FB && PCI && GPIOLIB && I2C && (X86 || COMPILE_TEST) |
| 1441 | select FB_CFB_FILLRECT | 1441 | select FB_CFB_FILLRECT |
| 1442 | select FB_CFB_COPYAREA | 1442 | select FB_CFB_COPYAREA |
| 1443 | select FB_CFB_IMAGEBLIT | 1443 | select FB_CFB_IMAGEBLIT |
| @@ -1888,7 +1888,6 @@ config FB_W100 | |||
| 1888 | config FB_SH_MOBILE_LCDC | 1888 | config FB_SH_MOBILE_LCDC |
| 1889 | tristate "SuperH Mobile LCDC framebuffer support" | 1889 | tristate "SuperH Mobile LCDC framebuffer support" |
| 1890 | depends on FB && (SUPERH || ARCH_RENESAS) && HAVE_CLK | 1890 | depends on FB && (SUPERH || ARCH_RENESAS) && HAVE_CLK |
| 1891 | depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM | ||
| 1892 | select FB_SYS_FILLRECT | 1891 | select FB_SYS_FILLRECT |
| 1893 | select FB_SYS_COPYAREA | 1892 | select FB_SYS_COPYAREA |
| 1894 | select FB_SYS_IMAGEBLIT | 1893 | select FB_SYS_IMAGEBLIT |
| @@ -2253,39 +2252,6 @@ config FB_BROADSHEET | |||
| 2253 | and could also have been called by other names when coupled with | 2252 | and could also have been called by other names when coupled with |
| 2254 | a bridge adapter. | 2253 | a bridge adapter. |
| 2255 | 2254 | ||
| 2256 | config FB_AUO_K190X | ||
| 2257 | tristate "AUO-K190X EPD controller support" | ||
| 2258 | depends on FB | ||
| 2259 | select FB_SYS_FILLRECT | ||
| 2260 | select FB_SYS_COPYAREA | ||
| 2261 | select FB_SYS_IMAGEBLIT | ||
| 2262 | select FB_SYS_FOPS | ||
| 2263 | select FB_DEFERRED_IO | ||
| 2264 | help | ||
| 2265 | Provides support for epaper controllers from the K190X series | ||
| 2266 | of AUO. These controllers can be used to drive epaper displays | ||
| 2267 | from Sipix. | ||
| 2268 | |||
| 2269 | This option enables the common support, shared by the individual | ||
| 2270 | controller drivers. You will also have to enable the driver | ||
| 2271 | for the controller type used in your device. | ||
| 2272 | |||
| 2273 | config FB_AUO_K1900 | ||
| 2274 | tristate "AUO-K1900 EPD controller support" | ||
| 2275 | depends on FB && FB_AUO_K190X | ||
| 2276 | help | ||
| 2277 | This driver implements support for the AUO K1900 epd-controller. | ||
| 2278 | This controller can drive Sipix epaper displays but can only do | ||
| 2279 | serial updates, reducing the number of possible frames per second. | ||
| 2280 | |||
| 2281 | config FB_AUO_K1901 | ||
| 2282 | tristate "AUO-K1901 EPD controller support" | ||
| 2283 | depends on FB && FB_AUO_K190X | ||
| 2284 | help | ||
| 2285 | This driver implements support for the AUO K1901 epd-controller. | ||
| 2286 | This controller can drive Sipix epaper displays and supports | ||
| 2287 | concurrent updates, making higher frames per second possible. | ||
| 2288 | |||
| 2289 | config FB_JZ4740 | 2255 | config FB_JZ4740 |
| 2290 | tristate "JZ4740 LCD framebuffer support" | 2256 | tristate "JZ4740 LCD framebuffer support" |
| 2291 | depends on FB && MACH_JZ4740 | 2257 | depends on FB && MACH_JZ4740 |
| @@ -2346,18 +2312,6 @@ source "drivers/video/fbdev/omap/Kconfig" | |||
| 2346 | source "drivers/video/fbdev/omap2/Kconfig" | 2312 | source "drivers/video/fbdev/omap2/Kconfig" |
| 2347 | source "drivers/video/fbdev/mmp/Kconfig" | 2313 | source "drivers/video/fbdev/mmp/Kconfig" |
| 2348 | 2314 | ||
| 2349 | config FB_SH_MOBILE_MERAM | ||
| 2350 | tristate "SuperH Mobile MERAM read ahead support" | ||
| 2351 | depends on (SUPERH || ARCH_SHMOBILE) | ||
| 2352 | select GENERIC_ALLOCATOR | ||
| 2353 | ---help--- | ||
| 2354 | Enable MERAM support for the SuperH controller. | ||
| 2355 | |||
| 2356 | This will allow for caching of the framebuffer to provide more | ||
| 2357 | reliable access under heavy main memory bus traffic situations. | ||
| 2358 | Up to 4 memory channels can be configured, allowing 4 RGB or | ||
| 2359 | 2 YCbCr framebuffers to be configured. | ||
| 2360 | |||
| 2361 | config FB_SSD1307 | 2315 | config FB_SSD1307 |
| 2362 | tristate "Solomon SSD1307 framebuffer support" | 2316 | tristate "Solomon SSD1307 framebuffer support" |
| 2363 | depends on FB && I2C | 2317 | depends on FB && I2C |
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile index 55282a21b500..13c900320c2c 100644 --- a/drivers/video/fbdev/Makefile +++ b/drivers/video/fbdev/Makefile | |||
| @@ -100,9 +100,6 @@ obj-$(CONFIG_FB_PMAGB_B) += pmagb-b-fb.o | |||
| 100 | obj-$(CONFIG_FB_MAXINE) += maxinefb.o | 100 | obj-$(CONFIG_FB_MAXINE) += maxinefb.o |
| 101 | obj-$(CONFIG_FB_METRONOME) += metronomefb.o | 101 | obj-$(CONFIG_FB_METRONOME) += metronomefb.o |
| 102 | obj-$(CONFIG_FB_BROADSHEET) += broadsheetfb.o | 102 | obj-$(CONFIG_FB_BROADSHEET) += broadsheetfb.o |
| 103 | obj-$(CONFIG_FB_AUO_K190X) += auo_k190x.o | ||
| 104 | obj-$(CONFIG_FB_AUO_K1900) += auo_k1900fb.o | ||
| 105 | obj-$(CONFIG_FB_AUO_K1901) += auo_k1901fb.o | ||
| 106 | obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o | 103 | obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o |
| 107 | obj-$(CONFIG_FB_SH7760) += sh7760fb.o | 104 | obj-$(CONFIG_FB_SH7760) += sh7760fb.o |
| 108 | obj-$(CONFIG_FB_IMX) += imxfb.o | 105 | obj-$(CONFIG_FB_IMX) += imxfb.o |
| @@ -116,7 +113,6 @@ obj-$(CONFIG_FB_SM501) += sm501fb.o | |||
| 116 | obj-$(CONFIG_FB_UDL) += udlfb.o | 113 | obj-$(CONFIG_FB_UDL) += udlfb.o |
| 117 | obj-$(CONFIG_FB_SMSCUFX) += smscufx.o | 114 | obj-$(CONFIG_FB_SMSCUFX) += smscufx.o |
| 118 | obj-$(CONFIG_FB_XILINX) += xilinxfb.o | 115 | obj-$(CONFIG_FB_XILINX) += xilinxfb.o |
| 119 | obj-$(CONFIG_FB_SH_MOBILE_MERAM) += sh_mobile_meram.o | ||
| 120 | obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o | 116 | obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o |
| 121 | obj-$(CONFIG_FB_OMAP) += omap/ | 117 | obj-$(CONFIG_FB_OMAP) += omap/ |
| 122 | obj-y += omap2/ | 118 | obj-y += omap2/ |
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c index 09b0e558dce8..6cc46867ff57 100644 --- a/drivers/video/fbdev/aty/aty128fb.c +++ b/drivers/video/fbdev/aty/aty128fb.c | |||
| @@ -2442,7 +2442,7 @@ static void aty128_set_suspend(struct aty128fb_par *par, int suspend) | |||
| 2442 | (void)aty_ld_pll(POWER_MANAGEMENT); | 2442 | (void)aty_ld_pll(POWER_MANAGEMENT); |
| 2443 | aty_st_le32(BUS_CNTL1, 0x00000010); | 2443 | aty_st_le32(BUS_CNTL1, 0x00000010); |
| 2444 | aty_st_le32(MEM_POWER_MISC, 0x0c830000); | 2444 | aty_st_le32(MEM_POWER_MISC, 0x0c830000); |
| 2445 | mdelay(100); | 2445 | msleep(100); |
| 2446 | 2446 | ||
| 2447 | /* Switch PCI power management to D2 */ | 2447 | /* Switch PCI power management to D2 */ |
| 2448 | pci_set_power_state(pdev, PCI_D2); | 2448 | pci_set_power_state(pdev, PCI_D2); |
diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c index 7137c12cbcee..e695adb0e573 100644 --- a/drivers/video/fbdev/aty/radeon_pm.c +++ b/drivers/video/fbdev/aty/radeon_pm.c | |||
| @@ -2678,17 +2678,17 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) | |||
| 2678 | * it, we'll restore the dynamic clocks state on wakeup | 2678 | * it, we'll restore the dynamic clocks state on wakeup |
| 2679 | */ | 2679 | */ |
| 2680 | radeon_pm_disable_dynamic_mode(rinfo); | 2680 | radeon_pm_disable_dynamic_mode(rinfo); |
| 2681 | mdelay(50); | 2681 | msleep(50); |
| 2682 | radeon_pm_save_regs(rinfo, 1); | 2682 | radeon_pm_save_regs(rinfo, 1); |
| 2683 | 2683 | ||
| 2684 | if (rinfo->is_mobility && !(rinfo->pm_mode & radeon_pm_d2)) { | 2684 | if (rinfo->is_mobility && !(rinfo->pm_mode & radeon_pm_d2)) { |
| 2685 | /* Switch off LVDS interface */ | 2685 | /* Switch off LVDS interface */ |
| 2686 | mdelay(1); | 2686 | usleep_range(1000, 2000); |
| 2687 | OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_BL_MOD_EN)); | 2687 | OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_BL_MOD_EN)); |
| 2688 | mdelay(1); | 2688 | usleep_range(1000, 2000); |
| 2689 | OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_EN | LVDS_ON)); | 2689 | OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_EN | LVDS_ON)); |
| 2690 | OUTREG(LVDS_PLL_CNTL, (INREG(LVDS_PLL_CNTL) & ~30000) | 0x20000); | 2690 | OUTREG(LVDS_PLL_CNTL, (INREG(LVDS_PLL_CNTL) & ~30000) | 0x20000); |
| 2691 | mdelay(20); | 2691 | msleep(20); |
| 2692 | OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_DIGON)); | 2692 | OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_DIGON)); |
| 2693 | } | 2693 | } |
| 2694 | pci_disable_device(pdev); | 2694 | pci_disable_device(pdev); |
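The aty128fb and radeon_pm conversions above follow the usual kernel delay guidance (see Documentation/timers/timers-howto.txt): mdelay() busy-waits and is only justified in atomic context, while sleepable paths like suspend handlers should yield the CPU. The three primitives side by side:

    mdelay(100);                    /* busy-waits; atomic context only */
    usleep_range(1000, 2000);       /* ~10us..20ms range: hrtimer-backed sleep */
    msleep(20);                     /* longer delays: jiffies-based sleep */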
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c index d555a78df5c6..0adf0683cf08 100644 --- a/drivers/video/fbdev/au1100fb.c +++ b/drivers/video/fbdev/au1100fb.c | |||
| @@ -464,7 +464,7 @@ static int au1100fb_drv_probe(struct platform_device *dev) | |||
| 464 | PAGE_ALIGN(fbdev->fb_len), | 464 | PAGE_ALIGN(fbdev->fb_len), |
| 465 | &fbdev->fb_phys, GFP_KERNEL); | 465 | &fbdev->fb_phys, GFP_KERNEL); |
| 466 | if (!fbdev->fb_mem) { | 466 | if (!fbdev->fb_mem) { |
| 467 | print_err("fail to allocate frambuffer (size: %dK))", | 467 | print_err("fail to allocate framebuffer (size: %dK))", |
| 468 | fbdev->fb_len / 1024); | 468 | fbdev->fb_len / 1024); |
| 469 | return -ENOMEM; | 469 | return -ENOMEM; |
| 470 | } | 470 | } |
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c index 87d5a62bf6ca..3872ccef4cb2 100644 --- a/drivers/video/fbdev/au1200fb.c +++ b/drivers/video/fbdev/au1200fb.c | |||
| @@ -1696,7 +1696,7 @@ static int au1200fb_drv_probe(struct platform_device *dev) | |||
| 1696 | &fbdev->fb_phys, GFP_KERNEL, | 1696 | &fbdev->fb_phys, GFP_KERNEL, |
| 1697 | DMA_ATTR_NON_CONSISTENT); | 1697 | DMA_ATTR_NON_CONSISTENT); |
| 1698 | if (!fbdev->fb_mem) { | 1698 | if (!fbdev->fb_mem) { |
| 1699 | print_err("fail to allocate frambuffer (size: %dK))", | 1699 | print_err("fail to allocate framebuffer (size: %dK))", |
| 1700 | fbdev->fb_len / 1024); | 1700 | fbdev->fb_len / 1024); |
| 1701 | ret = -ENOMEM; | 1701 | ret = -ENOMEM; |
| 1702 | goto failed; | 1702 | goto failed; |
diff --git a/drivers/video/fbdev/auo_k1900fb.c b/drivers/video/fbdev/auo_k1900fb.c deleted file mode 100644 index 7637c60eae3d..000000000000 --- a/drivers/video/fbdev/auo_k1900fb.c +++ /dev/null | |||
| @@ -1,204 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * auok190xfb.c -- FB driver for AUO-K1900 controllers | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de> | ||
| 5 | * | ||
| 6 | * based on broadsheetfb.c | ||
| 7 | * | ||
| 8 | * Copyright (C) 2008, Jaya Kumar | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License version 2 as | ||
| 12 | * published by the Free Software Foundation. | ||
| 13 | * | ||
| 14 | * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven. | ||
| 15 | * | ||
| 16 | * This driver is written to be used with the AUO-K1900 display controller. | ||
| 17 | * | ||
| 18 | * It is intended to be architecture independent. A board specific driver | ||
| 19 | * must be used to perform all the physical IO interactions. | ||
| 20 | * | ||
| 21 | * The controller supports different update modes: | ||
| 22 | * mode0+1 16 step gray (4bit) | ||
| 23 | * mode2 4 step gray (2bit) - FIXME: add strange refresh | ||
| 24 | * mode3 2 step gray (1bit) - FIXME: add strange refresh | ||
| 25 | * mode4 handwriting mode (strange behaviour) | ||
| 26 | * mode5 automatic selection of update mode | ||
| 27 | */ | ||
| 28 | |||
| 29 | #include <linux/module.h> | ||
| 30 | #include <linux/kernel.h> | ||
| 31 | #include <linux/errno.h> | ||
| 32 | #include <linux/string.h> | ||
| 33 | #include <linux/mm.h> | ||
| 34 | #include <linux/slab.h> | ||
| 35 | #include <linux/delay.h> | ||
| 36 | #include <linux/interrupt.h> | ||
| 37 | #include <linux/fb.h> | ||
| 38 | #include <linux/init.h> | ||
| 39 | #include <linux/platform_device.h> | ||
| 40 | #include <linux/list.h> | ||
| 41 | #include <linux/firmware.h> | ||
| 42 | #include <linux/gpio.h> | ||
| 43 | #include <linux/pm_runtime.h> | ||
| 44 | |||
| 45 | #include <video/auo_k190xfb.h> | ||
| 46 | |||
| 47 | #include "auo_k190x.h" | ||
| 48 | |||
| 49 | /* | ||
| 50 | * AUO-K1900 specific commands | ||
| 51 | */ | ||
| 52 | |||
| 53 | #define AUOK1900_CMD_PARTIALDISP 0x1001 | ||
| 54 | #define AUOK1900_CMD_ROTATION 0x1006 | ||
| 55 | #define AUOK1900_CMD_LUT_STOP 0x1009 | ||
| 56 | |||
| 57 | #define AUOK1900_INIT_TEMP_AVERAGE (1 << 13) | ||
| 58 | #define AUOK1900_INIT_ROTATE(_x) ((_x & 0x3) << 10) | ||
| 59 | #define AUOK1900_INIT_RESOLUTION(_res) ((_res & 0x7) << 2) | ||
| 60 | |||
| 61 | static void auok1900_init(struct auok190xfb_par *par) | ||
| 62 | { | ||
| 63 | struct device *dev = par->info->device; | ||
| 64 | struct auok190x_board *board = par->board; | ||
| 65 | u16 init_param = 0; | ||
| 66 | |||
| 67 | pm_runtime_get_sync(dev); | ||
| 68 | |||
| 69 | init_param |= AUOK1900_INIT_TEMP_AVERAGE; | ||
| 70 | init_param |= AUOK1900_INIT_ROTATE(par->rotation); | ||
| 71 | init_param |= AUOK190X_INIT_INVERSE_WHITE; | ||
| 72 | init_param |= AUOK190X_INIT_FORMAT0; | ||
| 73 | init_param |= AUOK1900_INIT_RESOLUTION(par->resolution); | ||
| 74 | init_param |= AUOK190X_INIT_SHIFT_RIGHT; | ||
| 75 | |||
| 76 | auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param); | ||
| 77 | |||
| 78 | /* let the controller finish */ | ||
| 79 | board->wait_for_rdy(par); | ||
| 80 | |||
| 81 | pm_runtime_mark_last_busy(dev); | ||
| 82 | pm_runtime_put_autosuspend(dev); | ||
| 83 | } | ||
| 84 | |||
| 85 | static void auok1900_update_region(struct auok190xfb_par *par, int mode, | ||
| 86 | u16 y1, u16 y2) | ||
| 87 | { | ||
| 88 | struct device *dev = par->info->device; | ||
| 89 | unsigned char *buf = (unsigned char *)par->info->screen_base; | ||
| 90 | int xres = par->info->var.xres; | ||
| 91 | int line_length = par->info->fix.line_length; | ||
| 92 | u16 args[4]; | ||
| 93 | |||
| 94 | pm_runtime_get_sync(dev); | ||
| 95 | |||
| 96 | mutex_lock(&(par->io_lock)); | ||
| 97 | |||
| 98 | /* y1 and y2 must be a multiple of 2 so drop the lowest bit */ | ||
| 99 | y1 &= 0xfffe; | ||
| 100 | y2 &= 0xfffe; | ||
| 101 | |||
| 102 | dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n", | ||
| 103 | 1, y1+1, xres, y2-y1, mode); | ||
| 104 | |||
| 105 | /* FIXME: handle different partial update modes */ | ||
| 106 | args[0] = mode | 1; | ||
| 107 | args[1] = y1 + 1; | ||
| 108 | args[2] = xres; | ||
| 109 | args[3] = y2 - y1; | ||
| 110 | buf += y1 * line_length; | ||
| 111 | auok190x_send_cmdargs_pixels(par, AUOK1900_CMD_PARTIALDISP, 4, args, | ||
| 112 | ((y2 - y1) * line_length)/2, (u16 *) buf); | ||
| 113 | auok190x_send_command(par, AUOK190X_CMD_DATA_STOP); | ||
| 114 | |||
| 115 | par->update_cnt++; | ||
| 116 | |||
| 117 | mutex_unlock(&(par->io_lock)); | ||
| 118 | |||
| 119 | pm_runtime_mark_last_busy(dev); | ||
| 120 | pm_runtime_put_autosuspend(dev); | ||
| 121 | } | ||
| 122 | |||
| 123 | static void auok1900fb_dpy_update_pages(struct auok190xfb_par *par, | ||
| 124 | u16 y1, u16 y2) | ||
| 125 | { | ||
| 126 | int mode; | ||
| 127 | |||
| 128 | if (par->update_mode < 0) { | ||
| 129 | mode = AUOK190X_UPDATE_MODE(1); | ||
| 130 | par->last_mode = -1; | ||
| 131 | } else { | ||
| 132 | mode = AUOK190X_UPDATE_MODE(par->update_mode); | ||
| 133 | par->last_mode = par->update_mode; | ||
| 134 | } | ||
| 135 | |||
| 136 | if (par->flash) | ||
| 137 | mode |= AUOK190X_UPDATE_NONFLASH; | ||
| 138 | |||
| 139 | auok1900_update_region(par, mode, y1, y2); | ||
| 140 | } | ||
| 141 | |||
| 142 | static void auok1900fb_dpy_update(struct auok190xfb_par *par) | ||
| 143 | { | ||
| 144 | int mode; | ||
| 145 | |||
| 146 | if (par->update_mode < 0) { | ||
| 147 | mode = AUOK190X_UPDATE_MODE(0); | ||
| 148 | par->last_mode = -1; | ||
| 149 | } else { | ||
| 150 | mode = AUOK190X_UPDATE_MODE(par->update_mode); | ||
| 151 | par->last_mode = par->update_mode; | ||
| 152 | } | ||
| 153 | |||
| 154 | if (par->flash) | ||
| 155 | mode |= AUOK190X_UPDATE_NONFLASH; | ||
| 156 | |||
| 157 | auok1900_update_region(par, mode, 0, par->info->var.yres); | ||
| 158 | par->update_cnt = 0; | ||
| 159 | } | ||
| 160 | |||
| 161 | static bool auok1900fb_need_refresh(struct auok190xfb_par *par) | ||
| 162 | { | ||
| 163 | return (par->update_cnt > 10); | ||
| 164 | } | ||
| 165 | |||
| 166 | static int auok1900fb_probe(struct platform_device *pdev) | ||
| 167 | { | ||
| 168 | struct auok190x_init_data init; | ||
| 169 | struct auok190x_board *board; | ||
| 170 | |||
| 171 | /* pick up board specific routines */ | ||
| 172 | board = pdev->dev.platform_data; | ||
| 173 | if (!board) | ||
| 174 | return -EINVAL; | ||
| 175 | |||
| 176 | /* fill temporary init struct for common init */ | ||
| 177 | init.id = "auo_k1900fb"; | ||
| 178 | init.board = board; | ||
| 179 | init.update_partial = auok1900fb_dpy_update_pages; | ||
| 180 | init.update_all = auok1900fb_dpy_update; | ||
| 181 | init.need_refresh = auok1900fb_need_refresh; | ||
| 182 | init.init = auok1900_init; | ||
| 183 | |||
| 184 | return auok190x_common_probe(pdev, &init); | ||
| 185 | } | ||
| 186 | |||
| 187 | static int auok1900fb_remove(struct platform_device *pdev) | ||
| 188 | { | ||
| 189 | return auok190x_common_remove(pdev); | ||
| 190 | } | ||
| 191 | |||
| 192 | static struct platform_driver auok1900fb_driver = { | ||
| 193 | .probe = auok1900fb_probe, | ||
| 194 | .remove = auok1900fb_remove, | ||
| 195 | .driver = { | ||
| 196 | .name = "auo_k1900fb", | ||
| 197 | .pm = &auok190x_pm, | ||
| 198 | }, | ||
| 199 | }; | ||
| 200 | module_platform_driver(auok1900fb_driver); | ||
| 201 | |||
| 202 | MODULE_DESCRIPTION("framebuffer driver for the AUO-K1900 EPD controller"); | ||
| 203 | MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>"); | ||
| 204 | MODULE_LICENSE("GPL"); | ||
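Both controller drivers bind purely against platform data: a board file has to supply the struct auok190x_board callbacks that drive the physical I80 bus lines. A minimal sketch of such a board file — the myboard_* helpers, GPIO numbers and fps value are hypothetical, assumed to be implemented for the actual hardware:

    /* hypothetical board glue -- a sketch, not a real board file */
    static struct auok190x_board myboard_auok1900_board = {
            .init          = myboard_auok190x_init,   /* claim the bus lines */
            .cleanup       = myboard_auok190x_cleanup,
            .set_ctl       = myboard_set_ctl,         /* toggle CS/DC/WR/OE */
            .set_hdb       = myboard_set_hdb,         /* write the 16bit data bus */
            .get_hdb       = myboard_get_hdb,         /* read the 16bit data bus */
            .wait_for_rdy  = myboard_wait_for_rdy,    /* sleep until BUSY clears */
            .setup_irq     = myboard_setup_irq,
            .gpio_nrst     = 42,
            .gpio_nsleep   = 43,
            .resolution    = AUOK190X_RESOLUTION_800_600,
            .fps           = 4,
    };

    static struct platform_device myboard_auok1900_device = {
            .name = "auo_k1900fb",    /* must match the driver name above */
            .id   = -1,
            .dev  = {
                    .platform_data = &myboard_auok1900_board,
            },
    };

auok190x_common_probe() rejects the device with -EINVAL if any of these callbacks is missing.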
diff --git a/drivers/video/fbdev/auo_k1901fb.c b/drivers/video/fbdev/auo_k1901fb.c deleted file mode 100644 index 681fe61957b6..000000000000 --- a/drivers/video/fbdev/auo_k1901fb.c +++ /dev/null | |||
| @@ -1,257 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * auok190xfb.c -- FB driver for AUO-K1901 controllers | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de> | ||
| 5 | * | ||
| 6 | * based on broadsheetfb.c | ||
| 7 | * | ||
| 8 | * Copyright (C) 2008, Jaya Kumar | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License version 2 as | ||
| 12 | * published by the Free Software Foundation. | ||
| 13 | * | ||
| 14 | * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven. | ||
| 15 | * | ||
| 16 | * This driver is written to be used with the AUO-K1901 display controller. | ||
| 17 | * | ||
| 18 | * It is intended to be architecture independent. A board specific driver | ||
| 19 | * must be used to perform all the physical IO interactions. | ||
| 20 | * | ||
| 21 | * The controller supports different update modes: | ||
| 22 | * mode0+1 16 step gray (4bit) | ||
| 23 | * mode2+3 4 step gray (2bit) | ||
| 24 | * mode4+5 2 step gray (1bit) | ||
| 25 | * - mode4 is described as "without LUT" | ||
| 26 | * mode7 automatic selection of update mode | ||
| 27 | * | ||
| 28 | * The most interesting difference to the K1900 is the ability to do screen | ||
| 29 | * updates in an asynchronous fashion. Where the K1900 needs to wait for the | ||
| 30 | * current update to complete, the K1901 can already accept further updates. | ||
| 31 | */ | ||
| 32 | |||
| 33 | #include <linux/module.h> | ||
| 34 | #include <linux/kernel.h> | ||
| 35 | #include <linux/errno.h> | ||
| 36 | #include <linux/string.h> | ||
| 37 | #include <linux/mm.h> | ||
| 38 | #include <linux/slab.h> | ||
| 39 | #include <linux/delay.h> | ||
| 40 | #include <linux/interrupt.h> | ||
| 41 | #include <linux/fb.h> | ||
| 42 | #include <linux/init.h> | ||
| 43 | #include <linux/platform_device.h> | ||
| 44 | #include <linux/list.h> | ||
| 45 | #include <linux/firmware.h> | ||
| 46 | #include <linux/gpio.h> | ||
| 47 | #include <linux/pm_runtime.h> | ||
| 48 | |||
| 49 | #include <video/auo_k190xfb.h> | ||
| 50 | |||
| 51 | #include "auo_k190x.h" | ||
| 52 | |||
| 53 | /* | ||
| 54 | * AUO-K1901 specific commands | ||
| 55 | */ | ||
| 56 | |||
| 57 | #define AUOK1901_CMD_LUT_INTERFACE 0x0005 | ||
| 58 | #define AUOK1901_CMD_DMA_START 0x1001 | ||
| 59 | #define AUOK1901_CMD_CURSOR_START 0x1007 | ||
| 60 | #define AUOK1901_CMD_CURSOR_STOP AUOK190X_CMD_DATA_STOP | ||
| 61 | #define AUOK1901_CMD_DDMA_START 0x1009 | ||
| 62 | |||
| 63 | #define AUOK1901_INIT_GATE_PULSE_LOW (0 << 14) | ||
| 64 | #define AUOK1901_INIT_GATE_PULSE_HIGH (1 << 14) | ||
| 65 | #define AUOK1901_INIT_SINGLE_GATE (0 << 13) | ||
| 66 | #define AUOK1901_INIT_DOUBLE_GATE (1 << 13) | ||
| 67 | |||
| 68 | /* Bits to pixels | ||
| 69 | * Mode 15-12 11-8 7-4 3-0 | ||
| 70 | * format2 2 T 1 T | ||
| 71 | * format3 1 T 2 T | ||
| 72 | * format4 T 2 T 1 | ||
| 73 | * format5 T 1 T 2 | ||
| 74 | * | ||
| 75 | * halftone modes: | ||
| 76 | * format6 2 2 1 1 | ||
| 77 | * format7 1 1 2 2 | ||
| 78 | */ | ||
| 79 | #define AUOK1901_INIT_FORMAT2 (1 << 7) | ||
| 80 | #define AUOK1901_INIT_FORMAT3 ((1 << 7) | (1 << 6)) | ||
| 81 | #define AUOK1901_INIT_FORMAT4 (1 << 8) | ||
| 82 | #define AUOK1901_INIT_FORMAT5 ((1 << 8) | (1 << 6)) | ||
| 83 | #define AUOK1901_INIT_FORMAT6 ((1 << 8) | (1 << 7)) | ||
| 84 | #define AUOK1901_INIT_FORMAT7 ((1 << 8) | (1 << 7) | (1 << 6)) | ||
| 85 | |||
| 86 | /* res[4] to bit 10 | ||
| 87 | * res[3-0] to bits 5-2 | ||
| 88 | */ | ||
| 89 | #define AUOK1901_INIT_RESOLUTION(_res) (((_res & (1 << 4)) << 6) \ | ||
| 90 | | ((_res & 0xf) << 2)) | ||
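A worked example of the bit mapping above, for a hypothetical resolution code 0x13 (0b10011):

    /*
     * AUOK1901_INIT_RESOLUTION(0x13):
     *   (0x13 & (1 << 4)) << 6 = 0x0400    res[4]   -> bit 10
     *   (0x13 & 0xf) << 2      = 0x000c    res[3-0] -> bits 5-2
     *   result                 = 0x040c
     */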
| 91 | |||
| 92 | /* | ||
| 93 | * portrait / landscape orientation in AUOK1901_CMD_DMA_START | ||
| 94 | */ | ||
| 95 | #define AUOK1901_DMA_ROTATE90(_rot) ((_rot & 1) << 13) | ||
| 96 | |||
| 97 | /* | ||
| 98 | * equivalent to 1 << 11, needs the ~ to get the same rotation as the K1900 | ||
| 99 | */ | ||
| 100 | #define AUOK1901_DDMA_ROTATE180(_rot) ((~_rot & 2) << 10) | ||
| 101 | |||
| 102 | static void auok1901_init(struct auok190xfb_par *par) | ||
| 103 | { | ||
| 104 | struct device *dev = par->info->device; | ||
| 105 | struct auok190x_board *board = par->board; | ||
| 106 | u16 init_param = 0; | ||
| 107 | |||
| 108 | pm_runtime_get_sync(dev); | ||
| 109 | |||
| 110 | init_param |= AUOK190X_INIT_INVERSE_WHITE; | ||
| 111 | init_param |= AUOK190X_INIT_FORMAT0; | ||
| 112 | init_param |= AUOK1901_INIT_RESOLUTION(par->resolution); | ||
| 113 | init_param |= AUOK190X_INIT_SHIFT_LEFT; | ||
| 114 | |||
| 115 | auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param); | ||
| 116 | |||
| 117 | /* let the controller finish */ | ||
| 118 | board->wait_for_rdy(par); | ||
| 119 | |||
| 120 | pm_runtime_mark_last_busy(dev); | ||
| 121 | pm_runtime_put_autosuspend(dev); | ||
| 122 | } | ||
| 123 | |||
| 124 | static void auok1901_update_region(struct auok190xfb_par *par, int mode, | ||
| 125 | u16 y1, u16 y2) | ||
| 126 | { | ||
| 127 | struct device *dev = par->info->device; | ||
| 128 | unsigned char *buf = (unsigned char *)par->info->screen_base; | ||
| 129 | int xres = par->info->var.xres; | ||
| 130 | int line_length = par->info->fix.line_length; | ||
| 131 | u16 args[5]; | ||
| 132 | |||
| 133 | pm_runtime_get_sync(dev); | ||
| 134 | |||
| 135 | mutex_lock(&(par->io_lock)); | ||
| 136 | |||
| 137 | /* y1 and y2 must be a multiple of 2 so drop the lowest bit */ | ||
| 138 | y1 &= 0xfffe; | ||
| 139 | y2 &= 0xfffe; | ||
| 140 | |||
| 141 | dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n", | ||
| 142 | 1, y1+1, xres, y2-y1, mode); | ||
| 143 | |||
| 144 | /* K1901: first transfer the region data */ | ||
| 145 | args[0] = AUOK1901_DMA_ROTATE90(par->rotation) | 1; | ||
| 146 | args[1] = y1 + 1; | ||
| 147 | args[2] = xres; | ||
| 148 | args[3] = y2 - y1; | ||
| 149 | buf += y1 * line_length; | ||
| 150 | auok190x_send_cmdargs_pixels_nowait(par, AUOK1901_CMD_DMA_START, 4, | ||
| 151 | args, ((y2 - y1) * line_length)/2, | ||
| 152 | (u16 *) buf); | ||
| 153 | auok190x_send_command_nowait(par, AUOK190X_CMD_DATA_STOP); | ||
| 154 | |||
| 155 | /* K1901: second tell the controller to update the region with mode */ | ||
| 156 | args[0] = mode | AUOK1901_DDMA_ROTATE180(par->rotation); | ||
| 157 | args[1] = 1; | ||
| 158 | args[2] = y1 + 1; | ||
| 159 | args[3] = xres; | ||
| 160 | args[4] = y2 - y1; | ||
| 161 | auok190x_send_cmdargs_nowait(par, AUOK1901_CMD_DDMA_START, 5, args); | ||
| 162 | |||
| 163 | par->update_cnt++; | ||
| 164 | |||
| 165 | mutex_unlock(&(par->io_lock)); | ||
| 166 | |||
| 167 | pm_runtime_mark_last_busy(dev); | ||
| 168 | pm_runtime_put_autosuspend(dev); | ||
| 169 | } | ||
| 170 | |||
| 171 | static void auok1901fb_dpy_update_pages(struct auok190xfb_par *par, | ||
| 172 | u16 y1, u16 y2) | ||
| 173 | { | ||
| 174 | int mode; | ||
| 175 | |||
| 176 | if (par->update_mode < 0) { | ||
| 177 | mode = AUOK190X_UPDATE_MODE(1); | ||
| 178 | par->last_mode = -1; | ||
| 179 | } else { | ||
| 180 | mode = AUOK190X_UPDATE_MODE(par->update_mode); | ||
| 181 | par->last_mode = par->update_mode; | ||
| 182 | } | ||
| 183 | |||
| 184 | if (par->flash) | ||
| 185 | mode |= AUOK190X_UPDATE_NONFLASH; | ||
| 186 | |||
| 187 | auok1901_update_region(par, mode, y1, y2); | ||
| 188 | } | ||
| 189 | |||
| 190 | static void auok1901fb_dpy_update(struct auok190xfb_par *par) | ||
| 191 | { | ||
| 192 | int mode; | ||
| 193 | |||
| 194 | /* When doing full updates, wait for the controller to be ready | ||
| 195 | * This will hopefully catch some hangs of the K1901 | ||
| 196 | */ | ||
| 197 | par->board->wait_for_rdy(par); | ||
| 198 | |||
| 199 | if (par->update_mode < 0) { | ||
| 200 | mode = AUOK190X_UPDATE_MODE(0); | ||
| 201 | par->last_mode = -1; | ||
| 202 | } else { | ||
| 203 | mode = AUOK190X_UPDATE_MODE(par->update_mode); | ||
| 204 | par->last_mode = par->update_mode; | ||
| 205 | } | ||
| 206 | |||
| 207 | if (par->flash) | ||
| 208 | mode |= AUOK190X_UPDATE_NONFLASH; | ||
| 209 | |||
| 210 | auok1901_update_region(par, mode, 0, par->info->var.yres); | ||
| 211 | par->update_cnt = 0; | ||
| 212 | } | ||
| 213 | |||
| 214 | static bool auok1901fb_need_refresh(struct auok190xfb_par *par) | ||
| 215 | { | ||
| 216 | return (par->update_cnt > 10); | ||
| 217 | } | ||
| 218 | |||
| 219 | static int auok1901fb_probe(struct platform_device *pdev) | ||
| 220 | { | ||
| 221 | struct auok190x_init_data init; | ||
| 222 | struct auok190x_board *board; | ||
| 223 | |||
| 224 | /* pick up board specific routines */ | ||
| 225 | board = pdev->dev.platform_data; | ||
| 226 | if (!board) | ||
| 227 | return -EINVAL; | ||
| 228 | |||
| 229 | /* fill temporary init struct for common init */ | ||
| 230 | init.id = "auo_k1901fb"; | ||
| 231 | init.board = board; | ||
| 232 | init.update_partial = auok1901fb_dpy_update_pages; | ||
| 233 | init.update_all = auok1901fb_dpy_update; | ||
| 234 | init.need_refresh = auok1901fb_need_refresh; | ||
| 235 | init.init = auok1901_init; | ||
| 236 | |||
| 237 | return auok190x_common_probe(pdev, &init); | ||
| 238 | } | ||
| 239 | |||
| 240 | static int auok1901fb_remove(struct platform_device *pdev) | ||
| 241 | { | ||
| 242 | return auok190x_common_remove(pdev); | ||
| 243 | } | ||
| 244 | |||
| 245 | static struct platform_driver auok1901fb_driver = { | ||
| 246 | .probe = auok1901fb_probe, | ||
| 247 | .remove = auok1901fb_remove, | ||
| 248 | .driver = { | ||
| 249 | .name = "auo_k1901fb", | ||
| 250 | .pm = &auok190x_pm, | ||
| 251 | }, | ||
| 252 | }; | ||
| 253 | module_platform_driver(auok1901fb_driver); | ||
| 254 | |||
| 255 | MODULE_DESCRIPTION("framebuffer driver for the AUO-K1901 EPD controller"); | ||
| 256 | MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>"); | ||
| 257 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/video/fbdev/auo_k190x.c b/drivers/video/fbdev/auo_k190x.c deleted file mode 100644 index 9d24d1b3e9ef..000000000000 --- a/drivers/video/fbdev/auo_k190x.c +++ /dev/null | |||
| @@ -1,1195 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Common code for AUO-K190X framebuffer drivers | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/module.h> | ||
| 12 | #include <linux/sched/mm.h> | ||
| 13 | #include <linux/kernel.h> | ||
| 14 | #include <linux/gpio.h> | ||
| 15 | #include <linux/platform_device.h> | ||
| 16 | #include <linux/pm_runtime.h> | ||
| 17 | #include <linux/fb.h> | ||
| 18 | #include <linux/delay.h> | ||
| 19 | #include <linux/uaccess.h> | ||
| 20 | #include <linux/vmalloc.h> | ||
| 21 | #include <linux/regulator/consumer.h> | ||
| 22 | |||
| 23 | #include <video/auo_k190xfb.h> | ||
| 24 | |||
| 25 | #include "auo_k190x.h" | ||
| 26 | |||
| 27 | struct panel_info { | ||
| 28 | int w; | ||
| 29 | int h; | ||
| 30 | }; | ||
| 31 | |||
| 32 | /* table of panel specific parameters to be indexed into by the board drivers */ | ||
| 33 | static struct panel_info panel_table[] = { | ||
| 34 | /* standard 6" */ | ||
| 35 | [AUOK190X_RESOLUTION_800_600] = { | ||
| 36 | .w = 800, | ||
| 37 | .h = 600, | ||
| 38 | }, | ||
| 39 | /* standard 9" */ | ||
| 40 | [AUOK190X_RESOLUTION_1024_768] = { | ||
| 41 | .w = 1024, | ||
| 42 | .h = 768, | ||
| 43 | }, | ||
| 44 | [AUOK190X_RESOLUTION_600_800] = { | ||
| 45 | .w = 600, | ||
| 46 | .h = 800, | ||
| 47 | }, | ||
| 48 | [AUOK190X_RESOLUTION_768_1024] = { | ||
| 49 | .w = 768, | ||
| 50 | .h = 1024, | ||
| 51 | }, | ||
| 52 | }; | ||
| 53 | |||
| 54 | /* | ||
| 55 | * private I80 interface to the board driver | ||
| 56 | */ | ||
| 57 | |||
| 58 | static void auok190x_issue_data(struct auok190xfb_par *par, u16 data) | ||
| 59 | { | ||
| 60 | par->board->set_ctl(par, AUOK190X_I80_WR, 0); | ||
| 61 | par->board->set_hdb(par, data); | ||
| 62 | par->board->set_ctl(par, AUOK190X_I80_WR, 1); | ||
| 63 | } | ||
| 64 | |||
| 65 | static void auok190x_issue_cmd(struct auok190xfb_par *par, u16 data) | ||
| 66 | { | ||
| 67 | par->board->set_ctl(par, AUOK190X_I80_DC, 0); | ||
| 68 | auok190x_issue_data(par, data); | ||
| 69 | par->board->set_ctl(par, AUOK190X_I80_DC, 1); | ||
| 70 | } | ||
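Taken together, the two helpers above generate one I80 bus cycle per 16-bit word. A sketch of the resulting signal sequence, assuming (as is usual for I80-style interfaces, but not stated in this code) that the controller latches HDB on the rising WR edge:

    /*
     * command word:                    plain data word:
     *   DC low    (command phase)        WR low
     *   WR low                           HDB = data
     *   HDB = cmd                        WR high   (word latched)
     *   WR high   (word latched)
     *   DC high   (back to data phase)
     */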
| 71 | |||
| 72 | /** | ||
| 73 | * Conversion of 16bit color to 4bit grayscale | ||
| 74 | * does roughly (0.3 * R + 0.6 * G + 0.1 * B) / 2 | ||
| 75 | */ | ||
| 76 | static inline int rgb565_to_gray4(u16 data, struct fb_var_screeninfo *var) | ||
| 77 | { | ||
| 78 | return ((((data & 0xF800) >> var->red.offset) * 77 + | ||
| 79 | ((data & 0x07E0) >> (var->green.offset + 1)) * 151 + | ||
| 80 | ((data & 0x1F) >> var->blue.offset) * 28) >> 8 >> 1); | ||
| 81 | } | ||
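A quick sanity check of the weighting above, assuming the RGB565 layout that auok190xfb_check_var() sets up (red.offset = 11, green.offset = 5, blue.offset = 0):

    /*
     * The weights sum to 77 + 151 + 28 = 256, so a grey input whose
     * 5-bit channels all equal n maps to (n * 256) >> 8 >> 1 = n >> 1:
     *   white 0xffff -> (31 * 256) >> 9 = 15   (brightest 4-bit level)
     *   black 0x0000 -> 0
     */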
| 82 | |||
| 83 | static int auok190x_issue_pixels_rgb565(struct auok190xfb_par *par, int size, | ||
| 84 | u16 *data) | ||
| 85 | { | ||
| 86 | struct fb_var_screeninfo *var = &par->info->var; | ||
| 87 | struct device *dev = par->info->device; | ||
| 88 | int i; | ||
| 89 | u16 tmp; | ||
| 90 | |||
| 91 | if (size & 7) { | ||
| 92 | dev_err(dev, "issue_pixels: size %d must be a multiple of 8\n", | ||
| 93 | size); | ||
| 94 | return -EINVAL; | ||
| 95 | } | ||
| 96 | |||
| 97 | for (i = 0; i < (size >> 2); i++) { | ||
| 98 | par->board->set_ctl(par, AUOK190X_I80_WR, 0); | ||
| 99 | |||
| 100 | tmp = (rgb565_to_gray4(data[4*i], var) & 0x000F); | ||
| 101 | tmp |= (rgb565_to_gray4(data[4*i+1], var) << 4) & 0x00F0; | ||
| 102 | tmp |= (rgb565_to_gray4(data[4*i+2], var) << 8) & 0x0F00; | ||
| 103 | tmp |= (rgb565_to_gray4(data[4*i+3], var) << 12) & 0xF000; | ||
| 104 | |||
| 105 | par->board->set_hdb(par, tmp); | ||
| 106 | par->board->set_ctl(par, AUOK190X_I80_WR, 1); | ||
| 107 | } | ||
| 108 | |||
| 109 | return 0; | ||
| 110 | } | ||
| 111 | |||
| 112 | static int auok190x_issue_pixels_gray8(struct auok190xfb_par *par, int size, | ||
| 113 | u16 *data) | ||
| 114 | { | ||
| 115 | struct device *dev = par->info->device; | ||
| 116 | int i; | ||
| 117 | u16 tmp; | ||
| 118 | |||
| 119 | if (size & 3) { | ||
| 120 | dev_err(dev, "issue_pixels: size %d must be a multiple of 4\n", | ||
| 121 | size); | ||
| 122 | return -EINVAL; | ||
| 123 | } | ||
| 124 | |||
| 125 | for (i = 0; i < (size >> 1); i++) { | ||
| 126 | par->board->set_ctl(par, AUOK190X_I80_WR, 0); | ||
| 127 | |||
| 128 | /* simple reduction of 8bit staticgray to 4bit gray | ||
| 129 | * combines 4 * 4bit pixel values into a 16bit value | ||
| 130 | */ | ||
| 131 | tmp = (data[2*i] & 0xF0) >> 4; | ||
| 132 | tmp |= (data[2*i] & 0xF000) >> 8; | ||
| 133 | tmp |= (data[2*i+1] & 0xF0) << 4; | ||
| 134 | tmp |= (data[2*i+1] & 0xF000); | ||
| 135 | |||
| 136 | par->board->set_hdb(par, tmp); | ||
| 137 | par->board->set_ctl(par, AUOK190X_I80_WR, 1); | ||
| 138 | } | ||
| 139 | |||
| 140 | return 0; | ||
| 141 | } | ||
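A worked example of the nibble packing above, with hypothetical pixel data:

    /*
     * data[2*i] = 0x8040 (pixels 0x40, 0x80), data[2*i+1] = 0xf0c0:
     *   (0x8040 & 0x00f0) >> 4 = 0x0004
     *   (0x8040 & 0xf000) >> 8 = 0x0080
     *   (0xf0c0 & 0x00f0) << 4 = 0x0c00
     *   (0xf0c0 & 0xf000)      = 0xf000
     *   tmp                    = 0xfc84
     * so the earliest pixel lands in the lowest nibble of the 16bit
     * word handed to the controller.
     */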
| 142 | |||
| 143 | static int auok190x_issue_pixels(struct auok190xfb_par *par, int size, | ||
| 144 | u16 *data) | ||
| 145 | { | ||
| 146 | struct fb_info *info = par->info; | ||
| 147 | struct device *dev = par->info->device; | ||
| 148 | |||
| 149 | if (info->var.bits_per_pixel == 8 && info->var.grayscale) | ||
| 150 | auok190x_issue_pixels_gray8(par, size, data); | ||
| 151 | else if (info->var.bits_per_pixel == 16) | ||
| 152 | auok190x_issue_pixels_rgb565(par, size, data); | ||
| 153 | else | ||
| 154 | dev_err(dev, "unsupported color mode (bits: %d, gray: %d)\n", | ||
| 155 | info->var.bits_per_pixel, info->var.grayscale); | ||
| 156 | |||
| 157 | return 0; | ||
| 158 | } | ||
| 159 | |||
| 160 | static u16 auok190x_read_data(struct auok190xfb_par *par) | ||
| 161 | { | ||
| 162 | u16 data; | ||
| 163 | |||
| 164 | par->board->set_ctl(par, AUOK190X_I80_OE, 0); | ||
| 165 | data = par->board->get_hdb(par); | ||
| 166 | par->board->set_ctl(par, AUOK190X_I80_OE, 1); | ||
| 167 | |||
| 168 | return data; | ||
| 169 | } | ||
| 170 | |||
| 171 | /* | ||
| 172 | * Command interface for the controller drivers | ||
| 173 | */ | ||
| 174 | |||
| 175 | void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data) | ||
| 176 | { | ||
| 177 | par->board->set_ctl(par, AUOK190X_I80_CS, 0); | ||
| 178 | auok190x_issue_cmd(par, data); | ||
| 179 | par->board->set_ctl(par, AUOK190X_I80_CS, 1); | ||
| 180 | } | ||
| 181 | EXPORT_SYMBOL_GPL(auok190x_send_command_nowait); | ||
| 182 | |||
| 183 | void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd, | ||
| 184 | int argc, u16 *argv) | ||
| 185 | { | ||
| 186 | int i; | ||
| 187 | |||
| 188 | par->board->set_ctl(par, AUOK190X_I80_CS, 0); | ||
| 189 | auok190x_issue_cmd(par, cmd); | ||
| 190 | |||
| 191 | for (i = 0; i < argc; i++) | ||
| 192 | auok190x_issue_data(par, argv[i]); | ||
| 193 | par->board->set_ctl(par, AUOK190X_I80_CS, 1); | ||
| 194 | } | ||
| 195 | EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_nowait); | ||
| 196 | |||
| 197 | int auok190x_send_command(struct auok190xfb_par *par, u16 data) | ||
| 198 | { | ||
| 199 | int ret; | ||
| 200 | |||
| 201 | ret = par->board->wait_for_rdy(par); | ||
| 202 | if (ret) | ||
| 203 | return ret; | ||
| 204 | |||
| 205 | auok190x_send_command_nowait(par, data); | ||
| 206 | return 0; | ||
| 207 | } | ||
| 208 | EXPORT_SYMBOL_GPL(auok190x_send_command); | ||
| 209 | |||
| 210 | int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd, | ||
| 211 | int argc, u16 *argv) | ||
| 212 | { | ||
| 213 | int ret; | ||
| 214 | |||
| 215 | ret = par->board->wait_for_rdy(par); | ||
| 216 | if (ret) | ||
| 217 | return ret; | ||
| 218 | |||
| 219 | auok190x_send_cmdargs_nowait(par, cmd, argc, argv); | ||
| 220 | return 0; | ||
| 221 | } | ||
| 222 | EXPORT_SYMBOL_GPL(auok190x_send_cmdargs); | ||
| 223 | |||
| 224 | int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd, | ||
| 225 | int argc, u16 *argv) | ||
| 226 | { | ||
| 227 | int i, ret; | ||
| 228 | |||
| 229 | ret = par->board->wait_for_rdy(par); | ||
| 230 | if (ret) | ||
| 231 | return ret; | ||
| 232 | |||
| 233 | par->board->set_ctl(par, AUOK190X_I80_CS, 0); | ||
| 234 | auok190x_issue_cmd(par, cmd); | ||
| 235 | |||
| 236 | for (i = 0; i < argc; i++) | ||
| 237 | argv[i] = auok190x_read_data(par); | ||
| 238 | par->board->set_ctl(par, AUOK190X_I80_CS, 1); | ||
| 239 | |||
| 240 | return 0; | ||
| 241 | } | ||
| 242 | EXPORT_SYMBOL_GPL(auok190x_read_cmdargs); | ||
| 243 | |||
| 244 | void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par, u16 cmd, | ||
| 245 | int argc, u16 *argv, int size, u16 *data) | ||
| 246 | { | ||
| 247 | int i; | ||
| 248 | |||
| 249 | par->board->set_ctl(par, AUOK190X_I80_CS, 0); | ||
| 250 | |||
| 251 | auok190x_issue_cmd(par, cmd); | ||
| 252 | |||
| 253 | for (i = 0; i < argc; i++) | ||
| 254 | auok190x_issue_data(par, argv[i]); | ||
| 255 | |||
| 256 | auok190x_issue_pixels(par, size, data); | ||
| 257 | |||
| 258 | par->board->set_ctl(par, AUOK190X_I80_CS, 1); | ||
| 259 | } | ||
| 260 | EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels_nowait); | ||
| 261 | |||
| 262 | int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd, | ||
| 263 | int argc, u16 *argv, int size, u16 *data) | ||
| 264 | { | ||
| 265 | int ret; | ||
| 266 | |||
| 267 | ret = par->board->wait_for_rdy(par); | ||
| 268 | if (ret) | ||
| 269 | return ret; | ||
| 270 | |||
| 271 | auok190x_send_cmdargs_pixels_nowait(par, cmd, argc, argv, size, data); | ||
| 272 | |||
| 273 | return 0; | ||
| 274 | } | ||
| 275 | EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels); | ||
| 276 | |||
| 277 | /* | ||
| 278 | * fbdefio callbacks - common on both controllers. | ||
| 279 | */ | ||
| 280 | |||
| 281 | static void auok190xfb_dpy_first_io(struct fb_info *info) | ||
| 282 | { | ||
| 283 | /* tell runtime-pm that we wish to use the device in a short time */ | ||
| 284 | pm_runtime_get(info->device); | ||
| 285 | } | ||
| 286 | |||
| 287 | /* this is called back from the deferred io workqueue */ | ||
| 288 | static void auok190xfb_dpy_deferred_io(struct fb_info *info, | ||
| 289 | struct list_head *pagelist) | ||
| 290 | { | ||
| 291 | struct fb_deferred_io *fbdefio = info->fbdefio; | ||
| 292 | struct auok190xfb_par *par = info->par; | ||
| 293 | u16 line_length = info->fix.line_length; | ||
| 294 | u16 yres = info->var.yres; | ||
| 295 | u16 y1 = 0, h = 0; | ||
| 296 | int prev_index = -1; | ||
| 297 | struct page *cur; | ||
| 298 | int h_inc; | ||
| 299 | int threshold; | ||
| 300 | |||
| 301 | if (!list_empty(pagelist)) | ||
| 302 | /* the device resume should've been requested through first_io, | ||
| 303 | * if it has not finished by now, wait for it. | ||
| 304 | */ | ||
| 305 | pm_runtime_barrier(info->device); | ||
| 306 | else | ||
| 307 | /* We reached this via the fsync or some other way. | ||
| 308 | * In either case the first_io function did not run, | ||
| 309 | * so we runtime_resume the device here synchronously. | ||
| 310 | */ | ||
| 311 | pm_runtime_get_sync(info->device); | ||
| 312 | |||
| 313 | /* Do a full screen update every n updates to prevent | ||
| 314 | * excessive darkening of the Sipix display. | ||
| 315 | * If we do this, there is no need to walk the pages. | ||
| 316 | */ | ||
| 317 | if (par->need_refresh(par)) { | ||
| 318 | par->update_all(par); | ||
| 319 | goto out; | ||
| 320 | } | ||
| 321 | |||
| 322 | /* height increment is fixed per page */ | ||
| 323 | h_inc = DIV_ROUND_UP(PAGE_SIZE, line_length); | ||
| 324 | |||
| 325 | /* calculate number of pages from pixel height */ | ||
| 326 | threshold = par->consecutive_threshold / h_inc; | ||
| 327 | if (threshold < 1) | ||
| 328 | threshold = 1; | ||
| 329 | |||
| 330 | /* walk the written page list and swizzle the data */ | ||
| 331 | list_for_each_entry(cur, &fbdefio->pagelist, lru) { | ||
| 332 | if (prev_index < 0) { | ||
| 333 | /* just starting so assign first page */ | ||
| 334 | y1 = (cur->index << PAGE_SHIFT) / line_length; | ||
| 335 | h = h_inc; | ||
| 336 | } else if ((cur->index - prev_index) <= threshold) { | ||
| 337 | /* page is within our threshold for single updates */ | ||
| 338 | h += h_inc * (cur->index - prev_index); | ||
| 339 | } else { | ||
| 340 | /* page not consecutive, issue previous update first */ | ||
| 341 | par->update_partial(par, y1, y1 + h); | ||
| 342 | |||
| 343 | /* start over with our non consecutive page */ | ||
| 344 | y1 = (cur->index << PAGE_SHIFT) / line_length; | ||
| 345 | h = h_inc; | ||
| 346 | } | ||
| 347 | prev_index = cur->index; | ||
| 348 | } | ||
| 349 | |||
| 350 | /* if we still have any pages to update we do so now */ | ||
| 351 | if (h >= yres) | ||
| 352 | /* it's a full screen update, just do it */ | ||
| 353 | par->update_all(par); | ||
| 354 | else | ||
| 355 | par->update_partial(par, y1, min((u16) (y1 + h), yres)); | ||
| 356 | |||
| 357 | out: | ||
| 358 | pm_runtime_mark_last_busy(info->device); | ||
| 359 | pm_runtime_put_autosuspend(info->device); | ||
| 360 | } | ||
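To make the coalescing above concrete, a worked example for the standard 6" 800x600 panel in the default 8-bit grayscale mode, assuming 4K pages (consecutive_threshold is set to 100 in auok190x_common_probe()):

    /*
     * line_length = 800 bytes               (800 pixels at 8 bpp)
     * h_inc       = DIV_ROUND_UP(4096, 800) = 6 lines per dirty page
     * threshold   = 100 / 6                 = 16 pages
     * dirty pages up to 16 indices apart are therefore merged into a
     * single partial update instead of being flushed one by one.
     */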
| 361 | |||
| 362 | /* | ||
| 363 | * framebuffer operations | ||
| 364 | */ | ||
| 365 | |||
| 366 | /* | ||
| 367 | * this is the slow path from userspace. they can seek and write to | ||
| 368 | * the fb. it's inefficient to do anything less than a full screen draw | ||
| 369 | */ | ||
| 370 | static ssize_t auok190xfb_write(struct fb_info *info, const char __user *buf, | ||
| 371 | size_t count, loff_t *ppos) | ||
| 372 | { | ||
| 373 | struct auok190xfb_par *par = info->par; | ||
| 374 | unsigned long p = *ppos; | ||
| 375 | void *dst; | ||
| 376 | int err = 0; | ||
| 377 | unsigned long total_size; | ||
| 378 | |||
| 379 | if (info->state != FBINFO_STATE_RUNNING) | ||
| 380 | return -EPERM; | ||
| 381 | |||
| 382 | total_size = info->fix.smem_len; | ||
| 383 | |||
| 384 | if (p > total_size) | ||
| 385 | return -EFBIG; | ||
| 386 | |||
| 387 | if (count > total_size) { | ||
| 388 | err = -EFBIG; | ||
| 389 | count = total_size; | ||
| 390 | } | ||
| 391 | |||
| 392 | if (count + p > total_size) { | ||
| 393 | if (!err) | ||
| 394 | err = -ENOSPC; | ||
| 395 | |||
| 396 | count = total_size - p; | ||
| 397 | } | ||
| 398 | |||
| 399 | dst = (void *)(info->screen_base + p); | ||
| 400 | |||
| 401 | if (copy_from_user(dst, buf, count)) | ||
| 402 | err = -EFAULT; | ||
| 403 | |||
| 404 | if (!err) | ||
| 405 | *ppos += count; | ||
| 406 | |||
| 407 | par->update_all(par); | ||
| 408 | |||
| 409 | return (err) ? err : count; | ||
| 410 | } | ||
| 411 | |||
| 412 | static void auok190xfb_fillrect(struct fb_info *info, | ||
| 413 | const struct fb_fillrect *rect) | ||
| 414 | { | ||
| 415 | struct auok190xfb_par *par = info->par; | ||
| 416 | |||
| 417 | sys_fillrect(info, rect); | ||
| 418 | |||
| 419 | par->update_all(par); | ||
| 420 | } | ||
| 421 | |||
| 422 | static void auok190xfb_copyarea(struct fb_info *info, | ||
| 423 | const struct fb_copyarea *area) | ||
| 424 | { | ||
| 425 | struct auok190xfb_par *par = info->par; | ||
| 426 | |||
| 427 | sys_copyarea(info, area); | ||
| 428 | |||
| 429 | par->update_all(par); | ||
| 430 | } | ||
| 431 | |||
| 432 | static void auok190xfb_imageblit(struct fb_info *info, | ||
| 433 | const struct fb_image *image) | ||
| 434 | { | ||
| 435 | struct auok190xfb_par *par = info->par; | ||
| 436 | |||
| 437 | sys_imageblit(info, image); | ||
| 438 | |||
| 439 | par->update_all(par); | ||
| 440 | } | ||
| 441 | |||
| 442 | static int auok190xfb_check_var(struct fb_var_screeninfo *var, | ||
| 443 | struct fb_info *info) | ||
| 444 | { | ||
| 445 | struct device *dev = info->device; | ||
| 446 | struct auok190xfb_par *par = info->par; | ||
| 447 | struct panel_info *panel = &panel_table[par->resolution]; | ||
| 448 | int size; | ||
| 449 | |||
| 450 | /* | ||
| 451 | * Color depth | ||
| 452 | */ | ||
| 453 | |||
| 454 | if (var->bits_per_pixel == 8 && var->grayscale == 1) { | ||
| 455 | /* | ||
| 456 | * For 8-bit grayscale, R, G, and B offset are equal. | ||
| 457 | */ | ||
| 458 | var->red.length = 8; | ||
| 459 | var->red.offset = 0; | ||
| 460 | var->red.msb_right = 0; | ||
| 461 | |||
| 462 | var->green.length = 8; | ||
| 463 | var->green.offset = 0; | ||
| 464 | var->green.msb_right = 0; | ||
| 465 | |||
| 466 | var->blue.length = 8; | ||
| 467 | var->blue.offset = 0; | ||
| 468 | var->blue.msb_right = 0; | ||
| 469 | |||
| 470 | var->transp.length = 0; | ||
| 471 | var->transp.offset = 0; | ||
| 472 | var->transp.msb_right = 0; | ||
| 473 | } else if (var->bits_per_pixel == 16) { | ||
| 474 | var->red.length = 5; | ||
| 475 | var->red.offset = 11; | ||
| 476 | var->red.msb_right = 0; | ||
| 477 | |||
| 478 | var->green.length = 6; | ||
| 479 | var->green.offset = 5; | ||
| 480 | var->green.msb_right = 0; | ||
| 481 | |||
| 482 | var->blue.length = 5; | ||
| 483 | var->blue.offset = 0; | ||
| 484 | var->blue.msb_right = 0; | ||
| 485 | |||
| 486 | var->transp.length = 0; | ||
| 487 | var->transp.offset = 0; | ||
| 488 | var->transp.msb_right = 0; | ||
| 489 | } else { | ||
| 490 | dev_warn(dev, "unsupported color mode (bits: %d, grayscale: %d)\n", | ||
| 491 | info->var.bits_per_pixel, info->var.grayscale); | ||
| 492 | return -EINVAL; | ||
| 493 | } | ||
| 494 | |||
| 495 | /* | ||
| 496 | * Dimensions | ||
| 497 | */ | ||
| 498 | |||
| 499 | switch (var->rotate) { | ||
| 500 | case FB_ROTATE_UR: | ||
| 501 | case FB_ROTATE_UD: | ||
| 502 | var->xres = panel->w; | ||
| 503 | var->yres = panel->h; | ||
| 504 | break; | ||
| 505 | case FB_ROTATE_CW: | ||
| 506 | case FB_ROTATE_CCW: | ||
| 507 | var->xres = panel->h; | ||
| 508 | var->yres = panel->w; | ||
| 509 | break; | ||
| 510 | default: | ||
| 511 | dev_dbg(dev, "Invalid rotation request\n"); | ||
| 512 | return -EINVAL; | ||
| 513 | } | ||
| 514 | |||
| 515 | var->xres_virtual = var->xres; | ||
| 516 | var->yres_virtual = var->yres; | ||
| 517 | |||
| 518 | /* | ||
| 519 | * Memory limit | ||
| 520 | */ | ||
| 521 | |||
| 522 | size = var->xres_virtual * var->yres_virtual * var->bits_per_pixel / 8; | ||
| 523 | if (size > info->fix.smem_len) { | ||
| 524 | dev_err(dev, "Memory limit exceeded, requested %dK\n", | ||
| 525 | size >> 10); | ||
| 526 | return -ENOMEM; | ||
| 527 | } | ||
| 528 | |||
| 529 | return 0; | ||
| 530 | } | ||
| 531 | |||
| 532 | static int auok190xfb_set_fix(struct fb_info *info) | ||
| 533 | { | ||
| 534 | struct fb_fix_screeninfo *fix = &info->fix; | ||
| 535 | struct fb_var_screeninfo *var = &info->var; | ||
| 536 | |||
| 537 | fix->line_length = var->xres_virtual * var->bits_per_pixel / 8; | ||
| 538 | |||
| 539 | fix->type = FB_TYPE_PACKED_PIXELS; | ||
| 540 | fix->accel = FB_ACCEL_NONE; | ||
| 541 | fix->visual = (var->grayscale) ? FB_VISUAL_STATIC_PSEUDOCOLOR | ||
| 542 | : FB_VISUAL_TRUECOLOR; | ||
| 543 | fix->xpanstep = 0; | ||
| 544 | fix->ypanstep = 0; | ||
| 545 | fix->ywrapstep = 0; | ||
| 546 | |||
| 547 | return 0; | ||
| 548 | } | ||
| 549 | |||
| 550 | static int auok190xfb_set_par(struct fb_info *info) | ||
| 551 | { | ||
| 552 | struct auok190xfb_par *par = info->par; | ||
| 553 | |||
| 554 | par->rotation = info->var.rotate; | ||
| 555 | auok190xfb_set_fix(info); | ||
| 556 | |||
| 557 | /* reinit the controller to honor the rotation */ | ||
| 558 | par->init(par); | ||
| 559 | |||
| 560 | /* wait for init to complete */ | ||
| 561 | par->board->wait_for_rdy(par); | ||
| 562 | |||
| 563 | return 0; | ||
| 564 | } | ||
| 565 | |||
| 566 | static struct fb_ops auok190xfb_ops = { | ||
| 567 | .owner = THIS_MODULE, | ||
| 568 | .fb_read = fb_sys_read, | ||
| 569 | .fb_write = auok190xfb_write, | ||
| 570 | .fb_fillrect = auok190xfb_fillrect, | ||
| 571 | .fb_copyarea = auok190xfb_copyarea, | ||
| 572 | .fb_imageblit = auok190xfb_imageblit, | ||
| 573 | .fb_check_var = auok190xfb_check_var, | ||
| 574 | .fb_set_par = auok190xfb_set_par, | ||
| 575 | }; | ||
| 576 | |||
| 577 | /* | ||
| 578 | * Controller-functions common to both K1900 and K1901 | ||
| 579 | */ | ||
| 580 | |||
| 581 | static int auok190x_read_temperature(struct auok190xfb_par *par) | ||
| 582 | { | ||
| 583 | struct device *dev = par->info->device; | ||
| 584 | u16 data[4]; | ||
| 585 | int temp; | ||
| 586 | |||
| 587 | pm_runtime_get_sync(dev); | ||
| 588 | |||
| 589 | mutex_lock(&(par->io_lock)); | ||
| 590 | |||
| 591 | auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data); | ||
| 592 | |||
| 593 | mutex_unlock(&(par->io_lock)); | ||
| 594 | |||
| 595 | pm_runtime_mark_last_busy(dev); | ||
| 596 | pm_runtime_put_autosuspend(dev); | ||
| 597 | |||
| 598 | /* sanitize and split off half-degrees for now */ | ||
| 599 | temp = ((data[0] & AUOK190X_VERSION_TEMP_MASK) >> 1); | ||
| 600 | |||
| 601 | /* handle positive and negative temperatures */ | ||
| 602 | if (temp >= 201) | ||
| 603 | return (255 - temp + 1) * (-1); | ||
| 604 | else | ||
| 605 | return temp; | ||
| 606 | } | ||
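A worked example of the temperature decoding above (raw register values hypothetical):

    /*
     * raw 0x032: (0x032 & 0x1ff) >> 1 = 25  ->  +25 degrees
     * raw 0x1fc: (0x1fc & 0x1ff) >> 1 = 254 ->  (255 - 254 + 1) * -1
     *                                        =   -2 degrees
     * i.e. after dropping the half-degree bit the 8-bit value is
     * treated as two's complement, 201..255 mapping to -55..-1.
     */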
| 607 | |||
| 608 | static void auok190x_identify(struct auok190xfb_par *par) | ||
| 609 | { | ||
| 610 | struct device *dev = par->info->device; | ||
| 611 | u16 data[4]; | ||
| 612 | |||
| 613 | pm_runtime_get_sync(dev); | ||
| 614 | |||
| 615 | mutex_lock(&(par->io_lock)); | ||
| 616 | |||
| 617 | auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data); | ||
| 618 | |||
| 619 | mutex_unlock(&(par->io_lock)); | ||
| 620 | |||
| 621 | par->epd_type = data[1] & AUOK190X_VERSION_TEMP_MASK; | ||
| 622 | |||
| 623 | par->panel_size_int = AUOK190X_VERSION_SIZE_INT(data[2]); | ||
| 624 | par->panel_size_float = AUOK190X_VERSION_SIZE_FLOAT(data[2]); | ||
| 625 | par->panel_model = AUOK190X_VERSION_MODEL(data[2]); | ||
| 626 | |||
| 627 | par->tcon_version = AUOK190X_VERSION_TCON(data[3]); | ||
| 628 | par->lut_version = AUOK190X_VERSION_LUT(data[3]); | ||
| 629 | |||
| 630 | dev_dbg(dev, "panel %d.%din, model 0x%x, EPD 0x%x TCON-rev 0x%x, LUT-rev 0x%x", | ||
| 631 | par->panel_size_int, par->panel_size_float, par->panel_model, | ||
| 632 | par->epd_type, par->tcon_version, par->lut_version); | ||
| 633 | |||
| 634 | pm_runtime_mark_last_busy(dev); | ||
| 635 | pm_runtime_put_autosuspend(dev); | ||
| 636 | } | ||
| 637 | |||
| 638 | /* | ||
| 639 | * Sysfs functions | ||
| 640 | */ | ||
| 641 | |||
| 642 | static ssize_t update_mode_show(struct device *dev, | ||
| 643 | struct device_attribute *attr, char *buf) | ||
| 644 | { | ||
| 645 | struct fb_info *info = dev_get_drvdata(dev); | ||
| 646 | struct auok190xfb_par *par = info->par; | ||
| 647 | |||
| 648 | return sprintf(buf, "%d\n", par->update_mode); | ||
| 649 | } | ||
| 650 | |||
| 651 | static ssize_t update_mode_store(struct device *dev, | ||
| 652 | struct device_attribute *attr, | ||
| 653 | const char *buf, size_t count) | ||
| 654 | { | ||
| 655 | struct fb_info *info = dev_get_drvdata(dev); | ||
| 656 | struct auok190xfb_par *par = info->par; | ||
| 657 | int mode, ret; | ||
| 658 | |||
| 659 | ret = kstrtoint(buf, 10, &mode); | ||
| 660 | if (ret) | ||
| 661 | return ret; | ||
| 662 | |||
| 663 | par->update_mode = mode; | ||
| 664 | |||
| 665 | /* if we enter a better mode, do a full update */ | ||
| 666 | if (par->last_mode > 1 && mode < par->last_mode) | ||
| 667 | par->update_all(par); | ||
| 668 | |||
| 669 | return count; | ||
| 670 | } | ||
| 671 | |||
| 672 | static ssize_t flash_show(struct device *dev, struct device_attribute *attr, | ||
| 673 | char *buf) | ||
| 674 | { | ||
| 675 | struct fb_info *info = dev_get_drvdata(dev); | ||
| 676 | struct auok190xfb_par *par = info->par; | ||
| 677 | |||
| 678 | return sprintf(buf, "%d\n", par->flash); | ||
| 679 | } | ||
| 680 | |||
| 681 | static ssize_t flash_store(struct device *dev, struct device_attribute *attr, | ||
| 682 | const char *buf, size_t count) | ||
| 683 | { | ||
| 684 | struct fb_info *info = dev_get_drvdata(dev); | ||
| 685 | struct auok190xfb_par *par = info->par; | ||
| 686 | int flash, ret; | ||
| 687 | |||
| 688 | ret = kstrtoint(buf, 10, &flash); | ||
| 689 | if (ret) | ||
| 690 | return ret; | ||
| 691 | |||
| 692 | if (flash > 0) | ||
| 693 | par->flash = 1; | ||
| 694 | else | ||
| 695 | par->flash = 0; | ||
| 696 | |||
| 697 | return count; | ||
| 698 | } | ||
| 699 | |||
| 700 | static ssize_t temp_show(struct device *dev, struct device_attribute *attr, | ||
| 701 | char *buf) | ||
| 702 | { | ||
| 703 | struct fb_info *info = dev_get_drvdata(dev); | ||
| 704 | struct auok190xfb_par *par = info->par; | ||
| 705 | int temp; | ||
| 706 | |||
| 707 | temp = auok190x_read_temperature(par); | ||
| 708 | return sprintf(buf, "%d\n", temp); | ||
| 709 | } | ||
| 710 | |||
| 711 | static DEVICE_ATTR_RW(update_mode); | ||
| 712 | static DEVICE_ATTR_RW(flash); | ||
| 713 | static DEVICE_ATTR(temp, 0444, temp_show, NULL); | ||
| 714 | |||
| 715 | static struct attribute *auok190x_attributes[] = { | ||
| 716 | &dev_attr_update_mode.attr, | ||
| 717 | &dev_attr_flash.attr, | ||
| 718 | &dev_attr_temp.attr, | ||
| 719 | NULL | ||
| 720 | }; | ||
| 721 | |||
| 722 | static const struct attribute_group auok190x_attr_group = { | ||
| 723 | .attrs = auok190x_attributes, | ||
| 724 | }; | ||
| 725 | |||
| 726 | static int auok190x_power(struct auok190xfb_par *par, bool on) | ||
| 727 | { | ||
| 728 | struct auok190x_board *board = par->board; | ||
| 729 | int ret; | ||
| 730 | |||
| 731 | if (on) { | ||
| 732 | /* We should keep POWER up for at least 80ms before setting | ||
| 733 | * RST_N and SLP_N high (TCON spec 20100803_v35 p59) | ||
| 734 | */ | ||
| 735 | ret = regulator_enable(par->regulator); | ||
| 736 | if (ret) | ||
| 737 | return ret; | ||
| 738 | |||
| 739 | msleep(200); | ||
| 740 | gpio_set_value(board->gpio_nrst, 1); | ||
| 741 | gpio_set_value(board->gpio_nsleep, 1); | ||
| 742 | msleep(200); | ||
| 743 | } else { | ||
| 744 | regulator_disable(par->regulator); | ||
| 745 | gpio_set_value(board->gpio_nrst, 0); | ||
| 746 | gpio_set_value(board->gpio_nsleep, 0); | ||
| 747 | } | ||
| 748 | |||
| 749 | return 0; | ||
| 750 | } | ||
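The power-on branch above implements, with generous margins, the sequencing the comment cites; the resulting timeline (a sketch, timings taken from the msleep() calls):

    /*
     * t = 0      enable the vdd regulator
     * t = 200ms  raise RST_N and SLP_N (spec asks for >= 80ms of power)
     * t = 400ms  return; the caller may start talking to the TCON
     */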
| 751 | |||
| 752 | /* | ||
| 753 | * Recovery - powercycle the controller | ||
| 754 | */ | ||
| 755 | |||
| 756 | static void auok190x_recover(struct auok190xfb_par *par) | ||
| 757 | { | ||
| 758 | struct device *dev = par->info->device; | ||
| 759 | |||
| 760 | auok190x_power(par, 0); | ||
| 761 | msleep(100); | ||
| 762 | auok190x_power(par, 1); | ||
| 763 | |||
| 764 | /* after powercycling the device, it's always active */ | ||
| 765 | pm_runtime_set_active(dev); | ||
| 766 | par->standby = 0; | ||
| 767 | |||
| 768 | par->init(par); | ||
| 769 | |||
| 770 | /* wait for init to complete */ | ||
| 771 | par->board->wait_for_rdy(par); | ||
| 772 | } | ||
| 773 | |||
| 774 | /* | ||
| 775 | * Power-management | ||
| 776 | */ | ||
| 777 | static int __maybe_unused auok190x_runtime_suspend(struct device *dev) | ||
| 778 | { | ||
| 779 | struct platform_device *pdev = to_platform_device(dev); | ||
| 780 | struct fb_info *info = platform_get_drvdata(pdev); | ||
| 781 | struct auok190xfb_par *par = info->par; | ||
| 782 | struct auok190x_board *board = par->board; | ||
| 783 | u16 standby_param; | ||
| 784 | |||
| 785 | /* take and keep the lock until we are resumed, as the controller | ||
| 786 | * will never reach the non-busy state when in standby mode | ||
| 787 | */ | ||
| 788 | mutex_lock(&(par->io_lock)); | ||
| 789 | |||
| 790 | if (par->standby) { | ||
| 791 | dev_warn(dev, "already in standby, runtime-pm pairing mismatch\n"); | ||
| 792 | mutex_unlock(&(par->io_lock)); | ||
| 793 | return 0; | ||
| 794 | } | ||
| 795 | |||
| 796 | /* according to runtime_pm.txt, runtime_suspend only means that the | ||
| 797 | * device will not process data and will not communicate with the CPU. | ||
| 798 | * As we hold the lock, this stays true even without standby | ||
| 799 | */ | ||
| 800 | if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) { | ||
| 801 | dev_dbg(dev, "runtime suspend without standby\n"); | ||
| 802 | goto finish; | ||
| 803 | } else if (board->quirks & AUOK190X_QUIRK_STANDBYPARAM) { | ||
| 804 | /* for some TCON versions STANDBY expects a parameter (0) but | ||
| 805 | * which TCON versions these are has yet to be determined. | ||
| 806 | */ | ||
| 807 | dev_dbg(dev, "runtime suspend with additional empty param\n"); | ||
| 808 | standby_param = 0; | ||
| 809 | auok190x_send_cmdargs(par, AUOK190X_CMD_STANDBY, 1, | ||
| 810 | &standby_param); | ||
| 811 | } else { | ||
| 812 | dev_dbg(dev, "runtime suspend without param\n"); | ||
| 813 | auok190x_send_command(par, AUOK190X_CMD_STANDBY); | ||
| 814 | } | ||
| 815 | |||
| 816 | msleep(64); | ||
| 817 | |||
| 818 | finish: | ||
| 819 | par->standby = 1; | ||
| 820 | |||
| 821 | return 0; | ||
| 822 | } | ||
| 823 | |||
| 824 | static int __maybe_unused auok190x_runtime_resume(struct device *dev) | ||
| 825 | { | ||
| 826 | struct platform_device *pdev = to_platform_device(dev); | ||
| 827 | struct fb_info *info = platform_get_drvdata(pdev); | ||
| 828 | struct auok190xfb_par *par = info->par; | ||
| 829 | struct auok190x_board *board = par->board; | ||
| 830 | |||
| 831 | if (!par->standby) { | ||
| 832 | dev_warn(dev, "not in standby, runtime-pm pairing mismatch\n"); | ||
| 833 | return 0; | ||
| 834 | } | ||
| 835 | |||
| 836 | if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) { | ||
| 837 | dev_dbg(dev, "runtime resume without standby\n"); | ||
| 838 | } else { | ||
| 839 | /* when in standby, controller is always busy | ||
| 840 | * and only accepts the wakeup command | ||
| 841 | */ | ||
| 842 | dev_dbg(dev, "runtime resume from standby\n"); | ||
| 843 | auok190x_send_command_nowait(par, AUOK190X_CMD_WAKEUP); | ||
| 844 | |||
| 845 | msleep(160); | ||
| 846 | |||
| 847 | /* wait for the controller to be ready and release the lock */ | ||
| 848 | board->wait_for_rdy(par); | ||
| 849 | } | ||
| 850 | |||
| 851 | par->standby = 0; | ||
| 852 | |||
| 853 | mutex_unlock(&(par->io_lock)); | ||
| 854 | |||
| 855 | return 0; | ||
| 856 | } | ||
| 857 | |||
| 858 | static int __maybe_unused auok190x_suspend(struct device *dev) | ||
| 859 | { | ||
| 860 | struct platform_device *pdev = to_platform_device(dev); | ||
| 861 | struct fb_info *info = platform_get_drvdata(pdev); | ||
| 862 | struct auok190xfb_par *par = info->par; | ||
| 863 | struct auok190x_board *board = par->board; | ||
| 864 | int ret; | ||
| 865 | |||
| 866 | dev_dbg(dev, "suspend\n"); | ||
| 867 | if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) { | ||
| 868 | /* suspend via powering off the ic */ | ||
| 869 | dev_dbg(dev, "suspend with broken standby\n"); | ||
| 870 | |||
| 871 | auok190x_power(par, 0); | ||
| 872 | } else { | ||
| 873 | dev_dbg(dev, "suspend using sleep\n"); | ||
| 874 | |||
| 875 | /* the sleep state can only be entered from the standby state. | ||
| 876 | * pm_runtime_get_noresume gets called before the suspend call. | ||
| 877 | * So the device's usage count is >0 but it is not necessarily | ||
| 878 | * active. | ||
| 879 | */ | ||
| 880 | if (!pm_runtime_status_suspended(dev)) { | ||
| 881 | ret = auok190x_runtime_suspend(dev); | ||
| 882 | if (ret < 0) { | ||
| 883 | dev_err(dev, "auok190x_runtime_suspend failed with %d\n", | ||
| 884 | ret); | ||
| 885 | return ret; | ||
| 886 | } | ||
| 887 | par->manual_standby = 1; | ||
| 888 | } | ||
| 889 | |||
| 890 | gpio_direction_output(board->gpio_nsleep, 0); | ||
| 891 | } | ||
| 892 | |||
| 893 | msleep(100); | ||
| 894 | |||
| 895 | return 0; | ||
| 896 | } | ||
| 897 | |||
| 898 | static int __maybe_unused auok190x_resume(struct device *dev) | ||
| 899 | { | ||
| 900 | struct platform_device *pdev = to_platform_device(dev); | ||
| 901 | struct fb_info *info = platform_get_drvdata(pdev); | ||
| 902 | struct auok190xfb_par *par = info->par; | ||
| 903 | struct auok190x_board *board = par->board; | ||
| 904 | |||
| 905 | dev_dbg(dev, "resume\n"); | ||
| 906 | if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) { | ||
| 907 | dev_dbg(dev, "resume with broken standby\n"); | ||
| 908 | |||
| 909 | auok190x_power(par, 1); | ||
| 910 | |||
| 911 | par->init(par); | ||
| 912 | } else { | ||
| 913 | dev_dbg(dev, "resume from sleep\n"); | ||
| 914 | |||
| 915 | /* the device should be in runtime suspend when we were suspended | ||
| 916 | * and pm_runtime_put_sync gets called after this function. | ||
| 917 | * So there is no need to touch the standby mode here at all. | ||
| 918 | */ | ||
| 919 | gpio_direction_output(board->gpio_nsleep, 1); | ||
| 920 | msleep(100); | ||
| 921 | |||
| 922 | /* an additional init call seems to be necessary after sleep */ | ||
| 923 | auok190x_runtime_resume(dev); | ||
| 924 | par->init(par); | ||
| 925 | |||
| 926 | /* if we were runtime-suspended before, suspend again */ | ||
| 927 | if (!par->manual_standby) | ||
| 928 | auok190x_runtime_suspend(dev); | ||
| 929 | else | ||
| 930 | par->manual_standby = 0; | ||
| 931 | } | ||
| 932 | |||
| 933 | return 0; | ||
| 934 | } | ||
| 935 | |||
| 936 | const struct dev_pm_ops auok190x_pm = { | ||
| 937 | SET_RUNTIME_PM_OPS(auok190x_runtime_suspend, auok190x_runtime_resume, | ||
| 938 | NULL) | ||
| 939 | SET_SYSTEM_SLEEP_PM_OPS(auok190x_suspend, auok190x_resume) | ||
| 940 | }; | ||
| 941 | EXPORT_SYMBOL_GPL(auok190x_pm); | ||
| 942 | |||
| 943 | /* | ||
| 944 | * Common probe and remove code | ||
| 945 | */ | ||
| 946 | |||
| 947 | int auok190x_common_probe(struct platform_device *pdev, | ||
| 948 | struct auok190x_init_data *init) | ||
| 949 | { | ||
| 950 | struct auok190x_board *board = init->board; | ||
| 951 | struct auok190xfb_par *par; | ||
| 952 | struct fb_info *info; | ||
| 953 | struct panel_info *panel; | ||
| 954 | int videomemorysize, ret; | ||
| 955 | unsigned char *videomemory; | ||
| 956 | |||
| 957 | /* check board contents */ | ||
| 958 | if (!board->init || !board->cleanup || !board->wait_for_rdy | ||
| 959 | || !board->set_ctl || !board->set_hdb || !board->get_hdb | ||
| 960 | || !board->setup_irq) | ||
| 961 | return -EINVAL; | ||
| 962 | |||
| 963 | info = framebuffer_alloc(sizeof(struct auok190xfb_par), &pdev->dev); | ||
| 964 | if (!info) | ||
| 965 | return -ENOMEM; | ||
| 966 | |||
| 967 | par = info->par; | ||
| 968 | par->info = info; | ||
| 969 | par->board = board; | ||
| 970 | par->recover = auok190x_recover; | ||
| 971 | par->update_partial = init->update_partial; | ||
| 972 | par->update_all = init->update_all; | ||
| 973 | par->need_refresh = init->need_refresh; | ||
| 974 | par->init = init->init; | ||
| 975 | |||
| 976 | /* init update modes */ | ||
| 977 | par->update_cnt = 0; | ||
| 978 | par->update_mode = -1; | ||
| 979 | par->last_mode = -1; | ||
| 980 | par->flash = 0; | ||
| 981 | |||
| 982 | par->regulator = regulator_get(info->device, "vdd"); | ||
| 983 | if (IS_ERR(par->regulator)) { | ||
| 984 | ret = PTR_ERR(par->regulator); | ||
| 985 | dev_err(info->device, "Failed to get regulator: %d\n", ret); | ||
| 986 | goto err_reg; | ||
| 987 | } | ||
| 988 | |||
| 989 | ret = board->init(par); | ||
| 990 | if (ret) { | ||
| 991 | dev_err(info->device, "board init failed, %d\n", ret); | ||
| 992 | goto err_board; | ||
| 993 | } | ||
| 994 | |||
| 995 | ret = gpio_request(board->gpio_nsleep, "AUOK190x sleep"); | ||
| 996 | if (ret) { | ||
| 997 | dev_err(info->device, "could not request sleep gpio, %d\n", | ||
| 998 | ret); | ||
| 999 | goto err_gpio1; | ||
| 1000 | } | ||
| 1001 | |||
| 1002 | ret = gpio_direction_output(board->gpio_nsleep, 0); | ||
| 1003 | if (ret) { | ||
| 1004 | dev_err(info->device, "could not set sleep gpio, %d\n", ret); | ||
| 1005 | goto err_gpio2; | ||
| 1006 | } | ||
| 1007 | |||
| 1008 | ret = gpio_request(board->gpio_nrst, "AUOK190x reset"); | ||
| 1009 | if (ret) { | ||
| 1010 | dev_err(info->device, "could not request reset gpio, %d\n", | ||
| 1011 | ret); | ||
| 1012 | goto err_gpio2; | ||
| 1013 | } | ||
| 1014 | |||
| 1015 | ret = gpio_direction_output(board->gpio_nrst, 0); | ||
| 1016 | if (ret) { | ||
| 1017 | dev_err(info->device, "could not set reset gpio, %d\n", ret); | ||
| 1018 | goto err_gpio3; | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | ret = auok190x_power(par, 1); | ||
| 1022 | if (ret) { | ||
| 1023 | dev_err(info->device, "could not power on the device, %d\n", | ||
| 1024 | ret); | ||
| 1025 | goto err_gpio3; | ||
| 1026 | } | ||
| 1027 | |||
| 1028 | mutex_init(&par->io_lock); | ||
| 1029 | |||
| 1030 | init_waitqueue_head(&par->waitq); | ||
| 1031 | |||
| 1032 | ret = par->board->setup_irq(par->info); | ||
| 1033 | if (ret) { | ||
| 1034 | dev_err(info->device, "could not setup ready-irq, %d\n", ret); | ||
| 1035 | goto err_irq; | ||
| 1036 | } | ||
| 1037 | |||
| 1038 | /* wait for init to complete */ | ||
| 1039 | par->board->wait_for_rdy(par); | ||
| 1040 | |||
| 1041 | /* | ||
| 1042 | * From here on the controller can talk to us | ||
| 1043 | */ | ||
| 1044 | |||
| 1045 | /* initialise fix, var, resolution and rotation */ | ||
| 1046 | |||
| 1047 | strlcpy(info->fix.id, init->id, 16); | ||
| 1048 | info->var.bits_per_pixel = 8; | ||
| 1049 | info->var.grayscale = 1; | ||
| 1050 | |||
| 1051 | panel = &panel_table[board->resolution]; | ||
| 1052 | |||
| 1053 | par->resolution = board->resolution; | ||
| 1054 | par->rotation = 0; | ||
| 1055 | |||
| 1056 | /* videomemory handling */ | ||
| 1057 | |||
| 1058 | videomemorysize = roundup((panel->w * panel->h) * 2, PAGE_SIZE); | ||
| 1059 | videomemory = vzalloc(videomemorysize); | ||
| 1060 | if (!videomemory) { | ||
| 1061 | ret = -ENOMEM; | ||
| 1062 | goto err_irq; | ||
| 1063 | } | ||
| 1064 | |||
| 1065 | info->screen_base = (char *)videomemory; | ||
| 1066 | info->fix.smem_len = videomemorysize; | ||
| 1067 | |||
| 1068 | info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB; | ||
| 1069 | info->fbops = &auok190xfb_ops; | ||
| 1070 | |||
| 1071 | ret = auok190xfb_check_var(&info->var, info); | ||
| 1072 | if (ret) | ||
| 1073 | goto err_defio; | ||
| 1074 | |||
| 1075 | auok190xfb_set_fix(info); | ||
| 1076 | |||
| 1077 | /* deferred io init */ | ||
| 1078 | |||
| 1079 | info->fbdefio = devm_kzalloc(info->device, | ||
| 1080 | sizeof(struct fb_deferred_io), | ||
| 1081 | GFP_KERNEL); | ||
| 1082 | if (!info->fbdefio) { | ||
| 1083 | dev_err(info->device, "Failed to allocate memory\n"); | ||
| 1084 | ret = -ENOMEM; | ||
| 1085 | goto err_defio; | ||
| 1086 | } | ||
| 1087 | |||
| 1088 | dev_dbg(info->device, "targeting %d frames per second\n", board->fps); | ||
| 1089 | info->fbdefio->delay = HZ / board->fps; | ||
| 1090 | info->fbdefio->first_io = auok190xfb_dpy_first_io; | ||
| 1091 | info->fbdefio->deferred_io = auok190xfb_dpy_deferred_io; | ||
| 1092 | fb_deferred_io_init(info); | ||
| 1093 | |||
| 1094 | /* color map */ | ||
| 1095 | |||
| 1096 | ret = fb_alloc_cmap(&info->cmap, 256, 0); | ||
| 1097 | if (ret < 0) { | ||
| 1098 | dev_err(info->device, "Failed to allocate colormap\n"); | ||
| 1099 | goto err_cmap; | ||
| 1100 | } | ||
| 1101 | |||
| 1102 | /* controller init */ | ||
| 1103 | |||
| 1104 | par->consecutive_threshold = 100; | ||
| 1105 | par->init(par); | ||
| 1106 | auok190x_identify(par); | ||
| 1107 | |||
| 1108 | platform_set_drvdata(pdev, info); | ||
| 1109 | |||
| 1110 | ret = register_framebuffer(info); | ||
| 1111 | if (ret < 0) | ||
| 1112 | goto err_regfb; | ||
| 1113 | |||
| 1114 | ret = sysfs_create_group(&info->device->kobj, &auok190x_attr_group); | ||
| 1115 | if (ret) | ||
| 1116 | goto err_sysfs; | ||
| 1117 | |||
| 1118 | dev_info(info->device, "fb%d: %dx%d using %dK of video memory\n", | ||
| 1119 | info->node, info->var.xres, info->var.yres, | ||
| 1120 | videomemorysize >> 10); | ||
| 1121 | |||
| 1122 | /* increase autosuspend_delay when we use alternative methods | ||
| 1123 | * for runtime_pm | ||
| 1124 | */ | ||
| 1125 | par->autosuspend_delay = (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) | ||
| 1126 | ? 1000 : 200; | ||
| 1127 | |||
| 1128 | pm_runtime_set_active(info->device); | ||
| 1129 | pm_runtime_enable(info->device); | ||
| 1130 | pm_runtime_set_autosuspend_delay(info->device, par->autosuspend_delay); | ||
| 1131 | pm_runtime_use_autosuspend(info->device); | ||
| 1132 | |||
| 1133 | return 0; | ||
| 1134 | |||
| 1135 | err_sysfs: | ||
| 1136 | unregister_framebuffer(info); | ||
| 1137 | err_regfb: | ||
| 1138 | fb_dealloc_cmap(&info->cmap); | ||
| 1139 | err_cmap: | ||
| 1140 | fb_deferred_io_cleanup(info); | ||
| 1141 | err_defio: | ||
| 1142 | vfree((void *)info->screen_base); | ||
| 1143 | err_irq: | ||
| 1144 | auok190x_power(par, 0); | ||
| 1145 | err_gpio3: | ||
| 1146 | gpio_free(board->gpio_nrst); | ||
| 1147 | err_gpio2: | ||
| 1148 | gpio_free(board->gpio_nsleep); | ||
| 1149 | err_gpio1: | ||
| 1150 | board->cleanup(par); | ||
| 1151 | err_board: | ||
| 1152 | regulator_put(par->regulator); | ||
| 1153 | err_reg: | ||
| 1154 | framebuffer_release(info); | ||
| 1155 | |||
| 1156 | return ret; | ||
| 1157 | } | ||
| 1158 | EXPORT_SYMBOL_GPL(auok190x_common_probe); | ||
| 1159 | |||
| 1160 | int auok190x_common_remove(struct platform_device *pdev) | ||
| 1161 | { | ||
| 1162 | struct fb_info *info = platform_get_drvdata(pdev); | ||
| 1163 | struct auok190xfb_par *par = info->par; | ||
| 1164 | struct auok190x_board *board = par->board; | ||
| 1165 | |||
| 1166 | pm_runtime_disable(info->device); | ||
| 1167 | |||
| 1168 | sysfs_remove_group(&info->device->kobj, &auok190x_attr_group); | ||
| 1169 | |||
| 1170 | unregister_framebuffer(info); | ||
| 1171 | |||
| 1172 | fb_dealloc_cmap(&info->cmap); | ||
| 1173 | |||
| 1174 | fb_deferred_io_cleanup(info); | ||
| 1175 | |||
| 1176 | vfree((void *)info->screen_base); | ||
| 1177 | |||
| 1178 | auok190x_power(par, 0); | ||
| 1179 | |||
| 1180 | gpio_free(board->gpio_nrst); | ||
| 1181 | gpio_free(board->gpio_nsleep); | ||
| 1182 | |||
| 1183 | board->cleanup(par); | ||
| 1184 | |||
| 1185 | regulator_put(par->regulator); | ||
| 1186 | |||
| 1187 | framebuffer_release(info); | ||
| 1188 | |||
| 1189 | return 0; | ||
| 1190 | } | ||
| 1191 | EXPORT_SYMBOL_GPL(auok190x_common_remove); | ||
| 1192 | |||
| 1193 | MODULE_DESCRIPTION("Common code for AUO-K190X controllers"); | ||
| 1194 | MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>"); | ||
| 1195 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/video/fbdev/auo_k190x.h b/drivers/video/fbdev/auo_k190x.h deleted file mode 100644 index e35af1f51b28..000000000000 --- a/drivers/video/fbdev/auo_k190x.h +++ /dev/null | |||
| @@ -1,129 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Private common definitions for AUO-K190X framebuffer drivers | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | /* | ||
| 12 | * I80 interface specific defines | ||
| 13 | */ | ||
| 14 | |||
| 15 | #define AUOK190X_I80_CS 0x01 | ||
| 16 | #define AUOK190X_I80_DC 0x02 | ||
| 17 | #define AUOK190X_I80_WR 0x03 | ||
| 18 | #define AUOK190X_I80_OE 0x04 | ||
| 19 | |||
| 20 | /* | ||
| 21 | * AUOK190x commands, common to both controllers | ||
| 22 | */ | ||
| 23 | |||
| 24 | #define AUOK190X_CMD_INIT 0x0000 | ||
| 25 | #define AUOK190X_CMD_STANDBY 0x0001 | ||
| 26 | #define AUOK190X_CMD_WAKEUP 0x0002 | ||
| 27 | #define AUOK190X_CMD_TCON_RESET 0x0003 | ||
| 28 | #define AUOK190X_CMD_DATA_STOP 0x1002 | ||
| 29 | #define AUOK190X_CMD_LUT_START 0x1003 | ||
| 30 | #define AUOK190X_CMD_DISP_REFRESH 0x1004 | ||
| 31 | #define AUOK190X_CMD_DISP_RESET 0x1005 | ||
| 32 | #define AUOK190X_CMD_PRE_DISPLAY_START 0x100D | ||
| 33 | #define AUOK190X_CMD_PRE_DISPLAY_STOP 0x100F | ||
| 34 | #define AUOK190X_CMD_FLASH_W 0x2000 | ||
| 35 | #define AUOK190X_CMD_FLASH_E 0x2001 | ||
| 36 | #define AUOK190X_CMD_FLASH_STS 0x2002 | ||
| 37 | #define AUOK190X_CMD_FRAMERATE 0x3000 | ||
| 38 | #define AUOK190X_CMD_READ_VERSION 0x4000 | ||
| 39 | #define AUOK190X_CMD_READ_STATUS 0x4001 | ||
| 40 | #define AUOK190X_CMD_READ_LUT 0x4003 | ||
| 41 | #define AUOK190X_CMD_DRIVERTIMING 0x5000 | ||
| 42 | #define AUOK190X_CMD_LBALANCE 0x5001 | ||
| 43 | #define AUOK190X_CMD_AGINGMODE 0x6000 | ||
| 44 | #define AUOK190X_CMD_AGINGEXIT 0x6001 | ||
| 45 | |||
| 46 | /* | ||
| 47 | * Common settings for AUOK190X_CMD_INIT | ||
| 48 | */ | ||
| 49 | |||
| 50 | #define AUOK190X_INIT_DATA_FILTER (0 << 12) | ||
| 51 | #define AUOK190X_INIT_DATA_BYPASS (1 << 12) | ||
| 52 | #define AUOK190X_INIT_INVERSE_WHITE (0 << 9) | ||
| 53 | #define AUOK190X_INIT_INVERSE_BLACK (1 << 9) | ||
| 54 | #define AUOK190X_INIT_SCAN_DOWN (0 << 1) | ||
| 55 | #define AUOK190X_INIT_SCAN_UP (1 << 1) | ||
| 56 | #define AUOK190X_INIT_SHIFT_LEFT (0 << 0) | ||
| 57 | #define AUOK190X_INIT_SHIFT_RIGHT (1 << 0) | ||
| 58 | |||
| 59 | /* Common bits to pixels | ||
| 60 | * Mode 15-12 11-8 7-4 3-0 | ||
| 61 | * format0 4 3 2 1 | ||
| 62 | * format1 3 4 1 2 | ||
| 63 | */ | ||
| 64 | |||
| 65 | #define AUOK190X_INIT_FORMAT0 0 | ||
| 66 | #define AUOK190X_INIT_FORMAT1 (1 << 6) | ||
| 67 | |||
| 68 | /* | ||
| 69 | * settings for AUOK190X_CMD_RESET | ||
| 70 | */ | ||
| 71 | |||
| 72 | #define AUOK190X_RESET_TCON (0 << 0) | ||
| 73 | #define AUOK190X_RESET_NORMAL (1 << 0) | ||
| 74 | #define AUOK190X_RESET_PON (1 << 1) | ||
| 75 | |||
| 76 | /* | ||
| 77 | * AUOK190X_CMD_VERSION | ||
| 78 | */ | ||
| 79 | |||
| 80 | #define AUOK190X_VERSION_TEMP_MASK (0x1ff) | ||
| 81 | #define AUOK190X_VERSION_EPD_MASK (0xff) | ||
| 82 | #define AUOK190X_VERSION_SIZE_INT(_val) ((_val & 0xfc00) >> 10) | ||
| 83 | #define AUOK190X_VERSION_SIZE_FLOAT(_val) ((_val & 0x3c0) >> 6) | ||
| 84 | #define AUOK190X_VERSION_MODEL(_val) (_val & 0x3f) | ||
| 85 | #define AUOK190X_VERSION_LUT(_val) (_val & 0xff) | ||
| 86 | #define AUOK190X_VERSION_TCON(_val) ((_val & 0xff00) >> 8) | ||
| 87 | |||
| 88 | /* | ||
| 89 | * update modes for CMD_PARTIALDISP on K1900 and CMD_DDMA on K1901 | ||
| 90 | */ | ||
| 91 | |||
| 92 | #define AUOK190X_UPDATE_MODE(_res) ((_res & 0x7) << 12) | ||
| 93 | #define AUOK190X_UPDATE_NONFLASH (1 << 15) | ||
| 94 | |||
| 95 | /* | ||
| 96 | * track panel specific parameters for common init | ||
| 97 | */ | ||
| 98 | |||
| 99 | struct auok190x_init_data { | ||
| 100 | char *id; | ||
| 101 | struct auok190x_board *board; | ||
| 102 | |||
| 103 | void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2); | ||
| 104 | void (*update_all)(struct auok190xfb_par *par); | ||
| 105 | bool (*need_refresh)(struct auok190xfb_par *par); | ||
| 106 | void (*init)(struct auok190xfb_par *par); | ||
| 107 | }; | ||
| 108 | |||
| 109 | |||
| 110 | extern void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data); | ||
| 111 | extern int auok190x_send_command(struct auok190xfb_par *par, u16 data); | ||
| 112 | extern void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd, | ||
| 113 | int argc, u16 *argv); | ||
| 114 | extern int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd, | ||
| 115 | int argc, u16 *argv); | ||
| 116 | extern void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par, | ||
| 117 | u16 cmd, int argc, u16 *argv, | ||
| 118 | int size, u16 *data); | ||
| 119 | extern int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd, | ||
| 120 | int argc, u16 *argv, int size, | ||
| 121 | u16 *data); | ||
| 122 | extern int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd, | ||
| 123 | int argc, u16 *argv); | ||
| 124 | |||
| 125 | extern int auok190x_common_probe(struct platform_device *pdev, | ||
| 126 | struct auok190x_init_data *init); | ||
| 127 | extern int auok190x_common_remove(struct platform_device *pdev); | ||
| 128 | |||
| 129 | extern const struct dev_pm_ops auok190x_pm; | ||
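For context on the header being deleted above: its send_cmdargs helpers paired one 16-bit command word with a caller-supplied argument array. A caller built on this header might have looked like the sketch below; the argument count and meaning are invented for the example and do not reflect the real K190x command encoding:

/* Illustrative only: the argument layout is made up. */
static int example_display_refresh(struct auok190xfb_par *par, u16 mode)
{
	u16 args[] = { mode };

	return auok190x_send_cmdargs(par, AUOK190X_CMD_DISP_REFRESH,
				     ARRAY_SIZE(args), args);
}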
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c index 487d5e336e1b..82c20c6047b0 100644 --- a/drivers/video/fbdev/core/fb_defio.c +++ b/drivers/video/fbdev/core/fb_defio.c | |||
| @@ -37,7 +37,7 @@ static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs | |||
| 37 | } | 37 | } |
| 38 | 38 | ||
| 39 | /* this is to find and return the vmalloc-ed fb pages */ | 39 | /* this is to find and return the vmalloc-ed fb pages */ |
| 40 | static int fb_deferred_io_fault(struct vm_fault *vmf) | 40 | static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf) |
| 41 | { | 41 | { |
| 42 | unsigned long offset; | 42 | unsigned long offset; |
| 43 | struct page *page; | 43 | struct page *page; |
| @@ -90,7 +90,7 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy | |||
| 90 | EXPORT_SYMBOL_GPL(fb_deferred_io_fsync); | 90 | EXPORT_SYMBOL_GPL(fb_deferred_io_fsync); |
| 91 | 91 | ||
| 92 | /* vm_ops->page_mkwrite handler */ | 92 | /* vm_ops->page_mkwrite handler */ |
| 93 | static int fb_deferred_io_mkwrite(struct vm_fault *vmf) | 93 | static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf) |
| 94 | { | 94 | { |
| 95 | struct page *page = vmf->page; | 95 | struct page *page = vmf->page; |
| 96 | struct fb_info *info = vmf->vma->vm_private_data; | 96 | struct fb_info *info = vmf->vma->vm_private_data; |
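The fb_defio.c hunks are part of the tree-wide switch of ->fault/->page_mkwrite handlers from int to vm_fault_t, a dedicated type for the VM_FAULT_* codes that lets tooling catch handlers accidentally returning -errno values. The shape of a converted handler, with a stand-in body (some_lookup() is hypothetical):

#include <linux/mm.h>

/* Hypothetical page-lookup helper, standing in for driver logic. */
static struct page *some_lookup(struct vm_fault *vmf);

static vm_fault_t foo_fault(struct vm_fault *vmf)
{
	struct page *page = some_lookup(vmf);

	if (!page)
		return VM_FAULT_SIGBUS;	/* a VM_FAULT_* code, never -EFAULT */

	get_page(page);
	vmf->page = page;
	return 0;			/* 0 means the fault was handled */
}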
diff --git a/drivers/video/fbdev/mmp/fb/mmpfb.c b/drivers/video/fbdev/mmp/fb/mmpfb.c index f27697e07c55..ee212be67dc6 100644 --- a/drivers/video/fbdev/mmp/fb/mmpfb.c +++ b/drivers/video/fbdev/mmp/fb/mmpfb.c | |||
| @@ -495,10 +495,9 @@ static int modes_setup(struct mmpfb_info *fbi) | |||
| 495 | /* put videomode list to info structure */ | 495 | /* put videomode list to info structure */ |
| 496 | videomodes = kcalloc(videomode_num, sizeof(struct fb_videomode), | 496 | videomodes = kcalloc(videomode_num, sizeof(struct fb_videomode), |
| 497 | GFP_KERNEL); | 497 | GFP_KERNEL); |
| 498 | if (!videomodes) { | 498 | if (!videomodes) |
| 499 | dev_err(fbi->dev, "can't malloc video modes\n"); | ||
| 500 | return -ENOMEM; | 499 | return -ENOMEM; |
| 501 | } | 500 | |
| 502 | for (i = 0; i < videomode_num; i++) | 501 | for (i = 0; i < videomode_num; i++) |
| 503 | mmpmode_to_fbmode(&videomodes[i], &mmp_modes[i]); | 502 | mmpmode_to_fbmode(&videomodes[i], &mmp_modes[i]); |
| 504 | fb_videomode_to_modelist(videomodes, videomode_num, &info->modelist); | 503 | fb_videomode_to_modelist(videomodes, videomode_num, &info->modelist); |
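Dropping the dev_err() after a failed kcalloc() is the standard cleanup here: a failing k*alloc() already emits a warning with a backtrace unless __GFP_NOWARN is passed, so per-call-site messages only duplicate information and bloat the text. The resulting shape, in generic form:

#include <linux/fb.h>
#include <linux/slab.h>

static struct fb_videomode *alloc_modes(size_t n)
{
	/* No message on failure: the allocator warns by itself. */
	return kcalloc(n, sizeof(struct fb_videomode), GFP_KERNEL);
}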
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c index b6f83d5df9fd..fcdbb2df137f 100644 --- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c +++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c | |||
| @@ -406,12 +406,10 @@ static int path_init(struct mmphw_path_plat *path_plat, | |||
| 406 | dev_info(ctrl->dev, "%s: %s\n", __func__, config->name); | 406 | dev_info(ctrl->dev, "%s: %s\n", __func__, config->name); |
| 407 | 407 | ||
| 408 | /* init driver data */ | 408 | /* init driver data */ |
| 409 | path_info = kzalloc(sizeof(struct mmp_path_info), GFP_KERNEL); | 409 | path_info = kzalloc(sizeof(*path_info), GFP_KERNEL); |
| 410 | if (!path_info) { | 410 | if (!path_info) |
| 411 | dev_err(ctrl->dev, "%s: unable to alloc path_info for %s\n", | ||
| 412 | __func__, config->name); | ||
| 413 | return 0; | 411 | return 0; |
| 414 | } | 412 | |
| 415 | path_info->name = config->name; | 413 | path_info->name = config->name; |
| 416 | path_info->id = path_plat->id; | 414 | path_info->id = path_plat->id; |
| 417 | path_info->dev = ctrl->dev; | 415 | path_info->dev = ctrl->dev; |
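The kzalloc(sizeof(*path_info), ...) change above is the other common allocation cleanup: sizing by the dereferenced pointer keeps the allocation correct with no second place to update if the pointer's type ever changes. (The return 0 on failure is this function's pre-existing success/failure convention, preserved by the hunk.) The idiom in isolation:

#include <linux/slab.h>

struct foo { int a, b; };	/* any type; the idiom is type-agnostic */

static struct foo *alloc_foo(void)
{
	struct foo *p;

	/* sizeof(*p) follows p's type; sizeof(struct foo) would need a
	 * second edit if the type were ever renamed or swapped. */
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	return p;	/* NULL on failure; caller picks the error code */
}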
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c index 2e50120bcfae..fbeeed5afe35 100644 --- a/drivers/video/fbdev/nvidia/nvidia.c +++ b/drivers/video/fbdev/nvidia/nvidia.c | |||
| @@ -1548,7 +1548,7 @@ MODULE_PARM_DESC(noaccel, | |||
| 1548 | "(default=0)"); | 1548 | "(default=0)"); |
| 1549 | module_param(noscale, int, 0); | 1549 | module_param(noscale, int, 0); |
| 1550 | MODULE_PARM_DESC(noscale, | 1550 | MODULE_PARM_DESC(noscale, |
| 1551 | "Disables screen scaleing. (0 or 1=disable) " | 1551 | "Disables screen scaling. (0 or 1=disable) " |
| 1552 | "(default=0, do scaling)"); | 1552 | "(default=0, do scaling)"); |
| 1553 | module_param(paneltweak, int, 0); | 1553 | module_param(paneltweak, int, 0); |
| 1554 | MODULE_PARM_DESC(paneltweak, | 1554 | MODULE_PARM_DESC(paneltweak, |
diff --git a/drivers/video/fbdev/omap/lcd_ams_delta.c b/drivers/video/fbdev/omap/lcd_ams_delta.c index a4ee947006c7..e8c748a0dfe2 100644 --- a/drivers/video/fbdev/omap/lcd_ams_delta.c +++ b/drivers/video/fbdev/omap/lcd_ams_delta.c | |||
| @@ -197,3 +197,7 @@ static struct platform_driver ams_delta_panel_driver = { | |||
| 197 | }; | 197 | }; |
| 198 | 198 | ||
| 199 | module_platform_driver(ams_delta_panel_driver); | 199 | module_platform_driver(ams_delta_panel_driver); |
| 200 | |||
| 201 | MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>"); | ||
| 202 | MODULE_DESCRIPTION("LCD panel support for the Amstrad E3 (Delta) videophone"); | ||
| 203 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/video/fbdev/omap/lcd_h3.c b/drivers/video/fbdev/omap/lcd_h3.c index 796f4634c4c6..fd0ac997fb8c 100644 --- a/drivers/video/fbdev/omap/lcd_h3.c +++ b/drivers/video/fbdev/omap/lcd_h3.c | |||
| @@ -89,3 +89,7 @@ static struct platform_driver h3_panel_driver = { | |||
| 89 | }; | 89 | }; |
| 90 | 90 | ||
| 91 | module_platform_driver(h3_panel_driver); | 91 | module_platform_driver(h3_panel_driver); |
| 92 | |||
| 93 | MODULE_AUTHOR("Imre Deak"); | ||
| 94 | MODULE_DESCRIPTION("LCD panel support for the TI OMAP H3 board"); | ||
| 95 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/video/fbdev/omap/lcd_htcherald.c b/drivers/video/fbdev/omap/lcd_htcherald.c index 9d692f5b8025..db4ff1c6add9 100644 --- a/drivers/video/fbdev/omap/lcd_htcherald.c +++ b/drivers/video/fbdev/omap/lcd_htcherald.c | |||
| @@ -66,3 +66,7 @@ static struct platform_driver htcherald_panel_driver = { | |||
| 66 | }; | 66 | }; |
| 67 | 67 | ||
| 68 | module_platform_driver(htcherald_panel_driver); | 68 | module_platform_driver(htcherald_panel_driver); |
| 69 | |||
| 70 | MODULE_AUTHOR("Cory Maccarrone"); | ||
| 71 | MODULE_LICENSE("GPL"); | ||
| 72 | MODULE_DESCRIPTION("LCD panel support for the HTC Herald"); | ||
diff --git a/drivers/video/fbdev/omap/lcd_inn1510.c b/drivers/video/fbdev/omap/lcd_inn1510.c index b284050f5471..1ea775f17bc1 100644 --- a/drivers/video/fbdev/omap/lcd_inn1510.c +++ b/drivers/video/fbdev/omap/lcd_inn1510.c | |||
| @@ -73,3 +73,7 @@ static struct platform_driver innovator1510_panel_driver = { | |||
| 73 | }; | 73 | }; |
| 74 | 74 | ||
| 75 | module_platform_driver(innovator1510_panel_driver); | 75 | module_platform_driver(innovator1510_panel_driver); |
| 76 | |||
| 77 | MODULE_AUTHOR("Imre Deak"); | ||
| 78 | MODULE_DESCRIPTION("LCD panel support for the TI OMAP1510 Innovator board"); | ||
| 79 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/video/fbdev/omap/lcd_inn1610.c b/drivers/video/fbdev/omap/lcd_inn1610.c index 1841710e796f..8d0cf68d2de3 100644 --- a/drivers/video/fbdev/omap/lcd_inn1610.c +++ b/drivers/video/fbdev/omap/lcd_inn1610.c | |||
| @@ -106,3 +106,7 @@ static struct platform_driver innovator1610_panel_driver = { | |||
| 106 | }; | 106 | }; |
| 107 | 107 | ||
| 108 | module_platform_driver(innovator1610_panel_driver); | 108 | module_platform_driver(innovator1610_panel_driver); |
| 109 | |||
| 110 | MODULE_AUTHOR("Imre Deak"); | ||
| 111 | MODULE_DESCRIPTION("LCD panel support for the TI OMAP1610 Innovator board"); | ||
| 112 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/video/fbdev/omap/lcd_osk.c b/drivers/video/fbdev/omap/lcd_osk.c index b0be5771fe90..9fc43a14957d 100644 --- a/drivers/video/fbdev/omap/lcd_osk.c +++ b/drivers/video/fbdev/omap/lcd_osk.c | |||
| @@ -93,3 +93,7 @@ static struct platform_driver osk_panel_driver = { | |||
| 93 | }; | 93 | }; |
| 94 | 94 | ||
| 95 | module_platform_driver(osk_panel_driver); | 95 | module_platform_driver(osk_panel_driver); |
| 96 | |||
| 97 | MODULE_AUTHOR("Imre Deak"); | ||
| 98 | MODULE_DESCRIPTION("LCD panel support for the TI OMAP OSK board"); | ||
| 99 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/video/fbdev/omap/lcd_palmte.c b/drivers/video/fbdev/omap/lcd_palmte.c index cef96386cf80..a0e888643131 100644 --- a/drivers/video/fbdev/omap/lcd_palmte.c +++ b/drivers/video/fbdev/omap/lcd_palmte.c | |||
| @@ -59,3 +59,7 @@ static struct platform_driver palmte_panel_driver = { | |||
| 59 | }; | 59 | }; |
| 60 | 60 | ||
| 61 | module_platform_driver(palmte_panel_driver); | 61 | module_platform_driver(palmte_panel_driver); |
| 62 | |||
| 63 | MODULE_AUTHOR("Romain Goyet <r.goyet@gmail.com>, Laurent Gonzalez <palmte.linux@free.fr>"); | ||
| 64 | MODULE_DESCRIPTION("LCD panel support for the Palm Tungsten E"); | ||
| 65 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/video/fbdev/omap/lcd_palmtt.c b/drivers/video/fbdev/omap/lcd_palmtt.c index 627f13dae5ad..2c45375e456f 100644 --- a/drivers/video/fbdev/omap/lcd_palmtt.c +++ b/drivers/video/fbdev/omap/lcd_palmtt.c | |||
| @@ -72,3 +72,7 @@ static struct platform_driver palmtt_panel_driver = { | |||
| 72 | }; | 72 | }; |
| 73 | 73 | ||
| 74 | module_platform_driver(palmtt_panel_driver); | 74 | module_platform_driver(palmtt_panel_driver); |
| 75 | |||
| 76 | MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>"); | ||
| 77 | MODULE_DESCRIPTION("LCD panel support for Palm Tungsten|T"); | ||
| 78 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/video/fbdev/omap/lcd_palmz71.c b/drivers/video/fbdev/omap/lcd_palmz71.c index c46d4db1f839..c99a15ab1826 100644 --- a/drivers/video/fbdev/omap/lcd_palmz71.c +++ b/drivers/video/fbdev/omap/lcd_palmz71.c | |||
| @@ -66,3 +66,7 @@ static struct platform_driver palmz71_panel_driver = { | |||
| 66 | }; | 66 | }; |
| 67 | 67 | ||
| 68 | module_platform_driver(palmz71_panel_driver); | 68 | module_platform_driver(palmz71_panel_driver); |
| 69 | |||
| 70 | MODULE_AUTHOR("Romain Goyet, Laurent Gonzalez, Marek Vasut"); | ||
| 71 | MODULE_LICENSE("GPL"); | ||
| 72 | MODULE_DESCRIPTION("LCD panel support for the Palm Zire71"); | ||
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c index 3479a47a3082..585f39efcff6 100644 --- a/drivers/video/fbdev/omap/omapfb_main.c +++ b/drivers/video/fbdev/omap/omapfb_main.c | |||
| @@ -1645,7 +1645,7 @@ static int omapfb_do_probe(struct platform_device *pdev, | |||
| 1645 | goto cleanup; | 1645 | goto cleanup; |
| 1646 | } | 1646 | } |
| 1647 | 1647 | ||
| 1648 | fbdev = kzalloc(sizeof(struct omapfb_device), GFP_KERNEL); | 1648 | fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); |
| 1649 | if (fbdev == NULL) { | 1649 | if (fbdev == NULL) { |
| 1650 | dev_err(&pdev->dev, | 1650 | dev_err(&pdev->dev, |
| 1651 | "unable to allocate memory for device info\n"); | 1651 | "unable to allocate memory for device info\n"); |
diff --git a/drivers/video/fbdev/omap2/omapfb/Kconfig b/drivers/video/fbdev/omap2/omapfb/Kconfig index e6226aeed17e..3bf154e676d1 100644 --- a/drivers/video/fbdev/omap2/omapfb/Kconfig +++ b/drivers/video/fbdev/omap2/omapfb/Kconfig | |||
| @@ -5,6 +5,7 @@ menuconfig FB_OMAP2 | |||
| 5 | tristate "OMAP2+ frame buffer support" | 5 | tristate "OMAP2+ frame buffer support" |
| 6 | depends on FB | 6 | depends on FB |
| 7 | depends on DRM_OMAP = n | 7 | depends on DRM_OMAP = n |
| 8 | depends on GPIOLIB | ||
| 8 | 9 | ||
| 9 | select FB_OMAP2_DSS | 10 | select FB_OMAP2_DSS |
| 10 | select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3 | 11 | select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3 |
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c index bef431530090..87497a00241f 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c | |||
| @@ -387,8 +387,7 @@ static void dsicm_get_resolution(struct omap_dss_device *dssdev, | |||
| 387 | static ssize_t dsicm_num_errors_show(struct device *dev, | 387 | static ssize_t dsicm_num_errors_show(struct device *dev, |
| 388 | struct device_attribute *attr, char *buf) | 388 | struct device_attribute *attr, char *buf) |
| 389 | { | 389 | { |
| 390 | struct platform_device *pdev = to_platform_device(dev); | 390 | struct panel_drv_data *ddata = dev_get_drvdata(dev); |
| 391 | struct panel_drv_data *ddata = platform_get_drvdata(pdev); | ||
| 392 | struct omap_dss_device *in = ddata->in; | 391 | struct omap_dss_device *in = ddata->in; |
| 393 | u8 errors = 0; | 392 | u8 errors = 0; |
| 394 | int r; | 393 | int r; |
| @@ -419,8 +418,7 @@ static ssize_t dsicm_num_errors_show(struct device *dev, | |||
| 419 | static ssize_t dsicm_hw_revision_show(struct device *dev, | 418 | static ssize_t dsicm_hw_revision_show(struct device *dev, |
| 420 | struct device_attribute *attr, char *buf) | 419 | struct device_attribute *attr, char *buf) |
| 421 | { | 420 | { |
| 422 | struct platform_device *pdev = to_platform_device(dev); | 421 | struct panel_drv_data *ddata = dev_get_drvdata(dev); |
| 423 | struct panel_drv_data *ddata = platform_get_drvdata(pdev); | ||
| 424 | struct omap_dss_device *in = ddata->in; | 422 | struct omap_dss_device *in = ddata->in; |
| 425 | u8 id1, id2, id3; | 423 | u8 id1, id2, id3; |
| 426 | int r; | 424 | int r; |
| @@ -451,8 +449,7 @@ static ssize_t dsicm_store_ulps(struct device *dev, | |||
| 451 | struct device_attribute *attr, | 449 | struct device_attribute *attr, |
| 452 | const char *buf, size_t count) | 450 | const char *buf, size_t count) |
| 453 | { | 451 | { |
| 454 | struct platform_device *pdev = to_platform_device(dev); | 452 | struct panel_drv_data *ddata = dev_get_drvdata(dev); |
| 455 | struct panel_drv_data *ddata = platform_get_drvdata(pdev); | ||
| 456 | struct omap_dss_device *in = ddata->in; | 453 | struct omap_dss_device *in = ddata->in; |
| 457 | unsigned long t; | 454 | unsigned long t; |
| 458 | int r; | 455 | int r; |
| @@ -486,8 +483,7 @@ static ssize_t dsicm_show_ulps(struct device *dev, | |||
| 486 | struct device_attribute *attr, | 483 | struct device_attribute *attr, |
| 487 | char *buf) | 484 | char *buf) |
| 488 | { | 485 | { |
| 489 | struct platform_device *pdev = to_platform_device(dev); | 486 | struct panel_drv_data *ddata = dev_get_drvdata(dev); |
| 490 | struct panel_drv_data *ddata = platform_get_drvdata(pdev); | ||
| 491 | unsigned t; | 487 | unsigned t; |
| 492 | 488 | ||
| 493 | mutex_lock(&ddata->lock); | 489 | mutex_lock(&ddata->lock); |
| @@ -501,8 +497,7 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev, | |||
| 501 | struct device_attribute *attr, | 497 | struct device_attribute *attr, |
| 502 | const char *buf, size_t count) | 498 | const char *buf, size_t count) |
| 503 | { | 499 | { |
| 504 | struct platform_device *pdev = to_platform_device(dev); | 500 | struct panel_drv_data *ddata = dev_get_drvdata(dev); |
| 505 | struct panel_drv_data *ddata = platform_get_drvdata(pdev); | ||
| 506 | struct omap_dss_device *in = ddata->in; | 501 | struct omap_dss_device *in = ddata->in; |
| 507 | unsigned long t; | 502 | unsigned long t; |
| 508 | int r; | 503 | int r; |
| @@ -533,8 +528,7 @@ static ssize_t dsicm_show_ulps_timeout(struct device *dev, | |||
| 533 | struct device_attribute *attr, | 528 | struct device_attribute *attr, |
| 534 | char *buf) | 529 | char *buf) |
| 535 | { | 530 | { |
| 536 | struct platform_device *pdev = to_platform_device(dev); | 531 | struct panel_drv_data *ddata = dev_get_drvdata(dev); |
| 537 | struct panel_drv_data *ddata = platform_get_drvdata(pdev); | ||
| 538 | unsigned t; | 532 | unsigned t; |
| 539 | 533 | ||
| 540 | mutex_lock(&ddata->lock); | 534 | mutex_lock(&ddata->lock); |
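The panel-dsi-cm.c hunks all make the same simplification: in a sysfs show/store handler, the struct device passed in is the very device the driver set its data on, and platform_get_drvdata(pdev) is just dev_get_drvdata(&pdev->dev) underneath, so the to_platform_device() round trip is pure noise. A converted handler in sketch form; the attribute and the field read are invented for the example:

#include <linux/device.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct panel_drv_data *ddata = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", ddata->some_field);	/* field invented */
}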
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c index c3d49e13643c..76722a59f55e 100644 --- a/drivers/video/fbdev/pxafb.c +++ b/drivers/video/fbdev/pxafb.c | |||
| @@ -2115,12 +2115,10 @@ static int of_get_pxafb_display(struct device *dev, struct device_node *disp, | |||
| 2115 | if (ret) | 2115 | if (ret) |
| 2116 | s = "color-tft"; | 2116 | s = "color-tft"; |
| 2117 | 2117 | ||
| 2118 | for (i = 0; lcd_types[i]; i++) | 2118 | i = match_string(lcd_types, -1, s); |
| 2119 | if (!strcmp(s, lcd_types[i])) | 2119 | if (i < 0) { |
| 2120 | break; | ||
| 2121 | if (!i || !lcd_types[i]) { | ||
| 2122 | dev_err(dev, "lcd-type %s is unknown\n", s); | 2120 | dev_err(dev, "lcd-type %s is unknown\n", s); |
| 2123 | return -EINVAL; | 2121 | return i; |
| 2124 | } | 2122 | } |
| 2125 | info->lcd_conn |= LCD_CONN_TYPE(i); | 2123 | info->lcd_conn |= LCD_CONN_TYPE(i); |
| 2126 | info->lcd_conn |= LCD_CONN_WIDTH(bus_width); | 2124 | info->lcd_conn |= LCD_CONN_WIDTH(bus_width); |
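match_string() (linux/string.h) returns the index of the first array entry equal to the given string, or -EINVAL if none matches; passing -1 as the size means the array must be NULL-terminated. That folds the open-coded loop and its two-part validity check into one call whose negative return propagates directly. A minimal sketch, with illustrative table entries rather than pxafb's real ones:

#include <linux/string.h>

static int parse_lcd_type(const char *s)
{
	static const char * const lcd_types[] = {
		"color-tft", "mono-stn",
		NULL,	/* terminator, required when n == -1 */
	};

	/* Matching index into lcd_types[], or -EINVAL. */
	return match_string(lcd_types, -1, s);
}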
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c index c20468362f11..c09d7426cd92 100644 --- a/drivers/video/fbdev/savage/savagefb_driver.c +++ b/drivers/video/fbdev/savage/savagefb_driver.c | |||
| @@ -1892,11 +1892,11 @@ static int savage_init_hw(struct savagefb_par *par) | |||
| 1892 | vga_out8(0x3d4, 0x66, par); | 1892 | vga_out8(0x3d4, 0x66, par); |
| 1893 | cr66 = vga_in8(0x3d5, par); | 1893 | cr66 = vga_in8(0x3d5, par); |
| 1894 | vga_out8(0x3d5, cr66 | 0x02, par); | 1894 | vga_out8(0x3d5, cr66 | 0x02, par); |
| 1895 | mdelay(10); | 1895 | usleep_range(10000, 11000); |
| 1896 | 1896 | ||
| 1897 | vga_out8(0x3d4, 0x66, par); | 1897 | vga_out8(0x3d4, 0x66, par); |
| 1898 | vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */ | 1898 | vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */ |
| 1899 | mdelay(10); | 1899 | usleep_range(10000, 11000); |
| 1900 | 1900 | ||
| 1901 | 1901 | ||
| 1902 | /* | 1902 | /* |
| @@ -1906,11 +1906,11 @@ static int savage_init_hw(struct savagefb_par *par) | |||
| 1906 | vga_out8(0x3d4, 0x3f, par); | 1906 | vga_out8(0x3d4, 0x3f, par); |
| 1907 | cr3f = vga_in8(0x3d5, par); | 1907 | cr3f = vga_in8(0x3d5, par); |
| 1908 | vga_out8(0x3d5, cr3f | 0x08, par); | 1908 | vga_out8(0x3d5, cr3f | 0x08, par); |
| 1909 | mdelay(10); | 1909 | usleep_range(10000, 11000); |
| 1910 | 1910 | ||
| 1911 | vga_out8(0x3d4, 0x3f, par); | 1911 | vga_out8(0x3d4, 0x3f, par); |
| 1912 | vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */ | 1912 | vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */ |
| 1913 | mdelay(10); | 1913 | usleep_range(10000, 11000); |
| 1914 | 1914 | ||
| 1915 | /* Savage ramdac speeds */ | 1915 | /* Savage ramdac speeds */ |
| 1916 | par->numClocks = 4; | 1916 | par->numClocks = 4; |
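The savagefb change replaces four 10 ms busy-waits with sleeping waits. mdelay() spins the CPU for the whole interval; in a sleepable path like this init code, usleep_range() yields to the scheduler and gives the timer subsystem a window (here 10 to 11 ms) in which to coalesce wakeups. A minimal sketch of the substitution:

#include <linux/delay.h>

static void reset_settle(void)
{
	/* Before: mdelay(10) busy-waited, burning CPU for the full 10 ms. */
	/* After: sleep, with 1 ms of slack for wakeup coalescing. */
	usleep_range(10000, 11000);
}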
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c index c3a46506e47e..dc46be38c970 100644 --- a/drivers/video/fbdev/sh_mobile_lcdcfb.c +++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c | |||
| @@ -29,7 +29,6 @@ | |||
| 29 | #include <linux/vmalloc.h> | 29 | #include <linux/vmalloc.h> |
| 30 | 30 | ||
| 31 | #include <video/sh_mobile_lcdc.h> | 31 | #include <video/sh_mobile_lcdc.h> |
| 32 | #include <video/sh_mobile_meram.h> | ||
| 33 | 32 | ||
| 34 | #include "sh_mobile_lcdcfb.h" | 33 | #include "sh_mobile_lcdcfb.h" |
| 35 | 34 | ||
| @@ -217,7 +216,6 @@ struct sh_mobile_lcdc_priv { | |||
| 217 | struct notifier_block notifier; | 216 | struct notifier_block notifier; |
| 218 | int started; | 217 | int started; |
| 219 | int forced_fourcc; /* 2 channel LCDC must share fourcc setting */ | 218 | int forced_fourcc; /* 2 channel LCDC must share fourcc setting */ |
| 220 | struct sh_mobile_meram_info *meram_dev; | ||
| 221 | }; | 219 | }; |
| 222 | 220 | ||
| 223 | /* ----------------------------------------------------------------------------- | 221 | /* ----------------------------------------------------------------------------- |
| @@ -346,16 +344,12 @@ static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv) | |||
| 346 | if (priv->dot_clk) | 344 | if (priv->dot_clk) |
| 347 | clk_prepare_enable(priv->dot_clk); | 345 | clk_prepare_enable(priv->dot_clk); |
| 348 | pm_runtime_get_sync(priv->dev); | 346 | pm_runtime_get_sync(priv->dev); |
| 349 | if (priv->meram_dev && priv->meram_dev->pdev) | ||
| 350 | pm_runtime_get_sync(&priv->meram_dev->pdev->dev); | ||
| 351 | } | 347 | } |
| 352 | } | 348 | } |
| 353 | 349 | ||
| 354 | static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv) | 350 | static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv) |
| 355 | { | 351 | { |
| 356 | if (atomic_sub_return(1, &priv->hw_usecnt) == -1) { | 352 | if (atomic_sub_return(1, &priv->hw_usecnt) == -1) { |
| 357 | if (priv->meram_dev && priv->meram_dev->pdev) | ||
| 358 | pm_runtime_put_sync(&priv->meram_dev->pdev->dev); | ||
| 359 | pm_runtime_put(priv->dev); | 353 | pm_runtime_put(priv->dev); |
| 360 | if (priv->dot_clk) | 354 | if (priv->dot_clk) |
| 361 | clk_disable_unprepare(priv->dot_clk); | 355 | clk_disable_unprepare(priv->dot_clk); |
| @@ -1073,7 +1067,6 @@ static void __sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) | |||
| 1073 | 1067 | ||
| 1074 | static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) | 1068 | static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) |
| 1075 | { | 1069 | { |
| 1076 | struct sh_mobile_meram_info *mdev = priv->meram_dev; | ||
| 1077 | struct sh_mobile_lcdc_chan *ch; | 1070 | struct sh_mobile_lcdc_chan *ch; |
| 1078 | unsigned long tmp; | 1071 | unsigned long tmp; |
| 1079 | int ret; | 1072 | int ret; |
| @@ -1106,9 +1099,6 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) | |||
| 1106 | 1099 | ||
| 1107 | /* Compute frame buffer base address and pitch for each channel. */ | 1100 | /* Compute frame buffer base address and pitch for each channel. */ |
| 1108 | for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { | 1101 | for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { |
| 1109 | int pixelformat; | ||
| 1110 | void *cache; | ||
| 1111 | |||
| 1112 | ch = &priv->ch[k]; | 1102 | ch = &priv->ch[k]; |
| 1113 | if (!ch->enabled) | 1103 | if (!ch->enabled) |
| 1114 | continue; | 1104 | continue; |
| @@ -1117,45 +1107,6 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) | |||
| 1117 | ch->base_addr_c = ch->dma_handle | 1107 | ch->base_addr_c = ch->dma_handle |
| 1118 | + ch->xres_virtual * ch->yres_virtual; | 1108 | + ch->xres_virtual * ch->yres_virtual; |
| 1119 | ch->line_size = ch->pitch; | 1109 | ch->line_size = ch->pitch; |
| 1120 | |||
| 1121 | /* Enable MERAM if possible. */ | ||
| 1122 | if (mdev == NULL || ch->cfg->meram_cfg == NULL) | ||
| 1123 | continue; | ||
| 1124 | |||
| 1125 | /* Free the allocated MERAM cache. */ | ||
| 1126 | if (ch->cache) { | ||
| 1127 | sh_mobile_meram_cache_free(mdev, ch->cache); | ||
| 1128 | ch->cache = NULL; | ||
| 1129 | } | ||
| 1130 | |||
| 1131 | switch (ch->format->fourcc) { | ||
| 1132 | case V4L2_PIX_FMT_NV12: | ||
| 1133 | case V4L2_PIX_FMT_NV21: | ||
| 1134 | case V4L2_PIX_FMT_NV16: | ||
| 1135 | case V4L2_PIX_FMT_NV61: | ||
| 1136 | pixelformat = SH_MOBILE_MERAM_PF_NV; | ||
| 1137 | break; | ||
| 1138 | case V4L2_PIX_FMT_NV24: | ||
| 1139 | case V4L2_PIX_FMT_NV42: | ||
| 1140 | pixelformat = SH_MOBILE_MERAM_PF_NV24; | ||
| 1141 | break; | ||
| 1142 | case V4L2_PIX_FMT_RGB565: | ||
| 1143 | case V4L2_PIX_FMT_BGR24: | ||
| 1144 | case V4L2_PIX_FMT_BGR32: | ||
| 1145 | default: | ||
| 1146 | pixelformat = SH_MOBILE_MERAM_PF_RGB; | ||
| 1147 | break; | ||
| 1148 | } | ||
| 1149 | |||
| 1150 | cache = sh_mobile_meram_cache_alloc(mdev, ch->cfg->meram_cfg, | ||
| 1151 | ch->pitch, ch->yres, pixelformat, | ||
| 1152 | &ch->line_size); | ||
| 1153 | if (!IS_ERR(cache)) { | ||
| 1154 | sh_mobile_meram_cache_update(mdev, cache, | ||
| 1155 | ch->base_addr_y, ch->base_addr_c, | ||
| 1156 | &ch->base_addr_y, &ch->base_addr_c); | ||
| 1157 | ch->cache = cache; | ||
| 1158 | } | ||
| 1159 | } | 1110 | } |
| 1160 | 1111 | ||
| 1161 | for (k = 0; k < ARRAY_SIZE(priv->overlays); ++k) { | 1112 | for (k = 0; k < ARRAY_SIZE(priv->overlays); ++k) { |
| @@ -1223,13 +1174,6 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) | |||
| 1223 | } | 1174 | } |
| 1224 | 1175 | ||
| 1225 | sh_mobile_lcdc_display_off(ch); | 1176 | sh_mobile_lcdc_display_off(ch); |
| 1226 | |||
| 1227 | /* Free the MERAM cache. */ | ||
| 1228 | if (ch->cache) { | ||
| 1229 | sh_mobile_meram_cache_free(priv->meram_dev, ch->cache); | ||
| 1230 | ch->cache = NULL; | ||
| 1231 | } | ||
| 1232 | |||
| 1233 | } | 1177 | } |
| 1234 | 1178 | ||
| 1235 | /* stop the lcdc */ | 1179 | /* stop the lcdc */ |
| @@ -1851,11 +1795,6 @@ static int sh_mobile_lcdc_pan(struct fb_var_screeninfo *var, | |||
| 1851 | base_addr_c = ch->dma_handle + ch->xres_virtual * ch->yres_virtual | 1795 | base_addr_c = ch->dma_handle + ch->xres_virtual * ch->yres_virtual |
| 1852 | + c_offset; | 1796 | + c_offset; |
| 1853 | 1797 | ||
| 1854 | if (ch->cache) | ||
| 1855 | sh_mobile_meram_cache_update(priv->meram_dev, ch->cache, | ||
| 1856 | base_addr_y, base_addr_c, | ||
| 1857 | &base_addr_y, &base_addr_c); | ||
| 1858 | |||
| 1859 | ch->base_addr_y = base_addr_y; | 1798 | ch->base_addr_y = base_addr_y; |
| 1860 | ch->base_addr_c = base_addr_c; | 1799 | ch->base_addr_c = base_addr_c; |
| 1861 | ch->pan_y_offset = y_offset; | 1800 | ch->pan_y_offset = y_offset; |
| @@ -2149,10 +2088,8 @@ sh_mobile_lcdc_channel_fb_register(struct sh_mobile_lcdc_chan *ch) | |||
| 2149 | if (info->fbdefio) { | 2088 | if (info->fbdefio) { |
| 2150 | ch->sglist = vmalloc(sizeof(struct scatterlist) * | 2089 | ch->sglist = vmalloc(sizeof(struct scatterlist) * |
| 2151 | ch->fb_size >> PAGE_SHIFT); | 2090 | ch->fb_size >> PAGE_SHIFT); |
| 2152 | if (!ch->sglist) { | 2091 | if (!ch->sglist) |
| 2153 | dev_err(ch->lcdc->dev, "cannot allocate sglist\n"); | ||
| 2154 | return -ENOMEM; | 2092 | return -ENOMEM; |
| 2155 | } | ||
| 2156 | } | 2093 | } |
| 2157 | 2094 | ||
| 2158 | info->bl_dev = ch->bl; | 2095 | info->bl_dev = ch->bl; |
| @@ -2354,8 +2291,7 @@ static int sh_mobile_lcdc_resume(struct device *dev) | |||
| 2354 | 2291 | ||
| 2355 | static int sh_mobile_lcdc_runtime_suspend(struct device *dev) | 2292 | static int sh_mobile_lcdc_runtime_suspend(struct device *dev) |
| 2356 | { | 2293 | { |
| 2357 | struct platform_device *pdev = to_platform_device(dev); | 2294 | struct sh_mobile_lcdc_priv *priv = dev_get_drvdata(dev); |
| 2358 | struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev); | ||
| 2359 | 2295 | ||
| 2360 | /* turn off LCDC hardware */ | 2296 | /* turn off LCDC hardware */ |
| 2361 | lcdc_write(priv, _LDCNT1R, 0); | 2297 | lcdc_write(priv, _LDCNT1R, 0); |
| @@ -2365,8 +2301,7 @@ static int sh_mobile_lcdc_runtime_suspend(struct device *dev) | |||
| 2365 | 2301 | ||
| 2366 | static int sh_mobile_lcdc_runtime_resume(struct device *dev) | 2302 | static int sh_mobile_lcdc_runtime_resume(struct device *dev) |
| 2367 | { | 2303 | { |
| 2368 | struct platform_device *pdev = to_platform_device(dev); | 2304 | struct sh_mobile_lcdc_priv *priv = dev_get_drvdata(dev); |
| 2369 | struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev); | ||
| 2370 | 2305 | ||
| 2371 | __sh_mobile_lcdc_start(priv); | 2306 | __sh_mobile_lcdc_start(priv); |
| 2372 | 2307 | ||
| @@ -2718,13 +2653,11 @@ static int sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
| 2718 | } | 2653 | } |
| 2719 | 2654 | ||
| 2720 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 2655 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
| 2721 | if (!priv) { | 2656 | if (!priv) |
| 2722 | dev_err(&pdev->dev, "cannot allocate device data\n"); | ||
| 2723 | return -ENOMEM; | 2657 | return -ENOMEM; |
| 2724 | } | ||
| 2725 | 2658 | ||
| 2726 | priv->dev = &pdev->dev; | 2659 | priv->dev = &pdev->dev; |
| 2727 | priv->meram_dev = pdata->meram_dev; | 2660 | |
| 2728 | for (i = 0; i < ARRAY_SIZE(priv->ch); i++) | 2661 | for (i = 0; i < ARRAY_SIZE(priv->ch); i++) |
| 2729 | mutex_init(&priv->ch[i].open_lock); | 2662 | mutex_init(&priv->ch[i].open_lock); |
| 2730 | platform_set_drvdata(pdev, priv); | 2663 | platform_set_drvdata(pdev, priv); |
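With the MERAM hooks stripped out of sh_mobile_lcdcfb.c, the clock handling reduces to a balanced pair around the controller's own pixel clock and runtime-PM state; the same diff also converts the runtime-PM callbacks to dev_get_drvdata(), matching the panel-dsi-cm cleanup above, and drops two allocation-failure messages. The surviving enable/disable pairing, in generic form with placeholder names:

#include <linux/clk.h>
#include <linux/pm_runtime.h>

/* Generic stand-in for the driver's private data. */
struct foo_priv {
	struct device *dev;
	struct clk *dot_clk;	/* optional pixel clock, may be NULL */
};

static void foo_clk_on(struct foo_priv *priv)
{
	if (priv->dot_clk)
		clk_prepare_enable(priv->dot_clk);
	pm_runtime_get_sync(priv->dev);
}

static void foo_clk_off(struct foo_priv *priv)
{
	pm_runtime_put(priv->dev);	/* release in reverse order */
	if (priv->dot_clk)
		clk_disable_unprepare(priv->dot_clk);
}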
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.h b/drivers/video/fbdev/sh_mobile_lcdcfb.h index cc52c74721fe..b8e47a8bd8ab 100644 --- a/drivers/video/fbdev/sh_mobile_lcdcfb.h +++ b/drivers/video/fbdev/sh_mobile_lcdcfb.h | |||
| @@ -61,7 +61,6 @@ struct sh_mobile_lcdc_chan { | |||
| 61 | unsigned long *reg_offs; | 61 | unsigned long *reg_offs; |
| 62 | unsigned long ldmt1r_value; | 62 | unsigned long ldmt1r_value; |
| 63 | unsigned long enabled; /* ME and SE in LDCNT2R */ | 63 | unsigned long enabled; /* ME and SE in LDCNT2R */ |
| 64 | void *cache; | ||
| 65 | 64 | ||
| 66 | struct mutex open_lock; /* protects the use counter */ | 65 | struct mutex open_lock; /* protects the use counter */ |
| 67 | int use_count; | 66 | int use_count; |
diff --git a/drivers/video/fbdev/sh_mobile_meram.c b/drivers/video/fbdev/sh_mobile_meram.c deleted file mode 100644 index baadfb207b2e..000000000000 --- a/drivers/video/fbdev/sh_mobile_meram.c +++ /dev/null | |||
| @@ -1,758 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * SuperH Mobile MERAM Driver for SuperH Mobile LCDC Driver | ||
| 3 | * | ||
| 4 | * Copyright (c) 2011 Damian Hobson-Garcia <dhobsong@igel.co.jp> | ||
| 5 | * Takanari Hayama <taki@igel.co.jp> | ||
| 6 | * | ||
| 7 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 8 | * License. See the file "COPYING" in the main directory of this archive | ||
| 9 | * for more details. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/device.h> | ||
| 13 | #include <linux/err.h> | ||
| 14 | #include <linux/export.h> | ||
| 15 | #include <linux/genalloc.h> | ||
| 16 | #include <linux/io.h> | ||
| 17 | #include <linux/kernel.h> | ||
| 18 | #include <linux/module.h> | ||
| 19 | #include <linux/platform_device.h> | ||
| 20 | #include <linux/pm_runtime.h> | ||
| 21 | #include <linux/slab.h> | ||
| 22 | |||
| 23 | #include <video/sh_mobile_meram.h> | ||
| 24 | |||
| 25 | /* ----------------------------------------------------------------------------- | ||
| 26 | * MERAM registers | ||
| 27 | */ | ||
| 28 | |||
| 29 | #define MEVCR1 0x4 | ||
| 30 | #define MEVCR1_RST (1 << 31) | ||
| 31 | #define MEVCR1_WD (1 << 30) | ||
| 32 | #define MEVCR1_AMD1 (1 << 29) | ||
| 33 | #define MEVCR1_AMD0 (1 << 28) | ||
| 34 | #define MEQSEL1 0x40 | ||
| 35 | #define MEQSEL2 0x44 | ||
| 36 | |||
| 37 | #define MExxCTL 0x400 | ||
| 38 | #define MExxCTL_BV (1 << 31) | ||
| 39 | #define MExxCTL_BSZ_SHIFT 28 | ||
| 40 | #define MExxCTL_MSAR_MASK (0x7ff << MExxCTL_MSAR_SHIFT) | ||
| 41 | #define MExxCTL_MSAR_SHIFT 16 | ||
| 42 | #define MExxCTL_NXT_MASK (0x1f << MExxCTL_NXT_SHIFT) | ||
| 43 | #define MExxCTL_NXT_SHIFT 11 | ||
| 44 | #define MExxCTL_WD1 (1 << 10) | ||
| 45 | #define MExxCTL_WD0 (1 << 9) | ||
| 46 | #define MExxCTL_WS (1 << 8) | ||
| 47 | #define MExxCTL_CB (1 << 7) | ||
| 48 | #define MExxCTL_WBF (1 << 6) | ||
| 49 | #define MExxCTL_WF (1 << 5) | ||
| 50 | #define MExxCTL_RF (1 << 4) | ||
| 51 | #define MExxCTL_CM (1 << 3) | ||
| 52 | #define MExxCTL_MD_READ (1 << 0) | ||
| 53 | #define MExxCTL_MD_WRITE (2 << 0) | ||
| 54 | #define MExxCTL_MD_ICB_WB (3 << 0) | ||
| 55 | #define MExxCTL_MD_ICB (4 << 0) | ||
| 56 | #define MExxCTL_MD_FB (7 << 0) | ||
| 57 | #define MExxCTL_MD_MASK (7 << 0) | ||
| 58 | #define MExxBSIZE 0x404 | ||
| 59 | #define MExxBSIZE_RCNT_SHIFT 28 | ||
| 60 | #define MExxBSIZE_YSZM1_SHIFT 16 | ||
| 61 | #define MExxBSIZE_XSZM1_SHIFT 0 | ||
| 62 | #define MExxMNCF 0x408 | ||
| 63 | #define MExxMNCF_KWBNM_SHIFT 28 | ||
| 64 | #define MExxMNCF_KRBNM_SHIFT 24 | ||
| 65 | #define MExxMNCF_BNM_SHIFT 16 | ||
| 66 | #define MExxMNCF_XBV (1 << 15) | ||
| 67 | #define MExxMNCF_CPL_YCBCR444 (1 << 12) | ||
| 68 | #define MExxMNCF_CPL_YCBCR420 (2 << 12) | ||
| 69 | #define MExxMNCF_CPL_YCBCR422 (3 << 12) | ||
| 70 | #define MExxMNCF_CPL_MSK (3 << 12) | ||
| 71 | #define MExxMNCF_BL (1 << 2) | ||
| 72 | #define MExxMNCF_LNM_SHIFT 0 | ||
| 73 | #define MExxSARA 0x410 | ||
| 74 | #define MExxSARB 0x414 | ||
| 75 | #define MExxSBSIZE 0x418 | ||
| 76 | #define MExxSBSIZE_HDV (1 << 31) | ||
| 77 | #define MExxSBSIZE_HSZ16 (0 << 28) | ||
| 78 | #define MExxSBSIZE_HSZ32 (1 << 28) | ||
| 79 | #define MExxSBSIZE_HSZ64 (2 << 28) | ||
| 80 | #define MExxSBSIZE_HSZ128 (3 << 28) | ||
| 81 | #define MExxSBSIZE_SBSIZZ_SHIFT 0 | ||
| 82 | |||
| 83 | #define MERAM_MExxCTL_VAL(next, addr) \ | ||
| 84 | ((((next) << MExxCTL_NXT_SHIFT) & MExxCTL_NXT_MASK) | \ | ||
| 85 | (((addr) << MExxCTL_MSAR_SHIFT) & MExxCTL_MSAR_MASK)) | ||
| 86 | #define MERAM_MExxBSIZE_VAL(rcnt, yszm1, xszm1) \ | ||
| 87 | (((rcnt) << MExxBSIZE_RCNT_SHIFT) | \ | ||
| 88 | ((yszm1) << MExxBSIZE_YSZM1_SHIFT) | \ | ||
| 89 | ((xszm1) << MExxBSIZE_XSZM1_SHIFT)) | ||
| 90 | |||
| 91 | static const unsigned long common_regs[] = { | ||
| 92 | MEVCR1, | ||
| 93 | MEQSEL1, | ||
| 94 | MEQSEL2, | ||
| 95 | }; | ||
| 96 | #define MERAM_REGS_SIZE ARRAY_SIZE(common_regs) | ||
| 97 | |||
| 98 | static const unsigned long icb_regs[] = { | ||
| 99 | MExxCTL, | ||
| 100 | MExxBSIZE, | ||
| 101 | MExxMNCF, | ||
| 102 | MExxSARA, | ||
| 103 | MExxSARB, | ||
| 104 | MExxSBSIZE, | ||
| 105 | }; | ||
| 106 | #define ICB_REGS_SIZE ARRAY_SIZE(icb_regs) | ||
| 107 | |||
| 108 | /* | ||
| 109 | * sh_mobile_meram_icb - MERAM ICB information | ||
| 110 | * @regs: Registers cache | ||
| 111 | * @index: ICB index | ||
| 112 | * @offset: MERAM block offset | ||
| 113 | * @size: MERAM block size in KiB | ||
| 114 | * @cache_unit: Bytes to cache per ICB | ||
| 115 | * @pixelformat: Video pixel format of the data stored in the ICB | ||
| 116 | * @current_reg: Which of Start Address Register A (0) or B (1) is in use | ||
| 117 | */ | ||
| 118 | struct sh_mobile_meram_icb { | ||
| 119 | unsigned long regs[ICB_REGS_SIZE]; | ||
| 120 | unsigned int index; | ||
| 121 | unsigned long offset; | ||
| 122 | unsigned int size; | ||
| 123 | |||
| 124 | unsigned int cache_unit; | ||
| 125 | unsigned int pixelformat; | ||
| 126 | unsigned int current_reg; | ||
| 127 | }; | ||
| 128 | |||
| 129 | #define MERAM_ICB_NUM 32 | ||
| 130 | |||
| 131 | struct sh_mobile_meram_fb_plane { | ||
| 132 | struct sh_mobile_meram_icb *marker; | ||
| 133 | struct sh_mobile_meram_icb *cache; | ||
| 134 | }; | ||
| 135 | |||
| 136 | struct sh_mobile_meram_fb_cache { | ||
| 137 | unsigned int nplanes; | ||
| 138 | struct sh_mobile_meram_fb_plane planes[2]; | ||
| 139 | }; | ||
| 140 | |||
| 141 | /* | ||
| 142 | * sh_mobile_meram_priv - MERAM device | ||
| 143 | * @base: Registers base address | ||
| 144 | * @meram: MERAM physical address | ||
| 145 | * @regs: Registers cache | ||
| 146 | * @lock: Protects used_icb and icbs | ||
| 147 | * @used_icb: Bitmask of used ICBs | ||
| 148 | * @icbs: ICBs | ||
| 149 | * @pool: Allocation pool to manage the MERAM | ||
| 150 | */ | ||
| 151 | struct sh_mobile_meram_priv { | ||
| 152 | void __iomem *base; | ||
| 153 | unsigned long meram; | ||
| 154 | unsigned long regs[MERAM_REGS_SIZE]; | ||
| 155 | |||
| 156 | struct mutex lock; | ||
| 157 | unsigned long used_icb; | ||
| 158 | struct sh_mobile_meram_icb icbs[MERAM_ICB_NUM]; | ||
| 159 | |||
| 160 | struct gen_pool *pool; | ||
| 161 | }; | ||
| 162 | |||
| 163 | /* settings */ | ||
| 164 | #define MERAM_GRANULARITY 1024 | ||
| 165 | #define MERAM_SEC_LINE 15 | ||
| 166 | #define MERAM_LINE_WIDTH 2048 | ||
| 167 | |||
| 168 | /* ----------------------------------------------------------------------------- | ||
| 169 | * Registers access | ||
| 170 | */ | ||
| 171 | |||
| 172 | #define MERAM_ICB_OFFSET(base, idx, off) ((base) + (off) + (idx) * 0x20) | ||
| 173 | |||
| 174 | static inline void meram_write_icb(void __iomem *base, unsigned int idx, | ||
| 175 | unsigned int off, unsigned long val) | ||
| 176 | { | ||
| 177 | iowrite32(val, MERAM_ICB_OFFSET(base, idx, off)); | ||
| 178 | } | ||
| 179 | |||
| 180 | static inline unsigned long meram_read_icb(void __iomem *base, unsigned int idx, | ||
| 181 | unsigned int off) | ||
| 182 | { | ||
| 183 | return ioread32(MERAM_ICB_OFFSET(base, idx, off)); | ||
| 184 | } | ||
| 185 | |||
| 186 | static inline void meram_write_reg(void __iomem *base, unsigned int off, | ||
| 187 | unsigned long val) | ||
| 188 | { | ||
| 189 | iowrite32(val, base + off); | ||
| 190 | } | ||
| 191 | |||
| 192 | static inline unsigned long meram_read_reg(void __iomem *base, unsigned int off) | ||
| 193 | { | ||
| 194 | return ioread32(base + off); | ||
| 195 | } | ||
| 196 | |||
| 197 | /* ----------------------------------------------------------------------------- | ||
| 198 | * MERAM allocation and free | ||
| 199 | */ | ||
| 200 | |||
| 201 | static unsigned long meram_alloc(struct sh_mobile_meram_priv *priv, size_t size) | ||
| 202 | { | ||
| 203 | return gen_pool_alloc(priv->pool, size); | ||
| 204 | } | ||
| 205 | |||
| 206 | static void meram_free(struct sh_mobile_meram_priv *priv, unsigned long mem, | ||
| 207 | size_t size) | ||
| 208 | { | ||
| 209 | gen_pool_free(priv->pool, mem, size); | ||
| 210 | } | ||
| 211 | |||
| 212 | /* ----------------------------------------------------------------------------- | ||
| 213 | * LCDC cache planes allocation, init, cleanup and free | ||
| 214 | */ | ||
| 215 | |||
| 216 | /* Allocate ICBs and MERAM for a plane. */ | ||
| 217 | static int meram_plane_alloc(struct sh_mobile_meram_priv *priv, | ||
| 218 | struct sh_mobile_meram_fb_plane *plane, | ||
| 219 | size_t size) | ||
| 220 | { | ||
| 221 | unsigned long mem; | ||
| 222 | unsigned long idx; | ||
| 223 | |||
| 224 | idx = find_first_zero_bit(&priv->used_icb, 28); | ||
| 225 | if (idx == 28) | ||
| 226 | return -ENOMEM; | ||
| 227 | plane->cache = &priv->icbs[idx]; | ||
| 228 | |||
| 229 | idx = find_next_zero_bit(&priv->used_icb, 32, 28); | ||
| 230 | if (idx == 32) | ||
| 231 | return -ENOMEM; | ||
| 232 | plane->marker = &priv->icbs[idx]; | ||
| 233 | |||
| 234 | mem = meram_alloc(priv, size * 1024); | ||
| 235 | if (mem == 0) | ||
| 236 | return -ENOMEM; | ||
| 237 | |||
| 238 | __set_bit(plane->marker->index, &priv->used_icb); | ||
| 239 | __set_bit(plane->cache->index, &priv->used_icb); | ||
| 240 | |||
| 241 | plane->marker->offset = mem - priv->meram; | ||
| 242 | plane->marker->size = size; | ||
| 243 | |||
| 244 | return 0; | ||
| 245 | } | ||
| 246 | |||
| 247 | /* Free ICBs and MERAM for a plane. */ | ||
| 248 | static void meram_plane_free(struct sh_mobile_meram_priv *priv, | ||
| 249 | struct sh_mobile_meram_fb_plane *plane) | ||
| 250 | { | ||
| 251 | meram_free(priv, priv->meram + plane->marker->offset, | ||
| 252 | plane->marker->size * 1024); | ||
| 253 | |||
| 254 | __clear_bit(plane->marker->index, &priv->used_icb); | ||
| 255 | __clear_bit(plane->cache->index, &priv->used_icb); | ||
| 256 | } | ||
| 257 | |||
| 258 | /* Is this a YCbCr(NV12, NV16 or NV24) colorspace? */ | ||
| 259 | static int is_nvcolor(int cspace) | ||
| 260 | { | ||
| 261 | if (cspace == SH_MOBILE_MERAM_PF_NV || | ||
| 262 | cspace == SH_MOBILE_MERAM_PF_NV24) | ||
| 263 | return 1; | ||
| 264 | return 0; | ||
| 265 | } | ||
| 266 | |||
| 267 | /* Set the next address to fetch. */ | ||
| 268 | static void meram_set_next_addr(struct sh_mobile_meram_priv *priv, | ||
| 269 | struct sh_mobile_meram_fb_cache *cache, | ||
| 270 | unsigned long base_addr_y, | ||
| 271 | unsigned long base_addr_c) | ||
| 272 | { | ||
| 273 | struct sh_mobile_meram_icb *icb = cache->planes[0].marker; | ||
| 274 | unsigned long target; | ||
| 275 | |||
| 276 | icb->current_reg ^= 1; | ||
| 277 | target = icb->current_reg ? MExxSARB : MExxSARA; | ||
| 278 | |||
| 279 | /* set the next address to fetch */ | ||
| 280 | meram_write_icb(priv->base, cache->planes[0].cache->index, target, | ||
| 281 | base_addr_y); | ||
| 282 | meram_write_icb(priv->base, cache->planes[0].marker->index, target, | ||
| 283 | base_addr_y + cache->planes[0].marker->cache_unit); | ||
| 284 | |||
| 285 | if (cache->nplanes == 2) { | ||
| 286 | meram_write_icb(priv->base, cache->planes[1].cache->index, | ||
| 287 | target, base_addr_c); | ||
| 288 | meram_write_icb(priv->base, cache->planes[1].marker->index, | ||
| 289 | target, base_addr_c + | ||
| 290 | cache->planes[1].marker->cache_unit); | ||
| 291 | } | ||
| 292 | } | ||
| 293 | |||
| 294 | /* Get the next ICB address. */ | ||
| 295 | static void | ||
| 296 | meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata, | ||
| 297 | struct sh_mobile_meram_fb_cache *cache, | ||
| 298 | unsigned long *icb_addr_y, unsigned long *icb_addr_c) | ||
| 299 | { | ||
| 300 | struct sh_mobile_meram_icb *icb = cache->planes[0].marker; | ||
| 301 | unsigned long icb_offset; | ||
| 302 | |||
| 303 | if (pdata->addr_mode == SH_MOBILE_MERAM_MODE0) | ||
| 304 | icb_offset = 0x80000000 | (icb->current_reg << 29); | ||
| 305 | else | ||
| 306 | icb_offset = 0xc0000000 | (icb->current_reg << 23); | ||
| 307 | |||
| 308 | *icb_addr_y = icb_offset | (cache->planes[0].marker->index << 24); | ||
| 309 | if (cache->nplanes == 2) | ||
| 310 | *icb_addr_c = icb_offset | ||
| 311 | | (cache->planes[1].marker->index << 24); | ||
| 312 | } | ||
| 313 | |||
| 314 | #define MERAM_CALC_BYTECOUNT(x, y) \ | ||
| 315 | (((x) * (y) + (MERAM_LINE_WIDTH - 1)) & ~(MERAM_LINE_WIDTH - 1)) | ||
| 316 | |||
| 317 | /* Initialize MERAM. */ | ||
| 318 | static int meram_plane_init(struct sh_mobile_meram_priv *priv, | ||
| 319 | struct sh_mobile_meram_fb_plane *plane, | ||
| 320 | unsigned int xres, unsigned int yres, | ||
| 321 | unsigned int *out_pitch) | ||
| 322 | { | ||
| 323 | struct sh_mobile_meram_icb *marker = plane->marker; | ||
| 324 | unsigned long total_byte_count = MERAM_CALC_BYTECOUNT(xres, yres); | ||
| 325 | unsigned long bnm; | ||
| 326 | unsigned int lcdc_pitch; | ||
| 327 | unsigned int xpitch; | ||
| 328 | unsigned int line_cnt; | ||
| 329 | unsigned int save_lines; | ||
| 330 | |||
| 331 | /* adjust pitch to 1024, 2048, 4096 or 8192 */ | ||
| 332 | lcdc_pitch = (xres - 1) | 1023; | ||
| 333 | lcdc_pitch = lcdc_pitch | (lcdc_pitch >> 1); | ||
| 334 | lcdc_pitch = lcdc_pitch | (lcdc_pitch >> 2); | ||
| 335 | lcdc_pitch += 1; | ||
| 336 | |||
| 337 | /* derive settings */ | ||
| 338 | if (lcdc_pitch == 8192 && yres >= 1024) { | ||
| 339 | lcdc_pitch = xpitch = MERAM_LINE_WIDTH; | ||
| 340 | line_cnt = total_byte_count >> 11; | ||
| 341 | *out_pitch = xres; | ||
| 342 | save_lines = plane->marker->size / 16 / MERAM_SEC_LINE; | ||
| 343 | save_lines *= MERAM_SEC_LINE; | ||
| 344 | } else { | ||
| 345 | xpitch = xres; | ||
| 346 | line_cnt = yres; | ||
| 347 | *out_pitch = lcdc_pitch; | ||
| 348 | save_lines = plane->marker->size / (lcdc_pitch >> 10) / 2; | ||
| 349 | save_lines &= 0xff; | ||
| 350 | } | ||
| 351 | bnm = (save_lines - 1) << 16; | ||
| 352 | |||
| 353 | /* TODO: we had better check that the MERAM buffer size is sufficient */ | ||
| 354 | |||
| 355 | /* set up ICB */ | ||
| 356 | meram_write_icb(priv->base, plane->cache->index, MExxBSIZE, | ||
| 357 | MERAM_MExxBSIZE_VAL(0x0, line_cnt - 1, xpitch - 1)); | ||
| 358 | meram_write_icb(priv->base, plane->marker->index, MExxBSIZE, | ||
| 359 | MERAM_MExxBSIZE_VAL(0xf, line_cnt - 1, xpitch - 1)); | ||
| 360 | |||
| 361 | meram_write_icb(priv->base, plane->cache->index, MExxMNCF, bnm); | ||
| 362 | meram_write_icb(priv->base, plane->marker->index, MExxMNCF, bnm); | ||
| 363 | |||
| 364 | meram_write_icb(priv->base, plane->cache->index, MExxSBSIZE, xpitch); | ||
| 365 | meram_write_icb(priv->base, plane->marker->index, MExxSBSIZE, xpitch); | ||
| 366 | |||
| 367 | /* save a cache unit size */ | ||
| 368 | plane->cache->cache_unit = xres * save_lines; | ||
| 369 | plane->marker->cache_unit = xres * save_lines; | ||
| 370 | |||
| 371 | /* | ||
| 372 | * Set up MERAM for the framebuffer. | ||
| 373 | * | ||
| 374 | * We chain the cache ICB and the marker ICB together, and split | ||
| 375 | * the allocated MERAM buffer between the two ICBs. | ||
| 376 | */ | ||
| 377 | meram_write_icb(priv->base, plane->cache->index, MExxCTL, | ||
| 378 | MERAM_MExxCTL_VAL(plane->marker->index, marker->offset) | ||
| 379 | | MExxCTL_WD1 | MExxCTL_WD0 | MExxCTL_WS | MExxCTL_CM | | ||
| 380 | MExxCTL_MD_FB); | ||
| 381 | meram_write_icb(priv->base, plane->marker->index, MExxCTL, | ||
| 382 | MERAM_MExxCTL_VAL(plane->cache->index, marker->offset + | ||
| 383 | plane->marker->size / 2) | | ||
| 384 | MExxCTL_WD1 | MExxCTL_WD0 | MExxCTL_WS | MExxCTL_CM | | ||
| 385 | MExxCTL_MD_FB); | ||
| 386 | |||
| 387 | return 0; | ||
| 388 | } | ||
| 389 | |||
| 390 | static void meram_plane_cleanup(struct sh_mobile_meram_priv *priv, | ||
| 391 | struct sh_mobile_meram_fb_plane *plane) | ||
| 392 | { | ||
| 393 | /* disable ICB */ | ||
| 394 | meram_write_icb(priv->base, plane->cache->index, MExxCTL, | ||
| 395 | MExxCTL_WBF | MExxCTL_WF | MExxCTL_RF); | ||
| 396 | meram_write_icb(priv->base, plane->marker->index, MExxCTL, | ||
| 397 | MExxCTL_WBF | MExxCTL_WF | MExxCTL_RF); | ||
| 398 | |||
| 399 | plane->cache->cache_unit = 0; | ||
| 400 | plane->marker->cache_unit = 0; | ||
| 401 | } | ||
| 402 | |||
| 403 | /* ----------------------------------------------------------------------------- | ||
| 404 | * MERAM operations | ||
| 405 | */ | ||
| 406 | |||
| 407 | unsigned long sh_mobile_meram_alloc(struct sh_mobile_meram_info *pdata, | ||
| 408 | size_t size) | ||
| 409 | { | ||
| 410 | struct sh_mobile_meram_priv *priv = pdata->priv; | ||
| 411 | |||
| 412 | return meram_alloc(priv, size); | ||
| 413 | } | ||
| 414 | EXPORT_SYMBOL_GPL(sh_mobile_meram_alloc); | ||
| 415 | |||
| 416 | void sh_mobile_meram_free(struct sh_mobile_meram_info *pdata, unsigned long mem, | ||
| 417 | size_t size) | ||
| 418 | { | ||
| 419 | struct sh_mobile_meram_priv *priv = pdata->priv; | ||
| 420 | |||
| 421 | meram_free(priv, mem, size); | ||
| 422 | } | ||
| 423 | EXPORT_SYMBOL_GPL(sh_mobile_meram_free); | ||
| 424 | |||
| 425 | /* Allocate memory for the ICBs and mark them as used. */ | ||
| 426 | static struct sh_mobile_meram_fb_cache * | ||
| 427 | meram_cache_alloc(struct sh_mobile_meram_priv *priv, | ||
| 428 | const struct sh_mobile_meram_cfg *cfg, | ||
| 429 | int pixelformat) | ||
| 430 | { | ||
| 431 | unsigned int nplanes = is_nvcolor(pixelformat) ? 2 : 1; | ||
| 432 | struct sh_mobile_meram_fb_cache *cache; | ||
| 433 | int ret; | ||
| 434 | |||
| 435 | cache = kzalloc(sizeof(*cache), GFP_KERNEL); | ||
| 436 | if (cache == NULL) | ||
| 437 | return ERR_PTR(-ENOMEM); | ||
| 438 | |||
| 439 | cache->nplanes = nplanes; | ||
| 440 | |||
| 441 | ret = meram_plane_alloc(priv, &cache->planes[0], | ||
| 442 | cfg->icb[0].meram_size); | ||
| 443 | if (ret < 0) | ||
| 444 | goto error; | ||
| 445 | |||
| 446 | cache->planes[0].marker->current_reg = 1; | ||
| 447 | cache->planes[0].marker->pixelformat = pixelformat; | ||
| 448 | |||
| 449 | if (cache->nplanes == 1) | ||
| 450 | return cache; | ||
| 451 | |||
| 452 | ret = meram_plane_alloc(priv, &cache->planes[1], | ||
| 453 | cfg->icb[1].meram_size); | ||
| 454 | if (ret < 0) { | ||
| 455 | meram_plane_free(priv, &cache->planes[0]); | ||
| 456 | goto error; | ||
| 457 | } | ||
| 458 | |||
| 459 | return cache; | ||
| 460 | |||
| 461 | error: | ||
| 462 | kfree(cache); | ||
| 463 | return ERR_PTR(-ENOMEM); | ||
| 464 | } | ||
| 465 | |||
| 466 | void *sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *pdata, | ||
| 467 | const struct sh_mobile_meram_cfg *cfg, | ||
| 468 | unsigned int xres, unsigned int yres, | ||
| 469 | unsigned int pixelformat, unsigned int *pitch) | ||
| 470 | { | ||
| 471 | struct sh_mobile_meram_fb_cache *cache; | ||
| 472 | struct sh_mobile_meram_priv *priv = pdata->priv; | ||
| 473 | struct platform_device *pdev = pdata->pdev; | ||
| 474 | unsigned int nplanes = is_nvcolor(pixelformat) ? 2 : 1; | ||
| 475 | unsigned int out_pitch; | ||
| 476 | |||
| 477 | if (priv == NULL) | ||
| 478 | return ERR_PTR(-ENODEV); | ||
| 479 | |||
| 480 | if (pixelformat != SH_MOBILE_MERAM_PF_NV && | ||
| 481 | pixelformat != SH_MOBILE_MERAM_PF_NV24 && | ||
| 482 | pixelformat != SH_MOBILE_MERAM_PF_RGB) | ||
| 483 | return ERR_PTR(-EINVAL); | ||
| 484 | |||
| 485 | dev_dbg(&pdev->dev, "registering %dx%d (%s)", xres, yres, | ||
| 486 | !pixelformat ? "yuv" : "rgb"); | ||
| 487 | |||
| 488 | /* we can't handle wider than 8192px */ | ||
| 489 | if (xres > 8192) { | ||
| 490 | dev_err(&pdev->dev, "width exceeding the limit (> 8192)."); | ||
| 491 | return ERR_PTR(-EINVAL); | ||
| 492 | } | ||
| 493 | |||
| 494 | if (cfg->icb[0].meram_size == 0) | ||
| 495 | return ERR_PTR(-EINVAL); | ||
| 496 | |||
| 497 | if (nplanes == 2 && cfg->icb[1].meram_size == 0) | ||
| 498 | return ERR_PTR(-EINVAL); | ||
| 499 | |||
| 500 | mutex_lock(&priv->lock); | ||
| 501 | |||
| 502 | /* We now register the ICBs and allocate the MERAM regions. */ | ||
| 503 | cache = meram_cache_alloc(priv, cfg, pixelformat); | ||
| 504 | if (IS_ERR(cache)) { | ||
| 505 | dev_err(&pdev->dev, "MERAM allocation failed (%ld).", | ||
| 506 | PTR_ERR(cache)); | ||
| 507 | goto err; | ||
| 508 | } | ||
| 509 | |||
| 510 | /* initialize MERAM */ | ||
| 511 | meram_plane_init(priv, &cache->planes[0], xres, yres, &out_pitch); | ||
| 512 | *pitch = out_pitch; | ||
| 513 | if (pixelformat == SH_MOBILE_MERAM_PF_NV) | ||
| 514 | meram_plane_init(priv, &cache->planes[1], | ||
| 515 | xres, (yres + 1) / 2, &out_pitch); | ||
| 516 | else if (pixelformat == SH_MOBILE_MERAM_PF_NV24) | ||
| 517 | meram_plane_init(priv, &cache->planes[1], | ||
| 518 | 2 * xres, (yres + 1) / 2, &out_pitch); | ||
| 519 | |||
| 520 | err: | ||
| 521 | mutex_unlock(&priv->lock); | ||
| 522 | return cache; | ||
| 523 | } | ||
| 524 | EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_alloc); | ||
| 525 | |||
| 526 | void | ||
| 527 | sh_mobile_meram_cache_free(struct sh_mobile_meram_info *pdata, void *data) | ||
| 528 | { | ||
| 529 | struct sh_mobile_meram_fb_cache *cache = data; | ||
| 530 | struct sh_mobile_meram_priv *priv = pdata->priv; | ||
| 531 | |||
| 532 | mutex_lock(&priv->lock); | ||
| 533 | |||
| 534 | /* Cleanup and free. */ | ||
| 535 | meram_plane_cleanup(priv, &cache->planes[0]); | ||
| 536 | meram_plane_free(priv, &cache->planes[0]); | ||
| 537 | |||
| 538 | if (cache->nplanes == 2) { | ||
| 539 | meram_plane_cleanup(priv, &cache->planes[1]); | ||
| 540 | meram_plane_free(priv, &cache->planes[1]); | ||
| 541 | } | ||
| 542 | |||
| 543 | kfree(cache); | ||
| 544 | |||
| 545 | mutex_unlock(&priv->lock); | ||
| 546 | } | ||
| 547 | EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_free); | ||
| 548 | |||
| 549 | void | ||
| 550 | sh_mobile_meram_cache_update(struct sh_mobile_meram_info *pdata, void *data, | ||
| 551 | unsigned long base_addr_y, | ||
| 552 | unsigned long base_addr_c, | ||
| 553 | unsigned long *icb_addr_y, | ||
| 554 | unsigned long *icb_addr_c) | ||
| 555 | { | ||
| 556 | struct sh_mobile_meram_fb_cache *cache = data; | ||
| 557 | struct sh_mobile_meram_priv *priv = pdata->priv; | ||
| 558 | |||
| 559 | mutex_lock(&priv->lock); | ||
| 560 | |||
| 561 | meram_set_next_addr(priv, cache, base_addr_y, base_addr_c); | ||
| 562 | meram_get_next_icb_addr(pdata, cache, icb_addr_y, icb_addr_c); | ||
| 563 | |||
| 564 | mutex_unlock(&priv->lock); | ||
| 565 | } | ||
| 566 | EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_update); | ||
| 567 | |||
| 568 | /* ----------------------------------------------------------------------------- | ||
| 569 | * Power management | ||
| 570 | */ | ||
| 571 | |||
| 572 | #ifdef CONFIG_PM | ||
| 573 | static int sh_mobile_meram_suspend(struct device *dev) | ||
| 574 | { | ||
| 575 | struct platform_device *pdev = to_platform_device(dev); | ||
| 576 | struct sh_mobile_meram_priv *priv = platform_get_drvdata(pdev); | ||
| 577 | unsigned int i, j; | ||
| 578 | |||
| 579 | for (i = 0; i < MERAM_REGS_SIZE; i++) | ||
| 580 | priv->regs[i] = meram_read_reg(priv->base, common_regs[i]); | ||
| 581 | |||
| 582 | for (i = 0; i < 32; i++) { | ||
| 583 | if (!test_bit(i, &priv->used_icb)) | ||
| 584 | continue; | ||
| 585 | for (j = 0; j < ICB_REGS_SIZE; j++) { | ||
| 586 | priv->icbs[i].regs[j] = | ||
| 587 | meram_read_icb(priv->base, i, icb_regs[j]); | ||
| 588 | /* Reset ICB on resume */ | ||
| 589 | if (icb_regs[j] == MExxCTL) | ||
| 590 | priv->icbs[i].regs[j] |= | ||
| 591 | MExxCTL_WBF | MExxCTL_WF | MExxCTL_RF; | ||
| 592 | } | ||
| 593 | } | ||
| 594 | return 0; | ||
| 595 | } | ||
| 596 | |||
| 597 | static int sh_mobile_meram_resume(struct device *dev) | ||
| 598 | { | ||
| 599 | struct platform_device *pdev = to_platform_device(dev); | ||
| 600 | struct sh_mobile_meram_priv *priv = platform_get_drvdata(pdev); | ||
| 601 | unsigned int i, j; | ||
| 602 | |||
| 603 | for (i = 0; i < 32; i++) { | ||
| 604 | if (!test_bit(i, &priv->used_icb)) | ||
| 605 | continue; | ||
| 606 | for (j = 0; j < ICB_REGS_SIZE; j++) | ||
| 607 | meram_write_icb(priv->base, i, icb_regs[j], | ||
| 608 | priv->icbs[i].regs[j]); | ||
| 609 | } | ||
| 610 | |||
| 611 | for (i = 0; i < MERAM_REGS_SIZE; i++) | ||
| 612 | meram_write_reg(priv->base, common_regs[i], priv->regs[i]); | ||
| 613 | return 0; | ||
| 614 | } | ||
| 615 | #endif /* CONFIG_PM */ | ||
| 616 | |||
| 617 | static UNIVERSAL_DEV_PM_OPS(sh_mobile_meram_dev_pm_ops, | ||
| 618 | sh_mobile_meram_suspend, | ||
| 619 | sh_mobile_meram_resume, NULL); | ||
| 620 | |||
| 621 | /* ----------------------------------------------------------------------------- | ||
| 622 | * Probe/remove and driver init/exit | ||
| 623 | */ | ||
| 624 | |||
| 625 | static int sh_mobile_meram_probe(struct platform_device *pdev) | ||
| 626 | { | ||
| 627 | struct sh_mobile_meram_priv *priv; | ||
| 628 | struct sh_mobile_meram_info *pdata = pdev->dev.platform_data; | ||
| 629 | struct resource *regs; | ||
| 630 | struct resource *meram; | ||
| 631 | unsigned int i; | ||
| 632 | int error; | ||
| 633 | |||
| 634 | if (!pdata) { | ||
| 635 | dev_err(&pdev->dev, "no platform data defined\n"); | ||
| 636 | return -EINVAL; | ||
| 637 | } | ||
| 638 | |||
| 639 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 640 | meram = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 641 | if (regs == NULL || meram == NULL) { | ||
| 642 | dev_err(&pdev->dev, "cannot get platform resources\n"); | ||
| 643 | return -ENOENT; | ||
| 644 | } | ||
| 645 | |||
| 646 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
| 647 | if (!priv) { | ||
| 648 | dev_err(&pdev->dev, "cannot allocate device data\n"); | ||
| 649 | return -ENOMEM; | ||
| 650 | } | ||
| 651 | |||
| 652 | /* Initialize private data. */ | ||
| 653 | mutex_init(&priv->lock); | ||
| 654 | priv->used_icb = pdata->reserved_icbs; | ||
| 655 | |||
| 656 | for (i = 0; i < MERAM_ICB_NUM; ++i) | ||
| 657 | priv->icbs[i].index = i; | ||
| 658 | |||
| 659 | pdata->priv = priv; | ||
| 660 | pdata->pdev = pdev; | ||
| 661 | |||
| 662 | /* Request memory regions and remap the registers. */ | ||
| 663 | if (!request_mem_region(regs->start, resource_size(regs), pdev->name)) { | ||
| 664 | dev_err(&pdev->dev, "MERAM registers region already claimed\n"); | ||
| 665 | error = -EBUSY; | ||
| 666 | goto err_req_regs; | ||
| 667 | } | ||
| 668 | |||
| 669 | if (!request_mem_region(meram->start, resource_size(meram), | ||
| 670 | pdev->name)) { | ||
| 671 | dev_err(&pdev->dev, "MERAM memory region already claimed\n"); | ||
| 672 | error = -EBUSY; | ||
| 673 | goto err_req_meram; | ||
| 674 | } | ||
| 675 | |||
| 676 | priv->base = ioremap_nocache(regs->start, resource_size(regs)); | ||
| 677 | if (!priv->base) { | ||
| 678 | dev_err(&pdev->dev, "ioremap failed\n"); | ||
| 679 | error = -EFAULT; | ||
| 680 | goto err_ioremap; | ||
| 681 | } | ||
| 682 | |||
| 683 | priv->meram = meram->start; | ||
| 684 | |||
| 685 | /* Create and initialize the MERAM memory pool. */ | ||
| 686 | priv->pool = gen_pool_create(ilog2(MERAM_GRANULARITY), -1); | ||
| 687 | if (priv->pool == NULL) { | ||
| 688 | error = -ENOMEM; | ||
| 689 | goto err_genpool; | ||
| 690 | } | ||
| 691 | |||
| 692 | error = gen_pool_add(priv->pool, meram->start, resource_size(meram), | ||
| 693 | -1); | ||
| 694 | if (error < 0) | ||
| 695 | goto err_genpool; | ||
| 696 | |||
| 697 | /* initialize ICB addressing mode */ | ||
| 698 | if (pdata->addr_mode == SH_MOBILE_MERAM_MODE1) | ||
| 699 | meram_write_reg(priv->base, MEVCR1, MEVCR1_AMD1); | ||
| 700 | |||
| 701 | platform_set_drvdata(pdev, priv); | ||
| 702 | pm_runtime_enable(&pdev->dev); | ||
| 703 | |||
| 704 | dev_info(&pdev->dev, "sh_mobile_meram initialized."); | ||
| 705 | |||
| 706 | return 0; | ||
| 707 | |||
| 708 | err_genpool: | ||
| 709 | if (priv->pool) | ||
| 710 | gen_pool_destroy(priv->pool); | ||
| 711 | iounmap(priv->base); | ||
| 712 | err_ioremap: | ||
| 713 | release_mem_region(meram->start, resource_size(meram)); | ||
| 714 | err_req_meram: | ||
| 715 | release_mem_region(regs->start, resource_size(regs)); | ||
| 716 | err_req_regs: | ||
| 717 | mutex_destroy(&priv->lock); | ||
| 718 | kfree(priv); | ||
| 719 | |||
| 720 | return error; | ||
| 721 | } | ||
| 722 | |||
| 723 | |||
| 724 | static int sh_mobile_meram_remove(struct platform_device *pdev) | ||
| 725 | { | ||
| 726 | struct sh_mobile_meram_priv *priv = platform_get_drvdata(pdev); | ||
| 727 | struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 728 | struct resource *meram = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 729 | |||
| 730 | pm_runtime_disable(&pdev->dev); | ||
| 731 | |||
| 732 | gen_pool_destroy(priv->pool); | ||
| 733 | |||
| 734 | iounmap(priv->base); | ||
| 735 | release_mem_region(meram->start, resource_size(meram)); | ||
| 736 | release_mem_region(regs->start, resource_size(regs)); | ||
| 737 | |||
| 738 | mutex_destroy(&priv->lock); | ||
| 739 | |||
| 740 | kfree(priv); | ||
| 741 | |||
| 742 | return 0; | ||
| 743 | } | ||
| 744 | |||
| 745 | static struct platform_driver sh_mobile_meram_driver = { | ||
| 746 | .driver = { | ||
| 747 | .name = "sh_mobile_meram", | ||
| 748 | .pm = &sh_mobile_meram_dev_pm_ops, | ||
| 749 | }, | ||
| 750 | .probe = sh_mobile_meram_probe, | ||
| 751 | .remove = sh_mobile_meram_remove, | ||
| 752 | }; | ||
| 753 | |||
| 754 | module_platform_driver(sh_mobile_meram_driver); | ||
| 755 | |||
| 756 | MODULE_DESCRIPTION("SuperH Mobile MERAM driver"); | ||
| 757 | MODULE_AUTHOR("Damian Hobson-Garcia / Takanari Hayama"); | ||
| 758 | MODULE_LICENSE("GPL v2"); | ||
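The MERAM probe above treats the second memory resource as raw backing store: gen_pool_create()/gen_pool_add() hand the whole window to the genalloc allocator, and the ICB setup code later carves plane buffers out of it. A minimal sketch of that pattern, with hypothetical names and a stand-in for MERAM_GRANULARITY:

    #include <linux/genalloc.h>
    #include <linux/log2.h>

    #define GRANULARITY 1024    /* assumed allocation unit */

    static struct gen_pool *make_pool(unsigned long start, size_t len)
    {
            struct gen_pool *pool;

            /* One bitmap bit per 2^order bytes; -1 = any NUMA node. */
            pool = gen_pool_create(ilog2(GRANULARITY), -1);
            if (!pool)
                    return NULL;

            /* Hand the whole device-memory window to the allocator. */
            if (gen_pool_add(pool, start, len, -1) < 0) {
                    gen_pool_destroy(pool);
                    return NULL;
            }
            return pool;
    }

    /* Callers then carve buffers at runtime:
     *   addr = gen_pool_alloc(pool, size);   returns 0 when exhausted
     *   gen_pool_free(pool, addr, size);
     */

gen_pool_destroy() expects an empty pool, which is why the remove() path tears the pool down only after all ICB planes have been cleaned up and freed.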
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c index 6f0a19501c6a..dde52d027416 100644 --- a/drivers/video/fbdev/sm501fb.c +++ b/drivers/video/fbdev/sm501fb.c | |||
| @@ -1932,8 +1932,7 @@ static int sm501fb_probe(struct platform_device *pdev) | |||
| 1932 | int ret; | 1932 | int ret; |
| 1933 | 1933 | ||
| 1934 | /* allocate our framebuffers */ | 1934 | /* allocate our framebuffers */ |
| 1935 | 1935 | info = kzalloc(sizeof(*info), GFP_KERNEL); | |
| 1936 | info = kzalloc(sizeof(struct sm501fb_info), GFP_KERNEL); | ||
| 1937 | if (!info) { | 1936 | if (!info) { |
| 1938 | dev_err(dev, "failed to allocate state\n"); | 1937 | dev_err(dev, "failed to allocate state\n"); |
| 1939 | return -ENOMEM; | 1938 | return -ENOMEM; |
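The sm501fb hunk is purely idiomatic: sizing an allocation from the pointer rather than from the type name keeps the two from drifting apart if the pointer's type ever changes. The shape, with a hypothetical structure:

    struct foo_state *state;

    /* Preferred: the size automatically tracks the pointee type. */
    state = kzalloc(sizeof(*state), GFP_KERNEL);

    /* Discouraged: repeats the type name and can silently go stale. */
    state = kzalloc(sizeof(struct foo_state), GFP_KERNEL);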
diff --git a/drivers/video/fbdev/via/global.h b/drivers/video/fbdev/via/global.h index 275dbbbd6b81..649d2ca5516e 100644 --- a/drivers/video/fbdev/via/global.h +++ b/drivers/video/fbdev/via/global.h | |||
| @@ -33,6 +33,12 @@ | |||
| 33 | #include <linux/console.h> | 33 | #include <linux/console.h> |
| 34 | #include <linux/timer.h> | 34 | #include <linux/timer.h> |
| 35 | 35 | ||
| 36 | #ifdef CONFIG_X86 | ||
| 37 | #include <asm/olpc.h> | ||
| 38 | #else | ||
| 39 | #define machine_is_olpc(x) 0 | ||
| 40 | #endif | ||
| 41 | |||
| 36 | #include "debug.h" | 42 | #include "debug.h" |
| 37 | 43 | ||
| 38 | #include "viafbdev.h" | 44 | #include "viafbdev.h" |
diff --git a/drivers/video/fbdev/via/hw.c b/drivers/video/fbdev/via/hw.c index 22450908306c..48969c644599 100644 --- a/drivers/video/fbdev/via/hw.c +++ b/drivers/video/fbdev/via/hw.c | |||
| @@ -20,7 +20,6 @@ | |||
| 20 | */ | 20 | */ |
| 21 | 21 | ||
| 22 | #include <linux/via-core.h> | 22 | #include <linux/via-core.h> |
| 23 | #include <asm/olpc.h> | ||
| 24 | #include "global.h" | 23 | #include "global.h" |
| 25 | #include "via_clock.h" | 24 | #include "via_clock.h" |
| 26 | 25 | ||
diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c index 77774d8abf94..b041eb27a9bf 100644 --- a/drivers/video/fbdev/via/via-core.c +++ b/drivers/video/fbdev/via/via-core.c | |||
| @@ -17,7 +17,6 @@ | |||
| 17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
| 18 | #include <linux/list.h> | 18 | #include <linux/list.h> |
| 19 | #include <linux/pm.h> | 19 | #include <linux/pm.h> |
| 20 | #include <asm/olpc.h> | ||
| 21 | 20 | ||
| 22 | /* | 21 | /* |
| 23 | * The default port config. | 22 | * The default port config. |
diff --git a/drivers/video/fbdev/via/via_clock.c b/drivers/video/fbdev/via/via_clock.c index bf269fa43977..3d0efdbaea58 100644 --- a/drivers/video/fbdev/via/via_clock.c +++ b/drivers/video/fbdev/via/via_clock.c | |||
| @@ -25,7 +25,7 @@ | |||
| 25 | 25 | ||
| 26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
| 27 | #include <linux/via-core.h> | 27 | #include <linux/via-core.h> |
| 28 | #include <asm/olpc.h> | 28 | |
| 29 | #include "via_clock.h" | 29 | #include "via_clock.h" |
| 30 | #include "global.h" | 30 | #include "global.h" |
| 31 | #include "debug.h" | 31 | #include "debug.h" |
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c index 52f577b0669b..d2f785068ef4 100644 --- a/drivers/video/fbdev/via/viafbdev.c +++ b/drivers/video/fbdev/via/viafbdev.c | |||
| @@ -25,7 +25,6 @@ | |||
| 25 | #include <linux/stat.h> | 25 | #include <linux/stat.h> |
| 26 | #include <linux/via-core.h> | 26 | #include <linux/via-core.h> |
| 27 | #include <linux/via_i2c.h> | 27 | #include <linux/via_i2c.h> |
| 28 | #include <asm/olpc.h> | ||
| 29 | 28 | ||
| 30 | #define _MASTER_FILE | 29 | #define _MASTER_FILE |
| 31 | #include "global.h" | 30 | #include "global.h" |
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index b563a4499cc8..705aebd74e56 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c | |||
| @@ -578,6 +578,8 @@ static void virtio_pci_remove(struct pci_dev *pci_dev) | |||
| 578 | struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); | 578 | struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); |
| 579 | struct device *dev = get_device(&vp_dev->vdev.dev); | 579 | struct device *dev = get_device(&vp_dev->vdev.dev); |
| 580 | 580 | ||
| 581 | pci_disable_sriov(pci_dev); | ||
| 582 | |||
| 581 | unregister_virtio_device(&vp_dev->vdev); | 583 | unregister_virtio_device(&vp_dev->vdev); |
| 582 | 584 | ||
| 583 | if (vp_dev->ioaddr) | 585 | if (vp_dev->ioaddr) |
| @@ -589,6 +591,33 @@ static void virtio_pci_remove(struct pci_dev *pci_dev) | |||
| 589 | put_device(dev); | 591 | put_device(dev); |
| 590 | } | 592 | } |
| 591 | 593 | ||
| 594 | static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs) | ||
| 595 | { | ||
| 596 | struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); | ||
| 597 | struct virtio_device *vdev = &vp_dev->vdev; | ||
| 598 | int ret; | ||
| 599 | |||
| 600 | if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK)) | ||
| 601 | return -EBUSY; | ||
| 602 | |||
| 603 | if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV)) | ||
| 604 | return -EINVAL; | ||
| 605 | |||
| 606 | if (pci_vfs_assigned(pci_dev)) | ||
| 607 | return -EPERM; | ||
| 608 | |||
| 609 | if (num_vfs == 0) { | ||
| 610 | pci_disable_sriov(pci_dev); | ||
| 611 | return 0; | ||
| 612 | } | ||
| 613 | |||
| 614 | ret = pci_enable_sriov(pci_dev, num_vfs); | ||
| 615 | if (ret < 0) | ||
| 616 | return ret; | ||
| 617 | |||
| 618 | return num_vfs; | ||
| 619 | } | ||
| 620 | |||
| 592 | static struct pci_driver virtio_pci_driver = { | 621 | static struct pci_driver virtio_pci_driver = { |
| 593 | .name = "virtio-pci", | 622 | .name = "virtio-pci", |
| 594 | .id_table = virtio_pci_id_table, | 623 | .id_table = virtio_pci_id_table, |
| @@ -597,6 +626,7 @@ static struct pci_driver virtio_pci_driver = { | |||
| 597 | #ifdef CONFIG_PM_SLEEP | 626 | #ifdef CONFIG_PM_SLEEP |
| 598 | .driver.pm = &virtio_pci_pm_ops, | 627 | .driver.pm = &virtio_pci_pm_ops, |
| 599 | #endif | 628 | #endif |
| 629 | .sriov_configure = virtio_pci_sriov_configure, | ||
| 600 | }; | 630 | }; |
| 601 | 631 | ||
| 602 | module_pci_driver(virtio_pci_driver); | 632 | module_pci_driver(virtio_pci_driver); |
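The new .sriov_configure hook is what the PCI core calls when userspace writes a VF count to the device's sriov_numvfs sysfs attribute. Stripped of the virtio-specific guards, the contract looks roughly like this (sketch, generic driver name):

    #include <linux/pci.h>

    static int example_sriov_configure(struct pci_dev *dev, int num_vfs)
    {
            int ret;

            if (pci_vfs_assigned(dev))      /* VFs handed to a guest */
                    return -EPERM;

            if (num_vfs == 0) {             /* "echo 0 > sriov_numvfs" */
                    pci_disable_sriov(dev);
                    return 0;
            }

            /* Success is reported as the number of VFs now enabled. */
            ret = pci_enable_sriov(dev, num_vfs);
            return ret < 0 ? ret : num_vfs;
    }

The virtio version above additionally refuses unless the device has reached DRIVER_OK and negotiated VIRTIO_F_SR_IOV, and the unconditional pci_disable_sriov() added to virtio_pci_remove() ensures no VFs outlive their parent's driver.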
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 2555d80f6eec..07571daccfec 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c | |||
| @@ -153,14 +153,28 @@ static u64 vp_get_features(struct virtio_device *vdev) | |||
| 153 | return features; | 153 | return features; |
| 154 | } | 154 | } |
| 155 | 155 | ||
| 156 | static void vp_transport_features(struct virtio_device *vdev, u64 features) | ||
| 157 | { | ||
| 158 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | ||
| 159 | struct pci_dev *pci_dev = vp_dev->pci_dev; | ||
| 160 | |||
| 161 | if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) && | ||
| 162 | pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV)) | ||
| 163 | __virtio_set_bit(vdev, VIRTIO_F_SR_IOV); | ||
| 164 | } | ||
| 165 | |||
| 156 | /* virtio config->finalize_features() implementation */ | 166 | /* virtio config->finalize_features() implementation */ |
| 157 | static int vp_finalize_features(struct virtio_device *vdev) | 167 | static int vp_finalize_features(struct virtio_device *vdev) |
| 158 | { | 168 | { |
| 159 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 169 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
| 170 | u64 features = vdev->features; | ||
| 160 | 171 | ||
| 161 | /* Give virtio_ring a chance to accept features. */ | 172 | /* Give virtio_ring a chance to accept features. */ |
| 162 | vring_transport_features(vdev); | 173 | vring_transport_features(vdev); |
| 163 | 174 | ||
| 175 | /* Give virtio_pci a chance to accept features. */ | ||
| 176 | vp_transport_features(vdev, features); | ||
| 177 | |||
| 164 | if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) { | 178 | if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) { |
| 165 | dev_err(&vdev->dev, "virtio: device uses modern interface " | 179 | dev_err(&vdev->dev, "virtio: device uses modern interface " |
| 166 | "but does not have VIRTIO_F_VERSION_1\n"); | 180 | "but does not have VIRTIO_F_VERSION_1\n"); |
diff --git a/fs/afs/Makefile b/fs/afs/Makefile index 532acae25453..546874057bd3 100644 --- a/fs/afs/Makefile +++ b/fs/afs/Makefile | |||
| @@ -5,7 +5,7 @@ | |||
| 5 | 5 | ||
| 6 | afs-cache-$(CONFIG_AFS_FSCACHE) := cache.o | 6 | afs-cache-$(CONFIG_AFS_FSCACHE) := cache.o |
| 7 | 7 | ||
| 8 | kafs-objs := \ | 8 | kafs-y := \ |
| 9 | $(afs-cache-y) \ | 9 | $(afs-cache-y) \ |
| 10 | addr_list.o \ | 10 | addr_list.o \ |
| 11 | callback.o \ | 11 | callback.o \ |
| @@ -21,7 +21,6 @@ kafs-objs := \ | |||
| 21 | main.o \ | 21 | main.o \ |
| 22 | misc.o \ | 22 | misc.o \ |
| 23 | mntpt.o \ | 23 | mntpt.o \ |
| 24 | proc.o \ | ||
| 25 | rotate.o \ | 24 | rotate.o \ |
| 26 | rxrpc.o \ | 25 | rxrpc.o \ |
| 27 | security.o \ | 26 | security.o \ |
| @@ -34,4 +33,5 @@ kafs-objs := \ | |||
| 34 | write.o \ | 33 | write.o \ |
| 35 | xattr.o | 34 | xattr.o |
| 36 | 35 | ||
| 36 | kafs-$(CONFIG_PROC_FS) += proc.o | ||
| 37 | obj-$(CONFIG_AFS_FS) := kafs.o | 37 | obj-$(CONFIG_AFS_FS) := kafs.o |
diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c index 2c46c46f3a6d..025a9a5e1c32 100644 --- a/fs/afs/addr_list.c +++ b/fs/afs/addr_list.c | |||
| @@ -215,7 +215,7 @@ struct afs_addr_list *afs_dns_query(struct afs_cell *cell, time64_t *_expiry) | |||
| 215 | _enter("%s", cell->name); | 215 | _enter("%s", cell->name); |
| 216 | 216 | ||
| 217 | ret = dns_query("afsdb", cell->name, cell->name_len, | 217 | ret = dns_query("afsdb", cell->name, cell->name_len, |
| 218 | "ipv4", &vllist, _expiry); | 218 | "", &vllist, _expiry); |
| 219 | if (ret < 0) | 219 | if (ret < 0) |
| 220 | return ERR_PTR(ret); | 220 | return ERR_PTR(ret); |
| 221 | 221 | ||
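Both this hunk and the matching one in dynroot.c drop the "ipv4" option string: an empty options argument asks the DNS upcall for addresses of any family, so AFSDB lookups can now return IPv6 VL servers too. Assuming the dns_query() signature of this kernel generation, the call shape is:

    #include <linux/dns_resolver.h>

    char *result;
    time64_t expiry;
    int ret;

    /* "" = no options: the resolver may return v4 and v6 records. */
    ret = dns_query("afsdb", name, namelen, "", &result, &expiry);
    if (ret < 0)
            return ret;     /* on success, ret is the payload length */
    /* ... parse result, then kfree(result); */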
diff --git a/fs/afs/callback.c b/fs/afs/callback.c index 571437dcb252..5f261fbf2182 100644 --- a/fs/afs/callback.c +++ b/fs/afs/callback.c | |||
| @@ -21,6 +21,66 @@ | |||
| 21 | #include "internal.h" | 21 | #include "internal.h" |
| 22 | 22 | ||
| 23 | /* | 23 | /* |
| 24 | * Create volume and callback interests on a server. | ||
| 25 | */ | ||
| 26 | static struct afs_cb_interest *afs_create_interest(struct afs_server *server, | ||
| 27 | struct afs_vnode *vnode) | ||
| 28 | { | ||
| 29 | struct afs_vol_interest *new_vi, *vi; | ||
| 30 | struct afs_cb_interest *new; | ||
| 31 | struct hlist_node **pp; | ||
| 32 | |||
| 33 | new_vi = kzalloc(sizeof(struct afs_vol_interest), GFP_KERNEL); | ||
| 34 | if (!new_vi) | ||
| 35 | return NULL; | ||
| 36 | |||
| 37 | new = kzalloc(sizeof(struct afs_cb_interest), GFP_KERNEL); | ||
| 38 | if (!new) { | ||
| 39 | kfree(new_vi); | ||
| 40 | return NULL; | ||
| 41 | } | ||
| 42 | |||
| 43 | new_vi->usage = 1; | ||
| 44 | new_vi->vid = vnode->volume->vid; | ||
| 45 | INIT_HLIST_NODE(&new_vi->srv_link); | ||
| 46 | INIT_HLIST_HEAD(&new_vi->cb_interests); | ||
| 47 | |||
| 48 | refcount_set(&new->usage, 1); | ||
| 49 | new->sb = vnode->vfs_inode.i_sb; | ||
| 50 | new->vid = vnode->volume->vid; | ||
| 51 | new->server = afs_get_server(server); | ||
| 52 | INIT_HLIST_NODE(&new->cb_vlink); | ||
| 53 | |||
| 54 | write_lock(&server->cb_break_lock); | ||
| 55 | |||
| 56 | for (pp = &server->cb_volumes.first; *pp; pp = &(*pp)->next) { | ||
| 57 | vi = hlist_entry(*pp, struct afs_vol_interest, srv_link); | ||
| 58 | if (vi->vid < new_vi->vid) | ||
| 59 | continue; | ||
| 60 | if (vi->vid > new_vi->vid) | ||
| 61 | break; | ||
| 62 | vi->usage++; | ||
| 63 | goto found_vi; | ||
| 64 | } | ||
| 65 | |||
| 66 | new_vi->srv_link.pprev = pp; | ||
| 67 | new_vi->srv_link.next = *pp; | ||
| 68 | if (*pp) | ||
| 69 | (*pp)->pprev = &new_vi->srv_link.next; | ||
| 70 | *pp = &new_vi->srv_link; | ||
| 71 | vi = new_vi; | ||
| 72 | new_vi = NULL; | ||
| 73 | found_vi: | ||
| 74 | |||
| 75 | new->vol_interest = vi; | ||
| 76 | hlist_add_head(&new->cb_vlink, &vi->cb_interests); | ||
| 77 | |||
| 78 | write_unlock(&server->cb_break_lock); | ||
| 79 | kfree(new_vi); | ||
| 80 | return new; | ||
| 81 | } | ||
| 82 | |||
| 83 | /* | ||
| 24 | * Set up an interest-in-callbacks record for a volume on a server and | 84 | * Set up an interest-in-callbacks record for a volume on a server and |
| 25 | * register it with the server. | 85 | * register it with the server. |
| 26 | * - Called with vnode->io_lock held. | 86 | * - Called with vnode->io_lock held. |
| @@ -77,20 +137,10 @@ again: | |||
| 77 | } | 137 | } |
| 78 | 138 | ||
| 79 | if (!cbi) { | 139 | if (!cbi) { |
| 80 | new = kzalloc(sizeof(struct afs_cb_interest), GFP_KERNEL); | 140 | new = afs_create_interest(server, vnode); |
| 81 | if (!new) | 141 | if (!new) |
| 82 | return -ENOMEM; | 142 | return -ENOMEM; |
| 83 | 143 | ||
| 84 | refcount_set(&new->usage, 1); | ||
| 85 | new->sb = vnode->vfs_inode.i_sb; | ||
| 86 | new->vid = vnode->volume->vid; | ||
| 87 | new->server = afs_get_server(server); | ||
| 88 | INIT_LIST_HEAD(&new->cb_link); | ||
| 89 | |||
| 90 | write_lock(&server->cb_break_lock); | ||
| 91 | list_add_tail(&new->cb_link, &server->cb_interests); | ||
| 92 | write_unlock(&server->cb_break_lock); | ||
| 93 | |||
| 94 | write_lock(&slist->lock); | 144 | write_lock(&slist->lock); |
| 95 | if (!entry->cb_interest) { | 145 | if (!entry->cb_interest) { |
| 96 | entry->cb_interest = afs_get_cb_interest(new); | 146 | entry->cb_interest = afs_get_cb_interest(new); |
| @@ -126,11 +176,22 @@ again: | |||
| 126 | */ | 176 | */ |
| 127 | void afs_put_cb_interest(struct afs_net *net, struct afs_cb_interest *cbi) | 177 | void afs_put_cb_interest(struct afs_net *net, struct afs_cb_interest *cbi) |
| 128 | { | 178 | { |
| 179 | struct afs_vol_interest *vi; | ||
| 180 | |||
| 129 | if (cbi && refcount_dec_and_test(&cbi->usage)) { | 181 | if (cbi && refcount_dec_and_test(&cbi->usage)) { |
| 130 | if (!list_empty(&cbi->cb_link)) { | 182 | if (!hlist_unhashed(&cbi->cb_vlink)) { |
| 131 | write_lock(&cbi->server->cb_break_lock); | 183 | write_lock(&cbi->server->cb_break_lock); |
| 132 | list_del_init(&cbi->cb_link); | 184 | |
| 185 | hlist_del_init(&cbi->cb_vlink); | ||
| 186 | vi = cbi->vol_interest; | ||
| 187 | cbi->vol_interest = NULL; | ||
| 188 | if (--vi->usage == 0) | ||
| 189 | hlist_del(&vi->srv_link); | ||
| 190 | else | ||
| 191 | vi = NULL; | ||
| 192 | |||
| 133 | write_unlock(&cbi->server->cb_break_lock); | 193 | write_unlock(&cbi->server->cb_break_lock); |
| 194 | kfree(vi); | ||
| 134 | afs_put_server(net, cbi->server); | 195 | afs_put_server(net, cbi->server); |
| 135 | } | 196 | } |
| 136 | kfree(cbi); | 197 | kfree(cbi); |
| @@ -182,20 +243,34 @@ void afs_break_callback(struct afs_vnode *vnode) | |||
| 182 | static void afs_break_one_callback(struct afs_server *server, | 243 | static void afs_break_one_callback(struct afs_server *server, |
| 183 | struct afs_fid *fid) | 244 | struct afs_fid *fid) |
| 184 | { | 245 | { |
| 246 | struct afs_vol_interest *vi; | ||
| 185 | struct afs_cb_interest *cbi; | 247 | struct afs_cb_interest *cbi; |
| 186 | struct afs_iget_data data; | 248 | struct afs_iget_data data; |
| 187 | struct afs_vnode *vnode; | 249 | struct afs_vnode *vnode; |
| 188 | struct inode *inode; | 250 | struct inode *inode; |
| 189 | 251 | ||
| 190 | read_lock(&server->cb_break_lock); | 252 | read_lock(&server->cb_break_lock); |
| 253 | hlist_for_each_entry(vi, &server->cb_volumes, srv_link) { | ||
| 254 | if (vi->vid < fid->vid) | ||
| 255 | continue; | ||
| 256 | if (vi->vid > fid->vid) { | ||
| 257 | vi = NULL; | ||
| 258 | break; | ||
| 259 | } | ||
| 260 | //atomic_inc(&vi->usage); | ||
| 261 | break; | ||
| 262 | } | ||
| 263 | |||
| 264 | /* TODO: Find all matching volumes if we couldn't match the server and | ||
| 265 | * break them anyway. | ||
| 266 | */ | ||
| 267 | if (!vi) | ||
| 268 | goto out; | ||
| 191 | 269 | ||
| 192 | /* Step through all interested superblocks. There may be more than one | 270 | /* Step through all interested superblocks. There may be more than one |
| 193 | * because of cell aliasing. | 271 | * because of cell aliasing. |
| 194 | */ | 272 | */ |
| 195 | list_for_each_entry(cbi, &server->cb_interests, cb_link) { | 273 | hlist_for_each_entry(cbi, &vi->cb_interests, cb_vlink) { |
| 196 | if (cbi->vid != fid->vid) | ||
| 197 | continue; | ||
| 198 | |||
| 199 | if (fid->vnode == 0 && fid->unique == 0) { | 274 | if (fid->vnode == 0 && fid->unique == 0) { |
| 200 | /* The callback break applies to an entire volume. */ | 275 | /* The callback break applies to an entire volume. */ |
| 201 | struct afs_super_info *as = AFS_FS_S(cbi->sb); | 276 | struct afs_super_info *as = AFS_FS_S(cbi->sb); |
| @@ -217,6 +292,7 @@ static void afs_break_one_callback(struct afs_server *server, | |||
| 217 | } | 292 | } |
| 218 | } | 293 | } |
| 219 | 294 | ||
| 295 | out: | ||
| 220 | read_unlock(&server->cb_break_lock); | 296 | read_unlock(&server->cb_break_lock); |
| 221 | } | 297 | } |
| 222 | 298 | ||
diff --git a/fs/afs/cell.c b/fs/afs/cell.c index fdf4c36cff79..f3d0bef16d78 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/dns_resolver.h> | 15 | #include <linux/dns_resolver.h> |
| 16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
| 17 | #include <linux/inet.h> | 17 | #include <linux/inet.h> |
| 18 | #include <linux/namei.h> | ||
| 18 | #include <keys/rxrpc-type.h> | 19 | #include <keys/rxrpc-type.h> |
| 19 | #include "internal.h" | 20 | #include "internal.h" |
| 20 | 21 | ||
| @@ -341,8 +342,8 @@ int afs_cell_init(struct afs_net *net, const char *rootcell) | |||
| 341 | 342 | ||
| 342 | /* install the new cell */ | 343 | /* install the new cell */ |
| 343 | write_seqlock(&net->cells_lock); | 344 | write_seqlock(&net->cells_lock); |
| 344 | old_root = net->ws_cell; | 345 | old_root = rcu_access_pointer(net->ws_cell); |
| 345 | net->ws_cell = new_root; | 346 | rcu_assign_pointer(net->ws_cell, new_root); |
| 346 | write_sequnlock(&net->cells_lock); | 347 | write_sequnlock(&net->cells_lock); |
| 347 | 348 | ||
| 348 | afs_put_cell(net, old_root); | 349 | afs_put_cell(net, old_root); |
| @@ -528,12 +529,14 @@ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell) | |||
| 528 | NULL, 0, | 529 | NULL, 0, |
| 529 | cell, 0, true); | 530 | cell, 0, true); |
| 530 | #endif | 531 | #endif |
| 531 | ret = afs_proc_cell_setup(net, cell); | 532 | ret = afs_proc_cell_setup(cell); |
| 532 | if (ret < 0) | 533 | if (ret < 0) |
| 533 | return ret; | 534 | return ret; |
| 534 | spin_lock(&net->proc_cells_lock); | 535 | |
| 536 | mutex_lock(&net->proc_cells_lock); | ||
| 535 | list_add_tail(&cell->proc_link, &net->proc_cells); | 537 | list_add_tail(&cell->proc_link, &net->proc_cells); |
| 536 | spin_unlock(&net->proc_cells_lock); | 538 | afs_dynroot_mkdir(net, cell); |
| 539 | mutex_unlock(&net->proc_cells_lock); | ||
| 537 | return 0; | 540 | return 0; |
| 538 | } | 541 | } |
| 539 | 542 | ||
| @@ -544,11 +547,12 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell) | |||
| 544 | { | 547 | { |
| 545 | _enter("%s", cell->name); | 548 | _enter("%s", cell->name); |
| 546 | 549 | ||
| 547 | afs_proc_cell_remove(net, cell); | 550 | afs_proc_cell_remove(cell); |
| 548 | 551 | ||
| 549 | spin_lock(&net->proc_cells_lock); | 552 | mutex_lock(&net->proc_cells_lock); |
| 550 | list_del_init(&cell->proc_link); | 553 | list_del_init(&cell->proc_link); |
| 551 | spin_unlock(&net->proc_cells_lock); | 554 | afs_dynroot_rmdir(net, cell); |
| 555 | mutex_unlock(&net->proc_cells_lock); | ||
| 552 | 556 | ||
| 553 | #ifdef CONFIG_AFS_FSCACHE | 557 | #ifdef CONFIG_AFS_FSCACHE |
| 554 | fscache_relinquish_cookie(cell->cache, NULL, false); | 558 | fscache_relinquish_cookie(cell->cache, NULL, false); |
| @@ -755,8 +759,8 @@ void afs_cell_purge(struct afs_net *net) | |||
| 755 | _enter(""); | 759 | _enter(""); |
| 756 | 760 | ||
| 757 | write_seqlock(&net->cells_lock); | 761 | write_seqlock(&net->cells_lock); |
| 758 | ws = net->ws_cell; | 762 | ws = rcu_access_pointer(net->ws_cell); |
| 759 | net->ws_cell = NULL; | 763 | RCU_INIT_POINTER(net->ws_cell, NULL); |
| 760 | write_sequnlock(&net->cells_lock); | 764 | write_sequnlock(&net->cells_lock); |
| 761 | afs_put_cell(net, ws); | 765 | afs_put_cell(net, ws); |
| 762 | 766 | ||
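ws_cell is now annotated __rcu (see the internal.h hunk below) and handled with the RCU pointer helpers: writers still serialize on the cells_lock seqlock, while readers may follow the pointer locklessly. Sketched on the usual RCU conventions, since the read side is not part of these hunks:

    /* Writer: swap the root cell under the seqlock's write side. */
    write_seqlock(&net->cells_lock);
    old = rcu_access_pointer(net->ws_cell);  /* no deref needed here */
    rcu_assign_pointer(net->ws_cell, new);   /* publish with barrier */
    write_sequnlock(&net->cells_lock);
    afs_put_cell(net, old);                  /* drop ref after unpublish */

    /* Reader: an RCU read-side critical section, no lock taken. */
    rcu_read_lock();
    cell = rcu_dereference(net->ws_cell);
    if (cell)
            ;       /* safe to use until rcu_read_unlock() */
    rcu_read_unlock();

rcu_access_pointer() suffices on the writer side because the seqlock already excludes concurrent updates; the old cell is only dereferenced later, under its own reference count.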
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index 238fd28cfdd2..9e51d6fe7e8f 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c | |||
| @@ -526,7 +526,7 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work) | |||
| 526 | nifs = 0; | 526 | nifs = 0; |
| 527 | ifs = kcalloc(32, sizeof(*ifs), GFP_KERNEL); | 527 | ifs = kcalloc(32, sizeof(*ifs), GFP_KERNEL); |
| 528 | if (ifs) { | 528 | if (ifs) { |
| 529 | nifs = afs_get_ipv4_interfaces(ifs, 32, false); | 529 | nifs = afs_get_ipv4_interfaces(call->net, ifs, 32, false); |
| 530 | if (nifs < 0) { | 530 | if (nifs < 0) { |
| 531 | kfree(ifs); | 531 | kfree(ifs); |
| 532 | ifs = NULL; | 532 | ifs = NULL; |
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c index 983f3946ab57..174e843f0633 100644 --- a/fs/afs/dynroot.c +++ b/fs/afs/dynroot.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | /* dir.c: AFS dynamic root handling | 1 | /* AFS dynamic root handling |
| 2 | * | 2 | * |
| 3 | * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. |
| 4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
| @@ -46,7 +46,7 @@ static int afs_probe_cell_name(struct dentry *dentry) | |||
| 46 | return 0; | 46 | return 0; |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | ret = dns_query("afsdb", name, len, "ipv4", NULL, NULL); | 49 | ret = dns_query("afsdb", name, len, "", NULL, NULL); |
| 50 | if (ret == -ENODATA) | 50 | if (ret == -ENODATA) |
| 51 | ret = -EDESTADDRREQ; | 51 | ret = -EDESTADDRREQ; |
| 52 | return ret; | 52 | return ret; |
| @@ -207,3 +207,125 @@ const struct dentry_operations afs_dynroot_dentry_operations = { | |||
| 207 | .d_release = afs_d_release, | 207 | .d_release = afs_d_release, |
| 208 | .d_automount = afs_d_automount, | 208 | .d_automount = afs_d_automount, |
| 209 | }; | 209 | }; |
| 210 | |||
| 211 | /* | ||
| 212 | * Create a manually added cell mount directory. | ||
| 213 | * - The caller must hold net->proc_cells_lock | ||
| 214 | */ | ||
| 215 | int afs_dynroot_mkdir(struct afs_net *net, struct afs_cell *cell) | ||
| 216 | { | ||
| 217 | struct super_block *sb = net->dynroot_sb; | ||
| 218 | struct dentry *root, *subdir; | ||
| 219 | int ret; | ||
| 220 | |||
| 221 | if (!sb || atomic_read(&sb->s_active) == 0) | ||
| 222 | return 0; | ||
| 223 | |||
| 224 | /* Let the ->lookup op do the creation */ | ||
| 225 | root = sb->s_root; | ||
| 226 | inode_lock(root->d_inode); | ||
| 227 | subdir = lookup_one_len(cell->name, root, cell->name_len); | ||
| 228 | if (IS_ERR(subdir)) { | ||
| 229 | ret = PTR_ERR(subdir); | ||
| 230 | goto unlock; | ||
| 231 | } | ||
| 232 | |||
| 233 | /* Note that we're retaining an extra ref on the dentry */ | ||
| 234 | subdir->d_fsdata = (void *)1UL; | ||
| 235 | ret = 0; | ||
| 236 | unlock: | ||
| 237 | inode_unlock(root->d_inode); | ||
| 238 | return ret; | ||
| 239 | } | ||
| 240 | |||
| 241 | /* | ||
| 242 | * Remove a manually added cell mount directory. | ||
| 243 | * - The caller must hold net->proc_cells_lock | ||
| 244 | */ | ||
| 245 | void afs_dynroot_rmdir(struct afs_net *net, struct afs_cell *cell) | ||
| 246 | { | ||
| 247 | struct super_block *sb = net->dynroot_sb; | ||
| 248 | struct dentry *root, *subdir; | ||
| 249 | |||
| 250 | if (!sb || atomic_read(&sb->s_active) == 0) | ||
| 251 | return; | ||
| 252 | |||
| 253 | root = sb->s_root; | ||
| 254 | inode_lock(root->d_inode); | ||
| 255 | |||
| 256 | /* Don't want to trigger a lookup call, which will re-add the cell */ | ||
| 257 | subdir = try_lookup_one_len(cell->name, root, cell->name_len); | ||
| 258 | if (IS_ERR_OR_NULL(subdir)) { | ||
| 259 | _debug("lookup %ld", PTR_ERR(subdir)); | ||
| 260 | goto no_dentry; | ||
| 261 | } | ||
| 262 | |||
| 263 | _debug("rmdir %pd %u", subdir, d_count(subdir)); | ||
| 264 | |||
| 265 | if (subdir->d_fsdata) { | ||
| 266 | _debug("unpin %u", d_count(subdir)); | ||
| 267 | subdir->d_fsdata = NULL; | ||
| 268 | dput(subdir); | ||
| 269 | } | ||
| 270 | dput(subdir); | ||
| 271 | no_dentry: | ||
| 272 | inode_unlock(root->d_inode); | ||
| 273 | _leave(""); | ||
| 274 | } | ||
| 275 | |||
| 276 | /* | ||
| 277 | * Populate a newly created dynamic root with cell names. | ||
| 278 | */ | ||
| 279 | int afs_dynroot_populate(struct super_block *sb) | ||
| 280 | { | ||
| 281 | struct afs_cell *cell; | ||
| 282 | struct afs_net *net = afs_sb2net(sb); | ||
| 283 | int ret; | ||
| 284 | |||
| 285 | if (mutex_lock_interruptible(&net->proc_cells_lock) < 0) | ||
| 286 | return -ERESTARTSYS; | ||
| 287 | |||
| 288 | net->dynroot_sb = sb; | ||
| 289 | list_for_each_entry(cell, &net->proc_cells, proc_link) { | ||
| 290 | ret = afs_dynroot_mkdir(net, cell); | ||
| 291 | if (ret < 0) | ||
| 292 | goto error; | ||
| 293 | } | ||
| 294 | |||
| 295 | ret = 0; | ||
| 296 | out: | ||
| 297 | mutex_unlock(&net->proc_cells_lock); | ||
| 298 | return ret; | ||
| 299 | |||
| 300 | error: | ||
| 301 | net->dynroot_sb = NULL; | ||
| 302 | goto out; | ||
| 303 | } | ||
| 304 | |||
| 305 | /* | ||
| 306 | * When a dynamic root is in the process of being destroyed, depopulate it | ||
| 307 | * of pinned directories. | ||
| 308 | */ | ||
| 309 | void afs_dynroot_depopulate(struct super_block *sb) | ||
| 310 | { | ||
| 311 | struct afs_net *net = afs_sb2net(sb); | ||
| 312 | struct dentry *root = sb->s_root, *subdir, *tmp; | ||
| 313 | |||
| 314 | /* Prevent more subdirs from being created */ | ||
| 315 | mutex_lock(&net->proc_cells_lock); | ||
| 316 | if (net->dynroot_sb == sb) | ||
| 317 | net->dynroot_sb = NULL; | ||
| 318 | mutex_unlock(&net->proc_cells_lock); | ||
| 319 | |||
| 320 | inode_lock(root->d_inode); | ||
| 321 | |||
| 322 | /* Remove all the pins for dirs created for manually added cells */ | ||
| 323 | list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) { | ||
| 324 | if (subdir->d_fsdata) { | ||
| 325 | subdir->d_fsdata = NULL; | ||
| 326 | dput(subdir); | ||
| 327 | } | ||
| 328 | } | ||
| 329 | |||
| 330 | inode_unlock(root->d_inode); | ||
| 331 | } | ||
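The dynroot code pins each manually added cell's directory by keeping the extra dentry reference that lookup_one_len() returns and flagging it in d_fsdata; teardown must therefore drop two references, and it probes with try_lookup_one_len() so the probe itself cannot re-trigger ->lookup and recreate the entry. The pin/unpin pairing reduces to (sketch):

    /* Pin: create (or find) the child and keep lookup's reference. */
    inode_lock(root->d_inode);
    subdir = lookup_one_len(name, root, len);   /* may call ->lookup */
    if (!IS_ERR(subdir))
            subdir->d_fsdata = (void *)1UL;     /* mark pinned, keep ref */
    inode_unlock(root->d_inode);

    /* Unpin: one dput() for the pin, one for this lookup. */
    inode_lock(root->d_inode);
    subdir = try_lookup_one_len(name, root, len);  /* dcache only */
    if (!IS_ERR_OR_NULL(subdir)) {
            if (subdir->d_fsdata) {
                    subdir->d_fsdata = NULL;
                    dput(subdir);               /* drop the pin */
            }
            dput(subdir);                       /* drop this lookup */
    }
    inode_unlock(root->d_inode);

afs_dynroot_depopulate() releases the same pins in bulk by walking d_subdirs, which is why it only needs the parent's inode lock.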
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 5907601aafd0..50929cb91732 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c | |||
| @@ -138,10 +138,6 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call, | |||
| 138 | u64 data_version, size; | 138 | u64 data_version, size; |
| 139 | u32 type, abort_code; | 139 | u32 type, abort_code; |
| 140 | u8 flags = 0; | 140 | u8 flags = 0; |
| 141 | int ret; | ||
| 142 | |||
| 143 | if (vnode) | ||
| 144 | write_seqlock(&vnode->cb_lock); | ||
| 145 | 141 | ||
| 146 | abort_code = ntohl(xdr->abort_code); | 142 | abort_code = ntohl(xdr->abort_code); |
| 147 | 143 | ||
| @@ -154,8 +150,7 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call, | |||
| 154 | * case. | 150 | * case. |
| 155 | */ | 151 | */ |
| 156 | status->abort_code = abort_code; | 152 | status->abort_code = abort_code; |
| 157 | ret = 0; | 153 | return 0; |
| 158 | goto out; | ||
| 159 | } | 154 | } |
| 160 | 155 | ||
| 161 | pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version)); | 156 | pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version)); |
| @@ -164,8 +159,7 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call, | |||
| 164 | 159 | ||
| 165 | if (abort_code != 0 && inline_error) { | 160 | if (abort_code != 0 && inline_error) { |
| 166 | status->abort_code = abort_code; | 161 | status->abort_code = abort_code; |
| 167 | ret = 0; | 162 | return 0; |
| 168 | goto out; | ||
| 169 | } | 163 | } |
| 170 | 164 | ||
| 171 | type = ntohl(xdr->type); | 165 | type = ntohl(xdr->type); |
| @@ -235,17 +229,35 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call, | |||
| 235 | flags); | 229 | flags); |
| 236 | } | 230 | } |
| 237 | 231 | ||
| 238 | ret = 0; | 232 | return 0; |
| 239 | |||
| 240 | out: | ||
| 241 | if (vnode) | ||
| 242 | write_sequnlock(&vnode->cb_lock); | ||
| 243 | return ret; | ||
| 244 | 233 | ||
| 245 | bad: | 234 | bad: |
| 246 | xdr_dump_bad(*_bp); | 235 | xdr_dump_bad(*_bp); |
| 247 | ret = afs_protocol_error(call, -EBADMSG); | 236 | return afs_protocol_error(call, -EBADMSG); |
| 248 | goto out; | 237 | } |
| 238 | |||
| 239 | /* | ||
| 240 | * Decode the file status. We need to lock the target vnode if we're going to | ||
| 241 | * update its status so that stat() sees the attributes update atomically. | ||
| 242 | */ | ||
| 243 | static int afs_decode_status(struct afs_call *call, | ||
| 244 | const __be32 **_bp, | ||
| 245 | struct afs_file_status *status, | ||
| 246 | struct afs_vnode *vnode, | ||
| 247 | const afs_dataversion_t *expected_version, | ||
| 248 | struct afs_read *read_req) | ||
| 249 | { | ||
| 250 | int ret; | ||
| 251 | |||
| 252 | if (!vnode) | ||
| 253 | return xdr_decode_AFSFetchStatus(call, _bp, status, vnode, | ||
| 254 | expected_version, read_req); | ||
| 255 | |||
| 256 | write_seqlock(&vnode->cb_lock); | ||
| 257 | ret = xdr_decode_AFSFetchStatus(call, _bp, status, vnode, | ||
| 258 | expected_version, read_req); | ||
| 259 | write_sequnlock(&vnode->cb_lock); | ||
| 260 | return ret; | ||
| 249 | } | 261 | } |
| 250 | 262 | ||
| 251 | /* | 263 | /* |
| @@ -387,8 +399,8 @@ static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call) | |||
| 387 | 399 | ||
| 388 | /* unmarshall the reply once we've received all of it */ | 400 | /* unmarshall the reply once we've received all of it */ |
| 389 | bp = call->buffer; | 401 | bp = call->buffer; |
| 390 | if (xdr_decode_AFSFetchStatus(call, &bp, &vnode->status, vnode, | 402 | if (afs_decode_status(call, &bp, &vnode->status, vnode, |
| 391 | &call->expected_version, NULL) < 0) | 403 | &call->expected_version, NULL) < 0) |
| 392 | return afs_protocol_error(call, -EBADMSG); | 404 | return afs_protocol_error(call, -EBADMSG); |
| 393 | xdr_decode_AFSCallBack(call, vnode, &bp); | 405 | xdr_decode_AFSCallBack(call, vnode, &bp); |
| 394 | if (call->reply[1]) | 406 | if (call->reply[1]) |
| @@ -568,8 +580,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) | |||
| 568 | return ret; | 580 | return ret; |
| 569 | 581 | ||
| 570 | bp = call->buffer; | 582 | bp = call->buffer; |
| 571 | if (xdr_decode_AFSFetchStatus(call, &bp, &vnode->status, vnode, | 583 | if (afs_decode_status(call, &bp, &vnode->status, vnode, |
| 572 | &vnode->status.data_version, req) < 0) | 584 | &vnode->status.data_version, req) < 0) |
| 573 | return afs_protocol_error(call, -EBADMSG); | 585 | return afs_protocol_error(call, -EBADMSG); |
| 574 | xdr_decode_AFSCallBack(call, vnode, &bp); | 586 | xdr_decode_AFSCallBack(call, vnode, &bp); |
| 575 | if (call->reply[1]) | 587 | if (call->reply[1]) |
| @@ -721,9 +733,9 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call) | |||
| 721 | /* unmarshall the reply once we've received all of it */ | 733 | /* unmarshall the reply once we've received all of it */ |
| 722 | bp = call->buffer; | 734 | bp = call->buffer; |
| 723 | xdr_decode_AFSFid(&bp, call->reply[1]); | 735 | xdr_decode_AFSFid(&bp, call->reply[1]); |
| 724 | if (xdr_decode_AFSFetchStatus(call, &bp, call->reply[2], NULL, NULL, NULL) < 0 || | 736 | if (afs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL) < 0 || |
| 725 | xdr_decode_AFSFetchStatus(call, &bp, &vnode->status, vnode, | 737 | afs_decode_status(call, &bp, &vnode->status, vnode, |
| 726 | &call->expected_version, NULL) < 0) | 738 | &call->expected_version, NULL) < 0) |
| 727 | return afs_protocol_error(call, -EBADMSG); | 739 | return afs_protocol_error(call, -EBADMSG); |
| 728 | xdr_decode_AFSCallBack_raw(&bp, call->reply[3]); | 740 | xdr_decode_AFSCallBack_raw(&bp, call->reply[3]); |
| 729 | /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ | 741 | /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ |
| @@ -827,8 +839,8 @@ static int afs_deliver_fs_remove(struct afs_call *call) | |||
| 827 | 839 | ||
| 828 | /* unmarshall the reply once we've received all of it */ | 840 | /* unmarshall the reply once we've received all of it */ |
| 829 | bp = call->buffer; | 841 | bp = call->buffer; |
| 830 | if (xdr_decode_AFSFetchStatus(call, &bp, &vnode->status, vnode, | 842 | if (afs_decode_status(call, &bp, &vnode->status, vnode, |
| 831 | &call->expected_version, NULL) < 0) | 843 | &call->expected_version, NULL) < 0) |
| 832 | return afs_protocol_error(call, -EBADMSG); | 844 | return afs_protocol_error(call, -EBADMSG); |
| 833 | /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ | 845 | /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ |
| 834 | 846 | ||
| @@ -917,9 +929,9 @@ static int afs_deliver_fs_link(struct afs_call *call) | |||
| 917 | 929 | ||
| 918 | /* unmarshall the reply once we've received all of it */ | 930 | /* unmarshall the reply once we've received all of it */ |
| 919 | bp = call->buffer; | 931 | bp = call->buffer; |
| 920 | if (xdr_decode_AFSFetchStatus(call, &bp, &vnode->status, vnode, NULL, NULL) < 0 || | 932 | if (afs_decode_status(call, &bp, &vnode->status, vnode, NULL, NULL) < 0 || |
| 921 | xdr_decode_AFSFetchStatus(call, &bp, &dvnode->status, dvnode, | 933 | afs_decode_status(call, &bp, &dvnode->status, dvnode, |
| 922 | &call->expected_version, NULL) < 0) | 934 | &call->expected_version, NULL) < 0) |
| 923 | return afs_protocol_error(call, -EBADMSG); | 935 | return afs_protocol_error(call, -EBADMSG); |
| 924 | /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ | 936 | /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ |
| 925 | 937 | ||
| @@ -1004,9 +1016,9 @@ static int afs_deliver_fs_symlink(struct afs_call *call) | |||
| 1004 | /* unmarshall the reply once we've received all of it */ | 1016 | /* unmarshall the reply once we've received all of it */ |
| 1005 | bp = call->buffer; | 1017 | bp = call->buffer; |
| 1006 | xdr_decode_AFSFid(&bp, call->reply[1]); | 1018 | xdr_decode_AFSFid(&bp, call->reply[1]); |
| 1007 | if (xdr_decode_AFSFetchStatus(call, &bp, call->reply[2], NULL, NULL, NULL) || | 1019 | if (afs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL) || |
| 1008 | xdr_decode_AFSFetchStatus(call, &bp, &vnode->status, vnode, | 1020 | afs_decode_status(call, &bp, &vnode->status, vnode, |
| 1009 | &call->expected_version, NULL) < 0) | 1021 | &call->expected_version, NULL) < 0) |
| 1010 | return afs_protocol_error(call, -EBADMSG); | 1022 | return afs_protocol_error(call, -EBADMSG); |
| 1011 | /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ | 1023 | /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ |
| 1012 | 1024 | ||
| @@ -1110,12 +1122,12 @@ static int afs_deliver_fs_rename(struct afs_call *call) | |||
| 1110 | 1122 | ||
| 1111 | /* unmarshall the reply once we've received all of it */ | 1123 | /* unmarshall the reply once we've received all of it */ |
| 1112 | bp = call->buffer; | 1124 | bp = call->buffer; |
| 1113 | if (xdr_decode_AFSFetchStatus(call, &bp, &orig_dvnode->status, orig_dvnode, | 1125 | if (afs_decode_status(call, &bp, &orig_dvnode->status, orig_dvnode, |
| 1114 | &call->expected_version, NULL) < 0) | 1126 | &call->expected_version, NULL) < 0) |
| 1115 | return afs_protocol_error(call, -EBADMSG); | 1127 | return afs_protocol_error(call, -EBADMSG); |
| 1116 | if (new_dvnode != orig_dvnode && | 1128 | if (new_dvnode != orig_dvnode && |
| 1117 | xdr_decode_AFSFetchStatus(call, &bp, &new_dvnode->status, new_dvnode, | 1129 | afs_decode_status(call, &bp, &new_dvnode->status, new_dvnode, |
| 1118 | &call->expected_version_2, NULL) < 0) | 1130 | &call->expected_version_2, NULL) < 0) |
| 1119 | return afs_protocol_error(call, -EBADMSG); | 1131 | return afs_protocol_error(call, -EBADMSG); |
| 1120 | /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ | 1132 | /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ |
| 1121 | 1133 | ||
| @@ -1219,8 +1231,8 @@ static int afs_deliver_fs_store_data(struct afs_call *call) | |||
| 1219 | 1231 | ||
| 1220 | /* unmarshall the reply once we've received all of it */ | 1232 | /* unmarshall the reply once we've received all of it */ |
| 1221 | bp = call->buffer; | 1233 | bp = call->buffer; |
| 1222 | if (xdr_decode_AFSFetchStatus(call, &bp, &vnode->status, vnode, | 1234 | if (afs_decode_status(call, &bp, &vnode->status, vnode, |
| 1223 | &call->expected_version, NULL) < 0) | 1235 | &call->expected_version, NULL) < 0) |
| 1224 | return afs_protocol_error(call, -EBADMSG); | 1236 | return afs_protocol_error(call, -EBADMSG); |
| 1225 | /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ | 1237 | /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ |
| 1226 | 1238 | ||
| @@ -1395,8 +1407,8 @@ static int afs_deliver_fs_store_status(struct afs_call *call) | |||
| 1395 | 1407 | ||
| 1396 | /* unmarshall the reply once we've received all of it */ | 1408 | /* unmarshall the reply once we've received all of it */ |
| 1397 | bp = call->buffer; | 1409 | bp = call->buffer; |
| 1398 | if (xdr_decode_AFSFetchStatus(call, &bp, &vnode->status, vnode, | 1410 | if (afs_decode_status(call, &bp, &vnode->status, vnode, |
| 1399 | &call->expected_version, NULL) < 0) | 1411 | &call->expected_version, NULL) < 0) |
| 1400 | return afs_protocol_error(call, -EBADMSG); | 1412 | return afs_protocol_error(call, -EBADMSG); |
| 1401 | /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ | 1413 | /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ |
| 1402 | 1414 | ||
| @@ -2097,8 +2109,8 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call) | |||
| 2097 | 2109 | ||
| 2098 | /* unmarshall the reply once we've received all of it */ | 2110 | /* unmarshall the reply once we've received all of it */ |
| 2099 | bp = call->buffer; | 2111 | bp = call->buffer; |
| 2100 | xdr_decode_AFSFetchStatus(call, &bp, status, vnode, | 2112 | afs_decode_status(call, &bp, status, vnode, |
| 2101 | &call->expected_version, NULL); | 2113 | &call->expected_version, NULL); |
| 2102 | callback[call->count].version = ntohl(bp[0]); | 2114 | callback[call->count].version = ntohl(bp[0]); |
| 2103 | callback[call->count].expiry = ntohl(bp[1]); | 2115 | callback[call->count].expiry = ntohl(bp[1]); |
| 2104 | callback[call->count].type = ntohl(bp[2]); | 2116 | callback[call->count].type = ntohl(bp[2]); |
| @@ -2209,9 +2221,9 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) | |||
| 2209 | 2221 | ||
| 2210 | bp = call->buffer; | 2222 | bp = call->buffer; |
| 2211 | statuses = call->reply[1]; | 2223 | statuses = call->reply[1]; |
| 2212 | if (xdr_decode_AFSFetchStatus(call, &bp, &statuses[call->count], | 2224 | if (afs_decode_status(call, &bp, &statuses[call->count], |
| 2213 | call->count == 0 ? vnode : NULL, | 2225 | call->count == 0 ? vnode : NULL, |
| 2214 | NULL, NULL) < 0) | 2226 | NULL, NULL) < 0) |
| 2215 | return afs_protocol_error(call, -EBADMSG); | 2227 | return afs_protocol_error(call, -EBADMSG); |
| 2216 | 2228 | ||
| 2217 | call->count++; | 2229 | call->count++; |
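Moving the cb_lock handling out of xdr_decode_AFSFetchStatus() and into the afs_decode_status() wrapper keeps the decoder itself lock-free (its early returns above no longer need a goto-out unlock path) and centralizes the vnode-is-NULL test. The wrapper shape, generically, with decode_status_raw() standing in for the real decoder:

    /* Take vnode->cb_lock only when there is a vnode to update, so
     * that stat() observes the attribute update atomically. */
    static int decode_status_locked(struct afs_call *call,
                                    const __be32 **bp,
                                    struct afs_file_status *status,
                                    struct afs_vnode *vnode)
    {
            int ret;

            if (!vnode)
                    return decode_status_raw(call, bp, status, NULL);

            write_seqlock(&vnode->cb_lock);
            ret = decode_status_raw(call, bp, status, vnode);
            write_sequnlock(&vnode->cb_lock);
            return ret;
    }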
diff --git a/fs/afs/internal.h b/fs/afs/internal.h index e3f8a46663db..9778df135717 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h | |||
| @@ -22,6 +22,8 @@ | |||
| 22 | #include <linux/backing-dev.h> | 22 | #include <linux/backing-dev.h> |
| 23 | #include <linux/uuid.h> | 23 | #include <linux/uuid.h> |
| 24 | #include <net/net_namespace.h> | 24 | #include <net/net_namespace.h> |
| 25 | #include <net/netns/generic.h> | ||
| 26 | #include <net/sock.h> | ||
| 25 | #include <net/af_rxrpc.h> | 27 | #include <net/af_rxrpc.h> |
| 26 | 28 | ||
| 27 | #include "afs.h" | 29 | #include "afs.h" |
| @@ -40,7 +42,8 @@ struct afs_mount_params { | |||
| 40 | afs_voltype_t type; /* type of volume requested */ | 42 | afs_voltype_t type; /* type of volume requested */ |
| 41 | int volnamesz; /* size of volume name */ | 43 | int volnamesz; /* size of volume name */ |
| 42 | const char *volname; /* name of volume to mount */ | 44 | const char *volname; /* name of volume to mount */ |
| 43 | struct afs_net *net; /* Network namespace in effect */ | 45 | struct net *net_ns; /* Network namespace in effect */ |
| 46 | struct afs_net *net; /* the AFS net namespace stuff */ | ||
| 44 | struct afs_cell *cell; /* cell in which to find volume */ | 47 | struct afs_cell *cell; /* cell in which to find volume */ |
| 45 | struct afs_volume *volume; /* volume record */ | 48 | struct afs_volume *volume; /* volume record */ |
| 46 | struct key *key; /* key to use for secure mounting */ | 49 | struct key *key; /* key to use for secure mounting */ |
| @@ -189,7 +192,7 @@ struct afs_read { | |||
| 189 | * - there's one superblock per volume | 192 | * - there's one superblock per volume |
| 190 | */ | 193 | */ |
| 191 | struct afs_super_info { | 194 | struct afs_super_info { |
| 192 | struct afs_net *net; /* Network namespace */ | 195 | struct net *net_ns; /* Network namespace */ |
| 193 | struct afs_cell *cell; /* The cell in which the volume resides */ | 196 | struct afs_cell *cell; /* The cell in which the volume resides */ |
| 194 | struct afs_volume *volume; /* volume record */ | 197 | struct afs_volume *volume; /* volume record */ |
| 195 | bool dyn_root; /* True if dynamic root */ | 198 | bool dyn_root; /* True if dynamic root */ |
| @@ -210,7 +213,6 @@ struct afs_sysnames { | |||
| 210 | char *subs[AFS_NR_SYSNAME]; | 213 | char *subs[AFS_NR_SYSNAME]; |
| 211 | refcount_t usage; | 214 | refcount_t usage; |
| 212 | unsigned short nr; | 215 | unsigned short nr; |
| 213 | short error; | ||
| 214 | char blank[1]; | 216 | char blank[1]; |
| 215 | }; | 217 | }; |
| 216 | 218 | ||
| @@ -218,6 +220,7 @@ struct afs_sysnames { | |||
| 218 | * AFS network namespace record. | 220 | * AFS network namespace record. |
| 219 | */ | 221 | */ |
| 220 | struct afs_net { | 222 | struct afs_net { |
| 223 | struct net *net; /* Backpointer to the owning net namespace */ | ||
| 221 | struct afs_uuid uuid; | 224 | struct afs_uuid uuid; |
| 222 | bool live; /* F if this namespace is being removed */ | 225 | bool live; /* F if this namespace is being removed */ |
| 223 | 226 | ||
| @@ -231,13 +234,13 @@ struct afs_net { | |||
| 231 | 234 | ||
| 232 | /* Cell database */ | 235 | /* Cell database */ |
| 233 | struct rb_root cells; | 236 | struct rb_root cells; |
| 234 | struct afs_cell *ws_cell; | 237 | struct afs_cell __rcu *ws_cell; |
| 235 | struct work_struct cells_manager; | 238 | struct work_struct cells_manager; |
| 236 | struct timer_list cells_timer; | 239 | struct timer_list cells_timer; |
| 237 | atomic_t cells_outstanding; | 240 | atomic_t cells_outstanding; |
| 238 | seqlock_t cells_lock; | 241 | seqlock_t cells_lock; |
| 239 | 242 | ||
| 240 | spinlock_t proc_cells_lock; | 243 | struct mutex proc_cells_lock; |
| 241 | struct list_head proc_cells; | 244 | struct list_head proc_cells; |
| 242 | 245 | ||
| 243 | /* Known servers. Theoretically each fileserver can only be in one | 246 | /* Known servers. Theoretically each fileserver can only be in one |
| @@ -261,6 +264,7 @@ struct afs_net { | |||
| 261 | struct mutex lock_manager_mutex; | 264 | struct mutex lock_manager_mutex; |
| 262 | 265 | ||
| 263 | /* Misc */ | 266 | /* Misc */ |
| 267 | struct super_block *dynroot_sb; /* Dynamic root mount superblock */ | ||
| 264 | struct proc_dir_entry *proc_afs; /* /proc/net/afs directory */ | 268 | struct proc_dir_entry *proc_afs; /* /proc/net/afs directory */ |
| 265 | struct afs_sysnames *sysnames; | 269 | struct afs_sysnames *sysnames; |
| 266 | rwlock_t sysnames_lock; | 270 | rwlock_t sysnames_lock; |
| @@ -280,7 +284,6 @@ struct afs_net { | |||
| 280 | }; | 284 | }; |
| 281 | 285 | ||
| 282 | extern const char afs_init_sysname[]; | 286 | extern const char afs_init_sysname[]; |
| 283 | extern struct afs_net __afs_net;// Dummy AFS network namespace; TODO: replace with real netns | ||
| 284 | 287 | ||
| 285 | enum afs_cell_state { | 288 | enum afs_cell_state { |
| 286 | AFS_CELL_UNSET, | 289 | AFS_CELL_UNSET, |
| @@ -404,16 +407,27 @@ struct afs_server { | |||
| 404 | rwlock_t fs_lock; /* access lock */ | 407 | rwlock_t fs_lock; /* access lock */ |
| 405 | 408 | ||
| 406 | /* callback promise management */ | 409 | /* callback promise management */ |
| 407 | struct list_head cb_interests; /* List of superblocks using this server */ | 410 | struct hlist_head cb_volumes; /* List of volume interests on this server */ |
| 408 | unsigned cb_s_break; /* Break-everything counter. */ | 411 | unsigned cb_s_break; /* Break-everything counter. */ |
| 409 | rwlock_t cb_break_lock; /* Volume finding lock */ | 412 | rwlock_t cb_break_lock; /* Volume finding lock */ |
| 410 | }; | 413 | }; |
| 411 | 414 | ||
| 412 | /* | 415 | /* |
| 416 | * Volume collation in the server's callback interest list. | ||
| 417 | */ | ||
| 418 | struct afs_vol_interest { | ||
| 419 | struct hlist_node srv_link; /* Link in server->cb_volumes */ | ||
| 420 | struct hlist_head cb_interests; /* List of callback interests on the server */ | ||
| 421 | afs_volid_t vid; /* Volume ID to match */ | ||
| 422 | unsigned int usage; | ||
| 423 | }; | ||
| 424 | |||
| 425 | /* | ||
| 413 | * Interest by a superblock on a server. | 426 | * Interest by a superblock on a server. |
| 414 | */ | 427 | */ |
| 415 | struct afs_cb_interest { | 428 | struct afs_cb_interest { |
| 416 | struct list_head cb_link; /* Link in server->cb_interests */ | 429 | struct hlist_node cb_vlink; /* Link in vol_interest->cb_interests */ |
| 430 | struct afs_vol_interest *vol_interest; | ||
| 417 | struct afs_server *server; /* Server on which this interest resides */ | 431 | struct afs_server *server; /* Server on which this interest resides */ |
| 418 | struct super_block *sb; /* Superblock on which inodes reside */ | 432 | struct super_block *sb; /* Superblock on which inodes reside */ |
| 419 | afs_volid_t vid; /* Volume ID to match */ | 433 | afs_volid_t vid; /* Volume ID to match */ |
| @@ -720,6 +734,10 @@ extern const struct inode_operations afs_dynroot_inode_operations; | |||
| 720 | extern const struct dentry_operations afs_dynroot_dentry_operations; | 734 | extern const struct dentry_operations afs_dynroot_dentry_operations; |
| 721 | 735 | ||
| 722 | extern struct inode *afs_try_auto_mntpt(struct dentry *, struct inode *); | 736 | extern struct inode *afs_try_auto_mntpt(struct dentry *, struct inode *); |
| 737 | extern int afs_dynroot_mkdir(struct afs_net *, struct afs_cell *); | ||
| 738 | extern void afs_dynroot_rmdir(struct afs_net *, struct afs_cell *); | ||
| 739 | extern int afs_dynroot_populate(struct super_block *); | ||
| 740 | extern void afs_dynroot_depopulate(struct super_block *); | ||
| 723 | 741 | ||
| 724 | /* | 742 | /* |
| 725 | * file.c | 743 | * file.c |
| @@ -806,34 +824,36 @@ extern int afs_drop_inode(struct inode *); | |||
| 806 | * main.c | 824 | * main.c |
| 807 | */ | 825 | */ |
| 808 | extern struct workqueue_struct *afs_wq; | 826 | extern struct workqueue_struct *afs_wq; |
| 827 | extern int afs_net_id; | ||
| 809 | 828 | ||
| 810 | static inline struct afs_net *afs_d2net(struct dentry *dentry) | 829 | static inline struct afs_net *afs_net(struct net *net) |
| 811 | { | 830 | { |
| 812 | return &__afs_net; | 831 | return net_generic(net, afs_net_id); |
| 813 | } | 832 | } |
| 814 | 833 | ||
| 815 | static inline struct afs_net *afs_i2net(struct inode *inode) | 834 | static inline struct afs_net *afs_sb2net(struct super_block *sb) |
| 816 | { | 835 | { |
| 817 | return &__afs_net; | 836 | return afs_net(AFS_FS_S(sb)->net_ns); |
| 818 | } | 837 | } |
| 819 | 838 | ||
| 820 | static inline struct afs_net *afs_v2net(struct afs_vnode *vnode) | 839 | static inline struct afs_net *afs_d2net(struct dentry *dentry) |
| 821 | { | 840 | { |
| 822 | return &__afs_net; | 841 | return afs_sb2net(dentry->d_sb); |
| 823 | } | 842 | } |
| 824 | 843 | ||
| 825 | static inline struct afs_net *afs_sock2net(struct sock *sk) | 844 | static inline struct afs_net *afs_i2net(struct inode *inode) |
| 826 | { | 845 | { |
| 827 | return &__afs_net; | 846 | return afs_sb2net(inode->i_sb); |
| 828 | } | 847 | } |
| 829 | 848 | ||
| 830 | static inline struct afs_net *afs_get_net(struct afs_net *net) | 849 | static inline struct afs_net *afs_v2net(struct afs_vnode *vnode) |
| 831 | { | 850 | { |
| 832 | return net; | 851 | return afs_i2net(&vnode->vfs_inode); |
| 833 | } | 852 | } |
| 834 | 853 | ||
| 835 | static inline void afs_put_net(struct afs_net *net) | 854 | static inline struct afs_net *afs_sock2net(struct sock *sk) |
| 836 | { | 855 | { |
| 856 | return net_generic(sock_net(sk), afs_net_id); | ||
| 837 | } | 857 | } |
| 838 | 858 | ||
| 839 | static inline void __afs_stat(atomic_t *s) | 859 | static inline void __afs_stat(atomic_t *s) |
| @@ -861,16 +881,25 @@ extern void afs_mntpt_kill_timer(void); | |||
| 861 | /* | 881 | /* |
| 862 | * netdevices.c | 882 | * netdevices.c |
| 863 | */ | 883 | */ |
| 864 | extern int afs_get_ipv4_interfaces(struct afs_interface *, size_t, bool); | 884 | extern int afs_get_ipv4_interfaces(struct afs_net *, struct afs_interface *, |
| 885 | size_t, bool); | ||
| 865 | 886 | ||
| 866 | /* | 887 | /* |
| 867 | * proc.c | 888 | * proc.c |
| 868 | */ | 889 | */ |
| 890 | #ifdef CONFIG_PROC_FS | ||
| 869 | extern int __net_init afs_proc_init(struct afs_net *); | 891 | extern int __net_init afs_proc_init(struct afs_net *); |
| 870 | extern void __net_exit afs_proc_cleanup(struct afs_net *); | 892 | extern void __net_exit afs_proc_cleanup(struct afs_net *); |
| 871 | extern int afs_proc_cell_setup(struct afs_net *, struct afs_cell *); | 893 | extern int afs_proc_cell_setup(struct afs_cell *); |
| 872 | extern void afs_proc_cell_remove(struct afs_net *, struct afs_cell *); | 894 | extern void afs_proc_cell_remove(struct afs_cell *); |
| 873 | extern void afs_put_sysnames(struct afs_sysnames *); | 895 | extern void afs_put_sysnames(struct afs_sysnames *); |
| 896 | #else | ||
| 897 | static inline int afs_proc_init(struct afs_net *net) { return 0; } | ||
| 898 | static inline void afs_proc_cleanup(struct afs_net *net) {} | ||
| 899 | static inline int afs_proc_cell_setup(struct afs_cell *cell) { return 0; } | ||
| 900 | static inline void afs_proc_cell_remove(struct afs_cell *cell) {} | ||
| 901 | static inline void afs_put_sysnames(struct afs_sysnames *sysnames) {} | ||
| 902 | #endif | ||
| 874 | 903 | ||
| 875 | /* | 904 | /* |
| 876 | * rotate.c | 905 | * rotate.c |
| @@ -1002,7 +1031,7 @@ extern bool afs_annotate_server_list(struct afs_server_list *, struct afs_server | |||
| 1002 | * super.c | 1031 | * super.c |
| 1003 | */ | 1032 | */ |
| 1004 | extern int __init afs_fs_init(void); | 1033 | extern int __init afs_fs_init(void); |
| 1005 | extern void __exit afs_fs_exit(void); | 1034 | extern void afs_fs_exit(void); |
| 1006 | 1035 | ||
| 1007 | /* | 1036 | /* |
| 1008 | * vlclient.c | 1037 | * vlclient.c |
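The internal.h hunk above replaces the old global *__afs_net* with a chain of accessors that walk from a VFS object to its owning network namespace and then into the per-namespace state via net_generic(). A minimal sketch of that pattern, using hypothetical demo_* names rather than the real AFS types; the per-net id is assumed to be assigned by register_pernet_subsys() as shown in main.c below:

  #include <linux/fs.h>
  #include <linux/dcache.h>
  #include <net/net_namespace.h>
  #include <net/netns/generic.h>

  extern unsigned int demo_net_id;  /* assigned by register_pernet_subsys() */

  struct demo_sb_info {
          struct net *net_ns;       /* namespace pinned at mount time */
  };

  struct demo_net {
          struct net *net;          /* backpointer to the owning namespace */
          /* ... plus the module's per-namespace lists, locks, etc. ... */
  };

  /* namespace -> per-namespace state */
  static inline struct demo_net *demo_net(struct net *net)
  {
          return net_generic(net, demo_net_id);
  }

  /* superblock -> per-namespace state, via the namespace saved in s_fs_info */
  static inline struct demo_net *demo_sb2net(struct super_block *sb)
  {
          struct demo_sb_info *si = sb->s_fs_info;

          return demo_net(si->net_ns);
  }

  /* dentry -> per-namespace state */
  static inline struct demo_net *demo_d2net(struct dentry *dentry)
  {
          return demo_sb2net(dentry->d_sb);
  }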
diff --git a/fs/afs/main.c b/fs/afs/main.c index d7560168b3bf..e84fe822a960 100644 --- a/fs/afs/main.c +++ b/fs/afs/main.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/completion.h> | 15 | #include <linux/completion.h> |
| 16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
| 17 | #include <linux/random.h> | 17 | #include <linux/random.h> |
| 18 | #include <linux/proc_fs.h> | ||
| 18 | #define CREATE_TRACE_POINTS | 19 | #define CREATE_TRACE_POINTS |
| 19 | #include "internal.h" | 20 | #include "internal.h" |
| 20 | 21 | ||
| @@ -32,7 +33,7 @@ module_param(rootcell, charp, 0); | |||
| 32 | MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list"); | 33 | MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list"); |
| 33 | 34 | ||
| 34 | struct workqueue_struct *afs_wq; | 35 | struct workqueue_struct *afs_wq; |
| 35 | struct afs_net __afs_net; | 36 | static struct proc_dir_entry *afs_proc_symlink; |
| 36 | 37 | ||
| 37 | #if defined(CONFIG_ALPHA) | 38 | #if defined(CONFIG_ALPHA) |
| 38 | const char afs_init_sysname[] = "alpha_linux26"; | 39 | const char afs_init_sysname[] = "alpha_linux26"; |
| @@ -67,11 +68,13 @@ const char afs_init_sysname[] = "unknown_linux26"; | |||
| 67 | /* | 68 | /* |
| 68 | * Initialise an AFS network namespace record. | 69 | * Initialise an AFS network namespace record. |
| 69 | */ | 70 | */ |
| 70 | static int __net_init afs_net_init(struct afs_net *net) | 71 | static int __net_init afs_net_init(struct net *net_ns) |
| 71 | { | 72 | { |
| 72 | struct afs_sysnames *sysnames; | 73 | struct afs_sysnames *sysnames; |
| 74 | struct afs_net *net = afs_net(net_ns); | ||
| 73 | int ret; | 75 | int ret; |
| 74 | 76 | ||
| 77 | net->net = net_ns; | ||
| 75 | net->live = true; | 78 | net->live = true; |
| 76 | generate_random_uuid((unsigned char *)&net->uuid); | 79 | generate_random_uuid((unsigned char *)&net->uuid); |
| 77 | 80 | ||
| @@ -83,7 +86,7 @@ static int __net_init afs_net_init(struct afs_net *net) | |||
| 83 | INIT_WORK(&net->cells_manager, afs_manage_cells); | 86 | INIT_WORK(&net->cells_manager, afs_manage_cells); |
| 84 | timer_setup(&net->cells_timer, afs_cells_timer, 0); | 87 | timer_setup(&net->cells_timer, afs_cells_timer, 0); |
| 85 | 88 | ||
| 86 | spin_lock_init(&net->proc_cells_lock); | 89 | mutex_init(&net->proc_cells_lock); |
| 87 | INIT_LIST_HEAD(&net->proc_cells); | 90 | INIT_LIST_HEAD(&net->proc_cells); |
| 88 | 91 | ||
| 89 | seqlock_init(&net->fs_lock); | 92 | seqlock_init(&net->fs_lock); |
| @@ -142,8 +145,10 @@ error_sysnames: | |||
| 142 | /* | 145 | /* |
| 143 | * Clean up and destroy an AFS network namespace record. | 146 | * Clean up and destroy an AFS network namespace record. |
| 144 | */ | 147 | */ |
| 145 | static void __net_exit afs_net_exit(struct afs_net *net) | 148 | static void __net_exit afs_net_exit(struct net *net_ns) |
| 146 | { | 149 | { |
| 150 | struct afs_net *net = afs_net(net_ns); | ||
| 151 | |||
| 147 | net->live = false; | 152 | net->live = false; |
| 148 | afs_cell_purge(net); | 153 | afs_cell_purge(net); |
| 149 | afs_purge_servers(net); | 154 | afs_purge_servers(net); |
| @@ -152,6 +157,13 @@ static void __net_exit afs_net_exit(struct afs_net *net) | |||
| 152 | afs_put_sysnames(net->sysnames); | 157 | afs_put_sysnames(net->sysnames); |
| 153 | } | 158 | } |
| 154 | 159 | ||
| 160 | static struct pernet_operations afs_net_ops = { | ||
| 161 | .init = afs_net_init, | ||
| 162 | .exit = afs_net_exit, | ||
| 163 | .id = &afs_net_id, | ||
| 164 | .size = sizeof(struct afs_net), | ||
| 165 | }; | ||
| 166 | |||
| 155 | /* | 167 | /* |
| 156 | * initialise the AFS client FS module | 168 | * initialise the AFS client FS module |
| 157 | */ | 169 | */ |
| @@ -178,7 +190,7 @@ static int __init afs_init(void) | |||
| 178 | goto error_cache; | 190 | goto error_cache; |
| 179 | #endif | 191 | #endif |
| 180 | 192 | ||
| 181 | ret = afs_net_init(&__afs_net); | 193 | ret = register_pernet_subsys(&afs_net_ops); |
| 182 | if (ret < 0) | 194 | if (ret < 0) |
| 183 | goto error_net; | 195 | goto error_net; |
| 184 | 196 | ||
| @@ -187,10 +199,18 @@ static int __init afs_init(void) | |||
| 187 | if (ret < 0) | 199 | if (ret < 0) |
| 188 | goto error_fs; | 200 | goto error_fs; |
| 189 | 201 | ||
| 202 | afs_proc_symlink = proc_symlink("fs/afs", NULL, "../self/net/afs"); | ||
| 203 | if (IS_ERR(afs_proc_symlink)) { | ||
| 204 | ret = PTR_ERR(afs_proc_symlink); | ||
| 205 | goto error_proc; | ||
| 206 | } | ||
| 207 | |||
| 190 | return ret; | 208 | return ret; |
| 191 | 209 | ||
| 210 | error_proc: | ||
| 211 | afs_fs_exit(); | ||
| 192 | error_fs: | 212 | error_fs: |
| 193 | afs_net_exit(&__afs_net); | 213 | unregister_pernet_subsys(&afs_net_ops); |
| 194 | error_net: | 214 | error_net: |
| 195 | #ifdef CONFIG_AFS_FSCACHE | 215 | #ifdef CONFIG_AFS_FSCACHE |
| 196 | fscache_unregister_netfs(&afs_cache_netfs); | 216 | fscache_unregister_netfs(&afs_cache_netfs); |
| @@ -219,8 +239,9 @@ static void __exit afs_exit(void) | |||
| 219 | { | 239 | { |
| 220 | printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 unregistering.\n"); | 240 | printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 unregistering.\n"); |
| 221 | 241 | ||
| 242 | proc_remove(afs_proc_symlink); | ||
| 222 | afs_fs_exit(); | 243 | afs_fs_exit(); |
| 223 | afs_net_exit(&__afs_net); | 244 | unregister_pernet_subsys(&afs_net_ops); |
| 224 | #ifdef CONFIG_AFS_FSCACHE | 245 | #ifdef CONFIG_AFS_FSCACHE |
| 225 | fscache_unregister_netfs(&afs_cache_netfs); | 246 | fscache_unregister_netfs(&afs_cache_netfs); |
| 226 | #endif | 247 | #endif |
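main.c now creates that state through the pernet machinery instead of initialising a single static instance: register_pernet_subsys() allocates .size bytes of zeroed per-namespace storage, records the generic-state slot in *.id, and runs .init for every namespace that already exists or is created later, with .exit run as namespaces die. One detail worth flagging: proc_symlink() returns NULL rather than an ERR_PTR on failure, so a NULL test is the safer check there (mainline later switched to one). A hedged lifecycle sketch with the hypothetical demo_* names from above:

  unsigned int demo_net_id;

  static int __net_init demo_net_init(struct net *net_ns)
  {
          struct demo_net *net = demo_net(net_ns);

          net->net = net_ns;      /* remember the owning namespace */
          return 0;
  }

  static void __net_exit demo_net_exit(struct net *net_ns)
  {
          /* tear down per-namespace objects here; the demo_net blob
           * itself is freed by the pernet core after this returns */
  }

  static struct pernet_operations demo_net_ops = {
          .init = demo_net_init,
          .exit = demo_net_exit,
          .id   = &demo_net_id,
          .size = sizeof(struct demo_net),
  };

  static int __init demo_init(void)
  {
          return register_pernet_subsys(&demo_net_ops);
  }

  static void __exit demo_exit(void)
  {
          unregister_pernet_subsys(&demo_net_ops);
  }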
diff --git a/fs/afs/netdevices.c b/fs/afs/netdevices.c index 50bd5bb1c4fb..2a009d1939d7 100644 --- a/fs/afs/netdevices.c +++ b/fs/afs/netdevices.c | |||
| @@ -17,8 +17,8 @@ | |||
| 17 | * - maxbufs must be at least 1 | 17 | * - maxbufs must be at least 1 |
| 18 | * - returns the number of interface records in the buffer | 18 | * - returns the number of interface records in the buffer |
| 19 | */ | 19 | */ |
| 20 | int afs_get_ipv4_interfaces(struct afs_interface *bufs, size_t maxbufs, | 20 | int afs_get_ipv4_interfaces(struct afs_net *net, struct afs_interface *bufs, |
| 21 | bool wantloopback) | 21 | size_t maxbufs, bool wantloopback) |
| 22 | { | 22 | { |
| 23 | struct net_device *dev; | 23 | struct net_device *dev; |
| 24 | struct in_device *idev; | 24 | struct in_device *idev; |
| @@ -27,7 +27,7 @@ int afs_get_ipv4_interfaces(struct afs_interface *bufs, size_t maxbufs, | |||
| 27 | ASSERT(maxbufs > 0); | 27 | ASSERT(maxbufs > 0); |
| 28 | 28 | ||
| 29 | rtnl_lock(); | 29 | rtnl_lock(); |
| 30 | for_each_netdev(&init_net, dev) { | 30 | for_each_netdev(net->net, dev) { |
| 31 | if (dev->type == ARPHRD_LOOPBACK && !wantloopback) | 31 | if (dev->type == ARPHRD_LOOPBACK && !wantloopback) |
| 32 | continue; | 32 | continue; |
| 33 | idev = __in_dev_get_rtnl(dev); | 33 | idev = __in_dev_get_rtnl(dev); |
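netdevices.c picks up the namespace from the caller so the interface walk covers the right namespace's devices instead of hard-coding init_net. The same pattern in isolation (illustrative demo_* naming):

  #include <linux/netdevice.h>
  #include <linux/rtnetlink.h>

  /* Count the network devices visible in one namespace. */
  static int demo_count_netdevs(struct net *net)
  {
          struct net_device *dev;
          int n = 0;

          rtnl_lock();            /* stabilise the device list */
          for_each_netdev(net, dev)
                  n++;
          rtnl_unlock();
          return n;
  }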
diff --git a/fs/afs/proc.c b/fs/afs/proc.c index 3aad32762989..0c3285c8db95 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c | |||
| @@ -17,240 +17,78 @@ | |||
| 17 | #include <linux/uaccess.h> | 17 | #include <linux/uaccess.h> |
| 18 | #include "internal.h" | 18 | #include "internal.h" |
| 19 | 19 | ||
| 20 | static inline struct afs_net *afs_proc2net(struct file *f) | 20 | static inline struct afs_net *afs_seq2net(struct seq_file *m) |
| 21 | { | 21 | { |
| 22 | return &__afs_net; | 22 | return afs_net(seq_file_net(m)); |
| 23 | } | 23 | } |
| 24 | 24 | ||
| 25 | static inline struct afs_net *afs_seq2net(struct seq_file *m) | 25 | static inline struct afs_net *afs_seq2net_single(struct seq_file *m) |
| 26 | { | 26 | { |
| 27 | return &__afs_net; // TODO: use seq_file_net(m) | 27 | return afs_net(seq_file_single_net(m)); |
| 28 | } | 28 | } |
| 29 | 29 | ||
| 30 | static int afs_proc_cells_open(struct inode *inode, struct file *file); | ||
| 31 | static void *afs_proc_cells_start(struct seq_file *p, loff_t *pos); | ||
| 32 | static void *afs_proc_cells_next(struct seq_file *p, void *v, loff_t *pos); | ||
| 33 | static void afs_proc_cells_stop(struct seq_file *p, void *v); | ||
| 34 | static int afs_proc_cells_show(struct seq_file *m, void *v); | ||
| 35 | static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf, | ||
| 36 | size_t size, loff_t *_pos); | ||
| 37 | |||
| 38 | static const struct seq_operations afs_proc_cells_ops = { | ||
| 39 | .start = afs_proc_cells_start, | ||
| 40 | .next = afs_proc_cells_next, | ||
| 41 | .stop = afs_proc_cells_stop, | ||
| 42 | .show = afs_proc_cells_show, | ||
| 43 | }; | ||
| 44 | |||
| 45 | static const struct file_operations afs_proc_cells_fops = { | ||
| 46 | .open = afs_proc_cells_open, | ||
| 47 | .read = seq_read, | ||
| 48 | .write = afs_proc_cells_write, | ||
| 49 | .llseek = seq_lseek, | ||
| 50 | .release = seq_release, | ||
| 51 | }; | ||
| 52 | |||
| 53 | static ssize_t afs_proc_rootcell_read(struct file *file, char __user *buf, | ||
| 54 | size_t size, loff_t *_pos); | ||
| 55 | static ssize_t afs_proc_rootcell_write(struct file *file, | ||
| 56 | const char __user *buf, | ||
| 57 | size_t size, loff_t *_pos); | ||
| 58 | |||
| 59 | static const struct file_operations afs_proc_rootcell_fops = { | ||
| 60 | .read = afs_proc_rootcell_read, | ||
| 61 | .write = afs_proc_rootcell_write, | ||
| 62 | .llseek = no_llseek, | ||
| 63 | }; | ||
| 64 | |||
| 65 | static void *afs_proc_cell_volumes_start(struct seq_file *p, loff_t *pos); | ||
| 66 | static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v, | ||
| 67 | loff_t *pos); | ||
| 68 | static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v); | ||
| 69 | static int afs_proc_cell_volumes_show(struct seq_file *m, void *v); | ||
| 70 | |||
| 71 | static const struct seq_operations afs_proc_cell_volumes_ops = { | ||
| 72 | .start = afs_proc_cell_volumes_start, | ||
| 73 | .next = afs_proc_cell_volumes_next, | ||
| 74 | .stop = afs_proc_cell_volumes_stop, | ||
| 75 | .show = afs_proc_cell_volumes_show, | ||
| 76 | }; | ||
| 77 | |||
| 78 | static void *afs_proc_cell_vlservers_start(struct seq_file *p, loff_t *pos); | ||
| 79 | static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v, | ||
| 80 | loff_t *pos); | ||
| 81 | static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v); | ||
| 82 | static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v); | ||
| 83 | |||
| 84 | static const struct seq_operations afs_proc_cell_vlservers_ops = { | ||
| 85 | .start = afs_proc_cell_vlservers_start, | ||
| 86 | .next = afs_proc_cell_vlservers_next, | ||
| 87 | .stop = afs_proc_cell_vlservers_stop, | ||
| 88 | .show = afs_proc_cell_vlservers_show, | ||
| 89 | }; | ||
| 90 | |||
| 91 | static void *afs_proc_servers_start(struct seq_file *p, loff_t *pos); | ||
| 92 | static void *afs_proc_servers_next(struct seq_file *p, void *v, | ||
| 93 | loff_t *pos); | ||
| 94 | static void afs_proc_servers_stop(struct seq_file *p, void *v); | ||
| 95 | static int afs_proc_servers_show(struct seq_file *m, void *v); | ||
| 96 | |||
| 97 | static const struct seq_operations afs_proc_servers_ops = { | ||
| 98 | .start = afs_proc_servers_start, | ||
| 99 | .next = afs_proc_servers_next, | ||
| 100 | .stop = afs_proc_servers_stop, | ||
| 101 | .show = afs_proc_servers_show, | ||
| 102 | }; | ||
| 103 | |||
| 104 | static int afs_proc_sysname_open(struct inode *inode, struct file *file); | ||
| 105 | static int afs_proc_sysname_release(struct inode *inode, struct file *file); | ||
| 106 | static void *afs_proc_sysname_start(struct seq_file *p, loff_t *pos); | ||
| 107 | static void *afs_proc_sysname_next(struct seq_file *p, void *v, | ||
| 108 | loff_t *pos); | ||
| 109 | static void afs_proc_sysname_stop(struct seq_file *p, void *v); | ||
| 110 | static int afs_proc_sysname_show(struct seq_file *m, void *v); | ||
| 111 | static ssize_t afs_proc_sysname_write(struct file *file, | ||
| 112 | const char __user *buf, | ||
| 113 | size_t size, loff_t *_pos); | ||
| 114 | |||
| 115 | static const struct seq_operations afs_proc_sysname_ops = { | ||
| 116 | .start = afs_proc_sysname_start, | ||
| 117 | .next = afs_proc_sysname_next, | ||
| 118 | .stop = afs_proc_sysname_stop, | ||
| 119 | .show = afs_proc_sysname_show, | ||
| 120 | }; | ||
| 121 | |||
| 122 | static const struct file_operations afs_proc_sysname_fops = { | ||
| 123 | .open = afs_proc_sysname_open, | ||
| 124 | .read = seq_read, | ||
| 125 | .llseek = seq_lseek, | ||
| 126 | .release = afs_proc_sysname_release, | ||
| 127 | .write = afs_proc_sysname_write, | ||
| 128 | }; | ||
| 129 | |||
| 130 | static int afs_proc_stats_show(struct seq_file *m, void *v); | ||
| 131 | |||
| 132 | /* | 30 | /* |
| 133 | * initialise the /proc/fs/afs/ directory | 31 | * Display the list of cells known to the namespace. |
| 134 | */ | 32 | */ |
| 135 | int afs_proc_init(struct afs_net *net) | 33 | static int afs_proc_cells_show(struct seq_file *m, void *v) |
| 136 | { | 34 | { |
| 137 | _enter(""); | 35 | struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link); |
| 138 | 36 | struct afs_net *net = afs_seq2net(m); | |
| 139 | net->proc_afs = proc_mkdir("fs/afs", NULL); | ||
| 140 | if (!net->proc_afs) | ||
| 141 | goto error_dir; | ||
| 142 | 37 | ||
| 143 | if (!proc_create("cells", 0644, net->proc_afs, &afs_proc_cells_fops) || | 38 | if (v == &net->proc_cells) { |
| 144 | !proc_create("rootcell", 0644, net->proc_afs, &afs_proc_rootcell_fops) || | 39 | /* display header on line 1 */ |
| 145 | !proc_create_seq("servers", 0644, net->proc_afs, &afs_proc_servers_ops) || | 40 | seq_puts(m, "USE NAME\n"); |
| 146 | !proc_create_single("stats", 0644, net->proc_afs, afs_proc_stats_show) || | 41 | return 0; |
| 147 | !proc_create("sysname", 0644, net->proc_afs, &afs_proc_sysname_fops)) | 42 | } |
| 148 | goto error_tree; | ||
| 149 | 43 | ||
| 150 | _leave(" = 0"); | 44 | /* display one cell per line on subsequent lines */ |
| 45 | seq_printf(m, "%3u %s\n", atomic_read(&cell->usage), cell->name); | ||
| 151 | return 0; | 46 | return 0; |
| 152 | |||
| 153 | error_tree: | ||
| 154 | proc_remove(net->proc_afs); | ||
| 155 | error_dir: | ||
| 156 | _leave(" = -ENOMEM"); | ||
| 157 | return -ENOMEM; | ||
| 158 | } | ||
| 159 | |||
| 160 | /* | ||
| 161 | * clean up the /proc/fs/afs/ directory | ||
| 162 | */ | ||
| 163 | void afs_proc_cleanup(struct afs_net *net) | ||
| 164 | { | ||
| 165 | proc_remove(net->proc_afs); | ||
| 166 | net->proc_afs = NULL; | ||
| 167 | } | ||
| 168 | |||
| 169 | /* | ||
| 170 | * open "/proc/fs/afs/cells" which provides a summary of extant cells | ||
| 171 | */ | ||
| 172 | static int afs_proc_cells_open(struct inode *inode, struct file *file) | ||
| 173 | { | ||
| 174 | return seq_open(file, &afs_proc_cells_ops); | ||
| 175 | } | 47 | } |
| 176 | 48 | ||
| 177 | /* | ||
| 178 | * set up the iterator to start reading from the cells list and return the | ||
| 179 | * first item | ||
| 180 | */ | ||
| 181 | static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos) | 49 | static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos) |
| 182 | __acquires(rcu) | 50 | __acquires(rcu) |
| 183 | { | 51 | { |
| 184 | struct afs_net *net = afs_seq2net(m); | ||
| 185 | |||
| 186 | rcu_read_lock(); | 52 | rcu_read_lock(); |
| 187 | return seq_list_start_head(&net->proc_cells, *_pos); | 53 | return seq_list_start_head(&afs_seq2net(m)->proc_cells, *_pos); |
| 188 | } | 54 | } |
| 189 | 55 | ||
| 190 | /* | ||
| 191 | * move to next cell in cells list | ||
| 192 | */ | ||
| 193 | static void *afs_proc_cells_next(struct seq_file *m, void *v, loff_t *pos) | 56 | static void *afs_proc_cells_next(struct seq_file *m, void *v, loff_t *pos) |
| 194 | { | 57 | { |
| 195 | struct afs_net *net = afs_seq2net(m); | 58 | return seq_list_next(v, &afs_seq2net(m)->proc_cells, pos); |
| 196 | |||
| 197 | return seq_list_next(v, &net->proc_cells, pos); | ||
| 198 | } | 59 | } |
| 199 | 60 | ||
| 200 | /* | ||
| 201 | * clean up after reading from the cells list | ||
| 202 | */ | ||
| 203 | static void afs_proc_cells_stop(struct seq_file *m, void *v) | 61 | static void afs_proc_cells_stop(struct seq_file *m, void *v) |
| 204 | __releases(rcu) | 62 | __releases(rcu) |
| 205 | { | 63 | { |
| 206 | rcu_read_unlock(); | 64 | rcu_read_unlock(); |
| 207 | } | 65 | } |
| 208 | 66 | ||
| 209 | /* | 67 | static const struct seq_operations afs_proc_cells_ops = { |
| 210 | * display a header line followed by a load of cell lines | 68 | .start = afs_proc_cells_start, |
| 211 | */ | 69 | .next = afs_proc_cells_next, |
| 212 | static int afs_proc_cells_show(struct seq_file *m, void *v) | 70 | .stop = afs_proc_cells_stop, |
| 213 | { | 71 | .show = afs_proc_cells_show, |
| 214 | struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link); | 72 | }; |
| 215 | struct afs_net *net = afs_seq2net(m); | ||
| 216 | |||
| 217 | if (v == &net->proc_cells) { | ||
| 218 | /* display header on line 1 */ | ||
| 219 | seq_puts(m, "USE NAME\n"); | ||
| 220 | return 0; | ||
| 221 | } | ||
| 222 | |||
| 223 | /* display one cell per line on subsequent lines */ | ||
| 224 | seq_printf(m, "%3u %s\n", atomic_read(&cell->usage), cell->name); | ||
| 225 | return 0; | ||
| 226 | } | ||
| 227 | 73 | ||
| 228 | /* | 74 | /* |
| 229 | * handle writes to /proc/fs/afs/cells | 75 | * handle writes to /proc/fs/afs/cells |
| 230 | * - to add cells: echo "add <cellname> <IP>[:<IP>][:<IP>]" | 76 | * - to add cells: echo "add <cellname> <IP>[:<IP>][:<IP>]" |
| 231 | */ | 77 | */ |
| 232 | static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf, | 78 | static int afs_proc_cells_write(struct file *file, char *buf, size_t size) |
| 233 | size_t size, loff_t *_pos) | ||
| 234 | { | 79 | { |
| 235 | struct afs_net *net = afs_proc2net(file); | 80 | struct seq_file *m = file->private_data; |
| 236 | char *kbuf, *name, *args; | 81 | struct afs_net *net = afs_seq2net(m); |
| 82 | char *name, *args; | ||
| 237 | int ret; | 83 | int ret; |
| 238 | 84 | ||
| 239 | /* start by dragging the command into memory */ | ||
| 240 | if (size <= 1 || size >= PAGE_SIZE) | ||
| 241 | return -EINVAL; | ||
| 242 | |||
| 243 | kbuf = memdup_user_nul(buf, size); | ||
| 244 | if (IS_ERR(kbuf)) | ||
| 245 | return PTR_ERR(kbuf); | ||
| 246 | |||
| 247 | /* trim to first NL */ | 85 | /* trim to first NL */ |
| 248 | name = memchr(kbuf, '\n', size); | 86 | name = memchr(buf, '\n', size); |
| 249 | if (name) | 87 | if (name) |
| 250 | *name = 0; | 88 | *name = 0; |
| 251 | 89 | ||
| 252 | /* split into command, name and argslist */ | 90 | /* split into command, name and argslist */ |
| 253 | name = strchr(kbuf, ' '); | 91 | name = strchr(buf, ' '); |
| 254 | if (!name) | 92 | if (!name) |
| 255 | goto inval; | 93 | goto inval; |
| 256 | do { | 94 | do { |
| @@ -269,9 +107,9 @@ static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf, | |||
| 269 | goto inval; | 107 | goto inval; |
| 270 | 108 | ||
| 271 | /* determine command to perform */ | 109 | /* determine command to perform */ |
| 272 | _debug("cmd=%s name=%s args=%s", kbuf, name, args); | 110 | _debug("cmd=%s name=%s args=%s", buf, name, args); |
| 273 | 111 | ||
| 274 | if (strcmp(kbuf, "add") == 0) { | 112 | if (strcmp(buf, "add") == 0) { |
| 275 | struct afs_cell *cell; | 113 | struct afs_cell *cell; |
| 276 | 114 | ||
| 277 | cell = afs_lookup_cell(net, name, strlen(name), args, true); | 115 | cell = afs_lookup_cell(net, name, strlen(name), args, true); |
| @@ -287,10 +125,9 @@ static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf, | |||
| 287 | goto inval; | 125 | goto inval; |
| 288 | } | 126 | } |
| 289 | 127 | ||
| 290 | ret = size; | 128 | ret = 0; |
| 291 | 129 | ||
| 292 | done: | 130 | done: |
| 293 | kfree(kbuf); | ||
| 294 | _leave(" = %d", ret); | 131 | _leave(" = %d", ret); |
| 295 | return ret; | 132 | return ret; |
| 296 | 133 | ||
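The rewritten iterators recover their namespace with seq_file_net(), which only works because the /proc entries are registered (see afs_proc_init() further down) with state_size set to sizeof(struct seq_net_private). A sketch of a namespace-aware seq_file in the same shape, assuming *struct demo_net* from earlier grew a demo_items list whose members are RCU-managed *struct demo_item* entries:

  #include <linux/list.h>
  #include <linux/seq_file.h>
  #include <linux/seq_file_net.h>

  struct demo_item {
          struct list_head link;
          char name[16];
  };

  static void *demo_seq_start(struct seq_file *m, loff_t *pos)
          __acquires(rcu)
  {
          rcu_read_lock();
          return seq_list_start_head(&demo_net(seq_file_net(m))->demo_items,
                                     *pos);
  }

  static void *demo_seq_next(struct seq_file *m, void *v, loff_t *pos)
  {
          return seq_list_next(v, &demo_net(seq_file_net(m))->demo_items, pos);
  }

  static void demo_seq_stop(struct seq_file *m, void *v)
          __releases(rcu)
  {
          rcu_read_unlock();
  }

  static int demo_seq_show(struct seq_file *m, void *v)
  {
          struct demo_net *net = demo_net(seq_file_net(m));

          if (v == &net->demo_items) {
                  seq_puts(m, "NAME\n");  /* header on line 1 */
                  return 0;
          }
          seq_printf(m, "%s\n", list_entry(v, struct demo_item, link)->name);
          return 0;
  }

  static const struct seq_operations demo_seq_ops = {
          .start = demo_seq_start,
          .next  = demo_seq_next,
          .stop  = demo_seq_stop,
          .show  = demo_seq_show,
  };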
| @@ -300,200 +137,136 @@ inval: | |||
| 300 | goto done; | 137 | goto done; |
| 301 | } | 138 | } |
| 302 | 139 | ||
| 303 | static ssize_t afs_proc_rootcell_read(struct file *file, char __user *buf, | 140 | /* |
| 304 | size_t size, loff_t *_pos) | 141 | * Display the name of the current workstation cell. |
| 142 | */ | ||
| 143 | static int afs_proc_rootcell_show(struct seq_file *m, void *v) | ||
| 305 | { | 144 | { |
| 306 | struct afs_cell *cell; | 145 | struct afs_cell *cell; |
| 307 | struct afs_net *net = afs_proc2net(file); | 146 | struct afs_net *net; |
| 308 | unsigned int seq = 0; | 147 | |
| 309 | char name[AFS_MAXCELLNAME + 1]; | 148 | net = afs_seq2net_single(m); |
| 310 | int len; | 149 | if (rcu_access_pointer(net->ws_cell)) { |
| 311 | 150 | rcu_read_lock(); | |
| 312 | if (*_pos > 0) | 151 | cell = rcu_dereference(net->ws_cell); |
| 313 | return 0; | 152 | if (cell) |
| 314 | if (!net->ws_cell) | 153 | seq_printf(m, "%s\n", cell->name); |
| 315 | return 0; | 154 | rcu_read_unlock(); |
| 316 | 155 | } | |
| 317 | rcu_read_lock(); | 156 | return 0; |
| 318 | do { | ||
| 319 | read_seqbegin_or_lock(&net->cells_lock, &seq); | ||
| 320 | len = 0; | ||
| 321 | cell = rcu_dereference_raw(net->ws_cell); | ||
| 322 | if (cell) { | ||
| 323 | len = cell->name_len; | ||
| 324 | memcpy(name, cell->name, len); | ||
| 325 | } | ||
| 326 | } while (need_seqretry(&net->cells_lock, seq)); | ||
| 327 | done_seqretry(&net->cells_lock, seq); | ||
| 328 | rcu_read_unlock(); | ||
| 329 | |||
| 330 | if (!len) | ||
| 331 | return 0; | ||
| 332 | |||
| 333 | name[len++] = '\n'; | ||
| 334 | if (len > size) | ||
| 335 | len = size; | ||
| 336 | if (copy_to_user(buf, name, len) != 0) | ||
| 337 | return -EFAULT; | ||
| 338 | *_pos = 1; | ||
| 339 | return len; | ||
| 340 | } | 157 | } |
| 341 | 158 | ||
| 342 | /* | 159 | /* |
| 343 | * handle writes to /proc/fs/afs/rootcell | 160 | * Set the current workstation cell and optionally supply its list of volume |
| 344 | * - to initialize rootcell: echo "cell.name:192.168.231.14" | 161 | * location servers. |
| 162 | * | ||
| 163 | * echo "cell.name:192.168.231.14" >/proc/fs/afs/rootcell | ||
| 345 | */ | 164 | */ |
| 346 | static ssize_t afs_proc_rootcell_write(struct file *file, | 165 | static int afs_proc_rootcell_write(struct file *file, char *buf, size_t size) |
| 347 | const char __user *buf, | ||
| 348 | size_t size, loff_t *_pos) | ||
| 349 | { | 166 | { |
| 350 | struct afs_net *net = afs_proc2net(file); | 167 | struct seq_file *m = file->private_data; |
| 351 | char *kbuf, *s; | 168 | struct afs_net *net = afs_seq2net_single(m); |
| 169 | char *s; | ||
| 352 | int ret; | 170 | int ret; |
| 353 | 171 | ||
| 354 | /* start by dragging the command into memory */ | ||
| 355 | if (size <= 1 || size >= PAGE_SIZE) | ||
| 356 | return -EINVAL; | ||
| 357 | |||
| 358 | kbuf = memdup_user_nul(buf, size); | ||
| 359 | if (IS_ERR(kbuf)) | ||
| 360 | return PTR_ERR(kbuf); | ||
| 361 | |||
| 362 | ret = -EINVAL; | 172 | ret = -EINVAL; |
| 363 | if (kbuf[0] == '.') | 173 | if (buf[0] == '.') |
| 364 | goto out; | 174 | goto out; |
| 365 | if (memchr(kbuf, '/', size)) | 175 | if (memchr(buf, '/', size)) |
| 366 | goto out; | 176 | goto out; |
| 367 | 177 | ||
| 368 | /* trim to first NL */ | 178 | /* trim to first NL */ |
| 369 | s = memchr(kbuf, '\n', size); | 179 | s = memchr(buf, '\n', size); |
| 370 | if (s) | 180 | if (s) |
| 371 | *s = 0; | 181 | *s = 0; |
| 372 | 182 | ||
| 373 | /* determine command to perform */ | 183 | /* determine command to perform */ |
| 374 | _debug("rootcell=%s", kbuf); | 184 | _debug("rootcell=%s", buf); |
| 375 | 185 | ||
| 376 | ret = afs_cell_init(net, kbuf); | 186 | ret = afs_cell_init(net, buf); |
| 377 | if (ret >= 0) | ||
| 378 | ret = size; /* consume everything, always */ | ||
| 379 | 187 | ||
| 380 | out: | 188 | out: |
| 381 | kfree(kbuf); | ||
| 382 | _leave(" = %d", ret); | 189 | _leave(" = %d", ret); |
| 383 | return ret; | 190 | return ret; |
| 384 | } | 191 | } |
| 385 | 192 | ||
| 193 | static const char afs_vol_types[3][3] = { | ||
| 194 | [AFSVL_RWVOL] = "RW", | ||
| 195 | [AFSVL_ROVOL] = "RO", | ||
| 196 | [AFSVL_BACKVOL] = "BK", | ||
| 197 | }; | ||
| 198 | |||
| 386 | /* | 199 | /* |
| 387 | * initialise /proc/fs/afs/<cell>/ | 200 | * Display the list of volumes known to a cell. |
| 388 | */ | 201 | */ |
| 389 | int afs_proc_cell_setup(struct afs_net *net, struct afs_cell *cell) | 202 | static int afs_proc_cell_volumes_show(struct seq_file *m, void *v) |
| 390 | { | 203 | { |
| 391 | struct proc_dir_entry *dir; | 204 | struct afs_cell *cell = PDE_DATA(file_inode(m->file)); |
| 392 | 205 | struct afs_volume *vol = list_entry(v, struct afs_volume, proc_link); | |
| 393 | _enter("%p{%s},%p", cell, cell->name, net->proc_afs); | ||
| 394 | 206 | ||
| 395 | dir = proc_mkdir(cell->name, net->proc_afs); | 207 | /* Display header on line 1 */ |
| 396 | if (!dir) | 208 | if (v == &cell->proc_volumes) { |
| 397 | goto error_dir; | 209 | seq_puts(m, "USE VID TY\n"); |
| 210 | return 0; | ||
| 211 | } | ||
| 398 | 212 | ||
| 399 | if (!proc_create_seq_data("vlservers", 0, dir, | 213 | seq_printf(m, "%3d %08x %s\n", |
| 400 | &afs_proc_cell_vlservers_ops, cell)) | 214 | atomic_read(&vol->usage), vol->vid, |
| 401 | goto error_tree; | 215 | afs_vol_types[vol->type]); |
| 402 | if (!proc_create_seq_data("volumes", 0, dir, &afs_proc_cell_volumes_ops, | ||
| 403 | cell)) | ||
| 404 | goto error_tree; | ||
| 405 | 216 | ||
| 406 | _leave(" = 0"); | ||
| 407 | return 0; | 217 | return 0; |
| 408 | |||
| 409 | error_tree: | ||
| 410 | remove_proc_subtree(cell->name, net->proc_afs); | ||
| 411 | error_dir: | ||
| 412 | _leave(" = -ENOMEM"); | ||
| 413 | return -ENOMEM; | ||
| 414 | } | 218 | } |
| 415 | 219 | ||
| 416 | /* | ||
| 417 | * remove /proc/fs/afs/<cell>/ | ||
| 418 | */ | ||
| 419 | void afs_proc_cell_remove(struct afs_net *net, struct afs_cell *cell) | ||
| 420 | { | ||
| 421 | _enter(""); | ||
| 422 | |||
| 423 | remove_proc_subtree(cell->name, net->proc_afs); | ||
| 424 | |||
| 425 | _leave(""); | ||
| 426 | } | ||
| 427 | |||
| 428 | /* | ||
| 429 | * set up the iterator to start reading from the cells list and return the | ||
| 430 | * first item | ||
| 431 | */ | ||
| 432 | static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos) | 220 | static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos) |
| 433 | __acquires(cell->proc_lock) | 221 | __acquires(cell->proc_lock) |
| 434 | { | 222 | { |
| 435 | struct afs_cell *cell = PDE_DATA(file_inode(m->file)); | 223 | struct afs_cell *cell = PDE_DATA(file_inode(m->file)); |
| 436 | 224 | ||
| 437 | _enter("cell=%p pos=%Ld", cell, *_pos); | ||
| 438 | |||
| 439 | read_lock(&cell->proc_lock); | 225 | read_lock(&cell->proc_lock); |
| 440 | return seq_list_start_head(&cell->proc_volumes, *_pos); | 226 | return seq_list_start_head(&cell->proc_volumes, *_pos); |
| 441 | } | 227 | } |
| 442 | 228 | ||
| 443 | /* | 229 | static void *afs_proc_cell_volumes_next(struct seq_file *m, void *v, |
| 444 | * move to next cell in cells list | ||
| 445 | */ | ||
| 446 | static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v, | ||
| 447 | loff_t *_pos) | 230 | loff_t *_pos) |
| 448 | { | 231 | { |
| 449 | struct afs_cell *cell = PDE_DATA(file_inode(p->file)); | 232 | struct afs_cell *cell = PDE_DATA(file_inode(m->file)); |
| 450 | 233 | ||
| 451 | _enter("cell=%p pos=%Ld", cell, *_pos); | ||
| 452 | return seq_list_next(v, &cell->proc_volumes, _pos); | 234 | return seq_list_next(v, &cell->proc_volumes, _pos); |
| 453 | } | 235 | } |
| 454 | 236 | ||
| 455 | /* | 237 | static void afs_proc_cell_volumes_stop(struct seq_file *m, void *v) |
| 456 | * clean up after reading from the cells list | ||
| 457 | */ | ||
| 458 | static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v) | ||
| 459 | __releases(cell->proc_lock) | 238 | __releases(cell->proc_lock) |
| 460 | { | 239 | { |
| 461 | struct afs_cell *cell = PDE_DATA(file_inode(p->file)); | 240 | struct afs_cell *cell = PDE_DATA(file_inode(m->file)); |
| 462 | 241 | ||
| 463 | read_unlock(&cell->proc_lock); | 242 | read_unlock(&cell->proc_lock); |
| 464 | } | 243 | } |
| 465 | 244 | ||
| 466 | static const char afs_vol_types[3][3] = { | 245 | static const struct seq_operations afs_proc_cell_volumes_ops = { |
| 467 | [AFSVL_RWVOL] = "RW", | 246 | .start = afs_proc_cell_volumes_start, |
| 468 | [AFSVL_ROVOL] = "RO", | 247 | .next = afs_proc_cell_volumes_next, |
| 469 | [AFSVL_BACKVOL] = "BK", | 248 | .stop = afs_proc_cell_volumes_stop, |
| 249 | .show = afs_proc_cell_volumes_show, | ||
| 470 | }; | 250 | }; |
| 471 | 251 | ||
| 472 | /* | 252 | /* |
| 473 | * display a header line followed by a load of volume lines | 253 | * Display the list of Volume Location servers we're using for a cell. |
| 474 | */ | 254 | */ |
| 475 | static int afs_proc_cell_volumes_show(struct seq_file *m, void *v) | 255 | static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v) |
| 476 | { | 256 | { |
| 477 | struct afs_cell *cell = PDE_DATA(file_inode(m->file)); | 257 | struct sockaddr_rxrpc *addr = v; |
| 478 | struct afs_volume *vol = list_entry(v, struct afs_volume, proc_link); | ||
| 479 | 258 | ||
| 480 | /* Display header on line 1 */ | 259 | /* display header on line 1 */ |
| 481 | if (v == &cell->proc_volumes) { | 260 | if (v == (void *)1) { |
| 482 | seq_puts(m, "USE VID TY\n"); | 261 | seq_puts(m, "ADDRESS\n"); |
| 483 | return 0; | 262 | return 0; |
| 484 | } | 263 | } |
| 485 | 264 | ||
| 486 | seq_printf(m, "%3d %08x %s\n", | 265 | /* display one cell per line on subsequent lines */ |
| 487 | atomic_read(&vol->usage), vol->vid, | 266 | seq_printf(m, "%pISp\n", &addr->transport); |
| 488 | afs_vol_types[vol->type]); | ||
| 489 | |||
| 490 | return 0; | 267 | return 0; |
| 491 | } | 268 | } |
| 492 | 269 | ||
| 493 | /* | ||
| 494 | * set up the iterator to start reading from the cells list and return the | ||
| 495 | * first item | ||
| 496 | */ | ||
| 497 | static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos) | 270 | static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos) |
| 498 | __acquires(rcu) | 271 | __acquires(rcu) |
| 499 | { | 272 | { |
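For single-value files the same trick has a single_open() flavour: seq_file_single_net(), usable when the entry is created with proc_create_net_single() or proc_create_net_single_write() as in afs_proc_init() below. A sketch of an RCU-protected single show, assuming a hypothetical ws_item __rcu pointer added to *struct demo_net*:

  #include <linux/seq_file_net.h>

  static int demo_single_show(struct seq_file *m, void *v)
  {
          struct demo_net *net = demo_net(seq_file_single_net(m));
          struct demo_item *item;

          rcu_read_lock();
          item = rcu_dereference(net->ws_item);   /* may be NULL */
          if (item)
                  seq_printf(m, "%s\n", item->name);
          rcu_read_unlock();
          return 0;
  }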
| @@ -516,14 +289,11 @@ static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos) | |||
| 516 | return alist->addrs + pos; | 289 | return alist->addrs + pos; |
| 517 | } | 290 | } |
| 518 | 291 | ||
| 519 | /* | 292 | static void *afs_proc_cell_vlservers_next(struct seq_file *m, void *v, |
| 520 | * move to next cell in cells list | ||
| 521 | */ | ||
| 522 | static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v, | ||
| 523 | loff_t *_pos) | 293 | loff_t *_pos) |
| 524 | { | 294 | { |
| 525 | struct afs_addr_list *alist; | 295 | struct afs_addr_list *alist; |
| 526 | struct afs_cell *cell = PDE_DATA(file_inode(p->file)); | 296 | struct afs_cell *cell = PDE_DATA(file_inode(m->file)); |
| 527 | loff_t pos; | 297 | loff_t pos; |
| 528 | 298 | ||
| 529 | alist = rcu_dereference(cell->vl_addrs); | 299 | alist = rcu_dereference(cell->vl_addrs); |
| @@ -536,161 +306,145 @@ static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v, | |||
| 536 | return alist->addrs + pos; | 306 | return alist->addrs + pos; |
| 537 | } | 307 | } |
| 538 | 308 | ||
| 539 | /* | 309 | static void afs_proc_cell_vlservers_stop(struct seq_file *m, void *v) |
| 540 | * clean up after reading from the cells list | ||
| 541 | */ | ||
| 542 | static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v) | ||
| 543 | __releases(rcu) | 310 | __releases(rcu) |
| 544 | { | 311 | { |
| 545 | rcu_read_unlock(); | 312 | rcu_read_unlock(); |
| 546 | } | 313 | } |
| 547 | 314 | ||
| 315 | static const struct seq_operations afs_proc_cell_vlservers_ops = { | ||
| 316 | .start = afs_proc_cell_vlservers_start, | ||
| 317 | .next = afs_proc_cell_vlservers_next, | ||
| 318 | .stop = afs_proc_cell_vlservers_stop, | ||
| 319 | .show = afs_proc_cell_vlservers_show, | ||
| 320 | }; | ||
| 321 | |||
| 548 | /* | 322 | /* |
| 549 | * display a header line followed by a load of volume lines | 323 | * Display the list of fileservers we're using within a namespace. |
| 550 | */ | 324 | */ |
| 551 | static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v) | 325 | static int afs_proc_servers_show(struct seq_file *m, void *v) |
| 552 | { | 326 | { |
| 553 | struct sockaddr_rxrpc *addr = v; | 327 | struct afs_server *server; |
| 328 | struct afs_addr_list *alist; | ||
| 329 | int i; | ||
| 554 | 330 | ||
| 555 | /* display header on line 1 */ | 331 | if (v == SEQ_START_TOKEN) { |
| 556 | if (v == (void *)1) { | 332 | seq_puts(m, "UUID USE ADDR\n"); |
| 557 | seq_puts(m, "ADDRESS\n"); | ||
| 558 | return 0; | 333 | return 0; |
| 559 | } | 334 | } |
| 560 | 335 | ||
| 561 | /* display one cell per line on subsequent lines */ | 336 | server = list_entry(v, struct afs_server, proc_link); |
| 562 | seq_printf(m, "%pISp\n", &addr->transport); | 337 | alist = rcu_dereference(server->addresses); |
| 338 | seq_printf(m, "%pU %3d %pISpc%s\n", | ||
| 339 | &server->uuid, | ||
| 340 | atomic_read(&server->usage), | ||
| 341 | &alist->addrs[0].transport, | ||
| 342 | alist->index == 0 ? "*" : ""); | ||
| 343 | for (i = 1; i < alist->nr_addrs; i++) | ||
| 344 | seq_printf(m, " %pISpc%s\n", | ||
| 345 | &alist->addrs[i].transport, | ||
| 346 | alist->index == i ? "*" : ""); | ||
| 563 | return 0; | 347 | return 0; |
| 564 | } | 348 | } |
| 565 | 349 | ||
| 566 | /* | ||
| 567 | * Set up the iterator to start reading from the server list and return the | ||
| 568 | * first item. | ||
| 569 | */ | ||
| 570 | static void *afs_proc_servers_start(struct seq_file *m, loff_t *_pos) | 350 | static void *afs_proc_servers_start(struct seq_file *m, loff_t *_pos) |
| 571 | __acquires(rcu) | 351 | __acquires(rcu) |
| 572 | { | 352 | { |
| 573 | struct afs_net *net = afs_seq2net(m); | ||
| 574 | |||
| 575 | rcu_read_lock(); | 353 | rcu_read_lock(); |
| 576 | return seq_hlist_start_head_rcu(&net->fs_proc, *_pos); | 354 | return seq_hlist_start_head_rcu(&afs_seq2net(m)->fs_proc, *_pos); |
| 577 | } | 355 | } |
| 578 | 356 | ||
| 579 | /* | ||
| 580 | * move to next cell in cells list | ||
| 581 | */ | ||
| 582 | static void *afs_proc_servers_next(struct seq_file *m, void *v, loff_t *_pos) | 357 | static void *afs_proc_servers_next(struct seq_file *m, void *v, loff_t *_pos) |
| 583 | { | 358 | { |
| 584 | struct afs_net *net = afs_seq2net(m); | 359 | return seq_hlist_next_rcu(v, &afs_seq2net(m)->fs_proc, _pos); |
| 585 | |||
| 586 | return seq_hlist_next_rcu(v, &net->fs_proc, _pos); | ||
| 587 | } | 360 | } |
| 588 | 361 | ||
| 589 | /* | 362 | static void afs_proc_servers_stop(struct seq_file *m, void *v) |
| 590 | * clean up after reading from the cells list | ||
| 591 | */ | ||
| 592 | static void afs_proc_servers_stop(struct seq_file *p, void *v) | ||
| 593 | __releases(rcu) | 363 | __releases(rcu) |
| 594 | { | 364 | { |
| 595 | rcu_read_unlock(); | 365 | rcu_read_unlock(); |
| 596 | } | 366 | } |
| 597 | 367 | ||
| 368 | static const struct seq_operations afs_proc_servers_ops = { | ||
| 369 | .start = afs_proc_servers_start, | ||
| 370 | .next = afs_proc_servers_next, | ||
| 371 | .stop = afs_proc_servers_stop, | ||
| 372 | .show = afs_proc_servers_show, | ||
| 373 | }; | ||
| 374 | |||
| 598 | /* | 375 | /* |
| 599 | * display a header line followed by a load of volume lines | 376 | * Display the list of strings that may be substituted for the @sys pathname |
| 377 | * macro. | ||
| 600 | */ | 378 | */ |
| 601 | static int afs_proc_servers_show(struct seq_file *m, void *v) | 379 | static int afs_proc_sysname_show(struct seq_file *m, void *v) |
| 602 | { | 380 | { |
| 603 | struct afs_server *server; | 381 | struct afs_net *net = afs_seq2net(m); |
| 604 | struct afs_addr_list *alist; | 382 | struct afs_sysnames *sysnames = net->sysnames; |
| 605 | 383 | unsigned int i = (unsigned long)v - 1; | |
| 606 | if (v == SEQ_START_TOKEN) { | ||
| 607 | seq_puts(m, "UUID USE ADDR\n"); | ||
| 608 | return 0; | ||
| 609 | } | ||
| 610 | 384 | ||
| 611 | server = list_entry(v, struct afs_server, proc_link); | 385 | if (i < sysnames->nr) |
| 612 | alist = rcu_dereference(server->addresses); | 386 | seq_printf(m, "%s\n", sysnames->subs[i]); |
| 613 | seq_printf(m, "%pU %3d %pISp\n", | ||
| 614 | &server->uuid, | ||
| 615 | atomic_read(&server->usage), | ||
| 616 | &alist->addrs[alist->index].transport); | ||
| 617 | return 0; | 387 | return 0; |
| 618 | } | 388 | } |
| 619 | 389 | ||
| 620 | void afs_put_sysnames(struct afs_sysnames *sysnames) | 390 | static void *afs_proc_sysname_start(struct seq_file *m, loff_t *pos) |
| 391 | __acquires(&net->sysnames_lock) | ||
| 621 | { | 392 | { |
| 622 | int i; | 393 | struct afs_net *net = afs_seq2net(m); |
| 394 | struct afs_sysnames *names; | ||
| 623 | 395 | ||
| 624 | if (sysnames && refcount_dec_and_test(&sysnames->usage)) { | 396 | read_lock(&net->sysnames_lock); |
| 625 | for (i = 0; i < sysnames->nr; i++) | 397 | |
| 626 | if (sysnames->subs[i] != afs_init_sysname && | 398 | names = net->sysnames; |
| 627 | sysnames->subs[i] != sysnames->blank) | 399 | if (*pos >= names->nr) |
| 628 | kfree(sysnames->subs[i]); | 400 | return NULL; |
| 629 | } | 401 | return (void *)(unsigned long)(*pos + 1); |
| 630 | } | 402 | } |
| 631 | 403 | ||
| 632 | /* | 404 | static void *afs_proc_sysname_next(struct seq_file *m, void *v, loff_t *pos) |
| 633 | * Handle opening of /proc/fs/afs/sysname. If it is opened for writing, we | ||
| 634 | * assume the caller wants to change the substitution list and we allocate a | ||
| 635 | * buffer to hold the list. | ||
| 636 | */ | ||
| 637 | static int afs_proc_sysname_open(struct inode *inode, struct file *file) | ||
| 638 | { | 405 | { |
| 639 | struct afs_sysnames *sysnames; | 406 | struct afs_net *net = afs_seq2net(m); |
| 640 | struct seq_file *m; | 407 | struct afs_sysnames *names = net->sysnames; |
| 641 | int ret; | ||
| 642 | |||
| 643 | ret = seq_open(file, &afs_proc_sysname_ops); | ||
| 644 | if (ret < 0) | ||
| 645 | return ret; | ||
| 646 | 408 | ||
| 647 | if (file->f_mode & FMODE_WRITE) { | 409 | *pos += 1; |
| 648 | sysnames = kzalloc(sizeof(*sysnames), GFP_KERNEL); | 410 | if (*pos >= names->nr) |
| 649 | if (!sysnames) { | 411 | return NULL; |
| 650 | seq_release(inode, file); | 412 | return (void *)(unsigned long)(*pos + 1); |
| 651 | return -ENOMEM; | 413 | } |
| 652 | } | ||
| 653 | 414 | ||
| 654 | refcount_set(&sysnames->usage, 1); | 415 | static void afs_proc_sysname_stop(struct seq_file *m, void *v) |
| 655 | m = file->private_data; | 416 | __releases(&net->sysnames_lock) |
| 656 | m->private = sysnames; | 417 | { |
| 657 | } | 418 | struct afs_net *net = afs_seq2net(m); |
| 658 | 419 | ||
| 659 | return 0; | 420 | read_unlock(&net->sysnames_lock); |
| 660 | } | 421 | } |
| 661 | 422 | ||
| 423 | static const struct seq_operations afs_proc_sysname_ops = { | ||
| 424 | .start = afs_proc_sysname_start, | ||
| 425 | .next = afs_proc_sysname_next, | ||
| 426 | .stop = afs_proc_sysname_stop, | ||
| 427 | .show = afs_proc_sysname_show, | ||
| 428 | }; | ||
| 429 | |||
| 662 | /* | 430 | /* |
| 663 | * Handle writes to /proc/fs/afs/sysname to set the @sys substitution. | 431 | * Allow the @sys substitution to be configured. |
| 664 | */ | 432 | */ |
| 665 | static ssize_t afs_proc_sysname_write(struct file *file, | 433 | static int afs_proc_sysname_write(struct file *file, char *buf, size_t size) |
| 666 | const char __user *buf, | ||
| 667 | size_t size, loff_t *_pos) | ||
| 668 | { | 434 | { |
| 669 | struct afs_sysnames *sysnames; | 435 | struct afs_sysnames *sysnames, *kill; |
| 670 | struct seq_file *m = file->private_data; | 436 | struct seq_file *m = file->private_data; |
| 671 | char *kbuf = NULL, *s, *p, *sub; | 437 | struct afs_net *net = afs_seq2net(m); |
| 438 | char *s, *p, *sub; | ||
| 672 | int ret, len; | 439 | int ret, len; |
| 673 | 440 | ||
| 674 | sysnames = m->private; | 441 | sysnames = kzalloc(sizeof(*sysnames), GFP_KERNEL); |
| 675 | if (!sysnames) | 442 | if (!sysnames) |
| 676 | return -EINVAL; | 443 | return -ENOMEM; |
| 677 | if (sysnames->error) | 444 | refcount_set(&sysnames->usage, 1); |
| 678 | return sysnames->error; | 445 | kill = sysnames; |
| 679 | |||
| 680 | if (size >= PAGE_SIZE - 1) { | ||
| 681 | sysnames->error = -EINVAL; | ||
| 682 | return -EINVAL; | ||
| 683 | } | ||
| 684 | if (size == 0) | ||
| 685 | return 0; | ||
| 686 | |||
| 687 | kbuf = memdup_user_nul(buf, size); | ||
| 688 | if (IS_ERR(kbuf)) | ||
| 689 | return PTR_ERR(kbuf); | ||
| 690 | |||
| 691 | inode_lock(file_inode(file)); | ||
| 692 | 446 | ||
| 693 | p = kbuf; | 447 | p = buf; |
| 694 | while ((s = strsep(&p, " \t\n"))) { | 448 | while ((s = strsep(&p, " \t\n"))) { |
| 695 | len = strlen(s); | 449 | len = strlen(s); |
| 696 | if (len == 0) | 450 | if (len == 0) |
| @@ -731,85 +485,36 @@ static ssize_t afs_proc_sysname_write(struct file *file, | |||
| 731 | sysnames->nr++; | 485 | sysnames->nr++; |
| 732 | } | 486 | } |
| 733 | 487 | ||
| 734 | ret = size; /* consume everything, always */ | 488 | if (sysnames->nr == 0) { |
| 489 | sysnames->subs[0] = sysnames->blank; | ||
| 490 | sysnames->nr++; | ||
| 491 | } | ||
| 492 | |||
| 493 | write_lock(&net->sysnames_lock); | ||
| 494 | kill = net->sysnames; | ||
| 495 | net->sysnames = sysnames; | ||
| 496 | write_unlock(&net->sysnames_lock); | ||
| 497 | ret = 0; | ||
| 735 | out: | 498 | out: |
| 736 | inode_unlock(file_inode(file)); | 499 | afs_put_sysnames(kill); |
| 737 | kfree(kbuf); | ||
| 738 | return ret; | 500 | return ret; |
| 739 | 501 | ||
| 740 | invalid: | 502 | invalid: |
| 741 | ret = -EINVAL; | 503 | ret = -EINVAL; |
| 742 | error: | 504 | error: |
| 743 | sysnames->error = ret; | ||
| 744 | goto out; | 505 | goto out; |
| 745 | } | 506 | } |
| 746 | 507 | ||
| 747 | static int afs_proc_sysname_release(struct inode *inode, struct file *file) | 508 | void afs_put_sysnames(struct afs_sysnames *sysnames) |
| 748 | { | 509 | { |
| 749 | struct afs_sysnames *sysnames, *kill = NULL; | 510 | int i; |
| 750 | struct seq_file *m = file->private_data; | ||
| 751 | struct afs_net *net = afs_seq2net(m); | ||
| 752 | 511 | ||
| 753 | sysnames = m->private; | 512 | if (sysnames && refcount_dec_and_test(&sysnames->usage)) { |
| 754 | if (sysnames) { | 513 | for (i = 0; i < sysnames->nr; i++) |
| 755 | if (!sysnames->error) { | 514 | if (sysnames->subs[i] != afs_init_sysname && |
| 756 | kill = sysnames; | 515 | sysnames->subs[i] != sysnames->blank) |
| 757 | if (sysnames->nr == 0) { | 516 | kfree(sysnames->subs[i]); |
| 758 | sysnames->subs[0] = sysnames->blank; | ||
| 759 | sysnames->nr++; | ||
| 760 | } | ||
| 761 | write_lock(&net->sysnames_lock); | ||
| 762 | kill = net->sysnames; | ||
| 763 | net->sysnames = sysnames; | ||
| 764 | write_unlock(&net->sysnames_lock); | ||
| 765 | } | ||
| 766 | afs_put_sysnames(kill); | ||
| 767 | } | 517 | } |
| 768 | |||
| 769 | return seq_release(inode, file); | ||
| 770 | } | ||
| 771 | |||
| 772 | static void *afs_proc_sysname_start(struct seq_file *m, loff_t *pos) | ||
| 773 | __acquires(&net->sysnames_lock) | ||
| 774 | { | ||
| 775 | struct afs_net *net = afs_seq2net(m); | ||
| 776 | struct afs_sysnames *names = net->sysnames; | ||
| 777 | |||
| 778 | read_lock(&net->sysnames_lock); | ||
| 779 | |||
| 780 | if (*pos >= names->nr) | ||
| 781 | return NULL; | ||
| 782 | return (void *)(unsigned long)(*pos + 1); | ||
| 783 | } | ||
| 784 | |||
| 785 | static void *afs_proc_sysname_next(struct seq_file *m, void *v, loff_t *pos) | ||
| 786 | { | ||
| 787 | struct afs_net *net = afs_seq2net(m); | ||
| 788 | struct afs_sysnames *names = net->sysnames; | ||
| 789 | |||
| 790 | *pos += 1; | ||
| 791 | if (*pos >= names->nr) | ||
| 792 | return NULL; | ||
| 793 | return (void *)(unsigned long)(*pos + 1); | ||
| 794 | } | ||
| 795 | |||
| 796 | static void afs_proc_sysname_stop(struct seq_file *m, void *v) | ||
| 797 | __releases(&net->sysnames_lock) | ||
| 798 | { | ||
| 799 | struct afs_net *net = afs_seq2net(m); | ||
| 800 | |||
| 801 | read_unlock(&net->sysnames_lock); | ||
| 802 | } | ||
| 803 | |||
| 804 | static int afs_proc_sysname_show(struct seq_file *m, void *v) | ||
| 805 | { | ||
| 806 | struct afs_net *net = afs_seq2net(m); | ||
| 807 | struct afs_sysnames *sysnames = net->sysnames; | ||
| 808 | unsigned int i = (unsigned long)v - 1; | ||
| 809 | |||
| 810 | if (i < sysnames->nr) | ||
| 811 | seq_printf(m, "%s\n", sysnames->subs[i]); | ||
| 812 | return 0; | ||
| 813 | } | 518 | } |
| 814 | 519 | ||
| 815 | /* | 520 | /* |
| @@ -817,7 +522,7 @@ static int afs_proc_sysname_show(struct seq_file *m, void *v) | |||
| 817 | */ | 522 | */ |
| 818 | static int afs_proc_stats_show(struct seq_file *m, void *v) | 523 | static int afs_proc_stats_show(struct seq_file *m, void *v) |
| 819 | { | 524 | { |
| 820 | struct afs_net *net = afs_seq2net(m); | 525 | struct afs_net *net = afs_seq2net_single(m); |
| 821 | 526 | ||
| 822 | seq_puts(m, "kAFS statistics\n"); | 527 | seq_puts(m, "kAFS statistics\n"); |
| 823 | 528 | ||
| @@ -842,3 +547,101 @@ static int afs_proc_stats_show(struct seq_file *m, void *v) | |||
| 842 | atomic_long_read(&net->n_store_bytes)); | 547 | atomic_long_read(&net->n_store_bytes)); |
| 843 | return 0; | 548 | return 0; |
| 844 | } | 549 | } |
| 550 | |||
| 551 | /* | ||
| 552 | * initialise /proc/fs/afs/<cell>/ | ||
| 553 | */ | ||
| 554 | int afs_proc_cell_setup(struct afs_cell *cell) | ||
| 555 | { | ||
| 556 | struct proc_dir_entry *dir; | ||
| 557 | struct afs_net *net = cell->net; | ||
| 558 | |||
| 559 | _enter("%p{%s},%p", cell, cell->name, net->proc_afs); | ||
| 560 | |||
| 561 | dir = proc_net_mkdir(net->net, cell->name, net->proc_afs); | ||
| 562 | if (!dir) | ||
| 563 | goto error_dir; | ||
| 564 | |||
| 565 | if (!proc_create_net_data("vlservers", 0444, dir, | ||
| 566 | &afs_proc_cell_vlservers_ops, | ||
| 567 | sizeof(struct seq_net_private), | ||
| 568 | cell) || | ||
| 569 | !proc_create_net_data("volumes", 0444, dir, | ||
| 570 | &afs_proc_cell_volumes_ops, | ||
| 571 | sizeof(struct seq_net_private), | ||
| 572 | cell)) | ||
| 573 | goto error_tree; | ||
| 574 | |||
| 575 | _leave(" = 0"); | ||
| 576 | return 0; | ||
| 577 | |||
| 578 | error_tree: | ||
| 579 | remove_proc_subtree(cell->name, net->proc_afs); | ||
| 580 | error_dir: | ||
| 581 | _leave(" = -ENOMEM"); | ||
| 582 | return -ENOMEM; | ||
| 583 | } | ||
| 584 | |||
| 585 | /* | ||
| 586 | * remove /proc/fs/afs/<cell>/ | ||
| 587 | */ | ||
| 588 | void afs_proc_cell_remove(struct afs_cell *cell) | ||
| 589 | { | ||
| 590 | struct afs_net *net = cell->net; | ||
| 591 | |||
| 592 | _enter(""); | ||
| 593 | remove_proc_subtree(cell->name, net->proc_afs); | ||
| 594 | _leave(""); | ||
| 595 | } | ||
| 596 | |||
| 597 | /* | ||
| 598 | * initialise the /proc/fs/afs/ directory | ||
| 599 | */ | ||
| 600 | int afs_proc_init(struct afs_net *net) | ||
| 601 | { | ||
| 602 | struct proc_dir_entry *p; | ||
| 603 | |||
| 604 | _enter(""); | ||
| 605 | |||
| 606 | p = proc_net_mkdir(net->net, "afs", net->net->proc_net); | ||
| 607 | if (!p) | ||
| 608 | goto error_dir; | ||
| 609 | |||
| 610 | if (!proc_create_net_data_write("cells", 0644, p, | ||
| 611 | &afs_proc_cells_ops, | ||
| 612 | afs_proc_cells_write, | ||
| 613 | sizeof(struct seq_net_private), | ||
| 614 | NULL) || | ||
| 615 | !proc_create_net_single_write("rootcell", 0644, p, | ||
| 616 | afs_proc_rootcell_show, | ||
| 617 | afs_proc_rootcell_write, | ||
| 618 | NULL) || | ||
| 619 | !proc_create_net("servers", 0444, p, &afs_proc_servers_ops, | ||
| 620 | sizeof(struct seq_net_private)) || | ||
| 621 | !proc_create_net_single("stats", 0444, p, afs_proc_stats_show, NULL) || | ||
| 622 | !proc_create_net_data_write("sysname", 0644, p, | ||
| 623 | &afs_proc_sysname_ops, | ||
| 624 | afs_proc_sysname_write, | ||
| 625 | sizeof(struct seq_net_private), | ||
| 626 | NULL)) | ||
| 627 | goto error_tree; | ||
| 628 | |||
| 629 | net->proc_afs = p; | ||
| 630 | _leave(" = 0"); | ||
| 631 | return 0; | ||
| 632 | |||
| 633 | error_tree: | ||
| 634 | proc_remove(p); | ||
| 635 | error_dir: | ||
| 636 | _leave(" = -ENOMEM"); | ||
| 637 | return -ENOMEM; | ||
| 638 | } | ||
| 639 | |||
| 640 | /* | ||
| 641 | * clean up the /proc/fs/afs/ directory | ||
| 642 | */ | ||
| 643 | void afs_proc_cleanup(struct afs_net *net) | ||
| 644 | { | ||
| 645 | proc_remove(net->proc_afs); | ||
| 646 | net->proc_afs = NULL; | ||
| 647 | } | ||
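All of the registration above goes through the 4.18-era proc_create_net*() helpers, which pair a seq_operations table with per-open network-namespace state; the *_write variants take an extra proc_write_t handler which, as the handlers above assume, receives a NUL-terminated kernel copy of the user's buffer and returns 0 or a negative errno. A condensed sketch reusing the hypothetical demo_seq_ops from earlier and an assumed demo_write handler:

  #include <linux/proc_fs.h>
  #include <linux/seq_file_net.h>

  static int demo_proc_init(struct net *net)
  {
          struct proc_dir_entry *dir;

          /* appears as /proc/net/demo, i.e. /proc/<pid>/net/demo */
          dir = proc_net_mkdir(net, "demo", net->proc_net);
          if (!dir)
                  return -ENOMEM;

          if (!proc_create_net_data_write("items", 0644, dir,
                                          &demo_seq_ops, demo_write,
                                          sizeof(struct seq_net_private),
                                          NULL)) {
                  proc_remove(dir);
                  return -ENOMEM;
          }
          return 0;
  }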
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 08735948f15d..a1b18082991b 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c | |||
| @@ -46,7 +46,7 @@ int afs_open_socket(struct afs_net *net) | |||
| 46 | 46 | ||
| 47 | _enter(""); | 47 | _enter(""); |
| 48 | 48 | ||
| 49 | ret = sock_create_kern(&init_net, AF_RXRPC, SOCK_DGRAM, PF_INET6, &socket); | 49 | ret = sock_create_kern(net->net, AF_RXRPC, SOCK_DGRAM, PF_INET6, &socket); |
| 50 | if (ret < 0) | 50 | if (ret < 0) |
| 51 | goto error_1; | 51 | goto error_1; |
| 52 | 52 | ||
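rxrpc.c gets the matching one-liner: the kernel socket is created against the namespace's *struct net*, tying its traffic and lifetime to that namespace rather than to init_net. For illustration only, with an assumed plain UDP socket instead of the real AF_RXRPC setup:

  #include <linux/net.h>
  #include <linux/in.h>

  static int demo_open_socket(struct net *net, struct socket **sockp)
  {
          /* the resulting socket is owned by @net, not by init_net */
          return sock_create_kern(net, AF_INET, SOCK_DGRAM,
                                  IPPROTO_UDP, sockp);
  }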
diff --git a/fs/afs/server.c b/fs/afs/server.c index 3af4625e2f8c..1d329e6981d5 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c | |||
| @@ -228,7 +228,7 @@ static struct afs_server *afs_alloc_server(struct afs_net *net, | |||
| 228 | server->flags = (1UL << AFS_SERVER_FL_NEW); | 228 | server->flags = (1UL << AFS_SERVER_FL_NEW); |
| 229 | server->update_at = ktime_get_real_seconds() + afs_server_update_delay; | 229 | server->update_at = ktime_get_real_seconds() + afs_server_update_delay; |
| 230 | rwlock_init(&server->fs_lock); | 230 | rwlock_init(&server->fs_lock); |
| 231 | INIT_LIST_HEAD(&server->cb_interests); | 231 | INIT_HLIST_HEAD(&server->cb_volumes); |
| 232 | rwlock_init(&server->cb_break_lock); | 232 | rwlock_init(&server->cb_break_lock); |
| 233 | 233 | ||
| 234 | afs_inc_servers_outstanding(net); | 234 | afs_inc_servers_outstanding(net); |
diff --git a/fs/afs/super.c b/fs/afs/super.c index 9e5d7966621c..4d3e274207fb 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c | |||
| @@ -48,6 +48,8 @@ struct file_system_type afs_fs_type = { | |||
| 48 | }; | 48 | }; |
| 49 | MODULE_ALIAS_FS("afs"); | 49 | MODULE_ALIAS_FS("afs"); |
| 50 | 50 | ||
| 51 | int afs_net_id; | ||
| 52 | |||
| 51 | static const struct super_operations afs_super_ops = { | 53 | static const struct super_operations afs_super_ops = { |
| 52 | .statfs = afs_statfs, | 54 | .statfs = afs_statfs, |
| 53 | .alloc_inode = afs_alloc_inode, | 55 | .alloc_inode = afs_alloc_inode, |
| @@ -117,7 +119,7 @@ int __init afs_fs_init(void) | |||
| 117 | /* | 119 | /* |
| 118 | * clean up the filesystem | 120 | * clean up the filesystem |
| 119 | */ | 121 | */ |
| 120 | void __exit afs_fs_exit(void) | 122 | void afs_fs_exit(void) |
| 121 | { | 123 | { |
| 122 | _enter(""); | 124 | _enter(""); |
| 123 | 125 | ||
| @@ -351,14 +353,19 @@ static int afs_test_super(struct super_block *sb, void *data) | |||
| 351 | struct afs_super_info *as1 = data; | 353 | struct afs_super_info *as1 = data; |
| 352 | struct afs_super_info *as = AFS_FS_S(sb); | 354 | struct afs_super_info *as = AFS_FS_S(sb); |
| 353 | 355 | ||
| 354 | return (as->net == as1->net && | 356 | return (as->net_ns == as1->net_ns && |
| 355 | as->volume && | 357 | as->volume && |
| 356 | as->volume->vid == as1->volume->vid); | 358 | as->volume->vid == as1->volume->vid && |
| 359 | !as->dyn_root); | ||
| 357 | } | 360 | } |
| 358 | 361 | ||
| 359 | static int afs_dynroot_test_super(struct super_block *sb, void *data) | 362 | static int afs_dynroot_test_super(struct super_block *sb, void *data) |
| 360 | { | 363 | { |
| 361 | return false; | 364 | struct afs_super_info *as1 = data; |
| 365 | struct afs_super_info *as = AFS_FS_S(sb); | ||
| 366 | |||
| 367 | return (as->net_ns == as1->net_ns && | ||
| 368 | as->dyn_root); | ||
| 362 | } | 369 | } |
| 363 | 370 | ||
| 364 | static int afs_set_super(struct super_block *sb, void *data) | 371 | static int afs_set_super(struct super_block *sb, void *data) |
| @@ -418,10 +425,14 @@ static int afs_fill_super(struct super_block *sb, | |||
| 418 | if (!sb->s_root) | 425 | if (!sb->s_root) |
| 419 | goto error; | 426 | goto error; |
| 420 | 427 | ||
| 421 | if (params->dyn_root) | 428 | if (as->dyn_root) { |
| 422 | sb->s_d_op = &afs_dynroot_dentry_operations; | 429 | sb->s_d_op = &afs_dynroot_dentry_operations; |
| 423 | else | 430 | ret = afs_dynroot_populate(sb); |
| 431 | if (ret < 0) | ||
| 432 | goto error; | ||
| 433 | } else { | ||
| 424 | sb->s_d_op = &afs_fs_dentry_operations; | 434 | sb->s_d_op = &afs_fs_dentry_operations; |
| 435 | } | ||
| 425 | 436 | ||
| 426 | _leave(" = 0"); | 437 | _leave(" = 0"); |
| 427 | return 0; | 438 | return 0; |
| @@ -437,7 +448,7 @@ static struct afs_super_info *afs_alloc_sbi(struct afs_mount_params *params) | |||
| 437 | 448 | ||
| 438 | as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL); | 449 | as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL); |
| 439 | if (as) { | 450 | if (as) { |
| 440 | as->net = afs_get_net(params->net); | 451 | as->net_ns = get_net(params->net_ns); |
| 441 | if (params->dyn_root) | 452 | if (params->dyn_root) |
| 442 | as->dyn_root = true; | 453 | as->dyn_root = true; |
| 443 | else | 454 | else |
| @@ -450,12 +461,31 @@ static void afs_destroy_sbi(struct afs_super_info *as) | |||
| 450 | { | 461 | { |
| 451 | if (as) { | 462 | if (as) { |
| 452 | afs_put_volume(as->cell, as->volume); | 463 | afs_put_volume(as->cell, as->volume); |
| 453 | afs_put_cell(as->net, as->cell); | 464 | afs_put_cell(afs_net(as->net_ns), as->cell); |
| 454 | afs_put_net(as->net); | 465 | put_net(as->net_ns); |
| 455 | kfree(as); | 466 | kfree(as); |
| 456 | } | 467 | } |
| 457 | } | 468 | } |
| 458 | 469 | ||
| 470 | static void afs_kill_super(struct super_block *sb) | ||
| 471 | { | ||
| 472 | struct afs_super_info *as = AFS_FS_S(sb); | ||
| 473 | struct afs_net *net = afs_net(as->net_ns); | ||
| 474 | |||
| 475 | if (as->dyn_root) | ||
| 476 | afs_dynroot_depopulate(sb); | ||
| 477 | |||
| 478 | /* Clear the callback interests (which will do ilookup5) before | ||
| 479 | * deactivating the superblock. | ||
| 480 | */ | ||
| 481 | if (as->volume) | ||
| 482 | afs_clear_callback_interests(net, as->volume->servers); | ||
| 483 | kill_anon_super(sb); | ||
| 484 | if (as->volume) | ||
| 485 | afs_deactivate_volume(as->volume); | ||
| 486 | afs_destroy_sbi(as); | ||
| 487 | } | ||
| 488 | |||
| 459 | /* | 489 | /* |
| 460 | * get an AFS superblock | 490 | * get an AFS superblock |
| 461 | */ | 491 | */ |
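Note how the superblock info now pins the mount's namespace with get_net() and releases it in afs_destroy_sbi() with put_net(), replacing the old no-op afs_get_net()/afs_put_net() pair; the namespace therefore cannot be torn down while a superblock still references it. A minimal sketch of the pinning idiom (hypothetical demo_* names):

  #include <linux/slab.h>
  #include <net/net_namespace.h>

  static struct demo_sb_info *demo_alloc_sbi(struct net *mounter_ns)
  {
          struct demo_sb_info *si = kzalloc(sizeof(*si), GFP_KERNEL);

          if (si)
                  si->net_ns = get_net(mounter_ns);   /* hold a ref */
          return si;
  }

  static void demo_destroy_sbi(struct demo_sb_info *si)
  {
          if (si) {
                  put_net(si->net_ns);    /* let the namespace die */
                  kfree(si);
          }
  }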
| @@ -472,12 +502,13 @@ static struct dentry *afs_mount(struct file_system_type *fs_type, | |||
| 472 | _enter(",,%s,%p", dev_name, options); | 502 | _enter(",,%s,%p", dev_name, options); |
| 473 | 503 | ||
| 474 | memset(¶ms, 0, sizeof(params)); | 504 | memset(¶ms, 0, sizeof(params)); |
| 475 | params.net = &__afs_net; | ||
| 476 | 505 | ||
| 477 | ret = -EINVAL; | 506 | ret = -EINVAL; |
| 478 | if (current->nsproxy->net_ns != &init_net) | 507 | if (current->nsproxy->net_ns != &init_net) |
| 479 | goto error; | 508 | goto error; |
| 480 | 509 | params.net_ns = current->nsproxy->net_ns; | |
| 510 | params.net = afs_net(params.net_ns); | ||
| 511 | |||
| 481 | /* parse the options and device name */ | 512 | /* parse the options and device name */ |
| 482 | if (options) { | 513 | if (options) { |
| 483 | ret = afs_parse_options(¶ms, options, &dev_name); | 514 | ret = afs_parse_options(¶ms, options, &dev_name); |
| @@ -563,21 +594,6 @@ error: | |||
| 563 | return ERR_PTR(ret); | 594 | return ERR_PTR(ret); |
| 564 | } | 595 | } |
| 565 | 596 | ||
| 566 | static void afs_kill_super(struct super_block *sb) | ||
| 567 | { | ||
| 568 | struct afs_super_info *as = AFS_FS_S(sb); | ||
| 569 | |||
| 570 | /* Clear the callback interests (which will do ilookup5) before | ||
| 571 | * deactivating the superblock. | ||
| 572 | */ | ||
| 573 | if (as->volume) | ||
| 574 | afs_clear_callback_interests(as->net, as->volume->servers); | ||
| 575 | kill_anon_super(sb); | ||
| 576 | if (as->volume) | ||
| 577 | afs_deactivate_volume(as->volume); | ||
| 578 | afs_destroy_sbi(as); | ||
| 579 | } | ||
| 580 | |||
| 581 | /* | 597 | /* |
| 582 | * Initialise an inode cache slab element prior to any use. Note that | 598 | * Initialise an inode cache slab element prior to any use. Note that |
| 583 | * afs_alloc_inode() *must* reset anything that could incorrectly leak from one | 599 | * afs_alloc_inode() *must* reset anything that could incorrectly leak from one |
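The afs/super.c hunks above replace the raw afs_net pointer in the superblock info with a pinned struct net, taken with get_net() at allocation and dropped with put_net() at teardown, converting via afs_net() wherever the AFS-private state is needed. A minimal sketch of that pinning pattern, using illustrative my_* names:

#include <net/net_namespace.h>

struct my_sb_info {
	struct net *net_ns;		/* pinned network namespace */
};

/* Take a reference so the namespace outlives the superblock. */
static void my_sbi_init(struct my_sb_info *si, struct net *net)
{
	si->net_ns = get_net(net);
}

/* Drop the reference taken at mount time. */
static void my_sbi_destroy(struct my_sb_info *si)
{
	put_net(si->net_ns);
}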
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
| @@ -1661,7 +1661,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, | |||
| 1661 | if (mask && !(mask & req->events)) | 1661 | if (mask && !(mask & req->events)) |
| 1662 | return 0; | 1662 | return 0; |
| 1663 | 1663 | ||
| 1664 | mask = file->f_op->poll_mask(file, req->events); | 1664 | mask = file->f_op->poll_mask(file, req->events) & req->events; |
| 1665 | if (!mask) | 1665 | if (!mask) |
| 1666 | return 0; | 1666 | return 0; |
| 1667 | 1667 | ||
| @@ -1719,7 +1719,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb) | |||
| 1719 | 1719 | ||
| 1720 | spin_lock_irq(&ctx->ctx_lock); | 1720 | spin_lock_irq(&ctx->ctx_lock); |
| 1721 | spin_lock(&req->head->lock); | 1721 | spin_lock(&req->head->lock); |
| 1722 | mask = req->file->f_op->poll_mask(req->file, req->events); | 1722 | mask = req->file->f_op->poll_mask(req->file, req->events) & req->events; |
| 1723 | if (!mask) { | 1723 | if (!mask) { |
| 1724 | __add_wait_queue(req->head, &req->wait); | 1724 | __add_wait_queue(req->head, &req->wait); |
| 1725 | list_add_tail(&aiocb->ki_list, &ctx->active_reqs); | 1725 | list_add_tail(&aiocb->ki_list, &ctx->active_reqs); |
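Both aio call sites above now intersect the driver's answer with the bits the iocb actually requested, since a ->poll_mask() implementation may report readiness the caller never asked for. A minimal sketch of the idiom, assuming the file implements ->poll_mask() (the helper name is illustrative):

#include <linux/fs.h>
#include <linux/poll.h>

/* Return only the readiness bits the submitter asked for. */
static __poll_t requested_ready(struct file *file, __poll_t requested)
{
	return file->f_op->poll_mask(file, requested) & requested;
}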
diff --git a/fs/eventfd.c b/fs/eventfd.c index 61c9514da5e9..ceb1031f1cac 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c | |||
| @@ -156,11 +156,11 @@ static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask) | |||
| 156 | count = READ_ONCE(ctx->count); | 156 | count = READ_ONCE(ctx->count); |
| 157 | 157 | ||
| 158 | if (count > 0) | 158 | if (count > 0) |
| 159 | events |= EPOLLIN; | 159 | events |= (EPOLLIN & eventmask); |
| 160 | if (count == ULLONG_MAX) | 160 | if (count == ULLONG_MAX) |
| 161 | events |= EPOLLERR; | 161 | events |= EPOLLERR; |
| 162 | if (ULLONG_MAX - 1 > count) | 162 | if (ULLONG_MAX - 1 > count) |
| 163 | events |= EPOLLOUT; | 163 | events |= (EPOLLOUT & eventmask); |
| 164 | 164 | ||
| 165 | return events; | 165 | return events; |
| 166 | } | 166 | } |
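On the provider side, eventfd now does the same filtering inside its ->poll_mask(): EPOLLIN and EPOLLOUT are gated by the caller's interest set while error states are always reported. A reduced sketch of such a handler, with an illustrative context layout:

#include <linux/kernel.h>
#include <linux/poll.h>

struct demo_ctx { u64 count; };

static __poll_t demo_poll_mask(struct demo_ctx *ctx, __poll_t eventmask)
{
	__poll_t events = 0;
	u64 count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= (EPOLLIN & eventmask);	/* readable, if asked */
	if (count == ULLONG_MAX)
		events |= EPOLLERR;			/* errors never masked */
	if (ULLONG_MAX - 1 > count)
		events |= (EPOLLOUT & eventmask);	/* writable, if asked */
	return events;
}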
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 67db22fe99c5..ea4436f409fb 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
| @@ -922,13 +922,17 @@ static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head | |||
| 922 | return 0; | 922 | return 0; |
| 923 | } | 923 | } |
| 924 | 924 | ||
| 925 | static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait) | 925 | static struct wait_queue_head *ep_eventpoll_get_poll_head(struct file *file, |
| 926 | __poll_t eventmask) | ||
| 926 | { | 927 | { |
| 927 | struct eventpoll *ep = file->private_data; | 928 | struct eventpoll *ep = file->private_data; |
| 928 | int depth = 0; | 929 | return &ep->poll_wait; |
| 930 | } | ||
| 929 | 931 | ||
| 930 | /* Insert inside our poll wait queue */ | 932 | static __poll_t ep_eventpoll_poll_mask(struct file *file, __poll_t eventmask) |
| 931 | poll_wait(file, &ep->poll_wait, wait); | 933 | { |
| 934 | struct eventpoll *ep = file->private_data; | ||
| 935 | int depth = 0; | ||
| 932 | 936 | ||
| 933 | /* | 937 | /* |
| 934 | * Proceed to find out if wanted events are really available inside | 938 | * Proceed to find out if wanted events are really available inside |
| @@ -968,7 +972,8 @@ static const struct file_operations eventpoll_fops = { | |||
| 968 | .show_fdinfo = ep_show_fdinfo, | 972 | .show_fdinfo = ep_show_fdinfo, |
| 969 | #endif | 973 | #endif |
| 970 | .release = ep_eventpoll_release, | 974 | .release = ep_eventpoll_release, |
| 971 | .poll = ep_eventpoll_poll, | 975 | .get_poll_head = ep_eventpoll_get_poll_head, |
| 976 | .poll_mask = ep_eventpoll_poll_mask, | ||
| 972 | .llseek = noop_llseek, | 977 | .llseek = noop_llseek, |
| 973 | }; | 978 | }; |
| 974 | 979 | ||
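The eventpoll conversion above splits the legacy ->poll() into the two-method form: ->get_poll_head() hands the core the wait queue to sleep on, and ->poll_mask() computes readiness without touching a poll_table. A minimal sketch of the pairing, with illustrative demo_* names:

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct demo_priv {
	struct wait_queue_head wq;
};

static struct wait_queue_head *demo_get_poll_head(struct file *file,
						  __poll_t eventmask)
{
	struct demo_priv *p = file->private_data;

	return &p->wq;		/* the core waits here on our behalf */
}

static __poll_t demo_poll_mask(struct file *file, __poll_t eventmask)
{
	/* no poll_wait() needed in this model, just compute readiness */
	return EPOLLOUT & eventmask;
}

static const struct file_operations demo_fops = {
	.get_poll_head	= demo_get_poll_head,
	.poll_mask	= demo_poll_mask,
};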
diff --git a/fs/namei.c b/fs/namei.c index 2490ddb8bc90..734cef54fdf8 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
| @@ -2464,6 +2464,35 @@ static int lookup_one_len_common(const char *name, struct dentry *base, | |||
| 2464 | } | 2464 | } |
| 2465 | 2465 | ||
| 2466 | /** | 2466 | /** |
| 2467 | * try_lookup_one_len - filesystem helper to lookup single pathname component | ||
| 2468 | * @name: pathname component to lookup | ||
| 2469 | * @base: base directory to lookup from | ||
| 2470 | * @len: maximum length @len should be interpreted to | ||
| 2471 | * | ||
| 2472 | * Look up a dentry by name in the dcache, returning NULL if it does not | ||
| 2473 | * currently exist. The function does not try to create a dentry. | ||
| 2474 | * | ||
| 2475 | * Note that this routine is purely a helper for filesystem usage and should | ||
| 2476 | * not be called by generic code. | ||
| 2477 | * | ||
| 2478 | * The caller must hold base->i_mutex. | ||
| 2479 | */ | ||
| 2480 | struct dentry *try_lookup_one_len(const char *name, struct dentry *base, int len) | ||
| 2481 | { | ||
| 2482 | struct qstr this; | ||
| 2483 | int err; | ||
| 2484 | |||
| 2485 | WARN_ON_ONCE(!inode_is_locked(base->d_inode)); | ||
| 2486 | |||
| 2487 | err = lookup_one_len_common(name, base, len, &this); | ||
| 2488 | if (err) | ||
| 2489 | return ERR_PTR(err); | ||
| 2490 | |||
| 2491 | return lookup_dcache(&this, base, 0); | ||
| 2492 | } | ||
| 2493 | EXPORT_SYMBOL(try_lookup_one_len); | ||
| 2494 | |||
| 2495 | /** | ||
| 2467 | * lookup_one_len - filesystem helper to lookup single pathname component | 2496 | * lookup_one_len - filesystem helper to lookup single pathname component |
| 2468 | * @name: pathname component to lookup | 2497 | * @name: pathname component to lookup |
| 2469 | * @base: base directory to lookup from | 2498 | * @base: base directory to lookup from |
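try_lookup_one_len() gives filesystems a dcache-only probe: unlike lookup_one_len(), it returns NULL rather than allocating a new dentry when the name is not cached. A usage sketch under the documented locking rule (the probe_child() wrapper is illustrative):

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/string.h>

/* Probe the dcache for a child without instantiating a dentry.
 * Returns an ERR_PTR on bad input, NULL when nothing is cached.
 */
static struct dentry *probe_child(struct dentry *parent, const char *name)
{
	struct dentry *child;

	inode_lock(d_inode(parent));	/* the helper requires i_mutex held */
	child = try_lookup_one_len(name, parent, strlen(name));
	inode_unlock(d_inode(parent));

	return child;
}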
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c index 63a1ca4b9dee..e2bea2ac5dfb 100644 --- a/fs/notify/dnotify/dnotify.c +++ b/fs/notify/dnotify/dnotify.c | |||
| @@ -79,12 +79,11 @@ static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark) | |||
| 79 | */ | 79 | */ |
| 80 | static int dnotify_handle_event(struct fsnotify_group *group, | 80 | static int dnotify_handle_event(struct fsnotify_group *group, |
| 81 | struct inode *inode, | 81 | struct inode *inode, |
| 82 | struct fsnotify_mark *inode_mark, | ||
| 83 | struct fsnotify_mark *vfsmount_mark, | ||
| 84 | u32 mask, const void *data, int data_type, | 82 | u32 mask, const void *data, int data_type, |
| 85 | const unsigned char *file_name, u32 cookie, | 83 | const unsigned char *file_name, u32 cookie, |
| 86 | struct fsnotify_iter_info *iter_info) | 84 | struct fsnotify_iter_info *iter_info) |
| 87 | { | 85 | { |
| 86 | struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); | ||
| 88 | struct dnotify_mark *dn_mark; | 87 | struct dnotify_mark *dn_mark; |
| 89 | struct dnotify_struct *dn; | 88 | struct dnotify_struct *dn; |
| 90 | struct dnotify_struct **prev; | 89 | struct dnotify_struct **prev; |
| @@ -95,7 +94,8 @@ static int dnotify_handle_event(struct fsnotify_group *group, | |||
| 95 | if (!S_ISDIR(inode->i_mode)) | 94 | if (!S_ISDIR(inode->i_mode)) |
| 96 | return 0; | 95 | return 0; |
| 97 | 96 | ||
| 98 | BUG_ON(vfsmount_mark); | 97 | if (WARN_ON(fsnotify_iter_vfsmount_mark(iter_info))) |
| 98 | return 0; | ||
| 99 | 99 | ||
| 100 | dn_mark = container_of(inode_mark, struct dnotify_mark, fsn_mark); | 100 | dn_mark = container_of(inode_mark, struct dnotify_mark, fsn_mark); |
| 101 | 101 | ||
| @@ -319,7 +319,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) | |||
| 319 | dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); | 319 | dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); |
| 320 | spin_lock(&fsn_mark->lock); | 320 | spin_lock(&fsn_mark->lock); |
| 321 | } else { | 321 | } else { |
| 322 | error = fsnotify_add_mark_locked(new_fsn_mark, inode, NULL, 0); | 322 | error = fsnotify_add_inode_mark_locked(new_fsn_mark, inode, 0); |
| 323 | if (error) { | 323 | if (error) { |
| 324 | mutex_unlock(&dnotify_group->mark_mutex); | 324 | mutex_unlock(&dnotify_group->mark_mutex); |
| 325 | goto out_err; | 325 | goto out_err; |
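Worth noting in passing: the impossible-condition check above is downgraded from BUG_ON() to WARN_ON() plus an early return, so a logic error elsewhere logs a backtrace and drops one event instead of halting the machine. A generic sketch of the pattern:

#include <linux/bug.h>

static int handle_one(void *unexpected_state)
{
	/* Prefer WARN_ON() + bail over BUG_ON(): the broken invariant
	 * is reported loudly, but only this operation fails.
	 */
	if (WARN_ON(unexpected_state))
		return 0;
	/* ... normal handling ... */
	return 0;
}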
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index d94e8031fe5f..f90842efea13 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c | |||
| @@ -87,17 +87,17 @@ static int fanotify_get_response(struct fsnotify_group *group, | |||
| 87 | return ret; | 87 | return ret; |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, | 90 | static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info, |
| 91 | struct fsnotify_mark *vfsmnt_mark, | 91 | u32 event_mask, const void *data, |
| 92 | u32 event_mask, | 92 | int data_type) |
| 93 | const void *data, int data_type) | ||
| 94 | { | 93 | { |
| 95 | __u32 marks_mask = 0, marks_ignored_mask = 0; | 94 | __u32 marks_mask = 0, marks_ignored_mask = 0; |
| 96 | const struct path *path = data; | 95 | const struct path *path = data; |
| 96 | struct fsnotify_mark *mark; | ||
| 97 | int type; | ||
| 97 | 98 | ||
| 98 | pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p" | 99 | pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n", |
| 99 | " data_type=%d\n", __func__, inode_mark, vfsmnt_mark, | 100 | __func__, iter_info->report_mask, event_mask, data, data_type); |
| 100 | event_mask, data, data_type); | ||
| 101 | 101 | ||
| 102 | /* if we don't have enough info to send an event to userspace say no */ | 102 | /* if we don't have enough info to send an event to userspace say no */ |
| 103 | if (data_type != FSNOTIFY_EVENT_PATH) | 103 | if (data_type != FSNOTIFY_EVENT_PATH) |
| @@ -108,20 +108,21 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, | |||
| 108 | !d_can_lookup(path->dentry)) | 108 | !d_can_lookup(path->dentry)) |
| 109 | return false; | 109 | return false; |
| 110 | 110 | ||
| 111 | /* | 111 | fsnotify_foreach_obj_type(type) { |
| 112 | * if the event is for a child and this inode doesn't care about | 112 | if (!fsnotify_iter_should_report_type(iter_info, type)) |
| 113 | * events on the child, don't send it! | 113 | continue; |
| 114 | */ | 114 | mark = iter_info->marks[type]; |
| 115 | if (inode_mark && | 115 | /* |
| 116 | (!(event_mask & FS_EVENT_ON_CHILD) || | 116 | * if the event is for a child and this inode doesn't care about |
| 117 | (inode_mark->mask & FS_EVENT_ON_CHILD))) { | 117 | * events on the child, don't send it! |
| 118 | marks_mask |= inode_mark->mask; | 118 | */ |
| 119 | marks_ignored_mask |= inode_mark->ignored_mask; | 119 | if (type == FSNOTIFY_OBJ_TYPE_INODE && |
| 120 | } | 120 | (event_mask & FS_EVENT_ON_CHILD) && |
| 121 | !(mark->mask & FS_EVENT_ON_CHILD)) | ||
| 122 | continue; | ||
| 121 | 123 | ||
| 122 | if (vfsmnt_mark) { | 124 | marks_mask |= mark->mask; |
| 123 | marks_mask |= vfsmnt_mark->mask; | 125 | marks_ignored_mask |= mark->ignored_mask; |
| 124 | marks_ignored_mask |= vfsmnt_mark->ignored_mask; | ||
| 125 | } | 126 | } |
| 126 | 127 | ||
| 127 | if (d_is_dir(path->dentry) && | 128 | if (d_is_dir(path->dentry) && |
| @@ -178,8 +179,6 @@ init: __maybe_unused | |||
| 178 | 179 | ||
| 179 | static int fanotify_handle_event(struct fsnotify_group *group, | 180 | static int fanotify_handle_event(struct fsnotify_group *group, |
| 180 | struct inode *inode, | 181 | struct inode *inode, |
| 181 | struct fsnotify_mark *inode_mark, | ||
| 182 | struct fsnotify_mark *fanotify_mark, | ||
| 183 | u32 mask, const void *data, int data_type, | 182 | u32 mask, const void *data, int data_type, |
| 184 | const unsigned char *file_name, u32 cookie, | 183 | const unsigned char *file_name, u32 cookie, |
| 185 | struct fsnotify_iter_info *iter_info) | 184 | struct fsnotify_iter_info *iter_info) |
| @@ -199,8 +198,7 @@ static int fanotify_handle_event(struct fsnotify_group *group, | |||
| 199 | BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM); | 198 | BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM); |
| 200 | BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR); | 199 | BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR); |
| 201 | 200 | ||
| 202 | if (!fanotify_should_send_event(inode_mark, fanotify_mark, mask, data, | 201 | if (!fanotify_should_send_event(iter_info, mask, data, data_type)) |
| 203 | data_type)) | ||
| 204 | return 0; | 202 | return 0; |
| 205 | 203 | ||
| 206 | pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode, | 204 | pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode, |
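The rewritten fanotify_should_send_event() is the template for any backend that must consider one mark per object type. A reduced sketch of the iteration, using the identifiers introduced by this series:

#include <linux/fsnotify_backend.h>

/* OR together the masks of every mark type selected for this
 * iteration step, as the fanotify hunk above does.
 */
static __u32 gather_marks_mask(struct fsnotify_iter_info *iter_info)
{
	struct fsnotify_mark *mark;
	__u32 marks_mask = 0;
	int type;

	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];
		marks_mask |= mark->mask;
	}
	return marks_mask;
}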
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c index d478629c728b..10aac1942c9f 100644 --- a/fs/notify/fdinfo.c +++ b/fs/notify/fdinfo.c | |||
| @@ -77,7 +77,7 @@ static void inotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark) | |||
| 77 | struct inotify_inode_mark *inode_mark; | 77 | struct inotify_inode_mark *inode_mark; |
| 78 | struct inode *inode; | 78 | struct inode *inode; |
| 79 | 79 | ||
| 80 | if (!(mark->connector->flags & FSNOTIFY_OBJ_TYPE_INODE)) | 80 | if (mark->connector->type != FSNOTIFY_OBJ_TYPE_INODE) |
| 81 | return; | 81 | return; |
| 82 | 82 | ||
| 83 | inode_mark = container_of(mark, struct inotify_inode_mark, fsn_mark); | 83 | inode_mark = container_of(mark, struct inotify_inode_mark, fsn_mark); |
| @@ -116,7 +116,7 @@ static void fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark) | |||
| 116 | if (mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY) | 116 | if (mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY) |
| 117 | mflags |= FAN_MARK_IGNORED_SURV_MODIFY; | 117 | mflags |= FAN_MARK_IGNORED_SURV_MODIFY; |
| 118 | 118 | ||
| 119 | if (mark->connector->flags & FSNOTIFY_OBJ_TYPE_INODE) { | 119 | if (mark->connector->type == FSNOTIFY_OBJ_TYPE_INODE) { |
| 120 | inode = igrab(mark->connector->inode); | 120 | inode = igrab(mark->connector->inode); |
| 121 | if (!inode) | 121 | if (!inode) |
| 122 | return; | 122 | return; |
| @@ -126,7 +126,7 @@ static void fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark) | |||
| 126 | show_mark_fhandle(m, inode); | 126 | show_mark_fhandle(m, inode); |
| 127 | seq_putc(m, '\n'); | 127 | seq_putc(m, '\n'); |
| 128 | iput(inode); | 128 | iput(inode); |
| 129 | } else if (mark->connector->flags & FSNOTIFY_OBJ_TYPE_VFSMOUNT) { | 129 | } else if (mark->connector->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) { |
| 130 | struct mount *mnt = real_mount(mark->connector->mnt); | 130 | struct mount *mnt = real_mount(mark->connector->mnt); |
| 131 | 131 | ||
| 132 | seq_printf(m, "fanotify mnt_id:%x mflags:%x mask:%x ignored_mask:%x\n", | 132 | seq_printf(m, "fanotify mnt_id:%x mflags:%x mask:%x ignored_mask:%x\n", |
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 613ec7e5a465..f174397b63a0 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c | |||
| @@ -184,8 +184,6 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask | |||
| 184 | EXPORT_SYMBOL_GPL(__fsnotify_parent); | 184 | EXPORT_SYMBOL_GPL(__fsnotify_parent); |
| 185 | 185 | ||
| 186 | static int send_to_group(struct inode *to_tell, | 186 | static int send_to_group(struct inode *to_tell, |
| 187 | struct fsnotify_mark *inode_mark, | ||
| 188 | struct fsnotify_mark *vfsmount_mark, | ||
| 189 | __u32 mask, const void *data, | 187 | __u32 mask, const void *data, |
| 190 | int data_is, u32 cookie, | 188 | int data_is, u32 cookie, |
| 191 | const unsigned char *file_name, | 189 | const unsigned char *file_name, |
| @@ -195,48 +193,45 @@ static int send_to_group(struct inode *to_tell, | |||
| 195 | __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD); | 193 | __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD); |
| 196 | __u32 marks_mask = 0; | 194 | __u32 marks_mask = 0; |
| 197 | __u32 marks_ignored_mask = 0; | 195 | __u32 marks_ignored_mask = 0; |
| 196 | struct fsnotify_mark *mark; | ||
| 197 | int type; | ||
| 198 | 198 | ||
| 199 | if (unlikely(!inode_mark && !vfsmount_mark)) { | 199 | if (WARN_ON(!iter_info->report_mask)) |
| 200 | BUG(); | ||
| 201 | return 0; | 200 | return 0; |
| 202 | } | ||
| 203 | 201 | ||
| 204 | /* clear ignored on inode modification */ | 202 | /* clear ignored on inode modification */ |
| 205 | if (mask & FS_MODIFY) { | 203 | if (mask & FS_MODIFY) { |
| 206 | if (inode_mark && | 204 | fsnotify_foreach_obj_type(type) { |
| 207 | !(inode_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) | 205 | if (!fsnotify_iter_should_report_type(iter_info, type)) |
| 208 | inode_mark->ignored_mask = 0; | 206 | continue; |
| 209 | if (vfsmount_mark && | 207 | mark = iter_info->marks[type]; |
| 210 | !(vfsmount_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) | 208 | if (mark && |
| 211 | vfsmount_mark->ignored_mask = 0; | 209 | !(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) |
| 212 | } | 210 | mark->ignored_mask = 0; |
| 213 | 211 | } | |
| 214 | /* does the inode mark tell us to do something? */ | ||
| 215 | if (inode_mark) { | ||
| 216 | group = inode_mark->group; | ||
| 217 | marks_mask |= inode_mark->mask; | ||
| 218 | marks_ignored_mask |= inode_mark->ignored_mask; | ||
| 219 | } | 212 | } |
| 220 | 213 | ||
| 221 | /* does the vfsmount_mark tell us to do something? */ | 214 | fsnotify_foreach_obj_type(type) { |
| 222 | if (vfsmount_mark) { | 215 | if (!fsnotify_iter_should_report_type(iter_info, type)) |
| 223 | group = vfsmount_mark->group; | 216 | continue; |
| 224 | marks_mask |= vfsmount_mark->mask; | 217 | mark = iter_info->marks[type]; |
| 225 | marks_ignored_mask |= vfsmount_mark->ignored_mask; | 218 | /* does the object mark tell us to do something? */ |
| 219 | if (mark) { | ||
| 220 | group = mark->group; | ||
| 221 | marks_mask |= mark->mask; | ||
| 222 | marks_ignored_mask |= mark->ignored_mask; | ||
| 223 | } | ||
| 226 | } | 224 | } |
| 227 | 225 | ||
| 228 | pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p" | 226 | pr_debug("%s: group=%p to_tell=%p mask=%x marks_mask=%x marks_ignored_mask=%x" |
| 229 | " vfsmount_mark=%p marks_mask=%x marks_ignored_mask=%x" | ||
| 230 | " data=%p data_is=%d cookie=%d\n", | 227 | " data=%p data_is=%d cookie=%d\n", |
| 231 | __func__, group, to_tell, mask, inode_mark, vfsmount_mark, | 228 | __func__, group, to_tell, mask, marks_mask, marks_ignored_mask, |
| 232 | marks_mask, marks_ignored_mask, data, | 229 | data, data_is, cookie); |
| 233 | data_is, cookie); | ||
| 234 | 230 | ||
| 235 | if (!(test_mask & marks_mask & ~marks_ignored_mask)) | 231 | if (!(test_mask & marks_mask & ~marks_ignored_mask)) |
| 236 | return 0; | 232 | return 0; |
| 237 | 233 | ||
| 238 | return group->ops->handle_event(group, to_tell, inode_mark, | 234 | return group->ops->handle_event(group, to_tell, mask, data, data_is, |
| 239 | vfsmount_mark, mask, data, data_is, | ||
| 240 | file_name, cookie, iter_info); | 235 | file_name, cookie, iter_info); |
| 241 | } | 236 | } |
| 242 | 237 | ||
| @@ -264,6 +259,57 @@ static struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark) | |||
| 264 | } | 259 | } |
| 265 | 260 | ||
| 266 | /* | 261 | /* |
| 262 | * iter_info is a multi-head priority queue of marks. | ||
| 263 | * Pick a subset of marks from queue heads, all with the | ||
| 264 | * same group and set the report_mask for selected subset. | ||
| 265 | * Returns the report_mask of the selected subset. | ||
| 266 | */ | ||
| 267 | static unsigned int fsnotify_iter_select_report_types( | ||
| 268 | struct fsnotify_iter_info *iter_info) | ||
| 269 | { | ||
| 270 | struct fsnotify_group *max_prio_group = NULL; | ||
| 271 | struct fsnotify_mark *mark; | ||
| 272 | int type; | ||
| 273 | |||
| 274 | /* Choose max prio group among groups of all queue heads */ | ||
| 275 | fsnotify_foreach_obj_type(type) { | ||
| 276 | mark = iter_info->marks[type]; | ||
| 277 | if (mark && | ||
| 278 | fsnotify_compare_groups(max_prio_group, mark->group) > 0) | ||
| 279 | max_prio_group = mark->group; | ||
| 280 | } | ||
| 281 | |||
| 282 | if (!max_prio_group) | ||
| 283 | return 0; | ||
| 284 | |||
| 285 | /* Set the report mask for marks from same group as max prio group */ | ||
| 286 | iter_info->report_mask = 0; | ||
| 287 | fsnotify_foreach_obj_type(type) { | ||
| 288 | mark = iter_info->marks[type]; | ||
| 289 | if (mark && | ||
| 290 | fsnotify_compare_groups(max_prio_group, mark->group) == 0) | ||
| 291 | fsnotify_iter_set_report_type(iter_info, type); | ||
| 292 | } | ||
| 293 | |||
| 294 | return iter_info->report_mask; | ||
| 295 | } | ||
| 296 | |||
| 297 | /* | ||
| 298 | * Pop from the iter_info multi-head queue the marks that were iterated in the | ||
| 299 | * current iteration step. | ||
| 300 | */ | ||
| 301 | static void fsnotify_iter_next(struct fsnotify_iter_info *iter_info) | ||
| 302 | { | ||
| 303 | int type; | ||
| 304 | |||
| 305 | fsnotify_foreach_obj_type(type) { | ||
| 306 | if (fsnotify_iter_should_report_type(iter_info, type)) | ||
| 307 | iter_info->marks[type] = | ||
| 308 | fsnotify_next_mark(iter_info->marks[type]); | ||
| 309 | } | ||
| 310 | } | ||
| 311 | |||
| 312 | /* | ||
| 267 | * This is the main call to fsnotify. The VFS calls into hook specific functions | 313 | * This is the main call to fsnotify. The VFS calls into hook specific functions |
| 268 | * in linux/fsnotify.h. Those functions then in turn call here. Here will call | 314 | * in linux/fsnotify.h. Those functions then in turn call here. Here will call |
| 269 | * out to all of the registered fsnotify_group. Those groups can then use the | 315 | * out to all of the registered fsnotify_group. Those groups can then use the |
| @@ -307,15 +353,15 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, | |||
| 307 | 353 | ||
| 308 | if ((mask & FS_MODIFY) || | 354 | if ((mask & FS_MODIFY) || |
| 309 | (test_mask & to_tell->i_fsnotify_mask)) { | 355 | (test_mask & to_tell->i_fsnotify_mask)) { |
| 310 | iter_info.inode_mark = | 356 | iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = |
| 311 | fsnotify_first_mark(&to_tell->i_fsnotify_marks); | 357 | fsnotify_first_mark(&to_tell->i_fsnotify_marks); |
| 312 | } | 358 | } |
| 313 | 359 | ||
| 314 | if (mnt && ((mask & FS_MODIFY) || | 360 | if (mnt && ((mask & FS_MODIFY) || |
| 315 | (test_mask & mnt->mnt_fsnotify_mask))) { | 361 | (test_mask & mnt->mnt_fsnotify_mask))) { |
| 316 | iter_info.inode_mark = | 362 | iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = |
| 317 | fsnotify_first_mark(&to_tell->i_fsnotify_marks); | 363 | fsnotify_first_mark(&to_tell->i_fsnotify_marks); |
| 318 | iter_info.vfsmount_mark = | 364 | iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] = |
| 319 | fsnotify_first_mark(&mnt->mnt_fsnotify_marks); | 365 | fsnotify_first_mark(&mnt->mnt_fsnotify_marks); |
| 320 | } | 366 | } |
| 321 | 367 | ||
| @@ -324,32 +370,14 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, | |||
| 324 | * ignore masks are properly reflected for mount mark notifications. | 370 | * ignore masks are properly reflected for mount mark notifications. |
| 325 | * That's why this traversal is so complicated... | 371 | * That's why this traversal is so complicated... |
| 326 | */ | 372 | */ |
| 327 | while (iter_info.inode_mark || iter_info.vfsmount_mark) { | 373 | while (fsnotify_iter_select_report_types(&iter_info)) { |
| 328 | struct fsnotify_mark *inode_mark = iter_info.inode_mark; | 374 | ret = send_to_group(to_tell, mask, data, data_is, cookie, |
| 329 | struct fsnotify_mark *vfsmount_mark = iter_info.vfsmount_mark; | 375 | file_name, &iter_info); |
| 330 | |||
| 331 | if (inode_mark && vfsmount_mark) { | ||
| 332 | int cmp = fsnotify_compare_groups(inode_mark->group, | ||
| 333 | vfsmount_mark->group); | ||
| 334 | if (cmp > 0) | ||
| 335 | inode_mark = NULL; | ||
| 336 | else if (cmp < 0) | ||
| 337 | vfsmount_mark = NULL; | ||
| 338 | } | ||
| 339 | |||
| 340 | ret = send_to_group(to_tell, inode_mark, vfsmount_mark, mask, | ||
| 341 | data, data_is, cookie, file_name, | ||
| 342 | &iter_info); | ||
| 343 | 376 | ||
| 344 | if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS)) | 377 | if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS)) |
| 345 | goto out; | 378 | goto out; |
| 346 | 379 | ||
| 347 | if (inode_mark) | 380 | fsnotify_iter_next(&iter_info); |
| 348 | iter_info.inode_mark = | ||
| 349 | fsnotify_next_mark(iter_info.inode_mark); | ||
| 350 | if (vfsmount_mark) | ||
| 351 | iter_info.vfsmount_mark = | ||
| 352 | fsnotify_next_mark(iter_info.vfsmount_mark); | ||
| 353 | } | 381 | } |
| 354 | ret = 0; | 382 | ret = 0; |
| 355 | out: | 383 | out: |
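Put together, the new traversal reads as a generic multi-head merge: pick the highest-priority group across the queue heads, dispatch once for that group, then advance only the heads that were consumed. Condensed from the hunks above:

/* The reworked fsnotify() main loop, in outline. */
while (fsnotify_iter_select_report_types(&iter_info)) {
	ret = send_to_group(to_tell, mask, data, data_is, cookie,
			    file_name, &iter_info);
	if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS))
		break;			/* a permission event was denied */
	fsnotify_iter_next(&iter_info);	/* advance consumed heads only */
}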
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h index 60f365dc1408..34515d2c4ba3 100644 --- a/fs/notify/fsnotify.h +++ b/fs/notify/fsnotify.h | |||
| @@ -9,12 +9,6 @@ | |||
| 9 | 9 | ||
| 10 | #include "../mount.h" | 10 | #include "../mount.h" |
| 11 | 11 | ||
| 12 | struct fsnotify_iter_info { | ||
| 13 | struct fsnotify_mark *inode_mark; | ||
| 14 | struct fsnotify_mark *vfsmount_mark; | ||
| 15 | int srcu_idx; | ||
| 16 | }; | ||
| 17 | |||
| 18 | /* destroy all events sitting in this groups notification queue */ | 12 | /* destroy all events sitting in this groups notification queue */ |
| 19 | extern void fsnotify_flush_notify(struct fsnotify_group *group); | 13 | extern void fsnotify_flush_notify(struct fsnotify_group *group); |
| 20 | 14 | ||
diff --git a/fs/notify/group.c b/fs/notify/group.c index b7a4b6a69efa..aa5468f23e45 100644 --- a/fs/notify/group.c +++ b/fs/notify/group.c | |||
| @@ -67,7 +67,7 @@ void fsnotify_destroy_group(struct fsnotify_group *group) | |||
| 67 | fsnotify_group_stop_queueing(group); | 67 | fsnotify_group_stop_queueing(group); |
| 68 | 68 | ||
| 69 | /* Clear all marks for this group and queue them for destruction */ | 69 | /* Clear all marks for this group and queue them for destruction */ |
| 70 | fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_ALL_TYPES); | 70 | fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_ALL_TYPES_MASK); |
| 71 | 71 | ||
| 72 | /* | 72 | /* |
| 73 | * Some marks can still be pinned when waiting for response from | 73 | * Some marks can still be pinned when waiting for response from |
diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h index c00d2caca894..7e4578d35b61 100644 --- a/fs/notify/inotify/inotify.h +++ b/fs/notify/inotify/inotify.h | |||
| @@ -25,8 +25,6 @@ extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark, | |||
| 25 | struct fsnotify_group *group); | 25 | struct fsnotify_group *group); |
| 26 | extern int inotify_handle_event(struct fsnotify_group *group, | 26 | extern int inotify_handle_event(struct fsnotify_group *group, |
| 27 | struct inode *inode, | 27 | struct inode *inode, |
| 28 | struct fsnotify_mark *inode_mark, | ||
| 29 | struct fsnotify_mark *vfsmount_mark, | ||
| 30 | u32 mask, const void *data, int data_type, | 28 | u32 mask, const void *data, int data_type, |
| 31 | const unsigned char *file_name, u32 cookie, | 29 | const unsigned char *file_name, u32 cookie, |
| 32 | struct fsnotify_iter_info *iter_info); | 30 | struct fsnotify_iter_info *iter_info); |
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c index 40dedb37a1f3..9ab6dde38a14 100644 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c | |||
| @@ -65,12 +65,11 @@ static int inotify_merge(struct list_head *list, | |||
| 65 | 65 | ||
| 66 | int inotify_handle_event(struct fsnotify_group *group, | 66 | int inotify_handle_event(struct fsnotify_group *group, |
| 67 | struct inode *inode, | 67 | struct inode *inode, |
| 68 | struct fsnotify_mark *inode_mark, | ||
| 69 | struct fsnotify_mark *vfsmount_mark, | ||
| 70 | u32 mask, const void *data, int data_type, | 68 | u32 mask, const void *data, int data_type, |
| 71 | const unsigned char *file_name, u32 cookie, | 69 | const unsigned char *file_name, u32 cookie, |
| 72 | struct fsnotify_iter_info *iter_info) | 70 | struct fsnotify_iter_info *iter_info) |
| 73 | { | 71 | { |
| 72 | struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); | ||
| 74 | struct inotify_inode_mark *i_mark; | 73 | struct inotify_inode_mark *i_mark; |
| 75 | struct inotify_event_info *event; | 74 | struct inotify_event_info *event; |
| 76 | struct fsnotify_event *fsn_event; | 75 | struct fsnotify_event *fsn_event; |
| @@ -78,7 +77,8 @@ int inotify_handle_event(struct fsnotify_group *group, | |||
| 78 | int len = 0; | 77 | int len = 0; |
| 79 | int alloc_len = sizeof(struct inotify_event_info); | 78 | int alloc_len = sizeof(struct inotify_event_info); |
| 80 | 79 | ||
| 81 | BUG_ON(vfsmount_mark); | 80 | if (WARN_ON(fsnotify_iter_vfsmount_mark(iter_info))) |
| 81 | return 0; | ||
| 82 | 82 | ||
| 83 | if ((inode_mark->mask & FS_EXCL_UNLINK) && | 83 | if ((inode_mark->mask & FS_EXCL_UNLINK) && |
| 84 | (data_type == FSNOTIFY_EVENT_PATH)) { | 84 | (data_type == FSNOTIFY_EVENT_PATH)) { |
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index ef32f3657958..1cf5b779d862 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c | |||
| @@ -485,10 +485,14 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark, | |||
| 485 | struct fsnotify_group *group) | 485 | struct fsnotify_group *group) |
| 486 | { | 486 | { |
| 487 | struct inotify_inode_mark *i_mark; | 487 | struct inotify_inode_mark *i_mark; |
| 488 | struct fsnotify_iter_info iter_info = { }; | ||
| 489 | |||
| 490 | fsnotify_iter_set_report_type_mark(&iter_info, FSNOTIFY_OBJ_TYPE_INODE, | ||
| 491 | fsn_mark); | ||
| 488 | 492 | ||
| 489 | /* Queue ignore event for the watch */ | 493 | /* Queue ignore event for the watch */ |
| 490 | inotify_handle_event(group, NULL, fsn_mark, NULL, FS_IN_IGNORED, | 494 | inotify_handle_event(group, NULL, FS_IN_IGNORED, NULL, |
| 491 | NULL, FSNOTIFY_EVENT_NONE, NULL, 0, NULL); | 495 | FSNOTIFY_EVENT_NONE, NULL, 0, &iter_info); |
| 492 | 496 | ||
| 493 | i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); | 497 | i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); |
| 494 | /* remove this mark from the idr */ | 498 | /* remove this mark from the idr */ |
| @@ -578,7 +582,7 @@ static int inotify_new_watch(struct fsnotify_group *group, | |||
| 578 | } | 582 | } |
| 579 | 583 | ||
| 580 | /* we are on the idr, now get on the inode */ | 584 | /* we are on the idr, now get on the inode */ |
| 581 | ret = fsnotify_add_mark_locked(&tmp_i_mark->fsn_mark, inode, NULL, 0); | 585 | ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0); |
| 582 | if (ret) { | 586 | if (ret) { |
| 583 | /* we failed to get on the inode, get off the idr */ | 587 | /* we failed to get on the inode, get off the idr */ |
| 584 | inotify_remove_from_idr(group, tmp_i_mark); | 588 | inotify_remove_from_idr(group, tmp_i_mark); |
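With the mark arguments gone from ->handle_event(), internal callers that used to pass a mark directly now build a one-shot iter_info on the stack, as inotify does above when queuing IN_IGNORED. The shape of the pattern, lifted from the hunk:

struct fsnotify_iter_info iter_info = { };

/* Select the inode slot, point it at the mark we already hold,
 * then invoke the handler exactly as the core would.
 */
fsnotify_iter_set_report_type_mark(&iter_info, FSNOTIFY_OBJ_TYPE_INODE,
				   fsn_mark);
inotify_handle_event(group, NULL, FS_IN_IGNORED, NULL,
		     FSNOTIFY_EVENT_NONE, NULL, 0, &iter_info);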
diff --git a/fs/notify/mark.c b/fs/notify/mark.c index e9191b416434..61f4c5fa34c7 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c | |||
| @@ -119,9 +119,9 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) | |||
| 119 | if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) | 119 | if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) |
| 120 | new_mask |= mark->mask; | 120 | new_mask |= mark->mask; |
| 121 | } | 121 | } |
| 122 | if (conn->flags & FSNOTIFY_OBJ_TYPE_INODE) | 122 | if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) |
| 123 | conn->inode->i_fsnotify_mask = new_mask; | 123 | conn->inode->i_fsnotify_mask = new_mask; |
| 124 | else if (conn->flags & FSNOTIFY_OBJ_TYPE_VFSMOUNT) | 124 | else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) |
| 125 | real_mount(conn->mnt)->mnt_fsnotify_mask = new_mask; | 125 | real_mount(conn->mnt)->mnt_fsnotify_mask = new_mask; |
| 126 | } | 126 | } |
| 127 | 127 | ||
| @@ -139,7 +139,7 @@ void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) | |||
| 139 | spin_lock(&conn->lock); | 139 | spin_lock(&conn->lock); |
| 140 | __fsnotify_recalc_mask(conn); | 140 | __fsnotify_recalc_mask(conn); |
| 141 | spin_unlock(&conn->lock); | 141 | spin_unlock(&conn->lock); |
| 142 | if (conn->flags & FSNOTIFY_OBJ_TYPE_INODE) | 142 | if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) |
| 143 | __fsnotify_update_child_dentry_flags(conn->inode); | 143 | __fsnotify_update_child_dentry_flags(conn->inode); |
| 144 | } | 144 | } |
| 145 | 145 | ||
| @@ -166,18 +166,18 @@ static struct inode *fsnotify_detach_connector_from_object( | |||
| 166 | { | 166 | { |
| 167 | struct inode *inode = NULL; | 167 | struct inode *inode = NULL; |
| 168 | 168 | ||
| 169 | if (conn->flags & FSNOTIFY_OBJ_TYPE_INODE) { | 169 | if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) { |
| 170 | inode = conn->inode; | 170 | inode = conn->inode; |
| 171 | rcu_assign_pointer(inode->i_fsnotify_marks, NULL); | 171 | rcu_assign_pointer(inode->i_fsnotify_marks, NULL); |
| 172 | inode->i_fsnotify_mask = 0; | 172 | inode->i_fsnotify_mask = 0; |
| 173 | conn->inode = NULL; | 173 | conn->inode = NULL; |
| 174 | conn->flags &= ~FSNOTIFY_OBJ_TYPE_INODE; | 174 | conn->type = FSNOTIFY_OBJ_TYPE_DETACHED; |
| 175 | } else if (conn->flags & FSNOTIFY_OBJ_TYPE_VFSMOUNT) { | 175 | } else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) { |
| 176 | rcu_assign_pointer(real_mount(conn->mnt)->mnt_fsnotify_marks, | 176 | rcu_assign_pointer(real_mount(conn->mnt)->mnt_fsnotify_marks, |
| 177 | NULL); | 177 | NULL); |
| 178 | real_mount(conn->mnt)->mnt_fsnotify_mask = 0; | 178 | real_mount(conn->mnt)->mnt_fsnotify_mask = 0; |
| 179 | conn->mnt = NULL; | 179 | conn->mnt = NULL; |
| 180 | conn->flags &= ~FSNOTIFY_OBJ_TYPE_VFSMOUNT; | 180 | conn->type = FSNOTIFY_OBJ_TYPE_DETACHED; |
| 181 | } | 181 | } |
| 182 | 182 | ||
| 183 | return inode; | 183 | return inode; |
| @@ -294,12 +294,12 @@ static void fsnotify_put_mark_wake(struct fsnotify_mark *mark) | |||
| 294 | 294 | ||
| 295 | bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info) | 295 | bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info) |
| 296 | { | 296 | { |
| 297 | /* This can fail if mark is being removed */ | 297 | int type; |
| 298 | if (!fsnotify_get_mark_safe(iter_info->inode_mark)) | 298 | |
| 299 | return false; | 299 | fsnotify_foreach_obj_type(type) { |
| 300 | if (!fsnotify_get_mark_safe(iter_info->vfsmount_mark)) { | 300 | /* This can fail if mark is being removed */ |
| 301 | fsnotify_put_mark_wake(iter_info->inode_mark); | 301 | if (!fsnotify_get_mark_safe(iter_info->marks[type])) |
| 302 | return false; | 302 | goto fail; |
| 303 | } | 303 | } |
| 304 | 304 | ||
| 305 | /* | 305 | /* |
| @@ -310,13 +310,20 @@ bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info) | |||
| 310 | srcu_read_unlock(&fsnotify_mark_srcu, iter_info->srcu_idx); | 310 | srcu_read_unlock(&fsnotify_mark_srcu, iter_info->srcu_idx); |
| 311 | 311 | ||
| 312 | return true; | 312 | return true; |
| 313 | |||
| 314 | fail: | ||
| 315 | for (type--; type >= 0; type--) | ||
| 316 | fsnotify_put_mark_wake(iter_info->marks[type]); | ||
| 317 | return false; | ||
| 313 | } | 318 | } |
| 314 | 319 | ||
| 315 | void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info) | 320 | void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info) |
| 316 | { | 321 | { |
| 322 | int type; | ||
| 323 | |||
| 317 | iter_info->srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); | 324 | iter_info->srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); |
| 318 | fsnotify_put_mark_wake(iter_info->inode_mark); | 325 | fsnotify_foreach_obj_type(type) |
| 319 | fsnotify_put_mark_wake(iter_info->vfsmount_mark); | 326 | fsnotify_put_mark_wake(iter_info->marks[type]); |
| 320 | } | 327 | } |
| 321 | 328 | ||
| 322 | /* | 329 | /* |
| @@ -442,10 +449,10 @@ static int fsnotify_attach_connector_to_object( | |||
| 442 | spin_lock_init(&conn->lock); | 449 | spin_lock_init(&conn->lock); |
| 443 | INIT_HLIST_HEAD(&conn->list); | 450 | INIT_HLIST_HEAD(&conn->list); |
| 444 | if (inode) { | 451 | if (inode) { |
| 445 | conn->flags = FSNOTIFY_OBJ_TYPE_INODE; | 452 | conn->type = FSNOTIFY_OBJ_TYPE_INODE; |
| 446 | conn->inode = igrab(inode); | 453 | conn->inode = igrab(inode); |
| 447 | } else { | 454 | } else { |
| 448 | conn->flags = FSNOTIFY_OBJ_TYPE_VFSMOUNT; | 455 | conn->type = FSNOTIFY_OBJ_TYPE_VFSMOUNT; |
| 449 | conn->mnt = mnt; | 456 | conn->mnt = mnt; |
| 450 | } | 457 | } |
| 451 | /* | 458 | /* |
| @@ -479,8 +486,7 @@ static struct fsnotify_mark_connector *fsnotify_grab_connector( | |||
| 479 | if (!conn) | 486 | if (!conn) |
| 480 | goto out; | 487 | goto out; |
| 481 | spin_lock(&conn->lock); | 488 | spin_lock(&conn->lock); |
| 482 | if (!(conn->flags & (FSNOTIFY_OBJ_TYPE_INODE | | 489 | if (conn->type == FSNOTIFY_OBJ_TYPE_DETACHED) { |
| 483 | FSNOTIFY_OBJ_TYPE_VFSMOUNT))) { | ||
| 484 | spin_unlock(&conn->lock); | 490 | spin_unlock(&conn->lock); |
| 485 | srcu_read_unlock(&fsnotify_mark_srcu, idx); | 491 | srcu_read_unlock(&fsnotify_mark_srcu, idx); |
| 486 | return NULL; | 492 | return NULL; |
| @@ -646,16 +652,16 @@ struct fsnotify_mark *fsnotify_find_mark( | |||
| 646 | return NULL; | 652 | return NULL; |
| 647 | } | 653 | } |
| 648 | 654 | ||
| 649 | /* Clear any marks in a group with given type */ | 655 | /* Clear any marks in a group with given type mask */ |
| 650 | void fsnotify_clear_marks_by_group(struct fsnotify_group *group, | 656 | void fsnotify_clear_marks_by_group(struct fsnotify_group *group, |
| 651 | unsigned int type) | 657 | unsigned int type_mask) |
| 652 | { | 658 | { |
| 653 | struct fsnotify_mark *lmark, *mark; | 659 | struct fsnotify_mark *lmark, *mark; |
| 654 | LIST_HEAD(to_free); | 660 | LIST_HEAD(to_free); |
| 655 | struct list_head *head = &to_free; | 661 | struct list_head *head = &to_free; |
| 656 | 662 | ||
| 657 | /* Skip selection step if we want to clear all marks. */ | 663 | /* Skip selection step if we want to clear all marks. */ |
| 658 | if (type == FSNOTIFY_OBJ_ALL_TYPES) { | 664 | if (type_mask == FSNOTIFY_OBJ_ALL_TYPES_MASK) { |
| 659 | head = &group->marks_list; | 665 | head = &group->marks_list; |
| 660 | goto clear; | 666 | goto clear; |
| 661 | } | 667 | } |
| @@ -670,7 +676,7 @@ void fsnotify_clear_marks_by_group(struct fsnotify_group *group, | |||
| 670 | */ | 676 | */ |
| 671 | mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); | 677 | mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); |
| 672 | list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) { | 678 | list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) { |
| 673 | if (mark->connector->flags & type) | 679 | if ((1U << mark->connector->type) & type_mask) |
| 674 | list_move(&mark->g_list, &to_free); | 680 | list_move(&mark->g_list, &to_free); |
| 675 | } | 681 | } |
| 676 | mutex_unlock(&group->mark_mutex); | 682 | mutex_unlock(&group->mark_mutex); |
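The reworked fsnotify_prepare_user_wait() above is a textbook acquire-all-or-unwind loop: take a reference per object type, and on failure walk back over only the slots already taken. A generic sketch of the shape; try_get()/put() are hypothetical stand-ins for fsnotify_get_mark_safe()/fsnotify_put_mark_wake():

#include <linux/types.h>

#define NOBJ 3

static bool try_get(void *obj) { return obj != NULL; }	/* hypothetical */
static void put(void *obj)     { (void)obj; }		/* hypothetical */

static bool grab_all(void *obj[NOBJ])
{
	int i;

	for (i = 0; i < NOBJ; i++)
		if (!try_get(obj[i]))
			goto fail;
	return true;

fail:
	for (i--; i >= 0; i--)	/* release only what we acquired */
		put(obj[i]);
	return false;
}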
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c index 74b37cbbd5d4..33ee8cb32f83 100644 --- a/fs/orangefs/devorangefs-req.c +++ b/fs/orangefs/devorangefs-req.c | |||
| @@ -719,37 +719,6 @@ struct ORANGEFS_dev_map_desc32 { | |||
| 719 | __s32 count; | 719 | __s32 count; |
| 720 | }; | 720 | }; |
| 721 | 721 | ||
| 722 | static unsigned long translate_dev_map26(unsigned long args, long *error) | ||
| 723 | { | ||
| 724 | struct ORANGEFS_dev_map_desc32 __user *p32 = (void __user *)args; | ||
| 725 | /* | ||
| 726 | * Depending on the architecture, allocate some space on the | ||
| 727 | * user-call-stack based on our expected layout. | ||
| 728 | */ | ||
| 729 | struct ORANGEFS_dev_map_desc __user *p = | ||
| 730 | compat_alloc_user_space(sizeof(*p)); | ||
| 731 | compat_uptr_t addr; | ||
| 732 | |||
| 733 | *error = 0; | ||
| 734 | /* get the ptr from the 32 bit user-space */ | ||
| 735 | if (get_user(addr, &p32->ptr)) | ||
| 736 | goto err; | ||
| 737 | /* try to put that into a 64-bit layout */ | ||
| 738 | if (put_user(compat_ptr(addr), &p->ptr)) | ||
| 739 | goto err; | ||
| 740 | /* copy the remaining fields */ | ||
| 741 | if (copy_in_user(&p->total_size, &p32->total_size, sizeof(__s32))) | ||
| 742 | goto err; | ||
| 743 | if (copy_in_user(&p->size, &p32->size, sizeof(__s32))) | ||
| 744 | goto err; | ||
| 745 | if (copy_in_user(&p->count, &p32->count, sizeof(__s32))) | ||
| 746 | goto err; | ||
| 747 | return (unsigned long)p; | ||
| 748 | err: | ||
| 749 | *error = -EFAULT; | ||
| 750 | return 0; | ||
| 751 | } | ||
| 752 | |||
| 753 | /* | 722 | /* |
| 754 | * 32 bit user-space apps' ioctl handlers when the kernel module | 723 | * 32 bit user-space apps' ioctl handlers when the kernel module |
| 755 | * is compiled as a 64-bit one | 724 | * is compiled as a 64-bit one |
| @@ -758,25 +727,26 @@ static long orangefs_devreq_compat_ioctl(struct file *filp, unsigned int cmd, | |||
| 758 | unsigned long args) | 727 | unsigned long args) |
| 759 | { | 728 | { |
| 760 | long ret; | 729 | long ret; |
| 761 | unsigned long arg = args; | ||
| 762 | 730 | ||
| 763 | /* Check for properly constructed commands */ | 731 | /* Check for properly constructed commands */ |
| 764 | ret = check_ioctl_command(cmd); | 732 | ret = check_ioctl_command(cmd); |
| 765 | if (ret < 0) | 733 | if (ret < 0) |
| 766 | return ret; | 734 | return ret; |
| 767 | if (cmd == ORANGEFS_DEV_MAP) { | 735 | if (cmd == ORANGEFS_DEV_MAP) { |
| 768 | /* | 736 | struct ORANGEFS_dev_map_desc desc; |
| 769 | * convert the arguments to what we expect internally | 737 | struct ORANGEFS_dev_map_desc32 d32; |
| 770 | * in kernel space | 738 | |
| 771 | */ | 739 | if (copy_from_user(&d32, (void __user *)args, sizeof(d32))) |
| 772 | arg = translate_dev_map26(args, &ret); | 740 | return -EFAULT; |
| 773 | if (ret < 0) { | 741 | |
| 774 | gossip_err("Could not translate dev map\n"); | 742 | desc.ptr = compat_ptr(d32.ptr); |
| 775 | return ret; | 743 | desc.total_size = d32.total_size; |
| 776 | } | 744 | desc.size = d32.size; |
| 745 | desc.count = d32.count; | ||
| 746 | return orangefs_bufmap_initialize(&desc); | ||
| 777 | } | 747 | } |
| 778 | /* no other ioctl requires translation */ | 748 | /* no other ioctl requires translation */ |
| 779 | return dispatch_ioctl_command(cmd, arg); | 749 | return dispatch_ioctl_command(cmd, args); |
| 780 | } | 750 | } |
| 781 | 751 | ||
| 782 | #endif /* CONFIG_COMPAT is in .config */ | 752 | #endif /* CONFIG_COMPAT is in .config */ |
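The orangefs conversion drops compat_alloc_user_space() for the now-preferred pattern: copy the 32-bit layout into kernel memory, widen it into the native struct, and call the normal handler directly, so nothing is staged back onto the user stack. A reduced sketch with illustrative struct names:

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct desc   { void *ptr; __s32 total_size; __s32 size; __s32 count; };
struct desc32 { compat_uptr_t ptr; __s32 total_size; __s32 size; __s32 count; };

static long demo_compat_ioctl(unsigned long args)
{
	struct desc32 d32;
	struct desc d;

	if (copy_from_user(&d32, (void __user *)args, sizeof(d32)))
		return -EFAULT;

	d.ptr        = compat_ptr(d32.ptr);	/* widen the pointer */
	d.total_size = d32.total_size;
	d.size       = d32.size;
	d.count      = d32.count;
	/* hand 'd' to the native handler here */
	return 0;
}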
diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 7b4d9714f248..6ac1c92997ea 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c | |||
| @@ -409,7 +409,7 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent, | |||
| 409 | if (!ent) | 409 | if (!ent) |
| 410 | goto out; | 410 | goto out; |
| 411 | 411 | ||
| 412 | if (qstr.len + 1 <= sizeof(ent->inline_name)) { | 412 | if (qstr.len + 1 <= SIZEOF_PDE_INLINE_NAME) { |
| 413 | ent->name = ent->inline_name; | 413 | ent->name = ent->inline_name; |
| 414 | } else { | 414 | } else { |
| 415 | ent->name = kmalloc(qstr.len + 1, GFP_KERNEL); | 415 | ent->name = kmalloc(qstr.len + 1, GFP_KERNEL); |
| @@ -740,3 +740,27 @@ void *PDE_DATA(const struct inode *inode) | |||
| 740 | return __PDE_DATA(inode); | 740 | return __PDE_DATA(inode); |
| 741 | } | 741 | } |
| 742 | EXPORT_SYMBOL(PDE_DATA); | 742 | EXPORT_SYMBOL(PDE_DATA); |
| 743 | |||
| 744 | /* | ||
| 745 | * Pull a user buffer into memory and pass it to the file's write handler if | ||
| 746 | * one is supplied. The ->write() method is permitted to modify the | ||
| 747 | * kernel-side buffer. | ||
| 748 | */ | ||
| 749 | ssize_t proc_simple_write(struct file *f, const char __user *ubuf, size_t size, | ||
| 750 | loff_t *_pos) | ||
| 751 | { | ||
| 752 | struct proc_dir_entry *pde = PDE(file_inode(f)); | ||
| 753 | char *buf; | ||
| 754 | int ret; | ||
| 755 | |||
| 756 | if (!pde->write) | ||
| 757 | return -EACCES; | ||
| 758 | if (size == 0 || size > PAGE_SIZE - 1) | ||
| 759 | return -EINVAL; | ||
| 760 | buf = memdup_user_nul(ubuf, size); | ||
| 761 | if (IS_ERR(buf)) | ||
| 762 | return PTR_ERR(buf); | ||
| 763 | ret = pde->write(f, buf, size); | ||
| 764 | kfree(buf); | ||
| 765 | return ret == 0 ? size : ret; | ||
| 766 | } | ||
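proc_simple_write() centralises the usual write-handler boilerplate: bounds check, memdup_user_nul() for a NUL-terminated kernel copy, dispatch to ->write(), free. A consumer-side proc_write_t handler then only has to parse the buffer; a minimal sketch with an illustrative variable:

#include <linux/fs.h>
#include <linux/kernel.h>

static int demo_threshold;

/* By the time this runs, proc_simple_write() has copied and
 * NUL-terminated @buf; returning 0 reports the full write size.
 */
static int demo_write(struct file *file, char *buf, size_t size)
{
	return kstrtoint(buf, 0, &demo_threshold);
}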
diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 2cf3b74391ca..85ffbd27f288 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c | |||
| @@ -105,9 +105,8 @@ void __init proc_init_kmemcache(void) | |||
| 105 | kmem_cache_create("pde_opener", sizeof(struct pde_opener), 0, | 105 | kmem_cache_create("pde_opener", sizeof(struct pde_opener), 0, |
| 106 | SLAB_ACCOUNT|SLAB_PANIC, NULL); | 106 | SLAB_ACCOUNT|SLAB_PANIC, NULL); |
| 107 | proc_dir_entry_cache = kmem_cache_create_usercopy( | 107 | proc_dir_entry_cache = kmem_cache_create_usercopy( |
| 108 | "proc_dir_entry", sizeof(struct proc_dir_entry), 0, SLAB_PANIC, | 108 | "proc_dir_entry", SIZEOF_PDE_SLOT, 0, SLAB_PANIC, |
| 109 | offsetof(struct proc_dir_entry, inline_name), | 109 | OFFSETOF_PDE_NAME, SIZEOF_PDE_INLINE_NAME, NULL); |
| 110 | sizeof_field(struct proc_dir_entry, inline_name), NULL); | ||
| 111 | } | 110 | } |
| 112 | 111 | ||
| 113 | static int proc_show_options(struct seq_file *seq, struct dentry *root) | 112 | static int proc_show_options(struct seq_file *seq, struct dentry *root) |
diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 50cb22a08c2f..da3dbfa09e79 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h | |||
| @@ -48,6 +48,7 @@ struct proc_dir_entry { | |||
| 48 | const struct seq_operations *seq_ops; | 48 | const struct seq_operations *seq_ops; |
| 49 | int (*single_show)(struct seq_file *, void *); | 49 | int (*single_show)(struct seq_file *, void *); |
| 50 | }; | 50 | }; |
| 51 | proc_write_t write; | ||
| 51 | void *data; | 52 | void *data; |
| 52 | unsigned int state_size; | 53 | unsigned int state_size; |
| 53 | unsigned int low_ino; | 54 | unsigned int low_ino; |
| @@ -61,14 +62,20 @@ struct proc_dir_entry { | |||
| 61 | char *name; | 62 | char *name; |
| 62 | umode_t mode; | 63 | umode_t mode; |
| 63 | u8 namelen; | 64 | u8 namelen; |
| 64 | #ifdef CONFIG_64BIT | 65 | char inline_name[]; |
| 65 | #define SIZEOF_PDE_INLINE_NAME (192-155) | ||
| 66 | #else | ||
| 67 | #define SIZEOF_PDE_INLINE_NAME (128-95) | ||
| 68 | #endif | ||
| 69 | char inline_name[SIZEOF_PDE_INLINE_NAME]; | ||
| 70 | } __randomize_layout; | 66 | } __randomize_layout; |
| 71 | 67 | ||
| 68 | #define OFFSETOF_PDE_NAME offsetof(struct proc_dir_entry, inline_name) | ||
| 69 | #define SIZEOF_PDE_SLOT \ | ||
| 70 | (OFFSETOF_PDE_NAME + 34 <= 64 ? 64 : \ | ||
| 71 | OFFSETOF_PDE_NAME + 34 <= 128 ? 128 : \ | ||
| 72 | OFFSETOF_PDE_NAME + 34 <= 192 ? 192 : \ | ||
| 73 | OFFSETOF_PDE_NAME + 34 <= 256 ? 256 : \ | ||
| 74 | OFFSETOF_PDE_NAME + 34 <= 512 ? 512 : \ | ||
| 75 | 0) | ||
| 76 | |||
| 77 | #define SIZEOF_PDE_INLINE_NAME (SIZEOF_PDE_SLOT - OFFSETOF_PDE_NAME) | ||
| 78 | |||
| 72 | extern struct kmem_cache *proc_dir_entry_cache; | 79 | extern struct kmem_cache *proc_dir_entry_cache; |
| 73 | void pde_free(struct proc_dir_entry *pde); | 80 | void pde_free(struct proc_dir_entry *pde); |
| 74 | 81 | ||
| @@ -189,6 +196,7 @@ static inline bool is_empty_pde(const struct proc_dir_entry *pde) | |||
| 189 | { | 196 | { |
| 190 | return S_ISDIR(pde->mode) && !pde->proc_iops; | 197 | return S_ISDIR(pde->mode) && !pde->proc_iops; |
| 191 | } | 198 | } |
| 199 | extern ssize_t proc_simple_write(struct file *, const char __user *, size_t, loff_t *); | ||
| 192 | 200 | ||
| 193 | /* | 201 | /* |
| 194 | * inode.c | 202 | * inode.c |
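inline_name becomes a flexible array member, and the slab slot is sized at compile time: the name offset plus at least 34 bytes, rounded up to the next candidate slot size (64/128/192/256/512), with the inline name getting whatever remains. A compile-time sanity sketch of what the macros guarantee (the 34-byte floor is simply the constant used above):

#include <linux/build_bug.h>

static inline void pde_slot_sanity(void)
{
	BUILD_BUG_ON(SIZEOF_PDE_SLOT == 0);		/* a slot was found */
	BUILD_BUG_ON(SIZEOF_PDE_INLINE_NAME < 34);	/* name space remains */
}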
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index 7d94fa005b0d..d5e0fcb3439e 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c | |||
| @@ -46,6 +46,9 @@ static int seq_open_net(struct inode *inode, struct file *file) | |||
| 46 | 46 | ||
| 47 | WARN_ON_ONCE(state_size < sizeof(*p)); | 47 | WARN_ON_ONCE(state_size < sizeof(*p)); |
| 48 | 48 | ||
| 49 | if (file->f_mode & FMODE_WRITE && !PDE(inode)->write) | ||
| 50 | return -EACCES; | ||
| 51 | |||
| 49 | net = get_proc_net(inode); | 52 | net = get_proc_net(inode); |
| 50 | if (!net) | 53 | if (!net) |
| 51 | return -ENXIO; | 54 | return -ENXIO; |
| @@ -73,6 +76,7 @@ static int seq_release_net(struct inode *ino, struct file *f) | |||
| 73 | static const struct file_operations proc_net_seq_fops = { | 76 | static const struct file_operations proc_net_seq_fops = { |
| 74 | .open = seq_open_net, | 77 | .open = seq_open_net, |
| 75 | .read = seq_read, | 78 | .read = seq_read, |
| 79 | .write = proc_simple_write, | ||
| 76 | .llseek = seq_lseek, | 80 | .llseek = seq_lseek, |
| 77 | .release = seq_release_net, | 81 | .release = seq_release_net, |
| 78 | }; | 82 | }; |
| @@ -93,6 +97,50 @@ struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode, | |||
| 93 | } | 97 | } |
| 94 | EXPORT_SYMBOL_GPL(proc_create_net_data); | 98 | EXPORT_SYMBOL_GPL(proc_create_net_data); |
| 95 | 99 | ||
| 100 | /** | ||
| 101 | * proc_create_net_data_write - Create a writable net_ns-specific proc file | ||
| 102 | * @name: The name of the file. | ||
| 103 | * @mode: The file's access mode. | ||
| 104 | * @parent: The parent directory in which to create. | ||
| 105 | * @ops: The seq_file ops with which to read the file. | ||
| 106 | * @write: The write method with which to 'modify' the file. | ||
| 107 | * @data: Data for retrieval by PDE_DATA(). | ||
| 108 | * | ||
| 109 | * Create a network-namespaced proc file in the @parent directory with the | ||
| 110 | * specified @name and @mode that allows reading of a file that displays a | ||
| 111 | * series of elements and also provides for the file accepting writes that have | ||
| 112 | * some arbitrary effect. | ||
| 113 | * | ||
| 114 | * The functions in the @ops table are used to iterate over items to be | ||
| 115 | * presented and extract the readable content using the seq_file interface. | ||
| 116 | * | ||
| 117 | * The @write function is called with the data copied into a kernel space | ||
| 118 | * scratch buffer and has a NUL appended for convenience. The buffer may be | ||
| 119 | * modified by the @write function. @write should return 0 on success. | ||
| 120 | * | ||
| 121 | * The @data value is accessible from the @show and @write functions by calling | ||
| 122 | * PDE_DATA() on the file inode. The network namespace must be accessed by | ||
| 123 | * calling seq_file_net() on the seq_file struct. | ||
| 124 | */ | ||
| 125 | struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode, | ||
| 126 | struct proc_dir_entry *parent, | ||
| 127 | const struct seq_operations *ops, | ||
| 128 | proc_write_t write, | ||
| 129 | unsigned int state_size, void *data) | ||
| 130 | { | ||
| 131 | struct proc_dir_entry *p; | ||
| 132 | |||
| 133 | p = proc_create_reg(name, mode, &parent, data); | ||
| 134 | if (!p) | ||
| 135 | return NULL; | ||
| 136 | p->proc_fops = &proc_net_seq_fops; | ||
| 137 | p->seq_ops = ops; | ||
| 138 | p->state_size = state_size; | ||
| 139 | p->write = write; | ||
| 140 | return proc_register(parent, p); | ||
| 141 | } | ||
| 142 | EXPORT_SYMBOL_GPL(proc_create_net_data_write); | ||
| 143 | |||
| 96 | static int single_open_net(struct inode *inode, struct file *file) | 144 | static int single_open_net(struct inode *inode, struct file *file) |
| 97 | { | 145 | { |
| 98 | struct proc_dir_entry *de = PDE(inode); | 146 | struct proc_dir_entry *de = PDE(inode); |
| @@ -119,6 +167,7 @@ static int single_release_net(struct inode *ino, struct file *f) | |||
| 119 | static const struct file_operations proc_net_single_fops = { | 167 | static const struct file_operations proc_net_single_fops = { |
| 120 | .open = single_open_net, | 168 | .open = single_open_net, |
| 121 | .read = seq_read, | 169 | .read = seq_read, |
| 170 | .write = proc_simple_write, | ||
| 122 | .llseek = seq_lseek, | 171 | .llseek = seq_lseek, |
| 123 | .release = single_release_net, | 172 | .release = single_release_net, |
| 124 | }; | 173 | }; |
| @@ -138,6 +187,49 @@ struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode, | |||
| 138 | } | 187 | } |
| 139 | EXPORT_SYMBOL_GPL(proc_create_net_single); | 188 | EXPORT_SYMBOL_GPL(proc_create_net_single); |
| 140 | 189 | ||
| 190 | /** | ||
| 191 | * proc_create_net_single_write - Create a writable net_ns-specific proc file | ||
| 192 | * @name: The name of the file. | ||
| 193 | * @mode: The file's access mode. | ||
| 194 | * @parent: The parent directory in which to create. | ||
| 195 | * @show: The seqfile show method with which to read the file. | ||
| 196 | * @write: The write method with which to 'modify' the file. | ||
| 197 | * @data: Data for retrieval by PDE_DATA(). | ||
| 198 | * | ||
| 199 | * Create a network-namespaced proc file in the @parent directory with the | ||
| 200 | * specified @name and @mode that allows reading of a file that displays a | ||
| 201 | * single element rather than a series and also provides for the file accepting | ||
| 202 | * writes that have some arbitrary effect. | ||
| 203 | * | ||
| 204 | * The @show function is called to extract the readable content via the | ||
| 205 | * seq_file interface. | ||
| 206 | * | ||
| 207 | * The @write function is called with the data copied into a kernel space | ||
| 208 | * scratch buffer and has a NUL appended for convenience. The buffer may be | ||
| 209 | * modified by the @write function. @write should return 0 on success. | ||
| 210 | * | ||
| 211 | * The @data value is accessible from the @show and @write functions by calling | ||
| 212 | * PDE_DATA() on the file inode. The network namespace must be accessed by | ||
| 213 | * calling seq_file_single_net() on the seq_file struct. | ||
| 214 | */ | ||
| 215 | struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mode, | ||
| 216 | struct proc_dir_entry *parent, | ||
| 217 | int (*show)(struct seq_file *, void *), | ||
| 218 | proc_write_t write, | ||
| 219 | void *data) | ||
| 220 | { | ||
| 221 | struct proc_dir_entry *p; | ||
| 222 | |||
| 223 | p = proc_create_reg(name, mode, &parent, data); | ||
| 224 | if (!p) | ||
| 225 | return NULL; | ||
| 226 | p->proc_fops = &proc_net_single_fops; | ||
| 227 | p->single_show = show; | ||
| 228 | p->write = write; | ||
| 229 | return proc_register(parent, p); | ||
| 230 | } | ||
| 231 | EXPORT_SYMBOL_GPL(proc_create_net_single_write); | ||
| 232 | |||
| 141 | static struct net *get_proc_task_net(struct inode *dir) | 233 | static struct net *get_proc_task_net(struct inode *dir) |
| 142 | { | 234 | { |
| 143 | struct task_struct *task; | 235 | struct task_struct *task; |
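A usage sketch tying the two halves of the new API together: a read side rendered by a show function and a write side parsed from the pre-copied buffer. All demo_* names and the file name are illustrative:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>

static int demo_val;

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", demo_val);
	return 0;
}

static int demo_write(struct file *file, char *buf, size_t size)
{
	return kstrtoint(buf, 0, &demo_val);	/* buf is NUL-terminated */
}

/* From a pernet init hook: one readable, writable file per netns. */
static int __net_init demo_net_init(struct net *net)
{
	if (!proc_create_net_single_write("demo_ctl", 0644, net->proc_net,
					  demo_show, demo_write, NULL))
		return -ENOMEM;
	return 0;
}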
diff --git a/fs/proc/root.c b/fs/proc/root.c index 61b7340b357a..f4b1a9d2eca6 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c | |||
| @@ -204,8 +204,7 @@ struct proc_dir_entry proc_root = { | |||
| 204 | .proc_fops = &proc_root_operations, | 204 | .proc_fops = &proc_root_operations, |
| 205 | .parent = &proc_root, | 205 | .parent = &proc_root, |
| 206 | .subdir = RB_ROOT, | 206 | .subdir = RB_ROOT, |
| 207 | .name = proc_root.inline_name, | 207 | .name = "/proc", |
| 208 | .inline_name = "/proc", | ||
| 209 | }; | 208 | }; |
| 210 | 209 | ||
| 211 | int pid_ns_prepare_proc(struct pid_namespace *ns) | 210 | int pid_ns_prepare_proc(struct pid_namespace *ns) |
diff --git a/fs/signalfd.c b/fs/signalfd.c index cbb42f77a2bd..4fcd1498acf5 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c | |||
| @@ -259,10 +259,8 @@ static const struct file_operations signalfd_fops = { | |||
| 259 | .llseek = noop_llseek, | 259 | .llseek = noop_llseek, |
| 260 | }; | 260 | }; |
| 261 | 261 | ||
| 262 | static int do_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, | 262 | static int do_signalfd4(int ufd, sigset_t *mask, int flags) |
| 263 | int flags) | ||
| 264 | { | 263 | { |
| 265 | sigset_t sigmask; | ||
| 266 | struct signalfd_ctx *ctx; | 264 | struct signalfd_ctx *ctx; |
| 267 | 265 | ||
| 268 | /* Check the SFD_* constants for consistency. */ | 266 | /* Check the SFD_* constants for consistency. */ |
| @@ -272,18 +270,15 @@ static int do_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, | |||
| 272 | if (flags & ~(SFD_CLOEXEC | SFD_NONBLOCK)) | 270 | if (flags & ~(SFD_CLOEXEC | SFD_NONBLOCK)) |
| 273 | return -EINVAL; | 271 | return -EINVAL; |
| 274 | 272 | ||
| 275 | if (sizemask != sizeof(sigset_t) || | 273 | sigdelsetmask(mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); |
| 276 | copy_from_user(&sigmask, user_mask, sizeof(sigmask))) | 274 | signotset(mask); |
| 277 | return -EINVAL; | ||
| 278 | sigdelsetmask(&sigmask, sigmask(SIGKILL) | sigmask(SIGSTOP)); | ||
| 279 | signotset(&sigmask); | ||
| 280 | 275 | ||
| 281 | if (ufd == -1) { | 276 | if (ufd == -1) { |
| 282 | ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); | 277 | ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); |
| 283 | if (!ctx) | 278 | if (!ctx) |
| 284 | return -ENOMEM; | 279 | return -ENOMEM; |
| 285 | 280 | ||
| 286 | ctx->sigmask = sigmask; | 281 | ctx->sigmask = *mask; |
| 287 | 282 | ||
| 288 | /* | 283 | /* |
| 289 | * When we call this, the initialization must be complete, since | 284 | * When we call this, the initialization must be complete, since |
| @@ -303,7 +298,7 @@ static int do_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, | |||
| 303 | return -EINVAL; | 298 | return -EINVAL; |
| 304 | } | 299 | } |
| 305 | spin_lock_irq(¤t->sighand->siglock); | 300 | spin_lock_irq(¤t->sighand->siglock); |
| 306 | ctx->sigmask = sigmask; | 301 | ctx->sigmask = *mask; |
| 307 | spin_unlock_irq(¤t->sighand->siglock); | 302 | spin_unlock_irq(¤t->sighand->siglock); |
| 308 | 303 | ||
| 309 | wake_up(¤t->sighand->signalfd_wqh); | 304 | wake_up(¤t->sighand->signalfd_wqh); |
| @@ -316,46 +311,51 @@ static int do_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, | |||
| 316 | SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask, | 311 | SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask, |
| 317 | size_t, sizemask, int, flags) | 312 | size_t, sizemask, int, flags) |
| 318 | { | 313 | { |
| 319 | return do_signalfd4(ufd, user_mask, sizemask, flags); | 314 | sigset_t mask; |
| 315 | |||
| 316 | if (sizemask != sizeof(sigset_t) || | ||
| 317 | copy_from_user(&mask, user_mask, sizeof(mask))) | ||
| 318 | return -EINVAL; | ||
| 319 | return do_signalfd4(ufd, &mask, flags); | ||
| 320 | } | 320 | } |
| 321 | 321 | ||
| 322 | SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask, | 322 | SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask, |
| 323 | size_t, sizemask) | 323 | size_t, sizemask) |
| 324 | { | 324 | { |
| 325 | return do_signalfd4(ufd, user_mask, sizemask, 0); | 325 | sigset_t mask; |
| 326 | |||
| 327 | if (sizemask != sizeof(sigset_t) || | ||
| 328 | copy_from_user(&mask, user_mask, sizeof(mask))) | ||
| 329 | return -EINVAL; | ||
| 330 | return do_signalfd4(ufd, &mask, 0); | ||
| 326 | } | 331 | } |
| 327 | 332 | ||
| 328 | #ifdef CONFIG_COMPAT | 333 | #ifdef CONFIG_COMPAT |
| 329 | static long do_compat_signalfd4(int ufd, | 334 | static long do_compat_signalfd4(int ufd, |
| 330 | const compat_sigset_t __user *sigmask, | 335 | const compat_sigset_t __user *user_mask, |
| 331 | compat_size_t sigsetsize, int flags) | 336 | compat_size_t sigsetsize, int flags) |
| 332 | { | 337 | { |
| 333 | sigset_t tmp; | 338 | sigset_t mask; |
| 334 | sigset_t __user *ksigmask; | ||
| 335 | 339 | ||
| 336 | if (sigsetsize != sizeof(compat_sigset_t)) | 340 | if (sigsetsize != sizeof(compat_sigset_t)) |
| 337 | return -EINVAL; | 341 | return -EINVAL; |
| 338 | if (get_compat_sigset(&tmp, sigmask)) | 342 | if (get_compat_sigset(&mask, user_mask)) |
| 339 | return -EFAULT; | ||
| 340 | ksigmask = compat_alloc_user_space(sizeof(sigset_t)); | ||
| 341 | if (copy_to_user(ksigmask, &tmp, sizeof(sigset_t))) | ||
| 342 | return -EFAULT; | 343 | return -EFAULT; |
| 343 | 344 | return do_signalfd4(ufd, &mask, flags); | |
| 344 | return do_signalfd4(ufd, ksigmask, sizeof(sigset_t), flags); | ||
| 345 | } | 345 | } |
| 346 | 346 | ||
| 347 | COMPAT_SYSCALL_DEFINE4(signalfd4, int, ufd, | 347 | COMPAT_SYSCALL_DEFINE4(signalfd4, int, ufd, |
| 348 | const compat_sigset_t __user *, sigmask, | 348 | const compat_sigset_t __user *, user_mask, |
| 349 | compat_size_t, sigsetsize, | 349 | compat_size_t, sigsetsize, |
| 350 | int, flags) | 350 | int, flags) |
| 351 | { | 351 | { |
| 352 | return do_compat_signalfd4(ufd, sigmask, sigsetsize, flags); | 352 | return do_compat_signalfd4(ufd, user_mask, sigsetsize, flags); |
| 353 | } | 353 | } |
| 354 | 354 | ||
| 355 | COMPAT_SYSCALL_DEFINE3(signalfd, int, ufd, | 355 | COMPAT_SYSCALL_DEFINE3(signalfd, int, ufd, |
| 356 | const compat_sigset_t __user *,sigmask, | 356 | const compat_sigset_t __user *, user_mask, |
| 357 | compat_size_t, sigsetsize) | 357 | compat_size_t, sigsetsize) |
| 358 | { | 358 | { |
| 359 | return do_compat_signalfd4(ufd, sigmask, sigsetsize, 0); | 359 | return do_compat_signalfd4(ufd, user_mask, sigsetsize, 0); |
| 360 | } | 360 | } |
| 361 | #endif | 361 | #endif |
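The refactoring above only moves the sizemask check and copy_from_user() out of do_signalfd4() and into the syscall wrappers; the user-visible ABI is unchanged. As a reminder of what that ABI looks like from the other side, here is an illustrative userspace consumer of signalfd(2) (the glibc wrapper passes sizeof(sigset_t) as sizemask):

#include <sys/signalfd.h>
#include <signal.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	sigset_t mask;
	struct signalfd_siginfo si;
	int fd;

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	sigprocmask(SIG_BLOCK, &mask, NULL);	/* else the signal is delivered normally */

	fd = signalfd(-1, &mask, SFD_CLOEXEC);
	if (fd < 0 || read(fd, &si, sizeof(si)) != sizeof(si))
		return 1;
	printf("got signal %u\n", si.ssi_signo);
	return 0;
}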
diff --git a/fs/splice.c b/fs/splice.c index 2365ab073a27..b3daa971f597 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
| @@ -1243,38 +1243,26 @@ static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf, | |||
| 1243 | * For lack of a better implementation, implement vmsplice() to userspace | 1243 | * For lack of a better implementation, implement vmsplice() to userspace |
| 1244 | * as a simple copy of the pipe's pages to the user iov. | 1244 | * as a simple copy of the pipe's pages to the user iov. |
| 1245 | */ | 1245 | */ |
| 1246 | static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov, | 1246 | static long vmsplice_to_user(struct file *file, struct iov_iter *iter, |
| 1247 | unsigned long nr_segs, unsigned int flags) | 1247 | unsigned int flags) |
| 1248 | { | 1248 | { |
| 1249 | struct pipe_inode_info *pipe; | 1249 | struct pipe_inode_info *pipe = get_pipe_info(file); |
| 1250 | struct splice_desc sd; | 1250 | struct splice_desc sd = { |
| 1251 | long ret; | 1251 | .total_len = iov_iter_count(iter), |
| 1252 | struct iovec iovstack[UIO_FASTIOV]; | 1252 | .flags = flags, |
| 1253 | struct iovec *iov = iovstack; | 1253 | .u.data = iter |
| 1254 | struct iov_iter iter; | 1254 | }; |
| 1255 | long ret = 0; | ||
| 1255 | 1256 | ||
| 1256 | pipe = get_pipe_info(file); | ||
| 1257 | if (!pipe) | 1257 | if (!pipe) |
| 1258 | return -EBADF; | 1258 | return -EBADF; |
| 1259 | 1259 | ||
| 1260 | ret = import_iovec(READ, uiov, nr_segs, | ||
| 1261 | ARRAY_SIZE(iovstack), &iov, &iter); | ||
| 1262 | if (ret < 0) | ||
| 1263 | return ret; | ||
| 1264 | |||
| 1265 | sd.total_len = iov_iter_count(&iter); | ||
| 1266 | sd.len = 0; | ||
| 1267 | sd.flags = flags; | ||
| 1268 | sd.u.data = &iter; | ||
| 1269 | sd.pos = 0; | ||
| 1270 | |||
| 1271 | if (sd.total_len) { | 1260 | if (sd.total_len) { |
| 1272 | pipe_lock(pipe); | 1261 | pipe_lock(pipe); |
| 1273 | ret = __splice_from_pipe(pipe, &sd, pipe_to_user); | 1262 | ret = __splice_from_pipe(pipe, &sd, pipe_to_user); |
| 1274 | pipe_unlock(pipe); | 1263 | pipe_unlock(pipe); |
| 1275 | } | 1264 | } |
| 1276 | 1265 | ||
| 1277 | kfree(iov); | ||
| 1278 | return ret; | 1266 | return ret; |
| 1279 | } | 1267 | } |
| 1280 | 1268 | ||
| @@ -1283,14 +1271,11 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov, | |||
| 1283 | * as splice-from-memory, where the regular splice is splice-from-file (or | 1271 | * as splice-from-memory, where the regular splice is splice-from-file (or |
| 1284 | * to file). In both cases the output is a pipe, naturally. | 1272 | * to file). In both cases the output is a pipe, naturally. |
| 1285 | */ | 1273 | */ |
| 1286 | static long vmsplice_to_pipe(struct file *file, const struct iovec __user *uiov, | 1274 | static long vmsplice_to_pipe(struct file *file, struct iov_iter *iter, |
| 1287 | unsigned long nr_segs, unsigned int flags) | 1275 | unsigned int flags) |
| 1288 | { | 1276 | { |
| 1289 | struct pipe_inode_info *pipe; | 1277 | struct pipe_inode_info *pipe; |
| 1290 | struct iovec iovstack[UIO_FASTIOV]; | 1278 | long ret = 0; |
| 1291 | struct iovec *iov = iovstack; | ||
| 1292 | struct iov_iter from; | ||
| 1293 | long ret; | ||
| 1294 | unsigned buf_flag = 0; | 1279 | unsigned buf_flag = 0; |
| 1295 | 1280 | ||
| 1296 | if (flags & SPLICE_F_GIFT) | 1281 | if (flags & SPLICE_F_GIFT) |
| @@ -1300,22 +1285,31 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *uiov, | |||
| 1300 | if (!pipe) | 1285 | if (!pipe) |
| 1301 | return -EBADF; | 1286 | return -EBADF; |
| 1302 | 1287 | ||
| 1303 | ret = import_iovec(WRITE, uiov, nr_segs, | ||
| 1304 | ARRAY_SIZE(iovstack), &iov, &from); | ||
| 1305 | if (ret < 0) | ||
| 1306 | return ret; | ||
| 1307 | |||
| 1308 | pipe_lock(pipe); | 1288 | pipe_lock(pipe); |
| 1309 | ret = wait_for_space(pipe, flags); | 1289 | ret = wait_for_space(pipe, flags); |
| 1310 | if (!ret) | 1290 | if (!ret) |
| 1311 | ret = iter_to_pipe(&from, pipe, buf_flag); | 1291 | ret = iter_to_pipe(iter, pipe, buf_flag); |
| 1312 | pipe_unlock(pipe); | 1292 | pipe_unlock(pipe); |
| 1313 | if (ret > 0) | 1293 | if (ret > 0) |
| 1314 | wakeup_pipe_readers(pipe); | 1294 | wakeup_pipe_readers(pipe); |
| 1315 | kfree(iov); | ||
| 1316 | return ret; | 1295 | return ret; |
| 1317 | } | 1296 | } |
| 1318 | 1297 | ||
| 1298 | static int vmsplice_type(struct fd f, int *type) | ||
| 1299 | { | ||
| 1300 | if (!f.file) | ||
| 1301 | return -EBADF; | ||
| 1302 | if (f.file->f_mode & FMODE_WRITE) { | ||
| 1303 | *type = WRITE; | ||
| 1304 | } else if (f.file->f_mode & FMODE_READ) { | ||
| 1305 | *type = READ; | ||
| 1306 | } else { | ||
| 1307 | fdput(f); | ||
| 1308 | return -EBADF; | ||
| 1309 | } | ||
| 1310 | return 0; | ||
| 1311 | } | ||
| 1312 | |||
| 1319 | /* | 1313 | /* |
| 1320 | * Note that vmsplice only really supports true splicing _from_ user memory | 1314 | * Note that vmsplice only really supports true splicing _from_ user memory |
| 1321 | * to a pipe, not the other way around. Splicing from user memory is a simple | 1315 | * to a pipe, not the other way around. Splicing from user memory is a simple |
| @@ -1332,57 +1326,69 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *uiov, | |||
| 1332 | * Currently we punt and implement it as a normal copy, see pipe_to_user(). | 1326 | * Currently we punt and implement it as a normal copy, see pipe_to_user(). |
| 1333 | * | 1327 | * |
| 1334 | */ | 1328 | */ |
| 1335 | static long do_vmsplice(int fd, const struct iovec __user *iov, | 1329 | static long do_vmsplice(struct file *f, struct iov_iter *iter, unsigned int flags) |
| 1336 | unsigned long nr_segs, unsigned int flags) | ||
| 1337 | { | 1330 | { |
| 1338 | struct fd f; | ||
| 1339 | long error; | ||
| 1340 | |||
| 1341 | if (unlikely(flags & ~SPLICE_F_ALL)) | 1331 | if (unlikely(flags & ~SPLICE_F_ALL)) |
| 1342 | return -EINVAL; | 1332 | return -EINVAL; |
| 1343 | if (unlikely(nr_segs > UIO_MAXIOV)) | ||
| 1344 | return -EINVAL; | ||
| 1345 | else if (unlikely(!nr_segs)) | ||
| 1346 | return 0; | ||
| 1347 | 1333 | ||
| 1348 | error = -EBADF; | 1334 | if (!iov_iter_count(iter)) |
| 1349 | f = fdget(fd); | 1335 | return 0; |
| 1350 | if (f.file) { | ||
| 1351 | if (f.file->f_mode & FMODE_WRITE) | ||
| 1352 | error = vmsplice_to_pipe(f.file, iov, nr_segs, flags); | ||
| 1353 | else if (f.file->f_mode & FMODE_READ) | ||
| 1354 | error = vmsplice_to_user(f.file, iov, nr_segs, flags); | ||
| 1355 | |||
| 1356 | fdput(f); | ||
| 1357 | } | ||
| 1358 | 1336 | ||
| 1359 | return error; | 1337 | if (iov_iter_rw(iter) == WRITE) |
| 1338 | return vmsplice_to_pipe(f, iter, flags); | ||
| 1339 | else | ||
| 1340 | return vmsplice_to_user(f, iter, flags); | ||
| 1360 | } | 1341 | } |
| 1361 | 1342 | ||
| 1362 | SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, iov, | 1343 | SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, uiov, |
| 1363 | unsigned long, nr_segs, unsigned int, flags) | 1344 | unsigned long, nr_segs, unsigned int, flags) |
| 1364 | { | 1345 | { |
| 1365 | return do_vmsplice(fd, iov, nr_segs, flags); | 1346 | struct iovec iovstack[UIO_FASTIOV]; |
| 1347 | struct iovec *iov = iovstack; | ||
| 1348 | struct iov_iter iter; | ||
| 1349 | long error; | ||
| 1350 | struct fd f; | ||
| 1351 | int type; | ||
| 1352 | |||
| 1353 | f = fdget(fd); | ||
| 1354 | error = vmsplice_type(f, &type); | ||
| 1355 | if (error) | ||
| 1356 | return error; | ||
| 1357 | |||
| 1358 | error = import_iovec(type, uiov, nr_segs, | ||
| 1359 | ARRAY_SIZE(iovstack), &iov, &iter); | ||
| 1360 | if (!error) { | ||
| 1361 | error = do_vmsplice(f.file, &iter, flags); | ||
| 1362 | kfree(iov); | ||
| 1363 | } | ||
| 1364 | fdput(f); | ||
| 1365 | return error; | ||
| 1366 | } | 1366 | } |
| 1367 | 1367 | ||
| 1368 | #ifdef CONFIG_COMPAT | 1368 | #ifdef CONFIG_COMPAT |
| 1369 | COMPAT_SYSCALL_DEFINE4(vmsplice, int, fd, const struct compat_iovec __user *, iov32, | 1369 | COMPAT_SYSCALL_DEFINE4(vmsplice, int, fd, const struct compat_iovec __user *, iov32, |
| 1370 | unsigned int, nr_segs, unsigned int, flags) | 1370 | unsigned int, nr_segs, unsigned int, flags) |
| 1371 | { | 1371 | { |
| 1372 | unsigned i; | 1372 | struct iovec iovstack[UIO_FASTIOV]; |
| 1373 | struct iovec __user *iov; | 1373 | struct iovec *iov = iovstack; |
| 1374 | if (nr_segs > UIO_MAXIOV) | 1374 | struct iov_iter iter; |
| 1375 | return -EINVAL; | 1375 | long error; |
| 1376 | iov = compat_alloc_user_space(nr_segs * sizeof(struct iovec)); | 1376 | struct fd f; |
| 1377 | for (i = 0; i < nr_segs; i++) { | 1377 | int type; |
| 1378 | struct compat_iovec v; | 1378 | |
| 1379 | if (get_user(v.iov_base, &iov32[i].iov_base) || | 1379 | f = fdget(fd); |
| 1380 | get_user(v.iov_len, &iov32[i].iov_len) || | 1380 | error = vmsplice_type(f, &type); |
| 1381 | put_user(compat_ptr(v.iov_base), &iov[i].iov_base) || | 1381 | if (error) |
| 1382 | put_user(v.iov_len, &iov[i].iov_len)) | 1382 | return error; |
| 1383 | return -EFAULT; | 1383 | |
| 1384 | error = compat_import_iovec(type, iov32, nr_segs, | ||
| 1385 | ARRAY_SIZE(iovstack), &iov, &iter); | ||
| 1386 | if (!error) { | ||
| 1387 | error = do_vmsplice(f.file, &iter, flags); | ||
| 1388 | kfree(iov); | ||
| 1384 | } | 1389 | } |
| 1385 | return do_vmsplice(fd, iov, nr_segs, flags); | 1390 | fdput(f); |
| 1391 | return error; | ||
| 1386 | } | 1392 | } |
| 1387 | #endif | 1393 | #endif |
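From userspace nothing changes here either: the rework only hoists the fd type check (vmsplice_type) and the iovec import into the syscall entry points, so both native and compat paths now share do_vmsplice() on an already-imported iov_iter. An illustrative use of vmsplice(2), splicing a user buffer into a pipe without SPLICE_F_GIFT:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int p[2];
	char msg[] = "hello via vmsplice\n";
	struct iovec iov = { .iov_base = msg, .iov_len = sizeof(msg) - 1 };

	if (pipe(p) < 0)
		return 1;
	if (vmsplice(p[1], &iov, 1, 0) < 0)
		return 1;
	/* the read end of the pipe now yields the spliced bytes */
	return 0;
}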
| 1388 | 1394 | ||
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index e64c0294f50b..b38964a7a521 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h | |||
| @@ -98,8 +98,6 @@ struct fsnotify_iter_info; | |||
| 98 | struct fsnotify_ops { | 98 | struct fsnotify_ops { |
| 99 | int (*handle_event)(struct fsnotify_group *group, | 99 | int (*handle_event)(struct fsnotify_group *group, |
| 100 | struct inode *inode, | 100 | struct inode *inode, |
| 101 | struct fsnotify_mark *inode_mark, | ||
| 102 | struct fsnotify_mark *vfsmount_mark, | ||
| 103 | u32 mask, const void *data, int data_type, | 101 | u32 mask, const void *data, int data_type, |
| 104 | const unsigned char *file_name, u32 cookie, | 102 | const unsigned char *file_name, u32 cookie, |
| 105 | struct fsnotify_iter_info *iter_info); | 103 | struct fsnotify_iter_info *iter_info); |
| @@ -201,6 +199,57 @@ struct fsnotify_group { | |||
| 201 | #define FSNOTIFY_EVENT_PATH 1 | 199 | #define FSNOTIFY_EVENT_PATH 1 |
| 202 | #define FSNOTIFY_EVENT_INODE 2 | 200 | #define FSNOTIFY_EVENT_INODE 2 |
| 203 | 201 | ||
| 202 | enum fsnotify_obj_type { | ||
| 203 | FSNOTIFY_OBJ_TYPE_INODE, | ||
| 204 | FSNOTIFY_OBJ_TYPE_VFSMOUNT, | ||
| 205 | FSNOTIFY_OBJ_TYPE_COUNT, | ||
| 206 | FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT | ||
| 207 | }; | ||
| 208 | |||
| 209 | #define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE) | ||
| 210 | #define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT) | ||
| 211 | #define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1) | ||
| 212 | |||
| 213 | struct fsnotify_iter_info { | ||
| 214 | struct fsnotify_mark *marks[FSNOTIFY_OBJ_TYPE_COUNT]; | ||
| 215 | unsigned int report_mask; | ||
| 216 | int srcu_idx; | ||
| 217 | }; | ||
| 218 | |||
| 219 | static inline bool fsnotify_iter_should_report_type( | ||
| 220 | struct fsnotify_iter_info *iter_info, int type) | ||
| 221 | { | ||
| 222 | return (iter_info->report_mask & (1U << type)); | ||
| 223 | } | ||
| 224 | |||
| 225 | static inline void fsnotify_iter_set_report_type( | ||
| 226 | struct fsnotify_iter_info *iter_info, int type) | ||
| 227 | { | ||
| 228 | iter_info->report_mask |= (1U << type); | ||
| 229 | } | ||
| 230 | |||
| 231 | static inline void fsnotify_iter_set_report_type_mark( | ||
| 232 | struct fsnotify_iter_info *iter_info, int type, | ||
| 233 | struct fsnotify_mark *mark) | ||
| 234 | { | ||
| 235 | iter_info->marks[type] = mark; | ||
| 236 | iter_info->report_mask |= (1U << type); | ||
| 237 | } | ||
| 238 | |||
| 239 | #define FSNOTIFY_ITER_FUNCS(name, NAME) \ | ||
| 240 | static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \ | ||
| 241 | struct fsnotify_iter_info *iter_info) \ | ||
| 242 | { \ | ||
| 243 | return (iter_info->report_mask & FSNOTIFY_OBJ_TYPE_##NAME##_FL) ? \ | ||
| 244 | iter_info->marks[FSNOTIFY_OBJ_TYPE_##NAME] : NULL; \ | ||
| 245 | } | ||
| 246 | |||
| 247 | FSNOTIFY_ITER_FUNCS(inode, INODE) | ||
| 248 | FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT) | ||
| 249 | |||
| 250 | #define fsnotify_foreach_obj_type(type) \ | ||
| 251 | for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++) | ||
| 252 | |||
| 204 | /* | 253 | /* |
| 205 | * Inode / vfsmount point to this structure which tracks all marks attached to | 254 | * Inode / vfsmount point to this structure which tracks all marks attached to |
| 206 | * the inode / vfsmount. The reference to inode / vfsmount is held by this | 255 | * the inode / vfsmount. The reference to inode / vfsmount is held by this |
| @@ -209,11 +258,7 @@ struct fsnotify_group { | |||
| 209 | */ | 258 | */ |
| 210 | struct fsnotify_mark_connector { | 259 | struct fsnotify_mark_connector { |
| 211 | spinlock_t lock; | 260 | spinlock_t lock; |
| 212 | #define FSNOTIFY_OBJ_TYPE_INODE 0x01 | 261 | unsigned int type; /* Type of object [lock] */ |
| 213 | #define FSNOTIFY_OBJ_TYPE_VFSMOUNT 0x02 | ||
| 214 | #define FSNOTIFY_OBJ_ALL_TYPES (FSNOTIFY_OBJ_TYPE_INODE | \ | ||
| 215 | FSNOTIFY_OBJ_TYPE_VFSMOUNT) | ||
| 216 | unsigned int flags; /* Type of object [lock] */ | ||
| 217 | union { /* Object pointer [lock] */ | 262 | union { /* Object pointer [lock] */ |
| 218 | struct inode *inode; | 263 | struct inode *inode; |
| 219 | struct vfsmount *mnt; | 264 | struct vfsmount *mnt; |
| @@ -356,7 +401,21 @@ extern struct fsnotify_mark *fsnotify_find_mark( | |||
| 356 | extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct inode *inode, | 401 | extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct inode *inode, |
| 357 | struct vfsmount *mnt, int allow_dups); | 402 | struct vfsmount *mnt, int allow_dups); |
| 358 | extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, | 403 | extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, |
| 359 | struct inode *inode, struct vfsmount *mnt, int allow_dups); | 404 | struct inode *inode, struct vfsmount *mnt, |
| 405 | int allow_dups); | ||
| 406 | /* attach the mark to the inode */ | ||
| 407 | static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark, | ||
| 408 | struct inode *inode, | ||
| 409 | int allow_dups) | ||
| 410 | { | ||
| 411 | return fsnotify_add_mark(mark, inode, NULL, allow_dups); | ||
| 412 | } | ||
| 413 | static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark, | ||
| 414 | struct inode *inode, | ||
| 415 | int allow_dups) | ||
| 416 | { | ||
| 417 | return fsnotify_add_mark_locked(mark, inode, NULL, allow_dups); | ||
| 418 | } | ||
| 360 | /* given a group and a mark, flag mark to be freed when all references are dropped */ | 419 | /* given a group and a mark, flag mark to be freed when all references are dropped */ |
| 361 | extern void fsnotify_destroy_mark(struct fsnotify_mark *mark, | 420 | extern void fsnotify_destroy_mark(struct fsnotify_mark *mark, |
| 362 | struct fsnotify_group *group); | 421 | struct fsnotify_group *group); |
| @@ -369,12 +428,12 @@ extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned | |||
| 369 | /* run all the marks in a group, and clear all of the vfsmount marks */ | 428 | /* run all the marks in a group, and clear all of the vfsmount marks */ |
| 370 | static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group) | 429 | static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group) |
| 371 | { | 430 | { |
| 372 | fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT); | 431 | fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL); |
| 373 | } | 432 | } |
| 374 | /* run all the marks in a group, and clear all of the inode marks */ | 433 | /* run all the marks in a group, and clear all of the inode marks */ |
| 375 | static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group) | 434 | static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group) |
| 376 | { | 435 | { |
| 377 | fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE); | 436 | fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL); |
| 378 | } | 437 | } |
| 379 | extern void fsnotify_get_mark(struct fsnotify_mark *mark); | 438 | extern void fsnotify_get_mark(struct fsnotify_mark *mark); |
| 380 | extern void fsnotify_put_mark(struct fsnotify_mark *mark); | 439 | extern void fsnotify_put_mark(struct fsnotify_mark *mark); |
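With the inode_mark/vfsmount_mark parameters gone from handle_event(), backends pull the relevant marks out of the iterator instead. A sketch of the new pattern inside a backend's handler, using only the helpers defined in this hunk ("my_handle_event" is an illustrative name):

#include <linux/fsnotify_backend.h>

static int my_handle_event(struct fsnotify_group *group, struct inode *inode,
			   u32 mask, const void *data, int data_type,
			   const unsigned char *file_name, u32 cookie,
			   struct fsnotify_iter_info *iter_info)
{
	struct fsnotify_mark *mark;
	int type;

	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];
		/* ... react to the event against this mark ... */
	}
	return 0;
}

A backend that only cares about one object type can use the generated accessors instead, e.g. fsnotify_iter_inode_mark(iter_info), as the audit changes later in this series do.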
diff --git a/include/linux/namei.h b/include/linux/namei.h index a982bb7cd480..a78606e8e3df 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h | |||
| @@ -81,6 +81,7 @@ extern void done_path_create(struct path *, struct dentry *); | |||
| 81 | extern struct dentry *kern_path_locked(const char *, struct path *); | 81 | extern struct dentry *kern_path_locked(const char *, struct path *); |
| 82 | extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int); | 82 | extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int); |
| 83 | 83 | ||
| 84 | extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int); | ||
| 84 | extern struct dentry *lookup_one_len(const char *, struct dentry *, int); | 85 | extern struct dentry *lookup_one_len(const char *, struct dentry *, int); |
| 85 | extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int); | 86 | extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int); |
| 86 | 87 | ||
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 04551af2ff23..dd2052f0efb7 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h | |||
| @@ -345,7 +345,7 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) | |||
| 345 | 345 | ||
| 346 | rcu_read_lock(); | 346 | rcu_read_lock(); |
| 347 | nat_hook = rcu_dereference(nf_nat_hook); | 347 | nat_hook = rcu_dereference(nf_nat_hook); |
| 348 | if (nat_hook->decode_session) | 348 | if (nat_hook && nat_hook->decode_session) |
| 349 | nat_hook->decode_session(skb, fl); | 349 | nat_hook->decode_session(skb, fl); |
| 350 | rcu_read_unlock(); | 350 | rcu_read_unlock(); |
| 351 | #endif | 351 | #endif |
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h index bfb3531fd88a..8ce271e187b6 100644 --- a/include/linux/netfilter/ipset/ip_set_timeout.h +++ b/include/linux/netfilter/ipset/ip_set_timeout.h | |||
| @@ -23,6 +23,9 @@ | |||
| 23 | /* Set is defined with timeout support: timeout value may be 0 */ | 23 | /* Set is defined with timeout support: timeout value may be 0 */ |
| 24 | #define IPSET_NO_TIMEOUT UINT_MAX | 24 | #define IPSET_NO_TIMEOUT UINT_MAX |
| 25 | 25 | ||
| 26 | /* Max timeout value, see msecs_to_jiffies() in jiffies.h */ | ||
| 27 | #define IPSET_MAX_TIMEOUT (UINT_MAX >> 1)/MSEC_PER_SEC | ||
| 28 | |||
| 26 | #define ip_set_adt_opt_timeout(opt, set) \ | 29 | #define ip_set_adt_opt_timeout(opt, set) \ |
| 27 | ((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout) | 30 | ((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout) |
| 28 | 31 | ||
| @@ -32,11 +35,10 @@ ip_set_timeout_uget(struct nlattr *tb) | |||
| 32 | unsigned int timeout = ip_set_get_h32(tb); | 35 | unsigned int timeout = ip_set_get_h32(tb); |
| 33 | 36 | ||
| 34 | /* Normalize to fit into jiffies */ | 37 | /* Normalize to fit into jiffies */ |
| 35 | if (timeout > UINT_MAX/MSEC_PER_SEC) | 38 | if (timeout > IPSET_MAX_TIMEOUT) |
| 36 | timeout = UINT_MAX/MSEC_PER_SEC; | 39 | timeout = IPSET_MAX_TIMEOUT; |
| 37 | 40 | ||
| 38 | /* Userspace supplied TIMEOUT parameter: adjust crazy size */ | 41 | return timeout; |
| 39 | return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout; | ||
| 40 | } | 42 | } |
| 41 | 43 | ||
| 42 | static inline bool | 44 | static inline bool |
| @@ -65,8 +67,14 @@ ip_set_timeout_set(unsigned long *timeout, u32 value) | |||
| 65 | static inline u32 | 67 | static inline u32 |
| 66 | ip_set_timeout_get(const unsigned long *timeout) | 68 | ip_set_timeout_get(const unsigned long *timeout) |
| 67 | { | 69 | { |
| 68 | return *timeout == IPSET_ELEM_PERMANENT ? 0 : | 70 | u32 t; |
| 69 | jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; | 71 | |
| 72 | if (*timeout == IPSET_ELEM_PERMANENT) | ||
| 73 | return 0; | ||
| 74 | |||
| 75 | t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; | ||
| 76 | /* Zero value in userspace means no timeout */ | ||
| 77 | return t == 0 ? 1 : t; | ||
| 70 | } | 78 | } |
| 71 | 79 | ||
| 72 | #endif /* __KERNEL__ */ | 80 | #endif /* __KERNEL__ */ |
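The clamp exists because the timeout travels through msecs_to_jiffies() as milliseconds, so seconds values above (UINT_MAX >> 1)/MSEC_PER_SEC would overflow; the floor of 1 in ip_set_timeout_get() keeps a nearly-expired element from reading back as 0, which userspace would take to mean "no timeout". A standalone arithmetic illustration (userspace, values assume 32-bit unsigned int and MSEC_PER_SEC == 1000):

#include <stdio.h>
#include <limits.h>

#define MSEC_PER_SEC 1000U
#define IPSET_MAX_TIMEOUT ((UINT_MAX >> 1) / MSEC_PER_SEC)

int main(void)
{
	unsigned int requested = UINT_MAX;	/* oversized userspace value */
	unsigned int timeout = requested > IPSET_MAX_TIMEOUT ?
			       IPSET_MAX_TIMEOUT : requested;

	printf("clamped to %u seconds (~%u days)\n",
	       timeout, timeout / 86400);	/* 2147483 s, about 24 days */
	return 0;
}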
diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h index 7c686d335c12..ee495d707f17 100644 --- a/include/linux/platform_data/shmob_drm.h +++ b/include/linux/platform_data/shmob_drm.h | |||
| @@ -18,9 +18,6 @@ | |||
| 18 | 18 | ||
| 19 | #include <drm/drm_mode.h> | 19 | #include <drm/drm_mode.h> |
| 20 | 20 | ||
| 21 | struct sh_mobile_meram_cfg; | ||
| 22 | struct sh_mobile_meram_info; | ||
| 23 | |||
| 24 | enum shmob_drm_clk_source { | 21 | enum shmob_drm_clk_source { |
| 25 | SHMOB_DRM_CLK_BUS, | 22 | SHMOB_DRM_CLK_BUS, |
| 26 | SHMOB_DRM_CLK_PERIPHERAL, | 23 | SHMOB_DRM_CLK_PERIPHERAL, |
| @@ -93,7 +90,6 @@ struct shmob_drm_platform_data { | |||
| 93 | struct shmob_drm_interface_data iface; | 90 | struct shmob_drm_interface_data iface; |
| 94 | struct shmob_drm_panel_data panel; | 91 | struct shmob_drm_panel_data panel; |
| 95 | struct shmob_drm_backlight_data backlight; | 92 | struct shmob_drm_backlight_data backlight; |
| 96 | const struct sh_mobile_meram_cfg *meram; | ||
| 97 | }; | 93 | }; |
| 98 | 94 | ||
| 99 | #endif /* __SHMOB_DRM_H__ */ | 95 | #endif /* __SHMOB_DRM_H__ */ |
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index e518352137e7..626fc65c4336 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h | |||
| @@ -14,6 +14,8 @@ struct seq_operations; | |||
| 14 | 14 | ||
| 15 | #ifdef CONFIG_PROC_FS | 15 | #ifdef CONFIG_PROC_FS |
| 16 | 16 | ||
| 17 | typedef int (*proc_write_t)(struct file *, char *, size_t); | ||
| 18 | |||
| 17 | extern void proc_root_init(void); | 19 | extern void proc_root_init(void); |
| 18 | extern void proc_flush_task(struct task_struct *); | 20 | extern void proc_flush_task(struct task_struct *); |
| 19 | 21 | ||
| @@ -61,6 +63,16 @@ struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode, | |||
| 61 | struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode, | 63 | struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode, |
| 62 | struct proc_dir_entry *parent, | 64 | struct proc_dir_entry *parent, |
| 63 | int (*show)(struct seq_file *, void *), void *data); | 65 | int (*show)(struct seq_file *, void *), void *data); |
| 66 | struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode, | ||
| 67 | struct proc_dir_entry *parent, | ||
| 68 | const struct seq_operations *ops, | ||
| 69 | proc_write_t write, | ||
| 70 | unsigned int state_size, void *data); | ||
| 71 | struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mode, | ||
| 72 | struct proc_dir_entry *parent, | ||
| 73 | int (*show)(struct seq_file *, void *), | ||
| 74 | proc_write_t write, | ||
| 75 | void *data); | ||
| 64 | 76 | ||
| 65 | #else /* CONFIG_PROC_FS */ | 77 | #else /* CONFIG_PROC_FS */ |
| 66 | 78 | ||
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index bbf32524ab27..fab02133a919 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h | |||
| @@ -35,7 +35,7 @@ static inline void virtio_rmb(bool weak_barriers) | |||
| 35 | if (weak_barriers) | 35 | if (weak_barriers) |
| 36 | virt_rmb(); | 36 | virt_rmb(); |
| 37 | else | 37 | else |
| 38 | rmb(); | 38 | dma_rmb(); |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | static inline void virtio_wmb(bool weak_barriers) | 41 | static inline void virtio_wmb(bool weak_barriers) |
| @@ -43,7 +43,7 @@ static inline void virtio_wmb(bool weak_barriers) | |||
| 43 | if (weak_barriers) | 43 | if (weak_barriers) |
| 44 | virt_wmb(); | 44 | virt_wmb(); |
| 45 | else | 45 | else |
| 46 | wmb(); | 46 | dma_wmb(); |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | static inline void virtio_store_mb(bool weak_barriers, | 49 | static inline void virtio_store_mb(bool weak_barriers, |
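The switch to dma_rmb()/dma_wmb() relies on non-weak-barrier devices being real hardware reached through DMA-coherent memory, where the lighter DMA barriers suffice. The ordering these helpers enforce is unchanged; a sketch of the split-ring publish step they protect (endian conversion and queue bookkeeping omitted, parameter names illustrative):

#include <linux/virtio_ring.h>

static void publish_avail(struct vring_avail *avail, u16 head, u16 idx,
			  u16 num, bool weak_barriers)
{
	avail->ring[idx % num] = head;	/* descriptor chain is filled in */
	virtio_wmb(weak_barriers);	/* order ring[] write before idx write */
	avail->idx = idx + 1;		/* device may consume from here on */
}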
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 6d6e21dee462..a0bec23c6d5e 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
| @@ -631,6 +631,7 @@ struct ip_vs_service { | |||
| 631 | 631 | ||
| 632 | /* alternate persistence engine */ | 632 | /* alternate persistence engine */ |
| 633 | struct ip_vs_pe __rcu *pe; | 633 | struct ip_vs_pe __rcu *pe; |
| 634 | int conntrack_afmask; | ||
| 634 | 635 | ||
| 635 | struct rcu_head rcu_head; | 636 | struct rcu_head rcu_head; |
| 636 | }; | 637 | }; |
| @@ -1611,6 +1612,35 @@ static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp, | |||
| 1611 | return false; | 1612 | return false; |
| 1612 | } | 1613 | } |
| 1613 | 1614 | ||
| 1615 | static inline int ip_vs_register_conntrack(struct ip_vs_service *svc) | ||
| 1616 | { | ||
| 1617 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) | ||
| 1618 | int afmask = (svc->af == AF_INET6) ? 2 : 1; | ||
| 1619 | int ret = 0; | ||
| 1620 | |||
| 1621 | if (!(svc->conntrack_afmask & afmask)) { | ||
| 1622 | ret = nf_ct_netns_get(svc->ipvs->net, svc->af); | ||
| 1623 | if (ret >= 0) | ||
| 1624 | svc->conntrack_afmask |= afmask; | ||
| 1625 | } | ||
| 1626 | return ret; | ||
| 1627 | #else | ||
| 1628 | return 0; | ||
| 1629 | #endif | ||
| 1630 | } | ||
| 1631 | |||
| 1632 | static inline void ip_vs_unregister_conntrack(struct ip_vs_service *svc) | ||
| 1633 | { | ||
| 1634 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) | ||
| 1635 | int afmask = (svc->af == AF_INET6) ? 2 : 1; | ||
| 1636 | |||
| 1637 | if (svc->conntrack_afmask & afmask) { | ||
| 1638 | nf_ct_netns_put(svc->ipvs->net, svc->af); | ||
| 1639 | svc->conntrack_afmask &= ~afmask; | ||
| 1640 | } | ||
| 1641 | #endif | ||
| 1642 | } | ||
| 1643 | |||
| 1614 | static inline int | 1644 | static inline int |
| 1615 | ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) | 1645 | ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) |
| 1616 | { | 1646 | { |
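The conntrack_afmask bookkeeping means each service holds at most one per-netns conntrack reference per address family, and unregistering is idempotent per family. A hedged sketch of the intended call pattern (the real call sites live in the IPVS core, not in this hunk; "my_enable_conntrack"/"my_disable_conntrack" are illustrative):

#include <net/ip_vs.h>

static int my_enable_conntrack(struct ip_vs_service *svc)
{
	int err = ip_vs_register_conntrack(svc);

	if (err < 0)
		return err;	/* afmask bit left clear, safe to retry */
	/* ... conntrack is now usable for svc->af ... */
	return 0;
}

static void my_disable_conntrack(struct ip_vs_service *svc)
{
	ip_vs_unregister_conntrack(svc);	/* no-op if not registered */
}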
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h index 1910b6572430..3a188a0923a3 100644 --- a/include/net/netfilter/nf_conntrack_count.h +++ b/include/net/netfilter/nf_conntrack_count.h | |||
| @@ -20,7 +20,8 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, | |||
| 20 | bool *addit); | 20 | bool *addit); |
| 21 | 21 | ||
| 22 | bool nf_conncount_add(struct hlist_head *head, | 22 | bool nf_conncount_add(struct hlist_head *head, |
| 23 | const struct nf_conntrack_tuple *tuple); | 23 | const struct nf_conntrack_tuple *tuple, |
| 24 | const struct nf_conntrack_zone *zone); | ||
| 24 | 25 | ||
| 25 | void nf_conncount_cache_free(struct hlist_head *hhead); | 26 | void nf_conncount_cache_free(struct hlist_head *hhead); |
| 26 | 27 | ||
diff --git a/include/net/netfilter/nft_dup.h b/include/net/netfilter/nft_dup.h deleted file mode 100644 index 4d9d512984b2..000000000000 --- a/include/net/netfilter/nft_dup.h +++ /dev/null | |||
| @@ -1,10 +0,0 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #ifndef _NFT_DUP_H_ | ||
| 3 | #define _NFT_DUP_H_ | ||
| 4 | |||
| 5 | struct nft_dup_inet { | ||
| 6 | enum nft_registers sreg_addr:8; | ||
| 7 | enum nft_registers sreg_dev:8; | ||
| 8 | }; | ||
| 9 | |||
| 10 | #endif /* _NFT_DUP_H_ */ | ||
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index ebf809eed33a..dbe1b911a24d 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
| @@ -1133,6 +1133,11 @@ struct sctp_input_cb { | |||
| 1133 | }; | 1133 | }; |
| 1134 | #define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0])) | 1134 | #define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0])) |
| 1135 | 1135 | ||
| 1136 | struct sctp_output_cb { | ||
| 1137 | struct sk_buff *last; | ||
| 1138 | }; | ||
| 1139 | #define SCTP_OUTPUT_CB(__skb) ((struct sctp_output_cb *)&((__skb)->cb[0])) | ||
| 1140 | |||
| 1136 | static inline const struct sk_buff *sctp_gso_headskb(const struct sk_buff *skb) | 1141 | static inline const struct sk_buff *sctp_gso_headskb(const struct sk_buff *skb) |
| 1137 | { | 1142 | { |
| 1138 | const struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; | 1143 | const struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; |
diff --git a/include/net/tls.h b/include/net/tls.h index 70c273777fe9..7f84ea3e217c 100644 --- a/include/net/tls.h +++ b/include/net/tls.h | |||
| @@ -109,8 +109,7 @@ struct tls_sw_context_rx { | |||
| 109 | 109 | ||
| 110 | struct strparser strp; | 110 | struct strparser strp; |
| 111 | void (*saved_data_ready)(struct sock *sk); | 111 | void (*saved_data_ready)(struct sock *sk); |
| 112 | unsigned int (*sk_poll)(struct file *file, struct socket *sock, | 112 | __poll_t (*sk_poll_mask)(struct socket *sock, __poll_t events); |
| 113 | struct poll_table_struct *wait); | ||
| 114 | struct sk_buff *recv_pkt; | 113 | struct sk_buff *recv_pkt; |
| 115 | u8 control; | 114 | u8 control; |
| 116 | bool decrypted; | 115 | bool decrypted; |
| @@ -225,8 +224,7 @@ void tls_sw_free_resources_tx(struct sock *sk); | |||
| 225 | void tls_sw_free_resources_rx(struct sock *sk); | 224 | void tls_sw_free_resources_rx(struct sock *sk); |
| 226 | int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, | 225 | int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, |
| 227 | int nonblock, int flags, int *addr_len); | 226 | int nonblock, int flags, int *addr_len); |
| 228 | unsigned int tls_sw_poll(struct file *file, struct socket *sock, | 227 | __poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events); |
| 229 | struct poll_table_struct *wait); | ||
| 230 | ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, | 228 | ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, |
| 231 | struct pipe_inode_info *pipe, | 229 | struct pipe_inode_info *pipe, |
| 232 | size_t len, unsigned int flags); | 230 | size_t len, unsigned int flags); |
diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h index 75846164290e..d00221345c19 100644 --- a/include/uapi/linux/aio_abi.h +++ b/include/uapi/linux/aio_abi.h | |||
| @@ -109,7 +109,7 @@ struct iocb { | |||
| 109 | #undef IFLITTLE | 109 | #undef IFLITTLE |
| 110 | 110 | ||
| 111 | struct __aio_sigset { | 111 | struct __aio_sigset { |
| 112 | sigset_t __user *sigmask; | 112 | const sigset_t __user *sigmask; |
| 113 | size_t sigsetsize; | 113 | size_t sigsetsize; |
| 114 | }; | 114 | }; |
| 115 | 115 | ||
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h index c712eb6879f1..336014bf8868 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_common.h +++ b/include/uapi/linux/netfilter/nf_conntrack_common.h | |||
| @@ -112,7 +112,7 @@ enum ip_conntrack_status { | |||
| 112 | IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING | | 112 | IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING | |
| 113 | IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD), | 113 | IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD), |
| 114 | 114 | ||
| 115 | __IPS_MAX_BIT = 14, | 115 | __IPS_MAX_BIT = 15, |
| 116 | }; | 116 | }; |
| 117 | 117 | ||
| 118 | /* Connection tracking event types */ | 118 | /* Connection tracking event types */ |
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index c9bf74b94f37..89438e68dc03 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h | |||
| @@ -266,7 +266,7 @@ enum nft_rule_compat_attributes { | |||
| 266 | * @NFT_SET_INTERVAL: set contains intervals | 266 | * @NFT_SET_INTERVAL: set contains intervals |
| 267 | * @NFT_SET_MAP: set is used as a dictionary | 267 | * @NFT_SET_MAP: set is used as a dictionary |
| 268 | * @NFT_SET_TIMEOUT: set uses timeouts | 268 | * @NFT_SET_TIMEOUT: set uses timeouts |
| 269 | * @NFT_SET_EVAL: set contains expressions for evaluation | 269 | * @NFT_SET_EVAL: set can be updated from the evaluation path |
| 270 | * @NFT_SET_OBJECT: set contains stateful objects | 270 | * @NFT_SET_OBJECT: set contains stateful objects |
| 271 | */ | 271 | */ |
| 272 | enum nft_set_flags { | 272 | enum nft_set_flags { |
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 28b36545de24..27e4e441caac 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h | |||
| @@ -981,18 +981,18 @@ | |||
| 981 | * only the %NL80211_ATTR_IE data is used and updated with this command. | 981 | * only the %NL80211_ATTR_IE data is used and updated with this command. |
| 982 | * | 982 | * |
| 983 | * @NL80211_CMD_SET_PMK: For offloaded 4-Way handshake, set the PMK or PMK-R0 | 983 | * @NL80211_CMD_SET_PMK: For offloaded 4-Way handshake, set the PMK or PMK-R0 |
| 984 | * for the given authenticator address (specified with &NL80211_ATTR_MAC). | 984 | * for the given authenticator address (specified with %NL80211_ATTR_MAC). |
| 985 | * When &NL80211_ATTR_PMKR0_NAME is set, &NL80211_ATTR_PMK specifies the | 985 | * When %NL80211_ATTR_PMKR0_NAME is set, %NL80211_ATTR_PMK specifies the |
| 986 | * PMK-R0, otherwise it specifies the PMK. | 986 | * PMK-R0, otherwise it specifies the PMK. |
| 987 | * @NL80211_CMD_DEL_PMK: For offloaded 4-Way handshake, delete the previously | 987 | * @NL80211_CMD_DEL_PMK: For offloaded 4-Way handshake, delete the previously |
| 988 | * configured PMK for the authenticator address identified by | 988 | * configured PMK for the authenticator address identified by |
| 989 | * &NL80211_ATTR_MAC. | 989 | * %NL80211_ATTR_MAC. |
| 990 | * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates that the 4 way | 990 | * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates that the 4 way |
| 991 | * handshake was completed successfully by the driver. The BSSID is | 991 | * handshake was completed successfully by the driver. The BSSID is |
| 992 | * specified with &NL80211_ATTR_MAC. Drivers that support 4 way handshake | 992 | * specified with %NL80211_ATTR_MAC. Drivers that support 4 way handshake |
| 993 | * offload should send this event after indicating 802.11 association with | 993 | * offload should send this event after indicating 802.11 association with |
| 994 | * &NL80211_CMD_CONNECT or &NL80211_CMD_ROAM. If the 4 way handshake failed | 994 | * %NL80211_CMD_CONNECT or %NL80211_CMD_ROAM. If the 4 way handshake failed |
| 995 | * &NL80211_CMD_DISCONNECT should be indicated instead. | 995 | * %NL80211_CMD_DISCONNECT should be indicated instead. |
| 996 | * | 996 | * |
| 997 | * @NL80211_CMD_CONTROL_PORT_FRAME: Control Port (e.g. PAE) frame TX request | 997 | * @NL80211_CMD_CONTROL_PORT_FRAME: Control Port (e.g. PAE) frame TX request |
| 998 | * and RX notification. This command is used both as a request to transmit | 998 | * and RX notification. This command is used both as a request to transmit |
| @@ -1029,9 +1029,9 @@ | |||
| 1029 | * initiated the connection through the connect request. | 1029 | * initiated the connection through the connect request. |
| 1030 | * | 1030 | * |
| 1031 | * @NL80211_CMD_STA_OPMODE_CHANGED: An event that notifies a station's | 1031 | * @NL80211_CMD_STA_OPMODE_CHANGED: An event that notifies a station's |
| 1032 | * ht opmode or vht opmode changes using any of &NL80211_ATTR_SMPS_MODE, | 1032 | * ht opmode or vht opmode changes using any of %NL80211_ATTR_SMPS_MODE, |
| 1033 | * &NL80211_ATTR_CHANNEL_WIDTH, &NL80211_ATTR_NSS attributes with its | 1033 | * %NL80211_ATTR_CHANNEL_WIDTH, %NL80211_ATTR_NSS attributes with its |
| 1034 | * address (specified in &NL80211_ATTR_MAC). | 1034 | * address (specified in %NL80211_ATTR_MAC). |
| 1035 | * | 1035 | * |
| 1036 | * @NL80211_CMD_MAX: highest used command number | 1036 | * @NL80211_CMD_MAX: highest used command number |
| 1037 | * @__NL80211_CMD_AFTER_LAST: internal use | 1037 | * @__NL80211_CMD_AFTER_LAST: internal use |
| @@ -2218,7 +2218,7 @@ enum nl80211_commands { | |||
| 2218 | * @NL80211_ATTR_EXTERNAL_AUTH_ACTION: Identify the requested external | 2218 | * @NL80211_ATTR_EXTERNAL_AUTH_ACTION: Identify the requested external |
| 2219 | * authentication operation (u32 attribute with an | 2219 | * authentication operation (u32 attribute with an |
| 2220 | * &enum nl80211_external_auth_action value). This is used with the | 2220 | * &enum nl80211_external_auth_action value). This is used with the |
| 2221 | * &NL80211_CMD_EXTERNAL_AUTH request event. | 2221 | * %NL80211_CMD_EXTERNAL_AUTH request event. |
| 2222 | * @NL80211_ATTR_EXTERNAL_AUTH_SUPPORT: Flag attribute indicating that the user | 2222 | * @NL80211_ATTR_EXTERNAL_AUTH_SUPPORT: Flag attribute indicating that the user |
| 2223 | * space supports external authentication. This attribute shall be used | 2223 | * space supports external authentication. This attribute shall be used |
| 2224 | * only with %NL80211_CMD_CONNECT request. The driver may offload | 2224 | * only with %NL80211_CMD_CONNECT request. The driver may offload |
| @@ -3491,7 +3491,7 @@ enum nl80211_sched_scan_match_attr { | |||
| 3491 | * @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated | 3491 | * @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated |
| 3492 | * base on contiguous rules and wider channels will be allowed to cross | 3492 | * base on contiguous rules and wider channels will be allowed to cross |
| 3493 | * multiple contiguous/overlapping frequency ranges. | 3493 | * multiple contiguous/overlapping frequency ranges. |
| 3494 | * @NL80211_RRF_IR_CONCURRENT: See &NL80211_FREQUENCY_ATTR_IR_CONCURRENT | 3494 | * @NL80211_RRF_IR_CONCURRENT: See %NL80211_FREQUENCY_ATTR_IR_CONCURRENT |
| 3495 | * @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation | 3495 | * @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation |
| 3496 | * @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation | 3496 | * @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation |
| 3497 | * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed | 3497 | * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed |
| @@ -5643,11 +5643,11 @@ enum nl80211_nan_func_attributes { | |||
| 5643 | * @NL80211_NAN_SRF_INCLUDE: present if the include bit of the SRF set. | 5643 | * @NL80211_NAN_SRF_INCLUDE: present if the include bit of the SRF set. |
| 5644 | * This is a flag. | 5644 | * This is a flag. |
| 5645 | * @NL80211_NAN_SRF_BF: Bloom Filter. Present if and only if | 5645 | * @NL80211_NAN_SRF_BF: Bloom Filter. Present if and only if |
| 5646 | * &NL80211_NAN_SRF_MAC_ADDRS isn't present. This attribute is binary. | 5646 | * %NL80211_NAN_SRF_MAC_ADDRS isn't present. This attribute is binary. |
| 5647 | * @NL80211_NAN_SRF_BF_IDX: index of the Bloom Filter. Mandatory if | 5647 | * @NL80211_NAN_SRF_BF_IDX: index of the Bloom Filter. Mandatory if |
| 5648 | * &NL80211_NAN_SRF_BF is present. This is a u8. | 5648 | * %NL80211_NAN_SRF_BF is present. This is a u8. |
| 5649 | * @NL80211_NAN_SRF_MAC_ADDRS: list of MAC addresses for the SRF. Present if | 5649 | * @NL80211_NAN_SRF_MAC_ADDRS: list of MAC addresses for the SRF. Present if |
| 5650 | * and only if &NL80211_NAN_SRF_BF isn't present. This is a nested | 5650 | * and only if %NL80211_NAN_SRF_BF isn't present. This is a nested |
| 5651 | * attribute. Each nested attribute is a MAC address. | 5651 | * attribute. Each nested attribute is a MAC address. |
| 5652 | * @NUM_NL80211_NAN_SRF_ATTR: internal | 5652 | * @NUM_NL80211_NAN_SRF_ATTR: internal |
| 5653 | * @NL80211_NAN_SRF_ATTR_MAX: highest NAN SRF attribute | 5653 | * @NL80211_NAN_SRF_ATTR_MAX: highest NAN SRF attribute |
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index 308e2096291f..449132c76b1c 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h | |||
| @@ -45,11 +45,14 @@ | |||
| 45 | /* We've given up on this device. */ | 45 | /* We've given up on this device. */ |
| 46 | #define VIRTIO_CONFIG_S_FAILED 0x80 | 46 | #define VIRTIO_CONFIG_S_FAILED 0x80 |
| 47 | 47 | ||
| 48 | /* Some virtio feature bits (currently bits 28 through 32) are reserved for the | 48 | /* |
| 49 | * transport being used (eg. virtio_ring), the rest are per-device feature | 49 | * Virtio feature bits VIRTIO_TRANSPORT_F_START through |
| 50 | * bits. */ | 50 | * VIRTIO_TRANSPORT_F_END are reserved for the transport |
| 51 | * being used (e.g. virtio_ring, virtio_pci etc.), the | ||
| 52 | * rest are per-device feature bits. | ||
| 53 | */ | ||
| 51 | #define VIRTIO_TRANSPORT_F_START 28 | 54 | #define VIRTIO_TRANSPORT_F_START 28 |
| 52 | #define VIRTIO_TRANSPORT_F_END 34 | 55 | #define VIRTIO_TRANSPORT_F_END 38 |
| 53 | 56 | ||
| 54 | #ifndef VIRTIO_CONFIG_NO_LEGACY | 57 | #ifndef VIRTIO_CONFIG_NO_LEGACY |
| 55 | /* Do we get callbacks when the ring is completely used, even if we've | 58 | /* Do we get callbacks when the ring is completely used, even if we've |
| @@ -71,4 +74,9 @@ | |||
| 71 | * this is for compatibility with legacy systems. | 74 | * this is for compatibility with legacy systems. |
| 72 | */ | 75 | */ |
| 73 | #define VIRTIO_F_IOMMU_PLATFORM 33 | 76 | #define VIRTIO_F_IOMMU_PLATFORM 33 |
| 77 | |||
| 78 | /* | ||
| 79 | * Does the device support Single Root I/O Virtualization? | ||
| 80 | */ | ||
| 81 | #define VIRTIO_F_SR_IOV 37 | ||
| 74 | #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */ | 82 | #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */ |
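VIRTIO_F_SR_IOV sits in the (now widened) transport feature range, so a driver negotiates it like any other feature bit. A sketch of gating SR-IOV enablement on the negotiated bit; virtio_has_feature() is the existing kernel helper, while the PCI plumbing indicated in the comment is illustrative and not part of this patch:

#include <linux/virtio_config.h>

static int maybe_enable_sriov(struct virtio_device *vdev, int num_vfs)
{
	if (!virtio_has_feature(vdev, VIRTIO_F_SR_IOV))
		return -EOPNOTSUPP;
	/* ... e.g. pci_enable_sriov() on the underlying PCI device ... */
	return 0;
}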
diff --git a/include/video/auo_k190xfb.h b/include/video/auo_k190xfb.h deleted file mode 100644 index ac329ee1d753..000000000000 --- a/include/video/auo_k190xfb.h +++ /dev/null | |||
| @@ -1,107 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Definitions for AUO-K190X framebuffer drivers | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef _LINUX_VIDEO_AUO_K190XFB_H_ | ||
| 12 | #define _LINUX_VIDEO_AUO_K190XFB_H_ | ||
| 13 | |||
| 14 | /* Controller standby command needs a param */ | ||
| 15 | #define AUOK190X_QUIRK_STANDBYPARAM (1 << 0) | ||
| 16 | |||
| 17 | /* Controller standby is completely broken */ | ||
| 18 | #define AUOK190X_QUIRK_STANDBYBROKEN (1 << 1) | ||
| 19 | |||
| 20 | /* | ||
| 21 | * Resolutions for the displays | ||
| 22 | */ | ||
| 23 | #define AUOK190X_RESOLUTION_800_600 0 | ||
| 24 | #define AUOK190X_RESOLUTION_1024_768 1 | ||
| 25 | #define AUOK190X_RESOLUTION_600_800 4 | ||
| 26 | #define AUOK190X_RESOLUTION_768_1024 5 | ||
| 27 | |||
| 28 | /* | ||
| 29 | * struct used by auok190x. board specific stuff comes from *board | ||
| 30 | */ | ||
| 31 | struct auok190xfb_par { | ||
| 32 | struct fb_info *info; | ||
| 33 | struct auok190x_board *board; | ||
| 34 | |||
| 35 | struct regulator *regulator; | ||
| 36 | |||
| 37 | struct mutex io_lock; | ||
| 38 | struct delayed_work work; | ||
| 39 | wait_queue_head_t waitq; | ||
| 40 | int resolution; | ||
| 41 | int rotation; | ||
| 42 | int consecutive_threshold; | ||
| 43 | int update_cnt; | ||
| 44 | |||
| 45 | /* panel and controller informations */ | ||
| 46 | int epd_type; | ||
| 47 | int panel_size_int; | ||
| 48 | int panel_size_float; | ||
| 49 | int panel_model; | ||
| 50 | int tcon_version; | ||
| 51 | int lut_version; | ||
| 52 | |||
| 53 | /* individual controller callbacks */ | ||
| 54 | void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2); | ||
| 55 | void (*update_all)(struct auok190xfb_par *par); | ||
| 56 | bool (*need_refresh)(struct auok190xfb_par *par); | ||
| 57 | void (*init)(struct auok190xfb_par *par); | ||
| 58 | void (*recover)(struct auok190xfb_par *par); | ||
| 59 | |||
| 60 | int update_mode; /* mode to use for updates */ | ||
| 61 | int last_mode; /* update mode last used */ | ||
| 62 | int flash; | ||
| 63 | |||
| 64 | /* power management */ | ||
| 65 | int autosuspend_delay; | ||
| 66 | bool standby; | ||
| 67 | bool manual_standby; | ||
| 68 | }; | ||
| 69 | |||
| 70 | /** | ||
| 71 | * Board specific platform-data | ||
| 72 | * @init: initialize the controller interface | ||
| 73 | * @cleanup: cleanup the controller interface | ||
| 74 | * @wait_for_rdy: wait until the controller is not busy anymore | ||
| 75 | * @set_ctl: change an interface control | ||
| 76 | * @set_hdb: write a value to the data register | ||
| 77 | * @get_hdb: read a value from the data register | ||
| 78 | * @setup_irq: method to setup the irq handling on the busy gpio | ||
| 79 | * @gpio_nsleep: sleep gpio | ||
| 80 | * @gpio_nrst: reset gpio | ||
| 81 | * @gpio_nbusy: busy gpio | ||
| 82 | * @resolution: one of the AUOK190X_RESOLUTION constants | ||
| 83 | * @rotation: rotation of the framebuffer | ||
| 84 | * @quirks: controller quirks to honor | ||
| 85 | * @fps: frames per second for defio | ||
| 86 | */ | ||
| 87 | struct auok190x_board { | ||
| 88 | int (*init)(struct auok190xfb_par *); | ||
| 89 | void (*cleanup)(struct auok190xfb_par *); | ||
| 90 | int (*wait_for_rdy)(struct auok190xfb_par *); | ||
| 91 | |||
| 92 | void (*set_ctl)(struct auok190xfb_par *, unsigned char, u8); | ||
| 93 | void (*set_hdb)(struct auok190xfb_par *, u16); | ||
| 94 | u16 (*get_hdb)(struct auok190xfb_par *); | ||
| 95 | |||
| 96 | int (*setup_irq)(struct fb_info *); | ||
| 97 | |||
| 98 | int gpio_nsleep; | ||
| 99 | int gpio_nrst; | ||
| 100 | int gpio_nbusy; | ||
| 101 | |||
| 102 | int resolution; | ||
| 103 | int quirks; | ||
| 104 | int fps; | ||
| 105 | }; | ||
| 106 | |||
| 107 | #endif | ||
diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h index f706b0fed399..84aa976ca4ea 100644 --- a/include/video/sh_mobile_lcdc.h +++ b/include/video/sh_mobile_lcdc.h | |||
| @@ -3,7 +3,6 @@ | |||
| 3 | #define __ASM_SH_MOBILE_LCDC_H__ | 3 | #define __ASM_SH_MOBILE_LCDC_H__ |
| 4 | 4 | ||
| 5 | #include <linux/fb.h> | 5 | #include <linux/fb.h> |
| 6 | #include <video/sh_mobile_meram.h> | ||
| 7 | 6 | ||
| 8 | /* Register definitions */ | 7 | /* Register definitions */ |
| 9 | #define _LDDCKR 0x410 | 8 | #define _LDDCKR 0x410 |
| @@ -184,7 +183,6 @@ struct sh_mobile_lcdc_chan_cfg { | |||
| 184 | struct sh_mobile_lcdc_panel_cfg panel_cfg; | 183 | struct sh_mobile_lcdc_panel_cfg panel_cfg; |
| 185 | struct sh_mobile_lcdc_bl_info bl_info; | 184 | struct sh_mobile_lcdc_bl_info bl_info; |
| 186 | struct sh_mobile_lcdc_sys_bus_cfg sys_bus_cfg; /* only for SYSn I/F */ | 185 | struct sh_mobile_lcdc_sys_bus_cfg sys_bus_cfg; /* only for SYSn I/F */ |
| 187 | const struct sh_mobile_meram_cfg *meram_cfg; | ||
| 188 | 186 | ||
| 189 | struct platform_device *tx_dev; /* HDMI/DSI transmitter device */ | 187 | struct platform_device *tx_dev; /* HDMI/DSI transmitter device */ |
| 190 | }; | 188 | }; |
| @@ -193,7 +191,6 @@ struct sh_mobile_lcdc_info { | |||
| 193 | int clock_source; | 191 | int clock_source; |
| 194 | struct sh_mobile_lcdc_chan_cfg ch[2]; | 192 | struct sh_mobile_lcdc_chan_cfg ch[2]; |
| 195 | struct sh_mobile_lcdc_overlay_cfg overlays[4]; | 193 | struct sh_mobile_lcdc_overlay_cfg overlays[4]; |
| 196 | struct sh_mobile_meram_info *meram_dev; | ||
| 197 | }; | 194 | }; |
| 198 | 195 | ||
| 199 | #endif /* __ASM_SH_MOBILE_LCDC_H__ */ | 196 | #endif /* __ASM_SH_MOBILE_LCDC_H__ */ |
diff --git a/include/video/sh_mobile_meram.h b/include/video/sh_mobile_meram.h deleted file mode 100644 index f4efc21e205d..000000000000 --- a/include/video/sh_mobile_meram.h +++ /dev/null | |||
| @@ -1,95 +0,0 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #ifndef __VIDEO_SH_MOBILE_MERAM_H__ | ||
| 3 | #define __VIDEO_SH_MOBILE_MERAM_H__ | ||
| 4 | |||
| 5 | /* For sh_mobile_meram_info.addr_mode */ | ||
| 6 | enum { | ||
| 7 | SH_MOBILE_MERAM_MODE0 = 0, | ||
| 8 | SH_MOBILE_MERAM_MODE1 | ||
| 9 | }; | ||
| 10 | |||
| 11 | enum { | ||
| 12 | SH_MOBILE_MERAM_PF_NV = 0, | ||
| 13 | SH_MOBILE_MERAM_PF_RGB, | ||
| 14 | SH_MOBILE_MERAM_PF_NV24 | ||
| 15 | }; | ||
| 16 | |||
| 17 | |||
| 18 | struct sh_mobile_meram_priv; | ||
| 19 | |||
| 20 | /* | ||
| 21 | * struct sh_mobile_meram_info - MERAM platform data | ||
| 22 | * @reserved_icbs: Bitmask of reserved ICBs (for instance used through UIO) | ||
| 23 | */ | ||
| 24 | struct sh_mobile_meram_info { | ||
| 25 | int addr_mode; | ||
| 26 | u32 reserved_icbs; | ||
| 27 | struct sh_mobile_meram_priv *priv; | ||
| 28 | struct platform_device *pdev; | ||
| 29 | }; | ||
| 30 | |||
| 31 | /* icb config */ | ||
| 32 | struct sh_mobile_meram_icb_cfg { | ||
| 33 | unsigned int meram_size; /* MERAM Buffer Size to use */ | ||
| 34 | }; | ||
| 35 | |||
| 36 | struct sh_mobile_meram_cfg { | ||
| 37 | struct sh_mobile_meram_icb_cfg icb[2]; | ||
| 38 | }; | ||
| 39 | |||
| 40 | #if defined(CONFIG_FB_SH_MOBILE_MERAM) || \ | ||
| 41 | defined(CONFIG_FB_SH_MOBILE_MERAM_MODULE) | ||
| 42 | unsigned long sh_mobile_meram_alloc(struct sh_mobile_meram_info *meram_dev, | ||
| 43 | size_t size); | ||
| 44 | void sh_mobile_meram_free(struct sh_mobile_meram_info *meram_dev, | ||
| 45 | unsigned long mem, size_t size); | ||
| 46 | void *sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *dev, | ||
| 47 | const struct sh_mobile_meram_cfg *cfg, | ||
| 48 | unsigned int xres, unsigned int yres, | ||
| 49 | unsigned int pixelformat, | ||
| 50 | unsigned int *pitch); | ||
| 51 | void sh_mobile_meram_cache_free(struct sh_mobile_meram_info *dev, void *data); | ||
| 52 | void sh_mobile_meram_cache_update(struct sh_mobile_meram_info *dev, void *data, | ||
| 53 | unsigned long base_addr_y, | ||
| 54 | unsigned long base_addr_c, | ||
| 55 | unsigned long *icb_addr_y, | ||
| 56 | unsigned long *icb_addr_c); | ||
| 57 | #else | ||
| 58 | static inline unsigned long | ||
| 59 | sh_mobile_meram_alloc(struct sh_mobile_meram_info *meram_dev, size_t size) | ||
| 60 | { | ||
| 61 | return 0; | ||
| 62 | } | ||
| 63 | |||
| 64 | static inline void | ||
| 65 | sh_mobile_meram_free(struct sh_mobile_meram_info *meram_dev, | ||
| 66 | unsigned long mem, size_t size) | ||
| 67 | { | ||
| 68 | } | ||
| 69 | |||
| 70 | static inline void * | ||
| 71 | sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *dev, | ||
| 72 | const struct sh_mobile_meram_cfg *cfg, | ||
| 73 | unsigned int xres, unsigned int yres, | ||
| 74 | unsigned int pixelformat, | ||
| 75 | unsigned int *pitch) | ||
| 76 | { | ||
| 77 | return ERR_PTR(-ENODEV); | ||
| 78 | } | ||
| 79 | |||
| 80 | static inline void | ||
| 81 | sh_mobile_meram_cache_free(struct sh_mobile_meram_info *dev, void *data) | ||
| 82 | { | ||
| 83 | } | ||
| 84 | |||
| 85 | static inline void | ||
| 86 | sh_mobile_meram_cache_update(struct sh_mobile_meram_info *dev, void *data, | ||
| 87 | unsigned long base_addr_y, | ||
| 88 | unsigned long base_addr_c, | ||
| 89 | unsigned long *icb_addr_y, | ||
| 90 | unsigned long *icb_addr_c) | ||
| 91 | { | ||
| 92 | } | ||
| 93 | #endif | ||
| 94 | |||
| 95 | #endif /* __VIDEO_SH_MOBILE_MERAM_H__ */ | ||
diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c index 52f368b6561e..fba78047fb37 100644 --- a/kernel/audit_fsnotify.c +++ b/kernel/audit_fsnotify.c | |||
| @@ -109,7 +109,7 @@ struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pa | |||
| 109 | audit_update_mark(audit_mark, dentry->d_inode); | 109 | audit_update_mark(audit_mark, dentry->d_inode); |
| 110 | audit_mark->rule = krule; | 110 | audit_mark->rule = krule; |
| 111 | 111 | ||
| 112 | ret = fsnotify_add_mark(&audit_mark->mark, inode, NULL, true); | 112 | ret = fsnotify_add_inode_mark(&audit_mark->mark, inode, true); |
| 113 | if (ret < 0) { | 113 | if (ret < 0) { |
| 114 | fsnotify_put_mark(&audit_mark->mark); | 114 | fsnotify_put_mark(&audit_mark->mark); |
| 115 | audit_mark = ERR_PTR(ret); | 115 | audit_mark = ERR_PTR(ret); |
| @@ -165,12 +165,11 @@ static void audit_autoremove_mark_rule(struct audit_fsnotify_mark *audit_mark) | |||
| 165 | /* Update mark data in audit rules based on fsnotify events. */ | 165 | /* Update mark data in audit rules based on fsnotify events. */ |
| 166 | static int audit_mark_handle_event(struct fsnotify_group *group, | 166 | static int audit_mark_handle_event(struct fsnotify_group *group, |
| 167 | struct inode *to_tell, | 167 | struct inode *to_tell, |
| 168 | struct fsnotify_mark *inode_mark, | ||
| 169 | struct fsnotify_mark *vfsmount_mark, | ||
| 170 | u32 mask, const void *data, int data_type, | 168 | u32 mask, const void *data, int data_type, |
| 171 | const unsigned char *dname, u32 cookie, | 169 | const unsigned char *dname, u32 cookie, |
| 172 | struct fsnotify_iter_info *iter_info) | 170 | struct fsnotify_iter_info *iter_info) |
| 173 | { | 171 | { |
| 172 | struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); | ||
| 174 | struct audit_fsnotify_mark *audit_mark; | 173 | struct audit_fsnotify_mark *audit_mark; |
| 175 | const struct inode *inode = NULL; | 174 | const struct inode *inode = NULL; |
| 176 | 175 | ||
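The fsnotify API change above drops the per-call inode/vfsmount mark parameters; handlers now pull the relevant mark out of the iterator instead. A sketch of the updated handler shape, using only the helper visible in this hunk (the handler name is illustrative); the same conversion is applied to audit_tree.c and audit_watch.c below:

    static int example_handle_event(struct fsnotify_group *group,
                                    struct inode *to_tell,
                                    u32 mask, const void *data, int data_type,
                                    const unsigned char *dname, u32 cookie,
                                    struct fsnotify_iter_info *iter_info)
    {
            /* fetch the inode mark from the iterator, not a parameter */
            struct fsnotify_mark *mark = fsnotify_iter_inode_mark(iter_info);

            if (!mark)
                    return 0;
            /* ... act on the mark ... */
            return 0;
    }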
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 67e6956c0b61..c99ebaae5abc 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c | |||
| @@ -288,8 +288,8 @@ static void untag_chunk(struct node *p) | |||
| 288 | if (!new) | 288 | if (!new) |
| 289 | goto Fallback; | 289 | goto Fallback; |
| 290 | 290 | ||
| 291 | if (fsnotify_add_mark_locked(&new->mark, entry->connector->inode, | 291 | if (fsnotify_add_inode_mark_locked(&new->mark, entry->connector->inode, |
| 292 | NULL, 1)) { | 292 | 1)) { |
| 293 | fsnotify_put_mark(&new->mark); | 293 | fsnotify_put_mark(&new->mark); |
| 294 | goto Fallback; | 294 | goto Fallback; |
| 295 | } | 295 | } |
| @@ -354,7 +354,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) | |||
| 354 | return -ENOMEM; | 354 | return -ENOMEM; |
| 355 | 355 | ||
| 356 | entry = &chunk->mark; | 356 | entry = &chunk->mark; |
| 357 | if (fsnotify_add_mark(entry, inode, NULL, 0)) { | 357 | if (fsnotify_add_inode_mark(entry, inode, 0)) { |
| 358 | fsnotify_put_mark(entry); | 358 | fsnotify_put_mark(entry); |
| 359 | return -ENOSPC; | 359 | return -ENOSPC; |
| 360 | } | 360 | } |
| @@ -434,8 +434,8 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) | |||
| 434 | return -ENOENT; | 434 | return -ENOENT; |
| 435 | } | 435 | } |
| 436 | 436 | ||
| 437 | if (fsnotify_add_mark_locked(chunk_entry, | 437 | if (fsnotify_add_inode_mark_locked(chunk_entry, |
| 438 | old_entry->connector->inode, NULL, 1)) { | 438 | old_entry->connector->inode, 1)) { |
| 439 | spin_unlock(&old_entry->lock); | 439 | spin_unlock(&old_entry->lock); |
| 440 | mutex_unlock(&old_entry->group->mark_mutex); | 440 | mutex_unlock(&old_entry->group->mark_mutex); |
| 441 | fsnotify_put_mark(chunk_entry); | 441 | fsnotify_put_mark(chunk_entry); |
| @@ -989,8 +989,6 @@ static void evict_chunk(struct audit_chunk *chunk) | |||
| 989 | 989 | ||
| 990 | static int audit_tree_handle_event(struct fsnotify_group *group, | 990 | static int audit_tree_handle_event(struct fsnotify_group *group, |
| 991 | struct inode *to_tell, | 991 | struct inode *to_tell, |
| 992 | struct fsnotify_mark *inode_mark, | ||
| 993 | struct fsnotify_mark *vfsmount_mark, | ||
| 994 | u32 mask, const void *data, int data_type, | 992 | u32 mask, const void *data, int data_type, |
| 995 | const unsigned char *file_name, u32 cookie, | 993 | const unsigned char *file_name, u32 cookie, |
| 996 | struct fsnotify_iter_info *iter_info) | 994 | struct fsnotify_iter_info *iter_info) |
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index f1ba88994508..c17c0c268436 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c | |||
| @@ -160,7 +160,7 @@ static struct audit_parent *audit_init_parent(struct path *path) | |||
| 160 | 160 | ||
| 161 | fsnotify_init_mark(&parent->mark, audit_watch_group); | 161 | fsnotify_init_mark(&parent->mark, audit_watch_group); |
| 162 | parent->mark.mask = AUDIT_FS_WATCH; | 162 | parent->mark.mask = AUDIT_FS_WATCH; |
| 163 | ret = fsnotify_add_mark(&parent->mark, inode, NULL, 0); | 163 | ret = fsnotify_add_inode_mark(&parent->mark, inode, 0); |
| 164 | if (ret < 0) { | 164 | if (ret < 0) { |
| 165 | audit_free_parent(parent); | 165 | audit_free_parent(parent); |
| 166 | return ERR_PTR(ret); | 166 | return ERR_PTR(ret); |
| @@ -472,12 +472,11 @@ void audit_remove_watch_rule(struct audit_krule *krule) | |||
| 472 | /* Update watch data in audit rules based on fsnotify events. */ | 472 | /* Update watch data in audit rules based on fsnotify events. */ |
| 473 | static int audit_watch_handle_event(struct fsnotify_group *group, | 473 | static int audit_watch_handle_event(struct fsnotify_group *group, |
| 474 | struct inode *to_tell, | 474 | struct inode *to_tell, |
| 475 | struct fsnotify_mark *inode_mark, | ||
| 476 | struct fsnotify_mark *vfsmount_mark, | ||
| 477 | u32 mask, const void *data, int data_type, | 475 | u32 mask, const void *data, int data_type, |
| 478 | const unsigned char *dname, u32 cookie, | 476 | const unsigned char *dname, u32 cookie, |
| 479 | struct fsnotify_iter_info *iter_info) | 477 | struct fsnotify_iter_info *iter_info) |
| 480 | { | 478 | { |
| 479 | struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); | ||
| 481 | const struct inode *inode; | 480 | const struct inode *inode; |
| 482 | struct audit_parent *parent; | 481 | struct audit_parent *parent; |
| 483 | 482 | ||
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index ed13645bd80c..76efe9a183f5 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c | |||
| @@ -295,6 +295,15 @@ static const struct file_operations bpffs_map_fops = { | |||
| 295 | .release = bpffs_map_release, | 295 | .release = bpffs_map_release, |
| 296 | }; | 296 | }; |
| 297 | 297 | ||
| 298 | static int bpffs_obj_open(struct inode *inode, struct file *file) | ||
| 299 | { | ||
| 300 | return -EIO; | ||
| 301 | } | ||
| 302 | |||
| 303 | static const struct file_operations bpffs_obj_fops = { | ||
| 304 | .open = bpffs_obj_open, | ||
| 305 | }; | ||
| 306 | |||
| 298 | static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw, | 307 | static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw, |
| 299 | const struct inode_operations *iops, | 308 | const struct inode_operations *iops, |
| 300 | const struct file_operations *fops) | 309 | const struct file_operations *fops) |
| @@ -314,7 +323,8 @@ static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw, | |||
| 314 | 323 | ||
| 315 | static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg) | 324 | static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg) |
| 316 | { | 325 | { |
| 317 | return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops, NULL); | 326 | return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops, |
| 327 | &bpffs_obj_fops); | ||
| 318 | } | 328 | } |
| 319 | 329 | ||
| 320 | static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg) | 330 | static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg) |
| @@ -322,7 +332,7 @@ static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg) | |||
| 322 | struct bpf_map *map = arg; | 332 | struct bpf_map *map = arg; |
| 323 | 333 | ||
| 324 | return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops, | 334 | return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops, |
| 325 | map->btf ? &bpffs_map_fops : NULL); | 335 | map->btf ? &bpffs_map_fops : &bpffs_obj_fops); |
| 326 | } | 336 | } |
| 327 | 337 | ||
| 328 | static struct dentry * | 338 | static struct dentry * |
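With bpffs_obj_fops installed above, open(2) on a pinned program, or on a map without BTF, now fails cleanly with EIO rather than hitting an object with no file_operations. A hedged userspace sketch of what that looks like, with a hypothetical pin path:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
            /* hypothetical pin path; pinned objects are not byte streams */
            int fd = open("/sys/fs/bpf/myprog", O_RDONLY);

            if (fd < 0 && errno == EIO)
                    fprintf(stderr, "use bpf(2) to interact with pinned objects\n");
            return 0;
    }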
diff --git a/kernel/module.c b/kernel/module.c index 68469b37d61a..f475f30eed8c 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
| @@ -274,9 +274,7 @@ static void module_assert_mutex_or_preempt(void) | |||
| 274 | } | 274 | } |
| 275 | 275 | ||
| 276 | static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE); | 276 | static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE); |
| 277 | #ifndef CONFIG_MODULE_SIG_FORCE | ||
| 278 | module_param(sig_enforce, bool_enable_only, 0644); | 277 | module_param(sig_enforce, bool_enable_only, 0644); |
| 279 | #endif /* !CONFIG_MODULE_SIG_FORCE */ | ||
| 280 | 278 | ||
| 281 | /* | 279 | /* |
| 282 | * Export sig_enforce kernel cmdline parameter to allow other subsystems rely | 280 | * Export sig_enforce kernel cmdline parameter to allow other subsystems rely |
| @@ -2785,7 +2783,7 @@ static int module_sig_check(struct load_info *info, int flags) | |||
| 2785 | } | 2783 | } |
| 2786 | 2784 | ||
| 2787 | /* Not having a signature is only an error if we're strict. */ | 2785 | /* Not having a signature is only an error if we're strict. */ |
| 2788 | if (err == -ENOKEY && !sig_enforce) | 2786 | if (err == -ENOKEY && !is_module_sig_enforced()) |
| 2789 | err = 0; | 2787 | err = 0; |
| 2790 | 2788 | ||
| 2791 | return err; | 2789 | return err; |
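The module.c hunks above expose sig_enforce unconditionally as a "bool_enable_only" parameter and route the check through the is_module_sig_enforced() accessor. The parameter type matters here; annotated from the lines shown in the hunk:

    static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
    module_param(sig_enforce, bool_enable_only, 0644);
    /*
     * bool_enable_only: accepts a transition from false to true at
     * runtime (e.g. via /sys/module/module/parameters/sig_enforce)
     * but rejects any attempt to turn enforcement back off.
     */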
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 684b66bfa199..491828713e0b 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
| @@ -411,6 +411,12 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par, | |||
| 411 | watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0); | 411 | watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0); |
| 412 | if (IS_ERR(watcher)) | 412 | if (IS_ERR(watcher)) |
| 413 | return PTR_ERR(watcher); | 413 | return PTR_ERR(watcher); |
| 414 | |||
| 415 | if (watcher->family != NFPROTO_BRIDGE) { | ||
| 416 | module_put(watcher->me); | ||
| 417 | return -ENOENT; | ||
| 418 | } | ||
| 419 | |||
| 414 | w->u.watcher = watcher; | 420 | w->u.watcher = watcher; |
| 415 | 421 | ||
| 416 | par->target = watcher; | 422 | par->target = watcher; |
| @@ -709,6 +715,8 @@ ebt_check_entry(struct ebt_entry *e, struct net *net, | |||
| 709 | } | 715 | } |
| 710 | i = 0; | 716 | i = 0; |
| 711 | 717 | ||
| 718 | memset(&mtpar, 0, sizeof(mtpar)); | ||
| 719 | memset(&tgpar, 0, sizeof(tgpar)); | ||
| 712 | mtpar.net = tgpar.net = net; | 720 | mtpar.net = tgpar.net = net; |
| 713 | mtpar.table = tgpar.table = name; | 721 | mtpar.table = tgpar.table = name; |
| 714 | mtpar.entryinfo = tgpar.entryinfo = e; | 722 | mtpar.entryinfo = tgpar.entryinfo = e; |
| @@ -730,6 +738,13 @@ ebt_check_entry(struct ebt_entry *e, struct net *net, | |||
| 730 | goto cleanup_watchers; | 738 | goto cleanup_watchers; |
| 731 | } | 739 | } |
| 732 | 740 | ||
| 741 | /* Reject UNSPEC, xtables verdicts/return values are incompatible */ | ||
| 742 | if (target->family != NFPROTO_BRIDGE) { | ||
| 743 | module_put(target->me); | ||
| 744 | ret = -ENOENT; | ||
| 745 | goto cleanup_watchers; | ||
| 746 | } | ||
| 747 | |||
| 733 | t->u.target = target; | 748 | t->u.target = target; |
| 734 | if (t->u.target == &ebt_standard_target) { | 749 | if (t->u.target == &ebt_standard_target) { |
| 735 | if (gap < sizeof(struct ebt_standard_target)) { | 750 | if (gap < sizeof(struct ebt_standard_target)) { |
| @@ -1606,16 +1621,16 @@ struct compat_ebt_entry_mwt { | |||
| 1606 | compat_uptr_t ptr; | 1621 | compat_uptr_t ptr; |
| 1607 | } u; | 1622 | } u; |
| 1608 | compat_uint_t match_size; | 1623 | compat_uint_t match_size; |
| 1609 | compat_uint_t data[0]; | 1624 | compat_uint_t data[0] __attribute__ ((aligned (__alignof__(struct compat_ebt_replace)))); |
| 1610 | }; | 1625 | }; |
| 1611 | 1626 | ||
| 1612 | /* account for possible padding between match_size and ->data */ | 1627 | /* account for possible padding between match_size and ->data */ |
| 1613 | static int ebt_compat_entry_padsize(void) | 1628 | static int ebt_compat_entry_padsize(void) |
| 1614 | { | 1629 | { |
| 1615 | BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) < | 1630 | BUILD_BUG_ON(sizeof(struct ebt_entry_match) < |
| 1616 | COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt))); | 1631 | sizeof(struct compat_ebt_entry_mwt)); |
| 1617 | return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) - | 1632 | return (int) sizeof(struct ebt_entry_match) - |
| 1618 | COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)); | 1633 | sizeof(struct compat_ebt_entry_mwt); |
| 1619 | } | 1634 | } |
| 1620 | 1635 | ||
| 1621 | static int ebt_compat_match_offset(const struct xt_match *match, | 1636 | static int ebt_compat_match_offset(const struct xt_match *match, |
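Two hardening fixes stand out in the ebtables hunks above: extensions resolved by name must actually be NFPROTO_BRIDGE (otherwise the module reference is dropped and -ENOENT returned), and the on-stack check-param structs are zeroed before the used fields are filled in, so any member a checker later reads is 0 rather than stack garbage. The same memset fix recurs in ip_tables.c, ip6_tables.c and addrconf.c below. The zeroing idiom in isolation:

    struct xt_mtchk_param mtpar;

    memset(&mtpar, 0, sizeof(mtpar));   /* unset members read as 0, not stack junk */
    mtpar.net       = net;
    mtpar.table     = name;
    mtpar.entryinfo = e;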
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index eaf05de37f75..6de981270566 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c | |||
| @@ -261,7 +261,7 @@ static void nft_reject_br_send_v6_unreach(struct net *net, | |||
| 261 | if (!reject6_br_csum_ok(oldskb, hook)) | 261 | if (!reject6_br_csum_ok(oldskb, hook)) |
| 262 | return; | 262 | return; |
| 263 | 263 | ||
| 264 | nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) + | 264 | nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) + |
| 265 | LL_MAX_HEADER + len, GFP_ATOMIC); | 265 | LL_MAX_HEADER + len, GFP_ATOMIC); |
| 266 | if (!nskb) | 266 | if (!nskb) |
| 267 | return; | 267 | return; |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index a7a9c3d738ba..8e3fda9e725c 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
| @@ -119,13 +119,14 @@ unsigned long neigh_rand_reach_time(unsigned long base) | |||
| 119 | EXPORT_SYMBOL(neigh_rand_reach_time); | 119 | EXPORT_SYMBOL(neigh_rand_reach_time); |
| 120 | 120 | ||
| 121 | 121 | ||
| 122 | static bool neigh_del(struct neighbour *n, __u8 state, | 122 | static bool neigh_del(struct neighbour *n, __u8 state, __u8 flags, |
| 123 | struct neighbour __rcu **np, struct neigh_table *tbl) | 123 | struct neighbour __rcu **np, struct neigh_table *tbl) |
| 124 | { | 124 | { |
| 125 | bool retval = false; | 125 | bool retval = false; |
| 126 | 126 | ||
| 127 | write_lock(&n->lock); | 127 | write_lock(&n->lock); |
| 128 | if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state)) { | 128 | if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state) && |
| 129 | !(n->flags & flags)) { | ||
| 129 | struct neighbour *neigh; | 130 | struct neighbour *neigh; |
| 130 | 131 | ||
| 131 | neigh = rcu_dereference_protected(n->next, | 132 | neigh = rcu_dereference_protected(n->next, |
| @@ -157,7 +158,7 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl) | |||
| 157 | while ((n = rcu_dereference_protected(*np, | 158 | while ((n = rcu_dereference_protected(*np, |
| 158 | lockdep_is_held(&tbl->lock)))) { | 159 | lockdep_is_held(&tbl->lock)))) { |
| 159 | if (n == ndel) | 160 | if (n == ndel) |
| 160 | return neigh_del(n, 0, np, tbl); | 161 | return neigh_del(n, 0, 0, np, tbl); |
| 161 | np = &n->next; | 162 | np = &n->next; |
| 162 | } | 163 | } |
| 163 | return false; | 164 | return false; |
| @@ -185,7 +186,8 @@ static int neigh_forced_gc(struct neigh_table *tbl) | |||
| 185 | * - nobody refers to it. | 186 | * - nobody refers to it. |
| 186 | * - it is not permanent | 187 | * - it is not permanent |
| 187 | */ | 188 | */ |
| 188 | if (neigh_del(n, NUD_PERMANENT, np, tbl)) { | 189 | if (neigh_del(n, NUD_PERMANENT, NTF_EXT_LEARNED, np, |
| 190 | tbl)) { | ||
| 189 | shrunk = 1; | 191 | shrunk = 1; |
| 190 | continue; | 192 | continue; |
| 191 | } | 193 | } |
diff --git a/net/core/sock.c b/net/core/sock.c index f333d75ef1a9..bcc41829a16d 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
| @@ -728,22 +728,9 @@ int sock_setsockopt(struct socket *sock, int level, int optname, | |||
| 728 | sock_valbool_flag(sk, SOCK_DBG, valbool); | 728 | sock_valbool_flag(sk, SOCK_DBG, valbool); |
| 729 | break; | 729 | break; |
| 730 | case SO_REUSEADDR: | 730 | case SO_REUSEADDR: |
| 731 | val = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); | 731 | sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); |
| 732 | if ((sk->sk_family == PF_INET || sk->sk_family == PF_INET6) && | ||
| 733 | inet_sk(sk)->inet_num && | ||
| 734 | (sk->sk_reuse != val)) { | ||
| 735 | ret = (sk->sk_state == TCP_ESTABLISHED) ? -EISCONN : -EUCLEAN; | ||
| 736 | break; | ||
| 737 | } | ||
| 738 | sk->sk_reuse = val; | ||
| 739 | break; | 732 | break; |
| 740 | case SO_REUSEPORT: | 733 | case SO_REUSEPORT: |
| 741 | if ((sk->sk_family == PF_INET || sk->sk_family == PF_INET6) && | ||
| 742 | inet_sk(sk)->inet_num && | ||
| 743 | (sk->sk_reuseport != valbool)) { | ||
| 744 | ret = (sk->sk_state == TCP_ESTABLISHED) ? -EISCONN : -EUCLEAN; | ||
| 745 | break; | ||
| 746 | } | ||
| 747 | sk->sk_reuseport = valbool; | 734 | sk->sk_reuseport = valbool; |
| 748 | break; | 735 | break; |
| 749 | case SO_TYPE: | 736 | case SO_TYPE: |
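The SO_REUSEADDR/SO_REUSEPORT hunk above reverts an earlier restriction: the options are once again settable even on a bound socket, restoring the historic behaviour applications depend on. Typical userspace usage, for reference:

    #include <sys/socket.h>

    static int enable_reuseaddr(int fd)
    {
            int one = 1;

            /* allow quick rebinding of the address after a restart */
            return setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
    }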
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index 7d20e1f3de28..56197f0d9608 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c | |||
| @@ -75,7 +75,8 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 75 | if (!skb->dev) | 75 | if (!skb->dev) |
| 76 | return NULL; | 76 | return NULL; |
| 77 | 77 | ||
| 78 | pskb_trim_rcsum(skb, skb->len - 4); | 78 | if (pskb_trim_rcsum(skb, skb->len - 4)) |
| 79 | return NULL; | ||
| 79 | 80 | ||
| 80 | return skb; | 81 | return skb; |
| 81 | } | 82 | } |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 38ab97b0a2ec..ca0dad90803a 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
| @@ -531,6 +531,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name, | |||
| 531 | return -ENOMEM; | 531 | return -ENOMEM; |
| 532 | 532 | ||
| 533 | j = 0; | 533 | j = 0; |
| 534 | memset(&mtpar, 0, sizeof(mtpar)); | ||
| 534 | mtpar.net = net; | 535 | mtpar.net = net; |
| 535 | mtpar.table = name; | 536 | mtpar.table = name; |
| 536 | mtpar.entryinfo = &e->ip; | 537 | mtpar.entryinfo = &e->ip; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index fed3f1c66167..bea17f1e8302 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -1730,6 +1730,10 @@ process: | |||
| 1730 | reqsk_put(req); | 1730 | reqsk_put(req); |
| 1731 | goto discard_it; | 1731 | goto discard_it; |
| 1732 | } | 1732 | } |
| 1733 | if (tcp_checksum_complete(skb)) { | ||
| 1734 | reqsk_put(req); | ||
| 1735 | goto csum_error; | ||
| 1736 | } | ||
| 1733 | if (unlikely(sk->sk_state != TCP_LISTEN)) { | 1737 | if (unlikely(sk->sk_state != TCP_LISTEN)) { |
| 1734 | inet_csk_reqsk_queue_drop_and_put(sk, req); | 1738 | inet_csk_reqsk_queue_drop_and_put(sk, req); |
| 1735 | goto lookup; | 1739 | goto lookup; |
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 4d58e2ce0b5b..8cc7c3487330 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c | |||
| @@ -268,8 +268,6 @@ found: | |||
| 268 | goto out_check_final; | 268 | goto out_check_final; |
| 269 | } | 269 | } |
| 270 | 270 | ||
| 271 | p = *head; | ||
| 272 | th2 = tcp_hdr(p); | ||
| 273 | tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH); | 271 | tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH); |
| 274 | 272 | ||
| 275 | out_check_final: | 273 | out_check_final: |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 89019bf59f46..c134286d6a41 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -1324,6 +1324,7 @@ retry: | |||
| 1324 | } | 1324 | } |
| 1325 | } | 1325 | } |
| 1326 | 1326 | ||
| 1327 | memset(&cfg, 0, sizeof(cfg)); | ||
| 1327 | cfg.valid_lft = min_t(__u32, ifp->valid_lft, | 1328 | cfg.valid_lft = min_t(__u32, ifp->valid_lft, |
| 1328 | idev->cnf.temp_valid_lft + age); | 1329 | idev->cnf.temp_valid_lft + age); |
| 1329 | cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor; | 1330 | cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor; |
| @@ -1357,7 +1358,6 @@ retry: | |||
| 1357 | 1358 | ||
| 1358 | cfg.pfx = &addr; | 1359 | cfg.pfx = &addr; |
| 1359 | cfg.scope = ipv6_addr_scope(cfg.pfx); | 1360 | cfg.scope = ipv6_addr_scope(cfg.pfx); |
| 1360 | cfg.rt_priority = 0; | ||
| 1361 | 1361 | ||
| 1362 | ift = ipv6_add_addr(idev, &cfg, block, NULL); | 1362 | ift = ipv6_add_addr(idev, &cfg, block, NULL); |
| 1363 | if (IS_ERR(ift)) { | 1363 | if (IS_ERR(ift)) { |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 7aa4c41a3bd9..39d1d487eca2 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
| @@ -934,6 +934,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, | |||
| 934 | { | 934 | { |
| 935 | struct fib6_info *leaf = rcu_dereference_protected(fn->leaf, | 935 | struct fib6_info *leaf = rcu_dereference_protected(fn->leaf, |
| 936 | lockdep_is_held(&rt->fib6_table->tb6_lock)); | 936 | lockdep_is_held(&rt->fib6_table->tb6_lock)); |
| 937 | enum fib_event_type event = FIB_EVENT_ENTRY_ADD; | ||
| 937 | struct fib6_info *iter = NULL, *match = NULL; | 938 | struct fib6_info *iter = NULL, *match = NULL; |
| 938 | struct fib6_info __rcu **ins; | 939 | struct fib6_info __rcu **ins; |
| 939 | int replace = (info->nlh && | 940 | int replace = (info->nlh && |
| @@ -1013,6 +1014,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, | |||
| 1013 | "Can not append to a REJECT route"); | 1014 | "Can not append to a REJECT route"); |
| 1014 | return -EINVAL; | 1015 | return -EINVAL; |
| 1015 | } | 1016 | } |
| 1017 | event = FIB_EVENT_ENTRY_APPEND; | ||
| 1016 | rt->fib6_nsiblings = match->fib6_nsiblings; | 1018 | rt->fib6_nsiblings = match->fib6_nsiblings; |
| 1017 | list_add_tail(&rt->fib6_siblings, &match->fib6_siblings); | 1019 | list_add_tail(&rt->fib6_siblings, &match->fib6_siblings); |
| 1018 | match->fib6_nsiblings++; | 1020 | match->fib6_nsiblings++; |
| @@ -1034,15 +1036,12 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, | |||
| 1034 | * insert node | 1036 | * insert node |
| 1035 | */ | 1037 | */ |
| 1036 | if (!replace) { | 1038 | if (!replace) { |
| 1037 | enum fib_event_type event; | ||
| 1038 | |||
| 1039 | if (!add) | 1039 | if (!add) |
| 1040 | pr_warn("NLM_F_CREATE should be set when creating new route\n"); | 1040 | pr_warn("NLM_F_CREATE should be set when creating new route\n"); |
| 1041 | 1041 | ||
| 1042 | add: | 1042 | add: |
| 1043 | nlflags |= NLM_F_CREATE; | 1043 | nlflags |= NLM_F_CREATE; |
| 1044 | 1044 | ||
| 1045 | event = append ? FIB_EVENT_ENTRY_APPEND : FIB_EVENT_ENTRY_ADD; | ||
| 1046 | err = call_fib6_entry_notifiers(info->nl_net, event, rt, | 1045 | err = call_fib6_entry_notifiers(info->nl_net, event, rt, |
| 1047 | extack); | 1046 | extack); |
| 1048 | if (err) | 1047 | if (err) |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 0758b5bcfb29..7eab959734bc 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
| @@ -550,6 +550,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, | |||
| 550 | return -ENOMEM; | 550 | return -ENOMEM; |
| 551 | 551 | ||
| 552 | j = 0; | 552 | j = 0; |
| 553 | memset(&mtpar, 0, sizeof(mtpar)); | ||
| 553 | mtpar.net = net; | 554 | mtpar.net = net; |
| 554 | mtpar.table = name; | 555 | mtpar.table = name; |
| 555 | mtpar.entryinfo = &e->ipv6; | 556 | mtpar.entryinfo = &e->ipv6; |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index fb956989adaf..86a0e4333d42 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -2307,9 +2307,6 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, | |||
| 2307 | const struct in6_addr *daddr, *saddr; | 2307 | const struct in6_addr *daddr, *saddr; |
| 2308 | struct rt6_info *rt6 = (struct rt6_info *)dst; | 2308 | struct rt6_info *rt6 = (struct rt6_info *)dst; |
| 2309 | 2309 | ||
| 2310 | if (rt6->rt6i_flags & RTF_LOCAL) | ||
| 2311 | return; | ||
| 2312 | |||
| 2313 | if (dst_metric_locked(dst, RTAX_MTU)) | 2310 | if (dst_metric_locked(dst, RTAX_MTU)) |
| 2314 | return; | 2311 | return; |
| 2315 | 2312 | ||
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index b620d9b72e59..7efa9fd7e109 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -1479,6 +1479,10 @@ process: | |||
| 1479 | reqsk_put(req); | 1479 | reqsk_put(req); |
| 1480 | goto discard_it; | 1480 | goto discard_it; |
| 1481 | } | 1481 | } |
| 1482 | if (tcp_checksum_complete(skb)) { | ||
| 1483 | reqsk_put(req); | ||
| 1484 | goto csum_error; | ||
| 1485 | } | ||
| 1482 | if (unlikely(sk->sk_state != TCP_LISTEN)) { | 1486 | if (unlikely(sk->sk_state != TCP_LISTEN)) { |
| 1483 | inet_csk_reqsk_queue_drop_and_put(sk, req); | 1487 | inet_csk_reqsk_queue_drop_and_put(sk, req); |
| 1484 | goto lookup; | 1488 | goto lookup; |
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 6616c9fd292f..5b9900889e31 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c | |||
| @@ -553,6 +553,12 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf | |||
| 553 | goto out_tunnel; | 553 | goto out_tunnel; |
| 554 | } | 554 | } |
| 555 | 555 | ||
| 556 | /* L2TPv2 only accepts PPP pseudo-wires */ | ||
| 557 | if (tunnel->version == 2 && cfg.pw_type != L2TP_PWTYPE_PPP) { | ||
| 558 | ret = -EPROTONOSUPPORT; | ||
| 559 | goto out_tunnel; | ||
| 560 | } | ||
| 561 | |||
| 556 | if (tunnel->version > 2) { | 562 | if (tunnel->version > 2) { |
| 557 | if (info->attrs[L2TP_ATTR_DATA_SEQ]) | 563 | if (info->attrs[L2TP_ATTR_DATA_SEQ]) |
| 558 | cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]); | 564 | cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]); |
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index b56cb1df4fc0..55188382845c 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c | |||
| @@ -612,6 +612,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
| 612 | u32 session_id, peer_session_id; | 612 | u32 session_id, peer_session_id; |
| 613 | bool drop_refcnt = false; | 613 | bool drop_refcnt = false; |
| 614 | bool drop_tunnel = false; | 614 | bool drop_tunnel = false; |
| 615 | bool new_session = false; | ||
| 616 | bool new_tunnel = false; | ||
| 615 | int ver = 2; | 617 | int ver = 2; |
| 616 | int fd; | 618 | int fd; |
| 617 | 619 | ||
| @@ -701,6 +703,15 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
| 701 | .encap = L2TP_ENCAPTYPE_UDP, | 703 | .encap = L2TP_ENCAPTYPE_UDP, |
| 702 | .debug = 0, | 704 | .debug = 0, |
| 703 | }; | 705 | }; |
| 706 | |||
| 707 | /* Prevent l2tp_tunnel_register() from trying to set up | ||
| 708 | * a kernel socket. | ||
| 709 | */ | ||
| 710 | if (fd < 0) { | ||
| 711 | error = -EBADF; | ||
| 712 | goto end; | ||
| 713 | } | ||
| 714 | |||
| 704 | error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel); | 715 | error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel); |
| 705 | if (error < 0) | 716 | if (error < 0) |
| 706 | goto end; | 717 | goto end; |
| @@ -713,6 +724,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
| 713 | goto end; | 724 | goto end; |
| 714 | } | 725 | } |
| 715 | drop_tunnel = true; | 726 | drop_tunnel = true; |
| 727 | new_tunnel = true; | ||
| 716 | } | 728 | } |
| 717 | } else { | 729 | } else { |
| 718 | /* Error if we can't find the tunnel */ | 730 | /* Error if we can't find the tunnel */ |
| @@ -734,6 +746,12 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
| 734 | session = l2tp_session_get(sock_net(sk), tunnel, session_id); | 746 | session = l2tp_session_get(sock_net(sk), tunnel, session_id); |
| 735 | if (session) { | 747 | if (session) { |
| 736 | drop_refcnt = true; | 748 | drop_refcnt = true; |
| 749 | |||
| 750 | if (session->pwtype != L2TP_PWTYPE_PPP) { | ||
| 751 | error = -EPROTOTYPE; | ||
| 752 | goto end; | ||
| 753 | } | ||
| 754 | |||
| 737 | ps = l2tp_session_priv(session); | 755 | ps = l2tp_session_priv(session); |
| 738 | 756 | ||
| 739 | /* Using a pre-existing session is fine as long as it hasn't | 757 | /* Using a pre-existing session is fine as long as it hasn't |
| @@ -751,6 +769,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
| 751 | /* Default MTU must allow space for UDP/L2TP/PPP headers */ | 769 | /* Default MTU must allow space for UDP/L2TP/PPP headers */ |
| 752 | cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; | 770 | cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; |
| 753 | cfg.mru = cfg.mtu; | 771 | cfg.mru = cfg.mtu; |
| 772 | cfg.pw_type = L2TP_PWTYPE_PPP; | ||
| 754 | 773 | ||
| 755 | session = l2tp_session_create(sizeof(struct pppol2tp_session), | 774 | session = l2tp_session_create(sizeof(struct pppol2tp_session), |
| 756 | tunnel, session_id, | 775 | tunnel, session_id, |
| @@ -772,6 +791,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
| 772 | goto end; | 791 | goto end; |
| 773 | } | 792 | } |
| 774 | drop_refcnt = true; | 793 | drop_refcnt = true; |
| 794 | new_session = true; | ||
| 775 | } | 795 | } |
| 776 | 796 | ||
| 777 | /* Special case: if source & dest session_id == 0x0000, this | 797 | /* Special case: if source & dest session_id == 0x0000, this |
| @@ -818,6 +838,12 @@ out_no_ppp: | |||
| 818 | session->name); | 838 | session->name); |
| 819 | 839 | ||
| 820 | end: | 840 | end: |
| 841 | if (error) { | ||
| 842 | if (new_session) | ||
| 843 | l2tp_session_delete(session); | ||
| 844 | if (new_tunnel) | ||
| 845 | l2tp_tunnel_delete(tunnel); | ||
| 846 | } | ||
| 821 | if (drop_refcnt) | 847 | if (drop_refcnt) |
| 822 | l2tp_session_dec_refcount(session); | 848 | l2tp_session_dec_refcount(session); |
| 823 | if (drop_tunnel) | 849 | if (drop_tunnel) |
| @@ -1175,7 +1201,7 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, | |||
| 1175 | l2tp_session_get(sock_net(sk), tunnel, | 1201 | l2tp_session_get(sock_net(sk), tunnel, |
| 1176 | stats.session_id); | 1202 | stats.session_id); |
| 1177 | 1203 | ||
| 1178 | if (session) { | 1204 | if (session && session->pwtype == L2TP_PWTYPE_PPP) { |
| 1179 | err = pppol2tp_session_ioctl(session, cmd, | 1205 | err = pppol2tp_session_ioctl(session, cmd, |
| 1180 | arg); | 1206 | arg); |
| 1181 | l2tp_session_dec_refcount(session); | 1207 | l2tp_session_dec_refcount(session); |
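pppol2tp_connect() above gains new_session/new_tunnel flags so that objects created inside this call are torn down again on any later failure, while pre-existing ones found by lookup are left alone. The shape of that idiom, reduced to a sketch with hypothetical helpers:

    struct obj;                             /* hypothetical object type */

    static int connect_sketch(int id)
    {
            struct obj *o;
            bool new_obj = false;
            int error;

            o = obj_lookup(id);             /* hypothetical helpers */
            if (!o) {
                    o = obj_create(id);
                    if (!o)
                            return -ENOMEM;
                    new_obj = true;         /* this call owns it on failure */
            }

            error = obj_setup(o);
            if (error && new_obj)
                    obj_delete(o);          /* only undo what this call created */
            return error;
    }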
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index fb1b1f9e7e5e..fb73451ed85e 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
| @@ -1098,6 +1098,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
| 1098 | 1098 | ||
| 1099 | ieee80211_led_init(local); | 1099 | ieee80211_led_init(local); |
| 1100 | 1100 | ||
| 1101 | result = ieee80211_txq_setup_flows(local); | ||
| 1102 | if (result) | ||
| 1103 | goto fail_flows; | ||
| 1104 | |||
| 1101 | rtnl_lock(); | 1105 | rtnl_lock(); |
| 1102 | 1106 | ||
| 1103 | result = ieee80211_init_rate_ctrl_alg(local, | 1107 | result = ieee80211_init_rate_ctrl_alg(local, |
| @@ -1120,10 +1124,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
| 1120 | 1124 | ||
| 1121 | rtnl_unlock(); | 1125 | rtnl_unlock(); |
| 1122 | 1126 | ||
| 1123 | result = ieee80211_txq_setup_flows(local); | ||
| 1124 | if (result) | ||
| 1125 | goto fail_flows; | ||
| 1126 | |||
| 1127 | #ifdef CONFIG_INET | 1127 | #ifdef CONFIG_INET |
| 1128 | local->ifa_notifier.notifier_call = ieee80211_ifa_changed; | 1128 | local->ifa_notifier.notifier_call = ieee80211_ifa_changed; |
| 1129 | result = register_inetaddr_notifier(&local->ifa_notifier); | 1129 | result = register_inetaddr_notifier(&local->ifa_notifier); |
| @@ -1149,8 +1149,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
| 1149 | #if defined(CONFIG_INET) || defined(CONFIG_IPV6) | 1149 | #if defined(CONFIG_INET) || defined(CONFIG_IPV6) |
| 1150 | fail_ifa: | 1150 | fail_ifa: |
| 1151 | #endif | 1151 | #endif |
| 1152 | ieee80211_txq_teardown_flows(local); | ||
| 1153 | fail_flows: | ||
| 1154 | rtnl_lock(); | 1152 | rtnl_lock(); |
| 1155 | rate_control_deinitialize(local); | 1153 | rate_control_deinitialize(local); |
| 1156 | ieee80211_remove_interfaces(local); | 1154 | ieee80211_remove_interfaces(local); |
| @@ -1158,6 +1156,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
| 1158 | rtnl_unlock(); | 1156 | rtnl_unlock(); |
| 1159 | ieee80211_led_exit(local); | 1157 | ieee80211_led_exit(local); |
| 1160 | ieee80211_wep_free(local); | 1158 | ieee80211_wep_free(local); |
| 1159 | ieee80211_txq_teardown_flows(local); | ||
| 1160 | fail_flows: | ||
| 1161 | destroy_workqueue(local->workqueue); | 1161 | destroy_workqueue(local->workqueue); |
| 1162 | fail_workqueue: | 1162 | fail_workqueue: |
| 1163 | wiphy_unregister(local->hw.wiphy); | 1163 | wiphy_unregister(local->hw.wiphy); |
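ieee80211_register_hw() above moves ieee80211_txq_setup_flows() before rtnl_lock() and, correspondingly, shifts its teardown later in the error path: unwind labels must release resources in exactly the reverse order of acquisition. The idiom in miniature, with hypothetical setup steps:

    int register_sketch(void)
    {
            int err;

            err = setup_a();
            if (err)
                    return err;

            err = setup_b();
            if (err)
                    goto fail_b;

            err = setup_c();
            if (err)
                    goto fail_c;

            return 0;

    fail_c:
            teardown_b();   /* reverse order: undo b before a */
    fail_b:
            teardown_a();
            return err;
    }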
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index bbad940c0137..8a33dac4e805 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h | |||
| @@ -1234,7 +1234,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, | |||
| 1234 | pr_debug("Create set %s with family %s\n", | 1234 | pr_debug("Create set %s with family %s\n", |
| 1235 | set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6"); | 1235 | set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6"); |
| 1236 | 1236 | ||
| 1237 | #ifndef IP_SET_PROTO_UNDEF | 1237 | #ifdef IP_SET_PROTO_UNDEF |
| 1238 | if (set->family != NFPROTO_UNSPEC) | ||
| 1239 | return -IPSET_ERR_INVALID_FAMILY; | ||
| 1240 | #else | ||
| 1238 | if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) | 1241 | if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) |
| 1239 | return -IPSET_ERR_INVALID_FAMILY; | 1242 | return -IPSET_ERR_INVALID_FAMILY; |
| 1240 | #endif | 1243 | #endif |
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 0c03c0e16a96..dd21782e2f12 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
| @@ -839,6 +839,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, | |||
| 839 | * For now only for NAT! | 839 | * For now only for NAT! |
| 840 | */ | 840 | */ |
| 841 | ip_vs_rs_hash(ipvs, dest); | 841 | ip_vs_rs_hash(ipvs, dest); |
| 842 | /* FTP-NAT requires conntrack for mangling */ | ||
| 843 | if (svc->port == FTPPORT) | ||
| 844 | ip_vs_register_conntrack(svc); | ||
| 842 | } | 845 | } |
| 843 | atomic_set(&dest->conn_flags, conn_flags); | 846 | atomic_set(&dest->conn_flags, conn_flags); |
| 844 | 847 | ||
| @@ -1462,6 +1465,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup) | |||
| 1462 | */ | 1465 | */ |
| 1463 | static void ip_vs_unlink_service(struct ip_vs_service *svc, bool cleanup) | 1466 | static void ip_vs_unlink_service(struct ip_vs_service *svc, bool cleanup) |
| 1464 | { | 1467 | { |
| 1468 | ip_vs_unregister_conntrack(svc); | ||
| 1465 | /* Hold svc to avoid double release from dest_trash */ | 1469 | /* Hold svc to avoid double release from dest_trash */ |
| 1466 | atomic_inc(&svc->refcnt); | 1470 | atomic_inc(&svc->refcnt); |
| 1467 | /* | 1471 | /* |
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index ba0a0fd045c8..473cce2a5231 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
| @@ -168,7 +168,7 @@ static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb, | |||
| 168 | bool new_rt_is_local) | 168 | bool new_rt_is_local) |
| 169 | { | 169 | { |
| 170 | bool rt_mode_allow_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL); | 170 | bool rt_mode_allow_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL); |
| 171 | bool rt_mode_allow_non_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL); | 171 | bool rt_mode_allow_non_local = !!(rt_mode & IP_VS_RT_MODE_NON_LOCAL); |
| 172 | bool rt_mode_allow_redirect = !!(rt_mode & IP_VS_RT_MODE_RDR); | 172 | bool rt_mode_allow_redirect = !!(rt_mode & IP_VS_RT_MODE_RDR); |
| 173 | bool source_is_loopback; | 173 | bool source_is_loopback; |
| 174 | bool old_rt_is_local; | 174 | bool old_rt_is_local; |
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index 3b5059a8dcdd..d8383609fe28 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c | |||
| @@ -46,6 +46,7 @@ | |||
| 46 | struct nf_conncount_tuple { | 46 | struct nf_conncount_tuple { |
| 47 | struct hlist_node node; | 47 | struct hlist_node node; |
| 48 | struct nf_conntrack_tuple tuple; | 48 | struct nf_conntrack_tuple tuple; |
| 49 | struct nf_conntrack_zone zone; | ||
| 49 | }; | 50 | }; |
| 50 | 51 | ||
| 51 | struct nf_conncount_rb { | 52 | struct nf_conncount_rb { |
| @@ -80,7 +81,8 @@ static int key_diff(const u32 *a, const u32 *b, unsigned int klen) | |||
| 80 | } | 81 | } |
| 81 | 82 | ||
| 82 | bool nf_conncount_add(struct hlist_head *head, | 83 | bool nf_conncount_add(struct hlist_head *head, |
| 83 | const struct nf_conntrack_tuple *tuple) | 84 | const struct nf_conntrack_tuple *tuple, |
| 85 | const struct nf_conntrack_zone *zone) | ||
| 84 | { | 86 | { |
| 85 | struct nf_conncount_tuple *conn; | 87 | struct nf_conncount_tuple *conn; |
| 86 | 88 | ||
| @@ -88,6 +90,7 @@ bool nf_conncount_add(struct hlist_head *head, | |||
| 88 | if (conn == NULL) | 90 | if (conn == NULL) |
| 89 | return false; | 91 | return false; |
| 90 | conn->tuple = *tuple; | 92 | conn->tuple = *tuple; |
| 93 | conn->zone = *zone; | ||
| 91 | hlist_add_head(&conn->node, head); | 94 | hlist_add_head(&conn->node, head); |
| 92 | return true; | 95 | return true; |
| 93 | } | 96 | } |
| @@ -108,7 +111,7 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, | |||
| 108 | 111 | ||
| 109 | /* check the saved connections */ | 112 | /* check the saved connections */ |
| 110 | hlist_for_each_entry_safe(conn, n, head, node) { | 113 | hlist_for_each_entry_safe(conn, n, head, node) { |
| 111 | found = nf_conntrack_find_get(net, zone, &conn->tuple); | 114 | found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple); |
| 112 | if (found == NULL) { | 115 | if (found == NULL) { |
| 113 | hlist_del(&conn->node); | 116 | hlist_del(&conn->node); |
| 114 | kmem_cache_free(conncount_conn_cachep, conn); | 117 | kmem_cache_free(conncount_conn_cachep, conn); |
| @@ -117,7 +120,8 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, | |||
| 117 | 120 | ||
| 118 | found_ct = nf_ct_tuplehash_to_ctrack(found); | 121 | found_ct = nf_ct_tuplehash_to_ctrack(found); |
| 119 | 122 | ||
| 120 | if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple)) { | 123 | if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) && |
| 124 | nf_ct_zone_equal(found_ct, zone, zone->dir)) { | ||
| 121 | /* | 125 | /* |
| 122 | * Just to be sure we have it only once in the list. | 126 | * Just to be sure we have it only once in the list. |
| 123 | * We should not see tuples twice unless someone hooks | 127 | * We should not see tuples twice unless someone hooks |
| @@ -196,7 +200,7 @@ count_tree(struct net *net, struct rb_root *root, | |||
| 196 | if (!addit) | 200 | if (!addit) |
| 197 | return count; | 201 | return count; |
| 198 | 202 | ||
| 199 | if (!nf_conncount_add(&rbconn->hhead, tuple)) | 203 | if (!nf_conncount_add(&rbconn->hhead, tuple, zone)) |
| 200 | return 0; /* hotdrop */ | 204 | return 0; /* hotdrop */ |
| 201 | 205 | ||
| 202 | return count + 1; | 206 | return count + 1; |
| @@ -238,6 +242,7 @@ count_tree(struct net *net, struct rb_root *root, | |||
| 238 | } | 242 | } |
| 239 | 243 | ||
| 240 | conn->tuple = *tuple; | 244 | conn->tuple = *tuple; |
| 245 | conn->zone = *zone; | ||
| 241 | memcpy(rbconn->key, key, sizeof(u32) * keylen); | 246 | memcpy(rbconn->key, key, sizeof(u32) * keylen); |
| 242 | 247 | ||
| 243 | INIT_HLIST_HEAD(&rbconn->hhead); | 248 | INIT_HLIST_HEAD(&rbconn->hhead); |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 39327a42879f..20a2e37c76d1 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
| @@ -1446,7 +1446,8 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct, | |||
| 1446 | } | 1446 | } |
| 1447 | nfnl_lock(NFNL_SUBSYS_CTNETLINK); | 1447 | nfnl_lock(NFNL_SUBSYS_CTNETLINK); |
| 1448 | rcu_read_lock(); | 1448 | rcu_read_lock(); |
| 1449 | if (nat_hook->parse_nat_setup) | 1449 | nat_hook = rcu_dereference(nf_nat_hook); |
| 1450 | if (nat_hook) | ||
| 1450 | return -EAGAIN; | 1451 | return -EAGAIN; |
| 1451 | #endif | 1452 | #endif |
| 1452 | return -EOPNOTSUPP; | 1453 | return -EOPNOTSUPP; |
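The ctnetlink fix above re-reads nf_nat_hook with rcu_dereference() after entering the RCU read-side section; an earlier snapshot of the pointer may already be stale by then. The general pattern:

    struct nf_nat_hook *nat_hook;

    rcu_read_lock();
    nat_hook = rcu_dereference(nf_nat_hook);  /* re-read inside the RCU section */
    if (nat_hook) {
            /* nat_hook is safe to use until rcu_read_unlock() */
    }
    rcu_read_unlock();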
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index f0411fbffe77..896d4a36081d 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -2890,12 +2890,13 @@ static struct nft_set *nft_set_lookup_byid(const struct net *net, | |||
| 2890 | u32 id = ntohl(nla_get_be32(nla)); | 2890 | u32 id = ntohl(nla_get_be32(nla)); |
| 2891 | 2891 | ||
| 2892 | list_for_each_entry(trans, &net->nft.commit_list, list) { | 2892 | list_for_each_entry(trans, &net->nft.commit_list, list) { |
| 2893 | struct nft_set *set = nft_trans_set(trans); | 2893 | if (trans->msg_type == NFT_MSG_NEWSET) { |
| 2894 | struct nft_set *set = nft_trans_set(trans); | ||
| 2894 | 2895 | ||
| 2895 | if (trans->msg_type == NFT_MSG_NEWSET && | 2896 | if (id == nft_trans_set_id(trans) && |
| 2896 | id == nft_trans_set_id(trans) && | 2897 | nft_active_genmask(set, genmask)) |
| 2897 | nft_active_genmask(set, genmask)) | 2898 | return set; |
| 2898 | return set; | 2899 | } |
| 2899 | } | 2900 | } |
| 2900 | return ERR_PTR(-ENOENT); | 2901 | return ERR_PTR(-ENOENT); |
| 2901 | } | 2902 | } |
| @@ -5836,18 +5837,23 @@ static int nf_tables_flowtable_event(struct notifier_block *this, | |||
| 5836 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); | 5837 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
| 5837 | struct nft_flowtable *flowtable; | 5838 | struct nft_flowtable *flowtable; |
| 5838 | struct nft_table *table; | 5839 | struct nft_table *table; |
| 5840 | struct net *net; | ||
| 5839 | 5841 | ||
| 5840 | if (event != NETDEV_UNREGISTER) | 5842 | if (event != NETDEV_UNREGISTER) |
| 5841 | return 0; | 5843 | return 0; |
| 5842 | 5844 | ||
| 5845 | net = maybe_get_net(dev_net(dev)); | ||
| 5846 | if (!net) | ||
| 5847 | return 0; | ||
| 5848 | |||
| 5843 | nfnl_lock(NFNL_SUBSYS_NFTABLES); | 5849 | nfnl_lock(NFNL_SUBSYS_NFTABLES); |
| 5844 | list_for_each_entry(table, &dev_net(dev)->nft.tables, list) { | 5850 | list_for_each_entry(table, &net->nft.tables, list) { |
| 5845 | list_for_each_entry(flowtable, &table->flowtables, list) { | 5851 | list_for_each_entry(flowtable, &table->flowtables, list) { |
| 5846 | nft_flowtable_event(event, dev, flowtable); | 5852 | nft_flowtable_event(event, dev, flowtable); |
| 5847 | } | 5853 | } |
| 5848 | } | 5854 | } |
| 5849 | nfnl_unlock(NFNL_SUBSYS_NFTABLES); | 5855 | nfnl_unlock(NFNL_SUBSYS_NFTABLES); |
| 5850 | 5856 | put_net(net); | |
| 5851 | return NOTIFY_DONE; | 5857 | return NOTIFY_DONE; |
| 5852 | } | 5858 | } |
| 5853 | 5859 | ||
| @@ -6438,7 +6444,7 @@ static void nf_tables_abort_release(struct nft_trans *trans) | |||
| 6438 | kfree(trans); | 6444 | kfree(trans); |
| 6439 | } | 6445 | } |
| 6440 | 6446 | ||
| 6441 | static int nf_tables_abort(struct net *net, struct sk_buff *skb) | 6447 | static int __nf_tables_abort(struct net *net) |
| 6442 | { | 6448 | { |
| 6443 | struct nft_trans *trans, *next; | 6449 | struct nft_trans *trans, *next; |
| 6444 | struct nft_trans_elem *te; | 6450 | struct nft_trans_elem *te; |
| @@ -6554,6 +6560,11 @@ static void nf_tables_cleanup(struct net *net) | |||
| 6554 | nft_validate_state_update(net, NFT_VALIDATE_SKIP); | 6560 | nft_validate_state_update(net, NFT_VALIDATE_SKIP); |
| 6555 | } | 6561 | } |
| 6556 | 6562 | ||
| 6563 | static int nf_tables_abort(struct net *net, struct sk_buff *skb) | ||
| 6564 | { | ||
| 6565 | return __nf_tables_abort(net); | ||
| 6566 | } | ||
| 6567 | |||
| 6557 | static bool nf_tables_valid_genid(struct net *net, u32 genid) | 6568 | static bool nf_tables_valid_genid(struct net *net, u32 genid) |
| 6558 | { | 6569 | { |
| 6559 | return net->nft.base_seq == genid; | 6570 | return net->nft.base_seq == genid; |
| @@ -7148,9 +7159,12 @@ static int __net_init nf_tables_init_net(struct net *net) | |||
| 7148 | 7159 | ||
| 7149 | static void __net_exit nf_tables_exit_net(struct net *net) | 7160 | static void __net_exit nf_tables_exit_net(struct net *net) |
| 7150 | { | 7161 | { |
| 7162 | nfnl_lock(NFNL_SUBSYS_NFTABLES); | ||
| 7163 | if (!list_empty(&net->nft.commit_list)) | ||
| 7164 | __nf_tables_abort(net); | ||
| 7151 | __nft_release_tables(net); | 7165 | __nft_release_tables(net); |
| 7166 | nfnl_unlock(NFNL_SUBSYS_NFTABLES); | ||
| 7152 | WARN_ON_ONCE(!list_empty(&net->nft.tables)); | 7167 | WARN_ON_ONCE(!list_empty(&net->nft.tables)); |
| 7153 | WARN_ON_ONCE(!list_empty(&net->nft.commit_list)); | ||
| 7154 | } | 7168 | } |
| 7155 | 7169 | ||
| 7156 | static struct pernet_operations nf_tables_net_ops = { | 7170 | static struct pernet_operations nf_tables_net_ops = { |
| @@ -7192,13 +7206,13 @@ err1: | |||
| 7192 | 7206 | ||
| 7193 | static void __exit nf_tables_module_exit(void) | 7207 | static void __exit nf_tables_module_exit(void) |
| 7194 | { | 7208 | { |
| 7195 | unregister_pernet_subsys(&nf_tables_net_ops); | ||
| 7196 | nfnetlink_subsys_unregister(&nf_tables_subsys); | 7209 | nfnetlink_subsys_unregister(&nf_tables_subsys); |
| 7197 | unregister_netdevice_notifier(&nf_tables_flowtable_notifier); | 7210 | unregister_netdevice_notifier(&nf_tables_flowtable_notifier); |
| 7211 | nft_chain_filter_fini(); | ||
| 7212 | unregister_pernet_subsys(&nf_tables_net_ops); | ||
| 7198 | rcu_barrier(); | 7213 | rcu_barrier(); |
| 7199 | nf_tables_core_module_exit(); | 7214 | nf_tables_core_module_exit(); |
| 7200 | kfree(info); | 7215 | kfree(info); |
| 7201 | nft_chain_filter_fini(); | ||
| 7202 | } | 7216 | } |
| 7203 | 7217 | ||
| 7204 | module_init(nf_tables_module_init); | 7218 | module_init(nf_tables_module_init); |
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index deff10adef9c..8de912ca53d3 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c | |||
| @@ -183,7 +183,8 @@ next_rule: | |||
| 183 | 183 | ||
| 184 | switch (regs.verdict.code) { | 184 | switch (regs.verdict.code) { |
| 185 | case NFT_JUMP: | 185 | case NFT_JUMP: |
| 186 | BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE); | 186 | if (WARN_ON_ONCE(stackptr >= NFT_JUMP_STACK_SIZE)) |
| 187 | return NF_DROP; | ||
| 187 | jumpstack[stackptr].chain = chain; | 188 | jumpstack[stackptr].chain = chain; |
| 188 | jumpstack[stackptr].rules = rules + 1; | 189 | jumpstack[stackptr].rules = rules + 1; |
| 189 | stackptr++; | 190 | stackptr++; |
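Replacing BUG_ON() with WARN_ON_ONCE() plus NF_DROP above turns a jump-stack overflow from a kernel crash into a logged, recoverable packet drop, the right trade-off wherever the condition is reachable from untrusted input:

    if (WARN_ON_ONCE(stackptr >= NFT_JUMP_STACK_SIZE))
            return NF_DROP;   /* fail the packet, not the whole machine */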
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 4d0da7042aff..e1b6be29848d 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c | |||
| @@ -429,7 +429,7 @@ replay: | |||
| 429 | */ | 429 | */ |
| 430 | if (err == -EAGAIN) { | 430 | if (err == -EAGAIN) { |
| 431 | status |= NFNL_BATCH_REPLAY; | 431 | status |= NFNL_BATCH_REPLAY; |
| 432 | goto next; | 432 | goto done; |
| 433 | } | 433 | } |
| 434 | } | 434 | } |
| 435 | ack: | 435 | ack: |
| @@ -456,7 +456,7 @@ ack: | |||
| 456 | if (err) | 456 | if (err) |
| 457 | status |= NFNL_BATCH_FAILURE; | 457 | status |= NFNL_BATCH_FAILURE; |
| 458 | } | 458 | } |
| 459 | next: | 459 | |
| 460 | msglen = NLMSG_ALIGN(nlh->nlmsg_len); | 460 | msglen = NLMSG_ALIGN(nlh->nlmsg_len); |
| 461 | if (msglen > skb->len) | 461 | if (msglen > skb->len) |
| 462 | msglen = skb->len; | 462 | msglen = skb->len; |
| @@ -464,7 +464,11 @@ next: | |||
| 464 | } | 464 | } |
| 465 | done: | 465 | done: |
| 466 | if (status & NFNL_BATCH_REPLAY) { | 466 | if (status & NFNL_BATCH_REPLAY) { |
| 467 | ss->abort(net, oskb); | 467 | const struct nfnetlink_subsystem *ss2; |
| 468 | |||
| 469 | ss2 = nfnl_dereference_protected(subsys_id); | ||
| 470 | if (ss2 == ss) | ||
| 471 | ss->abort(net, oskb); | ||
| 468 | nfnl_err_reset(&err_list); | 472 | nfnl_err_reset(&err_list); |
| 469 | nfnl_unlock(subsys_id); | 473 | nfnl_unlock(subsys_id); |
| 470 | kfree_skb(skb); | 474 | kfree_skb(skb); |
diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c index 84c902477a91..d21834bed805 100644 --- a/net/netfilter/nft_chain_filter.c +++ b/net/netfilter/nft_chain_filter.c | |||
| @@ -318,6 +318,10 @@ static int nf_tables_netdev_event(struct notifier_block *this, | |||
| 318 | event != NETDEV_CHANGENAME) | 318 | event != NETDEV_CHANGENAME) |
| 319 | return NOTIFY_DONE; | 319 | return NOTIFY_DONE; |
| 320 | 320 | ||
| 321 | ctx.net = maybe_get_net(ctx.net); | ||
| 322 | if (!ctx.net) | ||
| 323 | return NOTIFY_DONE; | ||
| 324 | |||
| 321 | nfnl_lock(NFNL_SUBSYS_NFTABLES); | 325 | nfnl_lock(NFNL_SUBSYS_NFTABLES); |
| 322 | list_for_each_entry(table, &ctx.net->nft.tables, list) { | 326 | list_for_each_entry(table, &ctx.net->nft.tables, list) { |
| 323 | if (table->family != NFPROTO_NETDEV) | 327 | if (table->family != NFPROTO_NETDEV) |
| @@ -334,6 +338,7 @@ static int nf_tables_netdev_event(struct notifier_block *this, | |||
| 334 | } | 338 | } |
| 335 | } | 339 | } |
| 336 | nfnl_unlock(NFNL_SUBSYS_NFTABLES); | 340 | nfnl_unlock(NFNL_SUBSYS_NFTABLES); |
| 341 | put_net(ctx.net); | ||
| 337 | 342 | ||
| 338 | return NOTIFY_DONE; | 343 | return NOTIFY_DONE; |
| 339 | } | 344 | } |
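Like the flowtable notifier fix in nf_tables_api.c above, the netdev event handler now takes a reference on the netns via maybe_get_net() and bails out if the namespace is already being dismantled, closing a race against nf_tables_exit_net(). The pattern:

    struct net *net;

    net = maybe_get_net(dev_net(dev));
    if (!net)
            return NOTIFY_DONE;     /* netns is going away; nothing to do */

    /* ... walk per-netns state under the subsystem lock ... */

    put_net(net);
    return NOTIFY_DONE;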
diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c index 50c068d660e5..a832c59f0a9c 100644 --- a/net/netfilter/nft_connlimit.c +++ b/net/netfilter/nft_connlimit.c | |||
| @@ -52,7 +52,7 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv, | |||
| 52 | if (!addit) | 52 | if (!addit) |
| 53 | goto out; | 53 | goto out; |
| 54 | 54 | ||
| 55 | if (!nf_conncount_add(&priv->hhead, tuple_ptr)) { | 55 | if (!nf_conncount_add(&priv->hhead, tuple_ptr, zone)) { |
| 56 | regs->verdict.code = NF_DROP; | 56 | regs->verdict.code = NF_DROP; |
| 57 | spin_unlock_bh(&priv->lock); | 57 | spin_unlock_bh(&priv->lock); |
| 58 | return; | 58 | return; |
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 4d49529cff61..27d7e4598ab6 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c | |||
| @@ -203,9 +203,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx, | |||
| 203 | goto err1; | 203 | goto err1; |
| 204 | set->ops->gc_init(set); | 204 | set->ops->gc_init(set); |
| 205 | } | 205 | } |
| 206 | 206 | } | |
| 207 | } else if (set->flags & NFT_SET_EVAL) | ||
| 208 | return -EINVAL; | ||
| 209 | 207 | ||
| 210 | nft_set_ext_prepare(&priv->tmpl); | 208 | nft_set_ext_prepare(&priv->tmpl); |
| 211 | nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_KEY, set->klen); | 209 | nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_KEY, set->klen); |
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index d260ce2d6671..7f3a9a211034 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c | |||
| @@ -66,7 +66,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set | |||
| 66 | parent = rcu_dereference_raw(parent->rb_left); | 66 | parent = rcu_dereference_raw(parent->rb_left); |
| 67 | if (interval && | 67 | if (interval && |
| 68 | nft_rbtree_equal(set, this, interval) && | 68 | nft_rbtree_equal(set, this, interval) && |
| 69 | nft_rbtree_interval_end(this) && | 69 | nft_rbtree_interval_end(rbe) && |
| 70 | !nft_rbtree_interval_end(interval)) | 70 | !nft_rbtree_interval_end(interval)) |
| 71 | continue; | 71 | continue; |
| 72 | interval = rbe; | 72 | interval = rbe; |
diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c index f28a0b944087..74e1b3bd6954 100644 --- a/net/netfilter/nft_socket.c +++ b/net/netfilter/nft_socket.c | |||
| @@ -142,3 +142,4 @@ module_exit(nft_socket_module_exit); | |||
| 142 | MODULE_LICENSE("GPL"); | 142 | MODULE_LICENSE("GPL"); |
| 143 | MODULE_AUTHOR("Máté Eckl"); | 143 | MODULE_AUTHOR("Máté Eckl"); |
| 144 | MODULE_DESCRIPTION("nf_tables socket match module"); | 144 | MODULE_DESCRIPTION("nf_tables socket match module"); |
| 145 | MODULE_ALIAS_NFT_EXPR("socket"); | ||
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 8790190c6feb..03b9a50ec93b 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c | |||
| @@ -245,12 +245,22 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par, | |||
| 245 | } | 245 | } |
| 246 | 246 | ||
| 247 | if (info->helper[0]) { | 247 | if (info->helper[0]) { |
| 248 | if (strnlen(info->helper, sizeof(info->helper)) == sizeof(info->helper)) { | ||
| 249 | ret = -ENAMETOOLONG; | ||
| 250 | goto err3; | ||
| 251 | } | ||
| 252 | |||
| 248 | ret = xt_ct_set_helper(ct, info->helper, par); | 253 | ret = xt_ct_set_helper(ct, info->helper, par); |
| 249 | if (ret < 0) | 254 | if (ret < 0) |
| 250 | goto err3; | 255 | goto err3; |
| 251 | } | 256 | } |
| 252 | 257 | ||
| 253 | if (info->timeout[0]) { | 258 | if (info->timeout[0]) { |
| 259 | if (strnlen(info->timeout, sizeof(info->timeout)) == sizeof(info->timeout)) { | ||
| 260 | ret = -ENAMETOOLONG; | ||
| 261 | goto err4; | ||
| 262 | } | ||
| 263 | |||
| 254 | ret = xt_ct_set_timeout(ct, par, info->timeout); | 264 | ret = xt_ct_set_timeout(ct, par, info->timeout); |
| 255 | if (ret < 0) | 265 | if (ret < 0) |
| 256 | goto err4; | 266 | goto err4; |
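The strnlen() checks above reject helper and timeout names from userspace that fill the entire fixed-size buffer without a terminating NUL; passing such a buffer on to string APIs would read past its end. As a standalone check:

    /* buf is a fixed-size char array copied from userspace */
    if (strnlen(buf, sizeof(buf)) == sizeof(buf))
            return -ENAMETOOLONG;   /* no NUL terminator within the buffer */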
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index 94df000abb92..29c38aa7f726 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c | |||
| @@ -211,7 +211,7 @@ static int __init connmark_mt_init(void) | |||
| 211 | static void __exit connmark_mt_exit(void) | 211 | static void __exit connmark_mt_exit(void) |
| 212 | { | 212 | { |
| 213 | xt_unregister_match(&connmark_mt_reg); | 213 | xt_unregister_match(&connmark_mt_reg); |
| 214 | xt_unregister_target(connmark_tg_reg); | 214 | xt_unregister_targets(connmark_tg_reg, ARRAY_SIZE(connmark_tg_reg)); |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | module_init(connmark_mt_init); | 217 | module_init(connmark_mt_init); |
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c index 6f4c5217d835..bf2890b13212 100644 --- a/net/netfilter/xt_set.c +++ b/net/netfilter/xt_set.c | |||
| @@ -372,8 +372,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par) | |||
| 372 | 372 | ||
| 373 | /* Normalize to fit into jiffies */ | 373 | /* Normalize to fit into jiffies */ |
| 374 | if (add_opt.ext.timeout != IPSET_NO_TIMEOUT && | 374 | if (add_opt.ext.timeout != IPSET_NO_TIMEOUT && |
| 375 | add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC) | 375 | add_opt.ext.timeout > IPSET_MAX_TIMEOUT) |
| 376 | add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC; | 376 | add_opt.ext.timeout = IPSET_MAX_TIMEOUT; |
| 377 | if (info->add_set.index != IPSET_INVALID_ID) | 377 | if (info->add_set.index != IPSET_INVALID_ID) |
| 378 | ip_set_add(info->add_set.index, skb, par, &add_opt); | 378 | ip_set_add(info->add_set.index, skb, par, &add_opt); |
| 379 | if (info->del_set.index != IPSET_INVALID_ID) | 379 | if (info->del_set.index != IPSET_INVALID_ID) |
| @@ -407,8 +407,8 @@ set_target_v3(struct sk_buff *skb, const struct xt_action_param *par) | |||
| 407 | 407 | ||
| 408 | /* Normalize to fit into jiffies */ | 408 | /* Normalize to fit into jiffies */ |
| 409 | if (add_opt.ext.timeout != IPSET_NO_TIMEOUT && | 409 | if (add_opt.ext.timeout != IPSET_NO_TIMEOUT && |
| 410 | add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC) | 410 | add_opt.ext.timeout > IPSET_MAX_TIMEOUT) |
| 411 | add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC; | 411 | add_opt.ext.timeout = IPSET_MAX_TIMEOUT; |
| 412 | if (info->add_set.index != IPSET_INVALID_ID) | 412 | if (info->add_set.index != IPSET_INVALID_ID) |
| 413 | ip_set_add(info->add_set.index, skb, par, &add_opt); | 413 | ip_set_add(info->add_set.index, skb, par, &add_opt); |
| 414 | if (info->del_set.index != IPSET_INVALID_ID) | 414 | if (info->del_set.index != IPSET_INVALID_ID) |
| @@ -470,7 +470,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par) | |||
| 470 | } | 470 | } |
| 471 | if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) | | 471 | if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) | |
| 472 | (info->flags & IPSET_FLAG_MAP_SKBQUEUE)) && | 472 | (info->flags & IPSET_FLAG_MAP_SKBQUEUE)) && |
| 473 | !(par->hook_mask & (1 << NF_INET_FORWARD | | 473 | (par->hook_mask & ~(1 << NF_INET_FORWARD | |
| 474 | 1 << NF_INET_LOCAL_OUT | | 474 | 1 << NF_INET_LOCAL_OUT | |
| 475 | 1 << NF_INET_POST_ROUTING))) { | 475 | 1 << NF_INET_POST_ROUTING))) { |
| 476 | pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n"); | 476 | pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n"); |
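
The checkentry fix inverts the hook-mask test: the old expression only required that at least one allowed hook be present, so a rule bound to, say, both FORWARD and INPUT slipped through; the new expression rejects any mask containing a hook outside the allowed set. The difference shows up in a few lines (hook numbering follows the kernel's enum nf_inet_hooks; everything else is invented for the demo):

  #include <stdio.h>

  enum { NF_INET_PRE_ROUTING, NF_INET_LOCAL_IN, NF_INET_FORWARD,
         NF_INET_LOCAL_OUT, NF_INET_POST_ROUTING };

  #define ALLOWED ((1 << NF_INET_FORWARD) | \
                   (1 << NF_INET_LOCAL_OUT) | \
                   (1 << NF_INET_POST_ROUTING))

  int main(void)
  {
      /* A rule attached to FORWARD (allowed) and LOCAL_IN (not). */
      unsigned int hook_mask = (1 << NF_INET_FORWARD) |
                               (1 << NF_INET_LOCAL_IN);

      /* Old test: passes, since one allowed hook is set. */
      printf("old check rejects: %d\n", !(hook_mask & ALLOWED));
      /* New test: rejects, since a disallowed hook is also set. */
      printf("new check rejects: %d\n", (hook_mask & ~ALLOWED) != 0);
      return 0;
  }
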
diff --git a/net/rds/loop.c b/net/rds/loop.c index f2bf78de5688..dac6218a460e 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c | |||
| @@ -193,4 +193,5 @@ struct rds_transport rds_loop_transport = { | |||
| 193 | .inc_copy_to_user = rds_message_inc_copy_to_user, | 193 | .inc_copy_to_user = rds_message_inc_copy_to_user, |
| 194 | .inc_free = rds_loop_inc_free, | 194 | .inc_free = rds_loop_inc_free, |
| 195 | .t_name = "loopback", | 195 | .t_name = "loopback", |
| 196 | .t_type = RDS_TRANS_LOOP, | ||
| 196 | }; | 197 | }; |
diff --git a/net/rds/rds.h b/net/rds/rds.h index b04c333d9d1c..f2272fb8cd45 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h | |||
| @@ -479,6 +479,11 @@ struct rds_notifier { | |||
| 479 | int n_status; | 479 | int n_status; |
| 480 | }; | 480 | }; |
| 481 | 481 | ||
| 482 | /* Available as part of RDS core, so doesn't need to participate | ||
| 483 | * in get_preferred transport etc | ||
| 484 | */ | ||
| 485 | #define RDS_TRANS_LOOP 3 | ||
| 486 | |||
| 482 | /** | 487 | /** |
| 483 | * struct rds_transport - transport specific behavioural hooks | 488 | * struct rds_transport - transport specific behavioural hooks |
| 484 | * | 489 | * |
diff --git a/net/rds/recv.c b/net/rds/recv.c index dc67458b52f0..192ac6f78ded 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c | |||
| @@ -103,6 +103,11 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk, | |||
| 103 | rds_stats_add(s_recv_bytes_added_to_socket, delta); | 103 | rds_stats_add(s_recv_bytes_added_to_socket, delta); |
| 104 | else | 104 | else |
| 105 | rds_stats_add(s_recv_bytes_removed_from_socket, -delta); | 105 | rds_stats_add(s_recv_bytes_removed_from_socket, -delta); |
| 106 | |||
| 107 | /* loop transport doesn't send/recv congestion updates */ | ||
| 108 | if (rs->rs_transport->t_type == RDS_TRANS_LOOP) | ||
| 109 | return; | ||
| 110 | |||
| 106 | now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs); | 111 | now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs); |
| 107 | 112 | ||
| 108 | rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d " | 113 | rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d " |
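
The three RDS hunks introduce a t_type tag so core code can special-case the loopback transport without adding another method pointer: loopback never exchanges congestion maps, so rds_recv_rcvbuf_delta() bails out before marking the socket congested. A toy illustration of the tag-and-early-return pattern (types and the accounting stub are invented):

  #include <stdio.h>

  #define TRANS_LOOP 3    /* mirrors RDS_TRANS_LOOP in the patch */

  struct transport { int t_type; const char *t_name; };

  static void update_congestion(const struct transport *t, int rcv_bytes)
  {
      /* Loopback never sends or receives congestion updates, so the
       * accounting below would only produce bogus state for it. */
      if (t->t_type == TRANS_LOOP)
          return;
      printf("%s: congestion check at %d bytes\n", t->t_name, rcv_bytes);
  }

  int main(void)
  {
      struct transport loop = { TRANS_LOOP, "loopback" };
      struct transport tcp  = { 0, "tcp" };

      update_congestion(&loop, 4096);    /* silently skipped */
      update_congestion(&tcp, 4096);
      return 0;
  }
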
diff --git a/net/sctp/output.c b/net/sctp/output.c index e672dee302c7..7f849b01ec8e 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
| @@ -409,6 +409,21 @@ static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk) | |||
| 409 | refcount_inc(&sk->sk_wmem_alloc); | 409 | refcount_inc(&sk->sk_wmem_alloc); |
| 410 | } | 410 | } |
| 411 | 411 | ||
| 412 | static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb) | ||
| 413 | { | ||
| 414 | if (SCTP_OUTPUT_CB(head)->last == head) | ||
| 415 | skb_shinfo(head)->frag_list = skb; | ||
| 416 | else | ||
| 417 | SCTP_OUTPUT_CB(head)->last->next = skb; | ||
| 418 | SCTP_OUTPUT_CB(head)->last = skb; | ||
| 419 | |||
| 420 | head->truesize += skb->truesize; | ||
| 421 | head->data_len += skb->len; | ||
| 422 | head->len += skb->len; | ||
| 423 | |||
| 424 | __skb_header_release(skb); | ||
| 425 | } | ||
| 426 | |||
| 412 | static int sctp_packet_pack(struct sctp_packet *packet, | 427 | static int sctp_packet_pack(struct sctp_packet *packet, |
| 413 | struct sk_buff *head, int gso, gfp_t gfp) | 428 | struct sk_buff *head, int gso, gfp_t gfp) |
| 414 | { | 429 | { |
| @@ -422,7 +437,7 @@ static int sctp_packet_pack(struct sctp_packet *packet, | |||
| 422 | 437 | ||
| 423 | if (gso) { | 438 | if (gso) { |
| 424 | skb_shinfo(head)->gso_type = sk->sk_gso_type; | 439 | skb_shinfo(head)->gso_type = sk->sk_gso_type; |
| 425 | NAPI_GRO_CB(head)->last = head; | 440 | SCTP_OUTPUT_CB(head)->last = head; |
| 426 | } else { | 441 | } else { |
| 427 | nskb = head; | 442 | nskb = head; |
| 428 | pkt_size = packet->size; | 443 | pkt_size = packet->size; |
| @@ -503,15 +518,8 @@ merge: | |||
| 503 | &packet->chunk_list); | 518 | &packet->chunk_list); |
| 504 | } | 519 | } |
| 505 | 520 | ||
| 506 | if (gso) { | 521 | if (gso) |
| 507 | if (skb_gro_receive(&head, nskb)) { | 522 | sctp_packet_gso_append(head, nskb); |
| 508 | kfree_skb(nskb); | ||
| 509 | return 0; | ||
| 510 | } | ||
| 511 | if (WARN_ON_ONCE(skb_shinfo(head)->gso_segs >= | ||
| 512 | sk->sk_gso_max_segs)) | ||
| 513 | return 0; | ||
| 514 | } | ||
| 515 | 523 | ||
| 516 | pkt_count++; | 524 | pkt_count++; |
| 517 | } while (!list_empty(&packet->chunk_list)); | 525 | } while (!list_empty(&packet->chunk_list)); |
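
sctp_packet_pack() used to lean on skb_gro_receive(), which is receive-path GRO machinery, to glue GSO fragments together; the new sctp_packet_gso_append() chains the fragment onto head's frag_list directly and keeps len, data_len and truesize consistent, with SCTP_OUTPUT_CB(head)->last caching the tail so each append is O(1). A userspace analogue of that bookkeeping, with invented types standing in for sk_buff:

  #include <stdio.h>

  struct buf {
      struct buf *next;
      size_t len;
  };

  struct head {
      struct buf *frag_list;  /* first fragment, or NULL */
      struct buf *last;       /* cached tail, makes append O(1) */
      size_t total_len;       /* mirrors the head->len updates */
  };

  static void gso_append(struct head *h, struct buf *b)
  {
      if (!h->last)
          h->frag_list = b;       /* first fragment */
      else
          h->last->next = b;      /* link after the cached tail */
      h->last = b;
      h->total_len += b->len;
  }

  int main(void)
  {
      struct head h = { 0 };
      struct buf a = { .len = 100 }, b = { .len = 200 };

      gso_append(&h, &a);
      gso_append(&h, &b);
      printf("2 fragments, %zu bytes total\n", h.total_len);
      return 0;
  }
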
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 973b4471b532..da7f02edcd37 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
| @@ -1273,8 +1273,7 @@ static __poll_t smc_accept_poll(struct sock *parent) | |||
| 1273 | return mask; | 1273 | return mask; |
| 1274 | } | 1274 | } |
| 1275 | 1275 | ||
| 1276 | static __poll_t smc_poll(struct file *file, struct socket *sock, | 1276 | static __poll_t smc_poll_mask(struct socket *sock, __poll_t events) |
| 1277 | poll_table *wait) | ||
| 1278 | { | 1277 | { |
| 1279 | struct sock *sk = sock->sk; | 1278 | struct sock *sk = sock->sk; |
| 1280 | __poll_t mask = 0; | 1279 | __poll_t mask = 0; |
| @@ -1290,7 +1289,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, | |||
| 1290 | if ((sk->sk_state == SMC_INIT) || smc->use_fallback) { | 1289 | if ((sk->sk_state == SMC_INIT) || smc->use_fallback) { |
| 1291 | /* delegate to CLC child sock */ | 1290 | /* delegate to CLC child sock */ |
| 1292 | release_sock(sk); | 1291 | release_sock(sk); |
| 1293 | mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); | 1292 | mask = smc->clcsock->ops->poll_mask(smc->clcsock, events); |
| 1294 | lock_sock(sk); | 1293 | lock_sock(sk); |
| 1295 | sk->sk_err = smc->clcsock->sk->sk_err; | 1294 | sk->sk_err = smc->clcsock->sk->sk_err; |
| 1296 | if (sk->sk_err) { | 1295 | if (sk->sk_err) { |
| @@ -1308,11 +1307,6 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, | |||
| 1308 | } | 1307 | } |
| 1309 | } | 1308 | } |
| 1310 | } else { | 1309 | } else { |
| 1311 | if (sk->sk_state != SMC_CLOSED) { | ||
| 1312 | release_sock(sk); | ||
| 1313 | sock_poll_wait(file, sk_sleep(sk), wait); | ||
| 1314 | lock_sock(sk); | ||
| 1315 | } | ||
| 1316 | if (sk->sk_err) | 1310 | if (sk->sk_err) |
| 1317 | mask |= EPOLLERR; | 1311 | mask |= EPOLLERR; |
| 1318 | if ((sk->sk_shutdown == SHUTDOWN_MASK) || | 1312 | if ((sk->sk_shutdown == SHUTDOWN_MASK) || |
| @@ -1625,7 +1619,7 @@ static const struct proto_ops smc_sock_ops = { | |||
| 1625 | .socketpair = sock_no_socketpair, | 1619 | .socketpair = sock_no_socketpair, |
| 1626 | .accept = smc_accept, | 1620 | .accept = smc_accept, |
| 1627 | .getname = smc_getname, | 1621 | .getname = smc_getname, |
| 1628 | .poll = smc_poll, | 1622 | .poll_mask = smc_poll_mask, |
| 1629 | .ioctl = smc_ioctl, | 1623 | .ioctl = smc_ioctl, |
| 1630 | .listen = smc_listen, | 1624 | .listen = smc_listen, |
| 1631 | .shutdown = smc_shutdown, | 1625 | .shutdown = smc_shutdown, |
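
The SMC conversion follows the ->poll_mask contract: the callback receives only the requested event mask and reports readiness from current state, while wait-queue registration is left to the caller, which is why the sock_poll_wait() branch disappears. A rough userspace analogue of a state-only readiness function (struct fake_sock is invented for the example):

  #include <stdio.h>
  #include <poll.h>

  struct fake_sock {
      int err;
      int shutdown_rd, shutdown_wr;
      size_t rx_bytes, tx_space;
  };

  /* Compute readiness from state alone; nothing here sleeps or
   * registers a waiter, mirroring the poll_mask contract. */
  static unsigned int fake_poll_mask(const struct fake_sock *sk)
  {
      unsigned int mask = 0;

      if (sk->err)
          mask |= POLLERR;
      if (sk->shutdown_rd && sk->shutdown_wr)
          mask |= POLLHUP;
      if (sk->rx_bytes)
          mask |= POLLIN;
      if (sk->tx_space)
          mask |= POLLOUT;
      return mask;
  }

  int main(void)
  {
      struct fake_sock sk = { .rx_bytes = 42, .tx_space = 1 };

      printf("mask = %#x\n", fake_poll_mask(&sk));
      return 0;
  }
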
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 301f22430469..a127d61e8af9 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c | |||
| @@ -712,7 +712,7 @@ static int __init tls_register(void) | |||
| 712 | build_protos(tls_prots[TLSV4], &tcp_prot); | 712 | build_protos(tls_prots[TLSV4], &tcp_prot); |
| 713 | 713 | ||
| 714 | tls_sw_proto_ops = inet_stream_ops; | 714 | tls_sw_proto_ops = inet_stream_ops; |
| 715 | tls_sw_proto_ops.poll = tls_sw_poll; | 715 | tls_sw_proto_ops.poll_mask = tls_sw_poll_mask; |
| 716 | tls_sw_proto_ops.splice_read = tls_sw_splice_read; | 716 | tls_sw_proto_ops.splice_read = tls_sw_splice_read; |
| 717 | 717 | ||
| 718 | #ifdef CONFIG_TLS_DEVICE | 718 | #ifdef CONFIG_TLS_DEVICE |
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 8ca57d01b18f..f127fac88acf 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c | |||
| @@ -191,18 +191,12 @@ static void tls_free_both_sg(struct sock *sk) | |||
| 191 | } | 191 | } |
| 192 | 192 | ||
| 193 | static int tls_do_encryption(struct tls_context *tls_ctx, | 193 | static int tls_do_encryption(struct tls_context *tls_ctx, |
| 194 | struct tls_sw_context_tx *ctx, size_t data_len, | 194 | struct tls_sw_context_tx *ctx, |
| 195 | gfp_t flags) | 195 | struct aead_request *aead_req, |
| 196 | size_t data_len) | ||
| 196 | { | 197 | { |
| 197 | unsigned int req_size = sizeof(struct aead_request) + | ||
| 198 | crypto_aead_reqsize(ctx->aead_send); | ||
| 199 | struct aead_request *aead_req; | ||
| 200 | int rc; | 198 | int rc; |
| 201 | 199 | ||
| 202 | aead_req = kzalloc(req_size, flags); | ||
| 203 | if (!aead_req) | ||
| 204 | return -ENOMEM; | ||
| 205 | |||
| 206 | ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size; | 200 | ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size; |
| 207 | ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size; | 201 | ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size; |
| 208 | 202 | ||
| @@ -219,7 +213,6 @@ static int tls_do_encryption(struct tls_context *tls_ctx, | |||
| 219 | ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size; | 213 | ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size; |
| 220 | ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size; | 214 | ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size; |
| 221 | 215 | ||
| 222 | kfree(aead_req); | ||
| 223 | return rc; | 216 | return rc; |
| 224 | } | 217 | } |
| 225 | 218 | ||
| @@ -228,8 +221,14 @@ static int tls_push_record(struct sock *sk, int flags, | |||
| 228 | { | 221 | { |
| 229 | struct tls_context *tls_ctx = tls_get_ctx(sk); | 222 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
| 230 | struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); | 223 | struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); |
| 224 | struct aead_request *req; | ||
| 231 | int rc; | 225 | int rc; |
| 232 | 226 | ||
| 227 | req = kzalloc(sizeof(struct aead_request) + | ||
| 228 | crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation); | ||
| 229 | if (!req) | ||
| 230 | return -ENOMEM; | ||
| 231 | |||
| 233 | sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1); | 232 | sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1); |
| 234 | sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1); | 233 | sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1); |
| 235 | 234 | ||
| @@ -245,15 +244,14 @@ static int tls_push_record(struct sock *sk, int flags, | |||
| 245 | tls_ctx->pending_open_record_frags = 0; | 244 | tls_ctx->pending_open_record_frags = 0; |
| 246 | set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags); | 245 | set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags); |
| 247 | 246 | ||
| 248 | rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size, | 247 | rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size); |
| 249 | sk->sk_allocation); | ||
| 250 | if (rc < 0) { | 248 | if (rc < 0) { |
| 251 | /* If we are called from write_space and | 249 | /* If we are called from write_space and |
| 252 | * we fail, we need to set this SOCK_NOSPACE | 250 | * we fail, we need to set this SOCK_NOSPACE |
| 253 | * to trigger another write_space in the future. | 251 | * to trigger another write_space in the future. |
| 254 | */ | 252 | */ |
| 255 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | 253 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
| 256 | return rc; | 254 | goto out_req; |
| 257 | } | 255 | } |
| 258 | 256 | ||
| 259 | free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem, | 257 | free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem, |
| @@ -268,6 +266,8 @@ static int tls_push_record(struct sock *sk, int flags, | |||
| 268 | tls_err_abort(sk, EBADMSG); | 266 | tls_err_abort(sk, EBADMSG); |
| 269 | 267 | ||
| 270 | tls_advance_record_sn(sk, &tls_ctx->tx); | 268 | tls_advance_record_sn(sk, &tls_ctx->tx); |
| 269 | out_req: | ||
| 270 | kfree(req); | ||
| 271 | return rc; | 271 | return rc; |
| 272 | } | 272 | } |
| 273 | 273 | ||
| @@ -754,7 +754,7 @@ int tls_sw_recvmsg(struct sock *sk, | |||
| 754 | struct sk_buff *skb; | 754 | struct sk_buff *skb; |
| 755 | ssize_t copied = 0; | 755 | ssize_t copied = 0; |
| 756 | bool cmsg = false; | 756 | bool cmsg = false; |
| 757 | int err = 0; | 757 | int target, err = 0; |
| 758 | long timeo; | 758 | long timeo; |
| 759 | 759 | ||
| 760 | flags |= nonblock; | 760 | flags |= nonblock; |
| @@ -764,6 +764,7 @@ int tls_sw_recvmsg(struct sock *sk, | |||
| 764 | 764 | ||
| 765 | lock_sock(sk); | 765 | lock_sock(sk); |
| 766 | 766 | ||
| 767 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); | ||
| 767 | timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); | 768 | timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); |
| 768 | do { | 769 | do { |
| 769 | bool zc = false; | 770 | bool zc = false; |
| @@ -856,6 +857,9 @@ fallback_to_reg_recv: | |||
| 856 | goto recv_end; | 857 | goto recv_end; |
| 857 | } | 858 | } |
| 858 | } | 859 | } |
| 860 | /* If we have a new message from strparser, continue now. */ | ||
| 861 | if (copied >= target && !ctx->recv_pkt) | ||
| 862 | break; | ||
| 859 | } while (len); | 863 | } while (len); |
| 860 | 864 | ||
| 861 | recv_end: | 865 | recv_end: |
| @@ -915,23 +919,22 @@ splice_read_end: | |||
| 915 | return copied ? : err; | 919 | return copied ? : err; |
| 916 | } | 920 | } |
| 917 | 921 | ||
| 918 | unsigned int tls_sw_poll(struct file *file, struct socket *sock, | 922 | __poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events) |
| 919 | struct poll_table_struct *wait) | ||
| 920 | { | 923 | { |
| 921 | unsigned int ret; | ||
| 922 | struct sock *sk = sock->sk; | 924 | struct sock *sk = sock->sk; |
| 923 | struct tls_context *tls_ctx = tls_get_ctx(sk); | 925 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
| 924 | struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); | 926 | struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); |
| 927 | __poll_t mask; | ||
| 925 | 928 | ||
| 926 | /* Grab POLLOUT and POLLHUP from the underlying socket */ | 929 | /* Grab EPOLLOUT and EPOLLHUP from the underlying socket */ |
| 927 | ret = ctx->sk_poll(file, sock, wait); | 930 | mask = ctx->sk_poll_mask(sock, events); |
| 928 | 931 | ||
| 929 | /* Clear POLLIN bits, and set based on recv_pkt */ | 932 | /* Clear EPOLLIN bits, and set based on recv_pkt */ |
| 930 | ret &= ~(POLLIN | POLLRDNORM); | 933 | mask &= ~(EPOLLIN | EPOLLRDNORM); |
| 931 | if (ctx->recv_pkt) | 934 | if (ctx->recv_pkt) |
| 932 | ret |= POLLIN | POLLRDNORM; | 935 | mask |= EPOLLIN | EPOLLRDNORM; |
| 933 | 936 | ||
| 934 | return ret; | 937 | return mask; |
| 935 | } | 938 | } |
| 936 | 939 | ||
| 937 | static int tls_read_size(struct strparser *strp, struct sk_buff *skb) | 940 | static int tls_read_size(struct strparser *strp, struct sk_buff *skb) |
| @@ -1188,7 +1191,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) | |||
| 1188 | sk->sk_data_ready = tls_data_ready; | 1191 | sk->sk_data_ready = tls_data_ready; |
| 1189 | write_unlock_bh(&sk->sk_callback_lock); | 1192 | write_unlock_bh(&sk->sk_callback_lock); |
| 1190 | 1193 | ||
| 1191 | sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll; | 1194 | sw_ctx_rx->sk_poll_mask = sk->sk_socket->ops->poll_mask; |
| 1192 | 1195 | ||
| 1193 | strp_check_rcv(&sw_ctx_rx->strp); | 1196 | strp_check_rcv(&sw_ctx_rx->strp); |
| 1194 | } | 1197 | } |
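
Besides the poll_mask conversion and hoisting the aead_request allocation into tls_push_record(), tls_sw_recvmsg() now honours the receive low-water mark: it computes a target via sock_rcvlowat() and returns once at least that much has been copied and the strparser has no further decrypted record queued, instead of sleeping for more data. A toy model of that exit condition, where a static record array stands in for ctx->recv_pkt:

  #include <stdio.h>
  #include <string.h>

  static const char *records[] = { "hel", "lo ", "world" };
  static unsigned int next_rec;

  static const char *peek_record(void)    /* like checking ctx->recv_pkt */
  {
      return next_rec < 3 ? records[next_rec] : NULL;
  }

  int main(void)
  {
      char out[64];
      size_t copied = 0, target = 5;      /* target ~ sock_rcvlowat() */

      while (copied < sizeof(out) - 1) {
          const char *rec = peek_record();

          if (!rec)
              break;                      /* real code would wait here */
          memcpy(out + copied, rec, strlen(rec));
          copied += strlen(rec);
          next_rec++;
          /* The patch's early exit: enough bytes copied and no further
           * parsed record ready, so return to the caller instead of
           * sleeping for more input. */
          if (copied >= target && !peek_record())
              break;
      }
      out[copied] = '\0';
      printf("copied %zu bytes: \"%s\"\n", copied, out);
      return 0;
  }
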
diff --git a/net/wireless/core.c b/net/wireless/core.c index 5fe35aafdd9c..48e8097339ab 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
| @@ -1012,6 +1012,7 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev) | |||
| 1012 | nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); | 1012 | nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); |
| 1013 | 1013 | ||
| 1014 | list_del_rcu(&wdev->list); | 1014 | list_del_rcu(&wdev->list); |
| 1015 | synchronize_rcu(); | ||
| 1015 | rdev->devlist_generation++; | 1016 | rdev->devlist_generation++; |
| 1016 | 1017 | ||
| 1017 | switch (wdev->iftype) { | 1018 | switch (wdev->iftype) { |
diff --git a/net/wireless/util.c b/net/wireless/util.c index b5bb1c309914..3c654cd7ba56 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
| @@ -1746,6 +1746,8 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr, | |||
| 1746 | if (!rdev->ops->get_station) | 1746 | if (!rdev->ops->get_station) |
| 1747 | return -EOPNOTSUPP; | 1747 | return -EOPNOTSUPP; |
| 1748 | 1748 | ||
| 1749 | memset(sinfo, 0, sizeof(*sinfo)); | ||
| 1750 | |||
| 1749 | return rdev_get_station(rdev, dev, mac_addr, sinfo); | 1751 | return rdev_get_station(rdev, dev, mac_addr, sinfo); |
| 1750 | } | 1752 | } |
| 1751 | EXPORT_SYMBOL(cfg80211_get_station); | 1753 | EXPORT_SYMBOL(cfg80211_get_station); |
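
cfg80211_get_station() now zeroes the caller's station_info before invoking the driver hook, so fields a driver never fills cannot leak stale stack contents to userspace. The hazard in miniature, with an invented struct and driver stub:

  #include <stdio.h>
  #include <string.h>

  struct station_info { long rx_bytes; long tx_bytes; };

  /* A driver that only fills some fields, like many get_station()
   * implementations. */
  static void driver_get_station(struct station_info *sinfo)
  {
      sinfo->rx_bytes = 1234;    /* tx_bytes left untouched */
  }

  int main(void)
  {
      struct station_info sinfo;

      /* Without this memset, tx_bytes would be uninitialized stack
       * data that could end up copied out to the caller. */
      memset(&sinfo, 0, sizeof(sinfo));
      driver_get_station(&sinfo);
      printf("rx=%ld tx=%ld\n", sinfo.rx_bytes, sinfo.tx_bytes);
      return 0;
  }
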
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index b9ef487c4618..f47abb46c587 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c | |||
| @@ -204,7 +204,8 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem) | |||
| 204 | long npgs; | 204 | long npgs; |
| 205 | int err; | 205 | int err; |
| 206 | 206 | ||
| 207 | umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs), GFP_KERNEL); | 207 | umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs), |
| 208 | GFP_KERNEL | __GFP_NOWARN); | ||
| 208 | if (!umem->pgs) | 209 | if (!umem->pgs) |
| 209 | return -ENOMEM; | 210 | return -ENOMEM; |
| 210 | 211 | ||
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 607ed8729c06..7a6214e9ae58 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile | |||
| @@ -16,9 +16,7 @@ LDLIBS += -lcap -lelf -lrt -lpthread | |||
| 16 | TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read | 16 | TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read |
| 17 | all: $(TEST_CUSTOM_PROGS) | 17 | all: $(TEST_CUSTOM_PROGS) |
| 18 | 18 | ||
| 19 | $(TEST_CUSTOM_PROGS): urandom_read | 19 | $(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c |
| 20 | |||
| 21 | urandom_read: urandom_read.c | ||
| 22 | $(CC) -o $(TEST_CUSTOM_PROGS) -static $< -Wl,--build-id | 20 | $(CC) -o $(TEST_CUSTOM_PROGS) -static $< -Wl,--build-id |
| 23 | 21 | ||
| 24 | # Order correspond to 'make run_tests' order | 22 | # Order correspond to 'make run_tests' order |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json index de97e4ff705c..637ea0219617 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json | |||
| @@ -568,7 +568,7 @@ | |||
| 568 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1", | 568 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1", |
| 569 | "matchCount": "1", | 569 | "matchCount": "1", |
| 570 | "teardown": [ | 570 | "teardown": [ |
| 571 | "$TC actions flush action skbedit" | 571 | "$TC actions flush action ife" |
| 572 | ] | 572 | ] |
| 573 | }, | 573 | }, |
| 574 | { | 574 | { |
