/[linux-patches]/genpatches-2.6/trunk/3.4/1022_linux-3.4.23.patch
Revision 2254
Wed Dec 19 19:51:16 2012 UTC by mpagano
File size: 37141 bytes
Linux patches 3.4.12 up to and including 3.4.24
diff --git a/Makefile b/Makefile
index 320663d..bf1df55 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 22
+SUBLEVEL = 23
EXTRAVERSION =
NAME = Saber-toothed Squirrel

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e14ae11..7fe19a3 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -579,6 +579,7 @@ config ARCH_KIRKWOOD
bool "Marvell Kirkwood"
select CPU_FEROCEON
select PCI
+ select PCI_QUIRKS
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
select NEED_MACH_IO_H
diff --git a/arch/arm/mach-dove/include/mach/pm.h b/arch/arm/mach-dove/include/mach/pm.h
index 3ad9f94..11799c3 100644
--- a/arch/arm/mach-dove/include/mach/pm.h
+++ b/arch/arm/mach-dove/include/mach/pm.h
@@ -45,7 +45,7 @@ static inline int pmu_to_irq(int pin)

static inline int irq_to_pmu(int irq)
{
- if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS)
+ if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS)
return irq - IRQ_DOVE_PMU_START;

return -EINVAL;
diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c
index f07fd16..9f2fd10 100644
--- a/arch/arm/mach-dove/irq.c
+++ b/arch/arm/mach-dove/irq.c
@@ -61,8 +61,20 @@ static void pmu_irq_ack(struct irq_data *d)
int pin = irq_to_pmu(d->irq);
u32 u;

+ /*
+ * The PMU mask register is not RW0C: it is RW. This means that
+ * the bits take whatever value is written to them; if you write
+ * a '1', you will set the interrupt.
+ *
+ * Unfortunately this means there is NO race free way to clear
+ * these interrupts.
+ *
+ * So, let's structure the code so that the window is as small as
+ * possible.
+ */
u = ~(1 << (pin & 31));
- writel(u, PMU_INTERRUPT_CAUSE);
+ u &= readl_relaxed(PMU_INTERRUPT_CAUSE);
+ writel_relaxed(u, PMU_INTERRUPT_CAUSE);
}

static struct irq_chip pmu_irq_chip = {
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index f56a011..c46d20e 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -212,14 +212,19 @@ static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
return 1;
}

+/*
+ * The root complex has a hardwired class of PCI_CLASS_MEMORY_OTHER, when it
+ * is operating as a root complex this needs to be switched to
+ * PCI_CLASS_BRIDGE_HOST or Linux will errantly try to process the BAR's on
+ * the device. Decoding setup is handled by the orion code.
+ */
static void __devinit rc_pci_fixup(struct pci_dev *dev)
{
- /*
- * Prevent enumeration of root complex.
- */
if (dev->bus->parent == NULL && dev->devfn == 0) {
int i;

+ dev->class &= 0xff;
+ dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
dev->resource[i].start = 0;
dev->resource[i].end = 0;
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 4fa8815..92e05b6 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -334,14 +334,17 @@ static inline void __thread_fpu_begin(struct task_struct *tsk)
typedef struct { int preload; } fpu_switch_t;

/*
- * FIXME! We could do a totally lazy restore, but we need to
- * add a per-cpu "this was the task that last touched the FPU
- * on this CPU" variable, and the task needs to have a "I last
- * touched the FPU on this CPU" and check them.
+ * Must be run with preemption disabled: this clears the fpu_owner_task,
+ * on this CPU.
*
- * We don't do that yet, so "fpu_lazy_restore()" always returns
- * false, but some day..
+ * This will disable any lazy FPU state restore of the current FPU state,
+ * but if the current thread owns the FPU, it will still be saved by.
*/
+static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+{
+ per_cpu(fpu_owner_task, cpu) = NULL;
+}
+
static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
return new == percpu_read_stable(fpu_owner_task) &&
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6e1e406..849cdcf 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -66,6 +66,8 @@
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
+#include <asm/i387.h>
+#include <asm/fpu-internal.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
@@ -851,6 +853,9 @@ int __cpuinit native_cpu_up(unsigned int cpu)

per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

+ /* the FPU context is blank, nobody can own it */
+ __cpu_disable_lazy_restore(cpu);
+
err = do_boot_cpu(apicid, cpu);
if (err) {
pr_debug("do_boot_cpu failed %d\n", err);
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index bbac51e..4a2c131 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -407,6 +407,7 @@ static void acpi_processor_notify(struct acpi_device *device, u32 event)
acpi_bus_generate_proc_event(device, event, 0);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
+ break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 3bafa3b..f4059e9 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -215,8 +215,8 @@ static const char *ferr_fat_fbd_name[] = {
[0] = "Memory Write error on non-redundant retry or "
"FBD configuration Write error on retry",
};
-#define GET_FBD_FAT_IDX(fbderr) (fbderr & (3 << 28))
-#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3))
+#define GET_FBD_FAT_IDX(fbderr) (((fbderr) >> 28) & 3)
+#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))

#define FERR_NF_FBD 0xa0
static const char *ferr_nf_fbd_name[] = {
@@ -243,7 +243,7 @@ static const char *ferr_nf_fbd_name[] = {
[1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
[0] = "Uncorrectable Data ECC on Replay",
};
-#define GET_FBD_NF_IDX(fbderr) (fbderr & (3 << 28))
+#define GET_FBD_NF_IDX(fbderr) (((fbderr) >> 28) & 3)
#define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
(1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
(1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
@@ -485,7 +485,7 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
errnum = find_first_bit(&errors,
ARRAY_SIZE(ferr_nf_fbd_name));
specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
- branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
+ branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;

pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
REDMEMA, &syndrome);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4ff7d5f..802fec2 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -785,6 +785,22 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Gigabyte GA-D525TUD",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Supermicro X7SPA-H",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
+ },
+ },

{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e5328da..4a1d8f3 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -37,6 +37,16 @@
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376

+static const u32 crtc_offsets[6] =
+{
+ EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_CRTC5_REGISTER_OFFSET
+};
+
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
@@ -101,17 +111,19 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)

void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
int i;

- if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) {
+ if (crtc >= rdev->num_crtc)
+ return;
+
+ if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK))
+ if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
break;
udelay(1);
}
for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)
+ if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
break;
udelay(1);
}
@@ -1117,116 +1129,105 @@ void evergreen_agp_enable(struct radeon_device *rdev)

void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
+ u32 crtc_enabled, tmp, frame_count, blackout;
+ int i, j;
+
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);

- /* Stop all video */
+ /* disable VGA render */
WREG32(VGA_RENDER_CONTROL, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
- }
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
- }
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ /* blank the display controllers */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+ if (crtc_enabled) {
+ save->crtc_enabled[i] = true;
+ if (ASIC_IS_DCE6(rdev)) {
+ tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+ if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+ radeon_wait_for_vblank(rdev, i);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ }
+ } else {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
+ radeon_wait_for_vblank(rdev, i);
+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ }
+ }
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (radeon_get_vblank_counter(rdev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+ } else {
+ save->crtc_enabled[i] = false;
+ }
}

- WREG32(D1VGA_CONTROL, 0);
- WREG32(D2VGA_CONTROL, 0);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_D3VGA_CONTROL, 0);
- WREG32(EVERGREEN_D4VGA_CONTROL, 0);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_D5VGA_CONTROL, 0);
- WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+ radeon_mc_wait_for_idle(rdev);
+
+ blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+ if ((blackout & BLACKOUT_MODE_MASK) != 1) {
+ /* Block CPU access */
+ WREG32(BIF_FB_EN, 0);
+ /* blackout the MC */
+ blackout &= ~BLACKOUT_MODE_MASK;
+ WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
}
}

void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
-
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
-
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
-
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
+ u32 tmp, frame_count;
+ int i, j;

- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ /* update crtc base addresses */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)rdev->mc.vram_start);
}
-
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
- /* Unlock host access */
+
+ /* unblackout the MC */
+ tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
+ tmp &= ~BLACKOUT_MODE_MASK;
+ WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+ /* allow CPU access */
+ WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled) {
+ if (ASIC_IS_DCE6(rdev)) {
+ tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ } else {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ }
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (radeon_get_vblank_counter(rdev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ /* Unlock vga access */
WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 96c10b3..34a0e85 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -218,6 +218,8 @@
#define EVERGREEN_CRTC_CONTROL 0x6e70
# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL 0x6e74
+# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
#define EVERGREEN_CRTC_STATUS 0x6e8c
# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 2eaaea0..81e744f 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -77,6 +77,10 @@

#define CONFIG_MEMSIZE 0x5428

+#define BIF_FB_EN 0x5490
+#define FB_READ_EN (1 << 0)
+#define FB_WRITE_EN (1 << 1)
+
#define CP_STRMOUT_CNTL 0x84FC

#define CP_COHER_CNTL 0x85F0
@@ -200,6 +204,9 @@
#define NOOFCHAN_MASK 0x00003000
#define MC_SHARED_CHREMAP 0x2008

+#define MC_SHARED_BLACKOUT_CNTL 0x20ac
+#define BLACKOUT_MODE_MASK 0x00000007
+
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000003
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 665df87..917e49c 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -400,6 +400,7 @@ void r700_cp_fini(struct radeon_device *rdev);
struct evergreen_mc_save {
u32 vga_render_control;
u32 vga_hdp_control;
+ bool crtc_enabled[RADEON_MAX_CRTCS];
};

void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a2b5304..6137d00 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1182,18 +1182,21 @@ retry_write:
blocked_rdev = rrdev;
break;
}
+ if (rdev && (test_bit(Faulty, &rdev->flags)
+ || test_bit(Unmerged, &rdev->flags)))
+ rdev = NULL;
if (rrdev && (test_bit(Faulty, &rrdev->flags)
|| test_bit(Unmerged, &rrdev->flags)))
rrdev = NULL;

r10_bio->devs[i].bio = NULL;
r10_bio->devs[i].repl_bio = NULL;
- if (!rdev || test_bit(Faulty, &rdev->flags) ||
- test_bit(Unmerged, &rdev->flags)) {
+
+ if (!rdev && !rrdev) {
set_bit(R10BIO_Degraded, &r10_bio->state);
continue;
}
- if (test_bit(WriteErrorSeen, &rdev->flags)) {
+ if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
sector_t dev_sector = r10_bio->devs[i].addr;
int bad_sectors;
@@ -1235,8 +1238,10 @@ retry_write:
max_sectors = good_sectors;
}
}
- r10_bio->devs[i].bio = bio;
- atomic_inc(&rdev->nr_pending);
+ if (rdev) {
+ r10_bio->devs[i].bio = bio;
+ atomic_inc(&rdev->nr_pending);
+ }
if (rrdev) {
r10_bio->devs[i].repl_bio = bio;
atomic_inc(&rrdev->nr_pending);
@@ -1292,51 +1297,52 @@ retry_write:
for (i = 0; i < conf->copies; i++) {
struct bio *mbio;
int d = r10_bio->devs[i].devnum;
- if (!r10_bio->devs[i].bio)
- continue;
-
- mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
- max_sectors);
- r10_bio->devs[i].bio = mbio;
-
- mbio->bi_sector = (r10_bio->devs[i].addr+
- conf->mirrors[d].rdev->data_offset);
- mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
- mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE | do_sync | do_fua;
- mbio->bi_private = r10_bio;
-
- atomic_inc(&r10_bio->remaining);
- spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
- spin_unlock_irqrestore(&conf->device_lock, flags);
-
- if (!r10_bio->devs[i].repl_bio)
- continue;
+ if (r10_bio->devs[i].bio) {
+ struct md_rdev *rdev = conf->mirrors[d].rdev;
+ mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
+ r10_bio->devs[i].bio = mbio;
+
+ mbio->bi_sector = (r10_bio->devs[i].addr+
+ rdev->data_offset);
+ mbio->bi_bdev = rdev->bdev;
+ mbio->bi_end_io = raid10_end_write_request;
+ mbio->bi_rw = WRITE | do_sync | do_fua;
+ mbio->bi_private = r10_bio;

- mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
- max_sectors);
- r10_bio->devs[i].repl_bio = mbio;
+ atomic_inc(&r10_bio->remaining);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio_list_add(&conf->pending_bio_list, mbio);
+ conf->pending_count++;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ }

- /* We are actively writing to the original device
- * so it cannot disappear, so the replacement cannot
- * become NULL here
- */
- mbio->bi_sector = (r10_bio->devs[i].addr+
- conf->mirrors[d].replacement->data_offset);
- mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
- mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE | do_sync | do_fua;
- mbio->bi_private = r10_bio;
+ if (r10_bio->devs[i].repl_bio) {
+ struct md_rdev *rdev = conf->mirrors[d].replacement;
+ if (rdev == NULL) {
+ /* Replacement just got moved to main 'rdev' */
+ smp_mb();
+ rdev = conf->mirrors[d].rdev;
+ }
+ mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
+ r10_bio->devs[i].repl_bio = mbio;
+
+ mbio->bi_sector = (r10_bio->devs[i].addr+
+ rdev->data_offset);
+ mbio->bi_bdev = rdev->bdev;
+ mbio->bi_end_io = raid10_end_write_request;
+ mbio->bi_rw = WRITE | do_sync | do_fua;
+ mbio->bi_private = r10_bio;

- atomic_inc(&r10_bio->remaining);
- spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
- spin_unlock_irqrestore(&conf->device_lock, flags);
+ atomic_inc(&r10_bio->remaining);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio_list_add(&conf->pending_bio_list, mbio);
+ conf->pending_count++;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ }
}

/* Don't remove the bias on 'remaining' (one_write_done) until
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6af3101..b8e7f3e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9131,10 +9131,13 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
*/
static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
- if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
- BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
- REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
+ if (!CHIP_IS_E1x(bp)) {
+ u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+ BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+ REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+ 1 << BP_FUNC(bp));
+ }
}
}

diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 7f6a23f..d16dae2 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -162,25 +162,39 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
return bio;
}

-static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
+static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
sector_t isect, struct page *page,
struct pnfs_block_extent *be,
void (*end_io)(struct bio *, int err),
- struct parallel_io *par)
+ struct parallel_io *par,
+ unsigned int offset, int len)
{
+ isect = isect + (offset >> SECTOR_SHIFT);
+ dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
+ npg, rw, (unsigned long long)isect, offset, len);
retry:
if (!bio) {
bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
if (!bio)
return ERR_PTR(-ENOMEM);
}
- if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+ if (bio_add_page(bio, page, len, offset) < len) {
bio = bl_submit_bio(rw, bio);
goto retry;
}
return bio;
}

+static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
+ sector_t isect, struct page *page,
+ struct pnfs_block_extent *be,
+ void (*end_io)(struct bio *, int err),
+ struct parallel_io *par)
+{
+ return do_add_page_to_bio(bio, npg, rw, isect, page, be,
+ end_io, par, 0, PAGE_CACHE_SIZE);
+}
+
/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
@@ -443,6 +457,107 @@ map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
return;
}

+static void
+bl_read_single_end_io(struct bio *bio, int error)
+{
+ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+ struct page *page = bvec->bv_page;
+
+ /* Only one page in bvec */
+ unlock_page(page);
+}
+
+static int
+bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
+ unsigned int offset, unsigned int len)
+{
+ struct bio *bio;
+ struct page *shadow_page;
+ sector_t isect;
+ char *kaddr, *kshadow_addr;
+ int ret = 0;
+
+ dprintk("%s: offset %u len %u\n", __func__, offset, len);
+
+ shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (shadow_page == NULL)
+ return -ENOMEM;
+
+ bio = bio_alloc(GFP_NOIO, 1);
+ if (bio == NULL)
+ return -ENOMEM;
+
+ isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
+ (offset / SECTOR_SIZE);
+
+ bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+ bio->bi_bdev = be->be_mdev;
+ bio->bi_end_io = bl_read_single_end_io;
+
+ lock_page(shadow_page);
+ if (bio_add_page(bio, shadow_page,
+ SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
+ unlock_page(shadow_page);
+ bio_put(bio);
+ return -EIO;
+ }
+
+ submit_bio(READ, bio);
+ wait_on_page_locked(shadow_page);
+ if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
+ ret = -EIO;
+ } else {
+ kaddr = kmap_atomic(page);
+ kshadow_addr = kmap_atomic(shadow_page);
+ memcpy(kaddr + offset, kshadow_addr + offset, len);
+ kunmap_atomic(kshadow_addr);
+ kunmap_atomic(kaddr);
+ }
+ __free_page(shadow_page);
+ bio_put(bio);
+
+ return ret;
+}
+
+static int
+bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
+ unsigned int dirty_offset, unsigned int dirty_len,
+ bool full_page)
+{
+ int ret = 0;
+ unsigned int start, end;
+
+ if (full_page) {
+ start = 0;
+ end = PAGE_CACHE_SIZE;
+ } else {
+ start = round_down(dirty_offset, SECTOR_SIZE);
+ end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
+ }
+
+ dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
+ if (!be) {
+ zero_user_segments(page, start, dirty_offset,
+ dirty_offset + dirty_len, end);
+ if (start == 0 && end == PAGE_CACHE_SIZE &&
+ trylock_page(page)) {
+ SetPageUptodate(page);
+ unlock_page(page);
+ }
+ return ret;
+ }
+
+ if (start != dirty_offset)
+ ret = bl_do_readpage_sync(page, be, start,
+ dirty_offset - start);
+
+ if (!ret && (dirty_offset + dirty_len < end))
+ ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
+ end - dirty_offset - dirty_len);
+
+ return ret;
+}
+
/* Given an unmapped page, zero it or read in page for COW, page is locked
* by caller.
*/
@@ -476,7 +591,6 @@ init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
SetPageUptodate(page);

cleanup:
- bl_put_extent(cow_read);
if (bh)
free_buffer_head(bh);
if (ret) {
@@ -547,6 +661,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
struct parallel_io *par;
loff_t offset = wdata->args.offset;
size_t count = wdata->args.count;
+ unsigned int pg_offset, pg_len, saved_len;
struct page **pages = wdata->args.pages;
struct page *page;
pgoff_t index;
@@ -651,10 +766,11 @@ next_page:
if (!extent_length) {
/* We've used up the previous extent */
bl_put_extent(be);
+ bl_put_extent(cow_read);
bio = bl_submit_bio(WRITE, bio);
/* Get the next one */
be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
- isect, NULL);
+ isect, &cow_read);
if (!be || !is_writable(be, isect)) {
wdata->pnfs_error = -EINVAL;
goto out;
@@ -671,7 +787,26 @@ next_page:
extent_length = be->be_length -
(isect - be->be_f_offset);
}
- if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+
+ dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
+ pg_offset = offset & ~PAGE_CACHE_MASK;
+ if (pg_offset + count > PAGE_CACHE_SIZE)
+ pg_len = PAGE_CACHE_SIZE - pg_offset;
+ else
+ pg_len = count;
+
+ saved_len = pg_len;
+ if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
+ !bl_is_sector_init(be->be_inval, isect)) {
+ ret = bl_read_partial_page_sync(pages[i], cow_read,
+ pg_offset, pg_len, true);
+ if (ret) {
+ dprintk("%s bl_read_partial_page_sync fail %d\n",
+ __func__, ret);
+ wdata->pnfs_error = ret;
+ goto out;
+ }
+
ret = bl_mark_sectors_init(be->be_inval, isect,
PAGE_CACHE_SECTORS);
if (unlikely(ret)) {
@@ -680,15 +815,33 @@ next_page:
wdata->pnfs_error = ret;
goto out;
}
+
+ /* Expand to full page write */
+ pg_offset = 0;
+ pg_len = PAGE_CACHE_SIZE;
+ } else if ((pg_offset & (SECTOR_SIZE - 1)) ||
+ (pg_len & (SECTOR_SIZE - 1))) {
+ /* ahh, nasty case. We have to do sync full sector
+ * read-modify-write cycles.
+ */
+ unsigned int saved_offset = pg_offset;
+ ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
+ pg_len, false);
+ pg_offset = round_down(pg_offset, SECTOR_SIZE);
+ pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
+ - pg_offset;
}
- bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
+ bio = do_add_page_to_bio(bio, wdata->npages - i, WRITE,
isect, pages[i], be,
- bl_end_io_write, par);
+ bl_end_io_write, par,
+ pg_offset, pg_len);
if (IS_ERR(bio)) {
wdata->pnfs_error = PTR_ERR(bio);
bio = NULL;
goto out;
}
+ offset += saved_len;
+ count -= saved_len;
isect += PAGE_CACHE_SECTORS;
last_isect = isect;
extent_length -= PAGE_CACHE_SECTORS;
@@ -706,17 +859,16 @@ next_page:
}

write_done:
- wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
- if (count < wdata->res.count) {
- wdata->res.count = count;
- }
+ wdata->res.count = wdata->args.count;
out:
bl_put_extent(be);
+ bl_put_extent(cow_read);
bl_submit_bio(WRITE, bio);
put_parallel(par);
return PNFS_ATTEMPTED;
out_mds:
bl_put_extent(be);
+ bl_put_extent(cow_read);
kfree(par);
return PNFS_NOT_ATTEMPTED;
}
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index 0335069..39bb51a 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -41,6 +41,7 @@

#define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
#define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
+#define SECTOR_SIZE (1 << SECTOR_SHIFT)

struct block_mount_id {
spinlock_t bm_lock; /* protects list */
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index 0984a21..15f60d0 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -143,15 +143,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)

p->signal->autogroup = autogroup_kref_get(ag);

- if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
- goto out;
-
t = p;
do {
sched_move_task(t);
} while_each_thread(p, t);

-out:
unlock_task_sighand(p, &flags);
autogroup_kref_put(prev);
}
diff --git a/kernel/sched/auto_group.h b/kernel/sched/auto_group.h
index 8bd0471..443232e 100644
--- a/kernel/sched/auto_group.h
+++ b/kernel/sched/auto_group.h
@@ -4,11 +4,6 @@
#include <linux/rwsem.h>

struct autogroup {
- /*
- * reference doesn't mean how many thread attach to this
- * autogroup now. It just stands for the number of task
- * could use this autogroup.
- */
struct kref kref;
struct task_group *tg;
struct rw_semaphore lock;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 56f793d..bcb9d34 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2040,8 +2040,10 @@ static int rescuer_thread(void *__wq)
repeat:
set_current_state(TASK_INTERRUPTIBLE);

- if (kthread_should_stop())
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
return 0;
+ }

/*
* See whether any cpu is asking for help. Unbounded
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 274c3cc..d86fb20 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1481,9 +1481,17 @@ int soft_offline_page(struct page *page, int flags)
{
int ret;
unsigned long pfn = page_to_pfn(page);
+ struct page *hpage = compound_trans_head(page);

if (PageHuge(page))
return soft_offline_huge_page(page, flags);
+ if (PageTransHuge(hpage)) {
+ if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
+ pr_info("soft offline: %#lx: failed to split THP\n",
+ pfn);
+ return -EBUSY;
+ }
+ }

ret = get_any_page(page, pfn, flags);
if (ret < 0)
diff --git a/mm/sparse.c b/mm/sparse.c
index a8bc7d3..290dba2 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -619,7 +619,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
return; /* XXX: Not implemented yet */
}
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
}
#else
@@ -660,10 +660,11 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
get_order(sizeof(struct page) * nr_pages));
}

-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
unsigned long maps_section_nr, removing_section_nr, i;
unsigned long magic;
+ struct page *page = virt_to_page(memmap);

for (i = 0; i < nr_pages; i++, page++) {
magic = (unsigned long) page->lru.next;
@@ -712,13 +713,10 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
*/

if (memmap) {
- struct page *memmap_page;
- memmap_page = virt_to_page(memmap);
-
nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
>> PAGE_SHIFT;

- free_map_bootmem(memmap_page, nr_pages);
+ free_map_bootmem(memmap, nr_pages);
}
}

diff --git a/scripts/package/buildtar b/scripts/package/buildtar
index 8a7b155..d0d748e 100644
--- a/scripts/package/buildtar
+++ b/scripts/package/buildtar
@@ -109,7 +109,7 @@ esac
if tar --owner=root --group=root --help >/dev/null 2>&1; then
opts="--owner=root --group=root"
fi
- tar cf - . $opts | ${compress} > "${tarball}${file_ext}"
+ tar cf - boot/* lib/* $opts | ${compress} > "${tarball}${file_ext}"
)

echo "Tarball successfully created in ${tarball}${file_ext}"
